Diffstat (limited to 'test')
-rw-r--r-- test/README.md | 97
-rw-r--r-- test/functional/.gitignore | 2
-rw-r--r-- test/functional/README.md | 108
-rwxr-xr-x test/functional/abandonconflict.py | 166
-rwxr-xr-x test/functional/assumevalid.py | 190
-rwxr-xr-x test/functional/bip65-cltv-p2p.py | 181
-rwxr-xr-x test/functional/bip65-cltv.py | 85
-rwxr-xr-x test/functional/bip68-112-113-p2p.py | 539
-rwxr-xr-x test/functional/bip68-sequence.py | 398
-rwxr-xr-x test/functional/bip9-softforks.py | 246
-rwxr-xr-x test/functional/bipdersig-p2p.py | 188
-rwxr-xr-x test/functional/bipdersig.py | 84
-rwxr-xr-x test/functional/blockchain.py | 83
-rwxr-xr-x test/functional/bumpfee.py | 323
-rw-r--r-- test/functional/config.ini.in | 18
-rwxr-xr-x test/functional/create_cache.py | 30
-rwxr-xr-x test/functional/decodescript.py | 185
-rwxr-xr-x test/functional/disablewallet.py | 39
-rwxr-xr-x test/functional/forknotify.py | 63
-rwxr-xr-x test/functional/fundrawtransaction.py | 733
-rwxr-xr-x test/functional/getblocktemplate_longpoll.py | 73
-rwxr-xr-x test/functional/getblocktemplate_proposals.py | 161
-rwxr-xr-x test/functional/getchaintips.py | 65
-rwxr-xr-x test/functional/httpbasics.py | 110
-rwxr-xr-x test/functional/import-rescan.py | 193
-rwxr-xr-x test/functional/importmulti.py | 453
-rwxr-xr-x test/functional/importprunedfunds.py | 124
-rwxr-xr-x test/functional/invalidateblock.py | 71
-rwxr-xr-x test/functional/invalidblockrequest.py | 116
-rwxr-xr-x test/functional/invalidtxrequest.py | 73
-rwxr-xr-x test/functional/keypool.py | 72
-rwxr-xr-x test/functional/listsinceblock.py | 81
-rwxr-xr-x test/functional/listtransactions.py | 207
-rwxr-xr-x test/functional/maxblocksinflight.py | 97
-rwxr-xr-x test/functional/maxuploadtarget.py | 233
-rwxr-xr-x test/functional/mempool_limit.py | 53
-rwxr-xr-x test/functional/mempool_packages.py | 239
-rwxr-xr-x test/functional/mempool_reorg.py | 107
-rwxr-xr-x test/functional/mempool_resurrect_test.py | 77
-rwxr-xr-x test/functional/mempool_spendcoinbase.py | 62
-rwxr-xr-x test/functional/merkle_blocks.py | 85
-rwxr-xr-x test/functional/multi_rpc.py | 117
-rwxr-xr-x test/functional/nodehandling.py | 80
-rwxr-xr-x test/functional/nulldummy.py | 136
-rwxr-xr-x test/functional/p2p-acceptblock.py | 277
-rwxr-xr-x test/functional/p2p-compactblocks.py | 968
-rwxr-xr-x test/functional/p2p-feefilter.py | 112
-rwxr-xr-x test/functional/p2p-fullblocktest.py | 1290
-rwxr-xr-x test/functional/p2p-leaktests.py | 143
-rwxr-xr-x test/functional/p2p-mempool.py | 106
-rwxr-xr-x test/functional/p2p-segwit.py | 2033
-rwxr-xr-x test/functional/p2p-timeouts.py | 102
-rwxr-xr-x test/functional/p2p-versionbits-warning.py | 163
-rwxr-xr-x test/functional/preciousblock.py | 115
-rwxr-xr-x test/functional/prioritise_transaction.py | 124
-rwxr-xr-x test/functional/proxy_test.py | 204
-rwxr-xr-x test/functional/pruning.py | 443
-rwxr-xr-x test/functional/rawtransactions.py | 190
-rwxr-xr-x test/functional/receivedby.py | 147
-rwxr-xr-x test/functional/reindex.py | 48
-rwxr-xr-x test/functional/replace-by-fee.py | 525
-rwxr-xr-x test/functional/rest.py | 331
-rwxr-xr-x test/functional/rpcbind_test.py | 98
-rwxr-xr-x test/functional/rpcnamedargs.py | 47
-rwxr-xr-x test/functional/segwit.py | 642
-rwxr-xr-x test/functional/sendheaders.py | 605
-rwxr-xr-x test/functional/signmessages.py | 40
-rwxr-xr-x test/functional/signrawtransactions.py | 137
-rwxr-xr-x test/functional/smartfees.py | 266
-rw-r--r-- test/functional/test_framework/__init__.py | 0
-rw-r--r-- test/functional/test_framework/address.py | 69
-rw-r--r-- test/functional/test_framework/authproxy.py | 190
-rw-r--r-- test/functional/test_framework/bignum.py | 97
-rw-r--r-- test/functional/test_framework/blockstore.py | 170
-rw-r--r-- test/functional/test_framework/blocktools.py | 105
-rwxr-xr-x test/functional/test_framework/comptool.py | 410
-rw-r--r-- test/functional/test_framework/coverage.py | 103
-rw-r--r-- test/functional/test_framework/key.py | 232
-rwxr-xr-x test/functional/test_framework/mininode.py | 1797
-rw-r--r-- test/functional/test_framework/netutil.py | 156
-rw-r--r-- test/functional/test_framework/script.py | 939
-rw-r--r-- test/functional/test_framework/siphash.py | 63
-rw-r--r-- test/functional/test_framework/socks5.py | 160
-rwxr-xr-x test/functional/test_framework/test_framework.py | 244
-rw-r--r-- test/functional/test_framework/util.py | 670
-rwxr-xr-x test/functional/test_runner.py | 393
-rwxr-xr-x test/functional/txn_clone.py | 156
-rwxr-xr-x test/functional/txn_doublespend.py | 144
-rwxr-xr-x test/functional/wallet-accounts.py | 100
-rwxr-xr-x test/functional/wallet-dump.py | 109
-rwxr-xr-x test/functional/wallet-hd.py | 95
-rwxr-xr-x test/functional/wallet.py | 395
-rwxr-xr-x test/functional/walletbackup.py | 198
-rwxr-xr-x test/functional/zapwallettxes.py | 86
-rwxr-xr-x test/functional/zmq_test.py | 94
-rw-r--r-- test/util/bctest.py | 127
-rwxr-xr-x test/util/bitcoin-util-test.py | 36
-rw-r--r-- test/util/buildenv.py.in | 4
-rw-r--r-- test/util/data/bitcoin-util-test.json | 356
-rw-r--r-- test/util/data/blanktxv1.hex | 1
-rw-r--r-- test/util/data/blanktxv1.json | 11
-rw-r--r-- test/util/data/blanktxv2.hex | 1
-rw-r--r-- test/util/data/blanktxv2.json | 11
-rw-r--r-- test/util/data/tt-delin1-out.hex | 1
-rw-r--r-- test/util/data/tt-delin1-out.json | 217
-rw-r--r-- test/util/data/tt-delout1-out.hex | 1
-rw-r--r-- test/util/data/tt-delout1-out.json | 213
-rw-r--r-- test/util/data/tt-locktime317000-out.hex | 1
-rw-r--r-- test/util/data/tt-locktime317000-out.json | 226
-rw-r--r-- test/util/data/tx394b54bb.hex | 1
-rw-r--r-- test/util/data/txcreate1.hex | 1
-rw-r--r-- test/util/data/txcreate1.json | 64
-rw-r--r-- test/util/data/txcreate2.hex | 1
-rw-r--r-- test/util/data/txcreate2.json | 20
-rw-r--r-- test/util/data/txcreatedata1.hex | 1
-rw-r--r-- test/util/data/txcreatedata1.json | 42
-rw-r--r-- test/util/data/txcreatedata2.hex | 1
-rw-r--r-- test/util/data/txcreatedata2.json | 42
-rw-r--r-- test/util/data/txcreatedata_seq0.hex | 1
-rw-r--r-- test/util/data/txcreatedata_seq0.json | 33
-rw-r--r-- test/util/data/txcreatedata_seq1.hex | 1
-rw-r--r-- test/util/data/txcreatedata_seq1.json | 42
-rw-r--r-- test/util/data/txcreatemultisig1.hex | 1
-rw-r--r-- test/util/data/txcreatemultisig1.json | 26
-rw-r--r-- test/util/data/txcreatemultisig2.hex | 1
-rw-r--r-- test/util/data/txcreatemultisig2.json | 24
-rw-r--r-- test/util/data/txcreatemultisig3.hex | 1
-rw-r--r-- test/util/data/txcreatemultisig3.json | 20
-rw-r--r-- test/util/data/txcreatemultisig4.hex | 1
-rw-r--r-- test/util/data/txcreatemultisig4.json | 24
-rw-r--r-- test/util/data/txcreateoutpubkey1.hex | 1
-rw-r--r-- test/util/data/txcreateoutpubkey1.json | 24
-rw-r--r-- test/util/data/txcreateoutpubkey2.hex | 1
-rw-r--r-- test/util/data/txcreateoutpubkey2.json | 20
-rw-r--r-- test/util/data/txcreateoutpubkey3.hex | 1
-rw-r--r-- test/util/data/txcreateoutpubkey3.json | 24
-rw-r--r-- test/util/data/txcreatescript1.hex | 1
-rw-r--r-- test/util/data/txcreatescript1.json | 20
-rw-r--r-- test/util/data/txcreatescript2.hex | 1
-rw-r--r-- test/util/data/txcreatescript2.json | 24
-rw-r--r-- test/util/data/txcreatescript3.hex | 1
-rw-r--r-- test/util/data/txcreatescript3.json | 20
-rw-r--r-- test/util/data/txcreatescript4.hex | 1
-rw-r--r-- test/util/data/txcreatescript4.json | 24
-rw-r--r-- test/util/data/txcreatesignv1.hex | 1
-rw-r--r-- test/util/data/txcreatesignv1.json | 33
-rw-r--r-- test/util/data/txcreatesignv2.hex | 1
147 files changed, 24926 insertions, 0 deletions
diff --git a/test/README.md b/test/README.md
new file mode 100644
index 0000000000..dec8db960d
--- /dev/null
+++ b/test/README.md
@@ -0,0 +1,97 @@
+This directory contains integration tests that test bitcoind and its
+utilities in their entirety. It does not contain unit tests, which
+can be found in [/src/test](/src/test), [/src/wallet/test](/src/wallet/test),
+etc.
+
+There are currently two sets of tests in this directory:
+
+- [functional](/test/functional), which tests the functionality of
+bitcoind and bitcoin-qt by interacting with them through the RPC and P2P
+interfaces.
+- [util](/test/util), which tests the bitcoin utilities (currently only
+bitcoin-tx).
+
+The util tests are run as part of the `make check` target. The functional
+tests are run by the Travis continuous integration process whenever a pull
+request is opened. Both sets of tests can also be run locally.
+
+Functional test dependencies
+============================
+The ZMQ functional test requires a Python ZMQ library. To install it:
+
+- on Debian/Ubuntu, run `sudo apt-get install python3-zmq`
+- on macOS, run `pip3 install pyzmq`
+
+Running tests locally
+=====================
+
+Functional tests
+----------------
+
+You can run any single test by calling
+
+ test/functional/test_runner.py <testname>
+
+Or you can run any combination of tests by calling
+
+ test/functional/test_runner.py <testname1> <testname2> <testname3> ...
+
+Run the regression test suite with
+
+ test/functional/test_runner.py
+
+Run all possible tests with
+
+ test/functional/test_runner.py --extended
+
+By default, tests will be run in parallel. To specify how many jobs to run,
+append `--jobs=n` (default n=4).
+
+If you want to create a basic coverage report for the test suite, append `--coverage`.
+
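+For example, to run the wallet test with a coverage report and up to 8
+parallel jobs (flags as documented above):
+
+    test/functional/test_runner.py --coverage --jobs=8 wallet
+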
+Possible options, which apply to each individual test run:
+
+```
+ -h, --help show this help message and exit
+ --nocleanup Leave bitcoinds and test.* datadir on exit or error
+ --noshutdown Don't stop bitcoinds after the test execution
+ --srcdir=SRCDIR Source directory containing bitcoind/bitcoin-cli
+ (default: ../../src)
+ --tmpdir=TMPDIR Root directory for datadirs
+ --tracerpc Print out all RPC calls as they are made
+ --coveragedir=COVERAGEDIR
+ Write tested RPC commands into this directory
+```
+
+If you set the environment variable `PYTHON_DEBUG=1` you will get some debug
+output (example: `PYTHON_DEBUG=1 test/functional/test_runner.py wallet`).
+
+A 200-block -regtest blockchain and wallets for four nodes
+are created the first time a regression test is run and
+are stored in the cache/ directory. Each node has 25 mature
+blocks (25*50=1250 BTC) in its wallet.
+
+After the first run, the cache/ blockchain and wallets are
+copied into a temporary directory and used as the initial
+test state.
+
+If you get into a bad state, you should be able
+to recover with:
+
+```bash
+rm -rf cache
+killall bitcoind
+```
+
+Util tests
+----------
+
+Util tests can be run locally by running `test/util/bitcoin-util-test.py`.
+Use the `-v` option for verbose output.
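+For example:
+
+    test/util/bitcoin-util-test.py -v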
+
+Writing functional tests
+========================
+
+You are encouraged to write functional tests for new or existing features.
+Further information about the functional test framework and individual
+tests is found in [test/functional](/test/functional).
diff --git a/test/functional/.gitignore b/test/functional/.gitignore
new file mode 100644
index 0000000000..cb41d94423
--- /dev/null
+++ b/test/functional/.gitignore
@@ -0,0 +1,2 @@
+*.pyc
+cache
diff --git a/test/functional/README.md b/test/functional/README.md
new file mode 100644
index 0000000000..651b01f18a
--- /dev/null
+++ b/test/functional/README.md
@@ -0,0 +1,108 @@
+Regression tests
+================
+
+### [test_framework/authproxy.py](test_framework/authproxy.py)
+Taken from the [python-bitcoinrpc repository](https://github.com/jgarzik/python-bitcoinrpc).
+
+### [test_framework/test_framework.py](test_framework/test_framework.py)
+Base class for new regression tests.
+
+### [test_framework/util.py](test_framework/util.py)
+Generally useful functions.
+
+### [test_framework/mininode.py](test_framework/mininode.py)
+Basic code to support p2p connectivity to a bitcoind.
+
+### [test_framework/comptool.py](test_framework/comptool.py)
+Framework for comparison-tool style p2p tests.
+
+### [test_framework/script.py](test_framework/script.py)
+Utilities for manipulating transaction scripts (originally from python-bitcoinlib).
+
+### [test_framework/blockstore.py](test_framework/blockstore.py)
+Implements disk-backed block and tx storage.
+
+### [test_framework/key.py](test_framework/key.py)
+Wrapper around OpenSSL EC_Key (originally from python-bitcoinlib).
+
+### [test_framework/bignum.py](test_framework/bignum.py)
+Helpers for script.py.
+
+### [test_framework/blocktools.py](test_framework/blocktools.py)
+Helper functions for creating blocks and transactions.
+
+P2P test design notes
+---------------------
+
+## Mininode
+
+* ```mininode.py``` contains all the definitions for objects that pass
+over the network (```CBlock```, ```CTransaction```, etc.), along with the
+network-level wrappers for them (```msg_block```, ```msg_tx```, etc.).
+
+* P2P tests have two threads. One thread handles all network communication
+with the bitcoind(s) being tested (using Python's asyncore package); the other
+implements the test logic.
+
+* ```NodeConn``` is the class used to connect to a bitcoind. If you implement
+a callback class that derives from ```NodeConnCB``` and pass that to the
+```NodeConn``` object, your code will receive the appropriate callbacks when
+events of interest arrive.
+
+* You can pass the same handler to multiple ```NodeConn``` objects if you like,
+or pass different ones to each -- whatever makes the most sense for your test.
+
+* Call ```NetworkThread.start()``` after all ```NodeConn``` objects are created to
+start the networking thread. (Continue with the test logic in your existing
+thread.)
+
+* RPC calls are available in p2p tests.
+
+* Mininode can be used to write free-form tests in which specific p2p-protocol
+behavior is tested. Examples: ```p2p-acceptblock.py```, ```maxblocksinflight.py```.
+A minimal sketch of the setup follows.
+
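+Below is a minimal sketch of that pattern. It mirrors the ```BaseNode```
+setup used in ```assumevalid.py```; the ```PingNode``` class and the bare
+layout are hypothetical, for illustration only, and would normally live
+inside a test's ```run_test()```.
+
+```python
+from test_framework.mininode import (NodeConn, SingleNodeConnCB,
+                                     NetworkThread, msg_ping)
+from test_framework.util import p2p_port
+
+class PingNode(SingleNodeConnCB):
+    """Records the last pong received from the node."""
+    def __init__(self):
+        super().__init__()
+        self.last_pong = None
+
+    def on_pong(self, conn, message):
+        self.last_pong = message
+
+# Inside a BitcoinTestFramework test, self.nodes[0] is the bitcoind under test.
+handler = PingNode()
+connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], handler)
+handler.add_connection(connection)
+NetworkThread().start()    # network I/O runs on its own thread
+handler.wait_for_verack()  # version/verack handshake complete
+handler.send_message(msg_ping())
+```
+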
+## Comptool
+
+* Testing framework for writing tests that compare the block/tx acceptance
+behavior of a bitcoind against 1 or more other bitcoind instances, or against
+known outcomes, or both.
+
+* Set the ```num_nodes``` variable (defined in ```ComparisonTestFramework```) to start up
+1 or more nodes. If using 1 node, then ```--testbinary``` can be used as a command line
+option to change the bitcoind binary used by the test. If using 2 or more nodes,
+then ```--refbinary``` can optionally be used to change the bitcoind that will be used
+on nodes 2 and up.
+
+* Implement a (generator) function called ```get_tests()``` which yields ```TestInstance```s.
+Each ```TestInstance``` consists of:
+ - a list of ```[object, outcome, hash]``` entries
+ * ```object``` is a ```CBlock```, ```CTransaction```, or
+ ```CBlockHeader```. ```CBlock```s and ```CTransaction```s are tested for
+ acceptance. ```CBlockHeader```s can be used so that the test runner can deliver
+ complete headers chains when requested from the bitcoind, to allow writing
+ tests where blocks can be delivered out of order but still processed by
+ headers-first bitcoinds.
+ * ```outcome``` is ```True```, ```False```, or ```None```. If ```True```
+ or ```False```, the tip is compared with the expected tip -- either the
+ block passed in, or the hash specified as the optional 3rd entry. If
+ ```None``` is specified, then the test will compare all the bitcoinds
+ being tested to see if they all agree on what the best tip is.
+ * ```hash``` is the block hash of the tip to compare against. Optional to
+ specify; if left out then the hash of the block passed in will be used as
+ the expected tip. This allows for specifying an expected tip while testing
+ the handling of either invalid blocks or blocks delivered out of order,
+ which complete a longer chain.
+ - ```sync_every_block```: ```True/False```. If ```False```, then all blocks
+ are inv'ed together, and the test runner waits until the node receives the
+ last one, and tests only the last block for tip acceptance using the
+ outcome and specified tip. If ```True```, then each block is tested in
+ sequence and synced (this is slower when processing many blocks).
+ - ```sync_every_transaction```: ```True/False```. Analogous to
+ ```sync_every_block```, except if the outcome on the last tx is ```None```,
+ then the contents of the entire mempool are compared across all bitcoind
+ connections. If ```True``` or ```False```, then only the last tx's
+ acceptance is tested against the given outcome.
+
+* For examples of tests written in this framework, see
+ ```invalidblockrequest.py``` and ```p2p-fullblocktest.py```. A minimal
+ sketch of the ```get_tests()``` boilerplate follows.
+
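+Below is that sketch. The test class is hypothetical; the boilerplate
+mirrors ```bip65-cltv-p2p.py``` and ```invalidblockrequest.py```, and builds
+a single block on a fresh chain that the node is expected to accept.
+
+```python
+import time
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.comptool import TestManager, TestInstance
+from test_framework.blocktools import create_block, create_coinbase
+from test_framework.mininode import NetworkThread
+
+class MinimalCompTest(ComparisonTestFramework):
+    def __init__(self):
+        super().__init__()
+        self.num_nodes = 1
+
+    def run_test(self):
+        test = TestManager(self, self.options.tmpdir)
+        test.add_all_connections(self.nodes)
+        NetworkThread().start()  # network I/O runs on its own thread
+        test.run()               # drives get_tests() below
+
+    def get_tests(self):
+        # Build one block on the current tip and expect it to be accepted.
+        tip = int(self.nodes[0].getbestblockhash(), 16)
+        block = create_block(tip, create_coinbase(1), int(time.time()) + 1)
+        block.solve()
+        yield TestInstance([[block, True]])
+
+if __name__ == '__main__':
+    MinimalCompTest().main()
+```
+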
diff --git a/test/functional/abandonconflict.py b/test/functional/abandonconflict.py
new file mode 100755
index 0000000000..887dbebd4f
--- /dev/null
+++ b/test/functional/abandonconflict.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the abandontransaction RPC.
+
+ The abandontransaction RPC marks a transaction and all its in-wallet
+ descendants as abandoned which allows their inputs to be respent. It can be
+ used to replace "stuck" or evicted transactions. It only works on transactions
+ which are not included in a block and are not currently in the mempool. It has
+ no effect on transactions which are already conflicted or abandoned.
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+import urllib.parse
+
+class AbandonConflictTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 2
+ self.setup_clean_chain = False
+
+ def setup_network(self):
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-minrelaytxfee=0.00001"]))
+ self.nodes.append(start_node(1, self.options.tmpdir))
+ connect_nodes(self.nodes[0], 1)
+
+ def run_test(self):
+ self.nodes[1].generate(100)
+ sync_blocks(self.nodes)
+ balance = self.nodes[0].getbalance()
+ txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
+ txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
+ txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
+ sync_mempools(self.nodes)
+ self.nodes[1].generate(1)
+
+ sync_blocks(self.nodes)
+ newbalance = self.nodes[0].getbalance()
+ assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
+ balance = newbalance
+
+ url = urllib.parse.urlparse(self.nodes[1].url)
+ self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
+
+ # Identify the 10btc outputs
+ nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
+ nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
+ nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
+
+ inputs =[]
+ # spend 10btc outputs from txA and txB
+ inputs.append({"txid":txA, "vout":nA})
+ inputs.append({"txid":txB, "vout":nB})
+ outputs = {}
+
+ outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
+ outputs[self.nodes[1].getnewaddress()] = Decimal("5")
+ signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
+ txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
+
+ # Identify the 14.99998btc output
+ nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
+
+ #Create a child tx spending AB1 and C
+ inputs = []
+ inputs.append({"txid":txAB1, "vout":nAB})
+ inputs.append({"txid":txC, "vout":nC})
+ outputs = {}
+ outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
+ signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
+ txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
+
+        # In-mempool txs from self should increase balance from change
+ newbalance = self.nodes[0].getbalance()
+ assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996"))
+ balance = newbalance
+
+ # Restart the node with a higher min relay fee so the parent tx is no longer in mempool
+ # TODO: redo with eviction
+ stop_node(self.nodes[0],0)
+ self.nodes[0]=start_node(0, self.options.tmpdir, ["-minrelaytxfee=0.0001"])
+
+ # Verify txs no longer in mempool
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+
+        # Not-in-mempool txs from self should only reduce balance
+ # inputs are still spent, but change not received
+ newbalance = self.nodes[0].getbalance()
+ assert_equal(newbalance, balance - Decimal("24.9996"))
+        # Unconfirmed received funds that are not in the mempool also shouldn't show
+ # up in unconfirmed balance
+ unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
+ assert_equal(unconfbalance, newbalance)
+ # Also shouldn't show up in listunspent
+ assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
+ balance = newbalance
+
+ # Abandon original transaction and verify inputs are available again
+ # including that the child tx was also abandoned
+ self.nodes[0].abandontransaction(txAB1)
+ newbalance = self.nodes[0].getbalance()
+ assert_equal(newbalance, balance + Decimal("30"))
+ balance = newbalance
+
+ # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
+ stop_node(self.nodes[0],0)
+ self.nodes[0]=start_node(0, self.options.tmpdir, ["-minrelaytxfee=0.00001"])
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+ assert_equal(self.nodes[0].getbalance(), balance)
+
+        # But if it is received again then it is unabandoned
+        # And since it is now in the mempool, the change is available
+        # But its child tx remains abandoned
+ self.nodes[0].sendrawtransaction(signed["hex"])
+ newbalance = self.nodes[0].getbalance()
+ assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
+ balance = newbalance
+
+        # Send child tx again so it is unabandoned
+ self.nodes[0].sendrawtransaction(signed2["hex"])
+ newbalance = self.nodes[0].getbalance()
+ assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
+ balance = newbalance
+
+ # Remove using high relay fee again
+ stop_node(self.nodes[0],0)
+ self.nodes[0]=start_node(0, self.options.tmpdir, ["-minrelaytxfee=0.0001"])
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+ newbalance = self.nodes[0].getbalance()
+ assert_equal(newbalance, balance - Decimal("24.9996"))
+ balance = newbalance
+
+ # Create a double spend of AB1 by spending again from only A's 10 output
+ # Mine double spend from node 1
+ inputs =[]
+ inputs.append({"txid":txA, "vout":nA})
+ outputs = {}
+ outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
+ tx = self.nodes[0].createrawtransaction(inputs, outputs)
+ signed = self.nodes[0].signrawtransaction(tx)
+ self.nodes[1].sendrawtransaction(signed["hex"])
+ self.nodes[1].generate(1)
+
+ connect_nodes(self.nodes[0], 1)
+ sync_blocks(self.nodes)
+
+ # Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
+ newbalance = self.nodes[0].getbalance()
+ assert_equal(newbalance, balance + Decimal("20"))
+ balance = newbalance
+
+ # There is currently a minor bug around this and so this test doesn't work. See Issue #7315
+ # Invalidate the block with the double spend and B's 10 BTC output should no longer be available
+        # C's 10 BTC output should not be available either
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+ newbalance = self.nodes[0].getbalance()
+ #assert_equal(newbalance, balance - Decimal("10"))
+        self.log.info("If the balance has not declined after invalidateblock then the out-of-mempool wallet tx which is no longer")
+        self.log.info("conflicted has not been restored, causing its inputs to still be seen as spent. See Issue #7315")
+ self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
+
+if __name__ == '__main__':
+ AbandonConflictTest().main()
diff --git a/test/functional/assumevalid.py b/test/functional/assumevalid.py
new file mode 100755
index 0000000000..c60c8e6d1a
--- /dev/null
+++ b/test/functional/assumevalid.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test logic for skipping signature validation on old blocks.
+
+Test logic for skipping signature validation on blocks which we've assumed
+valid (https://github.com/bitcoin/bitcoin/pull/9484)
+
+We build a chain that includes an invalid signature for one of the
+transactions:
+
+ 0: genesis block
+ 1: block 1 with coinbase transaction output.
+ 2-101: bury that block with 100 blocks so the coinbase transaction
+ output can be spent
+ 102: a block containing a transaction spending the coinbase
+ transaction output. The transaction has an invalid signature.
+ 103-2202: bury the bad block with just over two weeks' worth of blocks
+ (2100 blocks)
+
+Start three nodes:
+
+ - node0 has no -assumevalid parameter. Try to sync to block 2202. It will
+ reject block 102 and only sync as far as block 101
+ - node1 has -assumevalid set to the hash of block 102. Try to sync to
+ block 2202. node1 will sync all the way to block 2202.
+ - node2 has -assumevalid set to the hash of block 102. Try to sync to
+   block 200. node2 will reject block 102: although its hash is marked as
+   assumed-valid, it is not buried by at least two weeks' worth of work,
+   so signature validation is not skipped.
+"""
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.blocktools import create_block, create_coinbase
+from test_framework.key import CECKey
+from test_framework.script import *
+
+class BaseNode(SingleNodeConnCB):
+ def __init__(self):
+ SingleNodeConnCB.__init__(self)
+ self.last_inv = None
+ self.last_headers = None
+ self.last_block = None
+ self.last_getdata = None
+ self.block_announced = False
+ self.last_getheaders = None
+ self.disconnected = False
+ self.last_blockhash_announced = None
+
+ def on_close(self, conn):
+ self.disconnected = True
+
+ def wait_for_disconnect(self, timeout=60):
+ test_function = lambda: self.disconnected
+ assert(wait_until(test_function, timeout=timeout))
+ return
+
+ def send_header_for_blocks(self, new_blocks):
+ headers_message = msg_headers()
+ headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
+ self.send_message(headers_message)
+
+class AssumeValidTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 3
+
+ def setup_network(self):
+ # Start node0. We don't start the other nodes yet since
+ # we need to pre-mine a block with an invalid transaction
+ # signature so we can pass in the block hash as assumevalid.
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir))
+
+ def run_test(self):
+
+ # Connect to node0
+ node0 = BaseNode()
+ connections = []
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
+ node0.add_connection(connections[0])
+
+ NetworkThread().start() # Start up network handling in another thread
+ node0.wait_for_verack()
+
+ # Build the blockchain
+ self.tip = int(self.nodes[0].getbestblockhash(), 16)
+ self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
+
+ self.blocks = []
+
+ # Get a pubkey for the coinbase TXO
+ coinbase_key = CECKey()
+ coinbase_key.set_secretbytes(b"horsebattery")
+ coinbase_pubkey = coinbase_key.get_pubkey()
+
+ # Create the first block with a coinbase output to our key
+ height = 1
+ block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
+ self.blocks.append(block)
+ self.block_time += 1
+ block.solve()
+ # Save the coinbase for later
+ self.block1 = block
+ self.tip = block.sha256
+ height += 1
+
+ # Bury the block 100 deep so the coinbase output is spendable
+ for i in range(100):
+ block = create_block(self.tip, create_coinbase(height), self.block_time)
+ block.solve()
+ self.blocks.append(block)
+ self.tip = block.sha256
+ self.block_time += 1
+ height += 1
+
+ # Create a transaction spending the coinbase output with an invalid (null) signature
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
+ tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
+ tx.calc_sha256()
+
+ block102 = create_block(self.tip, create_coinbase(height), self.block_time)
+ self.block_time += 1
+ block102.vtx.extend([tx])
+ block102.hashMerkleRoot = block102.calc_merkle_root()
+ block102.rehash()
+ block102.solve()
+ self.blocks.append(block102)
+ self.tip = block102.sha256
+ self.block_time += 1
+ height += 1
+
+ # Bury the assumed valid block 2100 deep
+ for i in range(2100):
+ block = create_block(self.tip, create_coinbase(height), self.block_time)
+ block.nVersion = 4
+ block.solve()
+ self.blocks.append(block)
+ self.tip = block.sha256
+ self.block_time += 1
+ height += 1
+
+ # Start node1 and node2 with assumevalid so they accept a block with a bad signature.
+ self.nodes.append(start_node(1, self.options.tmpdir,
+ ["-assumevalid=" + hex(block102.sha256)]))
+ node1 = BaseNode() # connects to node1
+ connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
+ node1.add_connection(connections[1])
+ node1.wait_for_verack()
+
+ self.nodes.append(start_node(2, self.options.tmpdir,
+ ["-assumevalid=" + hex(block102.sha256)]))
+ node2 = BaseNode() # connects to node2
+ connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
+ node2.add_connection(connections[2])
+ node2.wait_for_verack()
+
+ # send header lists to all three nodes
+ node0.send_header_for_blocks(self.blocks[0:2000])
+ node0.send_header_for_blocks(self.blocks[2000:])
+ node1.send_header_for_blocks(self.blocks[0:2000])
+ node1.send_header_for_blocks(self.blocks[2000:])
+ node2.send_header_for_blocks(self.blocks[0:200])
+
+ # Send 102 blocks to node0. Block 102 will be rejected.
+ for i in range(101):
+ node0.send_message(msg_block(self.blocks[i]))
+ node0.sync_with_ping() # make sure the most recent block is synced
+ node0.send_message(msg_block(self.blocks[101]))
+ assert_equal(self.nodes[0].getblock(self.nodes[0].getbestblockhash())['height'], 101)
+
+        # Send 2202 blocks to node1. All blocks will be accepted.
+ for i in range(2202):
+ node1.send_message(msg_block(self.blocks[i]))
+ node1.sync_with_ping() # make sure the most recent block is synced
+ assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)
+
+ # Send 102 blocks to node2. Block 102 will be rejected.
+ for i in range(101):
+ node2.send_message(msg_block(self.blocks[i]))
+ node2.sync_with_ping() # make sure the most recent block is synced
+ node2.send_message(msg_block(self.blocks[101]))
+ assert_equal(self.nodes[2].getblock(self.nodes[2].getbestblockhash())['height'], 101)
+
+if __name__ == '__main__':
+    AssumeValidTest().main()
diff --git a/test/functional/bip65-cltv-p2p.py b/test/functional/bip65-cltv-p2p.py
new file mode 100755
index 0000000000..63d05e8fc9
--- /dev/null
+++ b/test/functional/bip65-cltv-p2p.py
@@ -0,0 +1,181 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test BIP65 (CHECKLOCKTIMEVERIFY).
+
+Connect to a single node.
+Mine 2 (version 3) blocks (save the coinbases for later).
+Generate 398 more version 3 blocks, verify the node accepts.
+Mine 749 version 4 blocks, verify the node accepts.
+Check that the new CLTV rules are not enforced on the 750th version 4 block.
+Mine 199 new-version blocks.
+Mine 1 old-version block.
+Mine 1 new-version block.
+Check that the new CLTV rules are enforced on the 951st version 4 block.
+Mine 1 old-version block, see that the node rejects it.
+"""
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.util import *
+from test_framework.mininode import CTransaction, NetworkThread
+from test_framework.blocktools import create_coinbase, create_block
+from test_framework.comptool import TestInstance, TestManager
+from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP
+from io import BytesIO
+import time
+
+def cltv_invalidate(tx):
+ '''Modify the signature in vin 0 of the tx to fail CLTV
+
+ Prepends -1 CLTV DROP in the scriptSig itself.
+ '''
+ tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP] +
+ list(CScript(tx.vin[0].scriptSig)))
+
+
+class BIP65Test(ComparisonTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 1
+
+ def setup_network(self):
+ # Must set the blockversion for this test
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
+ extra_args=[['-whitelist=127.0.0.1', '-blockversion=3']],
+ binary=[self.options.testbinary])
+
+ def run_test(self):
+ test = TestManager(self, self.options.tmpdir)
+ test.add_all_connections(self.nodes)
+ NetworkThread().start() # Start up network handling in another thread
+ test.run()
+
+ def create_transaction(self, node, coinbase, to_address, amount):
+ from_txid = node.getblock(coinbase)['tx'][0]
+ inputs = [{ "txid" : from_txid, "vout" : 0}]
+ outputs = { to_address : amount }
+ rawtx = node.createrawtransaction(inputs, outputs)
+ signresult = node.signrawtransaction(rawtx)
+ tx = CTransaction()
+ f = BytesIO(hex_str_to_bytes(signresult['hex']))
+ tx.deserialize(f)
+ return tx
+
+ def get_tests(self):
+
+ self.coinbase_blocks = self.nodes[0].generate(2)
+ height = 3 # height of the next block to build
+ self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
+ self.nodeaddress = self.nodes[0].getnewaddress()
+ self.last_block_time = int(time.time())
+
+ ''' 398 more version 3 blocks '''
+ test_blocks = []
+ for i in range(398):
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 3
+ block.rehash()
+ block.solve()
+ test_blocks.append([block, True])
+ self.last_block_time += 1
+ self.tip = block.sha256
+ height += 1
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ ''' Mine 749 version 4 blocks '''
+ test_blocks = []
+ for i in range(749):
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 4
+ block.rehash()
+ block.solve()
+ test_blocks.append([block, True])
+ self.last_block_time += 1
+ self.tip = block.sha256
+ height += 1
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ '''
+ Check that the new CLTV rules are not enforced in the 750th
+        version 4 block.
+ '''
+ spendtx = self.create_transaction(self.nodes[0],
+ self.coinbase_blocks[0], self.nodeaddress, 1.0)
+ cltv_invalidate(spendtx)
+ spendtx.rehash()
+
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 4
+ block.vtx.append(spendtx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+
+ self.last_block_time += 1
+ self.tip = block.sha256
+ height += 1
+ yield TestInstance([[block, True]])
+
+ ''' Mine 199 new version blocks on last valid tip '''
+ test_blocks = []
+ for i in range(199):
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 4
+ block.rehash()
+ block.solve()
+ test_blocks.append([block, True])
+ self.last_block_time += 1
+ self.tip = block.sha256
+ height += 1
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ ''' Mine 1 old version block '''
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 3
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ self.tip = block.sha256
+ height += 1
+ yield TestInstance([[block, True]])
+
+ ''' Mine 1 new version block '''
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 4
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ self.tip = block.sha256
+ height += 1
+ yield TestInstance([[block, True]])
+
+ '''
+ Check that the new CLTV rules are enforced in the 951st version 4
+ block.
+ '''
+ spendtx = self.create_transaction(self.nodes[0],
+ self.coinbase_blocks[1], self.nodeaddress, 1.0)
+ cltv_invalidate(spendtx)
+ spendtx.rehash()
+
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 4
+ block.vtx.append(spendtx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ yield TestInstance([[block, False]])
+
+ ''' Mine 1 old version block, should be invalid '''
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 3
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ yield TestInstance([[block, False]])
+
+if __name__ == '__main__':
+ BIP65Test().main()
diff --git a/test/functional/bip65-cltv.py b/test/functional/bip65-cltv.py
new file mode 100755
index 0000000000..7f13bb9952
--- /dev/null
+++ b/test/functional/bip65-cltv.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the CHECKLOCKTIMEVERIFY (BIP65) soft-fork logic."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class BIP65Test(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 3
+ self.setup_clean_chain = False
+
+ def setup_network(self):
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir, []))
+ self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=3"]))
+ self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=4"]))
+ connect_nodes(self.nodes[1], 0)
+ connect_nodes(self.nodes[2], 0)
+ self.is_network_split = False
+ self.sync_all()
+
+ def run_test(self):
+ cnt = self.nodes[0].getblockcount()
+
+        # Mine 200 old-version blocks
+        self.nodes[1].generate(200)
+        cnt += 100  # the check below accounts for the other 100
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 100):
+            raise AssertionError("Failed to mine 200 version=3 blocks")
+
+ # Mine 750 new-version blocks
+ for i in range(15):
+ self.nodes[2].generate(50)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 850):
+ raise AssertionError("Failed to mine 750 version=4 blocks")
+
+ # TODO: check that new CHECKLOCKTIMEVERIFY rules are not enforced
+
+ # Mine 1 new-version block
+ self.nodes[2].generate(1)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 851):
+            raise AssertionError("Failed to mine a version=4 block")
+
+ # TODO: check that new CHECKLOCKTIMEVERIFY rules are enforced
+
+ # Mine 198 new-version blocks
+ for i in range(2):
+ self.nodes[2].generate(99)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 1049):
+ raise AssertionError("Failed to mine 198 version=4 blocks")
+
+ # Mine 1 old-version block
+ self.nodes[1].generate(1)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 1050):
+ raise AssertionError("Failed to mine a version=3 block after 949 version=4 blocks")
+
+        # Mine 1 new-version block
+ self.nodes[2].generate(1)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 1051):
+ raise AssertionError("Failed to mine a version=4 block")
+
+        # Mine 1 old-version block. This should fail
+        assert_raises_jsonrpc(-1, "CreateNewBlock: TestBlockValidity failed: bad-version(0x00000003)", self.nodes[1].generate, 1)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 1051):
+ raise AssertionError("Accepted a version=3 block after 950 version=4 blocks")
+
+        # Mine 1 new-version block
+ self.nodes[2].generate(1)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 1052):
+ raise AssertionError("Failed to mine a version=4 block")
+
+if __name__ == '__main__':
+ BIP65Test().main()
diff --git a/test/functional/bip68-112-113-p2p.py b/test/functional/bip68-112-113-p2p.py
new file mode 100755
index 0000000000..0867f42585
--- /dev/null
+++ b/test/functional/bip68-112-113-p2p.py
@@ -0,0 +1,539 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test activation of the first version bits soft fork.
+
+This soft fork will activate the following BIPS:
+BIP 68 - nSequence relative lock times
+BIP 112 - CHECKSEQUENCEVERIFY
+BIP 113 - MedianTimePast semantics for nLockTime
+
+regtest lock-in with 108/144 block signalling
+activation after a further 144 blocks
+
+mine 82 blocks whose coinbases will be used to generate inputs for our tests
+mine 61 blocks to transition from DEFINED to STARTED
+mine 144 blocks, only 100 of which signal readiness, in order to fail to change state this period
+mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
+mine 140 blocks and seed the block chain with the 82 inputs we will use for our tests at height 572
+mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
+mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
+Test BIP 113 is enforced
+Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
+Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
+Mine 1 block so next height is 582 and test BIP 68 now passes time and height
+Test that BIP 112 is enforced
+
+Various transactions are used to check that the BIPs' rules are not enforced
+before the soft fork activates, and that after activation transactions pass
+and fail as they should according to those rules.
+For each BIP, transactions of versions 1 and 2 will be tested.
+----------------
+BIP 113:
+bip113tx - modify the nLocktime variable
+
+BIP 68:
+bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
+
+BIP 112:
+bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
+bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
+bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
+bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
+bip112tx_special - test negative argument to OP_CSV
+"""
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.util import *
+from test_framework.mininode import ToHex, CTransaction, NetworkThread
+from test_framework.blocktools import create_coinbase, create_block
+from test_framework.comptool import TestInstance, TestManager
+from test_framework.script import *
+from io import BytesIO
+import time
+
+base_relative_locktime = 10
+seq_disable_flag = 1<<31
+seq_random_high_bit = 1<<25
+seq_type_flag = 1<<22
+seq_random_low_bit = 1<<18
+
+# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field
+# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1
+relative_locktimes = []
+for b31 in range(2):
+ b25times = []
+ for b25 in range(2):
+ b22times = []
+ for b22 in range(2):
+ b18times = []
+ for b18 in range(2):
+ rlt = base_relative_locktime
+ if (b31):
+ rlt = rlt | seq_disable_flag
+ if (b25):
+ rlt = rlt | seq_random_high_bit
+ if (b22):
+ rlt = rlt | seq_type_flag
+ if (b18):
+ rlt = rlt | seq_random_low_bit
+ b18times.append(rlt)
+ b22times.append(b18times)
+ b25times.append(b22times)
+ relative_locktimes.append(b25times)
+
+def all_rlt_txs(txarray):
+ txs = []
+ for b31 in range(2):
+ for b25 in range(2):
+ for b22 in range(2):
+ for b18 in range(2):
+ txs.append(txarray[b31][b25][b22][b18])
+ return txs
+
+class BIP68_112_113Test(ComparisonTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 1
+
+ def setup_network(self):
+ # Must set the blockversion for this test
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
+ extra_args=[['-whitelist=127.0.0.1', '-blockversion=4']],
+ binary=[self.options.testbinary])
+
+ def run_test(self):
+ test = TestManager(self, self.options.tmpdir)
+ test.add_all_connections(self.nodes)
+ NetworkThread().start() # Start up network handling in another thread
+ test.run()
+
+ def send_generic_input_tx(self, node, coinbases):
+ amount = Decimal("49.99")
+ return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount))))
+
+ def create_transaction(self, node, txid, to_address, amount):
+ inputs = [{ "txid" : txid, "vout" : 0}]
+ outputs = { to_address : amount }
+ rawtx = node.createrawtransaction(inputs, outputs)
+ tx = CTransaction()
+ f = BytesIO(hex_str_to_bytes(rawtx))
+ tx.deserialize(f)
+ return tx
+
+ def sign_transaction(self, node, unsignedtx):
+ rawtx = ToHex(unsignedtx)
+ signresult = node.signrawtransaction(rawtx)
+ tx = CTransaction()
+ f = BytesIO(hex_str_to_bytes(signresult['hex']))
+ tx.deserialize(f)
+ return tx
+
+    def generate_blocks(self, number, version, test_blocks=None):
+        # Avoid the mutable-default-argument pitfall: a shared default list
+        # would accumulate blocks across calls.
+        if test_blocks is None:
+            test_blocks = []
+ for i in range(number):
+ block = self.create_test_block([], version)
+ test_blocks.append([block, True])
+ self.last_block_time += 600
+ self.tip = block.sha256
+ self.tipheight += 1
+ return test_blocks
+
+ def create_test_block(self, txs, version = 536870912):
+ block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
+ block.nVersion = version
+ block.vtx.extend(txs)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+ return block
+
+ def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0):
+ txs = []
+ assert(len(bip68inputs) >= 16)
+ i = 0
+ for b31 in range(2):
+ b25txs = []
+ for b25 in range(2):
+ b22txs = []
+ for b22 in range(2):
+ b18txs = []
+ for b18 in range(2):
+ tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98"))
+ i += 1
+ tx.nVersion = txversion
+ tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
+ b18txs.append(self.sign_transaction(self.nodes[0], tx))
+ b22txs.append(b18txs)
+ b25txs.append(b22txs)
+ txs.append(b25txs)
+ return txs
+
+ def create_bip112special(self, input, txversion):
+ tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98"))
+ tx.nVersion = txversion
+ signtx = self.sign_transaction(self.nodes[0], tx)
+ signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
+ return signtx
+
+ def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0):
+ txs = []
+ assert(len(bip112inputs) >= 16)
+ i = 0
+ for b31 in range(2):
+ b25txs = []
+ for b25 in range(2):
+ b22txs = []
+ for b22 in range(2):
+ b18txs = []
+ for b18 in range(2):
+ tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98"))
+ i += 1
+ if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
+ tx.vin[0].nSequence = base_relative_locktime + locktime_delta
+ else: # vary nSequence instead, OP_CSV is fixed
+ tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
+ tx.nVersion = txversion
+ signtx = self.sign_transaction(self.nodes[0], tx)
+ if (varyOP_CSV):
+ signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
+ else:
+ signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
+ b18txs.append(signtx)
+ b22txs.append(b18txs)
+ b25txs.append(b22txs)
+ txs.append(b25txs)
+ return txs
+
+ def get_tests(self):
+ long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
+ self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
+ self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs
+ self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
+ self.tipheight = 82 # height of the next block to build
+ self.last_block_time = long_past_time
+ self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
+ self.nodeaddress = self.nodes[0].getnewaddress()
+
+ assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
+ test_blocks = self.generate_blocks(61, 4)
+ yield TestInstance(test_blocks, sync_every_block=False) # 1
+ # Advanced from DEFINED to STARTED, height = 143
+ assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
+
+ # Fail to achieve LOCKED_IN 100 out of 144 signal bit 0
+ # using a variety of bits to simulate multiple parallel softforks
+ test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
+ test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
+ test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
+ test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
+ yield TestInstance(test_blocks, sync_every_block=False) # 2
+ # Failed to advance past STARTED, height = 287
+ assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
+
+ # 108 out of 144 signal bit 0 to achieve lock-in
+ # using a variety of bits to simulate multiple parallel softforks
+ test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
+ test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
+ test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
+ test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
+ yield TestInstance(test_blocks, sync_every_block=False) # 3
+ # Advanced from STARTED to LOCKED_IN, height = 431
+ assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
+
+ # 140 more version 4 blocks
+ test_blocks = self.generate_blocks(140, 4)
+ yield TestInstance(test_blocks, sync_every_block=False) # 4
+
+ ### Inputs at height = 572
+ # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
+ # Note we reuse inputs for v1 and v2 txs so must test these separately
+ # 16 normal inputs
+ bip68inputs = []
+ for i in range(16):
+ bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
+ # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
+ bip112basicinputs = []
+ for j in range(2):
+ inputs = []
+ for i in range(16):
+ inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
+ bip112basicinputs.append(inputs)
+ # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
+ bip112diverseinputs = []
+ for j in range(2):
+ inputs = []
+ for i in range(16):
+ inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
+ bip112diverseinputs.append(inputs)
+ # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
+ bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
+ # 1 normal input
+ bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
+
+ self.nodes[0].setmocktime(self.last_block_time + 600)
+ inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572
+ self.nodes[0].setmocktime(0)
+ self.tip = int("0x" + inputblockhash, 0)
+ self.tipheight += 1
+ self.last_block_time += 600
+ assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1)
+
+ # 2 more version 4 blocks
+ test_blocks = self.generate_blocks(2, 4)
+ yield TestInstance(test_blocks, sync_every_block=False) # 5
+ # Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)
+ assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
+
+ # Test both version 1 and version 2 transactions for all tests
+ # BIP113 test transaction will be modified before each use to put in appropriate block time
+ bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
+ bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
+ bip113tx_v1.nVersion = 1
+ bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
+ bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
+ bip113tx_v2.nVersion = 2
+
+ # For BIP68 test all 16 relative sequence locktimes
+ bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
+ bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)
+
+ # For BIP112 test:
+ # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
+ bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
+ bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
+ # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
+ bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
+ bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
+ # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
+ bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
+ bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
+ # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
+ bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
+ bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
+ # -1 OP_CSV OP_DROP input
+ bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
+ bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)
+
+
+ ### TESTING ###
+ ##################################
+ ### Before Soft Forks Activate ###
+ ##################################
+ # All txs should pass
+ ### Version 1 txs ###
+ success_txs = []
+ # add BIP113 tx and -1 CSV tx
+ bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
+ bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
+ success_txs.append(bip113signed1)
+ success_txs.append(bip112tx_special_v1)
+ # add BIP 68 txs
+ success_txs.extend(all_rlt_txs(bip68txs_v1))
+ # add BIP 112 with seq=10 txs
+ success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
+ success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
+ # try BIP 112 with seq=9 txs
+ success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
+ success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
+ yield TestInstance([[self.create_test_block(success_txs), True]]) # 6
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+
+ ### Version 2 txs ###
+ success_txs = []
+ # add BIP113 tx and -1 CSV tx
+ bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
+ bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
+ success_txs.append(bip113signed2)
+ success_txs.append(bip112tx_special_v2)
+ # add BIP 68 txs
+ success_txs.extend(all_rlt_txs(bip68txs_v2))
+ # add BIP 112 with seq=10 txs
+ success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
+ success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
+ # try BIP 112 with seq=9 txs
+ success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
+ success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
+ yield TestInstance([[self.create_test_block(success_txs), True]]) # 7
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+
+
+ # 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
+ test_blocks = self.generate_blocks(1, 4)
+ yield TestInstance(test_blocks, sync_every_block=False) # 8
+ assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')
+
+
+ #################################
+ ### After Soft Forks Activate ###
+ #################################
+ ### BIP 113 ###
+ # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
+ bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
+ bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
+ bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
+ bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
+ for bip113tx in [bip113signed1, bip113signed2]:
+ yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10
+ # BIP 113 tests should now pass if the locktime is < MTP
+ bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
+ bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
+ bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
+ bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
+ for bip113tx in [bip113signed1, bip113signed2]:
+ yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+
+ # Next block height = 580 after 4 blocks of random version
+ test_blocks = self.generate_blocks(4, 1234)
+ yield TestInstance(test_blocks, sync_every_block=False) # 13
+
+ ### BIP 68 ###
+ ### Version 1 txs ###
+ # All still pass
+ success_txs = []
+ success_txs.extend(all_rlt_txs(bip68txs_v1))
+ yield TestInstance([[self.create_test_block(success_txs), True]]) # 14
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+
+ ### Version 2 txs ###
+ bip68success_txs = []
+ # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
+ for b25 in range(2):
+ for b22 in range(2):
+ for b18 in range(2):
+ bip68success_txs.append(bip68txs_v2[1][b25][b22][b18])
+ yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+ # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
+ bip68timetxs = []
+ for b25 in range(2):
+ for b18 in range(2):
+ bip68timetxs.append(bip68txs_v2[0][b25][1][b18])
+ for tx in bip68timetxs:
+ yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19
+ bip68heighttxs = []
+ for b25 in range(2):
+ for b18 in range(2):
+ bip68heighttxs.append(bip68txs_v2[0][b25][0][b18])
+ for tx in bip68heighttxs:
+ yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23
+
+ # Advance one block to 581
+ test_blocks = self.generate_blocks(1, 1234)
+ yield TestInstance(test_blocks, sync_every_block=False) # 24
+
+        # Height txs should fail; time txs should now pass, since 9 * 600 > 10 * 512
+ bip68success_txs.extend(bip68timetxs)
+ yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+ for tx in bip68heighttxs:
+ yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29
+
+ # Advance one block to 582
+ test_blocks = self.generate_blocks(1, 1234)
+ yield TestInstance(test_blocks, sync_every_block=False) # 30
+
+ # All BIP 68 txs should pass
+ bip68success_txs.extend(bip68heighttxs)
+ yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+
+
+ ### BIP 112 ###
+ ### Version 1 txs ###
+ # -1 OP_CSV tx should fail
+        yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) # 32
+ # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
+ success_txs = []
+ for b25 in range(2):
+ for b22 in range(2):
+ for b18 in range(2):
+ success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18])
+ success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18])
+ yield TestInstance([[self.create_test_block(success_txs), True]]) # 33
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+
+ # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
+ fail_txs = []
+ fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
+ fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
+ for b25 in range(2):
+ for b22 in range(2):
+ for b18 in range(2):
+ fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18])
+ fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18])
+
+ for tx in fail_txs:
+ yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81
+
+ ### Version 2 txs ###
+ # -1 OP_CSV tx should fail
+        yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) # 82
+
+ # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
+ success_txs = []
+ for b25 in range(2):
+ for b22 in range(2):
+ for b18 in range(2):
+ success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV
+ success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9
+
+ yield TestInstance([[self.create_test_block(success_txs), True]]) # 83
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+
+ ## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
+ # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
+ fail_txs = []
+ fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9
+ for b25 in range(2):
+ for b22 in range(2):
+ for b18 in range(2):
+ fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9
+
+ for tx in fail_txs:
+ yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107
+
+ # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
+ fail_txs = []
+ for b25 in range(2):
+ for b22 in range(2):
+ for b18 in range(2):
+ fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence
+ for tx in fail_txs:
+ yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115
+
+ # If sequencelock types mismatch, tx should fail
+ fail_txs = []
+ for b25 in range(2):
+ for b18 in range(2):
+ fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence
+ fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV
+ for tx in fail_txs:
+ yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123
+
+        # Remaining txs should pass; this just tests that masking works properly
+ success_txs = []
+ for b25 in range(2):
+ for b18 in range(2):
+ success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence
+ success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV
+ yield TestInstance([[self.create_test_block(success_txs), True]]) # 124
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+
+        # Additional test: check that comparison of two time-type locks works properly
+ time_txs = []
+ for b25 in range(2):
+ for b18 in range(2):
+ tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18]
+ tx.vin[0].nSequence = base_relative_locktime | seq_type_flag
+ signtx = self.sign_transaction(self.nodes[0], tx)
+ time_txs.append(signtx)
+ yield TestInstance([[self.create_test_block(time_txs), True]]) # 125
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+
+ ### Missing aspects of test
+ ## Testing empty stack fails
+
+
+if __name__ == '__main__':
+ BIP68_112_113Test().main()
diff --git a/test/functional/bip68-sequence.py b/test/functional/bip68-sequence.py
new file mode 100755
index 0000000000..3ed6ebe044
--- /dev/null
+++ b/test/functional/bip68-sequence.py
@@ -0,0 +1,398 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test BIP68 implementation."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.blocktools import *
+import random
+
+SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
+SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
+SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
+SEQUENCE_LOCKTIME_MASK = 0x0000ffff
+
+# RPC error for non-BIP68 final transactions
+NOT_FINAL_ERROR = "64: non-BIP68-final"
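+
+# Illustrative helper (added for clarity; not used by the tests below): encode
+# a relative time-based lock of `seconds` into an nSequence value using the
+# BIP68 layout above. The type flag selects 512-second units; the low 16 bits
+# hold the value after the 9-bit shift.
+def example_time_lock_sequence(seconds):
+    return SEQUENCE_LOCKTIME_TYPE_FLAG | ((seconds >> SEQUENCE_LOCKTIME_GRANULARITY) & SEQUENCE_LOCKTIME_MASK)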
+
+class BIP68Test(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 2
+ self.setup_clean_chain = False
+
+ def setup_network(self):
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir))
+ self.nodes.append(start_node(1, self.options.tmpdir, ["-acceptnonstdtxn=0"]))
+ self.is_network_split = False
+ self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
+ connect_nodes(self.nodes[0], 1)
+
+ def run_test(self):
+ # Generate some coins
+ self.nodes[0].generate(110)
+
+ self.log.info("Running test disable flag")
+ self.test_disable_flag()
+
+ self.log.info("Running test sequence-lock-confirmed-inputs")
+ self.test_sequence_lock_confirmed_inputs()
+
+ self.log.info("Running test sequence-lock-unconfirmed-inputs")
+ self.test_sequence_lock_unconfirmed_inputs()
+
+ self.log.info("Running test BIP68 not consensus before versionbits activation")
+ self.test_bip68_not_consensus()
+
+ self.log.info("Activating BIP68 (and 112/113)")
+ self.activateCSV()
+
+ self.log.info("Verifying nVersion=2 transactions are standard.")
+ self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).")
+ self.test_version2_relay()
+
+ self.log.info("Passed")
+
+    # Test that BIP68 is not in effect if tx version is 1, or if
+    # the most significant bit of nSequence (the disable flag) is set.
+ def test_disable_flag(self):
+ # Create some unconfirmed inputs
+ new_addr = self.nodes[0].getnewaddress()
+ self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC
+
+ utxos = self.nodes[0].listunspent(0, 0)
+ assert(len(utxos) > 0)
+
+ utxo = utxos[0]
+
+ tx1 = CTransaction()
+ value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
+
+ # Check that the disable flag disables relative locktime.
+ # If sequence locks were used, this would require 1 block for the
+ # input to mature.
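+        # e.g. the value below is 0x80000001: the disable flag (bit 31) set on
+        # top of a relative lock of 1, so the lock is ignored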
+ sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
+ tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
+ tx1.vout = [CTxOut(value, CScript([b'a']))]
+
+ tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"]
+ tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
+ tx1_id = int(tx1_id, 16)
+
+ # This transaction will enable sequence-locks, so this transaction should
+ # fail
+ tx2 = CTransaction()
+ tx2.nVersion = 2
+ sequence_value = sequence_value & 0x7fffffff
+ tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
+ tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))]
+ tx2.rehash()
+
+ assert_raises_jsonrpc(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2))
+
+ # Setting the version back down to 1 should disable the sequence lock,
+ # so this should be accepted.
+ tx2.nVersion = 1
+
+ self.nodes[0].sendrawtransaction(ToHex(tx2))
+
+ # Calculate the median time past of a prior block ("confirmations" before
+ # the current tip).
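+    # (MTP is the median of the timestamps of the previous 11 blocks.)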
+ def get_median_time_past(self, confirmations):
+ block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
+ return self.nodes[0].getblockheader(block_hash)["mediantime"]
+
+ # Test that sequence locks are respected for transactions spending confirmed inputs.
+ def test_sequence_lock_confirmed_inputs(self):
+ # Create lots of confirmed utxos, and use them to generate lots of random
+ # transactions.
+ max_outputs = 50
+ addresses = []
+ while len(addresses) < max_outputs:
+ addresses.append(self.nodes[0].getnewaddress())
+        while len(self.nodes[0].listunspent()) < 200:
+            random.shuffle(addresses)
+ num_outputs = random.randint(1, max_outputs)
+ outputs = {}
+ for i in range(num_outputs):
+ outputs[addresses[i]] = random.randint(1, 20)*0.01
+ self.nodes[0].sendmany("", outputs)
+ self.nodes[0].generate(1)
+
+ utxos = self.nodes[0].listunspent()
+
+ # Try creating a lot of random transactions.
+ # Each time, choose a random number of inputs, and randomly set
+ # some of those inputs to be sequence locked (and randomly choose
+ # between height/time locking). Small random chance of making the locks
+ # all pass.
+ for i in range(400):
+ # Randomly choose up to 10 inputs
+ num_inputs = random.randint(1, 10)
+ random.shuffle(utxos)
+
+ # Track whether any sequence locks used should fail
+ should_pass = True
+
+ # Track whether this transaction was built with sequence locks
+ using_sequence_locks = False
+
+ tx = CTransaction()
+ tx.nVersion = 2
+ value = 0
+ for j in range(num_inputs):
+ sequence_value = 0xfffffffe # this disables sequence locks
+
+ # 50% chance we enable sequence locks
+ if random.randint(0,1):
+ using_sequence_locks = True
+
+ # 10% of the time, make the input sequence value pass
+ input_will_pass = (random.randint(1,10) == 1)
+ sequence_value = utxos[j]["confirmations"]
+ if not input_will_pass:
+ sequence_value += 1
+ should_pass = False
+
+ # Figure out what the median-time-past was for the confirmed input
+ # Note that if an input has N confirmations, we're going back N blocks
+ # from the tip so that we're looking up MTP of the block
+ # PRIOR to the one the input appears in, as per the BIP68 spec.
+ orig_time = self.get_median_time_past(utxos[j]["confirmations"])
+ cur_time = self.get_median_time_past(0) # MTP of the tip
+
+ # can only timelock this input if it's not too old -- otherwise use height
+ can_time_lock = True
+ if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
+ can_time_lock = False
+
+ # if time-lockable, then 50% chance we make this a time lock
+ if random.randint(0,1) and can_time_lock:
+ # Find first time-lock value that fails, or latest one that succeeds
+ time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
+ if input_will_pass and time_delta > cur_time - orig_time:
+ sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
+ elif (not input_will_pass and time_delta <= cur_time - orig_time):
+ sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
+ sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
+ tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
+ value += utxos[j]["amount"]*COIN
+ # Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
+ tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
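+            # relayfee is in BTC/kB, so relayfee * tx_size / 1000 is the fee in
+            # BTC for a tx of this size; multiplying by COIN gives satoshis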
+ tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a'])))
+ rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"]
+
+ if (using_sequence_locks and not should_pass):
+ # This transaction should be rejected
+ assert_raises_jsonrpc(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx)
+ else:
+ # This raw transaction should be accepted
+ self.nodes[0].sendrawtransaction(rawtx)
+ utxos = self.nodes[0].listunspent()
+
+ # Test that sequence locks on unconfirmed inputs must have nSequence
+ # height or time of 0 to be accepted.
+ # Then test that BIP68-invalid transactions are removed from the mempool
+ # after a reorg.
+ def test_sequence_lock_unconfirmed_inputs(self):
+ # Store height so we can easily reset the chain at the end of the test
+ cur_height = self.nodes[0].getblockcount()
+
+ # Create a mempool tx.
+ txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
+ tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
+ tx1.rehash()
+
+ # Anyone-can-spend mempool tx.
+ # Sequence lock of 0 should pass.
+ tx2 = CTransaction()
+ tx2.nVersion = 2
+ tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
+ tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
+ tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
+ tx2 = FromHex(tx2, tx2_raw)
+ tx2.rehash()
+
+ self.nodes[0].sendrawtransaction(tx2_raw)
+
+ # Create a spend of the 0th output of orig_tx with a sequence lock
+ # of 1, and test what happens when submitting.
+ # orig_tx.vout[0] must be an anyone-can-spend output
+ def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
+ sequence_value = 1
+ if not use_height_lock:
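+                # with the type flag set, a lock of 1 means one 512-second unit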
+ sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
+
+ tx = CTransaction()
+ tx.nVersion = 2
+ tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
+ tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))]
+ tx.rehash()
+
+ if (orig_tx.hash in node.getrawmempool()):
+ # sendrawtransaction should fail if the tx is in the mempool
+ assert_raises_jsonrpc(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx))
+ else:
+ # sendrawtransaction should succeed if the tx is not in the mempool
+ node.sendrawtransaction(ToHex(tx))
+
+ return tx
+
+ test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
+ test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
+
+ # Now mine some blocks, but make sure tx2 doesn't get mined.
+ # Use prioritisetransaction to lower the effective feerate to 0
+ self.nodes[0].prioritisetransaction(tx2.hash, int(-self.relayfee*COIN))
+ cur_time = int(time.time())
+ for i in range(10):
+ self.nodes[0].setmocktime(cur_time + 600)
+ self.nodes[0].generate(1)
+ cur_time += 600
+
+ assert(tx2.hash in self.nodes[0].getrawmempool())
+
+ test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
+ test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
+
+ # Mine tx2, and then try again
+ self.nodes[0].prioritisetransaction(tx2.hash, int(self.relayfee*COIN))
+
+ # Advance the time on the node so that we can test timelocks
+ self.nodes[0].setmocktime(cur_time+600)
+ self.nodes[0].generate(1)
+ assert(tx2.hash not in self.nodes[0].getrawmempool())
+
+ # Now that tx2 is not in the mempool, a sequence locked spend should
+ # succeed
+ tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
+ assert(tx3.hash in self.nodes[0].getrawmempool())
+
+ self.nodes[0].generate(1)
+ assert(tx3.hash not in self.nodes[0].getrawmempool())
+
+ # One more test, this time using height locks
+ tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
+ assert(tx4.hash in self.nodes[0].getrawmempool())
+
+ # Now try combining confirmed and unconfirmed inputs
+ tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
+ assert(tx5.hash not in self.nodes[0].getrawmempool())
+
+ utxos = self.nodes[0].listunspent()
+ tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
+ tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
+ raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"]
+
+ assert_raises_jsonrpc(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5)
+
+ # Test mempool-BIP68 consistency after reorg
+ #
+ # State of the transactions in the last blocks:
+ # ... -> [ tx2 ] -> [ tx3 ]
+ # tip-1 tip
+ # And currently tx4 is in the mempool.
+ #
+ # If we invalidate the tip, tx3 should get added to the mempool, causing
+ # tx4 to be removed (fails sequence-lock).
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+ assert(tx4.hash not in self.nodes[0].getrawmempool())
+ assert(tx3.hash in self.nodes[0].getrawmempool())
+
+ # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
+ # diagram above).
+ # This would cause tx2 to be added back to the mempool, which in turn causes
+ # tx3 to be removed.
+ tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16)
+ height = self.nodes[0].getblockcount()
+ for i in range(2):
+ block = create_block(tip, create_coinbase(height), cur_time)
+ block.nVersion = 3
+ block.rehash()
+ block.solve()
+ tip = block.sha256
+ height += 1
+ self.nodes[0].submitblock(ToHex(block))
+ cur_time += 1
+
+ mempool = self.nodes[0].getrawmempool()
+ assert(tx3.hash not in mempool)
+ assert(tx2.hash in mempool)
+
+ # Reset the chain and get rid of the mocktimed-blocks
+ self.nodes[0].setmocktime(0)
+ self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
+ self.nodes[0].generate(10)
+
+ # Make sure that BIP68 isn't being used to validate blocks, prior to
+ # versionbits activation. If more blocks are mined prior to this test
+ # being run, then it's possible the test has activated the soft fork, and
+ # this test should be moved to run earlier, or deleted.
+ def test_bip68_not_consensus(self):
+ assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active')
+ txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
+
+ tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
+ tx1.rehash()
+
+ # Make an anyone-can-spend transaction
+ tx2 = CTransaction()
+ tx2.nVersion = 1
+ tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
+ tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
+
+ # sign tx2
+ tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
+ tx2 = FromHex(tx2, tx2_raw)
+ tx2.rehash()
+
+ self.nodes[0].sendrawtransaction(ToHex(tx2))
+
+ # Now make an invalid spend of tx2 according to BIP68
+ sequence_value = 100 # 100 block relative locktime
+
+ tx3 = CTransaction()
+ tx3.nVersion = 2
+ tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
+ tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
+ tx3.rehash()
+
+ assert_raises_jsonrpc(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3))
+
+ # make a block that violates bip68; ensure that the tip updates
+ tip = int(self.nodes[0].getbestblockhash(), 16)
+ block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1))
+ block.nVersion = 3
+ block.vtx.extend([tx1, tx2, tx3])
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+
+ self.nodes[0].submitblock(ToHex(block))
+ assert_equal(self.nodes[0].getbestblockhash(), block.hash)
+
+ def activateCSV(self):
+ # activation should happen at block height 432 (3 periods)
+ min_activation_height = 432
+ height = self.nodes[0].getblockcount()
+        assert(height < min_activation_height)
+        self.nodes[0].generate(min_activation_height - height)
+ assert(get_bip9_status(self.nodes[0], 'csv')['status'] == 'active')
+ sync_blocks(self.nodes)
+
+ # Use self.nodes[1] to test that version 2 transactions are standard.
+ def test_version2_relay(self):
+ inputs = [ ]
+ outputs = { self.nodes[1].getnewaddress() : 1.0 }
+ rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
+ rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
+ tx = FromHex(CTransaction(), rawtxfund)
+ tx.nVersion = 2
+ tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"]
+ tx_id = self.nodes[1].sendrawtransaction(tx_signed)
+
+if __name__ == '__main__':
+ BIP68Test().main()
diff --git a/test/functional/bip9-softforks.py b/test/functional/bip9-softforks.py
new file mode 100755
index 0000000000..0dffd06e1a
--- /dev/null
+++ b/test/functional/bip9-softforks.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test BIP 9 soft forks.
+
+Connect to a single node.
+regtest lock-in with 108/144 block signalling
+activation after a further 144 blocks
+mine 2 blocks and save coinbases for later use
+mine 141 blocks to transition from DEFINED to STARTED
+mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
+mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
+mine a further 143 blocks (LOCKED_IN)
+test that enforcement has not triggered (which triggers ACTIVE)
+test that enforcement has triggered
+"""
+
+from test_framework.blockstore import BlockStore
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.util import *
+from test_framework.mininode import CTransaction, NetworkThread
+from test_framework.blocktools import create_coinbase, create_block
+from test_framework.comptool import TestInstance, TestManager
+from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
+from io import BytesIO
+import time
+import itertools
+
+class BIP9SoftForksTest(ComparisonTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 1
+
+ def setup_network(self):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
+ extra_args=[['-whitelist=127.0.0.1']],
+ binary=[self.options.testbinary])
+
+ def run_test(self):
+ self.test = TestManager(self, self.options.tmpdir)
+ self.test.add_all_connections(self.nodes)
+ NetworkThread().start() # Start up network handling in another thread
+ self.test.run()
+
+ def create_transaction(self, node, coinbase, to_address, amount):
+ from_txid = node.getblock(coinbase)['tx'][0]
+ inputs = [{ "txid" : from_txid, "vout" : 0}]
+ outputs = { to_address : amount }
+ rawtx = node.createrawtransaction(inputs, outputs)
+ tx = CTransaction()
+ f = BytesIO(hex_str_to_bytes(rawtx))
+ tx.deserialize(f)
+ tx.nVersion = 2
+ return tx
+
+ def sign_transaction(self, node, tx):
+ signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
+ tx = CTransaction()
+ f = BytesIO(hex_str_to_bytes(signresult['hex']))
+ tx.deserialize(f)
+ return tx
+
+    def generate_blocks(self, number, version, test_blocks=None):
+        if test_blocks is None:
+            test_blocks = []
+ for i in range(number):
+ block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
+ block.nVersion = version
+ block.rehash()
+ block.solve()
+ test_blocks.append([block, True])
+ self.last_block_time += 1
+ self.tip = block.sha256
+ self.height += 1
+ return test_blocks
+
+ def get_bip9_status(self, key):
+ info = self.nodes[0].getblockchaininfo()
+ return info['bip9_softforks'][key]
+
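+    # BIP 9 state progression exercised by test_BIP below:
+    # DEFINED -> STARTED -> LOCKED_IN -> ACTIVE (the FAILED path is not tested here)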
+ def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
+ assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
+ assert_equal(self.get_bip9_status(bipName)['since'], 0)
+
+ # generate some coins for later
+ self.coinbase_blocks = self.nodes[0].generate(2)
+ self.height = 3 # height of the next block to build
+ self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
+ self.nodeaddress = self.nodes[0].getnewaddress()
+ self.last_block_time = int(time.time())
+
+ assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
+ assert_equal(self.get_bip9_status(bipName)['since'], 0)
+ tmpl = self.nodes[0].getblocktemplate({})
+ assert(bipName not in tmpl['rules'])
+ assert(bipName not in tmpl['vbavailable'])
+ assert_equal(tmpl['vbrequired'], 0)
+ assert_equal(tmpl['version'], 0x20000000)
+
+ # Test 1
+ # Advance from DEFINED to STARTED
+ test_blocks = self.generate_blocks(141, 4)
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ assert_equal(self.get_bip9_status(bipName)['status'], 'started')
+ assert_equal(self.get_bip9_status(bipName)['since'], 144)
+ tmpl = self.nodes[0].getblocktemplate({})
+ assert(bipName not in tmpl['rules'])
+ assert_equal(tmpl['vbavailable'][bipName], bitno)
+ assert_equal(tmpl['vbrequired'], 0)
+ assert(tmpl['version'] & activated_version)
+
+ # Test 2
+        # Fail to achieve LOCKED_IN: only 100 out of 144 blocks signal the softfork bit,
+        # interleaving signalling and non-signalling blocks
+        test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
+        test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
+        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000001 (signalling ready)
+        test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x00000004 (signalling not)
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ assert_equal(self.get_bip9_status(bipName)['status'], 'started')
+ assert_equal(self.get_bip9_status(bipName)['since'], 144)
+ tmpl = self.nodes[0].getblocktemplate({})
+ assert(bipName not in tmpl['rules'])
+ assert_equal(tmpl['vbavailable'][bipName], bitno)
+ assert_equal(tmpl['vbrequired'], 0)
+ assert(tmpl['version'] & activated_version)
+
+ # Test 3
+        # 108 out of 144 blocks signal the softfork bit, enough to achieve LOCKED_IN,
+        # interleaving signalling and non-signalling blocks
+        test_blocks = self.generate_blocks(58, activated_version) # 0x20000001 (signalling ready)
+        test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
+        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000001 (signalling ready)
+        test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x00000004 (signalling not)
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
+ assert_equal(self.get_bip9_status(bipName)['since'], 432)
+ tmpl = self.nodes[0].getblocktemplate({})
+ assert(bipName not in tmpl['rules'])
+
+ # Test 4
+    # 143 more version 4 blocks (waiting period - 1)
+ test_blocks = self.generate_blocks(143, 4)
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
+ assert_equal(self.get_bip9_status(bipName)['since'], 432)
+ tmpl = self.nodes[0].getblocktemplate({})
+ assert(bipName not in tmpl['rules'])
+
+ # Test 5
+ # Check that the new rule is enforced
+ spendtx = self.create_transaction(self.nodes[0],
+ self.coinbase_blocks[0], self.nodeaddress, 1.0)
+ invalidate(spendtx)
+ spendtx = self.sign_transaction(self.nodes[0], spendtx)
+ spendtx.rehash()
+ invalidatePostSignature(spendtx)
+ spendtx.rehash()
+ block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
+ block.nVersion = activated_version
+ block.vtx.append(spendtx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+
+ self.last_block_time += 1
+ self.tip = block.sha256
+ self.height += 1
+ yield TestInstance([[block, True]])
+
+ assert_equal(self.get_bip9_status(bipName)['status'], 'active')
+ assert_equal(self.get_bip9_status(bipName)['since'], 576)
+ tmpl = self.nodes[0].getblocktemplate({})
+ assert(bipName in tmpl['rules'])
+ assert(bipName not in tmpl['vbavailable'])
+ assert_equal(tmpl['vbrequired'], 0)
+ assert(not (tmpl['version'] & (1 << bitno)))
+
+ # Test 6
+ # Check that the new sequence lock rules are enforced
+ spendtx = self.create_transaction(self.nodes[0],
+ self.coinbase_blocks[1], self.nodeaddress, 1.0)
+ invalidate(spendtx)
+ spendtx = self.sign_transaction(self.nodes[0], spendtx)
+ spendtx.rehash()
+ invalidatePostSignature(spendtx)
+ spendtx.rehash()
+
+ block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
+ block.nVersion = 5
+ block.vtx.append(spendtx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ yield TestInstance([[block, False]])
+
+ # Restart all
+ self.test.block_store.close()
+ stop_nodes(self.nodes)
+ shutil.rmtree(self.options.tmpdir)
+ self.setup_chain()
+ self.setup_network()
+ self.test.block_store = BlockStore(self.options.tmpdir)
+ self.test.clear_all_connections()
+ self.test.add_all_connections(self.nodes)
+ NetworkThread().start() # Start up network handling in another thread
+
+
+ def get_tests(self):
+ for test in itertools.chain(
+ self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0),
+ self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0),
+ self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0)
+ ):
+ yield test
+
+ def donothing(self, tx):
+ return
+
+ def csv_invalidate(self, tx):
+ """Modify the signature in vin 0 of the tx to fail CSV
+ Prepends -1 CSV DROP in the scriptSig itself.
+ """
+ tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] +
+ list(CScript(tx.vin[0].scriptSig)))
+
+ def sequence_lock_invalidate(self, tx):
+ """Modify the nSequence to make it fails once sequence lock rule is
+ activated (high timespan).
+ """
+ tx.vin[0].nSequence = 0x00FFFFFF
+ tx.nLockTime = 0
+
+ def mtp_invalidate(self, tx):
+ """Modify the nLockTime to make it fails once MTP rule is activated."""
+ # Disable Sequence lock, Activate nLockTime
+ tx.vin[0].nSequence = 0x90FFFFFF
+ tx.nLockTime = self.last_block_time
+
+if __name__ == '__main__':
+ BIP9SoftForksTest().main()
diff --git a/test/functional/bipdersig-p2p.py b/test/functional/bipdersig-p2p.py
new file mode 100755
index 0000000000..22bd39fbe5
--- /dev/null
+++ b/test/functional/bipdersig-p2p.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test BIP66 (DER SIG).
+
+Connect to a single node.
+Mine 2 (version 2) blocks (save the coinbases for later).
+Generate 298 more version 2 blocks, verify the node accepts.
+Mine 749 version 3 blocks, verify the node accepts.
+Check that the new DERSIG rules are not enforced on the 750th version 3 block.
+Mine 199 new version blocks.
+Mine 1 old-version block.
+Mine 1 new version block.
+Check that the new DERSIG rules are enforced on the 951st version 3 block.
+Mine 1 old version block, see that the node rejects.
+"""
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.util import *
+from test_framework.mininode import CTransaction, NetworkThread
+from test_framework.blocktools import create_coinbase, create_block
+from test_framework.comptool import TestInstance, TestManager
+from test_framework.script import CScript
+from io import BytesIO
+import time
+
+# A canonical signature consists of:
+# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
+def unDERify(tx):
+ """
+ Make the signature in vin 0 of a tx non-DER-compliant,
+ by adding padding after the S-value.
+ """
+ scriptSig = CScript(tx.vin[0].scriptSig)
+ newscript = []
+ for i in scriptSig:
+ if (len(newscript) == 0):
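+            # pad between the S value and the trailing hashtype byte,
+            # breaking strict DER encoding of the signature push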
+ newscript.append(i[0:-1] + b'\0' + i[-1:])
+ else:
+ newscript.append(i)
+ tx.vin[0].scriptSig = CScript(newscript)
+
+class BIP66Test(ComparisonTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 1
+
+ def setup_network(self):
+ # Must set the blockversion for this test
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
+ extra_args=[['-whitelist=127.0.0.1', '-blockversion=2']],
+ binary=[self.options.testbinary])
+
+ def run_test(self):
+ test = TestManager(self, self.options.tmpdir)
+ test.add_all_connections(self.nodes)
+ NetworkThread().start() # Start up network handling in another thread
+ test.run()
+
+ def create_transaction(self, node, coinbase, to_address, amount):
+ from_txid = node.getblock(coinbase)['tx'][0]
+ inputs = [{ "txid" : from_txid, "vout" : 0}]
+ outputs = { to_address : amount }
+ rawtx = node.createrawtransaction(inputs, outputs)
+ signresult = node.signrawtransaction(rawtx)
+ tx = CTransaction()
+ f = BytesIO(hex_str_to_bytes(signresult['hex']))
+ tx.deserialize(f)
+ return tx
+
+ def get_tests(self):
+
+ self.coinbase_blocks = self.nodes[0].generate(2)
+ height = 3 # height of the next block to build
+ self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
+ self.nodeaddress = self.nodes[0].getnewaddress()
+ self.last_block_time = int(time.time())
+
+ ''' 298 more version 2 blocks '''
+ test_blocks = []
+ for i in range(298):
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 2
+ block.rehash()
+ block.solve()
+ test_blocks.append([block, True])
+ self.last_block_time += 1
+ self.tip = block.sha256
+ height += 1
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ ''' Mine 749 version 3 blocks '''
+ test_blocks = []
+ for i in range(749):
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 3
+ block.rehash()
+ block.solve()
+ test_blocks.append([block, True])
+ self.last_block_time += 1
+ self.tip = block.sha256
+ height += 1
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ '''
+ Check that the new DERSIG rules are not enforced in the 750th
+ version 3 block.
+ '''
+ spendtx = self.create_transaction(self.nodes[0],
+ self.coinbase_blocks[0], self.nodeaddress, 1.0)
+ unDERify(spendtx)
+ spendtx.rehash()
+
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 3
+ block.vtx.append(spendtx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+
+ self.last_block_time += 1
+ self.tip = block.sha256
+ height += 1
+ yield TestInstance([[block, True]])
+
+ ''' Mine 199 new version blocks on last valid tip '''
+ test_blocks = []
+ for i in range(199):
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 3
+ block.rehash()
+ block.solve()
+ test_blocks.append([block, True])
+ self.last_block_time += 1
+ self.tip = block.sha256
+ height += 1
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ ''' Mine 1 old version block '''
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 2
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ self.tip = block.sha256
+ height += 1
+ yield TestInstance([[block, True]])
+
+ ''' Mine 1 new version block '''
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 3
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ self.tip = block.sha256
+ height += 1
+ yield TestInstance([[block, True]])
+
+ '''
+ Check that the new DERSIG rules are enforced in the 951st version 3
+ block.
+ '''
+ spendtx = self.create_transaction(self.nodes[0],
+ self.coinbase_blocks[1], self.nodeaddress, 1.0)
+ unDERify(spendtx)
+ spendtx.rehash()
+
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 3
+ block.vtx.append(spendtx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ yield TestInstance([[block, False]])
+
+ ''' Mine 1 old version block, should be invalid '''
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 2
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ yield TestInstance([[block, False]])
+
+if __name__ == '__main__':
+ BIP66Test().main()
diff --git a/test/functional/bipdersig.py b/test/functional/bipdersig.py
new file mode 100755
index 0000000000..371cc41bb7
--- /dev/null
+++ b/test/functional/bipdersig.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the BIP66 changeover logic."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class BIP66Test(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 3
+ self.setup_clean_chain = False
+
+ def setup_network(self):
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir, []))
+ self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=2"]))
+ self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=3"]))
+ connect_nodes(self.nodes[1], 0)
+ connect_nodes(self.nodes[2], 0)
+ self.is_network_split = False
+ self.sync_all()
+
+ def run_test(self):
+ cnt = self.nodes[0].getblockcount()
+
+ # Mine some old-version blocks
+ self.nodes[1].generate(100)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 100):
+ raise AssertionError("Failed to mine 100 version=2 blocks")
+
+ # Mine 750 new-version blocks
+ for i in range(15):
+ self.nodes[2].generate(50)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 850):
+ raise AssertionError("Failed to mine 750 version=3 blocks")
+
+ # TODO: check that new DERSIG rules are not enforced
+
+ # Mine 1 new-version block
+ self.nodes[2].generate(1)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 851):
+ raise AssertionError("Failed to mine a version=3 blocks")
+
+ # TODO: check that new DERSIG rules are enforced
+
+ # Mine 198 new-version blocks
+ for i in range(2):
+ self.nodes[2].generate(99)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 1049):
+ raise AssertionError("Failed to mine 198 version=3 blocks")
+
+ # Mine 1 old-version block
+ self.nodes[1].generate(1)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 1050):
+ raise AssertionError("Failed to mine a version=2 block after 949 version=3 blocks")
+
+        # Mine 1 new-version block
+ self.nodes[2].generate(1)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 1051):
+ raise AssertionError("Failed to mine a version=3 block")
+
+        # Mine 1 old-version block. This should fail
+ assert_raises_jsonrpc(-1, "CreateNewBlock: TestBlockValidity failed: bad-version(0x00000002)", self.nodes[1].generate, 1)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 1051):
+ raise AssertionError("Accepted a version=2 block after 950 version=3 blocks")
+
+        # Mine 1 new-version block
+ self.nodes[2].generate(1)
+ self.sync_all()
+ if (self.nodes[0].getblockcount() != cnt + 1052):
+ raise AssertionError("Failed to mine a version=3 block")
+
+if __name__ == '__main__':
+ BIP66Test().main()
diff --git a/test/functional/blockchain.py b/test/functional/blockchain.py
new file mode 100755
index 0000000000..596aed50ec
--- /dev/null
+++ b/test/functional/blockchain.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test RPCs related to blockchainstate.
+
+Test the following RPCs:
+ - gettxoutsetinfo
+ - verifychain
+
+Tests correspond to code in rpc/blockchain.cpp.
+"""
+
+from decimal import Decimal
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ assert_raises_jsonrpc,
+ assert_is_hex_string,
+ assert_is_hash_string,
+ start_nodes,
+ connect_nodes_bi,
+)
+
+
+class BlockchainTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = False
+ self.num_nodes = 2
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
+ connect_nodes_bi(self.nodes, 0, 1)
+ self.is_network_split = False
+ self.sync_all()
+
+ def run_test(self):
+ self._test_gettxoutsetinfo()
+ self._test_getblockheader()
+ self.nodes[0].verifychain(4, 0)
+
+ def _test_gettxoutsetinfo(self):
+ node = self.nodes[0]
+ res = node.gettxoutsetinfo()
+
+ assert_equal(res['total_amount'], Decimal('8725.00000000'))
+ assert_equal(res['transactions'], 200)
+ assert_equal(res['height'], 200)
+ assert_equal(res['txouts'], 200)
+        assert_equal(res['bytes_serialized'], 13924)
+ assert_equal(len(res['bestblock']), 64)
+ assert_equal(len(res['hash_serialized']), 64)
+
+ def _test_getblockheader(self):
+ node = self.nodes[0]
+
+ assert_raises_jsonrpc(-5, "Block not found", node.getblockheader, "nonsense")
+
+ besthash = node.getbestblockhash()
+ secondbesthash = node.getblockhash(199)
+ header = node.getblockheader(besthash)
+
+ assert_equal(header['hash'], besthash)
+ assert_equal(header['height'], 200)
+ assert_equal(header['confirmations'], 1)
+ assert_equal(header['previousblockhash'], secondbesthash)
+ assert_is_hex_string(header['chainwork'])
+ assert_is_hash_string(header['hash'])
+ assert_is_hash_string(header['previousblockhash'])
+ assert_is_hash_string(header['merkleroot'])
+ assert_is_hash_string(header['bits'], length=None)
+ assert isinstance(header['time'], int)
+ assert isinstance(header['mediantime'], int)
+ assert isinstance(header['nonce'], int)
+ assert isinstance(header['version'], int)
+ assert isinstance(int(header['versionHex'], 16), int)
+ assert isinstance(header['difficulty'], Decimal)
+
+if __name__ == '__main__':
+ BlockchainTest().main()
diff --git a/test/functional/bumpfee.py b/test/functional/bumpfee.py
new file mode 100755
index 0000000000..8f75e9ed4d
--- /dev/null
+++ b/test/functional/bumpfee.py
@@ -0,0 +1,323 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the bumpfee RPC."""
+
+from segwit import send_to_witness
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework import blocktools
+from test_framework.mininode import CTransaction
+from test_framework.util import *
+
+import io
+
+# Sequence number that is BIP 125 opt-in and BIP 68-compliant
+BIP125_SEQUENCE_NUMBER = 0xfffffffd
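+
+# Illustrative helper (added for clarity; not used by this test), following
+# BIP 125: a transaction signals replaceability iff any of its inputs has
+# nSequence < 0xfffffffe.
+def signals_rbf(tx):
+    return any(txin.nSequence < 0xfffffffe for txin in tx.vin)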
+
+WALLET_PASSPHRASE = "test"
+WALLET_PASSPHRASE_TIMEOUT = 3600
+
+
+class BumpFeeTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 2
+ self.setup_clean_chain = True
+
+ def setup_network(self, split=False):
+ extra_args = [["-prematurewitness", "-walletprematurewitness", "-walletrbf={}".format(i)]
+ for i in range(self.num_nodes)]
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
+
+ # Encrypt wallet for test_locked_wallet_fails test
+ self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
+ bitcoind_processes[1].wait()
+ self.nodes[1] = start_node(1, self.options.tmpdir, extra_args[1])
+ self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
+
+ connect_nodes_bi(self.nodes, 0, 1)
+ self.is_network_split = False
+ self.sync_all()
+
+ def run_test(self):
+ peer_node, rbf_node = self.nodes
+ rbf_node_address = rbf_node.getnewaddress()
+
+        # fund rbf node with 25 outputs of 0.001 btc (100,000 satoshis each)
+ self.log.info("Mining blocks...")
+ peer_node.generate(110)
+ self.sync_all()
+ for i in range(25):
+ peer_node.sendtoaddress(rbf_node_address, 0.001)
+ self.sync_all()
+ peer_node.generate(1)
+ self.sync_all()
+ assert_equal(rbf_node.getbalance(), Decimal("0.025"))
+
+ self.log.info("Running tests")
+ dest_address = peer_node.getnewaddress()
+ test_small_output_fails(rbf_node, dest_address)
+ test_dust_to_fee(rbf_node, dest_address)
+ test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
+ test_segwit_bumpfee_succeeds(rbf_node, dest_address)
+ test_nonrbf_bumpfee_fails(peer_node, dest_address)
+ test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
+ test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
+ test_settxfee(rbf_node, dest_address)
+ test_rebumping(rbf_node, dest_address)
+ test_rebumping_not_replaceable(rbf_node, dest_address)
+ test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
+ test_bumpfee_metadata(rbf_node, dest_address)
+ test_locked_wallet_fails(rbf_node, dest_address)
+ self.log.info("Success")
+
+
+def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
+ rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
+ rbftx = rbf_node.gettransaction(rbfid)
+ sync_mempools((rbf_node, peer_node))
+ assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
+ bumped_tx = rbf_node.bumpfee(rbfid)
+ assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
+    # check that bumped_tx propagates, original tx was evicted and has a wallet conflict
+ sync_mempools((rbf_node, peer_node))
+ assert bumped_tx["txid"] in rbf_node.getrawmempool()
+ assert bumped_tx["txid"] in peer_node.getrawmempool()
+ assert rbfid not in rbf_node.getrawmempool()
+ assert rbfid not in peer_node.getrawmempool()
+ oldwtx = rbf_node.gettransaction(rbfid)
+ assert len(oldwtx["walletconflicts"]) > 0
+ # check wallet transaction replaces and replaced_by values
+ bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
+ assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
+ assert_equal(bumpedwtx["replaces_txid"], rbfid)
+
+
+def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
+ # Create a transaction with segwit output, then create an RBF transaction
+ # which spends it, and make sure bumpfee can be called on it.
+
+ segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
+ segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
+ rbf_node.addwitnessaddress(segwit_out["address"])
+ segwitid = send_to_witness(
+ use_p2wsh=False,
+ node=rbf_node,
+ utxo=segwit_in,
+ pubkey=segwit_out["pubkey"],
+ encode_p2sh=False,
+ amount=Decimal("0.0009"),
+ sign=True)
+
+ rbfraw = rbf_node.createrawtransaction([{
+ 'txid': segwitid,
+ 'vout': 0,
+ "sequence": BIP125_SEQUENCE_NUMBER
+ }], {dest_address: Decimal("0.0005"),
+ get_change_address(rbf_node): Decimal("0.0003")})
+ rbfsigned = rbf_node.signrawtransaction(rbfraw)
+ rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
+ assert rbfid in rbf_node.getrawmempool()
+
+ bumped_tx = rbf_node.bumpfee(rbfid)
+ assert bumped_tx["txid"] in rbf_node.getrawmempool()
+ assert rbfid not in rbf_node.getrawmempool()
+
+
+def test_nonrbf_bumpfee_fails(peer_node, dest_address):
+ # cannot replace a non RBF transaction (from node which did not enable RBF)
+ not_rbfid = create_fund_sign_send(peer_node, {dest_address: 0.00090000})
+ assert_raises_jsonrpc(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
+
+
+def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
+ # cannot bump fee unless the tx has only inputs that we own.
+    # here, the rbftx has a peer_node coin and then adds an rbf_node input
+    # Note that this test depends on the RPC code checking input ownership before
+    # looking for a change output (the tx can't use fundrawtransaction, so it
+    # lacks a proper change output)
+ utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
+ inputs = [{
+ "txid": utxo["txid"],
+ "vout": utxo["vout"],
+ "address": utxo["address"],
+ "sequence": BIP125_SEQUENCE_NUMBER
+ } for utxo in utxos]
+ output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
+ rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
+ signedtx = rbf_node.signrawtransaction(rawtx)
+ signedtx = peer_node.signrawtransaction(signedtx["hex"])
+ rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
+ assert_raises_jsonrpc(-4, "Transaction contains inputs that don't belong to this wallet",
+ rbf_node.bumpfee, rbfid)
+
+
+def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
+ # cannot bump fee if the transaction has a descendant
+ # parent is send-to-self, so we don't have to check which output is change when creating the child tx
+ parent_id = create_fund_sign_send(rbf_node, {rbf_node_address: 0.00050000})
+ tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
+ tx = rbf_node.signrawtransaction(tx)
+ txid = rbf_node.sendrawtransaction(tx["hex"])
+ assert_raises_jsonrpc(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
+
+
+def test_small_output_fails(rbf_node, dest_address):
+ # cannot bump fee with a too-small output
+ rbfid = spend_one_input(rbf_node,
+ Decimal("0.00100000"),
+ {dest_address: 0.00080000,
+ get_change_address(rbf_node): Decimal("0.00010000")})
+ rbf_node.bumpfee(rbfid, {"totalFee": 20000})
+
+ rbfid = spend_one_input(rbf_node,
+ Decimal("0.00100000"),
+ {dest_address: 0.00080000,
+ get_change_address(rbf_node): Decimal("0.00010000")})
+ assert_raises_jsonrpc(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 20001})
+
+
+def test_dust_to_fee(rbf_node, dest_address):
+    # check that if the change output is reduced to dust, it is converted to fee:
+    # the bump asks for totalFee=19900, which leaves only 100 satoshis of change;
+    # that is dust, so it is added to the fee, giving a final fee of 20,000
+ rbfid = spend_one_input(rbf_node,
+ Decimal("0.00100000"),
+ {dest_address: 0.00080000,
+ get_change_address(rbf_node): Decimal("0.00010000")})
+ fulltx = rbf_node.getrawtransaction(rbfid, 1)
+ bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 19900})
+ full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
+ assert_equal(bumped_tx["fee"], Decimal("0.00020000"))
+ assert_equal(len(fulltx["vout"]), 2)
+ assert_equal(len(full_bumped_tx["vout"]), 1) #change output is eliminated
+
+
+def test_settxfee(rbf_node, dest_address):
+ # check that bumpfee reacts correctly to the use of settxfee (paytxfee)
+ # increase feerate by 2.5x, test that fee increased at least 2x
+ rbf_node.settxfee(Decimal("0.00001000"))
+ rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
+ rbftx = rbf_node.gettransaction(rbfid)
+ rbf_node.settxfee(Decimal("0.00002500"))
+ bumped_tx = rbf_node.bumpfee(rbfid)
+ assert bumped_tx["fee"] > 2 * abs(rbftx["fee"])
+ rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
+
+
+def test_rebumping(rbf_node, dest_address):
+ # check that re-bumping the original tx fails, but bumping the bumper succeeds
+ rbf_node.settxfee(Decimal("0.00001000"))
+ rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
+ bumped = rbf_node.bumpfee(rbfid, {"totalFee": 1000})
+ assert_raises_jsonrpc(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 2000})
+ rbf_node.bumpfee(bumped["txid"], {"totalFee": 2000})
+
+
+def test_rebumping_not_replaceable(rbf_node, dest_address):
+ # check that re-bumping a non-replaceable bump tx fails
+ rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
+ bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
+ assert_raises_jsonrpc(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
+ {"totalFee": 20000})
+
+
+def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
+ # check that unconfirmed outputs from bumped transactions are not spendable
+ rbfid = create_fund_sign_send(rbf_node, {rbf_node_address: 0.00090000})
+ rbftx = rbf_node.gettransaction(rbfid)["hex"]
+ assert rbfid in rbf_node.getrawmempool()
+ bumpid = rbf_node.bumpfee(rbfid)["txid"]
+ assert bumpid in rbf_node.getrawmempool()
+ assert rbfid not in rbf_node.getrawmempool()
+
+ # check that outputs from the bump transaction are not spendable
+ # due to the replaces_txid check in CWallet::AvailableCoins
+ assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
+
+ # submit a block with the rbf tx to clear the bump tx out of the mempool,
+ # then call abandon to make sure the wallet doesn't attempt to resubmit the
+ # bump tx, then invalidate the block so the rbf tx will be put back in the
+ # mempool. this makes it possible to check whether the rbf tx outputs are
+ # spendable before the rbf tx is confirmed.
+ block = submit_block_with_tx(rbf_node, rbftx)
+ rbf_node.abandontransaction(bumpid)
+ rbf_node.invalidateblock(block.hash)
+ assert bumpid not in rbf_node.getrawmempool()
+ assert rbfid in rbf_node.getrawmempool()
+
+ # check that outputs from the rbf tx are not spendable before the
+ # transaction is confirmed, due to the replaced_by_txid check in
+ # CWallet::AvailableCoins
+ assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
+
+ # check that the main output from the rbf tx is spendable after confirmed
+ rbf_node.generate(1)
+ assert_equal(
+ sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
+ if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
+
+
+def test_bumpfee_metadata(rbf_node, dest_address):
+ rbfid = rbf_node.sendtoaddress(dest_address, 0.00090000, "comment value", "to value")
+ bumped_tx = rbf_node.bumpfee(rbfid)
+ bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
+ assert_equal(bumped_wtx["comment"], "comment value")
+ assert_equal(bumped_wtx["to"], "to value")
+
+
+def test_locked_wallet_fails(rbf_node, dest_address):
+ rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
+ rbf_node.walletlock()
+ assert_raises_jsonrpc(-13, "Please enter the wallet passphrase with walletpassphrase first.",
+ rbf_node.bumpfee, rbfid)
+
+
+def create_fund_sign_send(node, outputs):
+ rawtx = node.createrawtransaction([], outputs)
+ fundtx = node.fundrawtransaction(rawtx)
+ signedtx = node.signrawtransaction(fundtx["hex"])
+ txid = node.sendrawtransaction(signedtx["hex"])
+ return txid
+
+
+def spend_one_input(node, input_amount, outputs):
+ input = dict(sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == input_amount))
+ rawtx = node.createrawtransaction([input], outputs)
+ signedtx = node.signrawtransaction(rawtx)
+ txid = node.sendrawtransaction(signedtx["hex"])
+ return txid
+
+
+def get_change_address(node):
+ """Get a wallet change address.
+
+ There is no wallet RPC to access unused change addresses, so this creates a
+    dummy transaction, calls fundrawtransaction to add an input and change
+ output, then returns the change address."""
+ dest_address = node.getnewaddress()
+ dest_amount = Decimal("0.00012345")
+ rawtx = node.createrawtransaction([], {dest_address: dest_amount})
+ fundtx = node.fundrawtransaction(rawtx)
+ info = node.decoderawtransaction(fundtx["hex"])
+ return next(address for out in info["vout"]
+ if out["value"] != dest_amount for address in out["scriptPubKey"]["addresses"])
+
+
+def submit_block_with_tx(node, tx):
+ ctx = CTransaction()
+ ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
+
+ tip = node.getbestblockhash()
+ height = node.getblockcount() + 1
+ block_time = node.getblockheader(tip)["mediantime"] + 1
+ block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
+ block.vtx.append(ctx)
+ block.rehash()
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.solve()
+ node.submitblock(bytes_to_hex_str(block.serialize(True)))
+ return block
+
+
+if __name__ == "__main__":
+ BumpFeeTest().main()
diff --git a/test/functional/config.ini.in b/test/functional/config.ini.in
new file mode 100644
index 0000000000..29586c555d
--- /dev/null
+++ b/test/functional/config.ini.in
@@ -0,0 +1,18 @@
+# Copyright (c) 2013-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+# These environment variables are set by the build process and read by
+# test/functional/test_runner.py
+
+[environment]
+SRCDIR=@abs_top_srcdir@
+BUILDDIR=@abs_top_builddir@
+EXEEXT=@EXEEXT@
+
+[components]
+# Which components are enabled. These are commented out by `configure` if they were disabled when configure was run.
+@ENABLE_WALLET_TRUE@ENABLE_WALLET=true
+@BUILD_BITCOIN_UTILS_TRUE@ENABLE_UTILS=true
+@BUILD_BITCOIND_TRUE@ENABLE_BITCOIND=true
+@ENABLE_ZMQ_TRUE@ENABLE_ZMQ=true
diff --git a/test/functional/create_cache.py b/test/functional/create_cache.py
new file mode 100755
index 0000000000..39c4c0f47e
--- /dev/null
+++ b/test/functional/create_cache.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Create a blockchain cache.
+
+Creating a cache of the blockchain speeds up test execution when running
+multiple functional tests. This helper script is executed by test_runner when multiple
+tests are being run in parallel.
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+
+class CreateCache(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+
+ # Test network and test nodes are not required:
+ self.num_nodes = 0
+ self.nodes = []
+
+ def setup_network(self):
+ pass
+
+ def run_test(self):
+ pass
+
+if __name__ == '__main__':
+ CreateCache().main()
diff --git a/test/functional/decodescript.py b/test/functional/decodescript.py
new file mode 100755
index 0000000000..5555e96c44
--- /dev/null
+++ b/test/functional/decodescript.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test decoding scripts via decodescript RPC command."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.mininode import *
+from io import BytesIO
+
+class DecodeScriptTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
+ self.is_network_split = False
+
+ def decodescript_script_sig(self):
+ signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
+ push_signature = '48' + signature
+ public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
+ push_public_key = '21' + public_key
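+ # '48' and '21' are push opcodes given as hex lengths: 0x48 = 72 bytes for the
+ # DER signature plus sighash byte, 0x21 = 33 bytes for a compressed public key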
+
+ # below are test cases for all of the standard transaction types
+
+ # 1) P2PK scriptSig
+ # the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
+ rpc_result = self.nodes[0].decodescript(push_signature)
+ assert_equal(signature, rpc_result['asm'])
+
+ # 2) P2PKH scriptSig
+ rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
+ assert_equal(signature + ' ' + public_key, rpc_result['asm'])
+
+ # 3) multisig scriptSig
+ # this also tests the leading portion of a P2SH multisig scriptSig
+ # OP_0 <A sig> <B sig>
+ rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
+ assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
+
+ # 4) P2SH scriptSig
+ # an empty P2SH redeemScript is valid and makes for a very simple test case.
+ # thus, such a spending scriptSig would just need to pass the outer redeemScript
+ # hash test and leave true on the top of the stack.
+ rpc_result = self.nodes[0].decodescript('5100')
+ assert_equal('1 0', rpc_result['asm'])
+
+ # 5) null data scriptSig - no such thing because null data scripts cannot be spent.
+ # thus, no test case for that standard transaction type is here.
+
+ def decodescript_script_pub_key(self):
+ public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
+ push_public_key = '21' + public_key
+ public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777'
+ push_public_key_hash = '14' + public_key_hash
+
+ # below are test cases for all of the standard transaction types
+
+ # 1) P2PK scriptPubKey
+ # <pubkey> OP_CHECKSIG
+ rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
+ assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
+
+ # 2) P2PKH scriptPubKey
+ # OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
+ rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
+ assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
+
+ # 3) multisig scriptPubKey
+ # <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
+ # just imagine that the pub keys used below are different.
+ # for our purposes here it does not matter that they are the same even though it is unrealistic.
+ rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_public_key + push_public_key + '53ae')
+ assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
+
+ # 4) P2SH scriptPubKey
+ # OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
+ # push_public_key_hash here should actually be the hash of a redeem script.
+ # but this works the same for purposes of this test.
+ rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
+ assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
+
+ # 5) null data scriptPubKey
+ # use a signature look-alike here to make sure that we do not decode random data as a signature.
+ # this matters if/when signature sighash decoding comes along.
+ # would want to make sure that no such decoding takes place in this case.
+ signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
+ # OP_RETURN <data>
+ rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
+ assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
+
+ # 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
+ # OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
+ # just imagine that the pub keys used below are different.
+ # for our purposes here it does not matter that they are the same even though it is unrealistic.
+ #
+ # OP_IF
+ # <receiver-pubkey> OP_CHECKSIGVERIFY
+ # OP_ELSE
+ # <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
+ # OP_ENDIF
+ # <sender-pubkey> OP_CHECKSIG
+ #
+ # lock until block 500,000
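+ # byte by byte, the non-push parts of the hex below are: 63=OP_IF,
+ # ad=OP_CHECKSIGVERIFY, 67=OP_ELSE, 0320a107=3-byte push of 500000 little-endian,
+ # b1=OP_CHECKLOCKTIMEVERIFY, 75=OP_DROP, 68=OP_ENDIF, ac=OP_CHECKSIG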
+ rpc_result = self.nodes[0].decodescript('63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac')
+ assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
+
+ def decoderawtransaction_asm_sighashtype(self):
+ """Test decoding scripts via RPC command "decoderawtransaction".
+
+ This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
+ """
+
+ # this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
+ tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
+ rpc_result = self.nodes[0].decoderawtransaction(tx)
+ assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
+
+ # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
+ # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
+ # verify that we have not altered scriptPubKey decoding.
+ tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
+ rpc_result = self.nodes[0].decoderawtransaction(tx)
+ assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
+ assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
+ assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
+ assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
+ txSave = CTransaction()
+ txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))
+
+ # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
+ tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
+ rpc_result = self.nodes[0].decoderawtransaction(tx)
+ assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
+
+ # verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
+ tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
+ rpc_result = self.nodes[0].decoderawtransaction(tx)
+ assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
+ assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
+
+ # some more full transaction tests of varying specific scriptSigs. used instead of
+ # tests in decodescript_script_sig because the decodescript RPC is specifically
+ # for working on scriptPubKeys (argh!).
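+ # the saved multisig scriptSig starts with OP_0 ('00'); the slice below skips it
+ # and keeps the 0x48 push opcode plus the 72-byte signature (2 + 0x48*2 hex chars)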
+ push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
+ signature = push_signature[2:]
+ der_signature = signature[:-2]
+ signature_sighash_decoded = der_signature + '[ALL]'
+ signature_2 = der_signature + '82'
+ push_signature_2 = '48' + signature_2
+ signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
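+ # the trailing sighash byte is what decodes in brackets: 0x01 = SIGHASH_ALL,
+ # 0x82 = SIGHASH_NONE | SIGHASH_ANYONECANPAY (0x02 | 0x80)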
+
+ # 1) P2PK scriptSig
+ txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
+ rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
+ assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
+
+ # make sure that the sighash decodes come out correctly for a more complex / lesser used case.
+ txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
+ rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
+ assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
+
+ # 2) multisig scriptSig
+ txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
+ rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
+ assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
+
+ # 3) test a scriptSig that contains more than just push operations.
+ # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
+ txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
+ rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
+ assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
+
+ def run_test(self):
+ self.decodescript_script_sig()
+ self.decodescript_script_pub_key()
+ self.decoderawtransaction_asm_sighashtype()
+
+if __name__ == '__main__':
+ DecodeScriptTest().main()
diff --git a/test/functional/disablewallet.py b/test/functional/disablewallet.py
new file mode 100755
index 0000000000..2f729e19bf
--- /dev/null
+++ b/test/functional/disablewallet.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test a node with the -disablewallet option.
+
+- Test that validateaddress RPC works when running with -disablewallet
+- Test that it is not possible to mine to an invalid address.
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+
+class DisableWalletTest (BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-disablewallet']])
+ self.is_network_split = False
+ self.sync_all()
+
+ def run_test (self):
+ x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
+ assert(x['isvalid'] == False)
+ x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
+ assert(x['isvalid'] == True)
+
+ # Checking mining to an address without a wallet. Generating to a valid address should succeed
+ # but generating to an invalid address will fail.
+ self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
+ assert_raises_jsonrpc(-5, "Invalid address", self.nodes[0].generatetoaddress, 1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
+
+if __name__ == '__main__':
+ DisableWalletTest ().main ()
diff --git a/test/functional/forknotify.py b/test/functional/forknotify.py
new file mode 100755
index 0000000000..c2724ba5df
--- /dev/null
+++ b/test/functional/forknotify.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the -alertnotify option."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class ForkNotifyTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 2
+ self.setup_clean_chain = False
+
+ alert_filename = None # Set by setup_network
+
+ def setup_network(self):
+ self.nodes = []
+ self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
+ with open(self.alert_filename, 'w', encoding='utf8'):
+ pass # Just open then close to create zero-length file
+ self.nodes.append(start_node(0, self.options.tmpdir,
+ ["-blockversion=2", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]))
+ # Node1 mines block.version=211 blocks
+ self.nodes.append(start_node(1, self.options.tmpdir,
+ ["-blockversion=211"]))
+ connect_nodes(self.nodes[1], 0)
+
+ self.is_network_split = False
+ self.sync_all()
+
+ def run_test(self):
+ # Mine 51 up-version blocks
+ self.nodes[1].generate(51)
+ self.sync_all()
+ # -alertnotify should trigger on the 51st block,
+ # but mine and sync another to give
+ # -alertnotify time to write
+ self.nodes[1].generate(1)
+ self.sync_all()
+
+ with open(self.alert_filename, 'r', encoding='utf8') as f:
+ alert_text = f.read()
+
+ if len(alert_text) == 0:
+ raise AssertionError("-alertnotify did not warn of up-version blocks")
+
+ # Mine more up-version blocks, should not get more alerts:
+ self.nodes[1].generate(1)
+ self.sync_all()
+ self.nodes[1].generate(1)
+ self.sync_all()
+
+ with open(self.alert_filename, 'r', encoding='utf8') as f:
+ alert_text2 = f.read()
+
+ if alert_text != alert_text2:
+ raise AssertionError("-alertnotify excessive warning of up-version blocks")
+
+if __name__ == '__main__':
+ ForkNotifyTest().main()
diff --git a/test/functional/fundrawtransaction.py b/test/functional/fundrawtransaction.py
new file mode 100755
index 0000000000..fd330ef277
--- /dev/null
+++ b/test/functional/fundrawtransaction.py
@@ -0,0 +1,733 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the fundrawtransaction RPC."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+
+def get_unspent(listunspent, amount):
+ for utx in listunspent:
+ if utx['amount'] == amount:
+ return utx
+ raise AssertionError('Could not find unspent with amount={}'.format(amount))
+
+
+class RawTransactionsTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 4
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
+
+ connect_nodes_bi(self.nodes,0,1)
+ connect_nodes_bi(self.nodes,1,2)
+ connect_nodes_bi(self.nodes,0,2)
+ connect_nodes_bi(self.nodes,0,3)
+
+ self.is_network_split=False
+ self.sync_all()
+
+ def run_test(self):
+ min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
+ # This test is not meant to test fee estimation and we'd like
+ # to be sure all txs are sent at a consistent desired feerate
+ for node in self.nodes:
+ node.settxfee(min_relay_tx_fee)
+
+ # If the fee's positive delta is higher than this value, tests will fail;
+ # a negative delta always fails the tests.
+ # The size of the signature of every input may be at most 2 bytes larger
+ # than a minimum-sized signature.
+
+ # = 2 bytes * minRelayTxFeePerByte
+ feeTolerance = 2 * min_relay_tx_fee/1000
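+ # e.g. with the default regtest relayfee of 0.00001 BTC/kB this tolerance is 2 satoshis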
+
+ self.nodes[2].generate(1)
+ self.sync_all()
+ self.nodes[0].generate(121)
+ self.sync_all()
+
+ watchonly_address = self.nodes[0].getnewaddress()
+ watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
+ watchonly_amount = Decimal(200)
+ self.nodes[3].importpubkey(watchonly_pubkey, "", True)
+ watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
+ self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
+
+ self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
+ self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
+ self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
+
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ ###############
+ # simple test #
+ ###############
+ inputs = [ ]
+ outputs = { self.nodes[0].getnewaddress() : 1.0 }
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+ rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
+ fee = rawtxfund['fee']
+ dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
+ assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
+
+ ##############################
+ # simple test with two coins #
+ ##############################
+ inputs = [ ]
+ outputs = { self.nodes[0].getnewaddress() : 2.2 }
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+
+ rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
+ fee = rawtxfund['fee']
+ dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
+ assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
+
+ ##############################
+ # simple test with two coins #
+ ##############################
+ inputs = [ ]
+ outputs = { self.nodes[0].getnewaddress() : 2.6 }
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+
+ rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
+ fee = rawtxfund['fee']
+ dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
+ assert(len(dec_tx['vin']) > 0)
+ assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
+
+
+ ################################
+ # simple test with two outputs #
+ ################################
+ inputs = [ ]
+ outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+
+ rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
+ fee = rawtxfund['fee']
+ dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
+ totalOut = 0
+ for out in dec_tx['vout']:
+ totalOut += out['value']
+
+ assert(len(dec_tx['vin']) > 0)
+ assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
+
+
+ #########################################################################
+ # test a fundrawtransaction with a VIN greater than the required amount #
+ #########################################################################
+ utx = get_unspent(self.nodes[2].listunspent(), 5)
+
+ inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
+ outputs = { self.nodes[0].getnewaddress() : 1.0 }
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+ assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
+
+ rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
+ fee = rawtxfund['fee']
+ dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
+ totalOut = 0
+ for out in dec_tx['vout']:
+ totalOut += out['value']
+
+ assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
+
+
+ ################################################################
+ # test a fundrawtransaction which will not get a change output #
+ ################################################################
+ utx = get_unspent(self.nodes[2].listunspent(), 5)
+
+ inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
+ outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+ assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
+
+ rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
+ fee = rawtxfund['fee']
+ dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
+ totalOut = 0
+ for out in dec_tx['vout']:
+ totalOut += out['value']
+
+ assert_equal(rawtxfund['changepos'], -1)
+ assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
+
+
+ ####################################################
+ # test a fundrawtransaction with an invalid option #
+ ####################################################
+ utx = get_unspent(self.nodes[2].listunspent(), 5)
+
+ inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
+ outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+ assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
+
+ assert_raises_jsonrpc(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
+
+ ############################################################
+ # test a fundrawtransaction with an invalid change address #
+ ############################################################
+ utx = get_unspent(self.nodes[2].listunspent(), 5)
+
+ inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
+ outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+ assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
+
+ assert_raises_jsonrpc(-5, "changeAddress must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
+
+ ############################################################
+ # test a fundrawtransaction with a provided change address #
+ ############################################################
+ utx = get_unspent(self.nodes[2].listunspent(), 5)
+
+ inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
+ outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+ assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
+
+ change = self.nodes[2].getnewaddress()
+ assert_raises_jsonrpc(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
+ rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
+ dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
+ out = dec_tx['vout'][0]
+ assert_equal(change, out['scriptPubKey']['addresses'][0])
+
+
+ #########################################################################
+ # test a fundrawtransaction with a VIN smaller than the required amount #
+ #########################################################################
+ utx = get_unspent(self.nodes[2].listunspent(), 1)
+
+ inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
+ outputs = { self.nodes[0].getnewaddress() : 1.0 }
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+
+ # 4-byte version + 1-byte vin count + 36-byte prevout then script_len
+ rawtx = rawtx[:82] + "0100" + rawtx[84:]
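+ # those 41 bytes are 82 hex chars; the '00' script-length byte at [82:84] is
+ # replaced by '0100', giving the input a one-byte scriptSig of 0x00 (OP_0)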
+
+ dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+ assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
+ assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
+
+ rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
+ fee = rawtxfund['fee']
+ dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
+ totalOut = 0
+ matchingOuts = 0
+ for i, out in enumerate(dec_tx['vout']):
+ totalOut += out['value']
+ if out['scriptPubKey']['addresses'][0] in outputs:
+ matchingOuts+=1
+ else:
+ assert_equal(i, rawtxfund['changepos'])
+
+ assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
+ assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
+
+ assert_equal(matchingOuts, 1)
+ assert_equal(len(dec_tx['vout']), 2)
+
+
+ ###########################################
+ # test a fundrawtransaction with two VINs #
+ ###########################################
+ utx = get_unspent(self.nodes[2].listunspent(), 1)
+ utx2 = get_unspent(self.nodes[2].listunspent(), 5)
+
+ inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
+ outputs = { self.nodes[0].getnewaddress() : 6.0 }
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+ assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
+
+ rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
+ fee = rawtxfund['fee']
+ dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
+ totalOut = 0
+ matchingOuts = 0
+ for out in dec_tx['vout']:
+ totalOut += out['value']
+ if out['scriptPubKey']['addresses'][0] in outputs:
+ matchingOuts+=1
+
+ assert_equal(matchingOuts, 1)
+ assert_equal(len(dec_tx['vout']), 2)
+
+ matchingIns = 0
+ for vinOut in dec_tx['vin']:
+ for vinIn in inputs:
+ if vinIn['txid'] == vinOut['txid']:
+ matchingIns+=1
+
+ assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
+
+ #########################################################
+ # test a fundrawtransaction with two VINs and two vOUTs #
+ #########################################################
+ utx = get_unspent(self.nodes[2].listunspent(), 1)
+ utx2 = get_unspent(self.nodes[2].listunspent(), 5)
+
+ inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
+ outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+ assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
+
+ rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
+ fee = rawtxfund['fee']
+ dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
+ totalOut = 0
+ matchingOuts = 0
+ for out in dec_tx['vout']:
+ totalOut += out['value']
+ if out['scriptPubKey']['addresses'][0] in outputs:
+ matchingOuts+=1
+
+ assert_equal(matchingOuts, 2)
+ assert_equal(len(dec_tx['vout']), 3)
+
+ ##############################################
+ # test a fundrawtransaction with invalid vin #
+ ##############################################
+ listunspent = self.nodes[2].listunspent()
+ inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
+ outputs = { self.nodes[0].getnewaddress() : 1.0}
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+
+ assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
+
+ ############################################################
+ #compare fee of a standard pubkeyhash transaction
+ inputs = []
+ outputs = {self.nodes[1].getnewaddress():1.1}
+ rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
+ fundedTx = self.nodes[0].fundrawtransaction(rawTx)
+
+ #create same transaction over sendtoaddress
+ txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
+ signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
+
+ #compare fee
+ feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
+ assert(feeDelta >= 0 and feeDelta <= feeTolerance)
+ ############################################################
+
+ ############################################################
+ #compare fee of a standard pubkeyhash transaction with multiple outputs
+ inputs = []
+ outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
+ rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
+ fundedTx = self.nodes[0].fundrawtransaction(rawTx)
+ #create same transaction over sendtoaddress
+ txId = self.nodes[0].sendmany("", outputs)
+ signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
+
+ #compare fee
+ feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
+ assert(feeDelta >= 0 and feeDelta <= feeTolerance)
+ ############################################################
+
+
+ ############################################################
+ #compare fee of a 2of2 multisig p2sh transaction
+
+ # create 2of2 addr
+ addr1 = self.nodes[1].getnewaddress()
+ addr2 = self.nodes[1].getnewaddress()
+
+ addr1Obj = self.nodes[1].validateaddress(addr1)
+ addr2Obj = self.nodes[1].validateaddress(addr2)
+
+ mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
+
+ inputs = []
+ outputs = {mSigObj:1.1}
+ rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
+ fundedTx = self.nodes[0].fundrawtransaction(rawTx)
+
+ #create same transaction over sendtoaddress
+ txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
+ signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
+
+ #compare fee
+ feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
+ assert(feeDelta >= 0 and feeDelta <= feeTolerance)
+ ############################################################
+
+
+ ############################################################
+ #compare fee of a standard pubkeyhash transaction
+
+ # create 4of5 addr
+ addr1 = self.nodes[1].getnewaddress()
+ addr2 = self.nodes[1].getnewaddress()
+ addr3 = self.nodes[1].getnewaddress()
+ addr4 = self.nodes[1].getnewaddress()
+ addr5 = self.nodes[1].getnewaddress()
+
+ addr1Obj = self.nodes[1].validateaddress(addr1)
+ addr2Obj = self.nodes[1].validateaddress(addr2)
+ addr3Obj = self.nodes[1].validateaddress(addr3)
+ addr4Obj = self.nodes[1].validateaddress(addr4)
+ addr5Obj = self.nodes[1].validateaddress(addr5)
+
+ mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
+
+ inputs = []
+ outputs = {mSigObj:1.1}
+ rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
+ fundedTx = self.nodes[0].fundrawtransaction(rawTx)
+
+ #create same transaction over sendtoaddress
+ txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
+ signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
+
+ #compare fee
+ feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
+ assert(feeDelta >= 0 and feeDelta <= feeTolerance)
+ ############################################################
+
+
+ ############################################################
+ # spend a 2of2 multisig transaction over fundraw
+
+ # create 2of2 addr
+ addr1 = self.nodes[2].getnewaddress()
+ addr2 = self.nodes[2].getnewaddress()
+
+ addr1Obj = self.nodes[2].validateaddress(addr1)
+ addr2Obj = self.nodes[2].validateaddress(addr2)
+
+ mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
+
+
+ # send 1.2 BTC to msig addr
+ txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
+ self.sync_all()
+ self.nodes[1].generate(1)
+ self.sync_all()
+
+ oldBalance = self.nodes[1].getbalance()
+ inputs = []
+ outputs = {self.nodes[1].getnewaddress():1.1}
+ rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
+ fundedTx = self.nodes[2].fundrawtransaction(rawTx)
+
+ signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
+ txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
+ self.sync_all()
+ self.nodes[1].generate(1)
+ self.sync_all()
+
+ # make sure funds are received at node1
+ assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
+
+ ############################################################
+ # locked wallet test
+ self.nodes[1].encryptwallet("test")
+ self.nodes.pop(1)
+ stop_node(self.nodes[0], 0)
+ stop_node(self.nodes[1], 2)
+ stop_node(self.nodes[2], 3)
+
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
+ # This test is not meant to test fee estimation and we'd like
+ # to be sure all txs are sent at a consistent desired feerate
+ for node in self.nodes:
+ node.settxfee(min_relay_tx_fee)
+
+ connect_nodes_bi(self.nodes,0,1)
+ connect_nodes_bi(self.nodes,1,2)
+ connect_nodes_bi(self.nodes,0,2)
+ connect_nodes_bi(self.nodes,0,3)
+ self.is_network_split=False
+ self.sync_all()
+
+ # drain the keypool
+ self.nodes[1].getnewaddress()
+ inputs = []
+ outputs = {self.nodes[0].getnewaddress():1.1}
+ rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
+ # fund a transaction that requires a new key for the change output
+ # creating the key must be impossible because the wallet is locked
+ assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[1].fundrawtransaction, rawTx)
+
+ #refill the keypool
+ self.nodes[1].walletpassphrase("test", 100)
+ self.nodes[1].walletlock()
+
+ assert_raises_jsonrpc(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
+
+ oldBalance = self.nodes[0].getbalance()
+
+ inputs = []
+ outputs = {self.nodes[0].getnewaddress():1.1}
+ rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
+ fundedTx = self.nodes[1].fundrawtransaction(rawTx)
+
+ #now we need to unlock
+ self.nodes[1].walletpassphrase("test", 600)
+ signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
+ txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
+ self.nodes[1].generate(1)
+ self.sync_all()
+
+ # make sure funds are received at node0
+ assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
+
+
+ ###############################################
+ # multiple (~19) inputs tx test | Compare fee #
+ ###############################################
+
+ #empty node1, send some small coins from node0 to node1
+ self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
+ self.sync_all()
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ for i in range(0,20):
+ self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ #fund a tx with ~20 small inputs
+ inputs = []
+ outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
+ rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
+ fundedTx = self.nodes[1].fundrawtransaction(rawTx)
+
+ #create same transaction over sendtoaddress
+ txId = self.nodes[1].sendmany("", outputs)
+ signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
+
+ #compare fee
+ feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
+ assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
+
+
+ #############################################
+ # multiple (~19) inputs tx test | sign/send #
+ #############################################
+
+ #again, empty node1, send some small coins from node0 to node1
+ self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
+ self.sync_all()
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ for i in range(0,20):
+ self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ #fund a tx with ~20 small inputs
+ oldBalance = self.nodes[0].getbalance()
+
+ inputs = []
+ outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
+ rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
+ fundedTx = self.nodes[1].fundrawtransaction(rawTx)
+ fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
+ txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
+ self.sync_all()
+ self.nodes[0].generate(1)
+ self.sync_all()
+ assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
+
+ #####################################################
+ # test fundrawtransaction with OP_RETURN and no vin #
+ #####################################################
+
+ rawtx = "0100000000010000000000000000066a047465737400000000"
+ dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+
+ assert_equal(len(dec_tx['vin']), 0)
+ assert_equal(len(dec_tx['vout']), 1)
+
+ rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
+ dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
+
+ assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
+ assert_equal(len(dec_tx['vout']), 2) # one change output added
+
+
+ ##################################################
+ # test a fundrawtransaction using only watchonly #
+ ##################################################
+
+ inputs = []
+ outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
+ rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
+
+ result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
+ res_dec = self.nodes[0].decoderawtransaction(result["hex"])
+ assert_equal(len(res_dec["vin"]), 1)
+ assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
+
+ assert("fee" in result.keys())
+ assert_greater_than(result["changepos"], -1)
+
+ ###############################################################
+ # test fundrawtransaction using the entirety of watched funds #
+ ###############################################################
+
+ inputs = []
+ outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
+ rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
+
+ # Backward compatibility test (2nd param is includeWatching)
+ result = self.nodes[3].fundrawtransaction(rawtx, True)
+ res_dec = self.nodes[0].decoderawtransaction(result["hex"])
+ assert_equal(len(res_dec["vin"]), 2)
+ assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
+
+ assert_greater_than(result["fee"], 0)
+ assert_greater_than(result["changepos"], -1)
+ assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
+
+ signedtx = self.nodes[3].signrawtransaction(result["hex"])
+ assert(not signedtx["complete"])
+ signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
+ assert(signedtx["complete"])
+ self.nodes[0].sendrawtransaction(signedtx["hex"])
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ #######################
+ # Test feeRate option #
+ #######################
+
+ # Make sure there is exactly one input so coin selection can't skew the result
+ assert_equal(len(self.nodes[3].listunspent(1)), 1)
+
+ inputs = []
+ outputs = {self.nodes[3].getnewaddress() : 1}
+ rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
+ result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
+ result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
+ result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
+ result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
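+ # fee rate in BTC per 1000 bytes, the same unit the feeRate option uses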
+ assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
+ assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
+
+ #############################
+ # Test address reuse option #
+ #############################
+
+ result3 = self.nodes[3].fundrawtransaction(rawtx, {"reserveChangeKey": False})
+ res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
+ changeaddress = ""
+ for out in res_dec['vout']:
+ if out['value'] > 1.0:
+ changeaddress += out['scriptPubKey']['addresses'][0]
+ assert(changeaddress != "")
+ nextaddr = self.nodes[3].getnewaddress()
+ # fundrawtransaction should not have removed the key from the keypool
+ assert(changeaddress == nextaddr)
+
+ result3 = self.nodes[3].fundrawtransaction(rawtx)
+ res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
+ changeaddress = ""
+ for out in res_dec['vout']:
+ if out['value'] > 1.0:
+ changeaddress += out['scriptPubKey']['addresses'][0]
+ assert(changeaddress != "")
+ nextaddr = self.nodes[3].getnewaddress()
+ # Now the change address key should be removed from the keypool
+ assert(changeaddress != nextaddr)
+
+ ######################################
+ # Test subtractFeeFromOutputs option #
+ ######################################
+
+ # Make sure there is exactly one input so coin selection can't skew the result
+ assert_equal(len(self.nodes[3].listunspent(1)), 1)
+
+ inputs = []
+ outputs = {self.nodes[2].getnewaddress(): 1}
+ rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
+
+ result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
+ self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
+ self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
+ self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
+ self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
+
+ dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
+ output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
+ change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
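+ # with exactly two outputs, 1 - changepos indexes the non-change output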
+
+ assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
+ assert_equal(result[3]['fee'], result[4]['fee'])
+ assert_equal(change[0], change[1])
+ assert_equal(output[0], output[1])
+ assert_equal(output[0], output[2] + result[2]['fee'])
+ assert_equal(change[0] + result[0]['fee'], change[2])
+ assert_equal(output[3], output[4] + result[4]['fee'])
+ assert_equal(change[3] + result[3]['fee'], change[4])
+
+ inputs = []
+ outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
+ keys = list(outputs.keys())
+ rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
+
+ result = [self.nodes[3].fundrawtransaction(rawtx),
+ # split the fee between outputs 0, 2, and 3, but not output 1
+ self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
+
+ dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
+ self.nodes[3].decoderawtransaction(result[1]['hex'])]
+
+ # Nested list of non-change output amounts for each transaction
+ output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
+ for d, r in zip(dec_tx, result)]
+
+ # List of differences in output amounts between normal and subtractFee transactions
+ share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
+
+ # output 1 is the same in both transactions
+ assert_equal(share[1], 0)
+
+ # the other 3 outputs are smaller as a result of subtractFeeFromOutputs
+ assert_greater_than(share[0], 0)
+ assert_greater_than(share[2], 0)
+ assert_greater_than(share[3], 0)
+
+ # outputs 2 and 3 take the same share of the fee
+ assert_equal(share[2], share[3])
+
+ # output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
+ assert_greater_than_or_equal(share[0], share[2])
+ assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
+
+ # the fee is the same in both transactions
+ assert_equal(result[0]['fee'], result[1]['fee'])
+
+ # the total subtracted from the outputs is equal to the fee
+ assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
+
+if __name__ == '__main__':
+ RawTransactionsTest().main()
diff --git a/test/functional/getblocktemplate_longpoll.py b/test/functional/getblocktemplate_longpoll.py
new file mode 100755
index 0000000000..bbe1dda5f7
--- /dev/null
+++ b/test/functional/getblocktemplate_longpoll.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test longpolling with getblocktemplate."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+import threading
+
+class LongpollThread(threading.Thread):
+ def __init__(self, node):
+ threading.Thread.__init__(self)
+ # query current longpollid
+ templat = node.getblocktemplate()
+ self.longpollid = templat['longpollid']
+ # create a new connection to the node, we can't use the same
+ # connection from two threads
+ self.node = get_rpc_proxy(node.url, 1, timeout=600)
+
+ def run(self):
+ self.node.getblocktemplate({'longpollid':self.longpollid})
+
+class GetBlockTemplateLPTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 4
+ self.setup_clean_chain = False
+
+ def run_test(self):
+ self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
+ self.nodes[0].generate(10)
+ templat = self.nodes[0].getblocktemplate()
+ longpollid = templat['longpollid']
+ # longpollid should not change between successive invocations if nothing else happens
+ templat2 = self.nodes[0].getblocktemplate()
+ assert(templat2['longpollid'] == longpollid)
+
+ # Test 1: test that the longpoll will wait if we do nothing
+ thr = LongpollThread(self.nodes[0])
+ thr.start()
+ # check that thread still lives
+ thr.join(5) # wait 5 seconds or until thread exits
+ assert(thr.is_alive())
+
+ # Test 2: test that longpoll will terminate if another node generates a block
+ self.nodes[1].generate(1) # generate a block on another node
+ # check that the thread will exit now that a new block was found
+ thr.join(5) # wait 5 seconds or until thread exits
+ assert(not thr.is_alive())
+
+ # Test 3: test that longpoll will terminate if we generate a block ourselves
+ thr = LongpollThread(self.nodes[0])
+ thr.start()
+ self.nodes[0].generate(1) # generate a block on our own node
+ thr.join(5) # wait 5 seconds or until thread exits
+ assert(not thr.is_alive())
+
+ # Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
+ thr = LongpollThread(self.nodes[0])
+ thr.start()
+ # generate a random transaction and submit it
+ min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
+ # min_relay_fee is fee per 1000 bytes, which should be more than enough.
+ (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
+ # after one minute, the mempool is probed every 10 seconds, so within 80 seconds the longpoll should have returned
+ thr.join(60 + 20)
+ assert(not thr.is_alive())
+
+if __name__ == '__main__':
+ GetBlockTemplateLPTest().main()
+
diff --git a/test/functional/getblocktemplate_proposals.py b/test/functional/getblocktemplate_proposals.py
new file mode 100755
index 0000000000..67745f77d1
--- /dev/null
+++ b/test/functional/getblocktemplate_proposals.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test block proposals with getblocktemplate."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+from binascii import a2b_hex, b2a_hex
+from hashlib import sha256
+from struct import pack
+
+def b2x(b):
+ return b2a_hex(b).decode('ascii')
+
+# NOTE: This does not work for signed numbers (which set the high bit) or zero (use b'\0')
+def encodeUNum(n):
+ s = bytearray(b'\1')
+ while n > 127:
+ s[0] += 1
+ s.append(n % 256)
+ n //= 256
+ s.append(n)
+ return bytes(s)
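+# e.g. encodeUNum(500000) == b'\x03\x20\xa1\x07': a 3-byte push of 500000 in
+# little-endian byte order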
+
+def varlenEncode(n):
+ if n < 0xfd:
+ return pack('<B', n)
+ if n <= 0xffff:
+ return b'\xfd' + pack('<H', n)
+ if n <= 0xffffffff:
+ return b'\xfe' + pack('<L', n)
+ return b'\xff' + pack('<Q', n)
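+# e.g. varlenEncode(0xfc) == b'\xfc' and varlenEncode(300) == b'\xfd\x2c\x01'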
+
+def dblsha(b):
+ return sha256(sha256(b).digest()).digest()
+
+def genmrklroot(leaflist):
+ cur = leaflist
+ while len(cur) > 1:
+ n = []
+ if len(cur) & 1:
+ cur.append(cur[-1])
+ for i in range(0, len(cur), 2):
+ n.append(dblsha(cur[i] + cur[i+1]))
+ cur = n
+ return cur[0]
+
+def template_to_bytearray(tmpl, txlist):
+ blkver = pack('<L', tmpl['version'])
+ mrklroot = genmrklroot(list(dblsha(a) for a in txlist))
+ timestamp = pack('<L', tmpl['curtime'])
+ nonce = b'\0\0\0\0'
+ blk = blkver + a2b_hex(tmpl['previousblockhash'])[::-1] + mrklroot + timestamp + a2b_hex(tmpl['bits'])[::-1] + nonce
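+ # the [::-1] reversals convert RPC big-endian hex into the header's little-endian byte order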
+ blk += varlenEncode(len(txlist))
+ for tx in txlist:
+ blk += tx
+ return bytearray(blk)
+
+def template_to_hex(tmpl, txlist):
+ return b2x(template_to_bytearray(tmpl, txlist))
+
+def assert_template(node, tmpl, txlist, expect):
+ rsp = node.getblocktemplate({'data':template_to_hex(tmpl, txlist),'mode':'proposal'})
+ if rsp != expect:
+ raise AssertionError('unexpected: %s' % (rsp,))
+
+class GetBlockTemplateProposalTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 2
+ self.setup_clean_chain = False
+
+ def setup_network(self):
+ self.nodes = self.setup_nodes()
+ connect_nodes_bi(self.nodes, 0, 1)
+
+ def run_test(self):
+ node = self.nodes[0]
+ node.generate(1) # Mine a block to leave initial block download
+ tmpl = node.getblocktemplate()
+ if 'coinbasetxn' not in tmpl:
+ rawcoinbase = encodeUNum(tmpl['height'])
+ rawcoinbase += b'\x01-'
+ hexcoinbase = b2x(rawcoinbase)
+ hexoutval = b2x(pack('<Q', tmpl['coinbasevalue']))
+ tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
+ txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))
+
+ # Test 0: Capability advertised
+ assert('proposal' in tmpl['capabilities'])
+
+ # NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase)
+ ## Test 1: Bad height in coinbase
+ #txlist[0][4+1+36+1+1] += 1
+ #assert_template(node, tmpl, txlist, 'FIXME')
+ #txlist[0][4+1+36+1+1] -= 1
+
+ # Test 2: Bad input hash for gen tx
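+ # (offset 4+1 = 4-byte version + 1-byte vin count, i.e. the first byte of the
+ # coinbase input's prevout hash, which must be all zeros)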
+ txlist[0][4+1] += 1
+ assert_template(node, tmpl, txlist, 'bad-cb-missing')
+ txlist[0][4+1] -= 1
+
+ # Test 3: Truncated final tx
+ lastbyte = txlist[-1].pop()
+ assert_raises_jsonrpc(-22, "Block decode failed", assert_template, node, tmpl, txlist, 'n/a')
+ txlist[-1].append(lastbyte)
+
+ # Test 4: Add an invalid tx to the end (duplicate of gen tx)
+ txlist.append(txlist[0])
+ assert_template(node, tmpl, txlist, 'bad-txns-duplicate')
+ txlist.pop()
+
+ # Test 5: Add an invalid tx to the end (non-duplicate)
+ txlist.append(bytearray(txlist[0]))
+ txlist[-1][4+1] = 0xff
+ assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent')
+ txlist.pop()
+
+ # Test 6: Future tx lock time
+ txlist[0][-4:] = b'\xff\xff\xff\xff'
+ assert_template(node, tmpl, txlist, 'bad-txns-nonfinal')
+ txlist[0][-4:] = b'\0\0\0\0'
+
+ # Test 7: Bad tx count
+ txlist.append(b'')
+ assert_raises_jsonrpc(-22, 'Block decode failed', assert_template, node, tmpl, txlist, 'n/a')
+ txlist.pop()
+
+ # Test 8: Bad bits
+ realbits = tmpl['bits']
+ tmpl['bits'] = '1c0000ff' # impossible in the real world
+ assert_template(node, tmpl, txlist, 'bad-diffbits')
+ tmpl['bits'] = realbits
+
+ # Test 9: Bad merkle root
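+ # offset 4+32 is the first merkle-root byte, after the 4-byte version and 32-byte prevhash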
+ rawtmpl = template_to_bytearray(tmpl, txlist)
+ rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100
+ rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'})
+ if rsp != 'bad-txnmrklroot':
+ raise AssertionError('unexpected: %s' % (rsp,))
+
+ # Test 10: Bad timestamps
+ realtime = tmpl['curtime']
+ tmpl['curtime'] = 0x7fffffff
+ assert_template(node, tmpl, txlist, 'time-too-new')
+ tmpl['curtime'] = 0
+ assert_template(node, tmpl, txlist, 'time-too-old')
+ tmpl['curtime'] = realtime
+
+ # Test 11: Valid block
+ assert_template(node, tmpl, txlist, None)
+
+ # Test 12: Orphan block
+ tmpl['previousblockhash'] = 'ff00' * 16
+ assert_template(node, tmpl, txlist, 'inconclusive-not-best-prevblk')
+
+if __name__ == '__main__':
+ GetBlockTemplateProposalTest().main()
diff --git a/test/functional/getchaintips.py b/test/functional/getchaintips.py
new file mode 100755
index 0000000000..14222334a6
--- /dev/null
+++ b/test/functional/getchaintips.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the getchaintips RPC.
+
+- introduce a network split
+- work on chains of different lengths
+- join the network together again
+- verify that getchaintips now returns two chain tips.
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_equal
+
+class GetChainTipsTest (BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 4
+ self.setup_clean_chain = False
+
+ def run_test (self):
+
+ tips = self.nodes[0].getchaintips ()
+ assert_equal (len (tips), 1)
+ assert_equal (tips[0]['branchlen'], 0)
+ assert_equal (tips[0]['height'], 200)
+ assert_equal (tips[0]['status'], 'active')
+
+ # Split the network and build two chains of different lengths.
+ self.split_network ()
+ self.nodes[0].generate(10)
+ self.nodes[2].generate(20)
+ self.sync_all ()
+
+ tips = self.nodes[1].getchaintips ()
+ assert_equal (len (tips), 1)
+ shortTip = tips[0]
+ assert_equal (shortTip['branchlen'], 0)
+ assert_equal (shortTip['height'], 210)
+ assert_equal (tips[0]['status'], 'active')
+
+ tips = self.nodes[3].getchaintips ()
+ assert_equal (len (tips), 1)
+ longTip = tips[0]
+ assert_equal (longTip['branchlen'], 0)
+ assert_equal (longTip['height'], 220)
+ assert_equal (tips[0]['status'], 'active')
+
+ # Join the network halves and check that we now have two tips
+ # (at least at the nodes that previously had the short chain).
+ self.join_network ()
+
+ tips = self.nodes[0].getchaintips ()
+ assert_equal (len (tips), 2)
+ assert_equal (tips[0], longTip)
+
+ assert_equal (tips[1]['branchlen'], 10)
+ assert_equal (tips[1]['status'], 'valid-fork')
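+ # Normalize the fields that changed when this tip stopped being active; everything else must match the old short tip exactly.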
+ tips[1]['branchlen'] = 0
+ tips[1]['status'] = 'active'
+ assert_equal (tips[1], shortTip)
+
+if __name__ == '__main__':
+ GetChainTipsTest ().main ()
diff --git a/test/functional/httpbasics.py b/test/functional/httpbasics.py
new file mode 100755
index 0000000000..8f35f0ab87
--- /dev/null
+++ b/test/functional/httpbasics.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the RPC HTTP basics."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+import http.client
+import urllib.parse
+
+class HTTPBasicsTest (BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 3
+ self.setup_clean_chain = False
+
+ def setup_network(self):
+ self.nodes = self.setup_nodes()
+
+ def run_test(self):
+
+ #################################################
+ # lowlevel check for http persistent connection #
+ #################################################
+ url = urllib.parse.urlparse(self.nodes[0].url)
+ authpair = url.username + ':' + url.password
+ headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
+
+ conn = http.client.HTTPConnection(url.hostname, url.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ out1 = conn.getresponse().read()
+ assert(b'"error":null' in out1)
+ assert(conn.sock!=None) #according to HTTP/1.1, the connection must still be open!
+
+ #send 2nd request without closing connection
+ conn.request('POST', '/', '{"method": "getchaintips"}', headers)
+ out1 = conn.getresponse().read()
+ assert(b'"error":null' in out1) #must also response with a correct json-rpc message
+ assert(conn.sock!=None) #according to http/1.1 connection must still be open!
+ conn.close()
+
+ #the same should hold if we explicitly add a keep-alive header, since that is the standard behaviour
+ headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
+
+ conn = http.client.HTTPConnection(url.hostname, url.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ out1 = conn.getresponse().read()
+ assert(b'"error":null' in out1)
+ assert(conn.sock!=None) #according to HTTP/1.1, the connection must still be open!
+
+ #send 2nd request without closing connection
+ conn.request('POST', '/', '{"method": "getchaintips"}', headers)
+ out1 = conn.getresponse().read()
+ assert(b'"error":null' in out1) #must also response with a correct json-rpc message
+ assert(conn.sock!=None) #according to http/1.1 connection must still be open!
+ conn.close()
+
+ #now do the same with "Connection: close"
+ headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
+
+ conn = http.client.HTTPConnection(url.hostname, url.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ out1 = conn.getresponse().read()
+ assert(b'"error":null' in out1)
+ assert(conn.sock==None) #now the connection must be closed after the response
+
+ #node1 (second node) is running with the keep-alive option disabled
+ urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
+ authpair = urlNode1.username + ':' + urlNode1.password
+ headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
+
+ conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ out1 = conn.getresponse().read()
+ assert(b'"error":null' in out1)
+
+ #node2 (third node) is running with default keep-alive parameters, which means keep-alive is on
+ urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
+ authpair = urlNode2.username + ':' + urlNode2.password
+ headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
+
+ conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ out1 = conn.getresponse().read()
+ assert(b'"error":null' in out1)
+ assert(conn.sock!=None) #connection must be kept open because bitcoind uses keep-alive by default
+
+ # Check excessive request size
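+ # A ~1000-byte URI should stay within the server's header-size limit and merely fail to match an endpoint (404), while a ~10000-byte URI should exceed the limit and be rejected outright (400).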
+ conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
+ conn.connect()
+ conn.request('GET', '/' + ('x'*1000), '', headers)
+ out1 = conn.getresponse()
+ assert_equal(out1.status, http.client.NOT_FOUND)
+
+ conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
+ conn.connect()
+ conn.request('GET', '/' + ('x'*10000), '', headers)
+ out1 = conn.getresponse()
+ assert_equal(out1.status, http.client.BAD_REQUEST)
+
+
+if __name__ == '__main__':
+ HTTPBasicsTest ().main ()
diff --git a/test/functional/import-rescan.py b/test/functional/import-rescan.py
new file mode 100755
index 0000000000..0218a46168
--- /dev/null
+++ b/test/functional/import-rescan.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test wallet import RPCs.
+
+Test rescan behavior of importaddress, importpubkey, importprivkey, and
+importmulti RPCs with different types of keys and rescan options.
+
+In the first part of the test, node 0 creates an address for each type of
+import RPC call and sends BTC to it. Then other nodes import the addresses,
+and the test makes listtransactions and getbalance calls to confirm that the
+importing node either did or did not execute rescans picking up the send
+transactions.
+
+In the second part of the test, node 0 sends more BTC to each address, and the
+test makes more listtransactions and getbalance calls to confirm that the
+importing nodes pick up the new transactions regardless of whether rescans
+happened previously.
+"""
+
+from test_framework.authproxy import JSONRPCException
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (start_nodes, connect_nodes, sync_blocks, assert_equal, set_node_times)
+from decimal import Decimal
+
+import collections
+import enum
+import itertools
+
+Call = enum.Enum("Call", "single multi")
+Data = enum.Enum("Data", "address pub priv")
+Rescan = enum.Enum("Rescan", "no yes late_timestamp")
+
+
+class Variant(collections.namedtuple("Variant", "call data rescan prune")):
+ """Helper for importing one key and verifying scanned transactions."""
+
+ def do_import(self, timestamp):
+ """Call one key import RPC."""
+
+ if self.call == Call.single:
+ if self.data == Data.address:
+ response, error = try_rpc(self.node.importaddress, self.address["address"], self.label,
+ self.rescan == Rescan.yes)
+ elif self.data == Data.pub:
+ response, error = try_rpc(self.node.importpubkey, self.address["pubkey"], self.label,
+ self.rescan == Rescan.yes)
+ elif self.data == Data.priv:
+ response, error = try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes)
+ assert_equal(response, None)
+ assert_equal(error, {'message': 'Rescan is disabled in pruned mode',
+ 'code': -4} if self.expect_disabled else None)
+ elif self.call == Call.multi:
+ response = self.node.importmulti([{
+ "scriptPubKey": {
+ "address": self.address["address"]
+ },
+ "timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
+ "pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
+ "keys": [self.key] if self.data == Data.priv else [],
+ "label": self.label,
+ "watchonly": self.data != Data.priv
+ }], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
+ assert_equal(response, [{"success": True}])
+
+ def check(self, txid=None, amount=None, confirmations=None):
+ """Verify that getbalance/listtransactions return expected values."""
+
+ balance = self.node.getbalance(self.label, 0, True)
+ assert_equal(balance, self.expected_balance)
+
+ txs = self.node.listtransactions(self.label, 10000, 0, True)
+ assert_equal(len(txs), self.expected_txs)
+
+ if txid is not None:
+ tx, = [tx for tx in txs if tx["txid"] == txid]
+ assert_equal(tx["account"], self.label)
+ assert_equal(tx["address"], self.address["address"])
+ assert_equal(tx["amount"], amount)
+ assert_equal(tx["category"], "receive")
+ assert_equal(tx["label"], self.label)
+ assert_equal(tx["txid"], txid)
+ assert_equal(tx["confirmations"], confirmations)
+ assert_equal("trusted" not in tx, True)
+ # Verify the transaction is correctly marked watchonly depending on
+ # whether the transaction pays to an imported public key or
+ # imported private key. The test setup ensures that transaction
+ # inputs will not be from watchonly keys (important because
+ # involvesWatchonly will be true if either the transaction output
+ # or inputs are watchonly).
+ if self.data != Data.priv:
+ assert_equal(tx["involvesWatchonly"], True)
+ else:
+ assert_equal("involvesWatchonly" not in tx, True)
+
+
+# List of Variants for each way a key or address could be imported.
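+# (2 call types * 3 data types * 3 rescan modes * 2 prune flags = 36 variants.)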
+IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
+
+# List of nodes to import keys to. Half the nodes will have pruning disabled,
+# half will have it enabled. Different nodes will be used for imports that are
+# expected to cause rescans, and imports that are not expected to cause
+# rescans, in order to prevent rescans during later imports picking up
+# transactions associated with earlier imports. This makes it easier to keep
+# track of expected balances and transactions.
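+# The (prune, rescan) pairs are generated in the order (False, False), (False, True), (True, False), (True, True).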
+ImportNode = collections.namedtuple("ImportNode", "prune rescan")
+IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
+
+# Rescans start at the earliest block up to 2 hours before the key timestamp.
+TIMESTAMP_WINDOW = 2 * 60 * 60
+
+
+class ImportRescanTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 2 + len(IMPORT_NODES)
+
+ def setup_network(self):
+ extra_args = [[] for _ in range(self.num_nodes)]
+ for i, import_node in enumerate(IMPORT_NODES, 2):
+ if import_node.prune:
+ extra_args[i] += ["-prune=1"]
+
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
+ for i in range(1, self.num_nodes):
+ connect_nodes(self.nodes[i], 0)
+
+ def run_test(self):
+ # Create one transaction on node 0 with a unique amount and label for
+ # each possible type of wallet import RPC.
+ for i, variant in enumerate(IMPORT_VARIANTS):
+ variant.label = "label {} {}".format(i, variant)
+ variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress(variant.label))
+ variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
+ variant.initial_amount = 10 - (i + 1) / 4.0
+ variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
+
+ # Generate a block containing the initial transactions, then another
+ # block further in the future (past the rescan window).
+ self.nodes[0].generate(1)
+ assert_equal(self.nodes[0].getrawmempool(), [])
+ timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
+ set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+
+ # For each variation of wallet key import, invoke the import RPC and
+ # check the results from getbalance and listtransactions.
+ for variant in IMPORT_VARIANTS:
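+ # Single-key import calls (importaddress/importpubkey/importprivkey) refuse to rescan on a pruned node; importmulti has no such restriction.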
+ variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
+ expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
+ variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
+ variant.do_import(timestamp)
+ if expect_rescan:
+ variant.expected_balance = variant.initial_amount
+ variant.expected_txs = 1
+ variant.check(variant.initial_txid, variant.initial_amount, 2)
+ else:
+ variant.expected_balance = 0
+ variant.expected_txs = 0
+ variant.check()
+
+ # Create new transactions sending to each address.
+ fee = self.nodes[0].getnetworkinfo()["relayfee"]
+ for i, variant in enumerate(IMPORT_VARIANTS):
+ variant.sent_amount = 10 - (2 * i + 1) / 8.0
+ variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
+
+ # Generate a block containing the new transactions.
+ self.nodes[0].generate(1)
+ assert_equal(self.nodes[0].getrawmempool(), [])
+ sync_blocks(self.nodes)
+
+ # Check the latest results from getbalance and listtransactions.
+ for variant in IMPORT_VARIANTS:
+ if not variant.expect_disabled:
+ variant.expected_balance += variant.sent_amount
+ variant.expected_txs += 1
+ variant.check(variant.sent_txid, variant.sent_amount, 1)
+ else:
+ variant.check()
+
+
+def try_rpc(func, *args, **kwargs):
+ try:
+ return func(*args, **kwargs), None
+ except JSONRPCException as e:
+ return None, e.error
+
+
+if __name__ == "__main__":
+ ImportRescanTest().main()
diff --git a/test/functional/importmulti.py b/test/functional/importmulti.py
new file mode 100755
index 0000000000..aa03c6780a
--- /dev/null
+++ b/test/functional/importmulti.py
@@ -0,0 +1,453 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the importmulti RPC."""
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class ImportMultiTest (BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 2
+ self.setup_clean_chain = True
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(2, self.options.tmpdir)
+ self.is_network_split=False
+
+ def run_test (self):
+ self.log.info("Mining blocks...")
+ self.nodes[0].generate(1)
+ self.nodes[1].generate(1)
+ timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
+
+ # keyword definition
+ PRIV_KEY = 'privkey'
+ PUB_KEY = 'pubkey'
+ ADDRESS_KEY = 'address'
+ SCRIPT_KEY = 'script'
+
+
+ node0_address1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ node0_address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ node0_address3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+
+ #Check only one address
+ assert_equal(node0_address1['ismine'], True)
+
+ #Node 1 sync test
+ assert_equal(self.nodes[1].getblockcount(),1)
+
+ #Address Test - before import
+ address_info = self.nodes[1].validateaddress(node0_address1['address'])
+ assert_equal(address_info['iswatchonly'], False)
+ assert_equal(address_info['ismine'], False)
+
+
+ # RPC importmulti -----------------------------------------------
+
+ # Bitcoin Address
+ self.log.info("Should import an address")
+ address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": {
+ "address": address['address']
+ },
+ "timestamp": "now",
+ }])
+ assert_equal(result[0]['success'], True)
+ address_assert = self.nodes[1].validateaddress(address['address'])
+ assert_equal(address_assert['iswatchonly'], True)
+ assert_equal(address_assert['ismine'], False)
+ assert_equal(address_assert['timestamp'], timestamp)
+ watchonly_address = address['address']
+ watchonly_timestamp = timestamp
+
+ self.log.info("Should not import an invalid address")
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": {
+ "address": "not valid address",
+ },
+ "timestamp": "now",
+ }])
+ assert_equal(result[0]['success'], False)
+ assert_equal(result[0]['error']['code'], -5)
+ assert_equal(result[0]['error']['message'], 'Invalid address')
+
+ # ScriptPubKey + internal
+ self.log.info("Should import a scriptPubKey with internal flag")
+ address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": address['scriptPubKey'],
+ "timestamp": "now",
+ "internal": True
+ }])
+ assert_equal(result[0]['success'], True)
+ address_assert = self.nodes[1].validateaddress(address['address'])
+ assert_equal(address_assert['iswatchonly'], True)
+ assert_equal(address_assert['ismine'], False)
+ assert_equal(address_assert['timestamp'], timestamp)
+
+ # ScriptPubKey + !internal
+ self.log.info("Should not import a scriptPubKey without internal flag")
+ address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": address['scriptPubKey'],
+ "timestamp": "now",
+ }])
+ assert_equal(result[0]['success'], False)
+ assert_equal(result[0]['error']['code'], -8)
+ assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
+ address_assert = self.nodes[1].validateaddress(address['address'])
+ assert_equal(address_assert['iswatchonly'], False)
+ assert_equal(address_assert['ismine'], False)
+ assert_equal('timestamp' in address_assert, False)
+
+
+ # Address + Public key + !Internal
+ self.log.info("Should import an address with public key")
+ address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": {
+ "address": address['address']
+ },
+ "timestamp": "now",
+ "pubkeys": [ address['pubkey'] ]
+ }])
+ assert_equal(result[0]['success'], True)
+ address_assert = self.nodes[1].validateaddress(address['address'])
+ assert_equal(address_assert['iswatchonly'], True)
+ assert_equal(address_assert['ismine'], False)
+ assert_equal(address_assert['timestamp'], timestamp)
+
+
+ # ScriptPubKey + Public key + internal
+ self.log.info("Should import a scriptPubKey with internal and with public key")
+ address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ request = [{
+ "scriptPubKey": address['scriptPubKey'],
+ "timestamp": "now",
+ "pubkeys": [ address['pubkey'] ],
+ "internal": True
+ }]
+ result = self.nodes[1].importmulti(request)
+ assert_equal(result[0]['success'], True)
+ address_assert = self.nodes[1].validateaddress(address['address'])
+ assert_equal(address_assert['iswatchonly'], True)
+ assert_equal(address_assert['ismine'], False)
+ assert_equal(address_assert['timestamp'], timestamp)
+
+ # ScriptPubKey + Public key + !internal
+ self.log.info("Should not import a scriptPubKey without internal and with public key")
+ address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ request = [{
+ "scriptPubKey": address['scriptPubKey'],
+ "timestamp": "now",
+ "pubkeys": [ address['pubkey'] ]
+ }]
+ result = self.nodes[1].importmulti(request)
+ assert_equal(result[0]['success'], False)
+ assert_equal(result[0]['error']['code'], -8)
+ assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
+ address_assert = self.nodes[1].validateaddress(address['address'])
+ assert_equal(address_assert['iswatchonly'], False)
+ assert_equal(address_assert['ismine'], False)
+ assert_equal('timestamp' in address_assert, False)
+
+ # Address + Private key + !watchonly
+ self.log.info("Should import an address with private key")
+ address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": {
+ "address": address['address']
+ },
+ "timestamp": "now",
+ "keys": [ self.nodes[0].dumpprivkey(address['address']) ]
+ }])
+ assert_equal(result[0]['success'], True)
+ address_assert = self.nodes[1].validateaddress(address['address'])
+ assert_equal(address_assert['iswatchonly'], False)
+ assert_equal(address_assert['ismine'], True)
+ assert_equal(address_assert['timestamp'], timestamp)
+
+ # Address + Private key + watchonly
+ self.log.info("Should not import an address with private key and with watchonly")
+ address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": {
+ "address": address['address']
+ },
+ "timestamp": "now",
+ "keys": [ self.nodes[0].dumpprivkey(address['address']) ],
+ "watchonly": True
+ }])
+ assert_equal(result[0]['success'], False)
+ assert_equal(result[0]['error']['code'], -8)
+ assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
+ address_assert = self.nodes[1].validateaddress(address['address'])
+ assert_equal(address_assert['iswatchonly'], False)
+ assert_equal(address_assert['ismine'], False)
+ assert_equal('timestamp' in address_assert, False)
+
+ # ScriptPubKey + Private key + internal
+ self.log.info("Should import a scriptPubKey with internal and with private key")
+ address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": address['scriptPubKey'],
+ "timestamp": "now",
+ "keys": [ self.nodes[0].dumpprivkey(address['address']) ],
+ "internal": True
+ }])
+ assert_equal(result[0]['success'], True)
+ address_assert = self.nodes[1].validateaddress(address['address'])
+ assert_equal(address_assert['iswatchonly'], False)
+ assert_equal(address_assert['ismine'], True)
+ assert_equal(address_assert['timestamp'], timestamp)
+
+ # ScriptPubKey + Private key + !internal
+ self.log.info("Should not import a scriptPubKey without internal and with private key")
+ address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": address['scriptPubKey'],
+ "timestamp": "now",
+ "keys": [ self.nodes[0].dumpprivkey(address['address']) ]
+ }])
+ assert_equal(result[0]['success'], False)
+ assert_equal(result[0]['error']['code'], -8)
+ assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
+ address_assert = self.nodes[1].validateaddress(address['address'])
+ assert_equal(address_assert['iswatchonly'], False)
+ assert_equal(address_assert['ismine'], False)
+ assert_equal('timestamp' in address_assert, False)
+
+
+ # P2SH address
+ sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
+ self.nodes[1].generate(100)
+ transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
+ self.nodes[1].generate(1)
+ timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
+ transaction = self.nodes[1].gettransaction(transactionid)
+
+ self.log.info("Should import a p2sh")
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": {
+ "address": multi_sig_script['address']
+ },
+ "timestamp": "now",
+ }])
+ assert_equal(result[0]['success'], True)
+ address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
+ assert_equal(address_assert['isscript'], True)
+ assert_equal(address_assert['iswatchonly'], True)
+ assert_equal(address_assert['timestamp'], timestamp)
+ p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
+ assert_equal(p2shunspent['spendable'], False)
+ assert_equal(p2shunspent['solvable'], False)
+
+
+ # P2SH + Redeem script
+ sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
+ self.nodes[1].generate(100)
+ transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
+ self.nodes[1].generate(1)
+ timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
+ transaction = self.nodes[1].gettransaction(transactionid)
+
+ self.log.info("Should import a p2sh with respective redeem script")
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": {
+ "address": multi_sig_script['address']
+ },
+ "timestamp": "now",
+ "redeemscript": multi_sig_script['redeemScript']
+ }])
+ assert_equal(result[0]['success'], True)
+ address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
+ assert_equal(address_assert['timestamp'], timestamp)
+
+ p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
+ assert_equal(p2shunspent['spendable'], False)
+ assert_equal(p2shunspent['solvable'], True)
+
+
+ # P2SH + Redeem script + Private Keys + !Watchonly
+ sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
+ self.nodes[1].generate(100)
+ transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
+ self.nodes[1].generate(1)
+ timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
+ transaction = self.nodes[1].gettransaction(transactionid)
+
+ self.log.info("Should import a p2sh with respective redeem script and private keys")
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": {
+ "address": multi_sig_script['address']
+ },
+ "timestamp": "now",
+ "redeemscript": multi_sig_script['redeemScript'],
+ "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])]
+ }])
+ assert_equal(result[0]['success'], True)
+ address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
+ assert_equal(address_assert['timestamp'], timestamp)
+
+ p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
+ assert_equal(p2shunspent['spendable'], False)
+ assert_equal(p2shunspent['solvable'], True)
+
+ # P2SH + Redeem script + Private Keys + Watchonly
+ sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
+ self.nodes[1].generate(100)
+ transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
+ self.nodes[1].generate(1)
+ timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
+ transaction = self.nodes[1].gettransaction(transactionid)
+
+ self.log.info("Should import a p2sh with respective redeem script and private keys")
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": {
+ "address": multi_sig_script['address']
+ },
+ "timestamp": "now",
+ "redeemscript": multi_sig_script['redeemScript'],
+ "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])],
+ "watchonly": True
+ }])
+ assert_equal(result[0]['success'], False)
+ assert_equal(result[0]['error']['code'], -8)
+ assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
+
+
+ # Address + Public key + !Internal + Wrong pubkey
+ self.log.info("Should not import an address with a wrong public key")
+ address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": {
+ "address": address['address']
+ },
+ "timestamp": "now",
+ "pubkeys": [ address2['pubkey'] ]
+ }])
+ assert_equal(result[0]['success'], False)
+ assert_equal(result[0]['error']['code'], -5)
+ assert_equal(result[0]['error']['message'], 'Consistency check failed')
+ address_assert = self.nodes[1].validateaddress(address['address'])
+ assert_equal(address_assert['iswatchonly'], False)
+ assert_equal(address_assert['ismine'], False)
+ assert_equal('timestamp' in address_assert, False)
+
+
+ # ScriptPubKey + Public key + internal + Wrong pubkey
+ self.log.info("Should not import a scriptPubKey with internal and with a wrong public key")
+ address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ request = [{
+ "scriptPubKey": address['scriptPubKey'],
+ "timestamp": "now",
+ "pubkeys": [ address2['pubkey'] ],
+ "internal": True
+ }]
+ result = self.nodes[1].importmulti(request)
+ assert_equal(result[0]['success'], False)
+ assert_equal(result[0]['error']['code'], -5)
+ assert_equal(result[0]['error']['message'], 'Consistency check failed')
+ address_assert = self.nodes[1].validateaddress(address['address'])
+ assert_equal(address_assert['iswatchonly'], False)
+ assert_equal(address_assert['ismine'], False)
+ assert_equal('timestamp' in address_assert, False)
+
+
+ # Address + Private key + !watchonly + Wrong private key
+ self.log.info("Should not import an address with a wrong private key")
+ address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": {
+ "address": address['address']
+ },
+ "timestamp": "now",
+ "keys": [ self.nodes[0].dumpprivkey(address2['address']) ]
+ }])
+ assert_equal(result[0]['success'], False)
+ assert_equal(result[0]['error']['code'], -5)
+ assert_equal(result[0]['error']['message'], 'Consistency check failed')
+ address_assert = self.nodes[1].validateaddress(address['address'])
+ assert_equal(address_assert['iswatchonly'], False)
+ assert_equal(address_assert['ismine'], False)
+ assert_equal('timestamp' in address_assert, False)
+
+
+ # ScriptPubKey + Private key + internal + Wrong private key
+ self.log.info("Should not import a scriptPubKey with internal and with a wrong private key")
+ address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": address['scriptPubKey'],
+ "timestamp": "now",
+ "keys": [ self.nodes[0].dumpprivkey(address2['address']) ],
+ "internal": True
+ }])
+ assert_equal(result[0]['success'], False)
+ assert_equal(result[0]['error']['code'], -5)
+ assert_equal(result[0]['error']['message'], 'Consistency check failed')
+ address_assert = self.nodes[1].validateaddress(address['address'])
+ assert_equal(address_assert['iswatchonly'], False)
+ assert_equal(address_assert['ismine'], False)
+ assert_equal('timestamp' in address_assert, False)
+
+
+ # Importing existing watch only address with new timestamp should replace saved timestamp.
+ assert_greater_than(timestamp, watchonly_timestamp)
+ self.log.info("Should replace previously saved watch only timestamp.")
+ result = self.nodes[1].importmulti([{
+ "scriptPubKey": {
+ "address": watchonly_address,
+ },
+ "timestamp": "now",
+ }])
+ assert_equal(result[0]['success'], True)
+ address_assert = self.nodes[1].validateaddress(watchonly_address)
+ assert_equal(address_assert['iswatchonly'], True)
+ assert_equal(address_assert['ismine'], False)
+ assert_equal(address_assert['timestamp'], timestamp)
+ watchonly_timestamp = timestamp
+
+
+ # restart nodes to check for proper serialization/deserialization of watch only address
+ stop_nodes(self.nodes)
+ self.nodes = start_nodes(2, self.options.tmpdir)
+ address_assert = self.nodes[1].validateaddress(watchonly_address)
+ assert_equal(address_assert['iswatchonly'], True)
+ assert_equal(address_assert['ismine'], False)
+ assert_equal(address_assert['timestamp'], watchonly_timestamp)
+
+ # Bad or missing timestamps
+ self.log.info("Should throw on invalid or missing timestamp values")
+ assert_raises_message(JSONRPCException, 'Missing required timestamp field for key',
+ self.nodes[1].importmulti, [{
+ "scriptPubKey": address['scriptPubKey'],
+ }])
+ assert_raises_message(JSONRPCException, 'Expected number or "now" timestamp value for key. got type string',
+ self.nodes[1].importmulti, [{
+ "scriptPubKey": address['scriptPubKey'],
+ "timestamp": "",
+ }])
+
+
+if __name__ == '__main__':
+ ImportMultiTest ().main ()
diff --git a/test/functional/importprunedfunds.py b/test/functional/importprunedfunds.py
new file mode 100755
index 0000000000..b4c8ee6c70
--- /dev/null
+++ b/test/functional/importprunedfunds.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the importprunedfunds and removeprunedfunds RPCs."""
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+
+class ImportPrunedFundsTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 2
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
+ connect_nodes_bi(self.nodes,0,1)
+ self.is_network_split=False
+ self.sync_all()
+
+ def run_test(self):
+ self.log.info("Mining blocks...")
+ self.nodes[0].generate(101)
+
+ self.sync_all()
+
+ # address
+ address1 = self.nodes[0].getnewaddress()
+ # pubkey
+ address2 = self.nodes[0].getnewaddress()
+ address2_pubkey = self.nodes[0].validateaddress(address2)['pubkey'] # Using pubkey
+ # privkey
+ address3 = self.nodes[0].getnewaddress()
+ address3_privkey = self.nodes[0].dumpprivkey(address3) # Using privkey
+
+ #Check only one address
+ address_info = self.nodes[0].validateaddress(address1)
+ assert_equal(address_info['ismine'], True)
+
+ self.sync_all()
+
+ #Node 1 sync test
+ assert_equal(self.nodes[1].getblockcount(),101)
+
+ #Address Test - before import
+ address_info = self.nodes[1].validateaddress(address1)
+ assert_equal(address_info['iswatchonly'], False)
+ assert_equal(address_info['ismine'], False)
+
+ address_info = self.nodes[1].validateaddress(address2)
+ assert_equal(address_info['iswatchonly'], False)
+ assert_equal(address_info['ismine'], False)
+
+ address_info = self.nodes[1].validateaddress(address3)
+ assert_equal(address_info['iswatchonly'], False)
+ assert_equal(address_info['ismine'], False)
+
+ #Send funds to self
+ txnid1 = self.nodes[0].sendtoaddress(address1, 0.1)
+ self.nodes[0].generate(1)
+ rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex']
+ proof1 = self.nodes[0].gettxoutproof([txnid1])
+
+ txnid2 = self.nodes[0].sendtoaddress(address2, 0.05)
+ self.nodes[0].generate(1)
+ rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex']
+ proof2 = self.nodes[0].gettxoutproof([txnid2])
+
+ txnid3 = self.nodes[0].sendtoaddress(address3, 0.025)
+ self.nodes[0].generate(1)
+ rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex']
+ proof3 = self.nodes[0].gettxoutproof([txnid3])
+
+ self.sync_all()
+
+ #Import with no affiliated address
+ assert_raises_jsonrpc(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1)
+
+ balance1 = self.nodes[1].getbalance("", 0, True)
+ assert_equal(balance1, Decimal(0))
+
+ #Import with affiliated address with no rescan
+ self.nodes[1].importaddress(address2, "add2", False)
+ result2 = self.nodes[1].importprunedfunds(rawtxn2, proof2)
+ balance2 = self.nodes[1].getbalance("add2", 0, True)
+ assert_equal(balance2, Decimal('0.05'))
+
+ #Import with private key with no rescan
+ self.nodes[1].importprivkey(address3_privkey, "add3", False)
+ result3 = self.nodes[1].importprunedfunds(rawtxn3, proof3)
+ balance3 = self.nodes[1].getbalance("add3", 0, False)
+ assert_equal(balance3, Decimal('0.025'))
+ balance3 = self.nodes[1].getbalance("*", 0, True)
+ assert_equal(balance3, Decimal('0.075'))
+
+ #Addresses Test - after import
+ address_info = self.nodes[1].validateaddress(address1)
+ assert_equal(address_info['iswatchonly'], False)
+ assert_equal(address_info['ismine'], False)
+ address_info = self.nodes[1].validateaddress(address2)
+ assert_equal(address_info['iswatchonly'], True)
+ assert_equal(address_info['ismine'], False)
+ address_info = self.nodes[1].validateaddress(address3)
+ assert_equal(address_info['iswatchonly'], False)
+ assert_equal(address_info['ismine'], True)
+
+ #Remove transactions
+ assert_raises_jsonrpc(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1)
+
+ balance1 = self.nodes[1].getbalance("*", 0, True)
+ assert_equal(balance1, Decimal('0.075'))
+
+ self.nodes[1].removeprunedfunds(txnid2)
+ balance2 = self.nodes[1].getbalance("*", 0, True)
+ assert_equal(balance2, Decimal('0.025'))
+
+ self.nodes[1].removeprunedfunds(txnid3)
+ balance3 = self.nodes[1].getbalance("*", 0, True)
+ assert_equal(balance3, Decimal('0.0'))
+
+if __name__ == '__main__':
+ ImportPrunedFundsTest().main()
diff --git a/test/functional/invalidateblock.py b/test/functional/invalidateblock.py
new file mode 100755
index 0000000000..8c80b64003
--- /dev/null
+++ b/test/functional/invalidateblock.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the invalidateblock RPC."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class InvalidateTest(BitcoinTestFramework):
+
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 3
+
+ def setup_network(self):
+ self.nodes = []
+ self.is_network_split = False
+ self.nodes.append(start_node(0, self.options.tmpdir))
+ self.nodes.append(start_node(1, self.options.tmpdir))
+ self.nodes.append(start_node(2, self.options.tmpdir))
+
+ def run_test(self):
+ self.log.info("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
+ self.log.info("Mine 4 blocks on Node 0")
+ self.nodes[0].generate(4)
+ assert(self.nodes[0].getblockcount() == 4)
+ besthash = self.nodes[0].getbestblockhash()
+
+ self.log.info("Mine competing 6 blocks on Node 1")
+ self.nodes[1].generate(6)
+ assert(self.nodes[1].getblockcount() == 6)
+
+ self.log.info("Connect nodes to force a reorg")
+ connect_nodes_bi(self.nodes,0,1)
+ sync_blocks(self.nodes[0:2])
+ assert(self.nodes[0].getblockcount() == 6)
+ badhash = self.nodes[1].getblockhash(2)
+
+ self.log.info("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
+ self.nodes[0].invalidateblock(badhash)
+ newheight = self.nodes[0].getblockcount()
+ newhash = self.nodes[0].getbestblockhash()
+ if (newheight != 4 or newhash != besthash):
+ raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
+
+ self.log.info("Make sure we won't reorg to a lower work chain:")
+ connect_nodes_bi(self.nodes,1,2)
+ self.log.info("Sync node 2 to node 1 so both have 6 blocks")
+ sync_blocks(self.nodes[1:3])
+ assert(self.nodes[2].getblockcount() == 6)
+ self.log.info("Invalidate block 5 on node 1 so its tip is now at 4")
+ self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
+ assert(self.nodes[1].getblockcount() == 4)
+ self.log.info("Invalidate block 3 on node 2, so its tip is now 2")
+ self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
+ assert(self.nodes[2].getblockcount() == 2)
+ self.log.info("..and then mine a block")
+ self.nodes[2].generate(1)
+ self.log.info("Verify all nodes are at the right height")
+ time.sleep(5)
+ assert_equal(self.nodes[2].getblockcount(), 3)
+ assert_equal(self.nodes[0].getblockcount(), 4)
+ node1height = self.nodes[1].getblockcount()
+ if node1height < 4:
+ raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
+
+if __name__ == '__main__':
+ InvalidateTest().main()
diff --git a/test/functional/invalidblockrequest.py b/test/functional/invalidblockrequest.py
new file mode 100755
index 0000000000..eabc0db8df
--- /dev/null
+++ b/test/functional/invalidblockrequest.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test node responses to invalid blocks.
+
+In this test we connect to one node over p2p, and test block requests:
+1) Valid blocks should be requested and become chain tip.
+2) Invalid block with duplicated transaction should be re-requested.
+3) Invalid block with bad coinbase value should be rejected and not
+re-requested.
+"""
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.util import *
+from test_framework.comptool import TestManager, TestInstance, RejectResult
+from test_framework.blocktools import *
+import copy
+import time
+
+# Use the ComparisonTestFramework with 1 node: only use --testbinary.
+class InvalidBlockRequestTest(ComparisonTestFramework):
+
+ ''' This test can run with one node, checking expected answers, or with two nodes, comparing their responses.
+ Change the "outcome" variable of each TestInstance object to do only the comparison. '''
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 1
+
+ def run_test(self):
+ test = TestManager(self, self.options.tmpdir)
+ test.add_all_connections(self.nodes)
+ self.tip = None
+ self.block_time = None
+ NetworkThread().start() # Start up network handling in another thread
+ test.run()
+
+ def get_tests(self):
+ if self.tip is None:
+ self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
+ self.block_time = int(time.time())+1
+
+ '''
+ Create a new block with an anyone-can-spend coinbase
+ '''
+ height = 1
+ block = create_block(self.tip, create_coinbase(height), self.block_time)
+ self.block_time += 1
+ block.solve()
+ # Save the coinbase for later
+ self.block1 = block
+ self.tip = block.sha256
+ height += 1
+ yield TestInstance([[block, True]])
+
+ '''
+ Now we need that block to mature so we can spend the coinbase.
+ '''
+ test = TestInstance(sync_every_block=False)
+ for i in range(100):
+ block = create_block(self.tip, create_coinbase(height), self.block_time)
+ block.solve()
+ self.tip = block.sha256
+ self.block_time += 1
+ test.blocks_and_transactions.append([block, True])
+ height += 1
+ yield test
+
+ '''
+ Now we use merkle-root malleability to generate an invalid block with
+ the same block header.
+ Manufacture a block with 3 transactions (coinbase, spend of prior
+ coinbase, spend of that spend). Duplicate the 3rd transaction to
+ leave merkle root and blockheader unchanged but invalidate the block.
+ '''
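+ # Bitcoin's merkle tree hashes the last entry with itself when a level has an odd number of nodes, so [cb, tx1, tx2] and [cb, tx1, tx2, tx2] produce the same root (the CVE-2012-2459 malleability).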
+ block2 = create_block(self.tip, create_coinbase(height), self.block_time)
+ self.block_time += 1
+
+ # b'\x51' is OP_TRUE
+ tx1 = create_transaction(self.block1.vtx[0], 0, b'\x51', 50 * COIN)
+ tx2 = create_transaction(tx1, 0, b'\x51', 50 * COIN)
+
+ block2.vtx.extend([tx1, tx2])
+ block2.hashMerkleRoot = block2.calc_merkle_root()
+ block2.rehash()
+ block2.solve()
+ orig_hash = block2.sha256
+ block2_orig = copy.deepcopy(block2)
+
+ # Mutate block 2
+ block2.vtx.append(tx2)
+ assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
+ assert_equal(orig_hash, block2.rehash())
+ assert(block2_orig.vtx != block2.vtx)
+
+ self.tip = block2.sha256
+ yield TestInstance([[block2, RejectResult(16, b'bad-txns-duplicate')], [block2_orig, True]])
+ height += 1
+
+ '''
+ Make sure that a totally screwed up block is not valid.
+ '''
+ block3 = create_block(self.tip, create_coinbase(height), self.block_time)
+ self.block_time += 1
+ block3.vtx[0].vout[0].nValue = 100 * COIN # Too high!
+ block3.vtx[0].sha256=None
+ block3.vtx[0].calc_sha256()
+ block3.hashMerkleRoot = block3.calc_merkle_root()
+ block3.rehash()
+ block3.solve()
+
+ yield TestInstance([[block3, RejectResult(16, b'bad-cb-amount')]])
+
+
+if __name__ == '__main__':
+ InvalidBlockRequestTest().main()
diff --git a/test/functional/invalidtxrequest.py b/test/functional/invalidtxrequest.py
new file mode 100755
index 0000000000..a9ac231f09
--- /dev/null
+++ b/test/functional/invalidtxrequest.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test node responses to invalid transactions.
+
+In this test we connect to one node over p2p, and test tx requests.
+"""
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.comptool import TestManager, TestInstance, RejectResult
+from test_framework.blocktools import *
+import time
+
+
+
+# Use the ComparisonTestFramework with 1 node: only use --testbinary.
+class InvalidTxRequestTest(ComparisonTestFramework):
+
+ ''' This test can run with one node, checking expected answers, or with two nodes, comparing their responses.
+ Change the "outcome" variable of each TestInstance object to do only the comparison. '''
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 1
+
+ def run_test(self):
+ test = TestManager(self, self.options.tmpdir)
+ test.add_all_connections(self.nodes)
+ self.tip = None
+ self.block_time = None
+ NetworkThread().start() # Start up network handling in another thread
+ test.run()
+
+ def get_tests(self):
+ if self.tip is None:
+ self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
+ self.block_time = int(time.time())+1
+
+ '''
+ Create a new block with an anyone-can-spend coinbase
+ '''
+ height = 1
+ block = create_block(self.tip, create_coinbase(height), self.block_time)
+ self.block_time += 1
+ block.solve()
+ # Save the coinbase for later
+ self.block1 = block
+ self.tip = block.sha256
+ height += 1
+ yield TestInstance([[block, True]])
+
+ '''
+ Now we need that block to mature so we can spend the coinbase.
+ '''
+ test = TestInstance(sync_every_block=False)
+ for i in range(100):
+ block = create_block(self.tip, create_coinbase(height), self.block_time)
+ block.solve()
+ self.tip = block.sha256
+ self.block_time += 1
+ test.blocks_and_transactions.append([block, True])
+ height += 1
+ yield test
+
+ # b'\x64' is OP_NOTIF
+ # Transaction will be rejected with code 16 (REJECT_INVALID)
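+ # OP_NOTIF needs a condition on the stack; as the whole scriptSig it executes with an empty stack, so script verification fails.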
+ tx1 = create_transaction(self.block1.vtx[0], 0, b'\x64', 50 * COIN - 12000)
+ yield TestInstance([[tx1, RejectResult(16, b'mandatory-script-verify-flag-failed')]])
+
+ # TODO: test further transactions...
+
+if __name__ == '__main__':
+ InvalidTxRequestTest().main()
diff --git a/test/functional/keypool.py b/test/functional/keypool.py
new file mode 100755
index 0000000000..cee58563f0
--- /dev/null
+++ b/test/functional/keypool.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the wallet keypool and interaction with wallet encryption/locking."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class KeyPoolTest(BitcoinTestFramework):
+
+ def run_test(self):
+ nodes = self.nodes
+ addr_before_encrypting = nodes[0].getnewaddress()
+ addr_before_encrypting_data = nodes[0].validateaddress(addr_before_encrypting)
+ wallet_info_old = nodes[0].getwalletinfo()
+ assert(addr_before_encrypting_data['hdmasterkeyid'] == wallet_info_old['hdmasterkeyid'])
+
+ # Encrypt wallet and wait to terminate
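+ # (encryptwallet stops the node once encryption finishes, so wait for the process to exit before restarting)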
+ nodes[0].encryptwallet('test')
+ bitcoind_processes[0].wait()
+ # Restart node 0
+ nodes[0] = start_node(0, self.options.tmpdir)
+ # Keep creating keys
+ addr = nodes[0].getnewaddress()
+ addr_data = nodes[0].validateaddress(addr)
+ wallet_info = nodes[0].getwalletinfo()
+ assert(addr_before_encrypting_data['hdmasterkeyid'] != wallet_info['hdmasterkeyid'])
+ assert(addr_data['hdmasterkeyid'] == wallet_info['hdmasterkeyid'])
+
+ assert_raises_jsonrpc(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
+
+ # put three new keys in the keypool
+ nodes[0].walletpassphrase('test', 12000)
+ nodes[0].keypoolrefill(3)
+ nodes[0].walletlock()
+
+ # drain the keys
+ addr = set()
+ addr.add(nodes[0].getrawchangeaddress())
+ addr.add(nodes[0].getrawchangeaddress())
+ addr.add(nodes[0].getrawchangeaddress())
+ addr.add(nodes[0].getrawchangeaddress())
+ # assert that four unique addresses were returned
+ assert(len(addr) == 4)
+ # the next one should fail
+ assert_raises_jsonrpc(-12, "Keypool ran out", nodes[0].getrawchangeaddress)
+
+ # refill keypool with three new addresses
+ nodes[0].walletpassphrase('test', 1)
+ nodes[0].keypoolrefill(3)
+ # test walletpassphrase timeout
+ time.sleep(1.1)
+ assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0)
+
+ # drain them by mining
+ nodes[0].generate(1)
+ nodes[0].generate(1)
+ nodes[0].generate(1)
+ nodes[0].generate(1)
+ assert_raises_jsonrpc(-12, "Keypool ran out", nodes[0].generate, 1)
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = False
+ self.num_nodes = 1
+
+ def setup_network(self):
+ self.nodes = self.setup_nodes()
+
+if __name__ == '__main__':
+ KeyPoolTest().main()
diff --git a/test/functional/listsinceblock.py b/test/functional/listsinceblock.py
new file mode 100755
index 0000000000..a75e66c8c4
--- /dev/null
+++ b/test/functional/listsinceblock.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the listsincelast RPC."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_equal
+
+class ListSinceBlockTest (BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 4
+
+ def run_test (self):
+ '''
+ `listsinceblock` did not behave correctly when handed a block that was
+ no longer in the main chain:
+
+ ab0
+ / \
+ aa1 [tx0] bb1
+ | |
+ aa2 bb2
+ | |
+ aa3 bb3
+ |
+ bb4
+
+ Consider a client that has only seen block `aa3` above. It asks the node
+ to `listsinceblock aa3`. But at some point prior the main chain switched
+ to the bb chain.
+
+ Previously: listsinceblock would find height=4 for block aa3 and compare
+ this to height=5 for the tip of the chain (bb4). It would then return
+ results restricted to bb3-bb4.
+
+ Now: listsinceblock finds the fork at ab0 and returns results in the
+ range bb1-bb4.
+
+ This test only checks that [tx0] is present.
+ '''
+
+ assert_equal(self.is_network_split, False)
+ self.nodes[2].generate(101)
+ self.sync_all()
+
+ assert_equal(self.nodes[0].getbalance(), 0)
+ assert_equal(self.nodes[1].getbalance(), 0)
+ assert_equal(self.nodes[2].getbalance(), 50)
+ assert_equal(self.nodes[3].getbalance(), 0)
+
+ # Split network into two
+ self.split_network()
+ assert_equal(self.is_network_split, True)
+
+ # send to nodes[0] from nodes[2]
+ senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+
+ # generate on both sides
+ lastblockhash = self.nodes[1].generate(6)[5]
+ self.nodes[2].generate(7)
+ self.log.info('lastblockhash=%s' % (lastblockhash))
+
+ self.sync_all()
+
+ self.join_network()
+
+ # listsinceblock(lastblockhash) should now include tx, as seen from nodes[0]
+ lsbres = self.nodes[0].listsinceblock(lastblockhash)
+ found = False
+ for tx in lsbres['transactions']:
+ if tx['txid'] == senttx:
+ found = True
+ break
+ assert_equal(found, True)
+
+if __name__ == '__main__':
+ ListSinceBlockTest().main()
diff --git a/test/functional/listtransactions.py b/test/functional/listtransactions.py
new file mode 100755
index 0000000000..68d14093ce
--- /dev/null
+++ b/test/functional/listtransactions.py
@@ -0,0 +1,207 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the listtransactions API."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.mininode import CTransaction, COIN
+from io import BytesIO
+
+def txFromHex(hexstring):
+ tx = CTransaction()
+ f = BytesIO(hex_str_to_bytes(hexstring))
+ tx.deserialize(f)
+ return tx
+
+class ListTransactionsTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 4
+ self.setup_clean_chain = False
+
+ def setup_nodes(self):
+ #This test requires mocktime
+ enable_mocktime()
+ return start_nodes(self.num_nodes, self.options.tmpdir)
+
+ def run_test(self):
+ # Simple send, 0 to 1:
+ txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
+ self.sync_all()
+ assert_array_result(self.nodes[0].listtransactions(),
+ {"txid":txid},
+ {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
+ assert_array_result(self.nodes[1].listtransactions(),
+ {"txid":txid},
+ {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
+ # mine a block, confirmations should change:
+ self.nodes[0].generate(1)
+ self.sync_all()
+ assert_array_result(self.nodes[0].listtransactions(),
+ {"txid":txid},
+ {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
+ assert_array_result(self.nodes[1].listtransactions(),
+ {"txid":txid},
+ {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
+
+ # send-to-self:
+ txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
+ assert_array_result(self.nodes[0].listtransactions(),
+ {"txid":txid, "category":"send"},
+ {"amount":Decimal("-0.2")})
+ assert_array_result(self.nodes[0].listtransactions(),
+ {"txid":txid, "category":"receive"},
+ {"amount":Decimal("0.2")})
+
+ # sendmany from node1: twice to self, twice to node2:
+ send_to = { self.nodes[0].getnewaddress() : 0.11,
+ self.nodes[1].getnewaddress() : 0.22,
+ self.nodes[0].getaccountaddress("from1") : 0.33,
+ self.nodes[1].getaccountaddress("toself") : 0.44 }
+ txid = self.nodes[1].sendmany("", send_to)
+ self.sync_all()
+ assert_array_result(self.nodes[1].listtransactions(),
+ {"category":"send","amount":Decimal("-0.11")},
+ {"txid":txid} )
+ assert_array_result(self.nodes[0].listtransactions(),
+ {"category":"receive","amount":Decimal("0.11")},
+ {"txid":txid} )
+ assert_array_result(self.nodes[1].listtransactions(),
+ {"category":"send","amount":Decimal("-0.22")},
+ {"txid":txid} )
+ assert_array_result(self.nodes[1].listtransactions(),
+ {"category":"receive","amount":Decimal("0.22")},
+ {"txid":txid} )
+ assert_array_result(self.nodes[1].listtransactions(),
+ {"category":"send","amount":Decimal("-0.33")},
+ {"txid":txid} )
+ assert_array_result(self.nodes[0].listtransactions(),
+ {"category":"receive","amount":Decimal("0.33")},
+ {"txid":txid, "account" : "from1"} )
+ assert_array_result(self.nodes[1].listtransactions(),
+ {"category":"send","amount":Decimal("-0.44")},
+ {"txid":txid, "account" : ""} )
+ assert_array_result(self.nodes[1].listtransactions(),
+ {"category":"receive","amount":Decimal("0.44")},
+ {"txid":txid, "account" : "toself"} )
+
+ multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
+ self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
+ txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
+ self.nodes[1].generate(1)
+ self.sync_all()
+ assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
+ assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
+ {"category":"receive","amount":Decimal("0.1")},
+ {"txid":txid, "account" : "watchonly"} )
+
+ self.run_rbf_opt_in_test()
+
+ # Check that the opt-in-rbf flag works properly, for sent and received
+ # transactions.
+ def run_rbf_opt_in_test(self):
+ # Check whether a transaction signals opt-in RBF itself
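+ # (per BIP125, a transaction signals replaceability if any of its inputs
+ # carries an nSequence below 0xfffffffe)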
+ def is_opt_in(node, txid):
+ rawtx = node.getrawtransaction(txid, 1)
+ for x in rawtx["vin"]:
+ if x["sequence"] < 0xfffffffe:
+ return True
+ return False
+
+ # Find an unconfirmed output matching a certain txid
+ def get_unconfirmed_utxo_entry(node, txid_to_match):
+ utxo = node.listunspent(0, 0)
+ for i in utxo:
+ if i["txid"] == txid_to_match:
+ return i
+ return None
+
+ # 1. Chain a few transactions that don't opt-in.
+ txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
+ assert(not is_opt_in(self.nodes[0], txid_1))
+ assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
+ sync_mempools(self.nodes)
+ assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
+
+ # Tx2 will build off txid_1, still not opting in to RBF.
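+ # node0 created txid_1 itself, so its unconfirmed change is "safe" to spend;
+ # node1's unconfirmed receive from another wallet is not.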
+ utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
+ assert_equal(utxo_to_use["safe"], True)
+ utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
+ assert_equal(utxo_to_use["safe"], False)
+
+ # Create tx2 using createrawtransaction
+ inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
+ outputs = {self.nodes[0].getnewaddress(): 0.999}
+ tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
+ tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
+ txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
+
+ # ...and check the result
+ assert(not is_opt_in(self.nodes[1], txid_2))
+ assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
+ sync_mempools(self.nodes)
+ assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
+
+ # Tx3 will opt-in to RBF
+ utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
+ inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
+ outputs = {self.nodes[1].getnewaddress(): 0.998}
+ tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
+ tx3_modified = txFromHex(tx3)
+ tx3_modified.vin[0].nSequence = 0
+ tx3 = bytes_to_hex_str(tx3_modified.serialize())
+ tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
+ txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
+
+ assert(is_opt_in(self.nodes[0], txid_3))
+ assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
+ sync_mempools(self.nodes)
+ assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
+
+ # Tx4 will chain off tx3. Doesn't signal itself, but depends on one
+ # that does.
+ utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
+ inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
+ outputs = {self.nodes[0].getnewaddress(): 0.997}
+ tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
+ tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
+ txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
+
+ assert(not is_opt_in(self.nodes[1], txid_4))
+ assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
+ sync_mempools(self.nodes)
+ assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
+
+ # Replace tx3, and check that tx4 becomes unknown
+ tx3_b = tx3_modified
+ tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
+ tx3_b = bytes_to_hex_str(tx3_b.serialize())
+ tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
+ txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
+ assert(is_opt_in(self.nodes[0], txid_3b))
+
+ assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
+ sync_mempools(self.nodes)
+ assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
+
+ # Check gettransaction as well:
+ for n in self.nodes[0:2]:
+ assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
+ assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
+ assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
+ assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
+ assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
+
+ # After mining a transaction, it's no longer BIP125-replaceable
+ self.nodes[0].generate(1)
+ assert(txid_3b not in self.nodes[0].getrawmempool())
+ assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
+ assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
+
+
+if __name__ == '__main__':
+ ListTransactionsTest().main()
+
diff --git a/test/functional/maxblocksinflight.py b/test/functional/maxblocksinflight.py
new file mode 100755
index 0000000000..2c3766125a
--- /dev/null
+++ b/test/functional/maxblocksinflight.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test nodes responses to having many blocks in flight.
+
+In this test we connect to one node over p2p, send it numerous inv's, and
+compare the resulting number of getdata requests to a max allowed value. We
+test for exceeding 128 blocks in flight, which was the limit a 0.9 client would
+reach. [0.10 clients shouldn't request more than 16 from a single peer.]
+"""
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+MAX_REQUESTS = 128
+
+class TestManager(NodeConnCB):
+ # set up NodeConnCB callbacks, overriding base class
+ def on_getdata(self, conn, message):
+ self.log.debug("got getdata %s" % repr(message))
+ # Log the requests
+ for inv in message.inv:
+ if inv.hash not in self.blockReqCounts:
+ self.blockReqCounts[inv.hash] = 0
+ self.blockReqCounts[inv.hash] += 1
+
+ def on_close(self, conn):
+ if not self.disconnectOkay:
+ raise EarlyDisconnectError(0)
+
+ def __init__(self):
+ NodeConnCB.__init__(self)
+
+ def add_new_connection(self, connection):
+ self.connection = connection
+ self.blockReqCounts = {}
+ self.disconnectOkay = False
+
+ def run(self):
+ self.connection.rpc.generate(1) # Leave IBD
+
+ numBlocksToGenerate = [8, 16, 128, 1024]
+ for count in range(len(numBlocksToGenerate)):
+ current_invs = []
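+ # CInv type 2 is MSG_BLOCK; announce random (nonexistent) block hashes,
+ # chunked because an inv message carries at most 50,000 entries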
+ for i in range(numBlocksToGenerate[count]):
+ current_invs.append(CInv(2, random.randrange(0, 1 << 256)))
+ if len(current_invs) >= 50000:
+ self.connection.send_message(msg_inv(current_invs))
+ current_invs = []
+ if len(current_invs) > 0:
+ self.connection.send_message(msg_inv(current_invs))
+
+ # Wait and see how many blocks were requested
+ time.sleep(2)
+
+ total_requests = 0
+ with mininode_lock:
+ for key in self.blockReqCounts:
+ total_requests += self.blockReqCounts[key]
+ if self.blockReqCounts[key] > 1:
+ raise AssertionError("Error, test failed: block %064x requested more than once" % key)
+ if total_requests > MAX_REQUESTS:
+ raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
+ self.log.info("Round %d: success (total requests: %d)" % (count, total_requests))
+
+ self.disconnectOkay = True
+ self.connection.disconnect_node()
+
+
+class MaxBlocksInFlightTest(BitcoinTestFramework):
+ def add_options(self, parser):
+ parser.add_option("--testbinary", dest="testbinary",
+ default=os.getenv("BITCOIND", "bitcoind"),
+ help="Binary to test max block requests behavior")
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def setup_network(self):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
+ extra_args=[['-whitelist=127.0.0.1']],
+ binary=[self.options.testbinary])
+
+ def run_test(self):
+ test = TestManager()
+ # pass log handler through to the test manager object
+ test.log = self.log
+ test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
+ NetworkThread().start() # Start up network handling in another thread
+ test.run()
+
+if __name__ == '__main__':
+ MaxBlocksInFlightTest().main()
diff --git a/test/functional/maxuploadtarget.py b/test/functional/maxuploadtarget.py
new file mode 100755
index 0000000000..40cd85c9ec
--- /dev/null
+++ b/test/functional/maxuploadtarget.py
@@ -0,0 +1,233 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test behavior of -maxuploadtarget.
+
+* Verify that getdata requests for old blocks (>1 week) are dropped
+if the upload target has been reached.
+* Verify that getdata requests for recent blocks are served even
+if the upload target has been reached.
+* Verify that the upload counters are reset after 24 hours.
+"""
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+import time
+
+# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
+# p2p messages to a node, with the messages generated in the main test logic.
+class TestNode(NodeConnCB):
+ def __init__(self):
+ NodeConnCB.__init__(self)
+ self.connection = None
+ self.ping_counter = 1
+ self.last_pong = msg_pong()
+ self.block_receive_map = {}
+
+ def add_connection(self, conn):
+ self.connection = conn
+ self.peer_disconnected = False
+
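+ # Ignore inv announcements; this test drives all getdata requests itself.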
+ def on_inv(self, conn, message):
+ pass
+
+ # Track the last getdata message we receive (used in the test)
+ def on_getdata(self, conn, message):
+ self.last_getdata = message
+
+ def on_block(self, conn, message):
+ message.block.calc_sha256()
+ try:
+ self.block_receive_map[message.block.sha256] += 1
+ except KeyError:
+ self.block_receive_map[message.block.sha256] = 1
+
+ # Spin until verack message is received from the node.
+ # We use this to signal that our test can begin. This
+ # is called from the testing thread, so it needs to acquire
+ # the global lock.
+ def wait_for_verack(self):
+ def veracked():
+ return self.verack_received
+ return wait_until(veracked, timeout=10)
+
+ def wait_for_disconnect(self):
+ def disconnected():
+ return self.peer_disconnected
+ return wait_until(disconnected, timeout=10)
+
+ # Wrapper for the NodeConn's send_message function
+ def send_message(self, message):
+ self.connection.send_message(message)
+
+ def on_pong(self, conn, message):
+ self.last_pong = message
+
+ def on_close(self, conn):
+ self.peer_disconnected = True
+
+ # Sync up with the node after delivery of a block
+ def sync_with_ping(self, timeout=30):
+ def received_pong():
+ return (self.last_pong.nonce == self.ping_counter)
+ self.connection.send_message(msg_ping(nonce=self.ping_counter))
+ success = wait_until(received_pong, timeout=timeout)
+ self.ping_counter += 1
+ return success
+
+class MaxUploadTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ # Cache for utxos, as listunspent may take a long time later in the test
+ self.utxo_cache = []
+
+ def setup_network(self):
+ # Start a node with a maxuploadtarget of 800 MiB (per 24h)
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-maxuploadtarget=800", "-blockmaxsize=999000"]))
+
+ def run_test(self):
+ # Before we connect anything, we first set the time on the node
+ # to be in the past, otherwise things break because the CNode
+ # time counters can't be reset backward after initialization
+ old_time = int(time.time() - 2*60*60*24*7)
+ self.nodes[0].setmocktime(old_time)
+
+ # Generate some old blocks
+ self.nodes[0].generate(130)
+
+ # test_nodes[0] will only request old blocks
+ # test_nodes[1] will only request new blocks
+ # test_nodes[2] will test resetting the counters
+ test_nodes = []
+ connections = []
+
+ for i in range(3):
+ test_nodes.append(TestNode())
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
+ test_nodes[i].add_connection(connections[i])
+
+ NetworkThread().start() # Start up network handling in another thread
+ [x.wait_for_verack() for x in test_nodes]
+
+ # Test logic begins here
+
+ # Now mine a big block
+ mine_large_block(self.nodes[0], self.utxo_cache)
+
+ # Store the hash; we'll request this later
+ big_old_block = self.nodes[0].getbestblockhash()
+ old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
+ big_old_block = int(big_old_block, 16)
+
+ # Advance to two days ago
+ self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
+
+ # Mine one more block, so that the prior block looks old
+ mine_large_block(self.nodes[0], self.utxo_cache)
+
+ # We'll be requesting this new block too
+ big_new_block = self.nodes[0].getbestblockhash()
+ big_new_block = int(big_new_block, 16)
+
+ # test_nodes[0] will test what happens if we just keep requesting the
+ # same big old block too many times (expect: disconnect)
+
+ getdata_request = msg_getdata()
+ getdata_request.inv.append(CInv(2, big_old_block))
+
+ max_bytes_per_day = 800*1024*1024
+ daily_buffer = 144 * 4000000
+ max_bytes_available = max_bytes_per_day - daily_buffer
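+ # 838,860,800 - 576,000,000 = 262,860,800 bytes of old blocks can be served
+ # before the upload target is treated as reached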
+ success_count = max_bytes_available // old_block_size
+
+ # 576MB will be reserved for relaying new blocks, so expect this to
+ # succeed for ~235 tries.
+ for i in range(success_count):
+ test_nodes[0].send_message(getdata_request)
+ test_nodes[0].sync_with_ping()
+ assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
+
+ assert_equal(len(self.nodes[0].getpeerinfo()), 3)
+ # At most a couple more tries should succeed (depending on how long
+ # the test has been running so far).
+ for i in range(3):
+ test_nodes[0].send_message(getdata_request)
+ test_nodes[0].wait_for_disconnect()
+ assert_equal(len(self.nodes[0].getpeerinfo()), 2)
+ self.log.info("Peer 0 disconnected after downloading old block too many times")
+
+ # Requesting the current block on test_nodes[1] should succeed indefinitely,
+ # even when over the max upload target.
+ # We'll try 800 times
+ getdata_request.inv = [CInv(2, big_new_block)]
+ for i in range(800):
+ test_nodes[1].send_message(getdata_request)
+ test_nodes[1].sync_with_ping()
+ assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
+
+ self.log.info("Peer 1 able to repeatedly download new block")
+
+ # But if test_nodes[1] tries for an old block, it gets disconnected too.
+ getdata_request.inv = [CInv(2, big_old_block)]
+ test_nodes[1].send_message(getdata_request)
+ test_nodes[1].wait_for_disconnect()
+ assert_equal(len(self.nodes[0].getpeerinfo()), 1)
+
+ self.log.info("Peer 1 disconnected after trying to download old block")
+
+ self.log.info("Advancing system time on node to clear counters...")
+
+ # If we advance the time by 24 hours, then the counters should reset,
+ # and test_nodes[2] should be able to retrieve the old block.
+ self.nodes[0].setmocktime(int(time.time()))
+ test_nodes[2].sync_with_ping()
+ test_nodes[2].send_message(getdata_request)
+ test_nodes[2].sync_with_ping()
+ assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
+
+ self.log.info("Peer 2 able to download old block")
+
+ [c.disconnect_node() for c in connections]
+
+ # Stop and restart node 0 with a 1 MiB maxuploadtarget, whitelisting 127.0.0.1
+ self.log.info("Restarting nodes with -whitelist=127.0.0.1")
+ stop_node(self.nodes[0], 0)
+ self.nodes[0] = start_node(0, self.options.tmpdir, ["-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
+
+ #recreate/reconnect 3 test nodes
+ test_nodes = []
+ connections = []
+
+ for i in range(3):
+ test_nodes.append(TestNode())
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
+ test_nodes[i].add_connection(connections[i])
+
+ NetworkThread().start() # Start up network handling in another thread
+ [x.wait_for_verack() for x in test_nodes]
+
+ # Retrieve 20 blocks, which should be enough to exceed the 1 MiB limit
+ getdata_request.inv = [CInv(2, big_new_block)]
+ for i in range(20):
+ test_nodes[1].send_message(getdata_request)
+ test_nodes[1].sync_with_ping()
+ assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
+
+ getdata_request.inv = [CInv(2, big_old_block)]
+ test_nodes[1].send_message(getdata_request)
+ test_nodes[1].wait_for_disconnect()
+ assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
+
+ self.log.info("Peer 1 still connected after trying to download old block (whitelisted)")
+
+ [c.disconnect_node() for c in connections]
+
+if __name__ == '__main__':
+ MaxUploadTest().main()
diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py
new file mode 100755
index 0000000000..a7ca576aee
--- /dev/null
+++ b/test/functional/mempool_limit.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test mempool limiting together/eviction with the wallet."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class MempoolLimitTest(BitcoinTestFramework):
+
+ def setup_network(self):
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-maxmempool=5", "-spendzeroconfchange=0"]))
+ self.is_network_split = False
+ self.sync_all()
+ self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ self.txouts = gen_return_txouts()
+
+ def run_test(self):
+ txids = []
+ utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 91)
+
+ #create a mempool tx that will be evicted
+ us0 = utxos.pop()
+ inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
+ outputs = {self.nodes[0].getnewaddress() : 0.0001}
+ tx = self.nodes[0].createrawtransaction(inputs, outputs)
+ self.nodes[0].settxfee(self.relayfee) # specifically fund this tx with low fee
+ txF = self.nodes[0].fundrawtransaction(tx)
+ self.nodes[0].settxfee(0) # return to automatic fee selection
+ txFS = self.nodes[0].signrawtransaction(txF['hex'])
+ txid = self.nodes[0].sendrawtransaction(txFS['hex'])
+
+ relayfee = self.nodes[0].getnetworkinfo()['relayfee']
+ base_fee = relayfee*100
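+ # Each batch pays a progressively higher fee, so once -maxmempool is hit
+ # the low-fee transaction above is evicted first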
+ for i in range(3):
+ txids.append([])
+ txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
+
+ # by now, the tx should be evicted, check confirmation state
+ assert(txid not in self.nodes[0].getrawmempool())
+ txdata = self.nodes[0].gettransaction(txid)
+ assert(txdata['confirmations'] == 0) # confirmations should still be 0
+
+if __name__ == '__main__':
+ MempoolLimitTest().main()
diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py
new file mode 100755
index 0000000000..17e3a9a967
--- /dev/null
+++ b/test/functional/mempool_packages.py
@@ -0,0 +1,239 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test descendant package tracking code."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.mininode import COIN
+
+MAX_ANCESTORS = 25
+MAX_DESCENDANTS = 25
+
+class MempoolPackagesTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 2
+ self.setup_clean_chain = False
+
+ def setup_network(self):
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000"]))
+ self.nodes.append(start_node(1, self.options.tmpdir, ["-maxorphantx=1000", "-limitancestorcount=5"]))
+ connect_nodes(self.nodes[0], 1)
+ self.is_network_split = False
+ self.sync_all()
+
+ # Build a transaction that spends parent_txid:vout
+ # Return amount sent
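+ # All of (value - fee) is split into num_outputs equal outputs, so the
+ # wallet should not add a change output.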
+ def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
+ send_value = satoshi_round((value - fee)/num_outputs)
+ inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
+ outputs = {}
+ for i in range(num_outputs):
+ outputs[node.getnewaddress()] = send_value
+ rawtx = node.createrawtransaction(inputs, outputs)
+ signedtx = node.signrawtransaction(rawtx)
+ txid = node.sendrawtransaction(signedtx['hex'])
+ fulltx = node.getrawtransaction(txid, 1)
+ assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output
+ return (txid, send_value)
+
+ def run_test(self):
+ ''' Mine some blocks and have them mature. '''
+ self.nodes[0].generate(101)
+ utxo = self.nodes[0].listunspent(10)
+ txid = utxo[0]['txid']
+ vout = utxo[0]['vout']
+ value = utxo[0]['amount']
+
+ fee = Decimal("0.0001")
+ # MAX_ANCESTORS transactions off a confirmed tx should be fine
+ chain = []
+ for i in range(MAX_ANCESTORS):
+ (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
+ value = sent_value
+ chain.append(txid)
+
+ # Check mempool has MAX_ANCESTORS transactions in it, and descendant
+ # count and fees should look correct
+ mempool = self.nodes[0].getrawmempool(True)
+ assert_equal(len(mempool), MAX_ANCESTORS)
+ descendant_count = 1
+ descendant_fees = 0
+ descendant_size = 0
+
+ descendants = []
+ ancestors = list(chain)
+ for x in reversed(chain):
+ # Check that getmempoolentry is consistent with getrawmempool
+ entry = self.nodes[0].getmempoolentry(x)
+ assert_equal(entry, mempool[x])
+
+ # Check that the descendant calculations are correct
+ assert_equal(mempool[x]['descendantcount'], descendant_count)
+ descendant_fees += mempool[x]['fee']
+ assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'])
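+ # 'fee' is denominated in BTC while 'descendantfees' is in satoshis,
+ # hence the COIN conversion below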
+ assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN)
+ descendant_size += mempool[x]['size']
+ assert_equal(mempool[x]['descendantsize'], descendant_size)
+ descendant_count += 1
+
+ # Check that getmempooldescendants is correct
+ assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
+ descendants.append(x)
+
+ # Check that getmempoolancestors is correct
+ ancestors.remove(x)
+ assert_equal(sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)))
+
+ # Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
+ v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
+ assert_equal(len(v_ancestors), len(chain)-1)
+ for x in v_ancestors.keys():
+ assert_equal(mempool[x], v_ancestors[x])
+ assert(chain[-1] not in v_ancestors.keys())
+
+ v_descendants = self.nodes[0].getmempooldescendants(chain[0], True)
+ assert_equal(len(v_descendants), len(chain)-1)
+ for x in v_descendants.keys():
+ assert_equal(mempool[x], v_descendants[x])
+ assert(chain[0] not in v_descendants.keys())
+
+ # Check that descendant modified fees includes fee deltas from
+ # prioritisetransaction
+ self.nodes[0].prioritisetransaction(chain[-1], 1000)
+ mempool = self.nodes[0].getrawmempool(True)
+
+ descendant_fees = 0
+ for x in reversed(chain):
+ descendant_fees += mempool[x]['fee']
+ assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000)
+
+ # Adding one more transaction on to the chain should fail.
+ assert_raises_jsonrpc(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], txid, vout, value, fee, 1)
+
+ # Check that prioritising a tx before it's added to the mempool works
+ # First clear the mempool by mining a block.
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+ # Prioritise a transaction that has been mined, then add it back to the
+ # mempool by using invalidateblock.
+ self.nodes[0].prioritisetransaction(chain[-1], 2000)
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+ # Keep node1's tip synced with node0
+ self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
+
+ # Now check that the transaction is in the mempool, with the right modified fee
+ mempool = self.nodes[0].getrawmempool(True)
+
+ descendant_fees = 0
+ for x in reversed(chain):
+ descendant_fees += mempool[x]['fee']
+ if (x == chain[-1]):
+ assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']+satoshi_round(0.00002))
+ assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000)
+
+ # TODO: check that node1's mempool is as expected
+
+ # TODO: test ancestor size limits
+
+ # Now test descendant chain limits
+ txid = utxo[1]['txid']
+ value = utxo[1]['amount']
+ vout = utxo[1]['vout']
+
+ transaction_package = []
+ # First create one parent tx with 10 children
+ (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 10)
+ parent_transaction = txid
+ for i in range(10):
+ transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
+
+ # Sign and send up to MAX_DESCENDANT transactions chained off the parent tx
+ for i in range(MAX_DESCENDANTS - 1):
+ utxo = transaction_package.pop(0)
+ (txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
+ for j in range(10):
+ transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
+
+ mempool = self.nodes[0].getrawmempool(True)
+ assert_equal(mempool[parent_transaction]['descendantcount'], MAX_DESCENDANTS)
+
+ # Sending one more chained transaction will fail
+ utxo = transaction_package.pop(0)
+ assert_raises_jsonrpc(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
+
+ # TODO: check that node1's mempool is as expected
+
+ # TODO: test descendant size limits
+
+ # Test reorg handling
+ # First, the basics:
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+ self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
+ self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())
+
+ # Now test the case where node1 has a transaction T in its mempool that
+ # depends on transactions A and B which are in a mined block, and the
+ # block containing A and B is disconnected, AND B is not accepted back
+ # into node1's mempool because its ancestor count is too high.
+
+ # Create 8 transactions, like so:
+ # Tx0 -> Tx1 (vout0)
+ # \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
+ #
+ # Mine them in the next block, then generate a new tx8 that spends
+ # Tx1 and Tx7, and add to node1's mempool, then disconnect the
+ # last block.
+
+ # Create tx0 with 2 outputs
+ utxo = self.nodes[0].listunspent()
+ txid = utxo[0]['txid']
+ value = utxo[0]['amount']
+ vout = utxo[0]['vout']
+
+ send_value = satoshi_round((value - fee)/2)
+ inputs = [ {'txid' : txid, 'vout' : vout} ]
+ outputs = {}
+ for i in range(2):
+ outputs[self.nodes[0].getnewaddress()] = send_value
+ rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
+ signedtx = self.nodes[0].signrawtransaction(rawtx)
+ txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
+ tx0_id = txid
+ value = send_value
+
+ # Create tx1
+ (tx1_id, tx1_value) = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1)
+
+ # Create tx2-7
+ vout = 1
+ txid = tx0_id
+ for i in range(6):
+ (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
+ vout = 0
+ value = sent_value
+
+ # Mine these in a block
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ # Now generate tx8, with a big fee
+ inputs = [ {'txid' : tx1_id, 'vout': 0}, {'txid' : txid, 'vout': 0} ]
+ outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee }
+ rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
+ signedtx = self.nodes[0].signrawtransaction(rawtx)
+ txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
+ sync_mempools(self.nodes)
+
+ # Now try to disconnect the tip on each node...
+ self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+ sync_blocks(self.nodes)
+
+if __name__ == '__main__':
+ MempoolPackagesTest().main()
diff --git a/test/functional/mempool_reorg.py b/test/functional/mempool_reorg.py
new file mode 100755
index 0000000000..812b54ffcb
--- /dev/null
+++ b/test/functional/mempool_reorg.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test mempool re-org scenarios.
+
+Test re-org scenarios with a mempool that contains transactions
+that spend (directly or indirectly) coinbase transactions.
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+# Create one-input, one-output, no-fee transaction:
+class MempoolCoinbaseTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 2
+ self.setup_clean_chain = False
+
+ alert_filename = None # Set by setup_network
+
+ def setup_network(self):
+ args = ["-checkmempool"]
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir, args))
+ self.nodes.append(start_node(1, self.options.tmpdir, args))
+ connect_nodes(self.nodes[1], 0)
+ self.is_network_split = False
+ self.sync_all()
+
+ def run_test(self):
+ # Start with a 200 block chain
+ assert_equal(self.nodes[0].getblockcount(), 200)
+
+ # Mine four blocks. After this, node 0's blocks
+ # 101, 102, and 103 are spendable.
+ new_blocks = self.nodes[1].generate(4)
+ self.sync_all()
+
+ node0_address = self.nodes[0].getnewaddress()
+ node1_address = self.nodes[1].getnewaddress()
+
+ # Three scenarios for re-orging coinbase spends in the memory pool:
+ # 1. Direct coinbase spend : spend_101
+ # 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
+ # 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
+ # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
+ # and make sure the mempool code behaves correctly.
+ b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
+ coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
+ spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 49.99)
+ spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 49.99)
+ spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 49.99)
+
+ # Create a transaction which is time-locked to two blocks in the future
+ timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99})
+ # Set the time lock
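+ # Lower the input's nSequence from 0xffffffff so nLockTime is enforced, then
+ # overwrite the final four bytes (nLockTime, little-endian) with a height two
+ # blocks in the future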
+ timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
+ timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
+ timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
+ # This will raise an exception because the time-locked transaction is not yet final
+ assert_raises_jsonrpc(-26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx)
+
+ # Broadcast and mine spend_102 and 103:
+ spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
+ spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
+ self.nodes[0].generate(1)
+ # Time-locked transaction is still not final
+ assert_raises_jsonrpc(-26,'non-final', self.nodes[0].sendrawtransaction, timelock_tx)
+
+ # Create 102_1 and 103_1:
+ spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 49.98)
+ spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 49.98)
+
+ # Broadcast and mine 103_1:
+ spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
+ last_block = self.nodes[0].generate(1)
+ # Time-locked transaction can now be broadcast
+ timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
+
+ # ... now put spend_101 and spend_102_1 in memory pools:
+ spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
+ spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
+
+ self.sync_all()
+
+ assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
+
+ for node in self.nodes:
+ node.invalidateblock(last_block[0])
+ # Time-locked transaction is non-final again and has been removed from the mempool
+ # spend_103_1 has been re-orged out of the chain and is back in the mempool
+ assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
+
+ # Use invalidateblock to re-org back and make all those coinbase spends
+ # immature/invalid:
+ for node in self.nodes:
+ node.invalidateblock(new_blocks[0])
+
+ self.sync_all()
+
+ # mempool should be empty.
+ assert_equal(set(self.nodes[0].getrawmempool()), set())
+
+if __name__ == '__main__':
+ MempoolCoinbaseTest().main()
diff --git a/test/functional/mempool_resurrect_test.py b/test/functional/mempool_resurrect_test.py
new file mode 100755
index 0000000000..727892d1f2
--- /dev/null
+++ b/test/functional/mempool_resurrect_test.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test resurrection of mined transactions when the blockchain is re-organized."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+# Create one-input, one-output, no-fee transaction:
+class MempoolCoinbaseTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 1
+ self.setup_clean_chain = False
+
+ def setup_network(self):
+ # Just need one node for this test
+ args = ["-checkmempool"]
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir, args))
+ self.is_network_split = False
+
+ def run_test(self):
+ node0_address = self.nodes[0].getnewaddress()
+ # Spend block 1/2/3's coinbase transactions
+ # Mine a block.
+ # Create three more transactions, spending the spends
+ # Mine another block.
+ # ... make sure all the transactions are confirmed
+ # Invalidate both blocks
+ # ... make sure all the transactions are put back in the mempool
+ # Mine a new block
+ # ... make sure all the transactions are confirmed again.
+
+ b = [ self.nodes[0].getblockhash(n) for n in range(1, 4) ]
+ coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
+ spends1_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]
+ spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ]
+
+ blocks = []
+ blocks.extend(self.nodes[0].generate(1))
+
+ spends2_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.98) for txid in spends1_id ]
+ spends2_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw ]
+
+ blocks.extend(self.nodes[0].generate(1))
+
+ # mempool should be empty, all txns confirmed
+ assert_equal(set(self.nodes[0].getrawmempool()), set())
+ for txid in spends1_id+spends2_id:
+ tx = self.nodes[0].gettransaction(txid)
+ assert(tx["confirmations"] > 0)
+
+ # Use invalidateblock to re-org back; all transactions should
+ # end up unconfirmed and back in the mempool
+ for node in self.nodes:
+ node.invalidateblock(blocks[0])
+
+ # all transactions should be back in the mempool, unconfirmed
+ assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
+ for txid in spends1_id+spends2_id:
+ tx = self.nodes[0].gettransaction(txid)
+ assert(tx["confirmations"] == 0)
+
+ # Generate another block, they should all get mined
+ self.nodes[0].generate(1)
+ # mempool should be empty, all txns confirmed
+ assert_equal(set(self.nodes[0].getrawmempool()), set())
+ for txid in spends1_id+spends2_id:
+ tx = self.nodes[0].gettransaction(txid)
+ assert(tx["confirmations"] > 0)
+
+
+if __name__ == '__main__':
+ MempoolCoinbaseTest().main()
diff --git a/test/functional/mempool_spendcoinbase.py b/test/functional/mempool_spendcoinbase.py
new file mode 100755
index 0000000000..f562a93d86
--- /dev/null
+++ b/test/functional/mempool_spendcoinbase.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test spending coinbase transactions.
+
+The coinbase transaction in block N can appear in block
+N+100... so is valid in the mempool when the best block
+height is N+99.
+This test makes sure coinbase spends that will be mature
+in the next block are accepted into the memory pool,
+but less mature coinbase spends are NOT.
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+# Create one-input, one-output, no-fee transaction:
+class MempoolSpendCoinbaseTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 1
+ self.setup_clean_chain = False
+
+ def setup_network(self):
+ # Just need one node for this test
+ args = ["-checkmempool"]
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir, args))
+ self.is_network_split = False
+
+ def run_test(self):
+ chain_height = self.nodes[0].getblockcount()
+ assert_equal(chain_height, 200)
+ node0_address = self.nodes[0].getnewaddress()
+
+ # Coinbase at height chain_height-100+1 ok in mempool, should
+ # get mined. Coinbase at height chain_height-100+2 is
+ # too immature to spend.
+ b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
+ coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
+ spends_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]
+
+ spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
+
+ # coinbase at height 102 should be too immature to spend
+ assert_raises_jsonrpc(-26,"bad-txns-premature-spend-of-coinbase", self.nodes[0].sendrawtransaction, spends_raw[1])
+
+ # mempool should have just spend_101:
+ assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
+
+ # mine a block, spend_101 should get confirmed
+ self.nodes[0].generate(1)
+ assert_equal(set(self.nodes[0].getrawmempool()), set())
+
+ # ... and now height 102 can be spent:
+ spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
+ assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
+
+if __name__ == '__main__':
+ MempoolSpendCoinbaseTest().main()
diff --git a/test/functional/merkle_blocks.py b/test/functional/merkle_blocks.py
new file mode 100755
index 0000000000..5963f2e7b6
--- /dev/null
+++ b/test/functional/merkle_blocks.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test gettxoutproof and verifytxoutproof RPCs."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class MerkleBlockTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 4
+
+ def setup_network(self):
+ self.nodes = []
+ # Nodes 0/1 are "wallet" nodes
+ self.nodes.append(start_node(0, self.options.tmpdir))
+ self.nodes.append(start_node(1, self.options.tmpdir))
+ # Nodes 2/3 are used for testing
+ self.nodes.append(start_node(2, self.options.tmpdir))
+ self.nodes.append(start_node(3, self.options.tmpdir, ["-txindex"]))
+ connect_nodes(self.nodes[0], 1)
+ connect_nodes(self.nodes[0], 2)
+ connect_nodes(self.nodes[0], 3)
+
+ self.is_network_split = False
+ self.sync_all()
+
+ def run_test(self):
+ self.log.info("Mining blocks...")
+ self.nodes[0].generate(105)
+ self.sync_all()
+
+ chain_height = self.nodes[1].getblockcount()
+ assert_equal(chain_height, 105)
+ assert_equal(self.nodes[1].getbalance(), 0)
+ assert_equal(self.nodes[2].getbalance(), 0)
+
+ node0utxos = self.nodes[0].listunspent(1)
+ tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
+ txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
+ tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
+ txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
+ assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])
+
+ self.nodes[0].generate(1)
+ blockhash = self.nodes[0].getblockhash(chain_height + 1)
+ self.sync_all()
+
+ txlist = []
+ blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
+ txlist.append(blocktxn[1])
+ txlist.append(blocktxn[2])
+
+ assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
+ assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
+ assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
+
+ txin_spent = self.nodes[1].listunspent(1).pop()
+ tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 49.98})
+ self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ txid_spent = txin_spent["txid"]
+ txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
+
+ # We can't find the block from a fully-spent tx
+ assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])
+ # ...but we can if we specify the block
+ assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
+ # ...or if the first tx is not fully-spent
+ assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
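+ # Without -txindex the proof lookup relies on an unspent output, and we
+ # don't know which of txid1/txid2 was spent above, so try both orders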
+ try:
+ assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
+ except JSONRPCException:
+ assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)
+ # ...or if we have a -txindex
+ assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
+
+if __name__ == '__main__':
+ MerkleBlockTest().main()
diff --git a/test/functional/multi_rpc.py b/test/functional/multi_rpc.py
new file mode 100755
index 0000000000..3b74bf1c46
--- /dev/null
+++ b/test/functional/multi_rpc.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test multiple RPC users."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import str_to_b64str, assert_equal
+
+import os
+import http.client
+import urllib.parse
+
+class HTTPBasicsTest (BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = False
+ self.num_nodes = 1
+
+ def setup_chain(self):
+ super().setup_chain()
+ #Append rpcauth to bitcoin.conf before initialization
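+ # Each line has the form rpcauth=<user>:<salt>$<hmac-sha256(salt, password)>,
+ # as produced by the share/rpcuser helper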
+ rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
+ rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
+ with open(os.path.join(self.options.tmpdir+"/node0", "bitcoin.conf"), 'a', encoding='utf8') as f:
+ f.write(rpcauth+"\n")
+ f.write(rpcauth2+"\n")
+
+ def setup_network(self):
+ self.nodes = self.setup_nodes()
+
+ def run_test(self):
+
+ ##################################################
+ # Check correctness of the rpcauth config option #
+ ##################################################
+ url = urllib.parse.urlparse(self.nodes[0].url)
+
+ #Old authpair
+ authpair = url.username + ':' + url.password
+
+ #New authpair generated via share/rpcuser tool
+ rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
+ password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
+
+ #Second authpair with different username
+ rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
+ password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
+ authpairnew = "rt:"+password
+
+ headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
+
+ conn = http.client.HTTPConnection(url.hostname, url.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ resp = conn.getresponse()
+ assert_equal(resp.status==401, False)
+ conn.close()
+
+ #Use new authpair to confirm both work
+ headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
+
+ conn = http.client.HTTPConnection(url.hostname, url.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ resp = conn.getresponse()
+ assert_equal(resp.status==401, False)
+ conn.close()
+
+ #Wrong login name with rt's password
+ authpairnew = "rtwrong:"+password
+ headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
+
+ conn = http.client.HTTPConnection(url.hostname, url.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ resp = conn.getresponse()
+ assert_equal(resp.status==401, True)
+ conn.close()
+
+ #Wrong password for rt
+ authpairnew = "rt:"+password+"wrong"
+ headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
+
+ conn = http.client.HTTPConnection(url.hostname, url.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ resp = conn.getresponse()
+ assert_equal(resp.status==401, True)
+ conn.close()
+
+ #Correct for rt2
+ authpairnew = "rt2:"+password2
+ headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
+
+ conn = http.client.HTTPConnection(url.hostname, url.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ resp = conn.getresponse()
+ assert_equal(resp.status==401, False)
+ conn.close()
+
+ #Wrong password for rt2
+ authpairnew = "rt2:"+password2+"wrong"
+ headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
+
+ conn = http.client.HTTPConnection(url.hostname, url.port)
+ conn.connect()
+ conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
+ resp = conn.getresponse()
+ assert_equal(resp.status==401, True)
+ conn.close()
+
+
+if __name__ == '__main__':
+ HTTPBasicsTest().main()
diff --git a/test/functional/nodehandling.py b/test/functional/nodehandling.py
new file mode 100755
index 0000000000..a6b10a0d83
--- /dev/null
+++ b/test/functional/nodehandling.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test node handling."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+import urllib.parse
+
+class NodeHandlingTest (BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 4
+ self.setup_clean_chain = False
+
+ def run_test(self):
+ ###########################
+ # setban/listbanned tests #
+ ###########################
+ assert_equal(len(self.nodes[2].getpeerinfo()), 4) # node2 should have 4 peer connections at this point
+ self.nodes[2].setban("127.0.0.1", "add")
+ time.sleep(3) # wait till the nodes are disconnected
+ assert_equal(len(self.nodes[2].getpeerinfo()), 0) #all nodes must be disconnected at this point
+ assert_equal(len(self.nodes[2].listbanned()), 1)
+ self.nodes[2].clearbanned()
+ assert_equal(len(self.nodes[2].listbanned()), 0)
+ self.nodes[2].setban("127.0.0.0/24", "add")
+ assert_equal(len(self.nodes[2].listbanned()), 1)
+ # This will throw an exception because 127.0.0.1 is within range 127.0.0.0/24
+ assert_raises_jsonrpc(-23, "IP/Subnet already banned", self.nodes[2].setban, "127.0.0.1", "add")
+ # This will throw an exception because 127.0.0.1/42 is not a real subnet
+ assert_raises_jsonrpc(-30, "Error: Invalid IP/Subnet", self.nodes[2].setban, "127.0.0.1/42", "add")
+ assert_equal(len(self.nodes[2].listbanned()), 1) #still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
+ # This will throw an exception because 127.0.0.1 was not added above
+ assert_raises_jsonrpc(-30, "Error: Unban failed", self.nodes[2].setban, "127.0.0.1", "remove")
+ assert_equal(len(self.nodes[2].listbanned()), 1)
+ self.nodes[2].setban("127.0.0.0/24", "remove")
+ assert_equal(len(self.nodes[2].listbanned()), 0)
+ self.nodes[2].clearbanned()
+ assert_equal(len(self.nodes[2].listbanned()), 0)
+
+ ##test persisted banlist
+ self.nodes[2].setban("127.0.0.0/32", "add")
+ self.nodes[2].setban("127.0.0.0/24", "add")
+ self.nodes[2].setban("192.168.0.1", "add", 1) #ban for 1 seconds
+ self.nodes[2].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) #ban for 1000 seconds
+ listBeforeShutdown = self.nodes[2].listbanned()
+ assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) #must be here
+ time.sleep(2) # make sure the 1-second ban on 192.168.0.1 has expired
+
+ #stop node
+ stop_node(self.nodes[2], 2)
+
+ self.nodes[2] = start_node(2, self.options.tmpdir)
+ listAfterShutdown = self.nodes[2].listbanned()
+ assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
+ assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
+ assert_equal("/19" in listAfterShutdown[2]['address'], True)
+
+ ###########################
+ # RPC disconnectnode test #
+ ###########################
+ url = urllib.parse.urlparse(self.nodes[1].url)
+ self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
+ time.sleep(2) #disconnecting a node needs a little bit of time
+ for node in self.nodes[0].getpeerinfo():
+ assert(node['addr'] != url.hostname+":"+str(p2p_port(1)))
+
+ connect_nodes_bi(self.nodes,0,1) #reconnect the node
+ found = False
+ for node in self.nodes[0].getpeerinfo():
+ if node['addr'] == url.hostname+":"+str(p2p_port(1)):
+ found = True
+ assert(found)
+
+if __name__ == '__main__':
+ NodeHandlingTest().main()
diff --git a/test/functional/nulldummy.py b/test/functional/nulldummy.py
new file mode 100755
index 0000000000..369c593a90
--- /dev/null
+++ b/test/functional/nulldummy.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test NULLDUMMY softfork.
+
+Connect to a single node.
+Generate 2 blocks (save the coinbases for later).
+Generate 427 more blocks.
+[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block.
+[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
+[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block.
+[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block.
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.mininode import CTransaction, NetworkThread
+from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment
+from test_framework.script import CScript
+from io import BytesIO
+import time
+
+NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
+
+def trueDummy(tx):
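+ # Replace the empty CHECKMULTISIG dummy element at the start of the scriptSig
+ # with OP_TRUE (0x51), making the transaction violate NULLDUMMY.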
+ scriptSig = CScript(tx.vin[0].scriptSig)
+ newscript = []
+ for i in scriptSig:
+ if (len(newscript) == 0):
+ assert(len(i) == 0)
+ newscript.append(b'\x51')
+ else:
+ newscript.append(i)
+ tx.vin[0].scriptSig = CScript(newscript)
+ tx.rehash()
+
+class NULLDUMMYTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 1
+ self.setup_clean_chain = True
+
+ def setup_network(self):
+ # Whitelist localhost and allow the wallet to create witness outputs before segwit activation
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
+ extra_args=[['-whitelist=127.0.0.1', '-walletprematurewitness']])
+
+ def run_test(self):
+ self.address = self.nodes[0].getnewaddress()
+ self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])
+ self.wit_address = self.nodes[0].addwitnessaddress(self.address)
+ self.wit_ms_address = self.nodes[0].addwitnessaddress(self.ms_address)
+
+ NetworkThread().start() # Start up network handling in another thread
+ self.coinbase_blocks = self.nodes[0].generate(2) # Block 2
+ coinbase_txid = []
+ for i in self.coinbase_blocks:
+ coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
+ self.nodes[0].generate(427) # Block 429
+ self.lastblockhash = self.nodes[0].getbestblockhash()
+ self.tip = int("0x" + self.lastblockhash, 0)
+ self.lastblockheight = 429
+ self.lastblocktime = int(time.time()) + 429
+
+ self.log.info("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
+ test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
+ txid1 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[0].serialize_with_witness()), True)
+ test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48))
+ txid2 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[1].serialize_with_witness()), True)
+ test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49))
+ txid3 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[2].serialize_with_witness()), True)
+ self.block_submit(self.nodes[0], test1txs, False, True)
+
+ self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
+ test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 47)
+ trueDummy(test2tx)
+ assert_raises_jsonrpc(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True)
+
+ self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
+ self.block_submit(self.nodes[0], [test2tx], False, True)
+
+ self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
+ test4tx = self.create_transaction(self.nodes[0], test2tx.hash, self.address, 46)
+ test6txs=[CTransaction(test4tx)]
+ trueDummy(test4tx)
+ assert_raises_jsonrpc(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True)
+ self.block_submit(self.nodes[0], [test4tx])
+
+ self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
+ test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48)
+ test6txs.append(CTransaction(test5tx))
+ test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
+ assert_raises_jsonrpc(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test5tx.serialize_with_witness()), True)
+ self.block_submit(self.nodes[0], [test5tx], True)
+
+ self.log.info("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]")
+ for i in test6txs:
+ self.nodes[0].sendrawtransaction(bytes_to_hex_str(i.serialize_with_witness()), True)
+ self.block_submit(self.nodes[0], test6txs, True, True)
+
+
+ def create_transaction(self, node, txid, to_address, amount):
+ inputs = [{ "txid" : txid, "vout" : 0}]
+ outputs = { to_address : amount }
+ rawtx = node.createrawtransaction(inputs, outputs)
+ signresult = node.signrawtransaction(rawtx)
+ tx = CTransaction()
+ f = BytesIO(hex_str_to_bytes(signresult['hex']))
+ tx.deserialize(f)
+ return tx
+
+
+ def block_submit(self, node, txs, witness = False, accept = False):
+ block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
+ block.nVersion = 4
+ for tx in txs:
+ tx.rehash()
+ block.vtx.append(tx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+        if witness:
+            add_witness_commitment(block)
+ block.rehash()
+ block.solve()
+ node.submitblock(bytes_to_hex_str(block.serialize(True)))
+ if (accept):
+ assert_equal(node.getbestblockhash(), block.hash)
+ self.tip = block.sha256
+ self.lastblockhash = block.hash
+ self.lastblocktime += 1
+ self.lastblockheight += 1
+ else:
+ assert_equal(node.getbestblockhash(), self.lastblockhash)
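+
+    # Editor's note: when witness is True, add_witness_commitment() appends
+    # the BIP 141 coinbase output committing to the witness merkle root;
+    # without that commitment, a post-activation block carrying witness data
+    # would be rejected.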
+
+if __name__ == '__main__':
+ NULLDUMMYTest().main()
diff --git a/test/functional/p2p-acceptblock.py b/test/functional/p2p-acceptblock.py
new file mode 100755
index 0000000000..e1111da4ae
--- /dev/null
+++ b/test/functional/p2p-acceptblock.py
@@ -0,0 +1,277 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test processing of unrequested blocks.
+
+Since behavior differs when receiving unrequested blocks from whitelisted peers
+versus non-whitelisted peers, this tests the behavior of both (effectively two
+separate tests running in parallel).
+
+Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
+whitelist localhost, but node1 does. They will each be on their own chain for
+this test.
+
+We have one NodeConn connection to each, test_node and white_node respectively.
+
+The test:
+1. Generate one block on each node, to leave IBD.
+
+2. Mine a new block on each tip, and deliver to each node from its peer.
+   The tip should advance.
+
+3. Mine a block that forks the previous block, and deliver to each node from
+   the corresponding peer.
+ Node0 should not process this block (just accept the header), because it is
+ unrequested and doesn't have more work than the tip.
+ Node1 should process because this is coming from a whitelisted peer.
+
+4. Send another block that builds on the forking block.
+ Node0 should process this block but be stuck on the shorter chain, because
+ it's missing an intermediate block.
+ Node1 should reorg to this longer chain.
+
+4b. Send 288 more blocks on the longer chain.
+ Node0 should process all but the last block (too far ahead in height).
+ Send all headers to Node1, and then send the last block in that chain.
+ Node1 should accept the block because it's coming from a whitelisted peer.
+
+5. Send a duplicate of the block in #3 to Node0.
+ Node0 should not process the block because it is unrequested, and stay on
+ the shorter chain.
+
+6. Send Node0 an inv for the height 3 block produced in #4 above.
+   Node0 should figure out that it is missing the height 2 block and send a
+   getdata for it.
+
+7. Send Node0 the missing block again.
+ Node0 should process and the tip should advance.
+"""
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+import time
+from test_framework.blocktools import create_block, create_coinbase
+
+# TestNode: bare-bones "peer". Used mostly as a conduit for sending p2p
+# messages to a node; the messages themselves are generated in the main
+# testing logic.
+class TestNode(NodeConnCB):
+ def __init__(self):
+ NodeConnCB.__init__(self)
+ self.connection = None
+ self.ping_counter = 1
+ self.last_pong = msg_pong()
+
+ def add_connection(self, conn):
+ self.connection = conn
+
+ # Track the last getdata message we receive (used in the test)
+ def on_getdata(self, conn, message):
+ self.last_getdata = message
+
+ # Spin until verack message is received from the node.
+ # We use this to signal that our test can begin. This
+ # is called from the testing thread, so it needs to acquire
+ # the global lock.
+ def wait_for_verack(self):
+ while True:
+ with mininode_lock:
+ if self.verack_received:
+ return
+ time.sleep(0.05)
+
+ # Wrapper for the NodeConn's send_message function
+ def send_message(self, message):
+ self.connection.send_message(message)
+
+ def on_pong(self, conn, message):
+ self.last_pong = message
+
+ # Sync up with the node after delivery of a block
+ def sync_with_ping(self, timeout=30):
+ self.connection.send_message(msg_ping(nonce=self.ping_counter))
+ received_pong = False
+ sleep_time = 0.05
+ while not received_pong and timeout > 0:
+ time.sleep(sleep_time)
+ timeout -= sleep_time
+ with mininode_lock:
+ if self.last_pong.nonce == self.ping_counter:
+ received_pong = True
+ self.ping_counter += 1
+ return received_pong
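+
+    # Editor's note: the ping/pong round-trip in sync_with_ping() is how these
+    # tests serialize on the node's message-processing queue; once a pong
+    # carrying our nonce comes back, any block we sent earlier on this
+    # connection has been fully processed by the node.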
+
+
+class AcceptBlockTest(BitcoinTestFramework):
+ def add_options(self, parser):
+ parser.add_option("--testbinary", dest="testbinary",
+ default=os.getenv("BITCOIND", "bitcoind"),
+ help="bitcoind binary to test")
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 2
+
+ def setup_network(self):
+ # Node0 will be used to test behavior of processing unrequested blocks
+ # from peers which are not whitelisted, while Node1 will be used for
+ # the whitelisted case.
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir,
+ binary=self.options.testbinary))
+ self.nodes.append(start_node(1, self.options.tmpdir,
+ ["-whitelist=127.0.0.1"],
+ binary=self.options.testbinary))
+
+ def run_test(self):
+ # Setup the p2p connections and start up the network thread.
+ test_node = TestNode() # connects to node0 (not whitelisted)
+ white_node = TestNode() # connects to node1 (whitelisted)
+
+ connections = []
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
+ connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
+ test_node.add_connection(connections[0])
+ white_node.add_connection(connections[1])
+
+ NetworkThread().start() # Start up network handling in another thread
+
+ # Test logic begins here
+ test_node.wait_for_verack()
+ white_node.wait_for_verack()
+
+ # 1. Have both nodes mine a block (leave IBD)
+ [ n.generate(1) for n in self.nodes ]
+ tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
+
+ # 2. Send one block that builds on each tip.
+ # This should be accepted.
+ blocks_h2 = [] # the height 2 blocks on each node's chain
+ block_time = int(time.time()) + 1
+ for i in range(2):
+ blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
+ blocks_h2[i].solve()
+ block_time += 1
+ test_node.send_message(msg_block(blocks_h2[0]))
+ white_node.send_message(msg_block(blocks_h2[1]))
+
+ [ x.sync_with_ping() for x in [test_node, white_node] ]
+ assert_equal(self.nodes[0].getblockcount(), 2)
+ assert_equal(self.nodes[1].getblockcount(), 2)
+ self.log.info("First height 2 block accepted by both nodes")
+
+ # 3. Send another block that builds on the original tip.
+ blocks_h2f = [] # Blocks at height 2 that fork off the main chain
+ for i in range(2):
+ blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
+ blocks_h2f[i].solve()
+ test_node.send_message(msg_block(blocks_h2f[0]))
+ white_node.send_message(msg_block(blocks_h2f[1]))
+
+ [ x.sync_with_ping() for x in [test_node, white_node] ]
+ for x in self.nodes[0].getchaintips():
+ if x['hash'] == blocks_h2f[0].hash:
+ assert_equal(x['status'], "headers-only")
+
+ for x in self.nodes[1].getchaintips():
+ if x['hash'] == blocks_h2f[1].hash:
+ assert_equal(x['status'], "valid-headers")
+
+ self.log.info("Second height 2 block accepted only from whitelisted peer")
+
+ # 4. Now send another block that builds on the forking chain.
+ blocks_h3 = []
+ for i in range(2):
+ blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
+ blocks_h3[i].solve()
+ test_node.send_message(msg_block(blocks_h3[0]))
+ white_node.send_message(msg_block(blocks_h3[1]))
+
+ [ x.sync_with_ping() for x in [test_node, white_node] ]
+ # Since the earlier block was not processed by node0, the new block
+ # can't be fully validated.
+ for x in self.nodes[0].getchaintips():
+ if x['hash'] == blocks_h3[0].hash:
+ assert_equal(x['status'], "headers-only")
+
+ # But this block should be accepted by node0 since it has more work.
+ self.nodes[0].getblock(blocks_h3[0].hash)
+ self.log.info("Unrequested more-work block accepted from non-whitelisted peer")
+
+ # Node1 should have accepted and reorged.
+ assert_equal(self.nodes[1].getblockcount(), 3)
+ self.log.info("Successfully reorged to length 3 chain from whitelisted peer")
+
+ # 4b. Now mine 288 more blocks and deliver; all should be processed but
+ # the last (height-too-high) on node0. Node1 should process the tip if
+ # we give it the headers chain leading to the tip.
+ tips = blocks_h3
+ headers_message = msg_headers()
+ all_blocks = [] # node0's blocks
+ for j in range(2):
+ for i in range(288):
+ next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
+ next_block.solve()
+ if j==0:
+ test_node.send_message(msg_block(next_block))
+ all_blocks.append(next_block)
+ else:
+ headers_message.headers.append(CBlockHeader(next_block))
+ tips[j] = next_block
+
+ time.sleep(2)
+ # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
+ for x in all_blocks[:-1]:
+ self.nodes[0].getblock(x.hash)
+ assert_raises_jsonrpc(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
+
+ headers_message.headers.pop() # Ensure the last block is unrequested
+ white_node.send_message(headers_message) # Send headers leading to tip
+ white_node.send_message(msg_block(tips[1])) # Now deliver the tip
+ white_node.sync_with_ping()
+ self.nodes[1].getblock(tips[1].hash)
+ self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer")
+
+ # 5. Test handling of unrequested block on the node that didn't process
+ # Should still not be processed (even though it has a child that has more
+ # work).
+ test_node.send_message(msg_block(blocks_h2f[0]))
+
+ # Here, if the sleep is too short, the test could falsely succeed (if the
+ # node hasn't processed the block by the time the sleep returns, and then
+ # the node processes it and incorrectly advances the tip).
+ # But this would be caught later on, when we verify that an inv triggers
+ # a getdata request for this block.
+ test_node.sync_with_ping()
+ assert_equal(self.nodes[0].getblockcount(), 2)
+ self.log.info("Unrequested block that would complete more-work chain was ignored")
+
+ # 6. Try to get node to request the missing block.
+ # Poke the node with an inv for block at height 3 and see if that
+ # triggers a getdata on block 2 (it should if block 2 is missing).
+ with mininode_lock:
+ # Clear state so we can check the getdata request
+ test_node.last_getdata = None
+ test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
+
+ test_node.sync_with_ping()
+ with mininode_lock:
+ getdata = test_node.last_getdata
+
+ # Check that the getdata includes the right block
+ assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
+ self.log.info("Inv at tip triggered getdata for unprocessed block")
+
+ # 7. Send the missing block for the third time (now it is requested)
+ test_node.send_message(msg_block(blocks_h2f[0]))
+
+ test_node.sync_with_ping()
+ assert_equal(self.nodes[0].getblockcount(), 290)
+ self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
+
+ [ c.disconnect_node() for c in connections ]
+
+if __name__ == '__main__':
+ AcceptBlockTest().main()
diff --git a/test/functional/p2p-compactblocks.py b/test/functional/p2p-compactblocks.py
new file mode 100755
index 0000000000..1fc0312c34
--- /dev/null
+++ b/test/functional/p2p-compactblocks.py
@@ -0,0 +1,968 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test compact blocks (BIP 152).
+
+Version 1 compact blocks are pre-segwit (txids).
+Version 2 compact blocks are post-segwit (wtxids).
+"""
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
+from test_framework.script import CScript, OP_TRUE
+
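+# Editor's sketch (for orientation; mirrors what test_sendcmpct() below
+# exercises): BIP 152 relay is negotiated with a "sendcmpct" message carrying
+# an announce flag and a version. A node announces blocks to a peer via
+# cmpctblock only after the peer has sent sendcmpct with announce=True at a
+# version the node supports:
+#
+#   msg = msg_sendcmpct()
+#   msg.version = 2      # 1 = txid-based shortids, 2 = wtxid-based (segwit)
+#   msg.announce = True  # announce new blocks with cmpctblock
+#   peer.send_and_ping(msg)
+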
+# TestNode: A peer we use to send messages to bitcoind, and store responses.
+class TestNode(SingleNodeConnCB):
+ def __init__(self):
+ SingleNodeConnCB.__init__(self)
+ self.last_sendcmpct = []
+ self.last_headers = None
+ self.last_inv = None
+ self.last_cmpctblock = None
+ self.block_announced = False
+ self.last_getdata = None
+ self.last_getheaders = None
+ self.last_getblocktxn = None
+ self.last_block = None
+ self.last_blocktxn = None
+ # Store the hashes of blocks we've seen announced.
+ # This is for synchronizing the p2p message traffic,
+        # so we can, e.g., wait until a particular block is announced.
+ self.set_announced_blockhashes = set()
+
+ def on_sendcmpct(self, conn, message):
+ self.last_sendcmpct.append(message)
+
+ def on_block(self, conn, message):
+ self.last_block = message
+
+ def on_cmpctblock(self, conn, message):
+ self.last_cmpctblock = message
+ self.block_announced = True
+ self.last_cmpctblock.header_and_shortids.header.calc_sha256()
+ self.set_announced_blockhashes.add(self.last_cmpctblock.header_and_shortids.header.sha256)
+
+ def on_headers(self, conn, message):
+ self.last_headers = message
+ self.block_announced = True
+ for x in self.last_headers.headers:
+ x.calc_sha256()
+ self.set_announced_blockhashes.add(x.sha256)
+
+ def on_inv(self, conn, message):
+ self.last_inv = message
+ for x in self.last_inv.inv:
+ if x.type == 2:
+ self.block_announced = True
+ self.set_announced_blockhashes.add(x.hash)
+
+ def on_getdata(self, conn, message):
+ self.last_getdata = message
+
+ def on_getheaders(self, conn, message):
+ self.last_getheaders = message
+
+ def on_getblocktxn(self, conn, message):
+ self.last_getblocktxn = message
+
+ def on_blocktxn(self, conn, message):
+ self.last_blocktxn = message
+
+ # Requires caller to hold mininode_lock
+ def received_block_announcement(self):
+ return self.block_announced
+
+ def clear_block_announcement(self):
+ with mininode_lock:
+ self.block_announced = False
+ self.last_inv = None
+ self.last_headers = None
+ self.last_cmpctblock = None
+
+ def get_headers(self, locator, hashstop):
+ msg = msg_getheaders()
+ msg.locator.vHave = locator
+ msg.hashstop = hashstop
+ self.connection.send_message(msg)
+
+ def send_header_for_blocks(self, new_blocks):
+ headers_message = msg_headers()
+ headers_message.headers = [CBlockHeader(b) for b in new_blocks]
+ self.send_message(headers_message)
+
+ def request_headers_and_sync(self, locator, hashstop=0):
+ self.clear_block_announcement()
+ self.get_headers(locator, hashstop)
+        assert(wait_until(self.received_block_announcement, timeout=30))
+ self.clear_block_announcement()
+
+ # Block until a block announcement for a particular block hash is
+ # received.
+ def wait_for_block_announcement(self, block_hash, timeout=30):
+ def received_hash():
+ return (block_hash in self.set_announced_blockhashes)
+ return wait_until(received_hash, timeout=timeout)
+
+class CompactBlocksTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ # Node0 = pre-segwit, node1 = segwit-aware
+ self.num_nodes = 2
+ self.utxos = []
+
+ def setup_network(self):
+ self.nodes = []
+
+ # Start up node0 to be a version 1, pre-segwit node.
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
+ [["-bip9params=segwit:0:0"],
+ ["-txindex"]])
+ connect_nodes(self.nodes[0], 1)
+
+ def build_block_on_tip(self, node, segwit=False):
+ height = node.getblockcount()
+ tip = node.getbestblockhash()
+ mtp = node.getblockheader(tip)['mediantime']
+ block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
+ block.nVersion = 4
+ if segwit:
+ add_witness_commitment(block)
+ block.solve()
+ return block
+
+    # Create 10 anyone-can-spend UTXOs for testing.
+ def make_utxos(self):
+ # Doesn't matter which node we use, just use node0.
+ block = self.build_block_on_tip(self.nodes[0])
+ self.test_node.send_and_ping(msg_block(block))
+ assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
+ self.nodes[0].generate(100)
+
+ total_value = block.vtx[0].vout[0].nValue
+ out_value = total_value // 10
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
+ for i in range(10):
+ tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
+ tx.rehash()
+
+ block2 = self.build_block_on_tip(self.nodes[0])
+ block2.vtx.append(tx)
+ block2.hashMerkleRoot = block2.calc_merkle_root()
+ block2.solve()
+ self.test_node.send_and_ping(msg_block(block2))
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
+ self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
+ return
+
+ # Test "sendcmpct" (between peers preferring the same version):
+ # - No compact block announcements unless sendcmpct is sent.
+ # - If sendcmpct is sent with version > preferred_version, the message is ignored.
+ # - If sendcmpct is sent with boolean 0, then block announcements are not
+ # made with compact blocks.
+ # - If sendcmpct is then sent with boolean 1, then new block announcements
+ # are made with compact blocks.
+ # If old_node is passed in, request compact blocks with version=preferred-1
+ # and verify that it receives block announcements via compact block.
+ def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
+ # Make sure we get a SENDCMPCT message from our peer
+ def received_sendcmpct():
+ return (len(test_node.last_sendcmpct) > 0)
+        assert(wait_until(received_sendcmpct, timeout=30))
+ with mininode_lock:
+ # Check that the first version received is the preferred one
+ assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
+ # And that we receive versions down to 1.
+ assert_equal(test_node.last_sendcmpct[-1].version, 1)
+ test_node.last_sendcmpct = []
+
+ tip = int(node.getbestblockhash(), 16)
+
+ def check_announcement_of_new_block(node, peer, predicate):
+ peer.clear_block_announcement()
+ block_hash = int(node.generate(1)[0], 16)
+ peer.wait_for_block_announcement(block_hash, timeout=30)
+            assert(peer.block_announced)
+
+ with mininode_lock:
+ assert predicate(peer), (
+ "block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
+ block_hash, peer.last_cmpctblock, peer.last_inv))
+
+ # We shouldn't get any block announcements via cmpctblock yet.
+ check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
+
+ # Try one more time, this time after requesting headers.
+ test_node.request_headers_and_sync(locator=[tip])
+ check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_inv is not None)
+
+ # Test a few ways of using sendcmpct that should NOT
+ # result in compact block announcements.
+ # Before each test, sync the headers chain.
+ test_node.request_headers_and_sync(locator=[tip])
+
+ # Now try a SENDCMPCT message with too-high version
+ sendcmpct = msg_sendcmpct()
+ sendcmpct.version = preferred_version+1
+ sendcmpct.announce = True
+ test_node.send_and_ping(sendcmpct)
+ check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
+
+ # Headers sync before next test.
+ test_node.request_headers_and_sync(locator=[tip])
+
+ # Now try a SENDCMPCT message with valid version, but announce=False
+ sendcmpct.version = preferred_version
+ sendcmpct.announce = False
+ test_node.send_and_ping(sendcmpct)
+ check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
+
+ # Headers sync before next test.
+ test_node.request_headers_and_sync(locator=[tip])
+
+ # Finally, try a SENDCMPCT message with announce=True
+ sendcmpct.version = preferred_version
+ sendcmpct.announce = True
+ test_node.send_and_ping(sendcmpct)
+ check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
+
+ # Try one more time (no headers sync should be needed!)
+ check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
+
+ # Try one more time, after turning on sendheaders
+ test_node.send_and_ping(msg_sendheaders())
+ check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
+
+ # Try one more time, after sending a version-1, announce=false message.
+ sendcmpct.version = preferred_version-1
+ sendcmpct.announce = False
+ test_node.send_and_ping(sendcmpct)
+ check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
+
+ # Now turn off announcements
+ sendcmpct.version = preferred_version
+ sendcmpct.announce = False
+ test_node.send_and_ping(sendcmpct)
+ check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_headers is not None)
+
+ if old_node is not None:
+ # Verify that a peer using an older protocol version can receive
+ # announcements from this node.
+ sendcmpct.version = preferred_version-1
+ sendcmpct.announce = True
+ old_node.send_and_ping(sendcmpct)
+ # Header sync
+ old_node.request_headers_and_sync(locator=[tip])
+ check_announcement_of_new_block(node, old_node, lambda p: p.last_cmpctblock is not None)
+
+ # This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
+ def test_invalid_cmpctblock_message(self):
+ self.nodes[0].generate(101)
+ block = self.build_block_on_tip(self.nodes[0])
+
+ cmpct_block = P2PHeaderAndShortIDs()
+ cmpct_block.header = CBlockHeader(block)
+ cmpct_block.prefilled_txn_length = 1
+ # This index will be too high
+ prefilled_txn = PrefilledTransaction(1, block.vtx[0])
+ cmpct_block.prefilled_txn = [prefilled_txn]
+ self.test_node.send_and_ping(msg_cmpctblock(cmpct_block))
+ assert(int(self.nodes[0].getbestblockhash(), 16) == block.hashPrevBlock)
+
+ # Compare the generated shortids to what we expect based on BIP 152, given
+ # bitcoind's choice of nonce.
+ def test_compactblock_construction(self, node, test_node, version, use_witness_address):
+ # Generate a bunch of transactions.
+ node.generate(101)
+ num_transactions = 25
+ address = node.getnewaddress()
+ if use_witness_address:
+ # Want at least one segwit spend, so move all funds to
+ # a witness address.
+ address = node.addwitnessaddress(address)
+ value_to_send = node.getbalance()
+ node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
+ node.generate(1)
+
+ segwit_tx_generated = False
+ for i in range(num_transactions):
+ txid = node.sendtoaddress(address, 0.1)
+ hex_tx = node.gettransaction(txid)["hex"]
+ tx = FromHex(CTransaction(), hex_tx)
+ if not tx.wit.is_null():
+ segwit_tx_generated = True
+
+ if use_witness_address:
+ assert(segwit_tx_generated) # check that our test is not broken
+
+ # Wait until we've seen the block announcement for the resulting tip
+ tip = int(node.getbestblockhash(), 16)
+ assert(test_node.wait_for_block_announcement(tip))
+
+ # Make sure we will receive a fast-announce compact block
+ self.request_cb_announcements(test_node, node, version)
+
+ # Now mine a block, and look at the resulting compact block.
+ test_node.clear_block_announcement()
+ block_hash = int(node.generate(1)[0], 16)
+
+ # Store the raw block in our internal format.
+        block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))
+ [tx.calc_sha256() for tx in block.vtx]
+ block.rehash()
+
+ # Wait until the block was announced (via compact blocks)
+        assert(wait_until(test_node.received_block_announcement, timeout=30))
+
+ # Now fetch and check the compact block
+ header_and_shortids = None
+ with mininode_lock:
+ assert(test_node.last_cmpctblock is not None)
+ # Convert the on-the-wire representation to absolute indexes
+ header_and_shortids = HeaderAndShortIDs(test_node.last_cmpctblock.header_and_shortids)
+ self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
+
+ # Now fetch the compact block using a normal non-announce getdata
+ with mininode_lock:
+ test_node.clear_block_announcement()
+ inv = CInv(4, block_hash) # 4 == "CompactBlock"
+ test_node.send_message(msg_getdata([inv]))
+
+        assert(wait_until(test_node.received_block_announcement, timeout=30))
+
+ # Now fetch and check the compact block
+ header_and_shortids = None
+ with mininode_lock:
+ assert(test_node.last_cmpctblock is not None)
+ # Convert the on-the-wire representation to absolute indexes
+ header_and_shortids = HeaderAndShortIDs(test_node.last_cmpctblock.header_and_shortids)
+ self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
+
+ def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
+ # Check that we got the right block!
+ header_and_shortids.header.calc_sha256()
+ assert_equal(header_and_shortids.header.sha256, block_hash)
+
+ # Make sure the prefilled_txn appears to have included the coinbase
+ assert(len(header_and_shortids.prefilled_txn) >= 1)
+ assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
+
+ # Check that all prefilled_txn entries match what's in the block.
+ for entry in header_and_shortids.prefilled_txn:
+ entry.tx.calc_sha256()
+ # This checks the non-witness parts of the tx agree
+ assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
+
+ # And this checks the witness
+ wtxid = entry.tx.calc_sha256(True)
+ if version == 2:
+ assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
+ else:
+ # Shouldn't have received a witness
+ assert(entry.tx.wit.is_null())
+
+ # Check that the cmpctblock message announced all the transactions.
+ assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
+
+ # And now check that all the shortids are as expected as well.
+ # Determine the siphash keys to use.
+ [k0, k1] = header_and_shortids.get_siphash_keys()
+
+ index = 0
+ while index < len(block.vtx):
+ if (len(header_and_shortids.prefilled_txn) > 0 and
+ header_and_shortids.prefilled_txn[0].index == index):
+ # Already checked prefilled transactions above
+ header_and_shortids.prefilled_txn.pop(0)
+ else:
+ tx_hash = block.vtx[index].sha256
+ if version == 2:
+ tx_hash = block.vtx[index].calc_sha256(True)
+ shortid = calculate_shortid(k0, k1, tx_hash)
+ assert_equal(shortid, header_and_shortids.shortids[0])
+ header_and_shortids.shortids.pop(0)
+ index += 1
+
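+    # Editor's sketch (assumption: matches calculate_shortid() in
+    # test_framework.mininode): BIP 152 derives the two siphash keys from the
+    # first two little-endian 64-bit words of sha256(serialized header ||
+    # nonce), and each shortid is the low 6 bytes of SipHash-2-4 over the
+    # txid (version 1) or wtxid (version 2):
+    #
+    #   def shortid_sketch(k0, k1, tx_hash):
+    #       return siphash256(k0, k1, tx_hash) & 0x0000ffffffffffff
+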
+ # Test that bitcoind requests compact blocks when we announce new blocks
+ # via header or inv, and that responding to getblocktxn causes the block
+ # to be successfully reconstructed.
+    # After segwit activation, upgraded nodes will only make this request of
+    # cb-version-2, NODE_WITNESS peers; unupgraded nodes will still make this
+    # request of any peer that supports cb version 1.
+ def test_compactblock_requests(self, node, test_node, version, segwit):
+ # Try announcing a block with an inv or header, expect a compactblock
+ # request
+ for announce in ["inv", "header"]:
+ block = self.build_block_on_tip(node, segwit=segwit)
+ with mininode_lock:
+ test_node.last_getdata = None
+
+ if announce == "inv":
+ test_node.send_message(msg_inv([CInv(2, block.sha256)]))
+ success = wait_until(lambda: test_node.last_getheaders is not None, timeout=30)
+ assert(success)
+ test_node.send_header_for_blocks([block])
+ else:
+ test_node.send_header_for_blocks([block])
+ success = wait_until(lambda: test_node.last_getdata is not None, timeout=30)
+ assert(success)
+ assert_equal(len(test_node.last_getdata.inv), 1)
+ assert_equal(test_node.last_getdata.inv[0].type, 4)
+ assert_equal(test_node.last_getdata.inv[0].hash, block.sha256)
+
+ # Send back a compactblock message that omits the coinbase
+ comp_block = HeaderAndShortIDs()
+ comp_block.header = CBlockHeader(block)
+ comp_block.nonce = 0
+ [k0, k1] = comp_block.get_siphash_keys()
+ coinbase_hash = block.vtx[0].sha256
+ if version == 2:
+ coinbase_hash = block.vtx[0].calc_sha256(True)
+ comp_block.shortids = [
+ calculate_shortid(k0, k1, coinbase_hash) ]
+ test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+ assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
+ # Expect a getblocktxn message.
+ with mininode_lock:
+ assert(test_node.last_getblocktxn is not None)
+ absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute()
+ assert_equal(absolute_indexes, [0]) # should be a coinbase request
+
+ # Send the coinbase, and verify that the tip advances.
+ if version == 2:
+ msg = msg_witness_blocktxn()
+ else:
+ msg = msg_blocktxn()
+ msg.block_transactions.blockhash = block.sha256
+ msg.block_transactions.transactions = [block.vtx[0]]
+ test_node.send_and_ping(msg)
+ assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+ # Create a chain of transactions from given utxo, and add to a new block.
+ def build_block_with_transactions(self, node, utxo, num_transactions):
+ block = self.build_block_on_tip(node)
+
+ for i in range(num_transactions):
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
+ tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
+ tx.rehash()
+ utxo = [tx.sha256, 0, tx.vout[0].nValue]
+ block.vtx.append(tx)
+
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.solve()
+ return block
+
+ # Test that we only receive getblocktxn requests for transactions that the
+ # node needs, and that responding to them causes the block to be
+ # reconstructed.
+ def test_getblocktxn_requests(self, node, test_node, version):
+ with_witness = (version==2)
+
+ def test_getblocktxn_response(compact_block, peer, expected_result):
+ msg = msg_cmpctblock(compact_block.to_p2p())
+ peer.send_and_ping(msg)
+ with mininode_lock:
+ assert(peer.last_getblocktxn is not None)
+ absolute_indexes = peer.last_getblocktxn.block_txn_request.to_absolute()
+ assert_equal(absolute_indexes, expected_result)
+
+ def test_tip_after_message(node, peer, msg, tip):
+ peer.send_and_ping(msg)
+ assert_equal(int(node.getbestblockhash(), 16), tip)
+
+ # First try announcing compactblocks that won't reconstruct, and verify
+ # that we receive getblocktxn messages back.
+ utxo = self.utxos.pop(0)
+
+ block = self.build_block_with_transactions(node, utxo, 5)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+ comp_block = HeaderAndShortIDs()
+ comp_block.initialize_from_block(block, use_witness=with_witness)
+
+ test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
+
+ msg_bt = msg_blocktxn()
+ if with_witness:
+ msg_bt = msg_witness_blocktxn() # serialize with witnesses
+ msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
+ test_tip_after_message(node, test_node, msg_bt, block.sha256)
+
+ utxo = self.utxos.pop(0)
+ block = self.build_block_with_transactions(node, utxo, 5)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+
+ # Now try interspersing the prefilled transactions
+ comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
+ test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
+ msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
+ test_tip_after_message(node, test_node, msg_bt, block.sha256)
+
+ # Now try giving one transaction ahead of time.
+ utxo = self.utxos.pop(0)
+ block = self.build_block_with_transactions(node, utxo, 5)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+ test_node.send_and_ping(msg_tx(block.vtx[1]))
+ assert(block.vtx[1].hash in node.getrawmempool())
+
+ # Prefill 4 out of the 6 transactions, and verify that only the one
+ # that was not in the mempool is requested.
+ comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
+ test_getblocktxn_response(comp_block, test_node, [5])
+
+ msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
+ test_tip_after_message(node, test_node, msg_bt, block.sha256)
+
+ # Now provide all transactions to the node before the block is
+ # announced and verify reconstruction happens immediately.
+ utxo = self.utxos.pop(0)
+ block = self.build_block_with_transactions(node, utxo, 10)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+ for tx in block.vtx[1:]:
+ test_node.send_message(msg_tx(tx))
+ test_node.sync_with_ping()
+ # Make sure all transactions were accepted.
+ mempool = node.getrawmempool()
+ for tx in block.vtx[1:]:
+ assert(tx.hash in mempool)
+
+ # Clear out last request.
+ with mininode_lock:
+ test_node.last_getblocktxn = None
+
+ # Send compact block
+ comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
+ test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
+ with mininode_lock:
+ # Shouldn't have gotten a request for any transaction
+ assert(test_node.last_getblocktxn is None)
+
+ # Incorrectly responding to a getblocktxn shouldn't cause the block to be
+ # permanently failed.
+ def test_incorrect_blocktxn_response(self, node, test_node, version):
+ if (len(self.utxos) == 0):
+ self.make_utxos()
+ utxo = self.utxos.pop(0)
+
+ block = self.build_block_with_transactions(node, utxo, 10)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+ # Relay the first 5 transactions from the block in advance
+ for tx in block.vtx[1:6]:
+ test_node.send_message(msg_tx(tx))
+ test_node.sync_with_ping()
+ # Make sure all transactions were accepted.
+ mempool = node.getrawmempool()
+ for tx in block.vtx[1:6]:
+ assert(tx.hash in mempool)
+
+ # Send compact block
+ comp_block = HeaderAndShortIDs()
+ comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
+ test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+ absolute_indexes = []
+ with mininode_lock:
+ assert(test_node.last_getblocktxn is not None)
+ absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute()
+ assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
+
+ # Now give an incorrect response.
+ # Note that it's possible for bitcoind to be smart enough to know we're
+ # lying, since it could check to see if the shortid matches what we're
+ # sending, and eg disconnect us for misbehavior. If that behavior
+ # change were made, we could just modify this test by having a
+ # different peer provide the block further down, so that we're still
+ # verifying that the block isn't marked bad permanently. This is good
+ # enough for now.
+ msg = msg_blocktxn()
+ if version==2:
+ msg = msg_witness_blocktxn()
+ msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
+ test_node.send_and_ping(msg)
+
+ # Tip should not have updated
+ assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
+
+ # We should receive a getdata request
+ success = wait_until(lambda: test_node.last_getdata is not None, timeout=10)
+ assert(success)
+ assert_equal(len(test_node.last_getdata.inv), 1)
+ assert(test_node.last_getdata.inv[0].type == 2 or test_node.last_getdata.inv[0].type == 2|MSG_WITNESS_FLAG)
+ assert_equal(test_node.last_getdata.inv[0].hash, block.sha256)
+
+ # Deliver the block
+ if version==2:
+ test_node.send_and_ping(msg_witness_block(block))
+ else:
+ test_node.send_and_ping(msg_block(block))
+ assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+ def test_getblocktxn_handler(self, node, test_node, version):
+ # bitcoind will not send blocktxn responses for blocks whose height is
+ # more than 10 blocks deep.
+ MAX_GETBLOCKTXN_DEPTH = 10
+ chain_height = node.getblockcount()
+ current_height = chain_height
+ while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
+ block_hash = node.getblockhash(current_height)
+ block = FromHex(CBlock(), node.getblock(block_hash, False))
+
+ msg = msg_getblocktxn()
+ msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
+ num_to_request = random.randint(1, len(block.vtx))
+ msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
+ test_node.send_message(msg)
+ success = wait_until(lambda: test_node.last_blocktxn is not None, timeout=10)
+ assert(success)
+
+ [tx.calc_sha256() for tx in block.vtx]
+ with mininode_lock:
+ assert_equal(test_node.last_blocktxn.block_transactions.blockhash, int(block_hash, 16))
+ all_indices = msg.block_txn_request.to_absolute()
+ for index in all_indices:
+ tx = test_node.last_blocktxn.block_transactions.transactions.pop(0)
+ tx.calc_sha256()
+ assert_equal(tx.sha256, block.vtx[index].sha256)
+ if version == 1:
+ # Witnesses should have been stripped
+ assert(tx.wit.is_null())
+ else:
+ # Check that the witness matches
+ assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
+ test_node.last_blocktxn = None
+ current_height -= 1
+
+ # Next request should send a full block response, as we're past the
+ # allowed depth for a blocktxn response.
+ block_hash = node.getblockhash(current_height)
+ msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
+ with mininode_lock:
+ test_node.last_block = None
+ test_node.last_blocktxn = None
+ test_node.send_and_ping(msg)
+ with mininode_lock:
+ test_node.last_block.block.calc_sha256()
+ assert_equal(test_node.last_block.block.sha256, int(block_hash, 16))
+ assert_equal(test_node.last_blocktxn, None)
+
+ def test_compactblocks_not_at_tip(self, node, test_node):
+ # Test that requesting old compactblocks doesn't work.
+ MAX_CMPCTBLOCK_DEPTH = 5
+ new_blocks = []
+ for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
+ test_node.clear_block_announcement()
+ new_blocks.append(node.generate(1)[0])
+ wait_until(test_node.received_block_announcement, timeout=30)
+
+ test_node.clear_block_announcement()
+ test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
+ success = wait_until(lambda: test_node.last_cmpctblock is not None, timeout=30)
+ assert(success)
+
+ test_node.clear_block_announcement()
+ node.generate(1)
+ wait_until(test_node.received_block_announcement, timeout=30)
+ test_node.clear_block_announcement()
+ with mininode_lock:
+ test_node.last_block = None
+ test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
+ success = wait_until(lambda: test_node.last_block is not None, timeout=30)
+ assert(success)
+ with mininode_lock:
+ test_node.last_block.block.calc_sha256()
+ assert_equal(test_node.last_block.block.sha256, int(new_blocks[0], 16))
+
+ # Generate an old compactblock, and verify that it's not accepted.
+ cur_height = node.getblockcount()
+ hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
+ block = self.build_block_on_tip(node)
+ block.hashPrevBlock = hashPrevBlock
+ block.solve()
+
+ comp_block = HeaderAndShortIDs()
+ comp_block.initialize_from_block(block)
+ test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+
+ tips = node.getchaintips()
+ found = False
+ for x in tips:
+ if x["hash"] == block.hash:
+ assert_equal(x["status"], "headers-only")
+ found = True
+ break
+ assert(found)
+
+ # Requesting this block via getblocktxn should silently fail
+ # (to avoid fingerprinting attacks).
+ msg = msg_getblocktxn()
+ msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
+ with mininode_lock:
+ test_node.last_blocktxn = None
+ test_node.send_and_ping(msg)
+ with mininode_lock:
+ assert(test_node.last_blocktxn is None)
+
+ def activate_segwit(self, node):
+ node.generate(144*3)
+ assert_equal(get_bip9_status(node, "segwit")["status"], 'active')
+
+ def test_end_to_end_block_relay(self, node, listeners):
+ utxo = self.utxos.pop(0)
+
+ block = self.build_block_with_transactions(node, utxo, 10)
+
+ [l.clear_block_announcement() for l in listeners]
+
+ # ToHex() won't serialize with witness, but this block has no witnesses
+ # anyway. TODO: repeat this test with witness tx's to a segwit node.
+ node.submitblock(ToHex(block))
+
+ for l in listeners:
+ wait_until(lambda: l.received_block_announcement(), timeout=30)
+ with mininode_lock:
+ for l in listeners:
+ assert(l.last_cmpctblock is not None)
+ l.last_cmpctblock.header_and_shortids.header.calc_sha256()
+ assert_equal(l.last_cmpctblock.header_and_shortids.header.sha256, block.sha256)
+
+    # Test that we don't get disconnected if we relay a compact block with a
+    # valid header, but invalid transactions.
+ def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit):
+ assert(len(self.utxos))
+ utxo = self.utxos[0]
+
+ block = self.build_block_with_transactions(node, utxo, 5)
+ del block.vtx[3]
+ block.hashMerkleRoot = block.calc_merkle_root()
+ if use_segwit:
+ # If we're testing with segwit, also drop the coinbase witness,
+ # but include the witness commitment.
+ add_witness_commitment(block)
+ block.vtx[0].wit.vtxinwit = []
+ block.solve()
+
+ # Now send the compact block with all transactions prefilled, and
+ # verify that we don't get disconnected.
+ comp_block = HeaderAndShortIDs()
+ comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
+ msg = msg_cmpctblock(comp_block.to_p2p())
+ test_node.send_and_ping(msg)
+
+ # Check that the tip didn't advance
+        assert(int(node.getbestblockhash(), 16) != block.sha256)
+ test_node.sync_with_ping()
+
+    # Helper for enabling compact block announcements.
+    # Send the sendcmpct request and sync headers.
+ def request_cb_announcements(self, peer, node, version):
+ tip = node.getbestblockhash()
+ peer.get_headers(locator=[int(tip, 16)], hashstop=0)
+
+ msg = msg_sendcmpct()
+ msg.version = version
+ msg.announce = True
+ peer.send_and_ping(msg)
+
+ def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
+ assert(len(self.utxos))
+
+ def announce_cmpct_block(node, peer):
+ utxo = self.utxos.pop(0)
+ block = self.build_block_with_transactions(node, utxo, 5)
+
+ cmpct_block = HeaderAndShortIDs()
+ cmpct_block.initialize_from_block(block)
+ msg = msg_cmpctblock(cmpct_block.to_p2p())
+ peer.send_and_ping(msg)
+ with mininode_lock:
+ assert(peer.last_getblocktxn is not None)
+ return block, cmpct_block
+
+ block, cmpct_block = announce_cmpct_block(node, stalling_peer)
+
+ for tx in block.vtx[1:]:
+ delivery_peer.send_message(msg_tx(tx))
+ delivery_peer.sync_with_ping()
+ mempool = node.getrawmempool()
+ for tx in block.vtx[1:]:
+ assert(tx.hash in mempool)
+
+ delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
+ assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+
+ # Now test that delivering an invalid compact block won't break relay
+
+ block, cmpct_block = announce_cmpct_block(node, stalling_peer)
+ for tx in block.vtx[1:]:
+ delivery_peer.send_message(msg_tx(tx))
+ delivery_peer.sync_with_ping()
+
+ cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ]
+ cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
+
+ cmpct_block.use_witness = True
+ delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
+ assert(int(node.getbestblockhash(), 16) != block.sha256)
+
+ msg = msg_blocktxn()
+ msg.block_transactions.blockhash = block.sha256
+ msg.block_transactions.transactions = block.vtx[1:]
+ stalling_peer.send_and_ping(msg)
+ assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+ def run_test(self):
+ # Setup the p2p connections and start up the network thread.
+ self.test_node = TestNode()
+ self.segwit_node = TestNode()
+ self.old_node = TestNode() # version 1 peer <--> segwit node
+
+ connections = []
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
+ connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
+ self.segwit_node, services=NODE_NETWORK|NODE_WITNESS))
+ connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
+ self.old_node, services=NODE_NETWORK))
+ self.test_node.add_connection(connections[0])
+ self.segwit_node.add_connection(connections[1])
+ self.old_node.add_connection(connections[2])
+
+ NetworkThread().start() # Start up network handling in another thread
+
+ # Test logic begins here
+ self.test_node.wait_for_verack()
+
+ # We will need UTXOs to construct transactions in later tests.
+ self.make_utxos()
+
+ self.log.info("Running tests, pre-segwit activation:")
+
+ self.log.info("Testing SENDCMPCT p2p message... ")
+ self.test_sendcmpct(self.nodes[0], self.test_node, 1)
+ sync_blocks(self.nodes)
+ self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
+ sync_blocks(self.nodes)
+
+ self.log.info("Testing compactblock construction...")
+ self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
+ sync_blocks(self.nodes)
+ self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
+ sync_blocks(self.nodes)
+
+ self.log.info("Testing compactblock requests... ")
+ self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False)
+ sync_blocks(self.nodes)
+ self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False)
+ sync_blocks(self.nodes)
+
+ self.log.info("Testing getblocktxn requests...")
+ self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
+ sync_blocks(self.nodes)
+ self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
+ sync_blocks(self.nodes)
+
+ self.log.info("Testing getblocktxn handler...")
+ self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
+ sync_blocks(self.nodes)
+ self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
+ self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
+ sync_blocks(self.nodes)
+
+ self.log.info("Testing compactblock requests/announcements not at chain tip...")
+ self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
+ sync_blocks(self.nodes)
+ self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
+ self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
+ sync_blocks(self.nodes)
+
+ self.log.info("Testing handling of incorrect blocktxn responses...")
+ self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
+ sync_blocks(self.nodes)
+ self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2)
+ sync_blocks(self.nodes)
+
+ # End-to-end block relay tests
+ self.log.info("Testing end-to-end block relay...")
+ self.request_cb_announcements(self.test_node, self.nodes[0], 1)
+ self.request_cb_announcements(self.old_node, self.nodes[1], 1)
+ self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
+ self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
+ self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
+
+ self.log.info("Testing handling of invalid compact blocks...")
+ self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
+ self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
+ self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)
+
+ self.log.info("Testing reconstructing compact blocks from all peers...")
+ self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node)
+ sync_blocks(self.nodes)
+
+ # Advance to segwit activation
+ self.log.info("Advancing to segwit activation")
+ self.activate_segwit(self.nodes[1])
+ self.log.info("Running tests, post-segwit activation...")
+
+ self.log.info("Testing compactblock construction...")
+ self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True)
+ self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True)
+ sync_blocks(self.nodes)
+
+ self.log.info("Testing compactblock requests (unupgraded node)... ")
+ self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)
+
+ self.log.info("Testing getblocktxn requests (unupgraded node)...")
+ self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
+
+        # Need to manually sync node0 and node1, because after segwit
+        # activation node1 will not download blocks from node0.
+ self.log.info("Syncing nodes...")
+ assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
+ while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()):
+ block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1)
+ self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False))
+ assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
+
+ self.log.info("Testing compactblock requests (segwit node)... ")
+ self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True)
+
+ self.log.info("Testing getblocktxn requests (segwit node)...")
+ self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
+ sync_blocks(self.nodes)
+
+ self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
+ self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
+ self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
+
+ # Test that if we submitblock to node1, we'll get a compact block
+ # announcement to all peers.
+ # (Post-segwit activation, blocks won't propagate from node0 to node1
+ # automatically, so don't bother testing a block announced to node0.)
+ self.log.info("Testing end-to-end block relay...")
+ self.request_cb_announcements(self.test_node, self.nodes[0], 1)
+ self.request_cb_announcements(self.old_node, self.nodes[1], 1)
+ self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
+ self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
+
+ self.log.info("Testing handling of invalid compact blocks...")
+ self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
+ self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True)
+ self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True)
+
+ self.log.info("Testing invalid index in cmpctblock message...")
+ self.test_invalid_cmpctblock_message()
+
+
+if __name__ == '__main__':
+ CompactBlocksTest().main()
diff --git a/test/functional/p2p-feefilter.py b/test/functional/p2p-feefilter.py
new file mode 100755
index 0000000000..d8f07700d0
--- /dev/null
+++ b/test/functional/p2p-feefilter.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test processing of feefilter messages."""
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+import time
+
+
+def hashToHex(hash):
+ return format(hash, '064x')
+
+# Wait up to 60 secs to see if the testnode has received all the expected invs
+def allInvsMatch(invsExpected, testnode):
+ for x in range(60):
+ with mininode_lock:
+ if (sorted(invsExpected) == sorted(testnode.txinvs)):
+ return True
+ time.sleep(1)
+ return False
+
+# TestNode: bare-bones "peer". Used to track which invs are received from a node
+# and to send the node feefilter messages.
+class TestNode(SingleNodeConnCB):
+ def __init__(self):
+ SingleNodeConnCB.__init__(self)
+ self.txinvs = []
+
+ def on_inv(self, conn, message):
+ for i in message.inv:
+ if (i.type == 1):
+ self.txinvs.append(hashToHex(i.hash))
+
+ def clear_invs(self):
+ with mininode_lock:
+ self.txinvs = []
+
+ def send_filter(self, feerate):
+ self.send_message(msg_feefilter(feerate))
+ self.sync_with_ping()
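+
+    # Editor's note: msg_feefilter carries its threshold in satoshis per
+    # kilobyte, so send_filter(15000) in the test below corresponds to the
+    # "15 sat/byte" rate referenced in the comments.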
+
+class FeeFilterTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 2
+ self.setup_clean_chain = False
+
+ def setup_network(self):
+ # Node1 will be used to generate txs which should be relayed from Node0
+ # to our test node
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir))
+ self.nodes.append(start_node(1, self.options.tmpdir))
+ connect_nodes(self.nodes[0], 1)
+
+ def run_test(self):
+ node1 = self.nodes[1]
+ node0 = self.nodes[0]
+ # Get out of IBD
+ node1.generate(1)
+ sync_blocks(self.nodes)
+
+ # Setup the p2p connections and start up the network thread.
+ test_node = TestNode()
+ connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
+ test_node.add_connection(connection)
+ NetworkThread().start()
+ test_node.wait_for_verack()
+
+ # Test that invs are received for all txs at feerate of 20 sat/byte
+ node1.settxfee(Decimal("0.00020000"))
+ txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
+ assert(allInvsMatch(txids, test_node))
+ test_node.clear_invs()
+
+ # Set a filter of 15 sat/byte
+ test_node.send_filter(15000)
+
+ # Test that txs are still being received (paying 20 sat/byte)
+ txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
+ assert(allInvsMatch(txids, test_node))
+ test_node.clear_invs()
+
+ # Change tx fee rate to 10 sat/byte and test they are no longer received
+ node1.settxfee(Decimal("0.00010000"))
+ [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
+ sync_mempools(self.nodes) # must be sure node 0 has received all txs
+
+        # Send one transaction from node0 that should be received, so that
+        # we can sync the test on receipt (if node1's txs were relayed, they'd
+ # be received by the time this node0 tx is received). This is
+ # unfortunately reliant on the current relay behavior where we batch up
+ # to 35 entries in an inv, which means that when this next transaction
+ # is eligible for relay, the prior transactions from node1 are eligible
+ # as well.
+ node0.settxfee(Decimal("0.00020000"))
+ txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
+ assert(allInvsMatch(txids, test_node))
+ test_node.clear_invs()
+
+ # Remove fee filter and check that txs are received again
+ test_node.send_filter(0)
+ txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
+ assert(allInvsMatch(txids, test_node))
+ test_node.clear_invs()
+
+if __name__ == '__main__':
+ FeeFilterTest().main()
diff --git a/test/functional/p2p-fullblocktest.py b/test/functional/p2p-fullblocktest.py
new file mode 100755
index 0000000000..274dbb8a92
--- /dev/null
+++ b/test/functional/p2p-fullblocktest.py
@@ -0,0 +1,1290 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test block processing.
+
+This reimplements tests from the bitcoinj/FullBlockTestGenerator used
+by the pull-tester.
+
+We use the testing framework in which we expect a particular answer from
+each test.
+"""
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.util import *
+from test_framework.comptool import TestManager, TestInstance, RejectResult
+from test_framework.blocktools import *
+import time
+from test_framework.key import CECKey
+from test_framework.script import *
+import struct
+
+class PreviousSpendableOutput(object):
+ def __init__(self, tx = CTransaction(), n = -1):
+ self.tx = tx
+ self.n = n # the output we're spending
+
+# Use this class for tests that require behavior other than normal "mininode" behavior.
+# For now, it is used to serialize a bloated varint (b64).
+class CBrokenBlock(CBlock):
+ def __init__(self, header=None):
+ super(CBrokenBlock, self).__init__(header)
+
+ def initialize(self, base_block):
+ self.vtx = copy.deepcopy(base_block.vtx)
+ self.hashMerkleRoot = self.calc_merkle_root()
+
+ def serialize(self):
+ r = b""
+ r += super(CBlock, self).serialize()
+ r += struct.pack("<BQ", 255, len(self.vtx))
+ for tx in self.vtx:
+ r += tx.serialize()
+ return r
+
+ def normal_serialize(self):
+ r = b""
+ r += super(CBrokenBlock, self).serialize()
+ return r
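+
+# Editor's note: Bitcoin's CompactSize encoding must be minimal; the 0xff
+# prefix followed by a little-endian uint64 (struct.pack("<BQ", 255, n)) is
+# only valid for counts >= 2**32. serialize() above always emits that widest
+# form, so the block carries a non-canonical ("bloated") length varint that
+# the node under test is expected to reject (test b64).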
+
+class FullBlockTest(ComparisonTestFramework):
+
+ # This test can be run against a single node, checking each block against an
+ # expected outcome, or against two nodes, comparing their responses. To do
+ # only the comparison, remove the "outcome" from each TestInstance object.
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 1
+ self.block_heights = {}
+ self.coinbase_key = CECKey()
+ self.coinbase_key.set_secretbytes(b"horsebattery")
+ self.coinbase_pubkey = self.coinbase_key.get_pubkey()
+ self.tip = None
+ self.blocks = {}
+
+ def add_options(self, parser):
+ super().add_options(parser)
+ parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True)
+
+ def run_test(self):
+ self.test = TestManager(self, self.options.tmpdir)
+ self.test.add_all_connections(self.nodes)
+ NetworkThread().start() # Start up network handling in another thread
+ self.test.run()
+
+ def add_transactions_to_block(self, block, tx_list):
+ [ tx.rehash() for tx in tx_list ]
+ block.vtx.extend(tx_list)
+
+ # this is a little handier to use than the version in blocktools.py
+ def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
+ tx = create_transaction(spend_tx, n, b"", value, script)
+ return tx
+
+ # sign a transaction, using the key we know about
+ # this signs input 0 in tx, which is assumed to be spending output n in spend_tx
+ def sign_tx(self, tx, spend_tx, n):
+ scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
+ if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
+ tx.vin[0].scriptSig = CScript()
+ return
+ (sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
+ tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
+
+ def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
+ tx = self.create_tx(spend_tx, n, value, script)
+ self.sign_tx(tx, spend_tx, n)
+ tx.rehash()
+ return tx
+
+ def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
+ if self.tip is None:
+ base_block_hash = self.genesis_hash
+ block_time = int(time.time())+1
+ else:
+ base_block_hash = self.tip.sha256
+ block_time = self.tip.nTime + 1
+ # First create the coinbase
+ height = self.block_heights[base_block_hash] + 1
+ coinbase = create_coinbase(height, self.coinbase_pubkey)
+ coinbase.vout[0].nValue += additional_coinbase_value
+ coinbase.rehash()
+ if spend is None:
+ block = create_block(base_block_hash, coinbase, block_time)
+ else:
+ coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
+ coinbase.rehash()
+ block = create_block(base_block_hash, coinbase, block_time)
+ tx = create_transaction(spend.tx, spend.n, b"", 1, script) # spend 1 satoshi
+ self.sign_tx(tx, spend.tx, spend.n)
+ self.add_transactions_to_block(block, [tx])
+ block.hashMerkleRoot = block.calc_merkle_root()
+ if solve:
+ block.solve()
+ self.tip = block
+ self.block_heights[block.sha256] = height
+ assert number not in self.blocks
+ self.blocks[number] = block
+ return block
+
+ def get_tests(self):
+ self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
+ self.block_heights[self.genesis_hash] = 0
+ spendable_outputs = []
+
+ # save the current tip so it can be spent by a later block
+ def save_spendable_output():
+ spendable_outputs.append(self.tip)
+
+ # get an output that we previously marked as spendable
+ def get_spendable_output():
+ return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
+
+ # returns a test case that asserts that the current tip was accepted
+ def accepted():
+ return TestInstance([[self.tip, True]])
+
+ # returns a test case that asserts that the current tip was rejected
+ def rejected(reject = None):
+ if reject is None:
+ return TestInstance([[self.tip, False]])
+ else:
+ return TestInstance([[self.tip, reject]])
+
+ # move the tip back to a previous block
+ def tip(number):
+ self.tip = self.blocks[number]
+
+ # adds transactions to the block and updates state
+ def update_block(block_number, new_transactions):
+ block = self.blocks[block_number]
+ self.add_transactions_to_block(block, new_transactions)
+ old_sha256 = block.sha256
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.solve()
+ # Update the internal state just like in next_block
+ self.tip = block
+ if block.sha256 != old_sha256:
+ self.block_heights[block.sha256] = self.block_heights[old_sha256]
+ del self.block_heights[old_sha256]
+ self.blocks[block_number] = block
+ return block
+
+ # shorthand for functions
+ block = self.next_block
+ create_tx = self.create_tx
+ create_and_sign_tx = self.create_and_sign_transaction
+
+ # these must be updated if consensus changes
+ MAX_BLOCK_SIGOPS = 20000
+
+
+ # Create a new block
+ block(0)
+ save_spendable_output()
+ yield accepted()
+
+
+ # Now we need that block to mature so we can spend the coinbase.
+ test = TestInstance(sync_every_block=False)
+ for i in range(99):
+ block(5000 + i)
+ test.blocks_and_transactions.append([self.tip, True])
+ save_spendable_output()
+ yield test
+
+ # collect spendable outputs now to avoid cluttering the code later on
+ out = []
+ for i in range(33):
+ out.append(get_spendable_output())
+
+ # Start by building a couple of blocks on top (which output is spent is
+ # in parentheses):
+ # genesis -> b1 (0) -> b2 (1)
+ block(1, spend=out[0])
+ save_spendable_output()
+ yield accepted()
+
+ block(2, spend=out[1])
+ yield accepted()
+ save_spendable_output()
+
+ # so fork like this:
+ #
+ # genesis -> b1 (0) -> b2 (1)
+ # \-> b3 (1)
+ #
+ # Nothing should happen at this point. We saw b2 first so it takes priority.
+ tip(1)
+ b3 = block(3, spend=out[1])
+ txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
+ yield rejected()
+
+
+ # Now we add another block to make the alternative chain longer.
+ #
+ # genesis -> b1 (0) -> b2 (1)
+ # \-> b3 (1) -> b4 (2)
+ block(4, spend=out[2])
+ yield accepted()
+
+
+ # ... and back to the first chain.
+ # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+ # \-> b3 (1) -> b4 (2)
+ tip(2)
+ block(5, spend=out[2])
+ save_spendable_output()
+ yield rejected()
+
+ block(6, spend=out[3])
+ yield accepted()
+
+ # Try to create a fork that double-spends
+ # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+ # \-> b7 (2) -> b8 (4)
+ # \-> b3 (1) -> b4 (2)
+ tip(5)
+ block(7, spend=out[2])
+ yield rejected()
+
+ block(8, spend=out[4])
+ yield rejected()
+
+ # Try to create a block that has too much fee
+ # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+ # \-> b9 (4)
+ # \-> b3 (1) -> b4 (2)
+ tip(6)
+ block(9, spend=out[4], additional_coinbase_value=1)
+ yield rejected(RejectResult(16, b'bad-cb-amount'))
+
+ # Create a fork that ends in a block with too much fee (the one that causes the reorg)
+ # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+ # \-> b10 (3) -> b11 (4)
+ # \-> b3 (1) -> b4 (2)
+ tip(5)
+ block(10, spend=out[3])
+ yield rejected()
+
+ block(11, spend=out[4], additional_coinbase_value=1)
+ yield rejected(RejectResult(16, b'bad-cb-amount'))
+
+
+ # Try again, but with a valid fork first
+ # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+ # \-> b12 (3) -> b13 (4) -> b14 (5)
+ # (b12 added last)
+ # \-> b3 (1) -> b4 (2)
+ tip(5)
+ b12 = block(12, spend=out[3])
+ save_spendable_output()
+ b13 = block(13, spend=out[4])
+ # Deliver the block header for b12, and the block b13.
+ # b13 should be accepted but the tip won't advance until b12 is delivered.
+ yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
+
+ save_spendable_output()
+ # b14 is invalid, but the node won't know that until it tries to connect
+ # Tip still can't advance because b12 is missing
+ block(14, spend=out[5], additional_coinbase_value=1)
+ yield rejected()
+
+ yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
+
+ # Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
+ # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+ # \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
+ # \-> b3 (1) -> b4 (2)
+
+ # Test that a block with a lot of checksigs is okay
+ lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
+ tip(13)
+ block(15, spend=out[5], script=lots_of_checksigs)
+ yield accepted()
+ save_spendable_output()
+
+
+ # Test that a block with too many checksigs is rejected
+ too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
+ block(16, spend=out[6], script=too_many_checksigs)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+
+ # Attempt to spend a transaction created on a different fork
+ # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+ # \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
+ # \-> b3 (1) -> b4 (2)
+ tip(15)
+ block(17, spend=txout_b3)
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # Attempt to spend a transaction created on a different fork (on a fork this time)
+ # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+ # \-> b12 (3) -> b13 (4) -> b15 (5)
+ # \-> b18 (b3.vtx[1]) -> b19 (6)
+ # \-> b3 (1) -> b4 (2)
+ tip(13)
+ block(18, spend=txout_b3)
+ yield rejected()
+
+ block(19, spend=out[6])
+ yield rejected()
+
+ # Attempt to spend a coinbase at depth too low
+ # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+ # \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
+ # \-> b3 (1) -> b4 (2)
+ tip(15)
+ block(20, spend=out[7])
+ yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))
+
+ # Attempt to spend a coinbase at depth too low (on a fork this time)
+ # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+ # \-> b12 (3) -> b13 (4) -> b15 (5)
+ # \-> b21 (6) -> b22 (5)
+ # \-> b3 (1) -> b4 (2)
+ tip(13)
+ block(21, spend=out[6])
+ yield rejected()
+
+ block(22, spend=out[5])
+ yield rejected()
+
+ # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure it's accepted or rejected accordingly
+ # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+ # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
+ # \-> b24 (6) -> b25 (7)
+ # \-> b3 (1) -> b4 (2)
+ tip(15)
+ b23 = block(23, spend=out[6])
+ tx = CTransaction()
+ script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
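+ # (The 69 is the rest of this tx's serialization: version 4 + vin count 1 +
+ # outpoint 36 + empty scriptSig length 1 + sequence 4 + vout count 1 +
+ # value 8 + scriptPubKey length varint 5 + PUSHDATA4 opcode/length 5 +
+ # locktime 4; the assert below verifies the math.)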
+ script_output = CScript([b'\x00' * script_length])
+ tx.vout.append(CTxOut(0, script_output))
+ tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
+ b23 = update_block(23, [tx])
+ # Make sure the math above worked out to produce a max-sized block
+ assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
+ yield accepted()
+ save_spendable_output()
+
+ # Make the next block one byte bigger and check that it fails
+ tip(15)
+ b24 = block(24, spend=out[6])
+ script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
+ script_output = CScript([b'\x00' * (script_length+1)])
+ tx.vout = [CTxOut(0, script_output)]
+ b24 = update_block(24, [tx])
+ assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1)
+ yield rejected(RejectResult(16, b'bad-blk-length'))
+
+ block(25, spend=out[7])
+ yield rejected()
+
+ # Create blocks with a coinbase input script size out of range
+ # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
+ # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
+ # \-> ... (6) -> ... (7)
+ # \-> b3 (1) -> b4 (2)
+ tip(15)
+ b26 = block(26, spend=out[6])
+ b26.vtx[0].vin[0].scriptSig = b'\x00'
+ b26.vtx[0].rehash()
+ # update_block causes the merkle root to get updated, even with no new
+ # transactions, and updates the required state.
+ b26 = update_block(26, [])
+ yield rejected(RejectResult(16, b'bad-cb-length'))
+
+ # Extend the b26 chain to make sure bitcoind isn't accepting b26
+ b27 = block(27, spend=out[7])
+ yield rejected(RejectResult(0, b'bad-prevblk'))
+
+ # Now try a too-large-coinbase script
+ tip(15)
+ b28 = block(28, spend=out[6])
+ b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
+ b28.vtx[0].rehash()
+ b28 = update_block(28, [])
+ yield rejected(RejectResult(16, b'bad-cb-length'))
+
+ # Extend the b28 chain to make sure bitcoind isn't accepting b28
+ b29 = block(29, spend=out[7])
+ yield rejected(RejectResult(0, b'bad-prevblk'))
+
+ # b30 has a max-sized coinbase scriptSig.
+ tip(23)
+ b30 = block(30)
+ b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
+ b30.vtx[0].rehash()
+ b30 = update_block(30, [])
+ yield accepted()
+ save_spendable_output()
+
+ # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
+ #
+ # genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
+ # \-> b36 (11)
+ # \-> b34 (10)
+ # \-> b32 (9)
+ #
+
+ # MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
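+ # Worked out: (MAX_BLOCK_SIGOPS - 1) // 20 = 999 CHECKMULTISIGs = 19980 sigops,
+ # plus 19 CHECKSIGs = 19999 in this script, plus the coinbase's own sigop = 20000 exactly.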
+ lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
+ b31 = block(31, spend=out[8], script=lots_of_multisigs)
+ assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
+ yield accepted()
+ save_spendable_output()
+
+ # this goes over the limit because the coinbase has one sigop
+ too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
+ b32 = block(32, spend=out[9], script=too_many_multisigs)
+ assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+
+ # CHECKMULTISIGVERIFY
+ tip(31)
+ lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
+ block(33, spend=out[9], script=lots_of_multisigs)
+ yield accepted()
+ save_spendable_output()
+
+ too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
+ block(34, spend=out[10], script=too_many_multisigs)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+
+ # CHECKSIGVERIFY
+ tip(33)
+ lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
+ b35 = block(35, spend=out[10], script=lots_of_checksigs)
+ yield accepted()
+ save_spendable_output()
+
+ too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
+ block(36, spend=out[11], script=too_many_checksigs)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+
+ # Check spending of a transaction in a block which failed to connect
+ #
+ # b6 (3)
+ # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
+ # \-> b37 (11)
+ # \-> b38 (11/37)
+ #
+
+ # save 37's spendable output, but then double-spend out11 to invalidate the block
+ tip(35)
+ b37 = block(37, spend=out[11])
+ txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0)
+ tx = create_and_sign_tx(out[11].tx, out[11].n, 0)
+ b37 = update_block(37, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # attempt to spend b37's first non-coinbase tx; since b37 failed to connect, its outputs were never created
+ tip(35)
+ block(38, spend=txout_b37)
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # Check P2SH SigOp counting
+ #
+ #
+ # 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
+ # \-> b40 (12)
+ #
+ # b39 - create some P2SH outputs that will require 6 sigops to spend:
+ #
+ # redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
+ # p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
+ #
+ tip(35)
+ b39 = block(39)
+ b39_outputs = 0
+ b39_sigops_per_output = 6
+
+ # Build the redeem script, hash it, use hash to create the p2sh script
+ redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG])
+ redeem_script_hash = hash160(redeem_script)
+ p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
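+ # Spending one of these outputs will push [sig, redeem_script] as the scriptSig;
+ # the redeem script then executes 5 CHECKSIGVERIFYs plus a final CHECKSIG,
+ # i.e. the 6 sigops per output counted in b39_sigops_per_output above.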
+
+ # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
+ # This must be signed because it is spending a coinbase
+ spend = out[11]
+ tx = create_tx(spend.tx, spend.n, 1, p2sh_script)
+ tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE])))
+ self.sign_tx(tx, spend.tx, spend.n)
+ tx.rehash()
+ b39 = update_block(39, [tx])
+ b39_outputs += 1
+
+ # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
+ tx_new = None
+ tx_last = tx
+ total_size=len(b39.serialize())
+ while(total_size < MAX_BLOCK_BASE_SIZE):
+ tx_new = create_tx(tx_last, 1, 1, p2sh_script)
+ tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
+ tx_new.rehash()
+ total_size += len(tx_new.serialize())
+ if total_size >= MAX_BLOCK_BASE_SIZE:
+ break
+ b39.vtx.append(tx_new) # add tx to block
+ tx_last = tx_new
+ b39_outputs += 1
+
+ b39 = update_block(39, [])
+ yield accepted()
+ save_spendable_output()
+
+
+ # Test sigops in P2SH redeem scripts
+ #
+ # b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
+ # The first tx has one sigop and then at the end we add 2 more to put us just over the max.
+ #
+ # b41 does the same, less one, so it has the maximum sigops permitted.
+ #
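+ # Arithmetic check (assuming b40 initially carries just the coinbase's sigop):
+ # numTxes = (20000 - 1) // 6 = 3333, contributing 3333 * 6 = 19998 sigops;
+ # with the coinbase that is 19999, so b40 fills in 2 more (20001, rejected)
+ # while b41 fills in only 1 (20000, accepted).
+ #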
+ tip(39)
+ b40 = block(40, spend=out[12])
+ sigops = get_legacy_sigopcount_block(b40)
+ numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
+ assert(numTxes <= b39_outputs)
+
+ lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
+ new_txs = []
+ for i in range(1, numTxes+1):
+ tx = CTransaction()
+ tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
+ tx.vin.append(CTxIn(lastOutpoint, b''))
+ # second input is corresponding P2SH output from b39
+ tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
+ # Note: must pass the redeem_script (not p2sh_script) to the signature hash function
+ (sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
+ sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
+ scriptSig = CScript([sig, redeem_script])
+
+ tx.vin[1].scriptSig = scriptSig
+ tx.rehash()
+ new_txs.append(tx)
+ lastOutpoint = COutPoint(tx.sha256, 0)
+
+ b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
+ tx = CTransaction()
+ tx.vin.append(CTxIn(lastOutpoint, b''))
+ tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
+ tx.rehash()
+ new_txs.append(tx)
+ update_block(40, new_txs)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+ # same as b40, but one less sigop
+ tip(39)
+ b41 = block(41, spend=None)
+ update_block(41, b40.vtx[1:-1])
+ b41_sigops_to_fill = b40_sigops_to_fill - 1
+ tx = CTransaction()
+ tx.vin.append(CTxIn(lastOutpoint, b''))
+ tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
+ tx.rehash()
+ update_block(41, [tx])
+ yield accepted()
+
+ # Fork off of b39 to create a constant base again
+ #
+ # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
+ # \-> b41 (12)
+ #
+ tip(39)
+ block(42, spend=out[12])
+ yield rejected()
+ save_spendable_output()
+
+ block(43, spend=out[13])
+ yield accepted()
+ save_spendable_output()
+
+
+ # Test a number of really invalid scenarios
+ #
+ # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
+ # \-> ??? (15)
+
+ # The next few blocks are going to be created "by hand" since they'll do funky things, such as having
+ # the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
+ height = self.block_heights[self.tip.sha256] + 1
+ coinbase = create_coinbase(height, self.coinbase_pubkey)
+ b44 = CBlock()
+ b44.nTime = self.tip.nTime + 1
+ b44.hashPrevBlock = self.tip.sha256
+ b44.nBits = 0x207fffff
+ b44.vtx.append(coinbase)
+ b44.hashMerkleRoot = b44.calc_merkle_root()
+ b44.solve()
+ self.tip = b44
+ self.block_heights[b44.sha256] = height
+ self.blocks[44] = b44
+ yield accepted()
+
+ # A block with a non-coinbase as the first tx
+ non_coinbase = create_tx(out[15].tx, out[15].n, 1)
+ b45 = CBlock()
+ b45.nTime = self.tip.nTime + 1
+ b45.hashPrevBlock = self.tip.sha256
+ b45.nBits = 0x207fffff
+ b45.vtx.append(non_coinbase)
+ b45.hashMerkleRoot = b45.calc_merkle_root()
+ b45.calc_sha256()
+ b45.solve()
+ self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1
+ self.tip = b45
+ self.blocks[45] = b45
+ yield rejected(RejectResult(16, b'bad-cb-missing'))
+
+ # A block with no txns
+ tip(44)
+ b46 = CBlock()
+ b46.nTime = b44.nTime+1
+ b46.hashPrevBlock = b44.sha256
+ b46.nBits = 0x207fffff
+ b46.vtx = []
+ b46.hashMerkleRoot = 0
+ b46.solve()
+ self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1
+ self.tip = b46
+ assert 46 not in self.blocks
+ self.blocks[46] = b46
+ yield rejected(RejectResult(16, b'bad-blk-length'))
+
+ # A block with invalid work
+ tip(44)
+ b47 = block(47, solve=False)
+ target = uint256_from_compact(b47.nBits)
+ while b47.sha256 < target: # solve() searches for a hash below target; here we search for one above it (insufficient work)
+ b47.nNonce += 1
+ b47.rehash()
+ yield rejected(RejectResult(16, b'high-hash'))
+
+ # A block with timestamp > 2 hrs in the future
+ tip(44)
+ b48 = block(48, solve=False)
+ b48.nTime = int(time.time()) + 60 * 60 * 3
+ b48.solve()
+ yield rejected(RejectResult(16, b'time-too-new'))
+
+ # A block with an invalid merkle hash
+ tip(44)
+ b49 = block(49)
+ b49.hashMerkleRoot += 1
+ b49.solve()
+ yield rejected(RejectResult(16, b'bad-txnmrklroot'))
+
+ # A block with an incorrect POW limit
+ tip(44)
+ b50 = block(50)
+ b50.nBits = b50.nBits - 1
+ b50.solve()
+ yield rejected(RejectResult(16, b'bad-diffbits'))
+
+ # A block with two coinbase txns
+ tip(44)
+ b51 = block(51)
+ cb2 = create_coinbase(51, self.coinbase_pubkey)
+ b51 = update_block(51, [cb2])
+ yield rejected(RejectResult(16, b'bad-cb-multiple'))
+
+ # A block w/ duplicate txns
+ # Note: txns have to be in the right position in the merkle tree to trigger this error
+ tip(44)
+ b52 = block(52, spend=out[15])
+ tx = create_tx(b52.vtx[1], 0, 1)
+ b52 = update_block(52, [tx, tx])
+ yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+
+ # Test block timestamps
+ # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
+ # \-> b54 (15)
+ #
+ tip(43)
+ block(53, spend=out[14])
+ yield rejected() # rejected since b44 is at same height
+ save_spendable_output()
+
+ # invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast)
+ b54 = block(54, spend=out[15])
+ b54.nTime = b35.nTime - 1
+ b54.solve()
+ yield rejected(RejectResult(16, b'time-too-old'))
+
+ # valid timestamp
+ tip(53)
+ b55 = block(55, spend=out[15])
+ b55.nTime = b35.nTime
+ update_block(55, [])
+ yield accepted()
+ save_spendable_output()
+
+
+ # Test CVE-2012-2459
+ #
+ # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
+ # \-> b57 (16)
+ # \-> b56p2 (16)
+ # \-> b56 (16)
+ #
+ # Merkle tree malleability (CVE-2012-2459): repeating a sequence of transactions in a block leaves
+ # the merkle root unchanged while still invalidating the block.
+ # See: src/consensus/merkle.h
+ #
+ # b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx.
+ # Result: OK
+ #
+ # b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
+ # root but duplicate transactions.
+ # Result: Fails
+ #
+ # b57p2 has six transactions in its merkle tree:
+ # - coinbase, tx, tx1, tx2, tx3, tx4
+ # Merkle root calculation will duplicate as necessary.
+ # Result: OK.
+ #
+ # b56p2 copies b57p2 but re-adds both tx3 and tx4. The purpose of the test is to make sure the code
+ # catches duplicate txns that are not next to one another, with the "bad-txns-duplicate" error
+ # (which indicates that the error was caught early, avoiding a DoS vulnerability.)
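+ #
+ # Background: the merkle computation duplicates the last hash of any odd-length
+ # level, so merkle([cb, tx, tx1]) pairs (cb, tx) and (tx1, tx1) -- exactly what
+ # merkle([cb, tx, tx1, tx1]) computes. Two different tx lists, one root.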
+
+ # b57 - a good block with 2 txs, don't submit until end
+ tip(55)
+ b57 = block(57)
+ tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
+ tx1 = create_tx(tx, 0, 1)
+ b57 = update_block(57, [tx, tx1])
+
+ # b56 - copy b57, add a duplicate tx
+ tip(55)
+ b56 = copy.deepcopy(b57)
+ self.blocks[56] = b56
+ assert_equal(len(b56.vtx),3)
+ b56 = update_block(56, [tx1])
+ assert_equal(b56.hash, b57.hash)
+ yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+
+ # b57p2 - a good block with 6 tx'es, don't submit until end
+ tip(55)
+ b57p2 = block("57p2")
+ tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
+ tx1 = create_tx(tx, 0, 1)
+ tx2 = create_tx(tx1, 0, 1)
+ tx3 = create_tx(tx2, 0, 1)
+ tx4 = create_tx(tx3, 0, 1)
+ b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4])
+
+ # b56p2 - copy b57p2, duplicate two non-consecutive tx's
+ tip(55)
+ b56p2 = copy.deepcopy(b57p2)
+ self.blocks["b56p2"] = b56p2
+ assert_equal(b56p2.hash, b57p2.hash)
+ assert_equal(len(b56p2.vtx),6)
+ b56p2 = update_block("b56p2", [tx3, tx4])
+ yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+
+ tip("57p2")
+ yield accepted()
+
+ tip(57)
+ yield rejected() #rejected because 57p2 seen first
+ save_spendable_output()
+
+ # Test a few invalid tx types
+ #
+ # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+ # \-> ??? (17)
+ #
+
+ # tx with prevout.n out of range
+ tip(57)
+ b58 = block(58, spend=out[17])
+ tx = CTransaction()
+ assert(len(out[17].tx.vout) < 42)
+ tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff))
+ tx.vout.append(CTxOut(0, b""))
+ tx.calc_sha256()
+ b58 = update_block(58, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # tx with output value > input value
+ tip(57)
+ b59 = block(59)
+ tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN)
+ b59 = update_block(59, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-in-belowout'))
+
+ # reset to good chain
+ tip(57)
+ b60 = block(60, spend=out[17])
+ yield accepted()
+ save_spendable_output()
+
+ # Test BIP30
+ #
+ # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+ # \-> b61 (18)
+ #
+ # Blocks are not allowed to contain a transaction whose id matches that of an earlier,
+ # not-fully-spent transaction in the same chain. To test, make identical coinbases;
+ # the second one should be rejected.
+ #
+ tip(60)
+ b61 = block(61, spend=out[18])
+ b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig #equalize the coinbases
+ b61.vtx[0].rehash()
+ b61 = update_block(61, [])
+ assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
+ yield rejected(RejectResult(16, b'bad-txns-BIP30'))
+
+
+ # Test that a non-final tx is properly rejected (not an exhaustive tx.isFinal test; that belongs in data-driven transaction tests)
+ #
+ # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+ # \-> b62 (18)
+ #
+ tip(60)
+ b62 = block(62)
+ tx = CTransaction()
+ tx.nLockTime = 0xffffffff #this locktime is non-final
+ assert(out[18].n < len(out[18].tx.vout))
+ tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence
+ tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ assert(tx.vin[0].nSequence < 0xffffffff)
+ tx.calc_sha256()
+ b62 = update_block(62, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
+
+
+ # Test a non-final coinbase is also rejected
+ #
+ # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
+ # \-> b63 (-)
+ #
+ tip(60)
+ b63 = block(63)
+ b63.vtx[0].nLockTime = 0xffffffff
+ b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
+ b63.vtx[0].rehash()
+ b63 = update_block(63, [])
+ yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
+
+
+ # This checks that a block with a bloated VARINT between the block_header and the array of tx such that
+ # the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
+ # does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
+ # care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
+ #
+ # What matters is that the node must not reject the bloated block and then also reject the canonical
+ # block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
+ #
+ # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
+ # \
+ # b64a (18)
+ # b64a is a bloated block (non-canonical varint)
+ # b64 is a good block (same as b64a but w/ canonical varint)
+ #
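+ # Size arithmetic: the tx count here is below 253, so its canonical CompactSize
+ # encoding is a single byte; the bloated 0xff + <uint64> form is 9 bytes, i.e.
+ # 8 bytes larger, which is what the MAX_BLOCK_BASE_SIZE + 8 assertion below checks.
+ #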
+ tip(60)
+ regular_block = block("64a", spend=out[18])
+
+ # make it a "broken_block," with non-canonical serialization
+ b64a = CBrokenBlock(regular_block)
+ b64a.initialize(regular_block)
+ self.blocks["64a"] = b64a
+ self.tip = b64a
+ tx = CTransaction()
+
+ # use canonical serialization to calculate size
+ script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
+ script_output = CScript([b'\x00' * script_length])
+ tx.vout.append(CTxOut(0, script_output))
+ tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
+ b64a = update_block("64a", [tx])
+ assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
+ yield TestInstance([[self.tip, None]])
+
+ # comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore
+ self.test.block_store.erase(b64a.sha256)
+
+ tip(60)
+ b64 = CBlock(b64a)
+ b64.vtx = copy.deepcopy(b64a.vtx)
+ assert_equal(b64.hash, b64a.hash)
+ assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
+ self.blocks[64] = b64
+ update_block(64, [])
+ yield accepted()
+ save_spendable_output()
+
+ # Spend an output created in the block itself
+ #
+ # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+ #
+ tip(64)
+ b65 = block(65)
+ tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
+ tx2 = create_and_sign_tx(tx1, 0, 0)
+ update_block(65, [tx1, tx2])
+ yield accepted()
+ save_spendable_output()
+
+ # Attempt to spend an output created later in the same block
+ #
+ # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+ # \-> b66 (20)
+ tip(65)
+ b66 = block(66)
+ tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
+ tx2 = create_and_sign_tx(tx1, 0, 1)
+ update_block(66, [tx2, tx1])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # Attempt to double-spend a transaction created in a block
+ #
+ # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
+ # \-> b67 (20)
+ #
+ #
+ tip(65)
+ b67 = block(67)
+ tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
+ tx2 = create_and_sign_tx(tx1, 0, 1)
+ tx3 = create_and_sign_tx(tx1, 0, 2)
+ update_block(67, [tx1, tx2, tx3])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+ # More tests of block subsidy
+ #
+ # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
+ # \-> b68 (20)
+ #
+ # b68 - coinbase with an extra 10 satoshis,
+ # creates a tx that has 9 satoshis from out[20] go to fees
+ # this fails because the coinbase is trying to claim 1 satoshi too much in fees
+ #
+ # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
+ # this succeeds
+ #
+ tip(65)
+ b68 = block(68, additional_coinbase_value=10)
+ tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9)
+ update_block(68, [tx])
+ yield rejected(RejectResult(16, b'bad-cb-amount'))
+
+ tip(65)
+ b69 = block(69, additional_coinbase_value=10)
+ tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10)
+ update_block(69, [tx])
+ yield accepted()
+ save_spendable_output()
+
+ # Test spending the outpoint of a non-existent transaction
+ #
+ # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
+ # \-> b70 (21)
+ #
+ tip(69)
+ block(70, spend=out[21])
+ bogus_tx = CTransaction()
+ bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
+ tx.vout.append(CTxOut(1, b""))
+ update_block(70, [tx])
+ yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
+
+
+ # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
+ #
+ # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
+ # \-> b71 (21)
+ #
+ # b72 is a good block.
+ # b71 is a copy of b72, but re-adds one of its transactions. However, it has the same hash as b72.
+ #
+ tip(69)
+ b72 = block(72)
+ tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2)
+ tx2 = create_and_sign_tx(tx1, 0, 1)
+ b72 = update_block(72, [tx1, tx2]) # now tip is 72
+ b71 = copy.deepcopy(b72)
+ b71.vtx.append(tx2) # add duplicate tx2
+ self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
+ self.blocks[71] = b71
+
+ assert_equal(len(b71.vtx), 4)
+ assert_equal(len(b72.vtx), 3)
+ assert_equal(b72.sha256, b71.sha256)
+
+ tip(71)
+ yield rejected(RejectResult(16, b'bad-txns-duplicate'))
+ tip(72)
+ yield accepted()
+ save_spendable_output()
+
+
+ # Test some invalid scripts and MAX_BLOCK_SIGOPS
+ #
+ # -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
+ # \-> b** (22)
+ #
+
+ # b73 - tx with excessive sigops that are placed after an excessively large script element.
+ # The purpose of the test is to make sure those sigops are counted.
+ #
+ # script is a bytearray of size 20,526
+ #
+ # bytearray[0-19,998] : OP_CHECKSIG
+ # bytearray[19,999] : OP_PUSHDATA4
+ # bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
+ # bytearray[20,004-20,524]: unread data (script_element)
+ # bytearray[20,525] : OP_CHECKSIG (this puts us over the limit)
+ #
+ tip(72)
+ b73 = block(73)
+ size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
+ a = bytearray([OP_CHECKSIG] * size)
+ a[MAX_BLOCK_SIGOPS - 1] = 0x4e # OP_PUSHDATA4
+
+ element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
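+ # 521 = 0x0209, so the little-endian length bytes are 0x09, 0x02, 0x00, 0x00: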
+ a[MAX_BLOCK_SIGOPS] = element_size % 256
+ a[MAX_BLOCK_SIGOPS+1] = element_size // 256
+ a[MAX_BLOCK_SIGOPS+2] = 0
+ a[MAX_BLOCK_SIGOPS+3] = 0
+
+ tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
+ b73 = update_block(73, [tx])
+ assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1)
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+ # b74/75 - if we push an invalid script element, all previous sigops are counted,
+ # but sigops after the element are not counted.
+ #
+ # The script element is invalid because its PUSHDATA4 length field claims a
+ # huge amount of data (nearly 0xffffffff bytes) while far fewer bytes actually
+ # follow. The bytes that do follow are CHECKSIGs, so they would cause b75 to
+ # fail for excessive sigops if they were counted.
+ #
+ # b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
+ # b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
+ #
+ #
+ tip(72)
+ b74 = block(74)
+ size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
+ a = bytearray([OP_CHECKSIG] * size)
+ a[MAX_BLOCK_SIGOPS] = 0x4e
+ a[MAX_BLOCK_SIGOPS+1] = 0xfe
+ a[MAX_BLOCK_SIGOPS+2] = 0xff
+ a[MAX_BLOCK_SIGOPS+3] = 0xff
+ a[MAX_BLOCK_SIGOPS+4] = 0xff
+ tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
+ b74 = update_block(74, [tx])
+ yield rejected(RejectResult(16, b'bad-blk-sigops'))
+
+ tip(72)
+ b75 = block(75)
+ size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
+ a = bytearray([OP_CHECKSIG] * size)
+ a[MAX_BLOCK_SIGOPS-1] = 0x4e
+ a[MAX_BLOCK_SIGOPS] = 0xff
+ a[MAX_BLOCK_SIGOPS+1] = 0xff
+ a[MAX_BLOCK_SIGOPS+2] = 0xff
+ a[MAX_BLOCK_SIGOPS+3] = 0xff
+ tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
+ b75 = update_block(75, [tx])
+ yield accepted()
+ save_spendable_output()
+
+ # Check that if we push an element filled with CHECKSIGs, they are not counted
+ tip(75)
+ b76 = block(76)
+ size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
+ a = bytearray([OP_CHECKSIG] * size)
+ a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
+ tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a))
+ b76 = update_block(76, [tx])
+ yield accepted()
+ save_spendable_output()
+
+ # Test transaction resurrection
+ #
+ # -> b77 (24) -> b78 (25) -> b79 (26)
+ # \-> b80 (25) -> b81 (26) -> b82 (27)
+ #
+ # b78 creates a tx, which is spent in b79. After b82, both should be in mempool
+ #
+ # The txs must be unsigned and pass the node's mempool policy. They are unsigned
+ # for the rather obscure reason that the Python signature code does not
+ # distinguish between Low-S and High-S values (whereas bitcoind's signing code
+ # does); as a result, the odds are only 50% that the Python code will produce
+ # the standard (low-S) value and the transaction will be accepted into the
+ # mempool. Until the test framework supports low-S signing, we are out of luck.
+ #
+ # To get around this issue, we construct transactions which are not signed and which
+ # spend to OP_TRUE. If the standard-ness rules change, this test would need to be
+ # updated. (Perhaps to spend to a P2SH OP_TRUE script)
+ #
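+ # (Background: for a secp256k1 ECDSA signature (r, s), (r, N - s) is equally
+ # valid, where N is the curve order; bitcoind signs with the low-S form and
+ # standardness rejects high-S, which is where the 50% figure above comes from.)
+ #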
+ tip(76)
+ block(77)
+ tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN)
+ update_block(77, [tx77])
+ yield accepted()
+ save_spendable_output()
+
+ block(78)
+ tx78 = create_tx(tx77, 0, 9*COIN)
+ update_block(78, [tx78])
+ yield accepted()
+
+ block(79)
+ tx79 = create_tx(tx78, 0, 8*COIN)
+ update_block(79, [tx79])
+ yield accepted()
+
+ # mempool should be empty
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+
+ tip(77)
+ block(80, spend=out[25])
+ yield rejected()
+ save_spendable_output()
+
+ block(81, spend=out[26])
+ yield rejected() # other chain is same length
+ save_spendable_output()
+
+ block(82, spend=out[27])
+ yield accepted() # now this chain is longer, triggers re-org
+ save_spendable_output()
+
+ # now check that tx78 and tx79 have been put back into the peer's mempool
+ mempool = self.nodes[0].getrawmempool()
+ assert_equal(len(mempool), 2)
+ assert(tx78.hash in mempool)
+ assert(tx79.hash in mempool)
+
+
+ # Test invalid opcodes in dead execution paths.
+ #
+ # -> b81 (26) -> b82 (27) -> b83 (28)
+ #
+ b83 = block(83)
+ op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
+ script = CScript(op_codes)
+ tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script)
+
+ tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE]))
+ tx2.vin[0].scriptSig = CScript([OP_FALSE])
+ tx2.rehash()
+
+ update_block(83, [tx1, tx2])
+ yield accepted()
+ save_spendable_output()
+
+
+ # Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
+ #
+ # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
+ # \-> b85 (29) -> b86 (30) \-> b89a (32)
+ #
+ #
+ b84 = block(84)
+ tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN]))
+ tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx1.calc_sha256()
+ self.sign_tx(tx1, out[29].tx, out[29].n)
+ tx1.rehash()
+ tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN]))
+ tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
+ tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN]))
+ tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
+ tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE]))
+ tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
+ tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN]))
+
+ update_block(84, [tx1,tx2,tx3,tx4,tx5])
+ yield accepted()
+ save_spendable_output()
+
+ tip(83)
+ block(85, spend=out[29])
+ yield rejected()
+
+ block(86, spend=out[30])
+ yield accepted()
+
+ tip(84)
+ block(87, spend=out[30])
+ yield rejected()
+ save_spendable_output()
+
+ block(88, spend=out[31])
+ yield accepted()
+ save_spendable_output()
+
+ # trying to spend the OP_RETURN output is rejected
+ block("89a", spend=out[32])
+ tx = create_tx(tx1, 0, 0, CScript([OP_TRUE]))
+ update_block("89a", [tx])
+ yield rejected()
+
+
+ # Test re-org of just over a week's worth of blocks (1088 blocks)
+ # This test takes a minute or two and runs entirely in memory
+ #
+ if self.options.runbarelyexpensive:
+ tip(88)
+ LARGE_REORG_SIZE = 1088
+ test1 = TestInstance(sync_every_block=False)
+ spend=out[32]
+ for i in range(89, LARGE_REORG_SIZE + 89):
+ b = block(i, spend)
+ tx = CTransaction()
+ script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
+ script_output = CScript([b'\x00' * script_length])
+ tx.vout.append(CTxOut(0, script_output))
+ tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
+ b = update_block(i, [tx])
+ assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
+ test1.blocks_and_transactions.append([self.tip, True])
+ save_spendable_output()
+ spend = get_spendable_output()
+
+ yield test1
+ chain1_tip = i
+
+ # now create alt chain of same length
+ tip(88)
+ test2 = TestInstance(sync_every_block=False)
+ for i in range(89, LARGE_REORG_SIZE + 89):
+ block("alt"+str(i))
+ test2.blocks_and_transactions.append([self.tip, False])
+ yield test2
+
+ # extend alt chain to trigger re-org
+ block("alt" + str(chain1_tip + 1))
+ yield accepted()
+
+ # ... and re-org back to the first chain
+ tip(chain1_tip)
+ block(chain1_tip + 1)
+ yield rejected()
+ block(chain1_tip + 2)
+ yield accepted()
+
+ chain1_tip += 2
+
+
+
+if __name__ == '__main__':
+ FullBlockTest().main()
diff --git a/test/functional/p2p-leaktests.py b/test/functional/p2p-leaktests.py
new file mode 100755
index 0000000000..3a843197fb
--- /dev/null
+++ b/test/functional/p2p-leaktests.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test message sending before handshake completion.
+
+A node should never send anything other than VERSION/VERACK/REJECT until it's
+received a VERACK.
+
+This test connects to a node and sends it a few messages, trying to entice it
+into sending us something it shouldn't.
+"""
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+banscore = 10
+
+class CLazyNode(NodeConnCB):
+ def __init__(self):
+ self.connection = None
+ self.unexpected_msg = False
+ self.connected = False
+ super().__init__()
+
+ def add_connection(self, conn):
+ self.connection = conn
+
+ def send_message(self, message):
+ self.connection.send_message(message)
+
+ def bad_message(self, message):
+ self.unexpected_msg = True
+ self.log.info("should not have received message: %s" % message.command)
+
+ def on_open(self, conn):
+ self.connected = True
+
+ def on_version(self, conn, message): self.bad_message(message)
+ def on_verack(self, conn, message): self.bad_message(message)
+ def on_reject(self, conn, message): self.bad_message(message)
+ def on_inv(self, conn, message): self.bad_message(message)
+ def on_addr(self, conn, message): self.bad_message(message)
+ def on_alert(self, conn, message): self.bad_message(message)
+ def on_getdata(self, conn, message): self.bad_message(message)
+ def on_getblocks(self, conn, message): self.bad_message(message)
+ def on_tx(self, conn, message): self.bad_message(message)
+ def on_block(self, conn, message): self.bad_message(message)
+ def on_getaddr(self, conn, message): self.bad_message(message)
+ def on_headers(self, conn, message): self.bad_message(message)
+ def on_getheaders(self, conn, message): self.bad_message(message)
+ def on_ping(self, conn, message): self.bad_message(message)
+ def on_mempool(self, conn, message): self.bad_message(message)
+ def on_pong(self, conn, message): self.bad_message(message)
+ def on_feefilter(self, conn, message): self.bad_message(message)
+ def on_sendheaders(self, conn, message): self.bad_message(message)
+ def on_sendcmpct(self, conn, message): self.bad_message(message)
+ def on_cmpctblock(self, conn, message): self.bad_message(message)
+ def on_getblocktxn(self, conn, message): self.bad_message(message)
+ def on_blocktxn(self, conn, message): self.bad_message(message)
+
+# Node that never sends a version. We'll use this to send a bunch of messages
+# anyway, and eventually get disconnected.
+class CNodeNoVersionBan(CLazyNode):
+ def __init__(self):
+ super().__init__()
+
+ # send a bunch of veracks without first sending a version. This should get us disconnected.
+ # NOTE: implementation-specific check here. Remove if bitcoind ban behavior changes
+ def on_open(self, conn):
+ super().on_open(conn)
+ for i in range(banscore):
+ self.send_message(msg_verack())
+
+ def on_reject(self, conn, message): pass
+
+# Node that never sends a version. This one just sits idle and hopes to receive
+# any message (it shouldn't!)
+class CNodeNoVersionIdle(CLazyNode):
+ def __init__(self):
+ super().__init__()
+
+# Node that sends a version but not a verack.
+class CNodeNoVerackIdle(CLazyNode):
+ def __init__(self):
+ self.version_received = False
+ super().__init__()
+
+ def on_reject(self, conn, message): pass
+ def on_verack(self, conn, message): pass
+ # When version is received, don't reply with a verack. Instead, see if the
+ # node will give us a message that it shouldn't. This is not an exhaustive
+ # list!
+ def on_version(self, conn, message):
+ self.version_received = True
+ conn.send_message(msg_ping())
+ conn.send_message(msg_getaddr())
+
+class P2PLeakTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 1
+ def setup_network(self):
+ extra_args = [['-banscore='+str(banscore)]
+ for i in range(self.num_nodes)]
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
+
+ def run_test(self):
+ no_version_bannode = CNodeNoVersionBan()
+ no_version_idlenode = CNodeNoVersionIdle()
+ no_verack_idlenode = CNodeNoVerackIdle()
+
+ connections = []
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_bannode, send_version=False))
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_idlenode, send_version=False))
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_verack_idlenode))
+ no_version_bannode.add_connection(connections[0])
+ no_version_idlenode.add_connection(connections[1])
+ no_verack_idlenode.add_connection(connections[2])
+
+ NetworkThread().start() # Start up network handling in another thread
+
+ assert(wait_until(lambda: no_version_bannode.connected and no_version_idlenode.connected and no_verack_idlenode.version_received, timeout=10))
+
+ # Mine a block and make sure that it's not sent to the connected nodes
+ self.nodes[0].generate(1)
+
+ #Give the node enough time to possibly leak out a message
+ time.sleep(5)
+
+ #This node should have been banned
+ assert(no_version_bannode.connection.state == "closed")
+
+ [conn.disconnect_node() for conn in connections]
+
+ # Make sure no unexpected messages came in
+ assert(no_version_bannode.unexpected_msg == False)
+ assert(no_version_idlenode.unexpected_msg == False)
+ assert(no_verack_idlenode.unexpected_msg == False)
+
+if __name__ == '__main__':
+ P2PLeakTest().main()
diff --git a/test/functional/p2p-mempool.py b/test/functional/p2p-mempool.py
new file mode 100755
index 0000000000..0aa9c90e8f
--- /dev/null
+++ b/test/functional/p2p-mempool.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test p2p mempool message.
+
+Test that nodes are disconnected if they send mempool messages when bloom
+filters are not enabled.
+"""
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class TestNode(NodeConnCB):
+ def __init__(self):
+ NodeConnCB.__init__(self)
+ self.connection = None
+ self.ping_counter = 1
+ self.last_pong = msg_pong()
+ self.block_receive_map = {}
+
+ def add_connection(self, conn):
+ self.connection = conn
+ self.peer_disconnected = False
+
+ def on_inv(self, conn, message):
+ pass
+
+ # Track the last getdata message we receive (used in the test)
+ def on_getdata(self, conn, message):
+ self.last_getdata = message
+
+ def on_block(self, conn, message):
+ message.block.calc_sha256()
+ try:
+ self.block_receive_map[message.block.sha256] += 1
+ except KeyError:
+ self.block_receive_map[message.block.sha256] = 1
+
+ # Spin until verack message is received from the node.
+ # We use this to signal that our test can begin. This
+ # is called from the testing thread, so it needs to acquire
+ # the global lock.
+ def wait_for_verack(self):
+ def veracked():
+ return self.verack_received
+ return wait_until(veracked, timeout=10)
+
+ def wait_for_disconnect(self):
+ def disconnected():
+ return self.peer_disconnected
+ return wait_until(disconnected, timeout=10)
+
+ # Wrapper for the NodeConn's send_message function
+ def send_message(self, message):
+ self.connection.send_message(message)
+
+ def on_pong(self, conn, message):
+ self.last_pong = message
+
+ def on_close(self, conn):
+ self.peer_disconnected = True
+
+ # Sync up with the node after delivery of a block
+ def sync_with_ping(self, timeout=30):
+ def received_pong():
+ return (self.last_pong.nonce == self.ping_counter)
+ self.connection.send_message(msg_ping(nonce=self.ping_counter))
+ success = wait_until(received_pong, timeout=timeout)
+ self.ping_counter += 1
+ return success
+
+ def send_mempool(self):
+ self.lastInv = []
+ self.send_message(msg_mempool())
+
+class P2PMempoolTests(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def setup_network(self):
+ # Start a node with bloom filters disabled
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-peerbloomfilters=0"]))
+
+ def run_test(self):
+ #connect a mininode
+ aTestNode = TestNode()
+ node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode)
+ aTestNode.add_connection(node)
+ NetworkThread().start()
+ aTestNode.wait_for_verack()
+
+ #request mempool
+ aTestNode.send_mempool()
+ aTestNode.wait_for_disconnect()
+
+ #mininode must be disconnected at this point
+ assert_equal(len(self.nodes[0].getpeerinfo()), 0)
+
+if __name__ == '__main__':
+ P2PMempoolTests().main()
diff --git a/test/functional/p2p-segwit.py b/test/functional/p2p-segwit.py
new file mode 100755
index 0000000000..dcf2b9a7de
--- /dev/null
+++ b/test/functional/p2p-segwit.py
@@ -0,0 +1,2033 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test segwit transactions and blocks on P2P network."""
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.script import *
+from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, WITNESS_COMMITMENT_HEADER
+from test_framework.key import CECKey, CPubKey
+import time
+import random
+from binascii import hexlify
+
+# The versionbit bit used to signal activation of SegWit
+VB_WITNESS_BIT = 1
+VB_PERIOD = 144
+VB_ACTIVATION_THRESHOLD = 108
+VB_TOP_BITS = 0x20000000
+
+MAX_SIGOP_COST = 80000
+
+
+# Calculate the virtual size of a witness block:
+# (base + witness/4)
+def get_virtual_size(witness_block):
+ base_size = len(witness_block.serialize())
+ total_size = len(witness_block.serialize(with_witness=True))
+ # the "+3" is so we round up
+ vsize = int((3*base_size + total_size + 3)/4)
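+ # e.g. base_size=1000, total_size=1400 (400 witness bytes):
+ # int((3*1000 + 1400 + 3)/4) = 1100 == 1000 + 400//4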
+ return vsize
+
+# Note: we can reduce code by using SingleNodeConnCB (in master, not 0.12)
+class TestNode(NodeConnCB):
+ def __init__(self):
+ NodeConnCB.__init__(self)
+ self.connection = None
+ self.ping_counter = 1
+ self.last_pong = msg_pong(0)
+ self.sleep_time = 0.05
+ self.getdataset = set()
+ self.last_reject = None
+
+ def add_connection(self, conn):
+ self.connection = conn
+
+ # Wrapper for the NodeConn's send_message function
+ def send_message(self, message):
+ self.connection.send_message(message)
+
+ def on_inv(self, conn, message):
+ self.last_inv = message
+
+ def on_block(self, conn, message):
+ self.last_block = message.block
+ self.last_block.calc_sha256()
+
+ def on_getdata(self, conn, message):
+ for inv in message.inv:
+ self.getdataset.add(inv.hash)
+ self.last_getdata = message
+
+ def on_getheaders(self, conn, message):
+ self.last_getheaders = message
+
+ def on_pong(self, conn, message):
+ self.last_pong = message
+
+ def on_reject(self, conn, message):
+ self.last_reject = message
+
+ # Syncing helpers
+ def sync(self, test_function, timeout=60):
+ while timeout > 0:
+ with mininode_lock:
+ if test_function():
+ return
+ time.sleep(self.sleep_time)
+ timeout -= self.sleep_time
+ raise AssertionError("Sync failed to complete")
+
+ def sync_with_ping(self, timeout=60):
+ self.send_message(msg_ping(nonce=self.ping_counter))
+ test_function = lambda: self.last_pong.nonce == self.ping_counter
+ self.sync(test_function, timeout)
+ self.ping_counter += 1
+ return
+
+ def wait_for_block(self, blockhash, timeout=60):
+ test_function = lambda: self.last_block is not None and self.last_block.sha256 == blockhash
+ self.sync(test_function, timeout)
+ return
+
+ def wait_for_getdata(self, timeout=60):
+ test_function = lambda: self.last_getdata is not None
+ self.sync(test_function, timeout)
+
+ def wait_for_getheaders(self, timeout=60):
+ test_function = lambda: self.last_getheaders is not None
+ self.sync(test_function, timeout)
+
+ def wait_for_inv(self, expected_inv, timeout=60):
+ # Wait until an inv matching expected_inv has been received
+ # (assumes expected_inv is a list of CInv objects)
+ test_function = lambda: getattr(self, 'last_inv', None) is not None and \
+ [(i.type, i.hash) for i in self.last_inv.inv] == [(i.type, i.hash) for i in expected_inv]
+ self.sync(test_function, timeout)
+
+ def announce_tx_and_wait_for_getdata(self, tx, timeout=60):
+ with mininode_lock:
+ self.last_getdata = None
+ self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
+ self.wait_for_getdata(timeout)
+ return
+
+ def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
+ with mininode_lock:
+ self.last_getdata = None
+ self.last_getheaders = None
+ msg = msg_headers()
+ msg.headers = [ CBlockHeader(block) ]
+ if use_header:
+ self.send_message(msg)
+ else:
+ self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
+ self.wait_for_getheaders()
+ self.send_message(msg)
+ self.wait_for_getdata()
+ return
+
+ def announce_block(self, block, use_header):
+ with mininode_lock:
+ self.last_getdata = None
+ if use_header:
+ msg = msg_headers()
+ msg.headers = [ CBlockHeader(block) ]
+ self.send_message(msg)
+ else:
+ self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
+
+ def request_block(self, blockhash, inv_type, timeout=60):
+ with mininode_lock:
+ self.last_block = None
+ self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
+ self.wait_for_block(blockhash, timeout)
+ return self.last_block
+
+ def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None):
+ tx_message = msg_tx(tx)
+ if with_witness:
+ tx_message = msg_witness_tx(tx)
+ self.send_message(tx_message)
+ self.sync_with_ping()
+ assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted)
+ if reason is not None and not accepted:
+ # Check the rejection reason as well.
+ with mininode_lock:
+ assert_equal(self.last_reject.reason, reason)
+
+ # Test whether a witness block had the correct effect on the tip
+ def test_witness_block(self, block, accepted, with_witness=True):
+ if with_witness:
+ self.send_message(msg_witness_block(block))
+ else:
+ self.send_message(msg_block(block))
+ self.sync_with_ping()
+ assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted)
+
+
+# Used to keep track of anyone-can-spend outputs that we can use in the tests
+class UTXO(object):
+ def __init__(self, sha256, n, nValue):
+ self.sha256 = sha256
+ self.n = n
+ self.nValue = nValue
+
+# Helper for getting the script associated with a P2PKH
+def GetP2PKHScript(pubkeyhash):
+ return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
+
+# Add signature for a P2PK witness program.
+def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
+ tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
+ signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
+ txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
+ txTo.rehash()
+
+
+class SegWitTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 3
+
+ def setup_network(self):
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-whitelist=127.0.0.1"]))
+ # Start a node for testing IsStandard rules.
+ self.nodes.append(start_node(1, self.options.tmpdir, ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0"]))
+ connect_nodes(self.nodes[0], 1)
+
+ # Disable segwit's bip9 parameter to simulate upgrading after activation.
+ self.nodes.append(start_node(2, self.options.tmpdir, ["-whitelist=127.0.0.1", "-bip9params=segwit:0:0"]))
+ connect_nodes(self.nodes[0], 2)
+
+ ''' Helpers '''
+ # Build a block on top of node0's tip.
+ def build_next_block(self, nVersion=4):
+ tip = self.nodes[0].getbestblockhash()
+ height = self.nodes[0].getblockcount() + 1
+ block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
+ block = create_block(int(tip, 16), create_coinbase(height), block_time)
+ block.nVersion = nVersion
+ block.rehash()
+ return block
+
+ # Adds list of transactions to block, adds witness commitment, then solves.
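+ # Per BIP141, the commitment is an OP_RETURN output in the coinbase that
+ # commits to hash256(witness merkle root || witness nonce), so it must be
+ # recomputed whenever the block's transactions change.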
+ def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
+ block.vtx.extend(tx_list)
+ add_witness_commitment(block, nonce)
+ block.solve()
+ return
+
+ ''' Individual tests '''
+ def test_witness_services(self):
+ self.log.info("Verifying NODE_WITNESS service bit")
+ assert((self.test_node.connection.nServices & NODE_WITNESS) != 0)
+
+
+ # See if sending a regular transaction works, and create a utxo
+ # to use in later tests.
+ def test_non_witness_transaction(self):
+ # Mine a block with an anyone-can-spend coinbase,
+ # let it mature, then try to spend it.
+ self.log.info("Testing non-witness transaction")
+ block = self.build_next_block(nVersion=1)
+ block.solve()
+ self.test_node.send_message(msg_block(block))
+ self.test_node.sync_with_ping() # make sure the block was processed
+ txid = block.vtx[0].sha256
+
+ self.nodes[0].generate(99) # let the block mature
+
+ # Create a transaction that spends the coinbase
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
+ tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
+ tx.calc_sha256()
+
+ # Check that serializing it with or without witness is the same
+ # This is a sanity check of our testing framework.
+ assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
+
+ self.test_node.send_message(msg_witness_tx(tx))
+ self.test_node.sync_with_ping() # make sure the tx was processed
+ assert(tx.hash in self.nodes[0].getrawmempool())
+ # Save this transaction for later
+ self.utxo.append(UTXO(tx.sha256, 0, 49*100000000))
+ self.nodes[0].generate(1)
+
+
+ # Verify that blocks with witnesses are rejected before activation.
+ def test_unnecessary_witness_before_segwit_activation(self):
+ self.log.info("Testing behavior of unnecessary witnesses")
+ # For now, rely on earlier tests to have created at least one utxo for
+ # us to use
+ assert(len(self.utxo) > 0)
+ assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active')
+
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+ tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
+ tx.wit.vtxinwit.append(CTxInWitness())
+ tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
+
+ # Verify the hash with witness differs from the txid
+ # (otherwise our testing framework must be broken!)
+ tx.rehash()
+ assert(tx.sha256 != tx.calc_sha256(with_witness=True))
+
+ # Construct a segwit-signaling block that includes the transaction.
+ block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT)))
+ self.update_witness_block_with_transactions(block, [tx])
+ # Sending witness data before activation is not allowed (anti-spam
+ # rule).
+ self.test_node.test_witness_block(block, accepted=False)
+ # TODO: fix synchronization so we can test reject reason
+ # Right now, bitcoind delays sending reject messages for blocks until
+ # some later point in processing, making synchronization here difficult.
+ #assert_equal(self.test_node.last_reject.reason, "unexpected-witness")
+
+ # But it should not be permanently marked bad...
+ # Resend without witness information.
+ self.test_node.send_message(msg_block(block))
+ self.test_node.sync_with_ping()
+ assert_equal(self.nodes[0].getbestblockhash(), block.hash)
+
+ sync_blocks(self.nodes)
+
+ # Create a p2sh output -- this is so we can pass the standardness
+ # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
+ # in P2SH).
+ p2sh_program = CScript([OP_TRUE])
+ p2sh_pubkey = hash160(p2sh_program)
+ scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
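+ # (Spends push the redeem script in the scriptSig, as P2SH requires.)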
+
+ # Now check that unnecessary witnesses can't be used to blind a node
+ # to a transaction, e.g. by violating standardness checks.
+ tx2 = CTransaction()
+ tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
+ tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
+ tx2.rehash()
+ self.test_node.test_transaction_acceptance(tx2, False, True)
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+
+ # We'll add an unnecessary witness to this transaction that would cause
+ # it to be non-standard, to test that violating policy with a witness before
+ # segwit activation doesn't blind a node to a transaction. Transactions
+ # rejected for having a witness before segwit activation shouldn't be added
+ # to the rejection cache.
+ tx3 = CTransaction()
+ tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
+ tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey))
+ tx3.wit.vtxinwit.append(CTxInWitness())
+ tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
+ tx3.rehash()
+ # Note that this should be rejected for the premature witness reason,
+ # rather than a policy check, since segwit hasn't activated yet.
+ self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
+
+ # If we send without witness, it should be accepted.
+ self.std_node.test_transaction_acceptance(tx3, False, True)
+
+ # Now create a new anyone-can-spend utxo for the next test.
+ tx4 = CTransaction()
+ tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
+ tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, CScript([OP_TRUE])))
+ tx4.rehash()
+ self.test_node.test_transaction_acceptance(tx3, False, True)
+ self.test_node.test_transaction_acceptance(tx4, False, True)
+
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+
+ # Update our utxo list; we spent the first entry.
+ self.utxo.pop(0)
+ self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
+
+
+ # Mine enough blocks for segwit's vb state to be 'started'.
+ def advance_to_segwit_started(self):
+ height = self.nodes[0].getblockcount()
+ # Will need to rewrite the tests here if we are past the first period
+ assert(height < VB_PERIOD - 1)
+ # Genesis block is 'defined'.
+ assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined')
+ # Advance to end of period, status should now be 'started'
+ self.nodes[0].generate(VB_PERIOD-height-1)
+ assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
+
+ # Mine enough blocks to lock in segwit, but don't activate.
+ # TODO: we could verify that lockin only happens at the right threshold of
+ # signalling blocks, rather than just at the right period boundary.
+ def advance_to_segwit_lockin(self):
+ height = self.nodes[0].getblockcount()
+ assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
+ # Advance to end of period, and verify lock-in happens at the end
+ self.nodes[0].generate(VB_PERIOD-1)
+ height = self.nodes[0].getblockcount()
+ assert((height % VB_PERIOD) == VB_PERIOD - 2)
+ assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
+ self.nodes[0].generate(1)
+ assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
+
+
+ # Mine enough blocks to activate segwit.
+ # TODO: we could verify that activation only happens at the right threshold
+ # of signalling blocks, rather than just at the right period boundary.
+ def advance_to_segwit_active(self):
+ assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
+ height = self.nodes[0].getblockcount()
+ self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2)
+ assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
+ self.nodes[0].generate(1)
+ assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
+
+
+ # This test can only be run after segwit has activated
+ def test_witness_commitments(self):
+ self.log.info("Testing witness commitments")
+
+ # First try a correct witness commitment.
+ block = self.build_next_block()
+ add_witness_commitment(block)
+ block.solve()
+
+ # Test the test -- witness serialization should be different
+ assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
+
+ # This empty block should be valid.
+ self.test_node.test_witness_block(block, accepted=True)
+
+ # Try to tweak the nonce
+ block_2 = self.build_next_block()
+ add_witness_commitment(block_2, nonce=28)
+ block_2.solve()
+
+ # The commitment should have changed!
+ assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
+
+ # This should also be valid.
+ self.test_node.test_witness_block(block_2, accepted=True)
+
+ # Now test commitments with actual transactions
+ assert (len(self.utxo) > 0)
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+
+ # Let's construct a witness program
+ witness_program = CScript([OP_TRUE])
+ witness_hash = sha256(witness_program)
+ scriptPubKey = CScript([OP_0, witness_hash])
+ tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
+ tx.rehash()
+
+ # tx2 will spend tx1, and send back to a regular anyone-can-spend address
+ tx2 = CTransaction()
+ tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
+ tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
+ tx2.rehash()
+
+ block_3 = self.build_next_block()
+ self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
+ # Add an extra OP_RETURN output that matches the witness commitment template,
+ # even though it has extra data after the incorrect commitment.
+ # This block should fail.
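+ # (Consensus uses the *last* coinbase output matching the commitment
+ # pattern, so this bogus commitment shadows the valid one.)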
+ block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
+ block_3.vtx[0].rehash()
+ block_3.hashMerkleRoot = block_3.calc_merkle_root()
+ block_3.rehash()
+ block_3.solve()
+
+ self.test_node.test_witness_block(block_3, accepted=False)
+
+ # Add a different commitment with different nonce, but in the
+ # right location, and with some funds burned(!).
+ # This should succeed (nValue shouldn't affect finding the
+ # witness commitment).
+ add_witness_commitment(block_3, nonce=0)
+ block_3.vtx[0].vout[0].nValue -= 1
+ block_3.vtx[0].vout[-1].nValue += 1
+ block_3.vtx[0].rehash()
+ block_3.hashMerkleRoot = block_3.calc_merkle_root()
+ block_3.rehash()
+ assert(len(block_3.vtx[0].vout) == 4) # 3 OP_RETURN outputs plus the coinbase payout
+ block_3.solve()
+ self.test_node.test_witness_block(block_3, accepted=True)
+
+ # Finally test that a block with no witness transactions can
+ # omit the commitment.
+ block_4 = self.build_next_block()
+ tx3 = CTransaction()
+ tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
+ tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
+ tx3.rehash()
+ block_4.vtx.append(tx3)
+ block_4.hashMerkleRoot = block_4.calc_merkle_root()
+ block_4.solve()
+ self.test_node.test_witness_block(block_4, with_witness=False, accepted=True)
+
+ # Update available utxo's for use in later test.
+ self.utxo.pop(0)
+ self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
+
+
+ def test_block_malleability(self):
+ self.log.info("Testing witness block malleability")
+
+ # Make sure that a block that has too big a virtual size
+ # because of a too-large coinbase witness is not permanently
+ # marked bad.
+ block = self.build_next_block()
+ add_witness_commitment(block)
+ block.solve()
+
+ block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
+ assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
+
+ # We can't send over the p2p network, because this is too big to relay
+ # TODO: repeat this test with a block that can be relayed
+ self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
+
+ assert(self.nodes[0].getbestblockhash() != block.hash)
+
+ block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
+ assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
+ self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
+
+ assert(self.nodes[0].getbestblockhash() == block.hash)
+
+ # Now make sure that malleating the witness nonce doesn't
+ # result in a block permanently marked bad.
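+ # (The nonce lives only in the coinbase witness, which is not covered by
+ # the block hash, so a peer could malleate it in transit.)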
+ block = self.build_next_block()
+ add_witness_commitment(block)
+ block.solve()
+
+ # Change the nonce -- should not cause the block to be permanently
+ # failed
+ block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
+ self.test_node.test_witness_block(block, accepted=False)
+
+ # Changing the witness nonce doesn't change the block hash
+ block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
+ self.test_node.test_witness_block(block, accepted=True)
+
+
+ def test_witness_block_size(self):
+ self.log.info("Testing witness block size limit")
+ # TODO: Test that non-witness carrying blocks can't exceed 1MB
+ # Skipping this test for now; this is covered in p2p-fullblocktest.py
+
+ # Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
+ block = self.build_next_block()
+
+ assert(len(self.utxo) > 0)
+
+ # Create a P2WSH transaction.
+ # The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
+ # This should give us plenty of room to tweak the spending tx's
+ # virtual size.
+ NUM_DROPS = 200 # 201 max ops per script!
+ NUM_OUTPUTS = 50
+
+ witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE])
+ witness_hash = uint256_from_str(sha256(witness_program))
+ scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)])
+
+ prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
+ value = self.utxo[0].nValue
+
+ parent_tx = CTransaction()
+ parent_tx.vin.append(CTxIn(prevout, b""))
+ child_value = int(value/NUM_OUTPUTS)
+ for i in range(NUM_OUTPUTS):
+ parent_tx.vout.append(CTxOut(child_value, scriptPubKey))
+ parent_tx.vout[0].nValue -= 50000
+ assert(parent_tx.vout[0].nValue > 0)
+ parent_tx.rehash()
+
+ child_tx = CTransaction()
+ for i in range(NUM_OUTPUTS):
+ child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
+ child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
+ for i in range(NUM_OUTPUTS):
+ child_tx.wit.vtxinwit.append(CTxInWitness())
+ child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program]
+ child_tx.rehash()
+ self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
+
+ vsize = get_virtual_size(block)
+ additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4
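+ # Witness bytes count 1 toward block weight while base bytes count 4, so
+ # four extra witness bytes add one vbyte (vsize = ceil(weight / 4)).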
+ i = 0
+ while additional_bytes > 0:
+ # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
+ extra_bytes = min(additional_bytes+1, 55)
+ block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
+ additional_bytes -= extra_bytes
+ i += 1
+
+ block.vtx[0].vout.pop() # Remove old commitment
+ add_witness_commitment(block)
+ block.solve()
+ vsize = get_virtual_size(block)
+ assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
+ # Make sure that our test case would exceed the old max-network-message
+ # limit
+ assert(len(block.serialize(True)) > 2*1024*1024)
+
+ self.test_node.test_witness_block(block, accepted=False)
+
+ # Now resize the second transaction to make the block fit.
+ cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
+ block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1)
+ block.vtx[0].vout.pop()
+ add_witness_commitment(block)
+ block.solve()
+ assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
+
+ self.test_node.test_witness_block(block, accepted=True)
+
+ # Update available utxo's
+ self.utxo.pop(0)
+ self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
+
+
+ # submitblock will try to add the nonce automatically, so that mining
+ # software doesn't need to worry about doing so itself.
+ def test_submit_block(self):
+ block = self.build_next_block()
+
+ # Try using a custom nonce and then don't supply it.
+ # This shouldn't possibly work.
+ add_witness_commitment(block, nonce=1)
+ block.vtx[0].wit = CTxWitness() # drop the nonce
+ block.solve()
+ self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
+ assert(self.nodes[0].getbestblockhash() != block.hash)
+
+ # Now redo commitment with the standard nonce, but let bitcoind fill it in.
+ add_witness_commitment(block, nonce=0)
+ block.vtx[0].wit = CTxWitness()
+ block.solve()
+ self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
+ assert_equal(self.nodes[0].getbestblockhash(), block.hash)
+
+ # This time, add a tx with non-empty witness, but don't supply
+ # the commitment.
+ block_2 = self.build_next_block()
+
+ add_witness_commitment(block_2)
+
+ block_2.solve()
+
+ # Drop commitment and nonce -- submitblock should not fill in.
+ block_2.vtx[0].vout.pop()
+ block_2.vtx[0].wit = CTxWitness()
+
+ self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
+ # Tip should not advance!
+ assert(self.nodes[0].getbestblockhash() != block_2.hash)
+
+
+ # Consensus tests of extra witness data in a transaction.
+ def test_extra_witness_data(self):
+ self.log.info("Testing extra witness data in tx")
+
+ assert(len(self.utxo) > 0)
+
+ block = self.build_next_block()
+
+ witness_program = CScript([OP_DROP, OP_TRUE])
+ witness_hash = sha256(witness_program)
+ scriptPubKey = CScript([OP_0, witness_hash])
+
+ # First try extra witness data on a tx that doesn't require a witness
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+ tx.vout.append(CTxOut(self.utxo[0].nValue-2000, scriptPubKey))
+ tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
+ tx.wit.vtxinwit.append(CTxInWitness())
+ tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
+ tx.rehash()
+ self.update_witness_block_with_transactions(block, [tx])
+
+ # Extra witness data should not be allowed.
+ self.test_node.test_witness_block(block, accepted=False)
+
+ # Try extra signature data. Ok if we're not spending a witness output.
+ block.vtx[1].wit.vtxinwit = []
+ block.vtx[1].vin[0].scriptSig = CScript([OP_0])
+ block.vtx[1].rehash()
+ add_witness_commitment(block)
+ block.solve()
+
+ self.test_node.test_witness_block(block, accepted=True)
+
+ # Now try extra witness/signature data on an input that DOES require a
+ # witness
+ tx2 = CTransaction()
+ tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
+ tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
+ tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
+ tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ]
+ tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ]
+
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx2])
+
+ # This has extra witness data, so it should fail.
+ self.test_node.test_witness_block(block, accepted=False)
+
+ # Now get rid of the extra witness, but add extra scriptSig data
+ tx2.vin[0].scriptSig = CScript([OP_TRUE])
+ tx2.vin[1].scriptSig = CScript([OP_TRUE])
+ tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
+ tx2.wit.vtxinwit[1].scriptWitness.stack = []
+ tx2.rehash()
+ add_witness_commitment(block)
+ block.solve()
+
+ # This has extra signature data for a witness input, so it should fail.
+ self.test_node.test_witness_block(block, accepted=False)
+
+ # Now get rid of the extra scriptsig on the witness input, and verify
+ # success (even with extra scriptsig data in the non-witness input)
+ tx2.vin[0].scriptSig = b""
+ tx2.rehash()
+ add_witness_commitment(block)
+ block.solve()
+
+ self.test_node.test_witness_block(block, accepted=True)
+
+ # Update utxo for later tests
+ self.utxo.pop(0)
+ self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
+
+
+ def test_max_witness_push_length(self):
+ ''' Should only allow up to 520 byte pushes in witness stack '''
+ self.log.info("Testing maximum witness push size")
+ MAX_SCRIPT_ELEMENT_SIZE = 520
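+ # Consensus limit on a single stack element; under segwit it applies to
+ # each witness stack item as well.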
+ assert(len(self.utxo))
+
+ block = self.build_next_block()
+
+ witness_program = CScript([OP_DROP, OP_TRUE])
+ witness_hash = sha256(witness_program)
+ scriptPubKey = CScript([OP_0, witness_hash])
+
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+ tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
+ tx.rehash()
+
+ tx2 = CTransaction()
+ tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
+ tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ # First try a 521-byte stack element
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ]
+ tx2.rehash()
+
+ self.update_witness_block_with_transactions(block, [tx, tx2])
+ self.test_node.test_witness_block(block, accepted=False)
+
+ # Now reduce the length of the stack element
+ tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
+
+ add_witness_commitment(block)
+ block.solve()
+ self.test_node.test_witness_block(block, accepted=True)
+
+ # Update the utxo for later tests
+ self.utxo.pop()
+ self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
+
+ def test_max_witness_program_length(self):
+ # Can create witness outputs that are long, but can't be greater than
+ # 10k bytes to successfully spend
+ self.log.info("Testing maximum witness program length")
+ assert(len(self.utxo))
+ MAX_PROGRAM_LENGTH = 10000
+
+ # This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
+ long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE])
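+ # Each 520-byte push costs 523 bytes (PUSHDATA2 opcode + 2 length bytes + data):
+ # 19*523 + 63 + 1 = 10001 bytes, one over the limit.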
+ assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1)
+ long_witness_hash = sha256(long_witness_program)
+ long_scriptPubKey = CScript([OP_0, long_witness_hash])
+
+ block = self.build_next_block()
+
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+ tx.vout.append(CTxOut(self.utxo[0].nValue-1000, long_scriptPubKey))
+ tx.rehash()
+
+ tx2 = CTransaction()
+ tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
+ tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program]
+ tx2.rehash()
+
+ self.update_witness_block_with_transactions(block, [tx, tx2])
+
+ self.test_node.test_witness_block(block, accepted=False)
+
+ # Try again with one less byte in the witness program
+ witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
+ assert(len(witness_program) == MAX_PROGRAM_LENGTH)
+ witness_hash = sha256(witness_program)
+ scriptPubKey = CScript([OP_0, witness_hash])
+
+ tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey)
+ tx.rehash()
+ tx2.vin[0].prevout.hash = tx.sha256
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program]
+ tx2.rehash()
+ block.vtx = [block.vtx[0]]
+ self.update_witness_block_with_transactions(block, [tx, tx2])
+ self.test_node.test_witness_block(block, accepted=True)
+
+ self.utxo.pop()
+ self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
+
+
+ def test_witness_input_length(self):
+ ''' Ensure that vin length must match vtxinwit length '''
+ self.log.info("Testing witness input length")
+ assert(len(self.utxo))
+
+ witness_program = CScript([OP_DROP, OP_TRUE])
+ witness_hash = sha256(witness_program)
+ scriptPubKey = CScript([OP_0, witness_hash])
+
+ # Create a transaction that splits our utxo into many outputs
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+ nValue = self.utxo[0].nValue
+ for i in range(10):
+ tx.vout.append(CTxOut(int(nValue/10), scriptPubKey))
+ tx.vout[0].nValue -= 1000
+ assert(tx.vout[0].nValue >= 0)
+
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx])
+ self.test_node.test_witness_block(block, accepted=True)
+
+ # Try various ways to spend tx that should all break.
+ # This "broken" transaction serializer will not normalize
+ # the length of vtxinwit.
+ class BrokenCTransaction(CTransaction):
+ def serialize_with_witness(self):
+ flags = 0
+ if not self.wit.is_null():
+ flags |= 1
+ r = b""
+ r += struct.pack("<i", self.nVersion)
+ if flags:
+ dummy = []
+ r += ser_vector(dummy)
+ r += struct.pack("<B", flags)
+ r += ser_vector(self.vin)
+ r += ser_vector(self.vout)
+ if flags & 1:
+ r += self.wit.serialize()
+ r += struct.pack("<I", self.nLockTime)
+ return r
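+ # This matches the BIP144 encoding (marker 0x00, flag 0x01); the only
+ # difference from CTransaction is that vtxinwit is serialized as-is
+ # rather than being padded/truncated to len(vin).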
+
+ tx2 = BrokenCTransaction()
+ for i in range(10):
+ tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
+ tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE])))
+
+ # First try using a too long vtxinwit
+ for i in range(11):
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
+
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx2])
+ self.test_node.test_witness_block(block, accepted=False)
+
+ # Now try using a too short vtxinwit
+ tx2.wit.vtxinwit.pop()
+ tx2.wit.vtxinwit.pop()
+
+ block.vtx = [block.vtx[0]]
+ self.update_witness_block_with_transactions(block, [tx2])
+ self.test_node.test_witness_block(block, accepted=False)
+
+ # Now make one of the intermediate witnesses be incorrect
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
+ tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ]
+
+ block.vtx = [block.vtx[0]]
+ self.update_witness_block_with_transactions(block, [tx2])
+ self.test_node.test_witness_block(block, accepted=False)
+
+ # Fix the broken witness and the block should be accepted.
+ tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
+ block.vtx = [block.vtx[0]]
+ self.update_witness_block_with_transactions(block, [tx2])
+ self.test_node.test_witness_block(block, accepted=True)
+
+ self.utxo.pop()
+ self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
+
+
+ def test_witness_tx_relay_before_segwit_activation(self):
+ self.log.info("Testing relay of witness transactions")
+ # Generate a transaction that doesn't require a witness, but send it
+ # with a witness. Should be rejected for premature-witness, but should
+ # not be added to recently rejected list.
+ assert(len(self.utxo))
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+ tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
+ tx.wit.vtxinwit.append(CTxInWitness())
+ tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
+ tx.rehash()
+
+ tx_hash = tx.sha256
+ tx_value = tx.vout[0].nValue
+
+ # Verify that if a peer doesn't set nServices to include NODE_WITNESS,
+ # the getdata is just for the non-witness portion.
+ self.old_node.announce_tx_and_wait_for_getdata(tx)
+ assert(self.old_node.last_getdata.inv[0].type == 1)
+
+ # Since we haven't delivered the tx yet, inv'ing the same tx from
+ # a witness transaction ought not result in a getdata.
+ try:
+ self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
+ self.log.error("Error: duplicate tx getdata!")
+ assert(False)
+ except AssertionError as e:
+ pass
+
+ # Delivering this transaction with witness should fail (no matter who
+ # it's from)
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+ assert_equal(len(self.nodes[1].getrawmempool()), 0)
+ self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
+ self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
+
+ # But eliminating the witness should fix it
+ self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
+
+ # Cleanup: mine the first transaction and update utxo
+ self.nodes[0].generate(1)
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+
+ self.utxo.pop(0)
+ self.utxo.append(UTXO(tx_hash, 0, tx_value))
+
+
+ # After segwit activates, verify that mempool:
+ # - rejects transactions with unnecessary/extra witnesses
+ # - accepts transactions with valid witnesses
+ # and that witness transactions are relayed to non-upgraded peers.
+ def test_tx_relay_after_segwit_activation(self):
+ self.log.info("Testing relay of witness transactions")
+ # Generate a transaction that doesn't require a witness, but send it
+ # with a witness. Should be rejected because we can't use a witness
+ # when spending a non-witness output.
+ assert(len(self.utxo))
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+ tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
+ tx.wit.vtxinwit.append(CTxInWitness())
+ tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
+ tx.rehash()
+
+ tx_hash = tx.sha256
+
+ # Verify that unnecessary witnesses are rejected.
+ self.test_node.announce_tx_and_wait_for_getdata(tx)
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+ self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
+
+ # Verify that removing the witness succeeds.
+ self.test_node.announce_tx_and_wait_for_getdata(tx)
+ self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
+
+ # Now try to add extra witness data to a valid witness tx.
+ witness_program = CScript([OP_TRUE])
+ witness_hash = sha256(witness_program)
+ scriptPubKey = CScript([OP_0, witness_hash])
+ tx2 = CTransaction()
+ tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
+ tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
+ tx2.rehash()
+
+ tx3 = CTransaction()
+ tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
+ tx3.wit.vtxinwit.append(CTxInWitness())
+
+ # Add a witness too large for IsStandard, and check that it does not enter the reject filter
+ p2sh_program = CScript([OP_TRUE])
+ p2sh_pubkey = hash160(p2sh_program)
+ witness_program2 = CScript([b'a'*400000])
+ tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
+ tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
+ tx3.rehash()
+
+ # Node will not be blinded to the transaction
+ self.std_node.announce_tx_and_wait_for_getdata(tx3)
+ self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
+ self.std_node.announce_tx_and_wait_for_getdata(tx3)
+ self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
+
+ # Remove witness stuffing, instead add extra witness push on stack
+ tx3.vout[0] = CTxOut(tx2.vout[0].nValue-1000, CScript([OP_TRUE]))
+ tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
+ tx3.rehash()
+
+ self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
+ self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
+
+ # Get rid of the extra witness, and verify acceptance.
+ tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
+ # Also check that old_node gets a tx announcement, even though this is
+ # a witness transaction.
+ self.old_node.wait_for_inv(CInv(1, tx2.sha256)) # wait until tx2 was inv'ed
+ self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
+ self.old_node.wait_for_inv(CInv(1, tx3.sha256))
+
+ # Test that getrawtransaction returns correct witness information
+ # hash, size, vsize
+ raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
+ assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
+ assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
+ vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) // 4
+ assert_equal(raw_tx["vsize"], vsize)
+ assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
+ assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
+ assert(vsize != raw_tx["size"])
+
+ # Cleanup: mine the transactions and update utxo for next test
+ self.nodes[0].generate(1)
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+
+ self.utxo.pop(0)
+ self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
+
+
+ # Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG
+ # This is true regardless of segwit activation.
+ # Also test that we don't ask for blocks from unupgraded peers
+ def test_block_relay(self, segwit_activated):
+ self.log.info("Testing block relay")
+
+ blocktype = 2|MSG_WITNESS_FLAG
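+ # MSG_WITNESS_BLOCK: inv type 2 (MSG_BLOCK) with the witness flag bit set.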
+
+ # test_node has set NODE_WITNESS, so all getdata requests should be for
+ # witness blocks.
+ # Test announcing a block via inv results in a getdata, and that
+ # announcing a version 4 or random VB block with a header results in a getdata
+ block1 = self.build_next_block()
+ block1.solve()
+
+ self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
+ assert(self.test_node.last_getdata.inv[0].type == blocktype)
+ self.test_node.test_witness_block(block1, True)
+
+ block2 = self.build_next_block(nVersion=4)
+ block2.solve()
+
+ self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
+ assert(self.test_node.last_getdata.inv[0].type == blocktype)
+ self.test_node.test_witness_block(block2, True)
+
+ block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
+ block3.solve()
+ self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
+ assert(self.test_node.last_getdata.inv[0].type == blocktype)
+ self.test_node.test_witness_block(block3, True)
+
+ # Check that we can getdata for witness blocks or regular blocks,
+ # and the right thing happens.
+ if not segwit_activated:
+ # Before activation, we should be able to request old blocks with
+ # or without witness, and they should be the same.
+ chain_height = self.nodes[0].getblockcount()
+ # Pick 10 random blocks on main chain, and verify that getdata's
+ # for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
+ all_heights = list(range(chain_height+1))
+ random.shuffle(all_heights)
+ all_heights = all_heights[0:10]
+ for height in all_heights:
+ block_hash = self.nodes[0].getblockhash(height)
+ rpc_block = self.nodes[0].getblock(block_hash, False)
+ block_hash = int(block_hash, 16)
+ block = self.test_node.request_block(block_hash, 2)
+ wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG)
+ assert_equal(block.serialize(True), wit_block.serialize(True))
+ assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
+ else:
+ # After activation, witness blocks and non-witness blocks should
+ # be different. Verify rpc getblock() returns witness blocks, while
+ # getdata respects the requested type.
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [])
+ # This gives us a witness commitment.
+ assert(len(block.vtx[0].wit.vtxinwit) == 1)
+ assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
+ self.test_node.test_witness_block(block, accepted=True)
+ # Now try to retrieve it...
+ rpc_block = self.nodes[0].getblock(block.hash, False)
+ non_wit_block = self.test_node.request_block(block.sha256, 2)
+ wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG)
+ assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
+ assert_equal(wit_block.serialize(False), non_wit_block.serialize())
+ assert_equal(wit_block.serialize(True), block.serialize(True))
+
+ # Test size, vsize, weight
+ rpc_details = self.nodes[0].getblock(block.hash, True)
+ assert_equal(rpc_details["size"], len(block.serialize(True)))
+ assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
+ weight = 3*len(block.serialize(False)) + len(block.serialize(True))
+ assert_equal(rpc_details["weight"], weight)
+
+ # Upgraded node should not ask for blocks from unupgraded peers
+ block4 = self.build_next_block(nVersion=4)
+ block4.solve()
+ self.old_node.getdataset = set()
+ # Blocks can be requested via direct-fetch (immediately upon processing
+ # the announcement) or via parallel download (with an indeterminate delay),
+ # so testing that a block is NOT requested would otherwise mean guessing
+ # how long to sleep before checking. Avoid the sleep() by exploiting the
+ # fact that transaction getdata's are processed after block getdata's:
+ # announce a transaction as well, and once its getdata arrives, any block
+ # getdata would already have been received.
+ self.old_node.announce_block(block4, use_header=False)
+ self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
+ assert(block4.sha256 not in self.old_node.getdataset)
+
+ # V0 segwit outputs should be standard after activation, but not before.
+ def test_standardness_v0(self, segwit_activated):
+ self.log.info("Testing standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
+ assert(len(self.utxo))
+
+ witness_program = CScript([OP_TRUE])
+ witness_hash = sha256(witness_program)
+ scriptPubKey = CScript([OP_0, witness_hash])
+
+ p2sh_pubkey = hash160(witness_program)
+ p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
+
+ # First prepare a p2sh output (so that spending it will pass standardness)
+ p2sh_tx = CTransaction()
+ p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
+ p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-1000, p2sh_scriptPubKey)]
+ p2sh_tx.rehash()
+
+ # Mine it on test_node to create the confirmed output.
+ self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+
+ # Now test standardness of v0 P2WSH outputs.
+ # Start by creating a transaction with two outputs.
+ tx = CTransaction()
+ tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
+ tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-10000, scriptPubKey)]
+ tx.vout.append(CTxOut(8000, scriptPubKey)) # Might burn this later
+ tx.rehash()
+
+ self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
+
+ # Now create something that looks like a P2WPKH output (a v0 program with a 20-byte hash). This won't be spendable.
+ scriptPubKey = CScript([OP_0, hash160(witness_hash)])
+ tx2 = CTransaction()
+ if segwit_activated:
+ # if tx was accepted, then we spend the second output.
+ tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
+ tx2.vout = [CTxOut(7000, scriptPubKey)]
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
+ else:
+ # if tx wasn't accepted, we just re-spend the p2sh output we started with.
+ tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
+ tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, scriptPubKey)]
+ tx2.rehash()
+
+ self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
+
+ # Now update self.utxo for later tests.
+ tx3 = CTransaction()
+ if segwit_activated:
+ # tx and tx2 were both accepted. Don't bother trying to reclaim the
+ # P2PKH output; just send tx's first output back to an anyone-can-spend.
+ sync_mempools([self.nodes[0], self.nodes[1]])
+ tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
+ tx3.vout = [CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))]
+ tx3.wit.vtxinwit.append(CTxInWitness())
+ tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
+ tx3.rehash()
+ self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
+ else:
+ # tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
+ tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
+ tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, witness_program)]
+ tx3.rehash()
+ self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
+
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+ self.utxo.pop(0)
+ self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
+ assert_equal(len(self.nodes[1].getrawmempool()), 0)
+
+
+ # Verify that future segwit upgraded transactions are non-standard,
+ # but valid in blocks. Can run this before and after segwit activation.
+ def test_segwit_versions(self):
+ self.log.info("Testing standardness/consensus for segwit versions (0-16)")
+ assert(len(self.utxo))
+ NUM_TESTS = 17 # will test OP_0, OP_1, ..., OP_16
+ if (len(self.utxo) < NUM_TESTS):
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+ split_value = (self.utxo[0].nValue - 4000) // NUM_TESTS
+ for i in range(NUM_TESTS):
+ tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
+ tx.rehash()
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx])
+ self.test_node.test_witness_block(block, accepted=True)
+ self.utxo.pop(0)
+ for i in range(NUM_TESTS):
+ self.utxo.append(UTXO(tx.sha256, i, split_value))
+
+ sync_blocks(self.nodes)
+ temp_utxo = []
+ tx = CTransaction()
+ count = 0
+ witness_program = CScript([OP_TRUE])
+ witness_hash = sha256(witness_program)
+ assert_equal(len(self.nodes[1].getrawmempool()), 0)
+ for version in list(range(OP_1, OP_16+1)) + [OP_0]:
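+ # OP_0 goes last so that `tx` exits the loop holding the v0 output, which
+ # the v0->v1 spend below consumes.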
+ count += 1
+ # First try to spend to a future version segwit scriptPubKey.
+ scriptPubKey = CScript([CScriptOp(version), witness_hash])
+ tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
+ tx.vout = [CTxOut(self.utxo[0].nValue-1000, scriptPubKey)]
+ tx.rehash()
+ self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
+ self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
+ self.utxo.pop(0)
+ temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
+
+ self.nodes[0].generate(1) # Mine all the transactions
+ sync_blocks(self.nodes)
+ assert(len(self.nodes[0].getrawmempool()) == 0)
+
+ # Finally, verify that version 0 -> version 1 transactions
+ # are non-standard
+ scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
+ tx2 = CTransaction()
+ tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
+ tx2.vout = [CTxOut(tx.vout[0].nValue-1000, scriptPubKey)]
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
+ tx2.rehash()
+ # Gets accepted to test_node, because standardness of outputs isn't
+ # checked with fRequireStandard
+ self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
+ self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False)
+ temp_utxo.pop() # last entry in temp_utxo was the output we just spent
+ temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
+
+ # Spend everything in temp_utxo back to an OP_TRUE output.
+ tx3 = CTransaction()
+ total_value = 0
+ for i in temp_utxo:
+ tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
+ tx3.wit.vtxinwit.append(CTxInWitness())
+ total_value += i.nValue
+ tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
+ tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
+ tx3.rehash()
+ # Spending a higher version witness output is not allowed by policy,
+ # even with fRequireStandard=false.
+ self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
+ self.test_node.sync_with_ping()
+ with mininode_lock:
+ assert(b"reserved for soft-fork upgrades" in self.test_node.last_reject.reason)
+
+ # Building a block with the transaction must be valid, however.
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx2, tx3])
+ self.test_node.test_witness_block(block, accepted=True)
+ sync_blocks(self.nodes)
+
+ # Add utxo to our list
+ self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
+
+
+ def test_premature_coinbase_witness_spend(self):
+ self.log.info("Testing premature coinbase witness spend")
+ block = self.build_next_block()
+ # Change the output of the block to be a witness output.
+ witness_program = CScript([OP_TRUE])
+ witness_hash = sha256(witness_program)
+ scriptPubKey = CScript([OP_0, witness_hash])
+ block.vtx[0].vout[0].scriptPubKey = scriptPubKey
+ # This next line will rehash the coinbase and update the merkle
+ # root, and solve.
+ self.update_witness_block_with_transactions(block, [])
+ self.test_node.test_witness_block(block, accepted=True)
+
+ spend_tx = CTransaction()
+ spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
+ spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
+ spend_tx.wit.vtxinwit.append(CTxInWitness())
+ spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
+ spend_tx.rehash()
+
+ # Now test a premature spend.
+ self.nodes[0].generate(98)
+ sync_blocks(self.nodes)
+ block2 = self.build_next_block()
+ self.update_witness_block_with_transactions(block2, [spend_tx])
+ self.test_node.test_witness_block(block2, accepted=False)
+
+ # Advancing one more block should allow the spend.
+ self.nodes[0].generate(1)
+ block2 = self.build_next_block()
+ self.update_witness_block_with_transactions(block2, [spend_tx])
+ self.test_node.test_witness_block(block2, accepted=True)
+ sync_blocks(self.nodes)
+
+
+ def test_signature_version_1(self):
+ self.log.info("Testing segwit signature hash version 1")
+ key = CECKey()
+ key.set_secretbytes(b"9")
+ pubkey = CPubKey(key.get_pubkey())
+
+ witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
+ witness_hash = sha256(witness_program)
+ scriptPubKey = CScript([OP_0, witness_hash])
+
+ # First create a witness output for use in the tests.
+ assert(len(self.utxo))
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+ tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
+ tx.rehash()
+
+ self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
+ # Mine this transaction in preparation for following tests.
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx])
+ self.test_node.test_witness_block(block, accepted=True)
+ sync_blocks(self.nodes)
+ self.utxo.pop(0)
+
+ # Test each hashtype
+ prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
+ for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
+ for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
+ hashtype |= sigflag
+ block = self.build_next_block()
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
+ tx.vout.append(CTxOut(prev_utxo.nValue - 1000, scriptPubKey))
+ tx.wit.vtxinwit.append(CTxInWitness())
+ # Too-large input value
+ sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
+ self.update_witness_block_with_transactions(block, [tx])
+ self.test_node.test_witness_block(block, accepted=False)
+
+ # Too-small input value
+ sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
+ block.vtx.pop() # remove last tx
+ self.update_witness_block_with_transactions(block, [tx])
+ self.test_node.test_witness_block(block, accepted=False)
+
+ # Now try correct value
+ sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
+ block.vtx.pop()
+ self.update_witness_block_with_transactions(block, [tx])
+ self.test_node.test_witness_block(block, accepted=True)
+
+ prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
+
+ # Test combinations of signature hashes.
+ # Split the utxo into a lot of outputs.
+ # Randomly choose up to 10 to spend, sign with different hashtypes, and
+ # output to a random number of outputs. Repeat NUM_TESTS times.
+ # Ensure that we've tested a situation where we use SIGHASH_SINGLE with
+ # an input index > number of outputs.
+ NUM_TESTS = 500
+ temp_utxos = []
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
+ split_value = prev_utxo.nValue // NUM_TESTS
+ for i in range(NUM_TESTS):
+ tx.vout.append(CTxOut(split_value, scriptPubKey))
+ tx.wit.vtxinwit.append(CTxInWitness())
+ sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
+ for i in range(NUM_TESTS):
+ temp_utxos.append(UTXO(tx.sha256, i, split_value))
+
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx])
+ self.test_node.test_witness_block(block, accepted=True)
+
+ block = self.build_next_block()
+ used_sighash_single_out_of_bounds = False
+ for i in range(NUM_TESTS):
+ # Ping regularly to keep the connection alive
+ if (not i % 100):
+ self.test_node.sync_with_ping()
+ # Choose random number of inputs to use.
+ num_inputs = random.randint(1, 10)
+ # Create a slight bias for producing more utxos
+ num_outputs = random.randint(1, 11)
+ random.shuffle(temp_utxos)
+ assert(len(temp_utxos) > num_inputs)
+ tx = CTransaction()
+ total_value = 0
+ for i in range(num_inputs):
+ tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
+ tx.wit.vtxinwit.append(CTxInWitness())
+ total_value += temp_utxos[i].nValue
+ split_value = total_value // num_outputs
+ for i in range(num_outputs):
+ tx.vout.append(CTxOut(split_value, scriptPubKey))
+ for i in range(num_inputs):
+ # Now try to sign each input, using a random hashtype.
+ anyonecanpay = 0
+ if random.randint(0, 1):
+ anyonecanpay = SIGHASH_ANYONECANPAY
+ hashtype = random.randint(1, 3) | anyonecanpay
+ sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
+ if (hashtype & 0x1f) == SIGHASH_SINGLE and i >= num_outputs:
+ used_sighash_single_out_of_bounds = True
+ tx.rehash()
+ for i in range(num_outputs):
+ temp_utxos.append(UTXO(tx.sha256, i, split_value))
+ temp_utxos = temp_utxos[num_inputs:]
+
+ block.vtx.append(tx)
+
+ # Test the block periodically, if we're close to maxblocksize
+ if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
+ self.update_witness_block_with_transactions(block, [])
+ self.test_node.test_witness_block(block, accepted=True)
+ block = self.build_next_block()
+
+ if (not used_sighash_single_out_of_bounds):
+ self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
+ # Test the transactions we've added to the block
+ if (len(block.vtx) > 1):
+ self.update_witness_block_with_transactions(block, [])
+ self.test_node.test_witness_block(block, accepted=True)
+
+ # Now test witness version 0 P2PKH transactions
+ pubkeyhash = hash160(pubkey)
+ scriptPKH = CScript([OP_0, pubkeyhash])
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
+ tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH))
+ tx.wit.vtxinwit.append(CTxInWitness())
+ sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
+ tx2 = CTransaction()
+ tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
+ tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
+
+ script = GetP2PKHScript(pubkeyhash)
+ sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
+ signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
+
+ # Check that we can't have a scriptSig
+ tx2.vin[0].scriptSig = CScript([signature, pubkey])
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx, tx2])
+ self.test_node.test_witness_block(block, accepted=False)
+
+ # Move the signature to the witness.
+ block.vtx.pop()
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
+ tx2.vin[0].scriptSig = b""
+ tx2.rehash()
+
+ self.update_witness_block_with_transactions(block, [tx2])
+ self.test_node.test_witness_block(block, accepted=True)
+
+ temp_utxos.pop(0)
+
+ # Update self.utxos for later tests. Just spend everything in
+ # temp_utxos to a corresponding entry in self.utxos
+ tx = CTransaction()
+ index = 0
+ for i in temp_utxos:
+ # Just spend to our usual anyone-can-spend output
+ # Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up
+ # the signatures as we go.
+ tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
+ tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE])))
+ tx.wit.vtxinwit.append(CTxInWitness())
+ sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key)
+ index += 1
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx])
+ self.test_node.test_witness_block(block, accepted=True)
+
+ for i in range(len(tx.vout)):
+ self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
+
+
+ # Test P2SH wrapped witness programs.
+ def test_p2sh_witness(self, segwit_activated):
+ self.log.info("Testing P2SH witness transactions")
+
+ assert(len(self.utxo))
+
+ # Prepare the p2sh-wrapped witness output
+ witness_program = CScript([OP_DROP, OP_TRUE])
+ witness_hash = sha256(witness_program)
+ p2wsh_pubkey = CScript([OP_0, witness_hash])
+ p2sh_witness_hash = hash160(p2wsh_pubkey)
+ scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
+ scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
+
+ # Fund the P2SH output
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+ tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
+ tx.rehash()
+
+ # Verify mempool acceptance and block validity
+ self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx])
+ self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated)
+ sync_blocks(self.nodes)
+
+ # Now test attempts to spend the output.
+ spend_tx = CTransaction()
+ spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
+ spend_tx.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
+ spend_tx.rehash()
+
+ # This transaction should not be accepted into the mempool pre- or
+ # post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
+ # will require a witness to spend a witness program regardless of
+ # segwit activation. Note that older bitcoind's that are not
+ # segwit-aware would also reject this for failing CLEANSTACK.
+ self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
+
+ # Try to put the witness script in the scriptSig, should also fail.
+ spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
+ spend_tx.rehash()
+ self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
+
+ # Now put the witness script in the witness, should succeed after
+ # segwit activates.
+ spend_tx.vin[0].scriptSig = scriptSig
+ spend_tx.rehash()
+ spend_tx.wit.vtxinwit.append(CTxInWitness())
+ spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
+
+ # Verify mempool acceptance
+ self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated)
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [spend_tx])
+
+ # If we're before activation, then sending this without witnesses
+ # should be valid. If we're after activation, then sending this with
+ # witnesses should be valid.
+ if segwit_activated:
+ self.test_node.test_witness_block(block, accepted=True)
+ else:
+ self.test_node.test_witness_block(block, accepted=True, with_witness=False)
+
+ # Update self.utxo
+ self.utxo.pop(0)
+ self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
+
+ # Test the behavior of starting up a segwit-aware node after the softfork
+ # has activated. As segwit requires different block data than pre-segwit
+ # nodes would have stored, this requires special handling.
+ # To enable this test, pass --oldbinary=<path-to-pre-segwit-bitcoind> to
+ # the test.
+ def test_upgrade_after_activation(self, node, node_id):
+ self.log.info("Testing software upgrade after softfork activation")
+
+ assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind
+
+ # Make sure the nodes are all up
+ sync_blocks(self.nodes)
+
+ # Restart with the new binary
+ stop_node(node, node_id)
+ self.nodes[node_id] = start_node(node_id, self.options.tmpdir)
+ connect_nodes(self.nodes[0], node_id)
+
+ sync_blocks(self.nodes)
+
+ # Make sure that this peer thinks segwit has activated.
+ assert(get_bip9_status(node, 'segwit')['status'] == "active")
+
+ # Make sure this peer's blocks match those of node0.
+ height = node.getblockcount()
+ while height >= 0:
+ block_hash = node.getblockhash(height)
+ assert_equal(block_hash, self.nodes[0].getblockhash(height))
+ assert_equal(self.nodes[0].getblock(block_hash), node.getblock(block_hash))
+ height -= 1
+
+
+ def test_witness_sigops(self):
+ '''Ensure sigop counting is correct inside witnesses.'''
+ self.log.info("Testing sigops limit")
+
+ assert(len(self.utxo))
+
+ # Keep this under MAX_OPS_PER_SCRIPT (201)
+ witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
+ witness_hash = sha256(witness_program)
+ scriptPubKey = CScript([OP_0, witness_hash])
+
+ sigops_per_script = 20*5 + 193*1
+ # We'll produce 2 extra outputs, one with a program that would take us
+ # over max sig ops, and one with a program that would exactly reach max
+ # sig ops
+ outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
+ extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
+
+ # We chose the number of checkmultisigs/checksigs to make this work:
+ assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
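+ # Worked example, assuming MAX_SIGOP_COST = 80000 (the BIP141 block sigop
+ # cost limit): sigops_per_script = 20*5 + 193 = 293, so outputs =
+ # 80000 // 293 + 2 = 275 and extra_sigops_available = 80000 % 293 = 11.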
+
+ # This script, when spent with the first
+ # N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
+ # would push us just over the block sigop limit.
+ witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF])
+ witness_hash_toomany = sha256(witness_program_toomany)
+ scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany])
+
+ # If we spend this script instead, we would exactly reach our sigop
+ # limit (for witness sigops).
+ witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF])
+ witness_hash_justright = sha256(witness_program_justright)
+ scriptPubKey_justright = CScript([OP_0, witness_hash_justright])
+
+ # First split our available utxo into a bunch of outputs
+ split_value = self.utxo[0].nValue // outputs
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+ for i in range(outputs):
+ tx.vout.append(CTxOut(split_value, scriptPubKey))
+ tx.vout[-2].scriptPubKey = scriptPubKey_toomany
+ tx.vout[-1].scriptPubKey = scriptPubKey_justright
+ tx.rehash()
+
+ block_1 = self.build_next_block()
+ self.update_witness_block_with_transactions(block_1, [tx])
+ self.test_node.test_witness_block(block_1, accepted=True)
+
+ tx2 = CTransaction()
+ # If we try to spend the first n-1 outputs from tx, that should be
+ # too many sigops.
+ total_value = 0
+ for i in range(outputs-1):
+ tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
+ total_value += tx.vout[i].nValue
+ tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
+ tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
+ tx2.rehash()
+
+ block_2 = self.build_next_block()
+ self.update_witness_block_with_transactions(block_2, [tx2])
+ self.test_node.test_witness_block(block_2, accepted=False)
+
+ # Try dropping the last input in tx2, and add an output that has
+ # too many sigops (contributing to legacy sigop count).
+ checksig_count = (extra_sigops_available // 4) + 1
+ scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count)
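+ # Legacy (non-witness) sigops count 4x toward the BIP141 sigop cost limit,
+ # so with extra_sigops_available = 11 this adds (11 // 4) + 1 = 3 checksigs,
+ # i.e. 12 cost units -- one more than the 11 still available.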
+ tx2.vout.append(CTxOut(0, scriptPubKey_checksigs))
+ tx2.vin.pop()
+ tx2.wit.vtxinwit.pop()
+ tx2.vout[0].nValue -= tx.vout[-2].nValue
+ tx2.rehash()
+ block_3 = self.build_next_block()
+ self.update_witness_block_with_transactions(block_3, [tx2])
+ self.test_node.test_witness_block(block_3, accepted=False)
+
+ # If we drop the last checksig in this output, the tx should succeed.
+ block_4 = self.build_next_block()
+ tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
+ tx2.rehash()
+ self.update_witness_block_with_transactions(block_4, [tx2])
+ self.test_node.test_witness_block(block_4, accepted=True)
+
+ # Reset the tip back down for the next test
+ sync_blocks(self.nodes)
+ for x in self.nodes:
+ x.invalidateblock(block_4.hash)
+
+ # Try replacing the last input of tx2 so that it spends the last
+ # output of tx
+ block_5 = self.build_next_block()
+ tx2.vout.pop()
+ tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b""))
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
+ tx2.rehash()
+ self.update_witness_block_with_transactions(block_5, [tx2])
+ self.test_node.test_witness_block(block_5, accepted=True)
+
+ # TODO: test p2sh sigop counting
+
+ def test_getblocktemplate_before_lockin(self):
+ self.log.info("Testing getblocktemplate setting of segwit versionbit (before lockin)")
+ # Node0 is segwit aware, node2 is not.
+ for node in [self.nodes[0], self.nodes[2]]:
+ gbt_results = node.getblocktemplate()
+ block_version = gbt_results['version']
+ # Even though the request doesn't indicate segwit support, a
+ # segwit-aware node will still signal for segwit activation; the
+ # non-segwit node (node2) will not.
+ assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
+ # If we don't specify the segwit rule, then we won't get a default
+ # commitment.
+ assert('default_witness_commitment' not in gbt_results)
+
+ # Workaround:
+ # Can either change the tip, or change the mempool and wait 5 seconds
+ # to trigger a recomputation of getblocktemplate.
+ txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
+ # Using mocktime lets us avoid sleep()
+ sync_mempools(self.nodes)
+ self.nodes[0].setmocktime(int(time.time())+10)
+ self.nodes[2].setmocktime(int(time.time())+10)
+
+ for node in [self.nodes[0], self.nodes[2]]:
+ gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
+ block_version = gbt_results['version']
+ if node == self.nodes[2]:
+ # If this is a non-segwit node, we should still not get a witness
+ # commitment, nor a version bit signalling segwit.
+ assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
+ assert('default_witness_commitment' not in gbt_results)
+ else:
+ # For segwit-aware nodes, check the version bit and the witness
+ # commitment are correct.
+ assert(block_version & (1 << VB_WITNESS_BIT) != 0)
+ assert('default_witness_commitment' in gbt_results)
+ witness_commitment = gbt_results['default_witness_commitment']
+
+ # TODO: this duplicates some code from blocktools.py, would be nice
+ # to refactor.
+ # Check that default_witness_commitment is present.
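+ # Per BIP141, the witness merkle root is computed over wtxids (with the
+ # coinbase's wtxid defined as all zeros), the commitment is
+ # SHA256d(witness_root || witness_nonce) -- here the nonce is zero -- and it
+ # appears in an OP_RETURN output whose data is the 4-byte header 0xaa21a9ed
+ # (WITNESS_COMMITMENT_HEADER) followed by the 32-byte commitment.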
+ block = CBlock()
+ witness_root = block.get_merkle_root([ser_uint256(0), ser_uint256(txid)])
+ check_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(0)))
+ from test_framework.blocktools import WITNESS_COMMITMENT_HEADER
+ output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(check_commitment)
+ script = CScript([OP_RETURN, output_data])
+ assert_equal(witness_commitment, bytes_to_hex_str(script))
+
+ # undo mocktime
+ self.nodes[0].setmocktime(0)
+ self.nodes[2].setmocktime(0)
+
+ # Uncompressed pubkeys are no longer supported in default relay policy,
+ # but (for now) are still valid in blocks.
+ def test_uncompressed_pubkey(self):
+ self.log.info("Testing uncompressed pubkeys")
+ # Segwit transactions using uncompressed pubkeys are not accepted
+ # under default policy, but should still pass consensus.
+ key = CECKey()
+ key.set_secretbytes(b"9")
+ key.set_compressed(False)
+ pubkey = CPubKey(key.get_pubkey())
+ assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
+
+ assert(len(self.utxo) > 0)
+ utxo = self.utxo.pop(0)
+
+ # Test 1: P2WPKH
+ # First create a P2WPKH output that uses an uncompressed pubkey
+ pubkeyhash = hash160(pubkey)
+ scriptPKH = CScript([OP_0, pubkeyhash])
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
+ tx.vout.append(CTxOut(utxo.nValue-1000, scriptPKH))
+ tx.rehash()
+
+ # Confirm it in a block.
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx])
+ self.test_node.test_witness_block(block, accepted=True)
+
+ # Now try to spend it. Send it to a P2WSH output, which we'll
+ # use in the next test.
+ witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
+ witness_hash = sha256(witness_program)
+ scriptWSH = CScript([OP_0, witness_hash])
+
+ tx2 = CTransaction()
+ tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
+ tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptWSH))
+ script = GetP2PKHScript(pubkeyhash)
+ sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
+ signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
+ tx2.wit.vtxinwit.append(CTxInWitness())
+ tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
+ tx2.rehash()
+
+ # Should fail policy test.
+ self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
+ # But passes consensus.
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx2])
+ self.test_node.test_witness_block(block, accepted=True)
+
+ # Test 2: P2WSH
+ # Try to spend the P2WSH output created in the last test.
+ # Send it to a P2SH(P2WSH) output, which we'll use in the next test.
+ p2sh_witness_hash = hash160(scriptWSH)
+ scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
+ scriptSig = CScript([scriptWSH])
+
+ tx3 = CTransaction()
+ tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
+ tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptP2SH))
+ tx3.wit.vtxinwit.append(CTxInWitness())
+ sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
+
+ # Should fail policy test.
+ self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
+ # But passes consensus.
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx3])
+ self.test_node.test_witness_block(block, accepted=True)
+
+ # Test 3: P2SH(P2WSH)
+ # Try to spend the P2SH output created in the last test.
+ # Send it to a P2PKH output, which we'll use in the next test.
+ scriptPubKey = GetP2PKHScript(pubkeyhash)
+ tx4 = CTransaction()
+ tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
+ tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, scriptPubKey))
+ tx4.wit.vtxinwit.append(CTxInWitness())
+ sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
+
+ # Should fail policy test.
+ self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx4])
+ self.test_node.test_witness_block(block, accepted=True)
+
+ # Test 4: Uncompressed pubkeys should still be valid in non-segwit
+ # transactions.
+ tx5 = CTransaction()
+ tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
+ tx5.vout.append(CTxOut(tx4.vout[0].nValue-1000, CScript([OP_TRUE])))
+ (sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
+ signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
+ tx5.vin[0].scriptSig = CScript([signature, pubkey])
+ tx5.rehash()
+ # Should pass policy and consensus.
+ self.test_node.test_transaction_acceptance(tx5, True, True)
+ block = self.build_next_block()
+ self.update_witness_block_with_transactions(block, [tx5])
+ self.test_node.test_witness_block(block, accepted=True)
+ self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
+
+ def test_non_standard_witness(self):
+ self.log.info("Testing detection of non-standard P2WSH witness")
+ pad = chr(1).encode('latin-1')
+
+ # Create scripts for tests
+ scripts = []
+ scripts.append(CScript([OP_DROP] * 100))
+ scripts.append(CScript([OP_DROP] * 99))
+ scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
+ scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
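+ # Serialized sizes (each 59-byte push costs 1 length byte + 59 data bytes):
+ # scripts[0] = 100 bytes, scripts[1] = 99 bytes,
+ # scripts[2] = 59*60 + 60 = 3600 bytes, scripts[3] = 59*60 + 61 = 3601 bytes,
+ # straddling the 3600-byte witnessScript standardness limit tested below.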
+
+ p2wsh_scripts = []
+
+ assert(len(self.utxo))
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+
+ # For each script, generate a pair of P2WSH and P2SH-P2WSH outputs.
+ outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
+ for i in scripts:
+ p2wsh = CScript([OP_0, sha256(i)])
+ p2sh = hash160(p2wsh)
+ p2wsh_scripts.append(p2wsh)
+ tx.vout.append(CTxOut(outputvalue, p2wsh))
+ tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
+ tx.rehash()
+ txid = tx.sha256
+ self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
+
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+
+ # Creating transactions for tests
+ p2wsh_txs = []
+ p2sh_txs = []
+ for i in range(len(scripts)):
+ p2wsh_tx = CTransaction()
+ p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
+ p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
+ p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
+ p2wsh_tx.rehash()
+ p2wsh_txs.append(p2wsh_tx)
+ p2sh_tx = CTransaction()
+ p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
+ p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
+ p2sh_tx.wit.vtxinwit.append(CTxInWitness())
+ p2sh_tx.rehash()
+ p2sh_txs.append(p2sh_tx)
+
+ # Testing native P2WSH
+ # Witness stack size, excluding witnessScript, over 100 is non-standard
+ p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
+ self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
+ # Non-standard nodes should accept
+ self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
+
+ # Stack element size over 80 bytes is non-standard
+ p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
+ self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
+ # Non-standard nodes should accept
+ self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
+ # Standard nodes should accept if element size is not over 80 bytes
+ p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
+ self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
+
+ # witnessScript size at 3600 bytes is standard
+ p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
+ self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
+ self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
+
+ # witnessScript size at 3601 bytes is non-standard
+ p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
+ self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
+ # Non-standard nodes should accept
+ self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
+
+ # Repeating the same tests with P2SH-P2WSH
+ p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
+ self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
+ self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
+ p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
+ self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
+ self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
+ p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
+ self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
+ p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
+ self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
+ self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
+ p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
+ self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
+ self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
+
+ self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
+ # Valid but non-standard transactions in a block should be accepted by standard node
+ sync_blocks(self.nodes)
+ assert_equal(len(self.nodes[0].getrawmempool()), 0)
+ assert_equal(len(self.nodes[1].getrawmempool()), 0)
+
+ self.utxo.pop(0)
+
+
+ def run_test(self):
+ # Setup the p2p connections and start up the network thread.
+ self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK
+ self.old_node = TestNode() # only NODE_NETWORK
+ self.std_node = TestNode() # for testing node1 (fRequireStandard=true)
+
+ self.p2p_connections = [self.test_node, self.old_node]
+
+ self.connections = []
+ self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS))
+ self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK))
+ self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS))
+ self.test_node.add_connection(self.connections[0])
+ self.old_node.add_connection(self.connections[1])
+ self.std_node.add_connection(self.connections[2])
+
+ NetworkThread().start() # Start up network handling in another thread
+
+ # Keep a place to store utxo's that can be used in later tests
+ self.utxo = []
+
+ # Test logic begins here
+ self.test_node.wait_for_verack()
+
+ self.log.info("Starting tests before segwit lock in:")
+
+ self.test_witness_services() # Verifies NODE_WITNESS
+ self.test_non_witness_transaction() # non-witness tx's are accepted
+ self.test_unnecessary_witness_before_segwit_activation()
+ self.test_block_relay(segwit_activated=False)
+
+ # Advance to segwit being 'started'
+ self.advance_to_segwit_started()
+ sync_blocks(self.nodes)
+ self.test_getblocktemplate_before_lockin()
+
+ sync_blocks(self.nodes)
+
+ # At lockin, nothing should change.
+ self.log.info("Testing behavior post lockin, pre-activation")
+ self.advance_to_segwit_lockin()
+
+ # Retest unnecessary witnesses
+ self.test_unnecessary_witness_before_segwit_activation()
+ self.test_witness_tx_relay_before_segwit_activation()
+ self.test_block_relay(segwit_activated=False)
+ self.test_p2sh_witness(segwit_activated=False)
+ self.test_standardness_v0(segwit_activated=False)
+
+ sync_blocks(self.nodes)
+
+ # Now activate segwit
+ self.log.info("Testing behavior after segwit activation")
+ self.advance_to_segwit_active()
+
+ sync_blocks(self.nodes)
+
+ # Test P2SH witness handling again
+ self.test_p2sh_witness(segwit_activated=True)
+ self.test_witness_commitments()
+ self.test_block_malleability()
+ self.test_witness_block_size()
+ self.test_submit_block()
+ self.test_extra_witness_data()
+ self.test_max_witness_push_length()
+ self.test_max_witness_program_length()
+ self.test_witness_input_length()
+ self.test_block_relay(segwit_activated=True)
+ self.test_tx_relay_after_segwit_activation()
+ self.test_standardness_v0(segwit_activated=True)
+ self.test_segwit_versions()
+ self.test_premature_coinbase_witness_spend()
+ self.test_uncompressed_pubkey()
+ self.test_signature_version_1()
+ self.test_non_standard_witness()
+ sync_blocks(self.nodes)
+ self.test_upgrade_after_activation(self.nodes[2], 2)
+ self.test_witness_sigops()
+
+
+if __name__ == '__main__':
+ SegWitTest().main()
diff --git a/test/functional/p2p-timeouts.py b/test/functional/p2p-timeouts.py
new file mode 100755
index 0000000000..498acb23fe
--- /dev/null
+++ b/test/functional/p2p-timeouts.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test various net timeouts.
+
+- Create three peer connections to a single bitcoind node:
+
+ no_verack_node - we never send a verack in response to their version
+ no_version_node - we never send a version (only a ping)
+ no_send_node - we never send any P2P message.
+
+- Open all three connections
+- Wait 1 second
+- Assert that we're connected
+- Send a ping to no_verack_node and no_version_node
+- Wait 30 seconds
+- Assert that we're still connected
+- Send a ping to no_verack_node and no_version_node
+- Wait 31 seconds
+- Assert that we're no longer connected (timeout to receive version/verack is 60 seconds)
+"""
+
+from time import sleep
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class TestNode(SingleNodeConnCB):
+ def __init__(self):
+ SingleNodeConnCB.__init__(self)
+ self.connected = False
+ self.received_version = False
+
+ def on_open(self, conn):
+ self.connected = True
+
+ def on_close(self, conn):
+ self.connected = False
+
+ def on_version(self, conn, message):
+ # Don't send a verack in response
+ self.received_version = True
+
+class TimeoutsTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def setup_network(self):
+ self.nodes = []
+
+ # Start up node0, the node whose disconnect timeouts we will test.
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
+
+ def run_test(self):
+ # Setup the p2p connections and start up the network thread.
+ self.no_verack_node = TestNode() # never send verack
+ self.no_version_node = TestNode() # never send version (just ping)
+ self.no_send_node = TestNode() # never send anything
+
+ connections = []
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_verack_node))
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_version_node, send_version=False))
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_send_node, send_version=False))
+ self.no_verack_node.add_connection(connections[0])
+ self.no_version_node.add_connection(connections[1])
+ self.no_send_node.add_connection(connections[2])
+
+ NetworkThread().start() # Start up network handling in another thread
+
+ sleep(1)
+
+ assert(self.no_verack_node.connected)
+ assert(self.no_version_node.connected)
+ assert(self.no_send_node.connected)
+
+ ping_msg = msg_ping()
+ connections[0].send_message(ping_msg)
+ connections[1].send_message(ping_msg)
+
+ sleep(30)
+
+ assert(self.no_verack_node.received_version)
+
+ assert(self.no_verack_node.connected)
+ assert(self.no_version_node.connected)
+ assert(self.no_send_node.connected)
+
+ connections[0].send_message(ping_msg)
+ connections[1].send_message(ping_msg)
+
+ sleep(31)
+
+ assert(not self.no_verack_node.connected)
+ assert(not self.no_version_node.connected)
+ assert(not self.no_send_node.connected)
+
+if __name__ == '__main__':
+ TimeoutsTest().main()
diff --git a/test/functional/p2p-versionbits-warning.py b/test/functional/p2p-versionbits-warning.py
new file mode 100755
index 0000000000..dc714e9a4a
--- /dev/null
+++ b/test/functional/p2p-versionbits-warning.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test version bits warning system.
+
+Generate chains with block versions that appear to be signalling unknown
+soft-forks, and test that warning alerts are generated.
+"""
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+import re
+import time
+from test_framework.blocktools import create_block, create_coinbase
+
+VB_PERIOD = 144 # versionbits period length for regtest
+VB_THRESHOLD = 108 # versionbits activation threshold for regtest
+VB_TOP_BITS = 0x20000000
+VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment
+
+WARN_UNKNOWN_RULES_MINED = "Unknown block versions being mined! It's possible unknown rules are in effect"
+WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT)
+VB_PATTERN = re.compile("^Warning.*versionbit")
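+# For example, blocks signalling the unknown bit carry
+# nVersion = VB_TOP_BITS | (1 << VB_UNKNOWN_BIT) = 0x20000000 | 0x08000000
+# = 0x28000000 (see send_blocks_with_version below).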
+
+# TestNode: bare-bones "peer". Used mostly as a conduit for the test to send
+# p2p messages to a node, with the messages generated in the main testing logic.
+class TestNode(NodeConnCB):
+ def __init__(self):
+ NodeConnCB.__init__(self)
+ self.connection = None
+ self.ping_counter = 1
+ self.last_pong = msg_pong()
+
+ def add_connection(self, conn):
+ self.connection = conn
+
+ def on_inv(self, conn, message):
+ pass
+
+ # Wrapper for the NodeConn's send_message function
+ def send_message(self, message):
+ self.connection.send_message(message)
+
+ def on_pong(self, conn, message):
+ self.last_pong = message
+
+ # Sync up with the node after delivery of a block
+ def sync_with_ping(self, timeout=30):
+ self.connection.send_message(msg_ping(nonce=self.ping_counter))
+ received_pong = False
+ sleep_time = 0.05
+ while not received_pong and timeout > 0:
+ time.sleep(sleep_time)
+ timeout -= sleep_time
+ with mininode_lock:
+ if self.last_pong.nonce == self.ping_counter:
+ received_pong = True
+ self.ping_counter += 1
+ return received_pong
+
+
+class VersionBitsWarningTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def setup_network(self):
+ self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
+ # Open and close to create zero-length file
+ with open(self.alert_filename, 'w', encoding='utf8') as _:
+ pass
+ self.extra_args = [["-alertnotify=echo %s >> \"" + self.alert_filename + "\""]]
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
+
+ # Send numblocks blocks via peer with nVersionToUse set.
+ def send_blocks_with_version(self, peer, numblocks, nVersionToUse):
+ tip = self.nodes[0].getbestblockhash()
+ height = self.nodes[0].getblockcount()
+ block_time = self.nodes[0].getblockheader(tip)["time"]+1
+ tip = int(tip, 16)
+
+ for _ in range(numblocks):
+ block = create_block(tip, create_coinbase(height+1), block_time)
+ block.nVersion = nVersionToUse
+ block.solve()
+ peer.send_message(msg_block(block))
+ block_time += 1
+ height += 1
+ tip = block.sha256
+ peer.sync_with_ping()
+
+ def test_versionbits_in_alert_file(self):
+ with open(self.alert_filename, 'r', encoding='utf8') as f:
+ alert_text = f.read()
+ assert(VB_PATTERN.match(alert_text))
+
+ def run_test(self):
+ # Setup the p2p connection and start up the network thread.
+ test_node = TestNode()
+
+ connections = []
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
+ test_node.add_connection(connections[0])
+
+ NetworkThread().start() # Start up network handling in another thread
+
+ # Test logic begins here
+ test_node.wait_for_verack()
+
+ # 1. Have the node mine one period worth of blocks
+ self.nodes[0].generate(VB_PERIOD)
+
+ # 2. Now build one period of blocks on the tip, with < VB_THRESHOLD
+ # blocks signaling some unknown bit.
+ nVersion = VB_TOP_BITS | (1<<VB_UNKNOWN_BIT)
+ self.send_blocks_with_version(test_node, VB_THRESHOLD-1, nVersion)
+
+ # Fill rest of period with regular version blocks
+ self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1)
+ # Check that we're not getting any versionbit-related errors in
+ # get*info()
+ assert(not VB_PATTERN.match(self.nodes[0].getinfo()["errors"]))
+ assert(not VB_PATTERN.match(self.nodes[0].getmininginfo()["errors"]))
+ assert(not VB_PATTERN.match(self.nodes[0].getnetworkinfo()["warnings"]))
+
+ # 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling
+ # some unknown bit
+ self.send_blocks_with_version(test_node, VB_THRESHOLD, nVersion)
+ self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD)
+ # Might not get a versionbits-related alert yet, as we should
+ # have gotten a different alert due to more than 51/100 blocks
+ # being of unexpected version.
+ # Check that get*info() shows some kind of error.
+ assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getinfo()["errors"])
+ assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getmininginfo()["errors"])
+ assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getnetworkinfo()["warnings"])
+
+ # Mine a period worth of expected blocks so the generic block-version warning
+ # is cleared, and restart the node. This should move the versionbit state
+ # to ACTIVE.
+ self.nodes[0].generate(VB_PERIOD)
+ stop_nodes(self.nodes)
+ # Empty out the alert file
+ with open(self.alert_filename, 'w', encoding='utf8') as _:
+ pass
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
+
+ # Connecting one block should be enough to generate an error.
+ self.nodes[0].generate(1)
+ assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getinfo()["errors"])
+ assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getmininginfo()["errors"])
+ assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getnetworkinfo()["warnings"])
+ stop_nodes(self.nodes)
+ self.test_versionbits_in_alert_file()
+
+ # Test framework expects the node to still be running...
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
+
+if __name__ == '__main__':
+ VersionBitsWarningTest().main()
diff --git a/test/functional/preciousblock.py b/test/functional/preciousblock.py
new file mode 100755
index 0000000000..30b0b5a301
--- /dev/null
+++ b/test/functional/preciousblock.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the preciousblock RPC."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ connect_nodes_bi,
+ sync_chain,
+ sync_blocks,
+)
+
+def unidirectional_node_sync_via_rpc(node_src, node_dest):
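+ # Walk back from node_src's tip until reaching a block that node_dest
+ # already has, then submit the missing blocks to node_dest oldest-first.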
+ blocks_to_copy = []
+ blockhash = node_src.getbestblockhash()
+ while True:
+ try:
+ assert(len(node_dest.getblock(blockhash, False)) > 0)
+ break
+ except:
+ blocks_to_copy.append(blockhash)
+ blockhash = node_src.getblockheader(blockhash, True)['previousblockhash']
+ blocks_to_copy.reverse()
+ for blockhash in blocks_to_copy:
+ blockdata = node_src.getblock(blockhash, False)
+ assert(node_dest.submitblock(blockdata) in (None, 'inconclusive'))
+
+def node_sync_via_rpc(nodes):
+ for node_src in nodes:
+ for node_dest in nodes:
+ if node_src is node_dest:
+ continue
+ unidirectional_node_sync_via_rpc(node_src, node_dest)
+
+class PreciousTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 3
+
+ def setup_network(self):
+ self.nodes = self.setup_nodes()
+
+ def run_test(self):
+ self.log.info("Ensure submitblock can in principle reorg to a competing chain")
+ self.nodes[0].generate(1)
+ assert_equal(self.nodes[0].getblockcount(), 1)
+ (hashY, hashZ) = self.nodes[1].generate(2)
+ assert_equal(self.nodes[1].getblockcount(), 2)
+ node_sync_via_rpc(self.nodes[0:3])
+ assert_equal(self.nodes[0].getbestblockhash(), hashZ)
+
+ self.log.info("Mine blocks A-B-C on Node 0")
+ (hashA, hashB, hashC) = self.nodes[0].generate(3)
+ assert_equal(self.nodes[0].getblockcount(), 5)
+ self.log.info("Mine competing blocks E-F-G on Node 1")
+ (hashE, hashF, hashG) = self.nodes[1].generate(3)
+ assert_equal(self.nodes[1].getblockcount(), 5)
+ assert(hashC != hashG)
+ self.log.info("Connect nodes and check no reorg occurs")
+ # Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)
+ node_sync_via_rpc(self.nodes[0:2])
+ connect_nodes_bi(self.nodes,0,1)
+ assert_equal(self.nodes[0].getbestblockhash(), hashC)
+ assert_equal(self.nodes[1].getbestblockhash(), hashG)
+ self.log.info("Make Node0 prefer block G")
+ self.nodes[0].preciousblock(hashG)
+ assert_equal(self.nodes[0].getbestblockhash(), hashG)
+ self.log.info("Make Node0 prefer block C again")
+ self.nodes[0].preciousblock(hashC)
+ assert_equal(self.nodes[0].getbestblockhash(), hashC)
+ self.log.info("Make Node1 prefer block C")
+ self.nodes[1].preciousblock(hashC)
+ sync_chain(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
+ assert_equal(self.nodes[1].getbestblockhash(), hashC)
+ self.log.info("Make Node1 prefer block G again")
+ self.nodes[1].preciousblock(hashG)
+ assert_equal(self.nodes[1].getbestblockhash(), hashG)
+ self.log.info("Make Node0 prefer block G again")
+ self.nodes[0].preciousblock(hashG)
+ assert_equal(self.nodes[0].getbestblockhash(), hashG)
+ self.log.info("Make Node1 prefer block C again")
+ self.nodes[1].preciousblock(hashC)
+ assert_equal(self.nodes[1].getbestblockhash(), hashC)
+ self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1")
+ self.nodes[0].generate(1)
+ assert_equal(self.nodes[0].getblockcount(), 6)
+ sync_blocks(self.nodes[0:2])
+ hashH = self.nodes[0].getbestblockhash()
+ assert_equal(self.nodes[1].getbestblockhash(), hashH)
+ self.log.info("Node1 should not be able to prefer block C anymore")
+ self.nodes[1].preciousblock(hashC)
+ assert_equal(self.nodes[1].getbestblockhash(), hashH)
+ self.log.info("Mine competing blocks I-J-K-L on Node 2")
+ self.nodes[2].generate(4)
+ assert_equal(self.nodes[2].getblockcount(), 6)
+ hashL = self.nodes[2].getbestblockhash()
+ self.log.info("Connect nodes and check no reorg occurs")
+ node_sync_via_rpc(self.nodes[1:3])
+ connect_nodes_bi(self.nodes,1,2)
+ connect_nodes_bi(self.nodes,0,2)
+ assert_equal(self.nodes[0].getbestblockhash(), hashH)
+ assert_equal(self.nodes[1].getbestblockhash(), hashH)
+ assert_equal(self.nodes[2].getbestblockhash(), hashL)
+ self.log.info("Make Node1 prefer block L")
+ self.nodes[1].preciousblock(hashL)
+ assert_equal(self.nodes[1].getbestblockhash(), hashL)
+ self.log.info("Make Node2 prefer block H")
+ self.nodes[2].preciousblock(hashH)
+ assert_equal(self.nodes[2].getbestblockhash(), hashH)
+
+if __name__ == '__main__':
+ PreciousTest().main()
diff --git a/test/functional/prioritise_transaction.py b/test/functional/prioritise_transaction.py
new file mode 100755
index 0000000000..0b04ad17ab
--- /dev/null
+++ b/test/functional/prioritise_transaction.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the prioritisetransaction mining RPC."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.mininode import COIN, MAX_BLOCK_BASE_SIZE
+
+class PrioritiseTransactionTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ self.txouts = gen_return_txouts()
+
+ def setup_network(self):
+ self.nodes = []
+ self.is_network_split = False
+
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-printpriority=1"]))
+ self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
+
+ def run_test(self):
+ utxo_count = 90
+ utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
+ base_fee = self.relayfee*100 # our transactions are smaller than 100kb
+ txids = []
+
+ # Create 3 batches of transactions at 3 different fee rate levels
+ range_size = utxo_count // 3
+ for i in range(3):
+ txids.append([])
+ start_range = i * range_size
+ end_range = start_range + range_size
+ txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
+
+ # Make sure that the size of each group of transactions exceeds
+ # MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
+ # more transactions.
+ mempool = self.nodes[0].getrawmempool(True)
+ sizes = [0, 0, 0]
+ for i in range(3):
+ for j in txids[i]:
+ assert(j in mempool)
+ sizes[i] += mempool[j]['size']
+ assert(sizes[i] > MAX_BLOCK_BASE_SIZE) # Fail => raise utxo_count
+
+ # add a fee delta to something in the cheapest bucket and make sure it gets mined
+ # also check that a different entry in the cheapest bucket is NOT mined
+ self.nodes[0].prioritisetransaction(txids[0][0], int(3*base_fee*COIN))
+
+ self.nodes[0].generate(1)
+
+ mempool = self.nodes[0].getrawmempool()
+ self.log.info("Assert that prioritised transaction was mined")
+ assert(txids[0][0] not in mempool)
+ assert(txids[0][1] in mempool)
+
+ high_fee_tx = None
+ for x in txids[2]:
+ if x not in mempool:
+ high_fee_tx = x
+
+ # Something high-fee should have been mined!
+ assert(high_fee_tx is not None)
+
+ # Add a prioritisation before a tx is in the mempool (de-prioritising a
+ # high-fee transaction so that it's now low fee).
+ self.nodes[0].prioritisetransaction(high_fee_tx, -int(2*base_fee*COIN))
+
+ # Add everything back to mempool
+ self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+
+ # Check to make sure our high fee rate tx is back in the mempool
+ mempool = self.nodes[0].getrawmempool()
+ assert(high_fee_tx in mempool)
+
+ # Now verify the modified-high feerate transaction isn't mined before
+ # the other high fee transactions. Keep mining until our mempool has
+ # decreased by all the high fee size that we calculated above.
+ while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
+ self.nodes[0].generate(1)
+
+ # High fee transaction should not have been mined, but other high fee rate
+ # transactions should have been.
+ mempool = self.nodes[0].getrawmempool()
+ self.log.info("Assert that de-prioritised transaction is still in mempool")
+ assert(high_fee_tx in mempool)
+ for x in txids[2]:
+ if (x != high_fee_tx):
+ assert(x not in mempool)
+
+ # Create a free transaction. Should be rejected.
+ utxo_list = self.nodes[0].listunspent()
+ assert(len(utxo_list) > 0)
+ utxo = utxo_list[0]
+
+ inputs = []
+ outputs = {}
+ inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
+ outputs[self.nodes[0].getnewaddress()] = utxo["amount"]
+ raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
+ tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"]
+ tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"]
+
+ # This will raise an exception due to min relay fee not being met
+ assert_raises_jsonrpc(-26, "66: min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex)
+ assert(tx_id not in self.nodes[0].getrawmempool())
+
+ # This is a less than 1000-byte transaction, so just set the fee
+ # to be the minimum for a 1000 byte transaction and check that it is
+ # accepted.
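+ # With the regtest default minrelaytxfee of 0.00001 BTC/kB, this fee delta
+ # is int(0.00001 * COIN) = 1000 satoshis, the minimum fee for 1000 bytes.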
+ self.nodes[0].prioritisetransaction(tx_id, int(self.relayfee*COIN))
+
+ self.log.info("Assert that prioritised free transaction is accepted to mempool")
+ assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id)
+ assert(tx_id in self.nodes[0].getrawmempool())
+
+if __name__ == '__main__':
+ PrioritiseTransactionTest().main()
diff --git a/test/functional/proxy_test.py b/test/functional/proxy_test.py
new file mode 100755
index 0000000000..748e3e69f6
--- /dev/null
+++ b/test/functional/proxy_test.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test bitcoind with different proxy configuration.
+
+Test plan:
+- Start bitcoind's with different proxy configurations
+- Use addnode to initiate connections
+- Verify that proxies are connected to, and the right connection command is given
+- Proxy configurations to test on bitcoind side:
+ - `-proxy` (proxy everything)
+ - `-onion` (proxy just onions)
+ - `-proxyrandomize` Circuit randomization
+- Proxy configurations to test on proxy side:
+ - support no authentication (other proxy)
+ - support no authentication + user/pass authentication (Tor)
+ - proxy on IPv6
+
+- Create various proxies (as threads)
+- Create bitcoinds that connect to them
+- Manipulate the bitcoinds using addnode (onetry) and observe effects
+
+addnode connect to IPv4
+addnode connect to IPv6
+addnode connect to onion
+addnode connect to generic DNS name
+"""
+
+import socket
+import os
+
+from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ PORT_MIN,
+ PORT_RANGE,
+ start_nodes,
+ assert_equal,
+)
+from test_framework.netutil import test_ipv6_local
+
+RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
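+# With the framework's assumed defaults (PORT_MIN = 11000, PORT_RANGE = 5000),
+# the proxies below listen from port 21000 upward, offset by pid so that
+# parallel test runs don't collide.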
+
+
+class ProxyTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 4
+ self.setup_clean_chain = False
+
+ def setup_nodes(self):
+ self.have_ipv6 = test_ipv6_local()
+ # Create two proxies on different ports
+ # ... one unauthenticated
+ self.conf1 = Socks5Configuration()
+ self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
+ self.conf1.unauth = True
+ self.conf1.auth = False
+ # ... one supporting authenticated and unauthenticated (Tor)
+ self.conf2 = Socks5Configuration()
+ self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
+ self.conf2.unauth = True
+ self.conf2.auth = True
+ if self.have_ipv6:
+ # ... one on IPv6 with similar configuration
+ self.conf3 = Socks5Configuration()
+ self.conf3.af = socket.AF_INET6
+ self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
+ self.conf3.unauth = True
+ self.conf3.auth = True
+ else:
+ self.log.warning("Testing without local IPv6 support")
+
+ self.serv1 = Socks5Server(self.conf1)
+ self.serv1.start()
+ self.serv2 = Socks5Server(self.conf2)
+ self.serv2.start()
+ if self.have_ipv6:
+ self.serv3 = Socks5Server(self.conf3)
+ self.serv3.start()
+
+ # Note: proxies are not used to connect to local nodes.
+ # This is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost.
+ args = [
+ ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
+ ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
+ ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
+ []
+ ]
+ if self.have_ipv6:
+ args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
+ return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=args)
+
+ def node_test(self, node, proxies, auth, test_onion=True):
+ rv = []
+ # Test: outgoing IPv4 connection through node
+ node.addnode("15.61.23.23:1234", "onetry")
+ cmd = proxies[0].queue.get()
+ assert(isinstance(cmd, Socks5Command))
+ # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
+ assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+ assert_equal(cmd.addr, b"15.61.23.23")
+ assert_equal(cmd.port, 1234)
+ if not auth:
+ assert_equal(cmd.username, None)
+ assert_equal(cmd.password, None)
+ rv.append(cmd)
+
+ if self.have_ipv6:
+ # Test: outgoing IPv6 connection through node
+ node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
+ cmd = proxies[1].queue.get()
+ assert(isinstance(cmd, Socks5Command))
+ # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
+ assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+ assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
+ assert_equal(cmd.port, 5443)
+ if not auth:
+ assert_equal(cmd.username, None)
+ assert_equal(cmd.password, None)
+ rv.append(cmd)
+
+ if test_onion:
+ # Test: outgoing onion connection through node
+ node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
+ cmd = proxies[2].queue.get()
+ assert(isinstance(cmd, Socks5Command))
+ assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+ assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
+ assert_equal(cmd.port, 8333)
+ if not auth:
+ assert_equal(cmd.username, None)
+ assert_equal(cmd.password, None)
+ rv.append(cmd)
+
+ # Test: outgoing DNS name connection through node
+ node.addnode("node.noumenon:8333", "onetry")
+ cmd = proxies[3].queue.get()
+ assert(isinstance(cmd, Socks5Command))
+ assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+ assert_equal(cmd.addr, b"node.noumenon")
+ assert_equal(cmd.port, 8333)
+ if not auth:
+ assert_equal(cmd.username, None)
+ assert_equal(cmd.password, None)
+ rv.append(cmd)
+
+ return rv
+
+ def run_test(self):
+ # basic -proxy
+ self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
+
+ # -proxy plus -onion
+ self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
+
+ # -proxy plus -onion, -proxyrandomize
+ rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
+ # Check that credentials as used for -proxyrandomize connections are unique
+ credentials = set((x.username,x.password) for x in rv)
+ assert_equal(len(credentials), len(rv))
+
+ if self.have_ipv6:
+ # proxy on IPv6 localhost
+ self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
+
+ def networks_dict(d):
+ r = {}
+ for x in d['networks']:
+ r[x['name']] = x
+ return r
+
+ # test RPC getnetworkinfo
+ n0 = networks_dict(self.nodes[0].getnetworkinfo())
+ for net in ['ipv4','ipv6','onion']:
+ assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
+ assert_equal(n0[net]['proxy_randomize_credentials'], True)
+ assert_equal(n0['onion']['reachable'], True)
+
+ n1 = networks_dict(self.nodes[1].getnetworkinfo())
+ for net in ['ipv4','ipv6']:
+ assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
+ assert_equal(n1[net]['proxy_randomize_credentials'], False)
+ assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
+ assert_equal(n1['onion']['proxy_randomize_credentials'], False)
+ assert_equal(n1['onion']['reachable'], True)
+
+ n2 = networks_dict(self.nodes[2].getnetworkinfo())
+ for net in ['ipv4','ipv6','onion']:
+ assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
+ assert_equal(n2[net]['proxy_randomize_credentials'], True)
+ assert_equal(n2['onion']['reachable'], True)
+
+ if self.have_ipv6:
+ n3 = networks_dict(self.nodes[3].getnetworkinfo())
+ for net in ['ipv4','ipv6']:
+ assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
+ assert_equal(n3[net]['proxy_randomize_credentials'], False)
+ assert_equal(n3['onion']['reachable'], False)
+
+if __name__ == '__main__':
+ ProxyTest().main()
+
diff --git a/test/functional/pruning.py b/test/functional/pruning.py
new file mode 100755
index 0000000000..cc84c8c085
--- /dev/null
+++ b/test/functional/pruning.py
@@ -0,0 +1,443 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the pruning code.
+
+WARNING:
+This test uses 4GB of disk space.
+This test takes 30 mins or more (up to 2 hours)
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+import time
+import os
+
+MIN_BLOCKS_TO_KEEP = 288
+
+# Rescans start at the earliest block up to 2 hours before a key timestamp, so
+# the manual prune RPC avoids pruning blocks in the same window to be
+# compatible with pruning based on key creation time.
+TIMESTAMP_WINDOW = 2 * 60 * 60
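+# For example, passing a block's header time plus TIMESTAMP_WINDOW to
+# pruneblockchain targets that block's height (see the height() helper in
+# manual_test below).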
+
+
+def calc_usage(blockdir):
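+ # Return the total size of the files in blockdir, in MiB.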
+ return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
+
+class PruneTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 6
+
+ # Cache for utxos, as the listunspent may take a long time later in the test
+ self.utxo_cache_0 = []
+ self.utxo_cache_1 = []
+
+ def setup_network(self):
+ self.nodes = []
+ self.is_network_split = False
+
+ # Create nodes 0 and 1 to mine
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
+ self.nodes.append(start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
+
+ # Create node 2 to test pruning
+ self.nodes.append(start_node(2, self.options.tmpdir, ["-maxreceivebuffer=20000","-prune=550"], timewait=900))
+ self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"
+
+ # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
+ self.nodes.append(start_node(3, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900))
+ self.nodes.append(start_node(4, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900))
+
+ # Create node 5 to test the wallet in prune mode, but do not connect it
+ self.nodes.append(start_node(5, self.options.tmpdir, ["-prune=550"]))
+
+ # Determine default relay fee
+ self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
+
+ connect_nodes(self.nodes[0], 1)
+ connect_nodes(self.nodes[1], 2)
+ connect_nodes(self.nodes[2], 0)
+ connect_nodes(self.nodes[0], 3)
+ connect_nodes(self.nodes[0], 4)
+ sync_blocks(self.nodes[0:5])
+
+ def create_big_chain(self):
+ # Start by creating some coinbases we can spend later
+ self.nodes[1].generate(200)
+ sync_blocks(self.nodes[0:2])
+ self.nodes[0].generate(150)
+ # Then mine enough full blocks to create more than 550MiB of data
+ for i in range(645):
+ mine_large_block(self.nodes[0], self.utxo_cache_0)
+
+ sync_blocks(self.nodes[0:5])
+
+ def test_height_min(self):
+ if not os.path.isfile(self.prunedir+"blk00000.dat"):
+ raise AssertionError("blk00000.dat is missing, pruning too early")
+ self.log.info("Success")
+ self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
+ self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
+ # Pruning doesn't run until we're allocating another chunk; 20 full blocks past the height cutoff will ensure this
+ for i in range(25):
+ mine_large_block(self.nodes[0], self.utxo_cache_0)
+
+ waitstart = time.time()
+ while os.path.isfile(self.prunedir+"blk00000.dat"):
+ time.sleep(0.1)
+ if time.time() - waitstart > 30:
+ raise AssertionError("blk00000.dat not pruned when it should be")
+
+ self.log.info("Success")
+ usage = calc_usage(self.prunedir)
+ self.log.info("Usage should be below target: %d" % usage)
+ if (usage > 550):
+ raise AssertionError("Pruning target not being met")
+
+ def create_chain_with_staleblocks(self):
+ # Create stale blocks in manageable sized chunks
+ self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
+
+ for j in range(12):
+ # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
+ # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
+ # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
+ self.stop_node(0)
+ self.nodes[0]=start_node(0, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
+ # Mine 24 blocks in node 1
+ for i in range(24):
+ if j == 0:
+ mine_large_block(self.nodes[1], self.utxo_cache_1)
+ else:
+ self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
+
+ # Reorg back with 25 block chain from node 0
+ for i in range(25):
+ mine_large_block(self.nodes[0], self.utxo_cache_0)
+
+ # Create connections in the order so both nodes can see the reorg at the same time
+ connect_nodes(self.nodes[1], 0)
+ connect_nodes(self.nodes[2], 0)
+ sync_blocks(self.nodes[0:3])
+
+ self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
+
+ def reorg_test(self):
+ # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
+ # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
+ # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
+ # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
+ self.stop_node(1)
+ self.nodes[1]=start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
+
+ height = self.nodes[1].getblockcount()
+ self.log.info("Current block height: %d" % height)
+
+ invalidheight = height-287
+ badhash = self.nodes[1].getblockhash(invalidheight)
+ self.log.info("Invalidating block %s at height %d" % (badhash,invalidheight))
+ self.nodes[1].invalidateblock(badhash)
+
+ # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want.
+ # So invalidate that fork as well, until we're on the same chain as nodes 0/2 (but at an ancestor 288 blocks ago).
+ mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
+ curhash = self.nodes[1].getblockhash(invalidheight - 1)
+ while curhash != mainchainhash:
+ self.nodes[1].invalidateblock(curhash)
+ curhash = self.nodes[1].getblockhash(invalidheight - 1)
+
+ assert(self.nodes[1].getblockcount() == invalidheight - 1)
+ self.log.info("New best height: %d" % self.nodes[1].getblockcount())
+
+ # Reboot node1 to clear those giant tx's from mempool
+ self.stop_node(1)
+ self.nodes[1]=start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
+
+ self.log.info("Generating new longer chain of 300 more blocks")
+ self.nodes[1].generate(300)
+
+ self.log.info("Reconnect nodes")
+ connect_nodes(self.nodes[0], 1)
+ connect_nodes(self.nodes[2], 1)
+ sync_blocks(self.nodes[0:3], timeout=120)
+
+ self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
+ self.log.info("Usage possibly still high bc of stale blocks in block files: %d" % calc_usage(self.prunedir))
+
+ self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
+ for i in range(22):
+ # This can be slow, so do this in multiple RPC calls to avoid
+ # RPC timeouts.
+ self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
+ sync_blocks(self.nodes[0:3], timeout=300)
+
+ usage = calc_usage(self.prunedir)
+ self.log.info("Usage should be below target: %d" % usage)
+ if (usage > 550):
+ raise AssertionError("Pruning target not being met")
+
+ return invalidheight,badhash
+
+ def reorg_back(self):
+ # Verify that a block on the old main chain fork has been pruned away
+ assert_raises_jsonrpc(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
+ self.log.info("Will need to redownload block %d" % self.forkheight)
+
+ # Verify that we have enough history to reorg back to the fork point
+ # Although this is more than 288 blocks, because this chain was written more recently
+ # and only its other 299 small and 220 large blocks are in the block files after it,
+ # it's expected to still be retained.
+ self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
+
+ first_reorg_height = self.nodes[2].getblockcount()
+ curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
+ self.nodes[2].invalidateblock(curchainhash)
+ goalbestheight = self.mainchainheight
+ goalbesthash = self.mainchainhash2
+
+ # As of 0.10 the current block download logic is not able to reorg to the original chain created in
+ # create_chain_with_staleblocks because it doesn't know of any peer that's on that chain from which to
+ # redownload its missing blocks.
+ # Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
+ # because it has all the block data.
+ # However it must mine enough blocks to have a more work chain than the reorg_test chain in order
+ # to trigger node 2's block download logic.
+ # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
+ if self.nodes[2].getblockcount() < self.mainchainheight:
+ blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
+ self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
+ self.nodes[0].invalidateblock(curchainhash)
+ assert(self.nodes[0].getblockcount() == self.mainchainheight)
+ assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
+ goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
+ goalbestheight = first_reorg_height + 1
+
+ self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
+ waitstart = time.time()
+ while self.nodes[2].getblockcount() < goalbestheight:
+ time.sleep(0.1)
+ if time.time() - waitstart > 900:
+ raise AssertionError("Node 2 didn't reorg to proper height")
+ assert(self.nodes[2].getbestblockhash() == goalbesthash)
+ # Verify we can now have the data for a block previously pruned
+ assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
+
+ def manual_test(self, node_number, use_timestamp):
+ # at this point, node has 995 blocks and has not yet run in prune mode
+ node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, timewait=900)
+ assert_equal(node.getblockcount(), 995)
+ assert_raises_jsonrpc(-1, "not in prune mode", node.pruneblockchain, 500)
+ self.stop_node(node_number)
+
+ # now re-start in manual pruning mode
+ node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-prune=1"], timewait=900)
+ assert_equal(node.getblockcount(), 995)
+
+ def height(index):
+ if use_timestamp:
+ return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
+ else:
+ return index
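+        # (Sketch of how the two modes line up: in timestamp mode this returns
+        # the target block's time plus TIMESTAMP_WINDOW, and the RPC is assumed
+        # to subtract the same 2-hour buffer when mapping a timestamp back to a
+        # height, so both modes end up pruning to the same block.)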
+
+ def prune(index, expected_ret=None):
+ ret = node.pruneblockchain(height(index))
+ # Check the return value. When use_timestamp is True, just check
+ # that the return value is less than or equal to the expected
+ # value, because when more than one block is generated per second,
+ # a timestamp will not be granular enough to uniquely identify an
+ # individual block.
+ if expected_ret is None:
+ expected_ret = index
+ if use_timestamp:
+ assert_greater_than(ret, 0)
+ assert_greater_than(expected_ret + 1, ret)
+ else:
+ assert_equal(ret, expected_ret)
+
+ def has_block(index):
+ return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index))
+
+ # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
+ assert_raises_jsonrpc(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
+
+ # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
+ node.generate(6)
+ assert_equal(node.getblockchaininfo()["blocks"], 1001)
+
+ # negative heights should raise an exception
+ assert_raises_jsonrpc(-8, "Negative", node.pruneblockchain, -10)
+
+ # height=100 too low to prune first block file so this is a no-op
+ prune(100)
+ if not has_block(0):
+ raise AssertionError("blk00000.dat is missing when should still be there")
+
+ # Does nothing
+ node.pruneblockchain(height(0))
+ if not has_block(0):
+ raise AssertionError("blk00000.dat is missing when should still be there")
+
+ # height=500 should prune first file
+ prune(500)
+ if has_block(0):
+ raise AssertionError("blk00000.dat is still there, should be pruned by now")
+ if not has_block(1):
+ raise AssertionError("blk00001.dat is missing when should still be there")
+
+ # height=650 should prune second file
+ prune(650)
+ if has_block(1):
+ raise AssertionError("blk00001.dat is still there, should be pruned by now")
+
+ # height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
+ prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
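+        # (MIN_BLOCKS_TO_KEEP is 288 in this file, so with the tip at 1001 the
+        # expected return height is 1001 - 288 = 713: the last 288 blocks must
+        # be kept, so nothing past tip-288 can be removed.)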
+ if not has_block(2):
+ raise AssertionError("blk00002.dat is still there, should be pruned by now")
+
+ # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
+ node.generate(288)
+ prune(1000)
+ if has_block(2):
+ raise AssertionError("blk00002.dat is still there, should be pruned by now")
+ if has_block(3):
+ raise AssertionError("blk00003.dat is still there, should be pruned by now")
+
+ # stop node, start back up with auto-prune at 550MB, make sure still runs
+ self.stop_node(node_number)
+ self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-prune=550"], timewait=900)
+
+ self.log.info("Success")
+
+ def wallet_test(self):
+ # check that the pruning node's wallet is still in good shape
+ self.log.info("Stop and start pruning node to trigger wallet rescan")
+ self.stop_node(2)
+ start_node(2, self.options.tmpdir, ["-prune=550"])
+ self.log.info("Success")
+
+        # check that the wallet loads successfully when restarting a pruned node after IBD.
+ # this was reported to fail in #7494.
+ self.log.info("Syncing node 5 to test wallet")
+ connect_nodes(self.nodes[0], 5)
+ nds = [self.nodes[0], self.nodes[5]]
+ sync_blocks(nds, wait=5, timeout=300)
+ self.stop_node(5) #stop and start to trigger rescan
+ start_node(5, self.options.tmpdir, ["-prune=550"])
+ self.log.info("Success")
+
+ def run_test(self):
+ self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
+ self.log.info("Mining a big blockchain of 995 blocks")
+ self.create_big_chain()
+ # Chain diagram key:
+ # * blocks on main chain
+ # +,&,$,@ blocks on other forks
+ # X invalidated block
+ # N1 Node 1
+ #
+ # Start by mining a simple chain that all nodes have
+ # N0=N1=N2 **...*(995)
+
+ # stop manual-pruning node with 995 blocks
+ self.stop_node(3)
+ self.stop_node(4)
+
+ self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
+ self.test_height_min()
+ # Extend this chain past the PruneAfterHeight
+ # N0=N1=N2 **...*(1020)
+
+ self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
+ self.create_chain_with_staleblocks()
+ # Disconnect N0
+ # And mine a 24 block chain on N1 and a separate 25 block chain on N0
+ # N1=N2 **...*+...+(1044)
+ # N0 **...**...**(1045)
+ #
+ # reconnect nodes causing reorg on N1 and N2
+ # N1=N2 **...*(1020) *...**(1045)
+ # \
+ # +...+(1044)
+ #
+ # repeat this process until you have 12 stale forks hanging off the
+ # main chain on N1 and N2
+ # N0 *************************...***************************(1320)
+ #
+ # N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
+ # \ \ \
+ # +...+(1044) &.. $...$(1319)
+
+ # Save some current chain state for later use
+ self.mainchainheight = self.nodes[2].getblockcount() #1320
+ self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
+
+ self.log.info("Check that we can survive a 288 block reorg still")
+ (self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
+ # Now create a 288 block reorg by mining a longer chain on N1
+ # First disconnect N1
+ # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
+ # N1 **...*(1020) **...**(1032)X..
+ # \
+ # ++...+(1031)X..
+ #
+ # Now mine 300 more blocks on N1
+ # N1 **...*(1020) **...**(1032) @@...@(1332)
+ # \ \
+ # \ X...
+ # \ \
+ # ++...+(1031)X.. ..
+ #
+ # Reconnect nodes and mine 220 more blocks on N1
+ # N1 **...*(1020) **...**(1032) @@...@@@(1552)
+ # \ \
+ # \ X...
+ # \ \
+ # ++...+(1031)X.. ..
+ #
+ # N2 **...*(1020) **...**(1032) @@...@@@(1552)
+ # \ \
+ # \ *...**(1320)
+ # \ \
+ # ++...++(1044) ..
+ #
+ # N0 ********************(1032) @@...@@@(1552)
+ # \
+ # *...**(1320)
+
+ self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg")
+ self.reorg_back()
+ # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
+ # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
+        # original main chain (*), but it will require a redownload of some blocks
+        # In order to have a peer we think we can download from, we must also perform this
+        # invalidation on N0 and mine a new longest chain to trigger the redownload.
+ # Final result:
+ # N0 ********************(1032) **...****(1553)
+ # \
+ # X@...@@@(1552)
+ #
+ # N2 **...*(1020) **...**(1032) **...****(1553)
+ # \ \
+ # \ X@...@@@(1552)
+ # \
+ # +..
+ #
+ # N1 doesn't change because 1033 on main chain (*) is invalid
+
+ self.log.info("Test manual pruning with block indices")
+ self.manual_test(3, use_timestamp=False)
+
+ self.log.info("Test manual pruning with timestamps")
+ self.manual_test(4, use_timestamp=True)
+
+ self.log.info("Test wallet re-scan")
+ self.wallet_test()
+
+ self.log.info("Done")
+
+if __name__ == '__main__':
+ PruneTest().main()
diff --git a/test/functional/rawtransactions.py b/test/functional/rawtransactions.py
new file mode 100755
index 0000000000..0374d8984a
--- /dev/null
+++ b/test/functional/rawtransactions.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the rawtranscation RPCs.
+
+Test the following RPCs:
+ - createrawtransaction
+ - signrawtransaction
+ - sendrawtransaction
+ - decoderawtransaction
+ - getrawtransaction
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+# Create one-input, one-output, no-fee transaction:
+class RawTransactionsTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 3
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
+
+ #connect to a local machine for debugging
+ #url = "http://bitcoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 18332)
+ #proxy = AuthServiceProxy(url)
+ #proxy.url = url # store URL on proxy for info
+ #self.nodes.append(proxy)
+
+ connect_nodes_bi(self.nodes,0,1)
+ connect_nodes_bi(self.nodes,1,2)
+ connect_nodes_bi(self.nodes,0,2)
+
+ self.is_network_split=False
+ self.sync_all()
+
+ def run_test(self):
+
+ #prepare some coins for multiple *rawtransaction commands
+ self.nodes[2].generate(1)
+ self.sync_all()
+ self.nodes[0].generate(101)
+ self.sync_all()
+ self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
+ self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
+ self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
+ self.sync_all()
+ self.nodes[0].generate(5)
+ self.sync_all()
+
+ #########################################
+ # sendrawtransaction with missing input #
+ #########################################
+ inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
+ outputs = { self.nodes[0].getnewaddress() : 4.998 }
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ rawtx = self.nodes[2].signrawtransaction(rawtx)
+
+ # This will raise an exception since there are missing inputs
+ assert_raises_jsonrpc(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
+
+ #########################
+ # RAW TX MULTISIG TESTS #
+ #########################
+ # 2of2 test
+ addr1 = self.nodes[2].getnewaddress()
+ addr2 = self.nodes[2].getnewaddress()
+
+ addr1Obj = self.nodes[2].validateaddress(addr1)
+ addr2Obj = self.nodes[2].validateaddress(addr2)
+
+ mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
+ mSigObjValid = self.nodes[2].validateaddress(mSigObj)
+
+ #use balance deltas instead of absolute values
+ bal = self.nodes[2].getbalance()
+
+        # send 1.2 BTC to msig addr
+ txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
+ self.sync_all()
+ self.nodes[0].generate(1)
+ self.sync_all()
+        assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 multisig addr, so the tx should affect its balance
+
+
+ # 2of3 test from different nodes
+ bal = self.nodes[2].getbalance()
+ addr1 = self.nodes[1].getnewaddress()
+ addr2 = self.nodes[2].getnewaddress()
+ addr3 = self.nodes[2].getnewaddress()
+
+ addr1Obj = self.nodes[1].validateaddress(addr1)
+ addr2Obj = self.nodes[2].validateaddress(addr2)
+ addr3Obj = self.nodes[2].validateaddress(addr3)
+
+ mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])
+ mSigObjValid = self.nodes[2].validateaddress(mSigObj)
+
+ txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
+ decTx = self.nodes[0].gettransaction(txId)
+ rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
+ sPK = rawTx['vout'][0]['scriptPubKey']['hex']
+ self.sync_all()
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+        #THIS IS AN INCOMPLETE FEATURE
+        #NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND COUNT TOWARD THE BALANCE CALCULATION
+ assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
+
+ txDetails = self.nodes[0].gettransaction(txId, True)
+ rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
+ vout = False
+ for outpoint in rawTx['vout']:
+ if outpoint['value'] == Decimal('2.20000000'):
+ vout = outpoint
+ break
+
+ bal = self.nodes[0].getbalance()
+ inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex']}]
+ outputs = { self.nodes[0].getnewaddress() : 2.19 }
+ rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
+ rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs)
+        assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, so it can't completely sign the tx
+
+ rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs)
+        assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx completely because it owns two of the three keys
+ self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
+ rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
+ self.sync_all()
+ self.nodes[0].generate(1)
+ self.sync_all()
+ assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
+
+ # getrawtransaction tests
+ # 1. valid parameters - only supply txid
+ txHash = rawTx["hash"]
+ assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
+
+ # 2. valid parameters - supply txid and 0 for non-verbose
+ assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
+
+ # 3. valid parameters - supply txid and False for non-verbose
+ assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
+
+ # 4. valid parameters - supply txid and 1 for verbose.
+ # We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
+ assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
+
+        # 5. valid parameters - supply txid and True for verbose
+ assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
+
+ # 6. invalid parameters - supply txid and string "Flase"
+ assert_raises_jsonrpc(-3,"Invalid type", self.nodes[0].getrawtransaction, txHash, "Flase")
+
+ # 7. invalid parameters - supply txid and empty array
+ assert_raises_jsonrpc(-3,"Invalid type", self.nodes[0].getrawtransaction, txHash, [])
+
+ # 8. invalid parameters - supply txid and empty dict
+ assert_raises_jsonrpc(-3,"Invalid type", self.nodes[0].getrawtransaction, txHash, {})
+
+ inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
+ outputs = { self.nodes[0].getnewaddress() : 1 }
+ rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
+ decrawtx= self.nodes[0].decoderawtransaction(rawtx)
+ assert_equal(decrawtx['vin'][0]['sequence'], 1000)
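+        # (nSequence is a 32-bit unsigned field, so the accepted range is 0
+        # through 4294967295; the -1 and 4294967296 cases below are therefore
+        # rejected as out of range.)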
+
+ # 9. invalid parameters - sequence number out of range
+ inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
+ outputs = { self.nodes[0].getnewaddress() : 1 }
+ assert_raises_jsonrpc(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
+
+ # 10. invalid parameters - sequence number out of range
+ inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
+ outputs = { self.nodes[0].getnewaddress() : 1 }
+ assert_raises_jsonrpc(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
+
+ inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
+ outputs = { self.nodes[0].getnewaddress() : 1 }
+ rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
+ decrawtx= self.nodes[0].decoderawtransaction(rawtx)
+ assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
+
+if __name__ == '__main__':
+ RawTransactionsTest().main()
diff --git a/test/functional/receivedby.py b/test/functional/receivedby.py
new file mode 100755
index 0000000000..248bcdbd68
--- /dev/null
+++ b/test/functional/receivedby.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the listreceivedbyaddress RPC."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+def get_sub_array_from_array(object_array, to_match):
+ '''
+ Finds and returns a sub array from an array of arrays.
+    to_match should be a unique identifier of a sub array
+ '''
+ for item in object_array:
+ all_match = True
+ for key,value in to_match.items():
+ if item[key] != value:
+ all_match = False
+ if not all_match:
+ continue
+ return item
+ return []
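+# For example (illustrative only):
+#   get_sub_array_from_array([{"account": "a"}, {"account": "b", "amount": 2}],
+#                            {"account": "b"})
+# returns {"account": "b", "amount": 2}; an empty list is returned when
+# nothing matches.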
+
+class ReceivedByTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 4
+ self.setup_clean_chain = False
+
+ def setup_nodes(self):
+ #This test requires mocktime
+ enable_mocktime()
+ return start_nodes(self.num_nodes, self.options.tmpdir)
+
+ def run_test(self):
+ '''
+ listreceivedbyaddress Test
+ '''
+ # Send from node 0 to 1
+ addr = self.nodes[1].getnewaddress()
+ txid = self.nodes[0].sendtoaddress(addr, 0.1)
+ self.sync_all()
+
+        #Check not listed in listreceivedbyaddress because the tx has 0 confirmations
+ assert_array_result(self.nodes[1].listreceivedbyaddress(),
+ {"address":addr},
+ { },
+ True)
+        #Bury Tx under 10 blocks so it will be returned by listreceivedbyaddress
+ self.nodes[1].generate(10)
+ self.sync_all()
+ assert_array_result(self.nodes[1].listreceivedbyaddress(),
+ {"address":addr},
+ {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
+ #With min confidence < 10
+ assert_array_result(self.nodes[1].listreceivedbyaddress(5),
+ {"address":addr},
+ {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
+ #With min confidence > 10, should not find Tx
+ assert_array_result(self.nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)
+
+ #Empty Tx
+ addr = self.nodes[1].getnewaddress()
+ assert_array_result(self.nodes[1].listreceivedbyaddress(0,True),
+ {"address":addr},
+ {"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})
+
+ '''
+ getreceivedbyaddress Test
+ '''
+ # Send from node 0 to 1
+ addr = self.nodes[1].getnewaddress()
+ txid = self.nodes[0].sendtoaddress(addr, 0.1)
+ self.sync_all()
+
+ #Check balance is 0 because of 0 confirmations
+ balance = self.nodes[1].getreceivedbyaddress(addr)
+ if balance != Decimal("0.0"):
+ raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
+
+ #Check balance is 0.1
+ balance = self.nodes[1].getreceivedbyaddress(addr,0)
+ if balance != Decimal("0.1"):
+ raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
+
+        #Bury Tx under 10 blocks so it will be returned by the default getreceivedbyaddress
+ self.nodes[1].generate(10)
+ self.sync_all()
+ balance = self.nodes[1].getreceivedbyaddress(addr)
+ if balance != Decimal("0.1"):
+ raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
+
+ '''
+ listreceivedbyaccount + getreceivedbyaccount Test
+ '''
+ #set pre-state
+ addrArr = self.nodes[1].getnewaddress()
+ account = self.nodes[1].getaccount(addrArr)
+ received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account})
+ if len(received_by_account_json) == 0:
+ raise AssertionError("No accounts found in node")
+ balance_by_account = self.nodes[1].getreceivedbyaccount(account)
+
+ txid = self.nodes[0].sendtoaddress(addr, 0.1)
+ self.sync_all()
+
+ # listreceivedbyaccount should return received_by_account_json because of 0 confirmations
+ assert_array_result(self.nodes[1].listreceivedbyaccount(),
+ {"account":account},
+ received_by_account_json)
+
+        # getreceivedbyaccount should return the same balance because of 0 confirmations
+ balance = self.nodes[1].getreceivedbyaccount(account)
+ if balance != balance_by_account:
+ raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
+
+ self.nodes[1].generate(10)
+ self.sync_all()
+ # listreceivedbyaccount should return updated account balance
+ assert_array_result(self.nodes[1].listreceivedbyaccount(),
+ {"account":account},
+ {"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})
+
+        # getreceivedbyaccount should return the updated balance
+ balance = self.nodes[1].getreceivedbyaccount(account)
+ if balance != balance_by_account + Decimal("0.1"):
+ raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
+
+ #Create a new account named "mynewaccount" that has a 0 balance
+ self.nodes[1].getaccountaddress("mynewaccount")
+ received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
+ if len(received_by_account_json) == 0:
+ raise AssertionError("No accounts found in node")
+
+ # Test includeempty of listreceivedbyaccount
+ if received_by_account_json["amount"] != Decimal("0.0"):
+ raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))
+
+ # Test getreceivedbyaccount for 0 amount accounts
+ balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
+ if balance != Decimal("0.0"):
+ raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
+
+if __name__ == '__main__':
+ ReceivedByTest().main()
diff --git a/test/functional/reindex.py b/test/functional/reindex.py
new file mode 100755
index 0000000000..0cebb0466f
--- /dev/null
+++ b/test/functional/reindex.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test running bitcoind with -reindex and -reindex-chainstate options.
+
+- Start a single node and generate 3 blocks.
+- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3.
+- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3.
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ start_nodes,
+ stop_nodes,
+ assert_equal,
+)
+import time
+
+class ReindexTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def setup_network(self):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
+
+ def reindex(self, justchainstate=False):
+ self.nodes[0].generate(3)
+ blockcount = self.nodes[0].getblockcount()
+ stop_nodes(self.nodes)
+ extra_args = [["-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]]
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
+ while self.nodes[0].getblockcount() < blockcount:
+ time.sleep(0.1)
+ assert_equal(self.nodes[0].getblockcount(), blockcount)
+ self.log.info("Success")
+
+ def run_test(self):
+ self.reindex(False)
+ self.reindex(True)
+ self.reindex(False)
+ self.reindex(True)
+
+if __name__ == '__main__':
+ ReindexTest().main()
diff --git a/test/functional/replace-by-fee.py b/test/functional/replace-by-fee.py
new file mode 100755
index 0000000000..163c304eba
--- /dev/null
+++ b/test/functional/replace-by-fee.py
@@ -0,0 +1,525 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the RBF code."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.script import *
+from test_framework.mininode import *
+
+MAX_REPLACEMENT_LIMIT = 100
+
+def txToHex(tx):
+ return bytes_to_hex_str(tx.serialize())
+
+def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
+ """Create a txout with a given amount and scriptPubKey
+
+ Mines coins as needed.
+
+ confirmed - txouts created will be confirmed in the blockchain;
+ unconfirmed otherwise.
+ """
+ fee = 1*COIN
+ while node.getbalance() < satoshi_round((amount + fee)/COIN):
+ node.generate(100)
+
+ new_addr = node.getnewaddress()
+ txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
+ tx1 = node.getrawtransaction(txid, 1)
+ txid = int(txid, 16)
+ i = None
+
+ for i, txout in enumerate(tx1['vout']):
+ if txout['scriptPubKey']['addresses'] == [new_addr]:
+ break
+ assert i is not None
+
+ tx2 = CTransaction()
+ tx2.vin = [CTxIn(COutPoint(txid, i))]
+ tx2.vout = [CTxOut(amount, scriptPubKey)]
+ tx2.rehash()
+
+ signed_tx = node.signrawtransaction(txToHex(tx2))
+
+ txid = node.sendrawtransaction(signed_tx['hex'], True)
+
+ # If requested, ensure txouts are confirmed.
+ if confirmed:
+ mempool_size = len(node.getrawmempool())
+ while mempool_size > 0:
+ node.generate(1)
+ new_size = len(node.getrawmempool())
+ # Error out if we have something stuck in the mempool, as this
+ # would likely be a bug.
+ assert(new_size < mempool_size)
+ mempool_size = new_size
+
+ return COutPoint(int(txid, 16), 0)
+
+class ReplaceByFeeTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 1
+ self.setup_clean_chain = False
+
+ def setup_network(self):
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
+ "-whitelist=127.0.0.1",
+ "-limitancestorcount=50",
+ "-limitancestorsize=101",
+ "-limitdescendantcount=200",
+ "-limitdescendantsize=101"
+ ]))
+ self.is_network_split = False
+
+ def run_test(self):
+ make_utxo(self.nodes[0], 1*COIN)
+
+ self.log.info("Running test simple doublespend...")
+ self.test_simple_doublespend()
+
+ self.log.info("Running test doublespend chain...")
+ self.test_doublespend_chain()
+
+ self.log.info("Running test doublespend tree...")
+ self.test_doublespend_tree()
+
+ self.log.info("Running test replacement feeperkb...")
+ self.test_replacement_feeperkb()
+
+ self.log.info("Running test spends of conflicting outputs...")
+ self.test_spends_of_conflicting_outputs()
+
+ self.log.info("Running test new unconfirmed inputs...")
+ self.test_new_unconfirmed_inputs()
+
+ self.log.info("Running test too many replacements...")
+ self.test_too_many_replacements()
+
+ self.log.info("Running test opt-in...")
+ self.test_opt_in()
+
+ self.log.info("Running test prioritised transactions...")
+ self.test_prioritised_transactions()
+
+ self.log.info("Passed")
+
+ def test_simple_doublespend(self):
+ """Simple doublespend"""
+ tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
+
+ tx1a = CTransaction()
+ tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
+ tx1a_hex = txToHex(tx1a)
+ tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
+
+ # Should fail because we haven't changed the fee
+ tx1b = CTransaction()
+ tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
+ tx1b_hex = txToHex(tx1b)
+
+ # This will raise an exception due to insufficient fee
+ assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
+
+ # Extra 0.1 BTC fee
+ tx1b = CTransaction()
+ tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
+ tx1b_hex = txToHex(tx1b)
+ tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
+
+ mempool = self.nodes[0].getrawmempool()
+
+ assert (tx1a_txid not in mempool)
+ assert (tx1b_txid in mempool)
+
+ assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
+
+ def test_doublespend_chain(self):
+ """Doublespend of a long chain"""
+
+ initial_nValue = 50*COIN
+ tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
+
+ prevout = tx0_outpoint
+ remaining_value = initial_nValue
+ chain_txids = []
+ while remaining_value > 10*COIN:
+ remaining_value -= 1*COIN
+ tx = CTransaction()
+ tx.vin = [CTxIn(prevout, nSequence=0)]
+ tx.vout = [CTxOut(remaining_value, CScript([1]))]
+ tx_hex = txToHex(tx)
+ txid = self.nodes[0].sendrawtransaction(tx_hex, True)
+ chain_txids.append(txid)
+ prevout = COutPoint(int(txid, 16), 0)
+
+        # Whether the double-spend is allowed is evaluated by including all
+        # child fees (40 BTC), so this attempt is rejected.
+ dbl_tx = CTransaction()
+ dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
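+        # (Arithmetic: the chain starts from 50 BTC and stops above 10 BTC,
+        # paying 1 BTC per link, so the descendants carry 40 BTC in fees;
+        # this doublespend keeps 20 BTC, i.e. pays only a 30 BTC fee.)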
+ dbl_tx_hex = txToHex(dbl_tx)
+
+ # This will raise an exception due to insufficient fee
+ assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
+
+ # Accepted with sufficient fee
+ dbl_tx = CTransaction()
+ dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
+ dbl_tx_hex = txToHex(dbl_tx)
+ self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
+
+ mempool = self.nodes[0].getrawmempool()
+ for doublespent_txid in chain_txids:
+ assert(doublespent_txid not in mempool)
+
+ def test_doublespend_tree(self):
+ """Doublespend of a big tree of transactions"""
+
+ initial_nValue = 50*COIN
+ tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
+
+ def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
+ if _total_txs is None:
+ _total_txs = [0]
+ if _total_txs[0] >= max_txs:
+ return
+
+ txout_value = (initial_value - fee) // tree_width
+ if txout_value < fee:
+ return
+
+ vout = [CTxOut(txout_value, CScript([i+1]))
+ for i in range(tree_width)]
+ tx = CTransaction()
+ tx.vin = [CTxIn(prevout, nSequence=0)]
+ tx.vout = vout
+ tx_hex = txToHex(tx)
+
+ assert(len(tx.serialize()) < 100000)
+ txid = self.nodes[0].sendrawtransaction(tx_hex, True)
+ yield tx
+ _total_txs[0] += 1
+
+ txid = int(txid, 16)
+
+ for i, txout in enumerate(tx.vout):
+ for x in branch(COutPoint(txid, i), txout_value,
+ max_txs,
+ tree_width=tree_width, fee=fee,
+ _total_txs=_total_txs):
+ yield x
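+        # (branch() recursively fans each output out into tree_width children
+        # until max_txs transactions have been submitted, yielding each one.)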
+
+ fee = int(0.0001*COIN)
+ n = MAX_REPLACEMENT_LIMIT
+ tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
+ assert_equal(len(tree_txs), n)
+
+ # Attempt double-spend, will fail because too little fee paid
+ dbl_tx = CTransaction()
+ dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
+ dbl_tx_hex = txToHex(dbl_tx)
+ # This will raise an exception due to insufficient fee
+ assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
+
+ # 1 BTC fee is enough
+ dbl_tx = CTransaction()
+ dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
+ dbl_tx_hex = txToHex(dbl_tx)
+ self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
+
+ mempool = self.nodes[0].getrawmempool()
+
+ for tx in tree_txs:
+ tx.rehash()
+ assert (tx.hash not in mempool)
+
+ # Try again, but with more total transactions than the "max txs
+ # double-spent at once" anti-DoS limit.
+ for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
+ fee = int(0.0001*COIN)
+ tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
+ tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
+ assert_equal(len(tree_txs), n)
+
+ dbl_tx = CTransaction()
+ dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
+ dbl_tx_hex = txToHex(dbl_tx)
+ # This will raise an exception
+ assert_raises_jsonrpc(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
+
+ for tx in tree_txs:
+ tx.rehash()
+ self.nodes[0].getrawtransaction(tx.hash)
+
+ def test_replacement_feeperkb(self):
+ """Replacement requires fee-per-KB to be higher"""
+ tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
+
+ tx1a = CTransaction()
+ tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
+ tx1a_hex = txToHex(tx1a)
+ tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
+
+ # Higher fee, but the fee per KB is much lower, so the replacement is
+ # rejected.
+ tx1b = CTransaction()
+ tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
+ tx1b_hex = txToHex(tx1b)
+
+ # This will raise an exception due to insufficient fee
+ assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
+
+ def test_spends_of_conflicting_outputs(self):
+ """Replacements that spend conflicting tx outputs are rejected"""
+ utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
+ utxo2 = make_utxo(self.nodes[0], 3*COIN)
+
+ tx1a = CTransaction()
+ tx1a.vin = [CTxIn(utxo1, nSequence=0)]
+ tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))]
+ tx1a_hex = txToHex(tx1a)
+ tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
+
+ tx1a_txid = int(tx1a_txid, 16)
+
+ # Direct spend an output of the transaction we're replacing.
+ tx2 = CTransaction()
+ tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
+ tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
+ tx2.vout = tx1a.vout
+ tx2_hex = txToHex(tx2)
+
+ # This will raise an exception
+ assert_raises_jsonrpc(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
+
+ # Spend tx1a's output to test the indirect case.
+ tx1b = CTransaction()
+ tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
+ tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))]
+ tx1b_hex = txToHex(tx1b)
+ tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
+ tx1b_txid = int(tx1b_txid, 16)
+
+ tx2 = CTransaction()
+ tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
+ CTxIn(COutPoint(tx1b_txid, 0))]
+ tx2.vout = tx1a.vout
+ tx2_hex = txToHex(tx2)
+
+ # This will raise an exception
+ assert_raises_jsonrpc(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
+
+ def test_new_unconfirmed_inputs(self):
+ """Replacements that add new unconfirmed inputs are rejected"""
+ confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
+ unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)
+
+ tx1 = CTransaction()
+ tx1.vin = [CTxIn(confirmed_utxo)]
+ tx1.vout = [CTxOut(1*COIN, CScript([b'a']))]
+ tx1_hex = txToHex(tx1)
+ tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True)
+
+ tx2 = CTransaction()
+ tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
+ tx2.vout = tx1.vout
+ tx2_hex = txToHex(tx2)
+
+ # This will raise an exception
+ assert_raises_jsonrpc(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True)
+
+ def test_too_many_replacements(self):
+ """Replacements that evict too many transactions are rejected"""
+ # Try directly replacing more than MAX_REPLACEMENT_LIMIT
+ # transactions
+
+ # Start by creating a single transaction with many outputs
+ initial_nValue = 10*COIN
+ utxo = make_utxo(self.nodes[0], initial_nValue)
+ fee = int(0.0001*COIN)
+ split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
+
+ outputs = []
+ for i in range(MAX_REPLACEMENT_LIMIT+1):
+ outputs.append(CTxOut(split_value, CScript([1])))
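+        # One more output than the replacement limit allows, so replacing all
+        # of the split transaction's descendants at once must fail below.
+        assert_equal(len(outputs), MAX_REPLACEMENT_LIMIT+1)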
+
+ splitting_tx = CTransaction()
+ splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
+ splitting_tx.vout = outputs
+ splitting_tx_hex = txToHex(splitting_tx)
+
+ txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
+ txid = int(txid, 16)
+
+ # Now spend each of those outputs individually
+ for i in range(MAX_REPLACEMENT_LIMIT+1):
+ tx_i = CTransaction()
+ tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
+ tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
+ tx_i_hex = txToHex(tx_i)
+ self.nodes[0].sendrawtransaction(tx_i_hex, True)
+
+ # Now create doublespend of the whole lot; should fail.
+ # Need a big enough fee to cover all spending transactions and have
+ # a higher fee rate
+ double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
+ inputs = []
+ for i in range(MAX_REPLACEMENT_LIMIT+1):
+ inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
+ double_tx = CTransaction()
+ double_tx.vin = inputs
+ double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
+ double_tx_hex = txToHex(double_tx)
+
+ # This will raise an exception
+ assert_raises_jsonrpc(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True)
+
+ # If we remove an input, it should pass
+ double_tx = CTransaction()
+ double_tx.vin = inputs[0:-1]
+ double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
+ double_tx_hex = txToHex(double_tx)
+ self.nodes[0].sendrawtransaction(double_tx_hex, True)
+
+ def test_opt_in(self):
+ """Replacing should only work if orig tx opted in"""
+ tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
+
+ # Create a non-opting in transaction
+ tx1a = CTransaction()
+ tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
+ tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
+ tx1a_hex = txToHex(tx1a)
+ tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
+
+ # Shouldn't be able to double-spend
+ tx1b = CTransaction()
+ tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
+ tx1b_hex = txToHex(tx1b)
+
+ # This will raise an exception
+ assert_raises_jsonrpc(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True)
+
+ tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
+
+ # Create a different non-opting in transaction
+ tx2a = CTransaction()
+ tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
+ tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
+ tx2a_hex = txToHex(tx2a)
+ tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
+
+ # Still shouldn't be able to double-spend
+ tx2b = CTransaction()
+ tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
+ tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
+ tx2b_hex = txToHex(tx2b)
+
+ # This will raise an exception
+ assert_raises_jsonrpc(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True)
+
+ # Now create a new transaction that spends from tx1a and tx2a
+ # opt-in on one of the inputs
+ # Transaction should be replaceable on either input
+
+ tx1a_txid = int(tx1a_txid, 16)
+ tx2a_txid = int(tx2a_txid, 16)
+
+ tx3a = CTransaction()
+ tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
+ CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
+ tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))]
+ tx3a_hex = txToHex(tx3a)
+
+ self.nodes[0].sendrawtransaction(tx3a_hex, True)
+
+ tx3b = CTransaction()
+ tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
+ tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))]
+ tx3b_hex = txToHex(tx3b)
+
+ tx3c = CTransaction()
+ tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
+ tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))]
+ tx3c_hex = txToHex(tx3c)
+
+ self.nodes[0].sendrawtransaction(tx3b_hex, True)
+ # If tx3b was accepted, tx3c won't look like a replacement,
+ # but make sure it is accepted anyway
+ self.nodes[0].sendrawtransaction(tx3c_hex, True)
+
+ def test_prioritised_transactions(self):
+ # Ensure that fee deltas used via prioritisetransaction are
+ # correctly used by replacement logic
+
+ # 1. Check that feeperkb uses modified fees
+ tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
+
+ tx1a = CTransaction()
+ tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
+ tx1a_hex = txToHex(tx1a)
+ tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
+
+ # Higher fee, but the actual fee per KB is much lower.
+ tx1b = CTransaction()
+ tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
+ tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))]
+ tx1b_hex = txToHex(tx1b)
+
+ # Verify tx1b cannot replace tx1a.
+ assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
+
+ # Use prioritisetransaction to set tx1a's fee to 0.
+ self.nodes[0].prioritisetransaction(tx1a_txid, int(-0.1*COIN))
+
+ # Now tx1b should be able to replace tx1a
+ tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
+
+ assert(tx1b_txid in self.nodes[0].getrawmempool())
+
+ # 2. Check that absolute fee checks use modified fee.
+ tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
+
+ tx2a = CTransaction()
+ tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
+ tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
+ tx2a_hex = txToHex(tx2a)
+ tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
+
+ # Lower fee, but we'll prioritise it
+ tx2b = CTransaction()
+ tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
+ tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))]
+ tx2b.rehash()
+ tx2b_hex = txToHex(tx2b)
+
+ # Verify tx2b cannot replace tx2a.
+ assert_raises_jsonrpc(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True)
+
+ # Now prioritise tx2b to have a higher modified fee
+ self.nodes[0].prioritisetransaction(tx2b.hash, int(0.1*COIN))
+
+ # tx2b should now be accepted
+ tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
+
+ assert(tx2b_txid in self.nodes[0].getrawmempool())
+
+if __name__ == '__main__':
+ ReplaceByFeeTest().main()
diff --git a/test/functional/rest.py b/test/functional/rest.py
new file mode 100755
index 0000000000..776211d301
--- /dev/null
+++ b/test/functional/rest.py
@@ -0,0 +1,331 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the REST API."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from struct import *
+from io import BytesIO
+from codecs import encode
+
+import http.client
+import urllib.parse
+
+def deser_uint256(f):
+ r = 0
+ for i in range(8):
+ t = unpack(b"<I", f.read(4))[0]
+ r += t << (i * 32)
+ return r
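+# (Equivalent, for clarity: the loop above composes a 256-bit little-endian
+# integer, i.e. int.from_bytes(f.read(32), 'little') would give the same
+# result.)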
+
+#allows simple http get calls
+def http_get_call(host, port, path, response_object = 0):
+ conn = http.client.HTTPConnection(host, port)
+ conn.request('GET', path)
+
+ if response_object:
+ return conn.getresponse()
+
+ return conn.getresponse().read().decode('utf-8')
+
+#allows simple http post calls with a request body
+def http_post_call(host, port, path, requestdata = '', response_object = 0):
+ conn = http.client.HTTPConnection(host, port)
+ conn.request('POST', path, requestdata)
+
+ if response_object:
+ return conn.getresponse()
+
+ return conn.getresponse().read()
+
+class RESTTest (BitcoinTestFramework):
+ FORMAT_SEPARATOR = "."
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 3
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
+ connect_nodes_bi(self.nodes,0,1)
+ connect_nodes_bi(self.nodes,1,2)
+ connect_nodes_bi(self.nodes,0,2)
+ self.is_network_split=False
+ self.sync_all()
+
+ def run_test(self):
+ url = urllib.parse.urlparse(self.nodes[0].url)
+ self.log.info("Mining blocks...")
+
+ self.nodes[0].generate(1)
+ self.sync_all()
+ self.nodes[2].generate(100)
+ self.sync_all()
+
+ assert_equal(self.nodes[0].getbalance(), 50)
+
+ txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
+ self.sync_all()
+ self.nodes[2].generate(1)
+ self.sync_all()
+ bb_hash = self.nodes[0].getbestblockhash()
+
+ assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
+
+ # load the latest 0.1 tx over the REST API
+ json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
+ json_obj = json.loads(json_string)
+ vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
+ # get n of 0.1 outpoint
+ n = 0
+ for vout in json_obj['vout']:
+ if vout['value'] == 0.1:
+ n = vout['n']
+
+
+        #######################################
+        # GETUTXOS: query an unspent outpoint #
+        #######################################
+ json_request = '/checkmempool/'+txid+'-'+str(n)
+ json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
+ json_obj = json.loads(json_string)
+
+ #check chainTip response
+ assert_equal(json_obj['chaintipHash'], bb_hash)
+
+ #make sure there is one utxo
+ assert_equal(len(json_obj['utxos']), 1)
+ assert_equal(json_obj['utxos'][0]['value'], 0.1)
+
+
+        #################################################
+        # GETUTXOS: now query an already spent outpoint #
+        #################################################
+ json_request = '/checkmempool/'+vintx+'-0'
+ json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
+ json_obj = json.loads(json_string)
+
+ #check chainTip response
+ assert_equal(json_obj['chaintipHash'], bb_hash)
+
+        #make sure there is no utxo in the response because this outpoint has been spent
+ assert_equal(len(json_obj['utxos']), 0)
+
+ #check bitmap
+ assert_equal(json_obj['bitmap'], "0")
+
+
+ ##################################################
+ # GETUTXOS: now check both with the same request #
+ ##################################################
+ json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
+ json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
+ json_obj = json.loads(json_string)
+ assert_equal(len(json_obj['utxos']), 1)
+ assert_equal(json_obj['bitmap'], "10")
+
+ #test binary response
+ bb_hash = self.nodes[0].getbestblockhash()
+
+ binaryRequest = b'\x01\x02'
+ binaryRequest += hex_str_to_bytes(txid)
+ binaryRequest += pack("i", n)
+ binaryRequest += hex_str_to_bytes(vintx)
+ binaryRequest += pack("i", 0)
+
+ bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
+ output = BytesIO()
+ output.write(bin_response)
+ output.seek(0)
+ chainHeight = unpack("i", output.read(4))[0]
+ hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(64)
+
+ assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine
+ assert_equal(chainHeight, 102) #chain height must be 102
+
+
+ ############################
+ # GETUTXOS: mempool checks #
+ ############################
+
+ # do a tx and don't sync
+ txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
+ json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
+ json_obj = json.loads(json_string)
+ vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
+ # get n of 0.1 outpoint
+ n = 0
+ for vout in json_obj['vout']:
+ if vout['value'] == 0.1:
+ n = vout['n']
+
+ json_request = '/'+txid+'-'+str(n)
+ json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
+ json_obj = json.loads(json_string)
+        assert_equal(len(json_obj['utxos']), 0) #there should be no outpoint because the tx is unconfirmed and checkmempool was not set
+
+ json_request = '/checkmempool/'+txid+'-'+str(n)
+ json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
+ json_obj = json.loads(json_string)
+        assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because the tx was just added to the mempool and checkmempool is set
+
+ #do some invalid requests
+ json_request = '{"checkmempool'
+ response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
+        assert_equal(response.status, 400) #must be a 400 because we send an invalid json request
+
+ json_request = '{"checkmempool'
+ response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
+        assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request
+
+ response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
+        assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request
+
+ #test limits
+ json_request = '/checkmempool/'
+ for x in range(0, 20):
+ json_request += txid+'-'+str(n)+'/'
+ json_request = json_request.rstrip("/")
+ response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
+        assert_equal(response.status, 400) #must be a 400 because we are exceeding the limits
+
+ json_request = '/checkmempool/'
+ for x in range(0, 15):
+ json_request += txid+'-'+str(n)+'/'
+ json_request = json_request.rstrip("/")
+ response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
+ assert_equal(response.status, 200) #must be a 200 because we are within the limits
+
+ self.nodes[0].generate(1) #generate block to not affect upcoming tests
+ self.sync_all()
+
+ ################
+ # /rest/block/ #
+ ################
+
+ # check binary format
+ response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
+ assert_equal(response.status, 200)
+ assert_greater_than(int(response.getheader('content-length')), 80)
+ response_str = response.read()
+
+ # compare with block header
+ response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
+ assert_equal(response_header.status, 200)
+ assert_equal(int(response_header.getheader('content-length')), 80)
+ response_header_str = response_header.read()
+ assert_equal(response_str[0:80], response_header_str)
+
+ # check block hex format
+ response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
+ assert_equal(response_hex.status, 200)
+ assert_greater_than(int(response_hex.getheader('content-length')), 160)
+ response_hex_str = response_hex.read()
+ assert_equal(encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])
+
+ # compare with hex block header
+ response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
+ assert_equal(response_header_hex.status, 200)
+ assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
+ response_header_hex_str = response_header_hex.read()
+ assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
+ assert_equal(encode(response_header_str, "hex_codec")[0:160], response_header_hex_str[0:160])
+
+ # check json format
+ block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
+ block_json_obj = json.loads(block_json_string)
+ assert_equal(block_json_obj['hash'], bb_hash)
+
+ # compare with json block header
+ response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
+ assert_equal(response_header_json.status, 200)
+ response_header_json_str = response_header_json.read().decode('utf-8')
+ json_obj = json.loads(response_header_json_str, parse_float=Decimal)
+ assert_equal(len(json_obj), 1) #ensure that there is one header in the json response
+ assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same
+
+ #compare with normal RPC block response
+ rpc_block_json = self.nodes[0].getblock(bb_hash)
+ assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
+ assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations'])
+ assert_equal(json_obj[0]['height'], rpc_block_json['height'])
+ assert_equal(json_obj[0]['version'], rpc_block_json['version'])
+ assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
+ assert_equal(json_obj[0]['time'], rpc_block_json['time'])
+ assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
+ assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
+ assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
+ assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
+ assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash'])
+
+ #see if we can get 5 headers in one response
+ self.nodes[1].generate(5)
+ self.sync_all()
+ response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
+ assert_equal(response_header_json.status, 200)
+ response_header_json_str = response_header_json.read().decode('utf-8')
+ json_obj = json.loads(response_header_json_str)
+ assert_equal(len(json_obj), 5) #now we should have 5 header objects
+
+ # do tx test
+ tx_hash = block_json_obj['tx'][0]['txid']
+ json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
+ json_obj = json.loads(json_string)
+ assert_equal(json_obj['txid'], tx_hash)
+
+ # check hex format response
+ hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
+ assert_equal(hex_string.status, 200)
+ assert_greater_than(int(response.getheader('content-length')), 10)
+
+
+ # check block tx details
+ # let's make 3 tx and mine them on node 1
+ txs = []
+ txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
+ txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
+ txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
+ self.sync_all()
+
+ # check that there are exactly 3 transactions in the TX memory pool before generating the block
+ json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json')
+ json_obj = json.loads(json_string)
+ assert_equal(json_obj['size'], 3)
+ # the size of the memory pool should be greater than 3x ~100 bytes
+ assert_greater_than(json_obj['bytes'], 300)
+
+ # check that there are our submitted transactions in the TX memory pool
+ json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json')
+ json_obj = json.loads(json_string)
+ for tx in txs:
+ assert_equal(tx in json_obj, True)
+
+ # now mine the transactions
+ newblockhash = self.nodes[1].generate(1)
+ self.sync_all()
+
+ #check if the 3 tx show up in the new block
+ json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
+ json_obj = json.loads(json_string)
+ for tx in json_obj['tx']:
+ if not 'coinbase' in tx['vin'][0]: #exclude coinbase
+ assert_equal(tx['txid'] in txs, True)
+
+ #check the same but without tx details
+ json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
+ json_obj = json.loads(json_string)
+ for tx in txs:
+ assert_equal(tx in json_obj['tx'], True)
+
+ #test rest bestblock
+ bb_hash = self.nodes[0].getbestblockhash()
+
+ json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
+ json_obj = json.loads(json_string)
+ assert_equal(json_obj['bestblockhash'], bb_hash)
+
+if __name__ == '__main__':
+    RESTTest().main()
diff --git a/test/functional/rpcbind_test.py b/test/functional/rpcbind_test.py
new file mode 100755
index 0000000000..8720a345ce
--- /dev/null
+++ b/test/functional/rpcbind_test.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test running bitcoind with the -rpcbind and -rpcallowip options."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.netutil import *
+
+
+class RPCBindTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def setup_network(self):
+ pass
+
+ def setup_nodes(self):
+ pass
+
+ def run_bind_test(self, allow_ips, connect_to, addresses, expected):
+ '''
+ Start a node with requested rpcallowip and rpcbind parameters,
+ then try to connect, and check if the set of bound addresses
+ matches the expected set.
+ '''
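+        # (get_bind_addrs, from test_framework.netutil, is assumed to read the
+        # node's bound sockets out of /proc, which is why run_test below
+        # restricts this test to Linux.)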
+ expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
+ base_args = ['-disablewallet', '-nolisten']
+ if allow_ips:
+ base_args += ['-rpcallowip=' + x for x in allow_ips]
+ binds = ['-rpcbind='+addr for addr in addresses]
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [base_args + binds], connect_to)
+ pid = bitcoind_processes[0].pid
+ assert_equal(set(get_bind_addrs(pid)), set(expected))
+ stop_nodes(self.nodes)
+
+ def run_allowip_test(self, allow_ips, rpchost, rpcport):
+ '''
+ Start a node with rpcallow IP, and request getnetworkinfo
+ at a non-localhost IP.
+ '''
+ base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [base_args])
+ # connect to node through non-loopback interface
+ node = get_rpc_proxy(rpc_url(0, "%s:%d" % (rpchost, rpcport)), 0)
+ node.getnetworkinfo()
+ stop_nodes(self.nodes)
+
+ def run_test(self):
+ # due to OS-specific network stats queries, this test works only on Linux
+ assert(sys.platform.startswith('linux'))
+ # find the first non-loopback interface for testing
+ non_loopback_ip = None
+ for name,ip in all_interfaces():
+ if ip != '127.0.0.1':
+ non_loopback_ip = ip
+ break
+ if non_loopback_ip is None:
+ assert False, 'This test requires at least one non-loopback IPv4 interface'
+ self.log.info("Using interface %s for testing" % non_loopback_ip)
+
+ defaultport = rpc_port(0)
+
+ # check default without rpcallowip (IPv4 and IPv6 localhost)
+ self.run_bind_test(None, '127.0.0.1', [],
+ [('127.0.0.1', defaultport), ('::1', defaultport)])
+ # check default with rpcallowip (IPv6 any)
+ self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
+ [('::0', defaultport)])
+ # check only IPv4 localhost (explicit)
+ self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
+ [('127.0.0.1', defaultport)])
+ # check only IPv4 localhost (explicit) with alternative port
+ self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
+ [('127.0.0.1', 32171)])
+ # check only IPv4 localhost (explicit) with multiple alternative ports on same host
+ self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
+ [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
+ # check only IPv6 localhost (explicit)
+ self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
+ [('::1', defaultport)])
+ # check both IPv4 and IPv6 localhost (explicit)
+ self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
+ [('127.0.0.1', defaultport), ('::1', defaultport)])
+ # check only non-loopback interface
+ self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip],
+ [(non_loopback_ip, defaultport)])
+
+ # Check that with invalid rpcallowip, we are denied
+ self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport)
+ assert_raises_jsonrpc(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport)
+
+if __name__ == '__main__':
+ RPCBindTest().main()
diff --git a/test/functional/rpcnamedargs.py b/test/functional/rpcnamedargs.py
new file mode 100755
index 0000000000..f6175c8ca7
--- /dev/null
+++ b/test/functional/rpcnamedargs.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test using named arguments for RPCs."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ assert_raises_jsonrpc,
+ start_nodes,
+)
+
+
+class NamedArgumentTest(BitcoinTestFramework):
+ """
+ Test named arguments on RPC calls.
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = False
+ self.num_nodes = 1
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
+ self.is_network_split = False
+ self.sync_all()
+
+ def run_test(self):
+ node = self.nodes[0]
+ h = node.help(command='getinfo')
+ assert(h.startswith('getinfo\n'))
+
+ assert_raises_jsonrpc(-8, 'Unknown named parameter', node.help, random='getinfo')
+
+ h = node.getblockhash(height=0)
+ node.getblock(blockhash=h)
+
+ assert_equal(node.echo(), [])
+ assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
+ assert_equal(node.echo(arg1=1), [None, 1])
+ assert_equal(node.echo(arg9=None), [None]*10)
+ assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
+
+if __name__ == '__main__':
+ NamedArgumentTest().main()
diff --git a/test/functional/segwit.py b/test/functional/segwit.py
new file mode 100755
index 0000000000..5b1fba8eec
--- /dev/null
+++ b/test/functional/segwit.py
@@ -0,0 +1,642 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the SegWit changeover logic."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.mininode import sha256, ripemd160, CTransaction, CTxIn, COutPoint, CTxOut, COIN
+from test_framework.address import script_to_p2sh, key_to_p2pkh
+from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE
+from io import BytesIO
+from test_framework.mininode import ToHex, FromHex
+
+NODE_0 = 0
+NODE_1 = 1
+NODE_2 = 2
+WIT_V0 = 0
+WIT_V1 = 1
+
+# Create a scriptPubKey corresponding to either a P2WPKH output for the
+# given pubkey, or a P2WSH output of a 1-of-1 multisig for the given
+# pubkey. Returns the hex encoding of the scriptPubKey.
+def witness_script(use_p2wsh, pubkey):
+ if not use_p2wsh:
+ # P2WPKH instead
+ pubkeyhash = hash160(hex_str_to_bytes(pubkey))
+ pkscript = CScript([OP_0, pubkeyhash])
+ else:
+ # 1-of-1 multisig
+ witness_program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
+ scripthash = sha256(witness_program)
+ pkscript = CScript([OP_0, scripthash])
+ return bytes_to_hex_str(pkscript)
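+
+# For reference, the two shapes produced above are (per BIP141):
+#   P2WPKH: OP_0 <20-byte hash160(pubkey)>
+#   P2WSH:  OP_0 <32-byte sha256(witness script)>
+# A minimal sketch (not called by the test) illustrating the program sizes:
+def _witness_program_sizes(pubkey):
+ p2wpkh = CScript([OP_0, hash160(hex_str_to_bytes(pubkey))])
+ p2wsh = CScript([OP_0, sha256(CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG]))])
+ # with the version byte (OP_0) and the push opcode these serialize to
+ # 22 and 34 bytes respectively
+ return (len(p2wpkh), len(p2wsh))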
+
+# Return a transaction (in hex) that spends the given utxo to a segwit output,
+# optionally wrapping the segwit output using P2SH.
+def create_witnessprogram(use_p2wsh, utxo, pubkey, encode_p2sh, amount):
+ pkscript = hex_str_to_bytes(witness_script(use_p2wsh, pubkey))
+ if (encode_p2sh):
+ p2sh_hash = hash160(pkscript)
+ pkscript = CScript([OP_HASH160, p2sh_hash, OP_EQUAL])
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), b""))
+ tx.vout.append(CTxOut(int(amount*COIN), pkscript))
+ return ToHex(tx)
+
+# Create a transaction spending a given utxo to a segwit output corresponding
+# to the given pubkey: use_p2wsh determines whether to use P2WPKH or P2WSH;
+# encode_p2sh determines whether to wrap in P2SH.
+# sign=True will have the given node sign the transaction.
+# insert_redeem_script will be added to the scriptSig, if given.
+def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
+ tx_to_witness = create_witnessprogram(use_p2wsh, utxo, pubkey, encode_p2sh, amount)
+ if (sign):
+ signed = node.signrawtransaction(tx_to_witness)
+ assert("errors" not in signed or len(["errors"]) == 0)
+ return node.sendrawtransaction(signed["hex"])
+ else:
+ if (insert_redeem_script):
+ tx = FromHex(CTransaction(), tx_to_witness)
+ tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)])
+ tx_to_witness = ToHex(tx)
+
+ return node.sendrawtransaction(tx_to_witness)
+
+def getutxo(txid):
+ utxo = {}
+ utxo["vout"] = 0
+ utxo["txid"] = txid
+ return utxo
+
+def find_unspent(node, min_value):
+ for utxo in node.listunspent():
+ if utxo['amount'] >= min_value:
+ return utxo
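+
+# Typical usage (a sketch of what run_test does below): spend a mature 50 BTC
+# coinbase output to a witness output, optionally wrapped in P2SH:
+#   txid = send_to_witness(WIT_V0, node, find_unspent(node, 50), pubkey, False, Decimal("49.999"))
+#   utxo = getutxo(txid) # the witness output is always created at vout 0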
+
+class SegWitTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 3
+
+ def setup_network(self):
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-walletprematurewitness", "-rpcserialversion=0"]))
+ self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1"]))
+ self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness"]))
+ connect_nodes(self.nodes[1], 0)
+ connect_nodes(self.nodes[2], 1)
+ connect_nodes(self.nodes[0], 2)
+ self.is_network_split = False
+ self.sync_all()
+
+ def success_mine(self, node, txid, sign, redeem_script=""):
+ send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
+ block = node.generate(1)
+ assert_equal(len(node.getblock(block[0])["tx"]), 2)
+ sync_blocks(self.nodes)
+
+ def skip_mine(self, node, txid, sign, redeem_script=""):
+ send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
+ block = node.generate(1)
+ assert_equal(len(node.getblock(block[0])["tx"]), 1)
+ sync_blocks(self.nodes)
+
+ def fail_accept(self, node, error_msg, txid, sign, redeem_script=""):
+ assert_raises_jsonrpc(-26, error_msg, send_to_witness, 1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
+
+ def fail_mine(self, node, txid, sign, redeem_script=""):
+ send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
+ assert_raises_jsonrpc(-1, "CreateNewBlock: TestBlockValidity failed", node.generate, 1)
+ sync_blocks(self.nodes)
+
+ def run_test(self):
+ self.nodes[0].generate(161) #block 161
+
+ self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
+ txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+ tmpl = self.nodes[0].getblocktemplate({})
+ assert(tmpl['sizelimit'] == 1000000)
+ assert('weightlimit' not in tmpl)
+ assert(tmpl['sigoplimit'] == 20000)
+ assert(tmpl['transactions'][0]['hash'] == txid)
+ assert(tmpl['transactions'][0]['sigops'] == 2)
+ tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
+ assert(tmpl['sizelimit'] == 1000000)
+ assert('weightlimit' not in tmpl)
+ assert(tmpl['sigoplimit'] == 20000)
+ assert(tmpl['transactions'][0]['hash'] == txid)
+ assert(tmpl['transactions'][0]['sigops'] == 2)
+ self.nodes[0].generate(1) #block 162
+
+ balance_presetup = self.nodes[0].getbalance()
+ self.pubkey = []
+ p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that pay to a witness version VER pkscript for NODE's address, embedded in P2SH
+ wit_ids = [] # wit_ids[NODE][VER] is an array of txids that pay to a witness version VER pkscript for NODE's address, via bare witness
+ for i in range(3):
+ newaddress = self.nodes[i].getnewaddress()
+ self.pubkey.append(self.nodes[i].validateaddress(newaddress)["pubkey"])
+ multiaddress = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]])
+ self.nodes[i].addwitnessaddress(newaddress)
+ self.nodes[i].addwitnessaddress(multiaddress)
+ p2sh_ids.append([])
+ wit_ids.append([])
+ for v in range(2):
+ p2sh_ids[i].append([])
+ wit_ids[i].append([])
+
+ for i in range(5):
+ for n in range(3):
+ for v in range(2):
+ wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
+ p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
+
+ self.nodes[0].generate(1) #block 163
+ sync_blocks(self.nodes)
+
+ # Make sure all nodes recognize the transactions as theirs
+ assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50)
+ assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
+ assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))
+
+ self.nodes[0].generate(260) #block 423
+ sync_blocks(self.nodes)
+
+ self.log.info("Verify default node can't accept any witness format txs before fork")
+ # unsigned, no scriptsig
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False)
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False)
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False)
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False)
+ # unsigned with redeem script
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))
+ self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))
+ # signed
+ self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V0][0], True)
+ self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V1][0], True)
+ self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V0][0], True)
+ self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V1][0], True)
+
+ self.log.info("Verify witness txs are skipped for mining before the fork")
+ self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
+ self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
+ self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426
+ self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427
+
+ # TODO: An old node would see these txs without witnesses and be able to mine them
+
+ self.log.info("Verify unsigned bare witness txs in versionbits-setting blocks are valid before the fork")
+ self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][1], False) #block 428
+ self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][1], False) #block 429
+
+ self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid")
+ self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V0][1], False)
+ self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V1][1], False)
+
+ self.log.info("Verify unsigned p2sh witness txs with a redeem script in versionbits-settings blocks are valid before the fork")
+ self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False, witness_script(False, self.pubkey[2])) #block 430
+ self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False, witness_script(True, self.pubkey[2])) #block 431
+
+ self.log.info("Verify previous witness txs skipped for mining can now be mined")
+ assert_equal(len(self.nodes[2].getrawmempool()), 4)
+ block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)
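+ # (Regtest BIP9 periods are 144 blocks; segwit becomes active once three
+ # full periods have elapsed, at height 432 = 144 * 3.)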
+ sync_blocks(self.nodes)
+ assert_equal(len(self.nodes[2].getrawmempool()), 0)
+ segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
+ assert_equal(len(segwit_tx_list), 5)
+
+ self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
+ assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
+ assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
+ for i in range(len(segwit_tx_list)):
+ tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
+ assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))
+ assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))
+ assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
+ assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
+ assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))
+
+ self.log.info("Verify witness txs without witness data are invalid after the fork")
+ self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False)
+ self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False)
+ self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, witness_script(False, self.pubkey[2]))
+ self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, witness_script(True, self.pubkey[2]))
+
+ self.log.info("Verify default node can now use witness txs")
+ self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 433
+ self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 434
+ self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 435
+ self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 436
+
+ self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork")
+ txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+ tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
+ assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data
+ assert(tmpl['weightlimit'] == 4000000)
+ assert(tmpl['sigoplimit'] == 80000)
+ assert(tmpl['transactions'][0]['txid'] == txid)
+ assert(tmpl['transactions'][0]['sigops'] == 8)
+
+ self.nodes[0].generate(1) # Mine a block to clear the gbt cache
+
+ self.log.info("Non-segwit miners are able to use GBT response after activation.")
+ # Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) ->
+ # tx2 (segwit input, paying to a non-segwit output) ->
+ # tx3 (non-segwit input, paying to a non-segwit output).
+ # Only tx1 is allowed to appear in the block; tx2 and tx3 must not.
+ txid1 = send_to_witness(1, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996"))
+ hex_tx = self.nodes[0].gettransaction(txid1)['hex']
+ tx = FromHex(CTransaction(), hex_tx)
+ assert(tx.wit.is_null()) # This should not be a segwit input
+ assert(txid1 in self.nodes[0].getrawmempool())
+
+ # Now create tx2, which will spend from txid1.
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b''))
+ tx.vout.append(CTxOut(int(49.99*COIN), CScript([OP_TRUE])))
+ tx2_hex = self.nodes[0].signrawtransaction(ToHex(tx))['hex']
+ txid2 = self.nodes[0].sendrawtransaction(tx2_hex)
+ tx = FromHex(CTransaction(), tx2_hex)
+ assert(not tx.wit.is_null())
+
+ # Now create tx3, which will spend from txid2
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b""))
+ tx.vout.append(CTxOut(int(49.95*COIN), CScript([OP_TRUE]))) # Huge fee
+ tx.calc_sha256()
+ txid3 = self.nodes[0].sendrawtransaction(ToHex(tx))
+ assert(tx.wit.is_null())
+ assert(txid3 in self.nodes[0].getrawmempool())
+
+ # Now try calling getblocktemplate() without segwit support.
+ template = self.nodes[0].getblocktemplate()
+
+ # Check that tx1 is the only transaction of the 3 in the template.
+ template_txids = [ t['txid'] for t in template['transactions'] ]
+ assert(txid2 not in template_txids and txid3 not in template_txids)
+ assert(txid1 in template_txids)
+
+ # Check that running with segwit support results in all 3 being included.
+ template = self.nodes[0].getblocktemplate({"rules": ["segwit"]})
+ template_txids = [ t['txid'] for t in template['transactions'] ]
+ assert(txid1 in template_txids)
+ assert(txid2 in template_txids)
+ assert(txid3 in template_txids)
+
+ # Mine a block to clear the gbt cache again.
+ self.nodes[0].generate(1)
+
+ self.log.info("Verify behaviour of importaddress, addwitnessaddress and listunspent")
+
+ # Some public keys to be used later
+ pubkeys = [
+ "0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
+ "02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
+ "04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
+ "02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
+ "036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
+ "0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
+ "0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
+ ]
+
+ # Import a compressed key and an uncompressed key, generate some multisig addresses
+ self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
+ uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"]
+ self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
+ compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"]
+ assert(not self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'])
+ assert(self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'])
+
+ self.nodes[0].importpubkey(pubkeys[0])
+ compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
+ self.nodes[0].importpubkey(pubkeys[1])
+ compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
+ self.nodes[0].importpubkey(pubkeys[2])
+ uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]
+
+ spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress
+ spendable_after_importaddress = [] # These outputs should be seen after importaddress
+ solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable
+ unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress
+ solvable_anytime = [] # These outputs should be solvable after importpubkey
+ unseen_anytime = [] # These outputs should never be seen
+
+ uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))
+ uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))
+ compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))
+ uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]]))
+ compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))
+ compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]]))
+ unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"]
+
+ # Test multisig_without_privkey
+ # We have 2 public keys without private keys; use addmultisigaddress to add the multisig to the wallet.
+ # Money sent to the P2SH of this multisig should only be seen after importaddress with the base58 P2SH address.
+
+ multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])
+ script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
+ solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
+
+ for i in compressed_spendable_address:
+ v = self.nodes[0].validateaddress(i)
+ if (v['isscript']):
+ [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
+ # bare and p2sh multisig with compressed keys should always be spendable
+ spendable_anytime.extend([bare, p2sh])
+ # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
+ spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
+ else:
+ [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
+ # normal P2PKH and P2PK with compressed keys should always be spendable
+ spendable_anytime.extend([p2pkh, p2pk])
+ # P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are spendable after direct importaddress
+ spendable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
+
+ for i in uncompressed_spendable_address:
+ v = self.nodes[0].validateaddress(i)
+ if (v['isscript']):
+ [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
+ # bare and p2sh multisig with uncompressed keys should always be spendable
+ spendable_anytime.extend([bare, p2sh])
+ # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
+ unseen_anytime.extend([p2wsh, p2sh_p2wsh])
+ else:
+ [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
+ # normal P2PKH and P2PK with uncompressed keys should always be spendable
+ spendable_anytime.extend([p2pkh, p2pk])
+ # P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
+ spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
+ # witness outputs with uncompressed keys are never seen
+ unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
+
+ for i in compressed_solvable_address:
+ v = self.nodes[0].validateaddress(i)
+ if (v['isscript']):
+ # Multisig without private keys is not seen after addmultisigaddress, but is seen after importaddress
+ [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
+ solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
+ else:
+ [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
+ # normal P2PKH and P2PK with compressed keys should always be seen
+ solvable_anytime.extend([p2pkh, p2pk])
+ # P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are seen after direct importaddress
+ solvable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
+
+ for i in uncompressed_solvable_address:
+ v = self.nodes[0].validateaddress(i)
+ if (v['isscript']):
+ [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
+ # Bare uncompressed multisig without private keys is not seen after addmultisigaddress, but is seen after importaddress
+ solvable_after_importaddress.extend([bare, p2sh])
+ # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
+ unseen_anytime.extend([p2wsh, p2sh_p2wsh])
+ else:
+ [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
+ # normal P2PKH and P2PK with uncompressed keys should always be seen
+ solvable_anytime.extend([p2pkh, p2pk])
+ # P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress
+ solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
+ # witness outputs with uncompressed keys are never seen
+ unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
+
+ op1 = CScript([OP_1])
+ op0 = CScript([OP_0])
+ # 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
+ unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)]
+ unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
+ unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
+ unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
+ p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
+ p2wshop1 = CScript([OP_0, sha256(op1)])
+ unsolvable_after_importaddress.append(unsolvablep2pkh)
+ unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
+ unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
+ unsolvable_after_importaddress.append(p2wshop1)
+ unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided
+ unsolvable_after_importaddress.append(p2shop0)
+
+ spendable_txid = []
+ solvable_txid = []
+ spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))
+ solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))
+ self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)
+
+ importlist = []
+ for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
+ v = self.nodes[0].validateaddress(i)
+ if (v['isscript']):
+ bare = hex_str_to_bytes(v['hex'])
+ importlist.append(bytes_to_hex_str(bare))
+ importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)])))
+ else:
+ pubkey = hex_str_to_bytes(v['pubkey'])
+ p2pk = CScript([pubkey, OP_CHECKSIG])
+ p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
+ importlist.append(bytes_to_hex_str(p2pk))
+ importlist.append(bytes_to_hex_str(p2pkh))
+ importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)])))
+ importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)])))
+ importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)])))
+
+ importlist.append(bytes_to_hex_str(unsolvablep2pkh))
+ importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh))
+ importlist.append(bytes_to_hex_str(op1))
+ importlist.append(bytes_to_hex_str(p2wshop1))
+
+ # Import all generated addresses. The wallet already has the private keys
+ # for some of these, so catch JSON RPC exceptions and continue.
+ for i in importlist:
+ try:
+ self.nodes[0].importaddress(i,"",False,True)
+ except JSONRPCException as exp:
+ assert_equal(exp.error["message"], "The wallet already contains the private key for this address or script")
+ assert_equal(exp.error["code"], -4)
+
+ self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
+ self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey
+
+ spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
+ solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
+ self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
+ self.mine_and_test_listunspent(unseen_anytime, 0)
+
+ # addwitnessaddress should refuse to return a witness address if an uncompressed key is used or the address is
+ # not in the wallet
+ # note that no witness address should be returned for unsolvable addresses
+ # the multisig_without_privkey_address will fail because its keys were not added with importpubkey
+ for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address + [multisig_without_privkey_address]:
+ assert_raises_jsonrpc(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
+
+ for i in compressed_spendable_address + compressed_solvable_address:
+ witaddress = self.nodes[0].addwitnessaddress(i)
+ # addwitnessaddress should return the same address if it is a known P2SH-witness address
+ assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
+
+ spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
+ solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
+ self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
+ self.mine_and_test_listunspent(unseen_anytime, 0)
+
+ # Repeat some tests. This time we don't add witness scripts with importaddress
+ # Import a compressed key and an uncompressed key, generate some multisig addresses
+ self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
+ uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
+ self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
+ compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]
+
+ self.nodes[0].importpubkey(pubkeys[5])
+ compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
+ self.nodes[0].importpubkey(pubkeys[6])
+ uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]
+
+ spendable_after_addwitnessaddress = [] # These outputs should be seen after addwitnessaddress
+ solvable_after_addwitnessaddress = [] # These outputs should be seen after addwitnessaddress but not spendable
+ unseen_anytime = [] # These outputs should never be seen
+
+ uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))
+ uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))
+ compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))
+ uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]]))
+ compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))
+
+ premature_witaddress = []
+
+ for i in compressed_spendable_address:
+ v = self.nodes[0].validateaddress(i)
+ if (v['isscript']):
+ [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
+ # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress
+ spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
+ premature_witaddress.append(script_to_p2sh(p2wsh))
+ else:
+ [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
+ # P2WPKH, P2SH_P2WPKH are spendable after addwitnessaddress
+ spendable_after_addwitnessaddress.extend([p2wpkh, p2sh_p2wpkh])
+ premature_witaddress.append(script_to_p2sh(p2wpkh))
+
+ for i in uncompressed_spendable_address + uncompressed_solvable_address:
+ v = self.nodes[0].validateaddress(i)
+ if (v['isscript']):
+ [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
+ # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
+ unseen_anytime.extend([p2wsh, p2sh_p2wsh])
+ else:
+ [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
+ # P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen
+ unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])
+
+ for i in compressed_solvable_address:
+ v = self.nodes[0].validateaddress(i)
+ if (v['isscript']):
+ # P2WSH multisig without private keys is seen after addwitnessaddress
+ [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
+ solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
+ premature_witaddress.append(script_to_p2sh(p2wsh))
+ else:
+ [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
+ # P2WPKH and P2SH_P2WPKH with compressed keys are seen after addwitnessaddress
+ solvable_after_addwitnessaddress.extend([p2wpkh, p2sh_p2wpkh])
+ premature_witaddress.append(script_to_p2sh(p2wpkh))
+
+ self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0)
+
+ # addwitnessaddress should refuse to return a witness address if an uncompressed key is used
+ # note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress
+ # premature_witaddress are not accepted until the script is added with addwitnessaddress first
+ for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress + [compressed_solvable_address[1]]:
+ # This will raise an exception
+ assert_raises_jsonrpc(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
+
+ # after importaddress it should pass addwitnessaddress
+ v = self.nodes[0].validateaddress(compressed_solvable_address[1])
+ self.nodes[0].importaddress(v['hex'],"",False,True)
+ for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress:
+ witaddress = self.nodes[0].addwitnessaddress(i)
+ assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
+
+ spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress, 2))
+ solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress, 1))
+ self.mine_and_test_listunspent(unseen_anytime, 0)
+
+ # Check that spendable outputs are really spendable
+ self.create_and_mine_tx_from_txids(spendable_txid)
+
+ # import all the private keys so solvable addresses become spendable
+ self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
+ self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
+ self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
+ self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
+ self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
+ self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
+ self.create_and_mine_tx_from_txids(solvable_txid)
+
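+ # Send 0.1 BTC to every script in script_list in one transaction, mine it,
+ # and check listunspent: ismine == 2 expects every output spendable,
+ # ismine == 1 expects every output watch-only (seen but not spendable),
+ # and ismine == 0 expects no output to be seen at all. Returns the txid.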
+ def mine_and_test_listunspent(self, script_list, ismine):
+ utxo = find_unspent(self.nodes[0], 50)
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
+ for i in script_list:
+ tx.vout.append(CTxOut(10000000, i))
+ tx.rehash()
+ signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
+ txid = self.nodes[0].sendrawtransaction(signresults, True)
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+ watchcount = 0
+ spendcount = 0
+ for i in self.nodes[0].listunspent():
+ if (i['txid'] == txid):
+ watchcount += 1
+ if i['spendable']:
+ spendcount += 1
+ if (ismine == 2):
+ assert_equal(spendcount, len(script_list))
+ elif (ismine == 1):
+ assert_equal(watchcount, len(script_list))
+ assert_equal(spendcount, 0)
+ else:
+ assert_equal(watchcount, 0)
+ return txid
+
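+ # Expand a validateaddress result for a multisig address into [bare script,
+ # P2SH, P2WSH, P2SH(P2WSH)] scriptPubKeys.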
+ def p2sh_address_to_script(self, v):
+ bare = CScript(hex_str_to_bytes(v['hex']))
+ p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
+ p2wsh = CScript([OP_0, sha256(bare)])
+ p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])
+ return([bare, p2sh, p2wsh, p2sh_p2wsh])
+
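+ # Expand a validateaddress result for a single-key address into the ten
+ # script encodings used above, from P2WPKH through P2SH(P2WSH(P2PKH)).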
+ def p2pkh_address_to_script(self, v):
+ pubkey = hex_str_to_bytes(v['pubkey'])
+ p2wpkh = CScript([OP_0, hash160(pubkey)])
+ p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])
+ p2pk = CScript([pubkey, OP_CHECKSIG])
+ p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
+ p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])
+ p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])
+ p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
+ p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
+ p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])
+ p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])
+ return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
+
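+ # Spend every output of the given txids to a single empty output and mine
+ # it, checking that the wallet can really sign for all of them.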
+ def create_and_mine_tx_from_txids(self, txids, success=True):
+ tx = CTransaction()
+ for i in txids:
+ txtmp = CTransaction()
+ txraw = self.nodes[0].getrawtransaction(i)
+ f = BytesIO(hex_str_to_bytes(txraw))
+ txtmp.deserialize(f)
+ for j in range(len(txtmp.vout)):
+ tx.vin.append(CTxIn(COutPoint(int(i, 16), j)))
+ tx.vout.append(CTxOut(0, CScript()))
+ tx.rehash()
+ signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
+ self.nodes[0].sendrawtransaction(signresults, True)
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+
+
+if __name__ == '__main__':
+ SegWitTest().main()
diff --git a/test/functional/sendheaders.py b/test/functional/sendheaders.py
new file mode 100755
index 0000000000..de7f5e0849
--- /dev/null
+++ b/test/functional/sendheaders.py
@@ -0,0 +1,605 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test behavior of headers messages to announce blocks.
+
+Setup:
+
+- Two nodes, two p2p connections to node0. One p2p connection should only ever
+ receive inv's (omitted from testing description below, this is our control).
+ Second node is used for creating reorgs.
+
+Part 1: No headers announcements before "sendheaders"
+a. node mines a block [expect: inv]
+ send getdata for the block [expect: block]
+b. node mines another block [expect: inv]
+ send getheaders and getdata [expect: headers, then block]
+c. node mines another block [expect: inv]
+ peer mines a block, announces with header [expect: getdata]
+d. node mines another block [expect: inv]
+
+Part 2: After "sendheaders", headers announcements should generally work.
+a. peer sends sendheaders [expect: no response]
+ peer sends getheaders with current tip [expect: no response]
+b. node mines a block [expect: tip header]
+c. for N in 1, ..., 10:
+ * for announce-type in {inv, header}
+ - peer mines N blocks, announces with announce-type
+ [ expect: getheaders/getdata or getdata, deliver block(s) ]
+ - node mines a block [ expect: 1 header ]
+
+Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
+- For response-type in {inv, getheaders}
+ * node mines a 7-block reorg [ expect: headers announcement of 8 blocks ]
+ * node mines an 8-block reorg [ expect: inv at tip ]
+ * peer responds with getblocks/getdata [expect: inv, blocks ]
+ * node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
+ * node mines another block at tip [ expect: inv ]
+ * peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
+ * peer requests block [ expect: block ]
+ * node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
+ * peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
+ * node mines 1 block [expect: 1 header, peer responds with getdata]
+
+Part 4: Test direct fetch behavior
+a. Announce 2 old block headers.
+ Expect: no getdata requests.
+b. Announce 3 new blocks via 1 headers message.
+ Expect: one getdata request for all 3 blocks.
+ (Send blocks.)
+c. Announce 1 header that forks off the last two blocks.
+ Expect: no response.
+d. Announce 1 more header that builds on that fork.
+ Expect: one getdata request for two blocks.
+e. Announce 16 more headers that build on that fork.
+ Expect: getdata request for 14 more blocks.
+f. Announce 1 more header that builds on that fork.
+ Expect: no response.
+
+Part 5: Test handling of headers that don't connect.
+a. Repeat 10 times:
+ 1. Announce a header that doesn't connect.
+ Expect: getheaders message
+ 2. Send headers chain.
+ Expect: getdata for the missing blocks, tip update.
+b. Then send 9 more headers that don't connect.
+ Expect: getheaders message each time.
+c. Announce a header that does connect.
+ Expect: no response.
+d. Announce 49 headers that don't connect.
+ Expect: getheaders message each time.
+e. Announce one more that doesn't connect.
+ Expect: disconnect.
+"""
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.blocktools import create_block, create_coinbase
+
+
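+# Timeout (in seconds) to wait in Part 4 for the getdata that a direct fetch
+# of announced headers should trigger.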
+direct_fetch_response_time = 0.05
+
+class BaseNode(SingleNodeConnCB):
+ def __init__(self):
+ SingleNodeConnCB.__init__(self)
+ self.last_inv = None
+ self.last_headers = None
+ self.last_block = None
+ self.last_getdata = None
+ self.block_announced = False
+ self.last_getheaders = None
+ self.disconnected = False
+ self.last_blockhash_announced = None
+
+ def clear_last_announcement(self):
+ with mininode_lock:
+ self.block_announced = False
+ self.last_inv = None
+ self.last_headers = None
+
+ # Request data for a list of block hashes
+ def get_data(self, block_hashes):
+ msg = msg_getdata()
+ for x in block_hashes:
+ msg.inv.append(CInv(2, x))
+ self.connection.send_message(msg)
+
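+ # Request headers with a getheaders message: locator is a list of block
+ # hashes we already have; hashstop (0 for none) bounds the response.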
+ def get_headers(self, locator, hashstop):
+ msg = msg_getheaders()
+ msg.locator.vHave = locator
+ msg.hashstop = hashstop
+ self.connection.send_message(msg)
+
+ def send_block_inv(self, blockhash):
+ msg = msg_inv()
+ msg.inv = [CInv(2, blockhash)]
+ self.connection.send_message(msg)
+
+ def on_inv(self, conn, message):
+ self.last_inv = message
+ self.block_announced = True
+ self.last_blockhash_announced = message.inv[-1].hash
+
+ def on_headers(self, conn, message):
+ self.last_headers = message
+ if len(message.headers):
+ self.block_announced = True
+ message.headers[-1].calc_sha256()
+ self.last_blockhash_announced = message.headers[-1].sha256
+
+ def on_block(self, conn, message):
+ self.last_block = message.block
+ self.last_block.calc_sha256()
+
+ def on_getdata(self, conn, message):
+ self.last_getdata = message
+
+ def on_getheaders(self, conn, message):
+ self.last_getheaders = message
+
+ def on_close(self, conn):
+ self.disconnected = True
+
+ # Test whether the last announcement we received had the
+ # right header or the right inv
+ # inv and headers should be lists of block hashes
+ def check_last_announcement(self, headers=None, inv=None):
+ expect_headers = headers if headers is not None else []
+ expect_inv = inv if inv is not None else []
+ test_function = lambda: self.block_announced
+ assert(wait_until(test_function, timeout=60))
+ with mininode_lock:
+ self.block_announced = False
+
+ success = True
+ compare_inv = []
+ if self.last_inv is not None:
+ compare_inv = [x.hash for x in self.last_inv.inv]
+ if compare_inv != expect_inv:
+ success = False
+
+ hash_headers = []
+ if self.last_headers is not None:
+ # treat headers as a list of block hashes
+ hash_headers = [ x.sha256 for x in self.last_headers.headers ]
+ if hash_headers != expect_headers:
+ success = False
+
+ self.last_inv = None
+ self.last_headers = None
+ return success
+
+ # Syncing helpers
+ def wait_for_block(self, blockhash, timeout=60):
+ test_function = lambda: self.last_block is not None and self.last_block.sha256 == blockhash
+ assert(wait_until(test_function, timeout=timeout))
+ return
+
+ def wait_for_getheaders(self, timeout=60):
+ test_function = lambda: self.last_getheaders is not None
+ assert(wait_until(test_function, timeout=timeout))
+ return
+
+ def wait_for_getdata(self, hash_list, timeout=60):
+ if hash_list == []:
+ return
+
+ test_function = lambda: self.last_getdata is not None and [x.hash for x in self.last_getdata.inv] == hash_list
+ assert(wait_until(test_function, timeout=timeout))
+ return
+
+ def wait_for_disconnect(self, timeout=60):
+ test_function = lambda: self.disconnected
+ assert(wait_until(test_function, timeout=timeout))
+ return
+
+ def wait_for_block_announcement(self, block_hash, timeout=60):
+ test_function = lambda: self.last_blockhash_announced == block_hash
+ assert(wait_until(test_function, timeout=timeout))
+ return
+
+ def send_header_for_blocks(self, new_blocks):
+ headers_message = msg_headers()
+ headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
+ self.send_message(headers_message)
+
+ def send_getblocks(self, locator):
+ getblocks_message = msg_getblocks()
+ getblocks_message.locator.vHave = locator
+ self.send_message(getblocks_message)
+
+# InvNode: This peer should only ever receive inv's, because it doesn't ever send a
+# "sendheaders" message.
+class InvNode(BaseNode):
+ def __init__(self):
+ BaseNode.__init__(self)
+
+# TestNode: This peer is the one we use for most of the testing.
+class TestNode(BaseNode):
+ def __init__(self):
+ BaseNode.__init__(self)
+
+class SendHeadersTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 2
+
+ def setup_network(self):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
+ connect_nodes(self.nodes[0], 1)
+
+ # mine count blocks and return the new tip
+ def mine_blocks(self, count):
+ # Clear out last block announcement from each p2p listener
+ [ x.clear_last_announcement() for x in self.p2p_connections ]
+ self.nodes[0].generate(count)
+ return int(self.nodes[0].getbestblockhash(), 16)
+
+ # mine a reorg that invalidates length blocks (replacing them with
+ # length+1 blocks).
+ # Note: we clear the state of our p2p connections after the
+ # to-be-reorged-out blocks are mined, so that we don't break later tests.
+ # return the list of block hashes newly mined
+ def mine_reorg(self, length):
+ self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
+ sync_blocks(self.nodes, wait=0.1)
+ for x in self.p2p_connections:
+ x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
+ x.clear_last_announcement()
+
+ tip_height = self.nodes[1].getblockcount()
+ hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
+ self.nodes[1].invalidateblock(hash_to_invalidate)
+ all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
+ sync_blocks(self.nodes, wait=0.1)
+ return [int(x, 16) for x in all_hashes]
+
+ def run_test(self):
+ # Setup the p2p connections and start up the network thread.
+ inv_node = InvNode()
+ test_node = TestNode()
+
+ self.p2p_connections = [inv_node, test_node]
+
+ connections = []
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
+ # Set nServices to 0 for test_node, so no block download will occur outside of
+ # direct fetching
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
+ inv_node.add_connection(connections[0])
+ test_node.add_connection(connections[1])
+
+ NetworkThread().start() # Start up network handling in another thread
+
+ # Test logic begins here
+ inv_node.wait_for_verack()
+ test_node.wait_for_verack()
+
+ tip = int(self.nodes[0].getbestblockhash(), 16)
+
+ # PART 1
+ # 1. Mine a block; expect inv announcements each time
+ self.log.info("Part 1: headers don't start before sendheaders message...")
+ for i in range(4):
+ old_tip = tip
+ tip = self.mine_blocks(1)
+ assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
+ assert_equal(test_node.check_last_announcement(inv=[tip]), True)
+ # Try a few different responses; none should affect next announcement
+ if i == 0:
+ # first request the block
+ test_node.get_data([tip])
+ test_node.wait_for_block(tip, timeout=5)
+ elif i == 1:
+ # next try requesting header and block
+ test_node.get_headers(locator=[old_tip], hashstop=tip)
+ test_node.get_data([tip])
+ test_node.wait_for_block(tip)
+ test_node.clear_last_announcement() # since we requested headers...
+ elif i == 2:
+ # this time announce own block via headers
+ height = self.nodes[0].getblockcount()
+ last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
+ block_time = last_time + 1
+ new_block = create_block(tip, create_coinbase(height+1), block_time)
+ new_block.solve()
+ test_node.send_header_for_blocks([new_block])
+ test_node.wait_for_getdata([new_block.sha256], timeout=5)
+ test_node.send_message(msg_block(new_block))
+ test_node.sync_with_ping() # make sure this block is processed
+ inv_node.clear_last_announcement()
+ test_node.clear_last_announcement()
+
+ self.log.info("Part 1: success!")
+ self.log.info("Part 2: announce blocks with headers after sendheaders message...")
+ # PART 2
+ # 2. Send a sendheaders message and test that headers announcements
+ # commence and keep working.
+ test_node.send_message(msg_sendheaders())
+ prev_tip = int(self.nodes[0].getbestblockhash(), 16)
+ test_node.get_headers(locator=[prev_tip], hashstop=0)
+ test_node.sync_with_ping()
+
+ # Now that we've synced headers, headers announcements should work
+ tip = self.mine_blocks(1)
+ assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
+ assert_equal(test_node.check_last_announcement(headers=[tip]), True)
+
+ height = self.nodes[0].getblockcount()+1
+ block_time += 10 # Advance far enough ahead
+ for i in range(10):
+ # Mine i blocks, and alternate announcing either via
+ # inv (of tip) or via headers. After each, new blocks
+ # mined by the node should successfully be announced
+ # with block header, even though the blocks are never requested
+ for j in range(2):
+ blocks = []
+ for b in range(i+1):
+ blocks.append(create_block(tip, create_coinbase(height), block_time))
+ blocks[-1].solve()
+ tip = blocks[-1].sha256
+ block_time += 1
+ height += 1
+ if j == 0:
+ # Announce via inv
+ test_node.send_block_inv(tip)
+ test_node.wait_for_getheaders(timeout=5)
+ # Should have received a getheaders now
+ test_node.send_header_for_blocks(blocks)
+ # Test that duplicate inv's won't result in duplicate
+ # getdata requests, or duplicate headers announcements
+ [ inv_node.send_block_inv(x.sha256) for x in blocks ]
+ test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
+ inv_node.sync_with_ping()
+ else:
+ # Announce via headers
+ test_node.send_header_for_blocks(blocks)
+ test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
+ # Test that duplicate headers won't result in duplicate
+ # getdata requests (the check is further down)
+ inv_node.send_header_for_blocks(blocks)
+ inv_node.sync_with_ping()
+ [ test_node.send_message(msg_block(x)) for x in blocks ]
+ test_node.sync_with_ping()
+ inv_node.sync_with_ping()
+ # These blocks should not be announced to the inv node, since the
+ # inv node already announced them itself
+ assert_equal(inv_node.last_inv, None)
+ assert_equal(inv_node.last_headers, None)
+ tip = self.mine_blocks(1)
+ assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
+ assert_equal(test_node.check_last_announcement(headers=[tip]), True)
+ height += 1
+ block_time += 1
+
+ self.log.info("Part 2: success!")
+
+ self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
+
+ # PART 3. Headers announcements can stop after large reorg, and resume after
+ # getheaders or inv from peer.
+ for j in range(2):
+ # First try mining a reorg that can propagate with header announcement
+ new_block_hashes = self.mine_reorg(length=7)
+ tip = new_block_hashes[-1]
+ assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
+ assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
+
+ block_time += 8
+
+ # Mine a too-large reorg, which should be announced with a single inv
+ new_block_hashes = self.mine_reorg(length=8)
+ tip = new_block_hashes[-1]
+ assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
+ assert_equal(test_node.check_last_announcement(inv=[tip]), True)
+
+ block_time += 9
+
+ fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"]
+ fork_point = int(fork_point, 16)
+
+ # Use getblocks/getdata
+ test_node.send_getblocks(locator = [fork_point])
+ assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
+ test_node.get_data(new_block_hashes)
+ test_node.wait_for_block(new_block_hashes[-1])
+
+ for i in range(3):
+ # Mine another block, still should get only an inv
+ tip = self.mine_blocks(1)
+ assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
+ assert_equal(test_node.check_last_announcement(inv=[tip]), True)
+ if i == 0:
+ # Just get the data -- shouldn't cause headers announcements to resume
+ test_node.get_data([tip])
+ test_node.wait_for_block(tip)
+ elif i == 1:
+ # Send a getheaders message that shouldn't trigger headers announcements
+ # to resume (best header sent will be too old)
+ test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
+ test_node.get_data([tip])
+ test_node.wait_for_block(tip)
+ elif i == 2:
+ test_node.get_data([tip])
+ test_node.wait_for_block(tip)
+ # This time, try sending either a getheaders to trigger resumption
+ # of headers announcements, or mine a new block and inv it, also
+ # triggering resumption of headers announcements.
+ if j == 0:
+ test_node.get_headers(locator=[tip], hashstop=0)
+ test_node.sync_with_ping()
+ else:
+ test_node.send_block_inv(tip)
+ test_node.sync_with_ping()
+ # New blocks should now be announced with header
+ tip = self.mine_blocks(1)
+ assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
+ assert_equal(test_node.check_last_announcement(headers=[tip]), True)
+
+ self.log.info("Part 3: success!")
+
+ self.log.info("Part 4: Testing direct fetch behavior...")
+ tip = self.mine_blocks(1)
+ height = self.nodes[0].getblockcount() + 1
+ last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
+ block_time = last_time + 1
+
+ # Create 2 blocks. Send the blocks, then send the headers.
+ blocks = []
+ for b in range(2):
+ blocks.append(create_block(tip, create_coinbase(height), block_time))
+ blocks[-1].solve()
+ tip = blocks[-1].sha256
+ block_time += 1
+ height += 1
+ inv_node.send_message(msg_block(blocks[-1]))
+
+ inv_node.sync_with_ping() # Make sure blocks are processed
+ test_node.last_getdata = None
+ test_node.send_header_for_blocks(blocks)
+ test_node.sync_with_ping()
+ # should not have received any getdata messages
+ with mininode_lock:
+ assert_equal(test_node.last_getdata, None)
+
+ # This time, direct fetch should work
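+    # (the announced headers connect to the node's tip and have more work,
+    # so the node requests the blocks immediately via getdata)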
+ blocks = []
+ for b in range(3):
+ blocks.append(create_block(tip, create_coinbase(height), block_time))
+ blocks[-1].solve()
+ tip = blocks[-1].sha256
+ block_time += 1
+ height += 1
+
+ test_node.send_header_for_blocks(blocks)
+ test_node.sync_with_ping()
+ test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time)
+
+ [ test_node.send_message(msg_block(x)) for x in blocks ]
+
+ test_node.sync_with_ping()
+
+ # Now announce a header that forks the last two blocks
+ tip = blocks[0].sha256
+ height -= 1
+ blocks = []
+
+ # Create extra blocks for later
+ for b in range(20):
+ blocks.append(create_block(tip, create_coinbase(height), block_time))
+ blocks[-1].solve()
+ tip = blocks[-1].sha256
+ block_time += 1
+ height += 1
+
+ # Announcing one block on fork should not trigger direct fetch
+ # (less work than tip)
+ test_node.last_getdata = None
+ test_node.send_header_for_blocks(blocks[0:1])
+ test_node.sync_with_ping()
+ with mininode_lock:
+ assert_equal(test_node.last_getdata, None)
+
+ # Announcing one more block on fork should trigger direct fetch for
+ # both blocks (same work as tip)
+ test_node.send_header_for_blocks(blocks[1:2])
+ test_node.sync_with_ping()
+ test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)
+
+ # Announcing 16 more headers should trigger direct fetch for 14 more
+ # blocks
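+        # (2 blocks are still in flight from the previous getdata, and the
+        # node will direct-fetch at most 16 in-flight blocks per peer)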
+ test_node.send_header_for_blocks(blocks[2:18])
+ test_node.sync_with_ping()
+ test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)
+
+ # Announcing 1 more header should not trigger any response
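+        # (the per-peer limit of 16 in-flight blocks has been reached)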
+ test_node.last_getdata = None
+ test_node.send_header_for_blocks(blocks[18:19])
+ test_node.sync_with_ping()
+ with mininode_lock:
+ assert_equal(test_node.last_getdata, None)
+
+ self.log.info("Part 4: success!")
+
+ # Now deliver all those blocks we announced.
+ [ test_node.send_message(msg_block(x)) for x in blocks ]
+
+ self.log.info("Part 5: Testing handling of unconnecting headers")
+ # First we test that receipt of an unconnecting header doesn't prevent
+ # chain sync.
+ for i in range(10):
+ test_node.last_getdata = None
+ blocks = []
+ # Create two more blocks.
+ for j in range(2):
+ blocks.append(create_block(tip, create_coinbase(height), block_time))
+ blocks[-1].solve()
+ tip = blocks[-1].sha256
+ block_time += 1
+ height += 1
+ # Send the header of the second block -> this won't connect.
+ with mininode_lock:
+ test_node.last_getheaders = None
+ test_node.send_header_for_blocks([blocks[1]])
+ test_node.wait_for_getheaders(timeout=1)
+ test_node.send_header_for_blocks(blocks)
+ test_node.wait_for_getdata([x.sha256 for x in blocks])
+ [ test_node.send_message(msg_block(x)) for x in blocks ]
+ test_node.sync_with_ping()
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
+
+ blocks = []
+ # Now we test that if we repeatedly don't send connecting headers, we
+ # don't go into an infinite loop trying to get them to connect.
+ MAX_UNCONNECTING_HEADERS = 10
+ for j in range(MAX_UNCONNECTING_HEADERS+1):
+ blocks.append(create_block(tip, create_coinbase(height), block_time))
+ blocks[-1].solve()
+ tip = blocks[-1].sha256
+ block_time += 1
+ height += 1
+
+ for i in range(1, MAX_UNCONNECTING_HEADERS):
+ # Send a header that doesn't connect, check that we get a getheaders.
+ with mininode_lock:
+ test_node.last_getheaders = None
+ test_node.send_header_for_blocks([blocks[i]])
+ test_node.wait_for_getheaders(timeout=1)
+
+        # Next header will connect; it should reset the node's unconnecting-headers count:
+ test_node.send_header_for_blocks([blocks[0]])
+
+ # Remove the first two entries (blocks[1] would connect):
+ blocks = blocks[2:]
+
+ # Now try to see how many unconnecting headers we can send
+ # before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
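+        # (each batch of MAX_UNCONNECTING_HEADERS unconnecting headers messages
+        # costs the peer 20 misbehavior points, and the node disconnects it at
+        # a score of 100)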
+ for i in range(5*MAX_UNCONNECTING_HEADERS - 1):
+ # Send a header that doesn't connect, check that we get a getheaders.
+ with mininode_lock:
+ test_node.last_getheaders = None
+ test_node.send_header_for_blocks([blocks[i%len(blocks)]])
+ test_node.wait_for_getheaders(timeout=1)
+
+ # Eventually this stops working.
+ with mininode_lock:
+            test_node.last_getheaders = None
+ test_node.send_header_for_blocks([blocks[-1]])
+
+ # Should get disconnected
+ test_node.wait_for_disconnect()
+ with mininode_lock:
+            test_node.last_getheaders = True
+
+ self.log.info("Part 5: success!")
+
+        # Finally, check that the inv node never received a getdata request
+        # throughout the test
+ assert_equal(inv_node.last_getdata, None)
+
+if __name__ == '__main__':
+ SendHeadersTest().main()
diff --git a/test/functional/signmessages.py b/test/functional/signmessages.py
new file mode 100755
index 0000000000..91f5abef5d
--- /dev/null
+++ b/test/functional/signmessages.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test RPC commands for signing and verifying messages."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class SignMessagesTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
+ self.is_network_split = False
+
+ def run_test(self):
+ message = 'This is just a test message'
+
+        # Test signing with a private key
+ privKey = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N'
+ address = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB'
+ signature = self.nodes[0].signmessagewithprivkey(privKey, message)
+
+ # Verify the message
+ assert(self.nodes[0].verifymessage(address, signature, message))
+
+        # Test signing with an address from the wallet
+ address = self.nodes[0].getnewaddress()
+ signature = self.nodes[0].signmessage(address, message)
+
+ # Verify the message
+ assert(self.nodes[0].verifymessage(address, signature, message))
+
+if __name__ == '__main__':
+ SignMessagesTest().main()
diff --git a/test/functional/signrawtransactions.py b/test/functional/signrawtransactions.py
new file mode 100755
index 0000000000..b24162ab97
--- /dev/null
+++ b/test/functional/signrawtransactions.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test transaction signing using the signrawtransaction RPC."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+
+class SignRawTransactionsTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
+ self.is_network_split = False
+
+ def successful_signing_test(self):
+ """Create and sign a valid raw transaction with one input.
+
+ Expected results:
+
+ 1) The transaction has a complete set of signatures
+ 2) No script verification error occurred"""
+ privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA']
+
+ inputs = [
+ # Valid pay-to-pubkey scripts
+ {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
+ 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
+ {'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0,
+ 'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'},
+ ]
+
+ outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
+
+ rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
+ rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
+
+ # 1) The transaction has a complete set of signatures
+ assert 'complete' in rawTxSigned
+ assert_equal(rawTxSigned['complete'], True)
+
+ # 2) No script verification error occurred
+ assert 'errors' not in rawTxSigned
+
+ # Check that signrawtransaction doesn't blow up on garbage merge attempts
+ dummyTxInconsistent = self.nodes[0].createrawtransaction([inputs[0]], outputs)
+ rawTxUnsigned = self.nodes[0].signrawtransaction(rawTx + dummyTxInconsistent, inputs)
+
+ assert 'complete' in rawTxUnsigned
+ assert_equal(rawTxUnsigned['complete'], False)
+
+ # Check that signrawtransaction properly merges unsigned and signed txn, even with garbage in the middle
+ rawTxSigned2 = self.nodes[0].signrawtransaction(rawTxUnsigned["hex"] + dummyTxInconsistent + rawTxSigned["hex"], inputs)
+
+ assert 'complete' in rawTxSigned2
+ assert_equal(rawTxSigned2['complete'], True)
+
+ assert 'errors' not in rawTxSigned2
+
+
+ def script_verification_error_test(self):
+        """Create and sign a raw transaction with a valid (vin 0), an invalid (vin 1), and a missing (vin 2) input script.
+
+ Expected results:
+
+ 3) The transaction has no complete set of signatures
+ 4) Two script verification errors occurred
+ 5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
+ 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
+ privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
+
+ inputs = [
+ # Valid pay-to-pubkey script
+ {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
+ # Invalid script
+ {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
+ # Missing scriptPubKey
+ {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
+ ]
+
+ scripts = [
+ # Valid pay-to-pubkey script
+ {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
+ 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
+ # Invalid script
+ {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
+ 'scriptPubKey': 'badbadbadbad'}
+ ]
+
+ outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
+
+ rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
+
+ # Make sure decoderawtransaction is at least marginally sane
+ decodedRawTx = self.nodes[0].decoderawtransaction(rawTx)
+ for i, inp in enumerate(inputs):
+ assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"])
+ assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"])
+
+ # Make sure decoderawtransaction throws if there is extra data
+ assert_raises(JSONRPCException, self.nodes[0].decoderawtransaction, rawTx + "00")
+
+ rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
+
+ # 3) The transaction has no complete set of signatures
+ assert 'complete' in rawTxSigned
+ assert_equal(rawTxSigned['complete'], False)
+
+ # 4) Two script verification errors occurred
+ assert 'errors' in rawTxSigned
+ assert_equal(len(rawTxSigned['errors']), 2)
+
+ # 5) Script verification errors have certain properties
+ assert 'txid' in rawTxSigned['errors'][0]
+ assert 'vout' in rawTxSigned['errors'][0]
+ assert 'scriptSig' in rawTxSigned['errors'][0]
+ assert 'sequence' in rawTxSigned['errors'][0]
+ assert 'error' in rawTxSigned['errors'][0]
+
+ # 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
+ assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
+ assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
+ assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
+ assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
+
+ def run_test(self):
+ self.successful_signing_test()
+ self.script_verification_error_test()
+
+
+if __name__ == '__main__':
+ SignRawTransactionsTest().main()
diff --git a/test/functional/smartfees.py b/test/functional/smartfees.py
new file mode 100755
index 0000000000..49f2df5c37
--- /dev/null
+++ b/test/functional/smartfees.py
@@ -0,0 +1,266 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test fee estimation code."""
+
+from collections import OrderedDict
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE
+from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, FromHex, COIN
+
+# Construct two trivial P2SH scripts and the scriptSigs that spend them,
+# so we can create many transactions without having to spend time signing.
+redeem_script_1 = CScript([OP_1, OP_DROP])
+redeem_script_2 = CScript([OP_2, OP_DROP])
+P2SH_1 = CScript([OP_HASH160, hash160(redeem_script_1), OP_EQUAL])
+P2SH_2 = CScript([OP_HASH160, hash160(redeem_script_2), OP_EQUAL])
+
+# Associated scriptSigs that satisfy P2SH_1 and P2SH_2
+SCRIPT_SIG = [CScript([OP_TRUE, redeem_script_1]), CScript([OP_TRUE, redeem_script_2])]
+
+# Set by EstimateFeeTest.run_test so module-level helper functions can log
+log = None
+
+def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
+ """
+ Create and send a transaction with a random fee.
+ The transaction pays to a trivial P2SH script, and assumes that its inputs
+ are of the same form.
+ The function takes a list of confirmed outputs and unconfirmed outputs
+ and attempts to use the confirmed list first for its inputs.
+ It adds the newly created outputs to the unconfirmed list.
+ Returns (raw transaction, fee)
+ """
+ # It's best to exponentially distribute our random fees
+ # because the buckets are exponentially spaced.
+ # Exponentially distributed from 1-128 * fee_increment
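+    # (1.1892 is about 2**0.25, so 28 quarter-steps span a factor of 2**7 = 128)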
+ rand_fee = float(fee_increment)*(1.1892**random.randint(0,28))
+ # Total fee ranges from min_fee to min_fee + 127*fee_increment
+ fee = min_fee - fee_increment + satoshi_round(rand_fee)
+ tx = CTransaction()
+ total_in = Decimal("0.00000000")
+ while total_in <= (amount + fee) and len(conflist) > 0:
+ t = conflist.pop(0)
+ total_in += t["amount"]
+ tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
+ if total_in <= amount + fee:
+ while total_in <= (amount + fee) and len(unconflist) > 0:
+ t = unconflist.pop(0)
+ total_in += t["amount"]
+ tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
+ if total_in <= amount + fee:
+            raise RuntimeError("Insufficient funds: need %s, have %s" % (amount + fee, total_in))
+ tx.vout.append(CTxOut(int((total_in - amount - fee)*COIN), P2SH_1))
+ tx.vout.append(CTxOut(int(amount*COIN), P2SH_2))
+ # These transactions don't need to be signed, but we still have to insert
+ # the ScriptSig that will satisfy the ScriptPubKey.
+ for inp in tx.vin:
+ inp.scriptSig = SCRIPT_SIG[inp.prevout.n]
+ txid = from_node.sendrawtransaction(ToHex(tx), True)
+ unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee})
+ unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount})
+
+ return (ToHex(tx), fee)
+
+def split_inputs(from_node, txins, txouts, initial_split = False):
+ """
+ We need to generate a lot of inputs so we can generate a ton of transactions.
+    This function takes an input from txins, then creates and sends a transaction
+    which splits the value into two outputs, which are appended to txouts.
+    Previously this was designed to use small inputs so they wouldn't have
+    a high coin age when the notion of priority still existed.
+ """
+ prevtxout = txins.pop()
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b""))
+
+ half_change = satoshi_round(prevtxout["amount"]/2)
+ rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
+ tx.vout.append(CTxOut(int(half_change*COIN), P2SH_1))
+ tx.vout.append(CTxOut(int(rem_change*COIN), P2SH_2))
+
+ # If this is the initial split we actually need to sign the transaction
+ # Otherwise we just need to insert the proper ScriptSig
+    if initial_split:
+        completetx = from_node.signrawtransaction(ToHex(tx))["hex"]
+    else:
+        tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]]
+        completetx = ToHex(tx)
+ txid = from_node.sendrawtransaction(completetx, True)
+ txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change})
+ txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change})
+
+def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
+ """
+ This function calls estimatefee and verifies that the estimates
+ meet certain invariants.
+ """
+ all_estimates = [ node.estimatefee(i) for i in range(1,26) ]
+ if print_estimates:
+ log.info([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
+ delta = 1.0e-6 # account for rounding error
+ last_e = max(fees_seen)
+ for e in [x for x in all_estimates if x >= 0]:
+ # Estimates should be within the bounds of what transactions fees actually were:
+ if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen):
+ raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
+ %(float(e), min(fees_seen), max(fees_seen)))
+ # Estimates should be monotonically decreasing
+ if float(e)-delta > last_e:
+ raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
+ %(float(e),float(last_e)))
+ last_e = e
+ valid_estimate = False
+ invalid_estimates = 0
+ for i,e in enumerate(all_estimates): # estimate is for i+1
+ if e >= 0:
+ valid_estimate = True
+ # estimatesmartfee should return the same result
+ assert_equal(node.estimatesmartfee(i+1)["feerate"], e)
+
+ else:
+ invalid_estimates += 1
+
+ # estimatesmartfee should still be valid
+ approx_estimate = node.estimatesmartfee(i+1)["feerate"]
+ answer_found = node.estimatesmartfee(i+1)["blocks"]
+ assert(approx_estimate > 0)
+ assert(answer_found > i+1)
+
+            # Once we're at a high enough confirmation count that we can give an
+            # estimate, we should have estimates for all higher confirmation counts
+ if valid_estimate:
+ raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
+
+ # Check on the expected number of different confirmation counts
+ # that we might not have valid estimates for
+ if invalid_estimates > max_invalid:
+ raise AssertionError("More than (%d) invalid estimates"%(max_invalid))
+ return all_estimates
+
+
+class EstimateFeeTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 3
+ self.setup_clean_chain = False
+
+ def setup_network(self):
+ """
+        We'll set up the network to have 3 nodes that all mine with different parameters.
+ But first we need to use one node to create a lot of outputs
+ which we will use to generate our transactions.
+ """
+ self.nodes = []
+ # Use node0 to mine blocks for input splitting
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
+ "-whitelist=127.0.0.1"]))
+
+        self.log.info("This test is time-consuming, please be patient")
+        self.log.info("Splitting inputs so we can generate txs")
+ self.txouts = []
+ self.txouts2 = []
+ # Split a coinbase into two transaction puzzle outputs
+ split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
+
+ # Mine
+ while (len(self.nodes[0].getrawmempool()) > 0):
+ self.nodes[0].generate(1)
+
+ # Repeatedly split those 2 outputs, doubling twice for each rep
+ # Use txouts to monitor the available utxo, since these won't be tracked in wallet
+ reps = 0
+ while (reps < 5):
+ #Double txouts to txouts2
+ while (len(self.txouts)>0):
+ split_inputs(self.nodes[0], self.txouts, self.txouts2)
+ while (len(self.nodes[0].getrawmempool()) > 0):
+ self.nodes[0].generate(1)
+ #Double txouts2 to txouts
+ while (len(self.txouts2)>0):
+ split_inputs(self.nodes[0], self.txouts2, self.txouts)
+ while (len(self.nodes[0].getrawmempool()) > 0):
+ self.nodes[0].generate(1)
+ reps += 1
+ self.log.info("Finished splitting")
+
+        # Now we can connect the other nodes. We didn't want to connect them earlier,
+        # so that the estimates would not be affected by the splitting transactions.
+        # Node1 mines small blocks, but ones that are bigger than the expected
+        # transaction rate.
+        # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
+        # so a 17k limit leaves room for 110 or so transactions.
+ self.nodes.append(start_node(1, self.options.tmpdir,
+ ["-blockmaxsize=17000", "-maxorphantx=1000"]))
+ connect_nodes(self.nodes[1], 0)
+
+        # Node2 is a stingy miner that produces blocks which are too small
+        # (room for only 55 or so transactions)
+ node2args = ["-blockmaxsize=8000", "-maxorphantx=1000"]
+
+ self.nodes.append(start_node(2, self.options.tmpdir, node2args))
+ connect_nodes(self.nodes[0], 2)
+ connect_nodes(self.nodes[2], 1)
+
+ self.is_network_split = False
+ self.sync_all()
+
+ def transact_and_mine(self, numblocks, mining_node):
+ min_fee = Decimal("0.00001")
+        # We will now mine numblocks blocks, generating on average 100 transactions between each block.
+        # We shuffle our confirmed txout set before each set of transactions.
+        # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible,
+        # resorting to txs that depend on the mempool when those run out.
+ for i in range(numblocks):
+ random.shuffle(self.confutxo)
+ for j in range(random.randrange(100-50,100+50)):
+ from_index = random.randint(1,2)
+ (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
+ self.memutxo, Decimal("0.005"), min_fee, min_fee)
+ tx_kbytes = (len(txhex) // 2) / 1000.0
+ self.fees_per_kb.append(float(fee)/tx_kbytes)
+ sync_mempools(self.nodes[0:3], wait=.1)
+ mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
+ sync_blocks(self.nodes[0:3], wait=.1)
+ # update which txouts are confirmed
+ newmem = []
+ for utx in self.memutxo:
+ if utx["txid"] in mined:
+ self.confutxo.append(utx)
+ else:
+ newmem.append(utx)
+ self.memutxo = newmem
+
+ def run_test(self):
+ # Make log handler available to helper functions
+ global log
+ log = self.log
+ self.fees_per_kb = []
+ self.memutxo = []
+ self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
+ self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
+
+ for i in range(2):
+ self.log.info("Creating transactions and mining them with a block size that can't keep up")
+ # Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
+ self.transact_and_mine(10, self.nodes[2])
+ check_estimates(self.nodes[1], self.fees_per_kb, 14)
+
+ self.log.info("Creating transactions and mining them at a block size that is just big enough")
+ # Generate transactions while mining 10 more blocks, this time with node1
+ # which mines blocks with capacity just above the rate that transactions are being created
+ self.transact_and_mine(10, self.nodes[1])
+ check_estimates(self.nodes[1], self.fees_per_kb, 2)
+
+ # Finish by mining a normal-sized block:
+ while len(self.nodes[1].getrawmempool()) > 0:
+ self.nodes[1].generate(1)
+
+ sync_blocks(self.nodes[0:3], wait=.1)
+ self.log.info("Final estimates after emptying mempools")
+ check_estimates(self.nodes[1], self.fees_per_kb, 2)
+
+if __name__ == '__main__':
+ EstimateFeeTest().main()
diff --git a/test/functional/test_framework/__init__.py b/test/functional/test_framework/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/functional/test_framework/__init__.py
diff --git a/test/functional/test_framework/address.py b/test/functional/test_framework/address.py
new file mode 100644
index 0000000000..96bebe1ea1
--- /dev/null
+++ b/test/functional/test_framework/address.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Encode and decode BASE58, P2PKH and P2SH addresses."""
+
+from .script import hash256, hash160, sha256, CScript, OP_0
+from .util import bytes_to_hex_str, hex_str_to_bytes
+
+chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
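+# The base58 alphabet omits 0, O, I and l to avoid visually ambiguous characters.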
+
+def byte_to_base58(b, version):
+    result = ''
+    # Build the hex string: version byte, then payload, then the first four
+    # bytes of the double-SHA256 checksum
+    s = bytes_to_hex_str(b)
+    s = bytes_to_hex_str(chr(version).encode('latin-1')) + s
+    checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(s)))
+    s += checksum[:8]
+    value = int('0x' + s, 0)
+    while value > 0:
+        result = chars[value % 58] + result
+        value //= 58
+    # Each leading zero byte is represented by a leading '1' in base58
+    while (s[:2] == '00'):
+        result = chars[0] + result
+        s = s[2:]
+    return result
+
+# TODO: def base58_decode
+
+def keyhash_to_p2pkh(hash, main = False):
+ assert (len(hash) == 20)
+ version = 0 if main else 111
+ return byte_to_base58(hash, version)
+
+def scripthash_to_p2sh(hash, main = False):
+ assert (len(hash) == 20)
+ version = 5 if main else 196
+ return byte_to_base58(hash, version)
+
+def key_to_p2pkh(key, main = False):
+ key = check_key(key)
+ return keyhash_to_p2pkh(hash160(key), main)
+
+def script_to_p2sh(script, main = False):
+ script = check_script(script)
+ return scripthash_to_p2sh(hash160(script), main)
+
+def key_to_p2sh_p2wpkh(key, main = False):
+ key = check_key(key)
+ p2shscript = CScript([OP_0, hash160(key)])
+ return script_to_p2sh(p2shscript, main)
+
+def script_to_p2sh_p2wsh(script, main = False):
+ script = check_script(script)
+ p2shscript = CScript([OP_0, sha256(script)])
+ return script_to_p2sh(p2shscript, main)
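+# In the two wrappers above, the P2SH redeemScript is the segwit program itself:
+# OP_0 followed by a 20-byte key hash (P2WPKH) or a 32-byte script hash (P2WSH).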
+
+def check_key(key):
+ if (type(key) is str):
+ key = hex_str_to_bytes(key) # Assuming this is hex string
+ if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
+ return key
+ assert(False)
+
+def check_script(script):
+ if (type(script) is str):
+ script = hex_str_to_bytes(script) # Assuming this is hex string
+ if (type(script) is bytes or type(script) is CScript):
+ return script
+ assert(False)
diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py
new file mode 100644
index 0000000000..9ab3094b06
--- /dev/null
+++ b/test/functional/test_framework/authproxy.py
@@ -0,0 +1,190 @@
+# Copyright (c) 2011 Jeff Garzik
+#
+# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
+#
+# Copyright (c) 2007 Jan-Klaas Kollhof
+#
+# This file is part of jsonrpc.
+#
+# jsonrpc is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2.1 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this software; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+"""HTTP proxy for opening RPC connection to bitcoind.
+
+AuthServiceProxy has the following improvements over python-jsonrpc's
+ServiceProxy class:
+
+- HTTP connections persist for the life of the AuthServiceProxy object
+ (if server supports HTTP/1.1)
+- sends protocol 'version', per JSON-RPC 1.1
+- sends proper, incrementing 'id'
+- sends Basic HTTP authentication headers
+- parses all JSON numbers that look like floats as Decimal
+- uses standard Python json lib
+"""
+
+try:
+ import http.client as httplib
+except ImportError:
+ import httplib
+import base64
+import decimal
+import json
+import logging
+import socket
+try:
+ import urllib.parse as urlparse
+except ImportError:
+ import urlparse
+
+USER_AGENT = "AuthServiceProxy/0.1"
+
+HTTP_TIMEOUT = 30
+
+log = logging.getLogger("BitcoinRPC")
+
+class JSONRPCException(Exception):
+ def __init__(self, rpc_error):
+ try:
+ errmsg = '%(message)s (%(code)i)' % rpc_error
+ except (KeyError, TypeError):
+ errmsg = ''
+ Exception.__init__(self, errmsg)
+ self.error = rpc_error
+
+
+def EncodeDecimal(o):
+ if isinstance(o, decimal.Decimal):
+ return str(o)
+ raise TypeError(repr(o) + " is not JSON serializable")
+
+class AuthServiceProxy(object):
+ __id_count = 0
+
+ # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
+ def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
+ self.__service_url = service_url
+ self._service_name = service_name
+ self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests
+ self.__url = urlparse.urlparse(service_url)
+ if self.__url.port is None:
+ port = 80
+ else:
+ port = self.__url.port
+ (user, passwd) = (self.__url.username, self.__url.password)
+ try:
+ user = user.encode('utf8')
+ except AttributeError:
+ pass
+ try:
+ passwd = passwd.encode('utf8')
+ except AttributeError:
+ pass
+ authpair = user + b':' + passwd
+ self.__auth_header = b'Basic ' + base64.b64encode(authpair)
+
+ if connection:
+ # Callables re-use the connection of the original proxy
+ self.__conn = connection
+ elif self.__url.scheme == 'https':
+ self.__conn = httplib.HTTPSConnection(self.__url.hostname, port,
+ timeout=timeout)
+ else:
+ self.__conn = httplib.HTTPConnection(self.__url.hostname, port,
+ timeout=timeout)
+
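+    # Attribute access returns a new proxy bound to the requested method name,
+    # reusing this proxy's HTTP connection, so e.g. proxy.getblockcount() issues
+    # a "getblockcount" JSON-RPC call.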
+ def __getattr__(self, name):
+ if name.startswith('__') and name.endswith('__'):
+ # Python internal stuff
+ raise AttributeError
+ if self._service_name is not None:
+ name = "%s.%s" % (self._service_name, name)
+ return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
+
+ def _request(self, method, path, postdata):
+ '''
+        Do an HTTP request, with retry if we get disconnected (e.g. due to a timeout).
+ This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
+ '''
+ headers = {'Host': self.__url.hostname,
+ 'User-Agent': USER_AGENT,
+ 'Authorization': self.__auth_header,
+ 'Content-type': 'application/json'}
+ try:
+ self.__conn.request(method, path, postdata, headers)
+ return self._get_response()
+ except httplib.BadStatusLine as e:
+ if e.line == "''": # if connection was closed, try again
+ self.__conn.close()
+ self.__conn.request(method, path, postdata, headers)
+ return self._get_response()
+ else:
+ raise
+ except (BrokenPipeError,ConnectionResetError):
+ # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
+ # ConnectionResetError happens on FreeBSD with Python 3.4
+ self.__conn.close()
+ self.__conn.request(method, path, postdata, headers)
+ return self._get_response()
+
+ def __call__(self, *args, **argsn):
+ AuthServiceProxy.__id_count += 1
+
+ log.debug("-%s-> %s %s"%(AuthServiceProxy.__id_count, self._service_name,
+ json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
+ if args and argsn:
+ raise ValueError('Cannot handle both named and positional arguments')
+ postdata = json.dumps({'version': '1.1',
+ 'method': self._service_name,
+ 'params': args or argsn,
+ 'id': AuthServiceProxy.__id_count}, default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
+ response = self._request('POST', self.__url.path, postdata.encode('utf-8'))
+ if response['error'] is not None:
+ raise JSONRPCException(response['error'])
+ elif 'result' not in response:
+ raise JSONRPCException({
+ 'code': -343, 'message': 'missing JSON-RPC result'})
+ else:
+ return response['result']
+
+ def _batch(self, rpc_call_list):
+ postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
+ log.debug("--> "+postdata)
+ return self._request('POST', self.__url.path, postdata.encode('utf-8'))
+
+ def _get_response(self):
+ try:
+ http_response = self.__conn.getresponse()
+ except socket.timeout as e:
+ raise JSONRPCException({
+ 'code': -344,
+ 'message': '%r RPC took longer than %f seconds. Consider '
+ 'using larger timeout for calls that take '
+ 'longer to return.' % (self._service_name,
+ self.__conn.timeout)})
+ if http_response is None:
+ raise JSONRPCException({
+ 'code': -342, 'message': 'missing HTTP response from server'})
+
+ content_type = http_response.getheader('Content-Type')
+ if content_type != 'application/json':
+ raise JSONRPCException({
+ 'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)})
+
+ responsedata = http_response.read().decode('utf8')
+ response = json.loads(responsedata, parse_float=decimal.Decimal)
+ if "error" in response and response["error"] is None:
+ log.debug("<-%s- %s"%(response["id"], json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
+ else:
+ log.debug("<-- "+responsedata)
+ return response
diff --git a/test/functional/test_framework/bignum.py b/test/functional/test_framework/bignum.py
new file mode 100644
index 0000000000..024611da6e
--- /dev/null
+++ b/test/functional/test_framework/bignum.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+#
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Big number routines.
+
+This file is copied from python-bitcoinlib.
+"""
+
+import struct
+
+
+# generic big endian MPI format
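+# (a serialized MPI is a 4-byte big-endian length prefix followed by the
+# big-endian magnitude; negative values are flagged by setting the high bit
+# of the first magnitude byte)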
+
+def bn_bytes(v, have_ext=False):
+ ext = 0
+ if have_ext:
+ ext = 1
+ return ((v.bit_length()+7)//8) + ext
+
+def bn2bin(v):
+ s = bytearray()
+ i = bn_bytes(v)
+ while i > 0:
+ s.append((v >> ((i-1) * 8)) & 0xff)
+ i -= 1
+ return s
+
+def bin2bn(s):
+    n = 0
+    for ch in s:
+        n = (n << 8) | ch
+    return n
+
+def bn2mpi(v):
+ have_ext = False
+ if v.bit_length() > 0:
+ have_ext = (v.bit_length() & 0x07) == 0
+
+ neg = False
+ if v < 0:
+ neg = True
+ v = -v
+
+ s = struct.pack(b">I", bn_bytes(v, have_ext))
+ ext = bytearray()
+ if have_ext:
+ ext.append(0)
+ v_bin = bn2bin(v)
+ if neg:
+ if have_ext:
+ ext[0] |= 0x80
+ else:
+ v_bin[0] |= 0x80
+ return s + ext + v_bin
+
+def mpi2bn(s):
+ if len(s) < 4:
+ return None
+ s_size = bytes(s[:4])
+ v_len = struct.unpack(b">I", s_size)[0]
+ if len(s) != (v_len + 4):
+ return None
+ if v_len == 0:
+ return 0
+
+ v_str = bytearray(s[4:])
+ neg = False
+ i = v_str[0]
+ if i & 0x80:
+ neg = True
+ i &= ~0x80
+ v_str[0] = i
+
+ v = bin2bn(v_str)
+
+ if neg:
+ return -v
+ return v
+
+# bitcoin-specific little endian format, with implicit size
+def mpi2vch(s):
+ r = s[4:] # strip size
+ r = r[::-1] # reverse string, converting BE->LE
+ return r
+
+def bn2vch(v):
+ return bytes(mpi2vch(bn2mpi(v)))
+
+def vch2mpi(s):
+ r = struct.pack(b">I", len(s)) # size
+ r += s[::-1] # reverse string, converting LE->BE
+ return r
+
+def vch2bn(s):
+ return mpi2bn(vch2mpi(s))
+
diff --git a/test/functional/test_framework/blockstore.py b/test/functional/test_framework/blockstore.py
new file mode 100644
index 0000000000..4cfd682bb5
--- /dev/null
+++ b/test/functional/test_framework/blockstore.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""BlockStore and TxStore helper classes."""
+
+from .mininode import *
+from io import BytesIO
+import dbm.dumb as dbmd
+
+logger = logging.getLogger("TestFramework.blockstore")
+
+class BlockStore(object):
+ """BlockStore helper class.
+
+ BlockStore keeps a map of blocks and implements helper functions for
+ responding to getheaders and getdata, and for constructing a getheaders
+ message.
+ """
+
+ def __init__(self, datadir):
+ self.blockDB = dbmd.open(datadir + "/blocks", 'c')
+ self.currentBlock = 0
+ self.headers_map = dict()
+
+ def close(self):
+ self.blockDB.close()
+
+ def erase(self, blockhash):
+ del self.blockDB[repr(blockhash)]
+
+ # lookup an entry and return the item as raw bytes
+ def get(self, blockhash):
+ value = None
+ try:
+ value = self.blockDB[repr(blockhash)]
+ except KeyError:
+ return None
+ return value
+
+ # lookup an entry and return it as a CBlock
+ def get_block(self, blockhash):
+ ret = None
+ serialized_block = self.get(blockhash)
+ if serialized_block is not None:
+ f = BytesIO(serialized_block)
+ ret = CBlock()
+ ret.deserialize(f)
+ ret.calc_sha256()
+ return ret
+
+ def get_header(self, blockhash):
+ try:
+ return self.headers_map[blockhash]
+ except KeyError:
+ return None
+
+ # Note: this pulls full blocks out of the database just to retrieve
+ # the headers -- perhaps we could keep a separate data structure
+ # to avoid this overhead.
+ def headers_for(self, locator, hash_stop, current_tip=None):
+ if current_tip is None:
+ current_tip = self.currentBlock
+ current_block_header = self.get_header(current_tip)
+ if current_block_header is None:
+ return None
+
+ response = msg_headers()
+ headersList = [ current_block_header ]
+ maxheaders = 2000
+ while (headersList[0].sha256 not in locator.vHave):
+ prevBlockHash = headersList[0].hashPrevBlock
+ prevBlockHeader = self.get_header(prevBlockHash)
+ if prevBlockHeader is not None:
+ headersList.insert(0, prevBlockHeader)
+ else:
+ break
+ headersList = headersList[:maxheaders] # truncate if we have too many
+ hashList = [x.sha256 for x in headersList]
+ index = len(headersList)
+ if (hash_stop in hashList):
+ index = hashList.index(hash_stop)+1
+ response.headers = headersList[:index]
+ return response
+
+ def add_block(self, block):
+ block.calc_sha256()
+ try:
+ self.blockDB[repr(block.sha256)] = bytes(block.serialize())
+        except TypeError:
+            logger.exception("Unexpected error")
+ self.currentBlock = block.sha256
+ self.headers_map[block.sha256] = CBlockHeader(block)
+
+ def add_header(self, header):
+ self.headers_map[header.sha256] = header
+
+ # lookup the hashes in "inv", and return p2p messages for delivering
+ # blocks found.
+ def get_blocks(self, inv):
+ responses = []
+ for i in inv:
+ if (i.type == 2): # MSG_BLOCK
+ data = self.get(i.hash)
+ if data is not None:
+ # Use msg_generic to avoid re-serialization
+ responses.append(msg_generic(b"block", data))
+ return responses
+
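+    # Build a block locator for the given tip: walk back one block at a time
+    # for the first ten entries, then double the step size each iteration so
+    # the locator stays short even on long chains.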
+ def get_locator(self, current_tip=None):
+ if current_tip is None:
+ current_tip = self.currentBlock
+ r = []
+ counter = 0
+ step = 1
+ lastBlock = self.get_block(current_tip)
+ while lastBlock is not None:
+ r.append(lastBlock.hashPrevBlock)
+ for i in range(step):
+ lastBlock = self.get_block(lastBlock.hashPrevBlock)
+ if lastBlock is None:
+ break
+ counter += 1
+ if counter > 10:
+ step *= 2
+ locator = CBlockLocator()
+ locator.vHave = r
+ return locator
+
+class TxStore(object):
+ def __init__(self, datadir):
+ self.txDB = dbmd.open(datadir + "/transactions", 'c')
+
+ def close(self):
+ self.txDB.close()
+
+ # lookup an entry and return the item as raw bytes
+ def get(self, txhash):
+ value = None
+ try:
+ value = self.txDB[repr(txhash)]
+ except KeyError:
+ return None
+ return value
+
+ def get_transaction(self, txhash):
+ ret = None
+ serialized_tx = self.get(txhash)
+ if serialized_tx is not None:
+ f = BytesIO(serialized_tx)
+ ret = CTransaction()
+ ret.deserialize(f)
+ ret.calc_sha256()
+ return ret
+
+ def add_transaction(self, tx):
+ tx.calc_sha256()
+ try:
+ self.txDB[repr(tx.sha256)] = bytes(tx.serialize())
+        except TypeError:
+            logger.exception("Unexpected error")
+
+ def get_transactions(self, inv):
+ responses = []
+ for i in inv:
+ if (i.type == 1): # MSG_TX
+ tx = self.get(i.hash)
+ if tx is not None:
+ responses.append(msg_generic(b"tx", tx))
+ return responses
diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py
new file mode 100644
index 0000000000..2c9a0857df
--- /dev/null
+++ b/test/functional/test_framework/blocktools.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Utilities for manipulating blocks and transactions."""
+
+from .mininode import *
+from .script import CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN
+
+# Create a block (with regtest difficulty)
+def create_block(hashprev, coinbase, nTime=None):
+ block = CBlock()
+ if nTime is None:
+ import time
+ block.nTime = int(time.time()+600)
+ else:
+ block.nTime = nTime
+ block.hashPrevBlock = hashprev
+ block.nBits = 0x207fffff # Will break after a difficulty adjustment...
+ block.vtx.append(coinbase)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.calc_sha256()
+ return block
+
+# From BIP141
+WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
+
+# According to BIP141, blocks with witness rules active must commit to the
+# hash of all in-block transactions including witness.
+def add_witness_commitment(block, nonce=0):
+ # First calculate the merkle root of the block's
+ # transactions, with witnesses.
+ witness_nonce = nonce
+ witness_root = block.calc_witness_merkle_root()
+ witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(witness_nonce)))
+ # witness_nonce should go to coinbase witness.
+ block.vtx[0].wit.vtxinwit = [CTxInWitness()]
+ block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]
+
+ # witness commitment is the last OP_RETURN output in coinbase
+ output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
+ block.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, output_data])))
+ block.vtx[0].rehash()
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+
+
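+# Serialize an integer in CScriptNum format: little-endian, minimally encoded,
+# with the sign carried in the high bit of the final byte.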
+def serialize_script_num(value):
+ r = bytearray(0)
+ if value == 0:
+ return r
+ neg = value < 0
+ absvalue = -value if neg else value
+ while (absvalue):
+ r.append(int(absvalue & 0xff))
+ absvalue >>= 8
+ if r[-1] & 0x80:
+ r.append(0x80 if neg else 0)
+ elif neg:
+ r[-1] |= 0x80
+ return r
+
+# Create a coinbase transaction, assuming no miner fees.
+# If pubkey is passed in, the coinbase output will be a P2PK output;
+# otherwise an anyone-can-spend output.
+def create_coinbase(height, pubkey = None):
+ coinbase = CTransaction()
+ coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
+ ser_string(serialize_script_num(height)), 0xffffffff))
+ coinbaseoutput = CTxOut()
+ coinbaseoutput.nValue = 50 * COIN
+ halvings = int(height/150) # regtest
+ coinbaseoutput.nValue >>= halvings
+ if (pubkey != None):
+ coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
+ else:
+ coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
+ coinbase.vout = [ coinbaseoutput ]
+ coinbase.calc_sha256()
+ return coinbase
+
+# Create a transaction.
+# If the scriptPubKey is not specified, make it anyone-can-spend.
+def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()):
+ tx = CTransaction()
+ assert(n < len(prevtx.vout))
+ tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
+ tx.vout.append(CTxOut(value, scriptPubKey))
+ tx.calc_sha256()
+ return tx
+
+def get_legacy_sigopcount_block(block, fAccurate=True):
+ count = 0
+ for tx in block.vtx:
+ count += get_legacy_sigopcount_tx(tx, fAccurate)
+ return count
+
+def get_legacy_sigopcount_tx(tx, fAccurate=True):
+ count = 0
+ for i in tx.vout:
+ count += i.scriptPubKey.GetSigOpCount(fAccurate)
+ for j in tx.vin:
+ # scriptSig might be of type bytes, so convert to CScript for the moment
+ count += CScript(j.scriptSig).GetSigOpCount(fAccurate)
+ return count
diff --git a/test/functional/test_framework/comptool.py b/test/functional/test_framework/comptool.py
new file mode 100755
index 0000000000..70d1d700ef
--- /dev/null
+++ b/test/functional/test_framework/comptool.py
@@ -0,0 +1,410 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Compare two or more bitcoinds to each other.
+
+To use, create a class that implements get_tests(), and pass it in
+as the test generator to TestManager. get_tests() should be a python
+generator that returns TestInstance objects. See below for definition.
+
+TestNode behaves as follows:
+ Configure with a BlockStore and TxStore
+ on_inv: log the message but don't request
+ on_headers: log the chain tip
+ on_pong: update ping response map (for synchronization)
+ on_getheaders: provide headers via BlockStore
+ on_getdata: provide blocks via BlockStore
+"""
+
+from .mininode import *
+from .blockstore import BlockStore, TxStore
+from .util import p2p_port
+
+import logging
+
+logger = logging.getLogger("TestFramework.comptool")
+
+global mininode_lock
+
+class RejectResult(object):
+ """Outcome that expects rejection of a transaction or block."""
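+    # e.g. RejectResult(16, b'bad-txns') matches any code-16 reject whose
+    # reason starts with b'bad-txns'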
+ def __init__(self, code, reason=b''):
+ self.code = code
+ self.reason = reason
+ def match(self, other):
+ if self.code != other.code:
+ return False
+ return other.reason.startswith(self.reason)
+ def __repr__(self):
+ return '%i:%s' % (self.code,self.reason or '*')
+
+class TestNode(NodeConnCB):
+
+ def __init__(self, block_store, tx_store):
+ NodeConnCB.__init__(self)
+ self.conn = None
+ self.bestblockhash = None
+ self.block_store = block_store
+ self.block_request_map = {}
+ self.tx_store = tx_store
+ self.tx_request_map = {}
+ self.block_reject_map = {}
+ self.tx_reject_map = {}
+
+ # When the pingmap is non-empty we're waiting for
+ # a response
+ self.pingMap = {}
+ self.lastInv = []
+ self.closed = False
+
+ def on_close(self, conn):
+ self.closed = True
+
+ def add_connection(self, conn):
+ self.conn = conn
+
+ def on_headers(self, conn, message):
+ if len(message.headers) > 0:
+ best_header = message.headers[-1]
+ best_header.calc_sha256()
+ self.bestblockhash = best_header.sha256
+
+ def on_getheaders(self, conn, message):
+ response = self.block_store.headers_for(message.locator, message.hashstop)
+ if response is not None:
+ conn.send_message(response)
+
+ def on_getdata(self, conn, message):
+ [conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
+ [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
+
+ for i in message.inv:
+ if i.type == 1:
+ self.tx_request_map[i.hash] = True
+ elif i.type == 2:
+ self.block_request_map[i.hash] = True
+
+ def on_inv(self, conn, message):
+ self.lastInv = [x.hash for x in message.inv]
+
+ def on_pong(self, conn, message):
+ try:
+ del self.pingMap[message.nonce]
+ except KeyError:
+ raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
+
+ def on_reject(self, conn, message):
+ if message.message == b'tx':
+ self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
+ if message.message == b'block':
+ self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
+
+ def send_inv(self, obj):
+ mtype = 2 if isinstance(obj, CBlock) else 1
+ self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
+
+ def send_getheaders(self):
+ # We ask for headers from their last tip.
+ m = msg_getheaders()
+ m.locator = self.block_store.get_locator(self.bestblockhash)
+ self.conn.send_message(m)
+
+ def send_header(self, header):
+ m = msg_headers()
+ m.headers.append(header)
+ self.conn.send_message(m)
+
+ # This assumes BIP31
+ def send_ping(self, nonce):
+ self.pingMap[nonce] = True
+ self.conn.send_message(msg_ping(nonce))
+
+ def received_ping_response(self, nonce):
+ return nonce not in self.pingMap
+
+ def send_mempool(self):
+ self.lastInv = []
+ self.conn.send_message(msg_mempool())
+
+# TestInstance:
+#
+# Instances of these are generated by the test generator, and fed into the
+# comptool.
+#
+# "blocks_and_transactions" should be an array of
+# [obj, True/False/None, hash/None]:
+# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
+# - the second value indicates whether the object should be accepted
+# into the blockchain or mempool (for tests where we expect a certain
+# answer), or "None" if we don't expect a certain answer and are just
+# comparing the behavior of the nodes being tested.
+# - the third value is the hash to test the tip against (if None or omitted,
+# use the hash of the block)
+# - NOTE: if a block header, no test is performed; instead the header is
+# just added to the block_store. This is to facilitate block delivery
+# when communicating with headers-first clients (when withholding an
+# intermediate block).
+# sync_every_block: if True, then each block will be inv'ed, synced, and
+# nodes will be tested based on the outcome for the block. If False,
+# then inv's accumulate until all blocks are processed (or max inv size
+# is reached) and then sent out in one inv message. Then the final block
+# will be synced across all connections, and the outcome of the final
+# block will be tested.
+# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
+# on the final tx is None, then contents of entire mempool are compared
+# across all connections. (If outcome of final tx is specified as true
+# or false, then only the last tx is tested against outcome.)
+
+class TestInstance(object):
+ def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
+ self.blocks_and_transactions = objects if objects else []
+ self.sync_every_block = sync_every_block
+ self.sync_every_tx = sync_every_tx
+
+class TestManager(object):
+
+ def __init__(self, testgen, datadir):
+ self.test_generator = testgen
+ self.connections = []
+ self.test_nodes = []
+ self.block_store = BlockStore(datadir)
+ self.tx_store = TxStore(datadir)
+ self.ping_counter = 1
+
+ def add_all_connections(self, nodes):
+ for i in range(len(nodes)):
+ # Create a p2p connection to each node
+ test_node = TestNode(self.block_store, self.tx_store)
+ self.test_nodes.append(test_node)
+ self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
+ # Make sure the TestNode (callback class) has a reference to its
+ # associated NodeConn
+ test_node.add_connection(self.connections[-1])
+
+ def clear_all_connections(self):
+ self.connections = []
+ self.test_nodes = []
+
+ def wait_for_disconnections(self):
+ def disconnected():
+ return all(node.closed for node in self.test_nodes)
+ return wait_until(disconnected, timeout=10)
+
+ def wait_for_verack(self):
+ def veracked():
+ return all(node.verack_received for node in self.test_nodes)
+ return wait_until(veracked, timeout=10)
+
+ def wait_for_pings(self, counter):
+ def received_pongs():
+ return all(node.received_ping_response(counter) for node in self.test_nodes)
+ return wait_until(received_pongs)
+
+    # sync_blocks: Wait for all connections to request the given blockhash,
+    # then send getheaders to find out the tip of each node, and synchronize
+    # the response by using a ping (and waiting for a pong with the same nonce).
+ def sync_blocks(self, blockhash, num_blocks):
+ def blocks_requested():
+ return all(
+ blockhash in node.block_request_map and node.block_request_map[blockhash]
+ for node in self.test_nodes
+ )
+
+ # --> error if not requested
+ if not wait_until(blocks_requested, attempts=20*num_blocks):
+ raise AssertionError("Not all nodes requested block")
+
+ # Send getheaders message
+ [ c.cb.send_getheaders() for c in self.connections ]
+
+ # Send ping and wait for response -- synchronization hack
+ [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
+ self.wait_for_pings(self.ping_counter)
+ self.ping_counter += 1
+
+    # Analogous to sync_blocks (see above)
+ def sync_transaction(self, txhash, num_events):
+ # Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
+ def transaction_requested():
+ return all(
+ txhash in node.tx_request_map and node.tx_request_map[txhash]
+ for node in self.test_nodes
+ )
+
+ # --> error if not requested
+ if not wait_until(transaction_requested, attempts=20*num_events):
+ raise AssertionError("Not all nodes requested transaction")
+
+ # Get the mempool
+ [ c.cb.send_mempool() for c in self.connections ]
+
+ # Send ping and wait for response -- synchronization hack
+ [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
+ self.wait_for_pings(self.ping_counter)
+ self.ping_counter += 1
+
+ # Sort inv responses from each node
+ with mininode_lock:
+ [ c.cb.lastInv.sort() for c in self.connections ]
+
+    # Verify that the tips of all connections agree with each other, and
+    # with the expected outcome (if given)
+ def check_results(self, blockhash, outcome):
+ with mininode_lock:
+ for c in self.connections:
+ if outcome is None:
+ if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
+ return False
+ elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
+ if c.cb.bestblockhash == blockhash:
+ return False
+ if blockhash not in c.cb.block_reject_map:
+ logger.error('Block not in reject map: %064x' % (blockhash))
+ return False
+ if not outcome.match(c.cb.block_reject_map[blockhash]):
+ logger.error('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
+ return False
+ elif ((c.cb.bestblockhash == blockhash) != outcome):
+ return False
+ return True
+
+ # Either check that the mempools all agree with each other, or that
+ # txhash's presence in the mempool matches the outcome specified.
+ # This is somewhat of a strange comparison, in that we're either comparing
+ # a particular tx to an outcome, or the entire mempools altogether;
+ # perhaps it would be useful to add the ability to check explicitly that
+ # a particular tx's existence in the mempool is the same across all nodes.
+ def check_mempool(self, txhash, outcome):
+ with mininode_lock:
+ for c in self.connections:
+ if outcome is None:
+ # Make sure the mempools agree with each other
+ if c.cb.lastInv != self.connections[0].cb.lastInv:
+ return False
+ elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
+ if txhash in c.cb.lastInv:
+ return False
+ if txhash not in c.cb.tx_reject_map:
+ logger.error('Tx not in reject map: %064x' % (txhash))
+ return False
+ if not outcome.match(c.cb.tx_reject_map[txhash]):
+ logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
+ return False
+ elif ((txhash in c.cb.lastInv) != outcome):
+ return False
+ return True
+
+ def run(self):
+ # Wait until verack is received
+ self.wait_for_verack()
+
+ test_number = 1
+ for test_instance in self.test_generator.get_tests():
+ # We use these variables to keep track of the last block
+ # and last transaction in the tests, which are used
+ # if we're not syncing on every block or every tx.
+ [ block, block_outcome, tip ] = [ None, None, None ]
+ [ tx, tx_outcome ] = [ None, None ]
+ invqueue = []
+
+ for test_obj in test_instance.blocks_and_transactions:
+ b_or_t = test_obj[0]
+ outcome = test_obj[1]
+ # Determine if we're dealing with a block or tx
+ if isinstance(b_or_t, CBlock): # Block test runner
+ block = b_or_t
+ block_outcome = outcome
+ tip = block.sha256
+ # each test_obj can have an optional third argument
+ # to specify the tip we should compare with
+ # (default is to use the block being tested)
+ if len(test_obj) >= 3:
+ tip = test_obj[2]
+
+ # Add to shared block_store, set as current block
+ # If there was an open getdata request for the block
+ # previously, and we didn't have an entry in the
+ # block_store, then immediately deliver, because the
+ # node wouldn't send another getdata request while
+ # the earlier one is outstanding.
+ first_block_with_hash = True
+ if self.block_store.get(block.sha256) is not None:
+ first_block_with_hash = False
+ with mininode_lock:
+ self.block_store.add_block(block)
+ for c in self.connections:
+ if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
+ # There was a previous request for this block hash
+ # Most likely, we delivered a header for this block
+ # but never had the block to respond to the getdata
+ c.send_message(msg_block(block))
+ else:
+ c.cb.block_request_map[block.sha256] = False
+ # Either send inv's to each node and sync, or add
+ # to invqueue for later inv'ing.
+ if (test_instance.sync_every_block):
+ # if we expect success, send inv and sync every block
+ # if we expect failure, just push the block and see what happens.
+ if outcome == True:
+ [ c.cb.send_inv(block) for c in self.connections ]
+ self.sync_blocks(block.sha256, 1)
+ else:
+ [ c.send_message(msg_block(block)) for c in self.connections ]
+ [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
+ self.wait_for_pings(self.ping_counter)
+ self.ping_counter += 1
+ if (not self.check_results(tip, outcome)):
+ raise AssertionError("Test failed at test %d" % test_number)
+ else:
+ invqueue.append(CInv(2, block.sha256))
+ elif isinstance(b_or_t, CBlockHeader):
+ block_header = b_or_t
+ self.block_store.add_header(block_header)
+ [ c.cb.send_header(block_header) for c in self.connections ]
+
+ else: # Tx test runner
+ assert(isinstance(b_or_t, CTransaction))
+ tx = b_or_t
+ tx_outcome = outcome
+ # Add to shared tx store and clear map entry
+ with mininode_lock:
+ self.tx_store.add_transaction(tx)
+ for c in self.connections:
+ c.cb.tx_request_map[tx.sha256] = False
+ # Again, either inv to all nodes or save for later
+ if (test_instance.sync_every_tx):
+ [ c.cb.send_inv(tx) for c in self.connections ]
+ self.sync_transaction(tx.sha256, 1)
+ if (not self.check_mempool(tx.sha256, outcome)):
+ raise AssertionError("Test failed at test %d" % test_number)
+ else:
+ invqueue.append(CInv(1, tx.sha256))
+ # Ensure we're not overflowing the inv queue
+ if len(invqueue) == MAX_INV_SZ:
+ [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ invqueue = []
+
+ # Do final sync if we weren't syncing on every block or every tx.
+ if (not test_instance.sync_every_block and block is not None):
+ if len(invqueue) > 0:
+ [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ invqueue = []
+ self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
+ if (not self.check_results(tip, block_outcome)):
+ raise AssertionError("Block test failed at test %d" % test_number)
+ if (not test_instance.sync_every_tx and tx is not None):
+ if len(invqueue) > 0:
+ [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ invqueue = []
+ self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
+ if (not self.check_mempool(tx.sha256, tx_outcome)):
+ raise AssertionError("Mempool test failed at test %d" % test_number)
+
+ logger.info("Test %d: PASS" % test_number)
+ test_number += 1
+
+ [ c.disconnect_node() for c in self.connections ]
+ self.wait_for_disconnections()
+ self.block_store.close()
+ self.tx_store.close()
diff --git a/test/functional/test_framework/coverage.py b/test/functional/test_framework/coverage.py
new file mode 100644
index 0000000000..3f87ef91f6
--- /dev/null
+++ b/test/functional/test_framework/coverage.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Utilities for doing coverage analysis on the RPC interface.
+
+Provides a way to track which RPC commands are exercised during
+testing.
+"""
+
+import os
+
+
+REFERENCE_FILENAME = 'rpc_interface.txt'
+
+
+class AuthServiceProxyWrapper(object):
+ """
+ An object that wraps AuthServiceProxy to record specific RPC calls.
+
+ """
+ def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
+ """
+ Kwargs:
+ auth_service_proxy_instance (AuthServiceProxy): the instance
+ being wrapped.
+ coverage_logfile (str): if specified, write each service_name
+ out to a file when called.
+
+ """
+ self.auth_service_proxy_instance = auth_service_proxy_instance
+ self.coverage_logfile = coverage_logfile
+
+ def __getattr__(self, *args, **kwargs):
+ return_val = self.auth_service_proxy_instance.__getattr__(
+ *args, **kwargs)
+
+ return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
+
+ def __call__(self, *args, **kwargs):
+ """
+ Delegates to AuthServiceProxy, then writes the particular RPC method
+ called to a file.
+
+ """
+ return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
+ rpc_method = self.auth_service_proxy_instance._service_name
+
+ if self.coverage_logfile:
+ with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
+ f.write("%s\n" % rpc_method)
+
+ return return_val
+
+ @property
+ def url(self):
+ return self.auth_service_proxy_instance.url
+
+
+def get_filename(dirname, n_node):
+ """
+ Get a filename unique to the test process ID and node.
+
+ This file will contain a list of RPC commands covered.
+ """
+ pid = str(os.getpid())
+ return os.path.join(
+ dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
+
+
+def write_all_rpc_commands(dirname, node):
+ """
+ Write out a list of all RPC functions available in `bitcoin-cli` for
+ coverage comparison. This will only happen once per coverage
+ directory.
+
+ Args:
+ dirname (str): temporary test dir
+ node (AuthServiceProxy): client
+
+ Returns:
+ bool: whether the RPC interface file was written.
+
+ """
+ filename = os.path.join(dirname, REFERENCE_FILENAME)
+
+ if os.path.isfile(filename):
+ return False
+
+ help_output = node.help().split('\n')
+ commands = set()
+
+ for line in help_output:
+ line = line.strip()
+
+ # Ignore blanks and headers
+ if line and not line.startswith('='):
+ commands.add("%s\n" % line.split()[0])
+
+ with open(filename, 'w', encoding='utf8') as f:
+ f.writelines(list(commands))
+
+ return True
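+
+# A minimal usage sketch (illustrative only; the test framework wires this up
+# itself). `proxy` and `tmpdir` are assumed to be an existing AuthServiceProxy
+# instance and the coverage directory, respectively:
+#
+#   logfile = get_filename(tmpdir, 0)
+#   node = AuthServiceProxyWrapper(proxy, coverage_logfile=logfile)
+#   node.getblockcount()                  # appends "getblockcount" to logfile
+#   write_all_rpc_commands(tmpdir, node)  # reference list, written only once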
diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py
new file mode 100644
index 0000000000..85a6158a2f
--- /dev/null
+++ b/test/functional/test_framework/key.py
@@ -0,0 +1,232 @@
+# Copyright (c) 2011 Sam Rushing
+"""ECC secp256k1 OpenSSL wrapper.
+
+WARNING: This module does not mlock() secrets; your private keys may end up on
+disk in swap! Use with caution!
+
+This file is modified from python-bitcoinlib.
+"""
+
+import ctypes
+import ctypes.util
+import hashlib
+import sys
+
+ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library('ssl') or 'libeay32')
+
+ssl.BN_new.restype = ctypes.c_void_p
+ssl.BN_new.argtypes = []
+
+ssl.BN_bin2bn.restype = ctypes.c_void_p
+ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
+
+ssl.BN_CTX_free.restype = None
+ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
+
+ssl.BN_CTX_new.restype = ctypes.c_void_p
+ssl.BN_CTX_new.argtypes = []
+
+ssl.ECDH_compute_key.restype = ctypes.c_int
+ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
+
+ssl.ECDSA_sign.restype = ctypes.c_int
+ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
+
+ssl.ECDSA_verify.restype = ctypes.c_int
+ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
+
+ssl.EC_KEY_free.restype = None
+ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
+
+ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
+ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
+
+ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
+ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
+
+ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
+ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
+
+ssl.EC_KEY_set_private_key.restype = ctypes.c_int
+ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
+
+ssl.EC_KEY_set_conv_form.restype = None
+ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
+
+ssl.EC_KEY_set_public_key.restype = ctypes.c_int
+ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
+
+ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
+ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
+
+ssl.EC_POINT_new.restype = ctypes.c_void_p
+ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
+
+ssl.EC_POINT_free.restype = None
+ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
+
+ssl.EC_POINT_mul.restype = ctypes.c_int
+ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
+
+# this specifies the curve used with ECDSA.
+NID_secp256k1 = 714 # from openssl/obj_mac.h
+
+SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
+SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2
+
+# Thx to Sam Devlin for the ctypes magic 64-bit fix.
+def _check_result(val, func, args):
+ if val == 0:
+ raise ValueError
+ else:
+ return ctypes.c_void_p(val)
+
+ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
+ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
+
+class CECKey(object):
+ """Wrapper around OpenSSL's EC_KEY"""
+
+ POINT_CONVERSION_COMPRESSED = 2
+ POINT_CONVERSION_UNCOMPRESSED = 4
+
+ def __init__(self):
+ self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)
+
+ def __del__(self):
+ if ssl:
+ ssl.EC_KEY_free(self.k)
+ self.k = None
+
+ def set_secretbytes(self, secret):
+ priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
+ group = ssl.EC_KEY_get0_group(self.k)
+ pub_key = ssl.EC_POINT_new(group)
+ ctx = ssl.BN_CTX_new()
+ if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
+ raise ValueError("Could not derive public key from the supplied secret.")
+ ssl.EC_KEY_set_private_key(self.k, priv_key)
+ ssl.EC_KEY_set_public_key(self.k, pub_key)
+ ssl.EC_POINT_free(pub_key)
+ ssl.BN_CTX_free(ctx)
+ return self.k
+
+ def set_privkey(self, key):
+ self.mb = ctypes.create_string_buffer(key)
+ return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
+
+ def set_pubkey(self, key):
+ self.mb = ctypes.create_string_buffer(key)
+ return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
+
+ def get_privkey(self):
+ size = ssl.i2d_ECPrivateKey(self.k, 0)
+ mb_pri = ctypes.create_string_buffer(size)
+ ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
+ return mb_pri.raw
+
+ def get_pubkey(self):
+ size = ssl.i2o_ECPublicKey(self.k, 0)
+ mb = ctypes.create_string_buffer(size)
+ ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
+ return mb.raw
+
+ def get_raw_ecdh_key(self, other_pubkey):
+ ecdh_keybuffer = ctypes.create_string_buffer(32)
+ r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32,
+ ssl.EC_KEY_get0_public_key(other_pubkey.k),
+ self.k, 0)
+ if r != 32:
+ raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')
+ return ecdh_keybuffer.raw
+
+ def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
+ # FIXME: it is not clear what the default kdf should be
+ r = self.get_raw_ecdh_key(other_pubkey)
+ return kdf(r)
+
+ def sign(self, hash, low_s = True):
+ # FIXME: need unit tests for the cases below
+ if not isinstance(hash, bytes):
+ raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
+ if len(hash) != 32:
+ raise ValueError('Hash must be exactly 32 bytes long')
+
+ sig_size0 = ctypes.c_uint32()
+ sig_size0.value = ssl.ECDSA_size(self.k)
+ mb_sig = ctypes.create_string_buffer(sig_size0.value)
+ result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
+ assert 1 == result
+ assert mb_sig.raw[0] == 0x30
+ assert mb_sig.raw[1] == sig_size0.value - 2
+ total_size = mb_sig.raw[1]
+ assert mb_sig.raw[2] == 2
+ r_size = mb_sig.raw[3]
+ assert mb_sig.raw[4 + r_size] == 2
+ s_size = mb_sig.raw[5 + r_size]
+ s_value = int.from_bytes(mb_sig.raw[6+r_size:6+r_size+s_size], byteorder='big')
+ if (not low_s) or s_value <= SECP256K1_ORDER_HALF:
+ return mb_sig.raw[:sig_size0.value]
+ else:
+ low_s_value = SECP256K1_ORDER - s_value
+ low_s_bytes = (low_s_value).to_bytes(33, byteorder='big')
+ while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80:
+ low_s_bytes = low_s_bytes[1:]
+ new_s_size = len(low_s_bytes)
+ new_total_size_byte = (total_size + new_s_size - s_size).to_bytes(1,byteorder='big')
+ new_s_size_byte = (new_s_size).to_bytes(1,byteorder='big')
+ return b'\x30' + new_total_size_byte + mb_sig.raw[2:5+r_size] + new_s_size_byte + low_s_bytes
+
+ def verify(self, hash, sig):
+ """Verify a DER signature"""
+ return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1
+
+ def set_compressed(self, compressed):
+ if compressed:
+ form = self.POINT_CONVERSION_COMPRESSED
+ else:
+ form = self.POINT_CONVERSION_UNCOMPRESSED
+ ssl.EC_KEY_set_conv_form(self.k, form)
+
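+# A minimal usage sketch (illustrative only; the 32-byte secret is an
+# arbitrary demonstration value below the curve order):
+#
+#   key = CECKey()
+#   key.set_secretbytes(b'\x01' * 32)
+#   sighash = hashlib.sha256(b'message').digest()   # any 32-byte digest
+#   sig = key.sign(sighash)                         # low-S DER signature
+#   assert key.verify(sighash, sig)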
+
+class CPubKey(bytes):
+ """An encapsulated public key
+
+ Attributes:
+
+ is_valid - Corresponds to CPubKey.IsValid()
+ is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
+ is_compressed - Corresponds to CPubKey.IsCompressed()
+ """
+
+ def __new__(cls, buf, _cec_key=None):
+ self = super(CPubKey, cls).__new__(cls, buf)
+ if _cec_key is None:
+ _cec_key = CECKey()
+ self._cec_key = _cec_key
+ self.is_fullyvalid = _cec_key.set_pubkey(self) != 0
+ return self
+
+ @property
+ def is_valid(self):
+ return len(self) > 0
+
+ @property
+ def is_compressed(self):
+ return len(self) == 33
+
+ def verify(self, hash, sig):
+ return self._cec_key.verify(hash, sig)
+
+ def __str__(self):
+ return repr(self)
+
+ def __repr__(self):
+ # Always represent as b'<secret>' so test cases don't have to
+ # change for py2/3
+ if sys.version > '3':
+ return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
+ else:
+ return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
+
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py
new file mode 100755
index 0000000000..aace17a043
--- /dev/null
+++ b/test/functional/test_framework/mininode.py
@@ -0,0 +1,1797 @@
+#!/usr/bin/env python3
+# Copyright (c) 2010 ArtForz -- public domain half-a-node
+# Copyright (c) 2012 Jeff Garzik
+# Copyright (c) 2010-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Bitcoin P2P network half-a-node.
+
+This python code was modified from ArtForz' public domain half-a-node, as
+found in the mini-node branch of http://github.com/jgarzik/pynode.
+
+NodeConn: an object which manages p2p connectivity to a bitcoin node
+NodeConnCB: a base class that describes the interface for receiving
+ callbacks with network messages from a NodeConn
+CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
+ data structures that should map to corresponding structures in
+ bitcoin/primitives
+msg_block, msg_tx, msg_headers, etc.:
+ data structures that represent network messages
+ser_*, deser_*: functions that handle serialization/deserialization
+"""
+
+import struct
+import socket
+import asyncore
+import time
+import sys
+import random
+from .util import hex_str_to_bytes, bytes_to_hex_str
+from io import BytesIO
+from codecs import encode
+import hashlib
+from threading import RLock
+from threading import Thread
+import logging
+import copy
+from .siphash import siphash256
+
+BIP0031_VERSION = 60000
+MY_VERSION = 70014 # past bip-31 for ping/pong
+MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
+MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
+
+MAX_INV_SZ = 50000
+MAX_BLOCK_BASE_SIZE = 1000000
+
+COIN = 100000000 # 1 btc in satoshis
+
+NODE_NETWORK = (1 << 0)
+NODE_GETUTXO = (1 << 1)
+NODE_BLOOM = (1 << 2)
+NODE_WITNESS = (1 << 3)
+
+logger = logging.getLogger("TestFramework.mininode")
+
+# Keep our own socket map for asyncore, so that we can track disconnects
+# ourselves (to work around an issue with closing an asyncore socket when
+# using select)
+mininode_socket_map = dict()
+
+# One lock for synchronizing all data access between the networking thread (see
+# NetworkThread below) and the thread running the test logic. For simplicity,
+# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
+# and whenever adding anything to the send buffer (in send_message()). This
+# lock should be acquired in the thread running the test logic to synchronize
+# access to any data shared with the NodeConnCB or NodeConn.
+mininode_lock = RLock()
+
+# Serialization/deserialization tools
+def sha256(s):
+ return hashlib.new('sha256', s).digest()
+
+def ripemd160(s):
+ return hashlib.new('ripemd160', s).digest()
+
+def hash256(s):
+ return sha256(sha256(s))
+
+def ser_compact_size(l):
+ r = b""
+ if l < 253:
+ r = struct.pack("B", l)
+ elif l < 0x10000:
+ r = struct.pack("<BH", 253, l)
+ elif l < 0x100000000:
+ r = struct.pack("<BI", 254, l)
+ else:
+ r = struct.pack("<BQ", 255, l)
+ return r
+
+def deser_compact_size(f):
+ nit = struct.unpack("<B", f.read(1))[0]
+ if nit == 253:
+ nit = struct.unpack("<H", f.read(2))[0]
+ elif nit == 254:
+ nit = struct.unpack("<I", f.read(4))[0]
+ elif nit == 255:
+ nit = struct.unpack("<Q", f.read(8))[0]
+ return nit
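+
+# CompactSize examples (illustrative): values below 253 are one byte; larger
+# values get a marker byte (253/254/255) plus a 2/4/8-byte little-endian int:
+#   ser_compact_size(252)     == b'\xfc'
+#   ser_compact_size(253)     == b'\xfd\xfd\x00'
+#   ser_compact_size(0x10000) == b'\xfe\x00\x00\x01\x00'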
+
+def deser_string(f):
+ nit = deser_compact_size(f)
+ return f.read(nit)
+
+def ser_string(s):
+ return ser_compact_size(len(s)) + s
+
+def deser_uint256(f):
+ r = 0
+ for i in range(8):
+ t = struct.unpack("<I", f.read(4))[0]
+ r += t << (i * 32)
+ return r
+
+
+def ser_uint256(u):
+ rs = b""
+ for i in range(8):
+ rs += struct.pack("<I", u & 0xFFFFFFFF)
+ u >>= 32
+ return rs
+
+
+def uint256_from_str(s):
+ r = 0
+ t = struct.unpack("<IIIIIIII", s[:32])
+ for i in range(8):
+ r += t[i] << (i * 32)
+ return r
+
+
+def uint256_from_compact(c):
+ nbytes = (c >> 24) & 0xFF
+ v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
+ return v
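+
+# Example (illustrative): the regtest nBits value 0x207fffff has size byte
+# 0x20 (32) and mantissa 0x7fffff, so:
+#   uint256_from_compact(0x207fffff) == 0x7fffff << (8 * 29)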
+
+
+def deser_vector(f, c):
+ nit = deser_compact_size(f)
+ r = []
+ for i in range(nit):
+ t = c()
+ t.deserialize(f)
+ r.append(t)
+ return r
+
+
+# ser_function_name: Allow for an alternate serialization function on the
+# entries in the vector (we use this for serializing the vector of transactions
+# for a witness block).
+def ser_vector(l, ser_function_name=None):
+ r = ser_compact_size(len(l))
+ for i in l:
+ if ser_function_name:
+ r += getattr(i, ser_function_name)()
+ else:
+ r += i.serialize()
+ return r
+
+
+def deser_uint256_vector(f):
+ nit = deser_compact_size(f)
+ r = []
+ for i in range(nit):
+ t = deser_uint256(f)
+ r.append(t)
+ return r
+
+
+def ser_uint256_vector(l):
+ r = ser_compact_size(len(l))
+ for i in l:
+ r += ser_uint256(i)
+ return r
+
+
+def deser_string_vector(f):
+ nit = deser_compact_size(f)
+ r = []
+ for i in range(nit):
+ t = deser_string(f)
+ r.append(t)
+ return r
+
+
+def ser_string_vector(l):
+ r = ser_compact_size(len(l))
+ for sv in l:
+ r += ser_string(sv)
+ return r
+
+
+def deser_int_vector(f):
+ nit = deser_compact_size(f)
+ r = []
+ for i in range(nit):
+ t = struct.unpack("<i", f.read(4))[0]
+ r.append(t)
+ return r
+
+
+def ser_int_vector(l):
+ r = ser_compact_size(len(l))
+ for i in l:
+ r += struct.pack("<i", i)
+ return r
+
+# Deserialize from a hex string representation (e.g. from RPC)
+def FromHex(obj, hex_string):
+ obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
+ return obj
+
+# Convert a binary-serializable object to hex (e.g. for submission via RPC)
+def ToHex(obj):
+ return bytes_to_hex_str(obj.serialize())
+
+# Objects that map to bitcoind objects, which can be serialized/deserialized
+
+class CAddress(object):
+ def __init__(self):
+ self.nServices = 1
+ self.pchReserved = b"\x00" * 10 + b"\xff" * 2
+ self.ip = "0.0.0.0"
+ self.port = 0
+
+ def deserialize(self, f):
+ self.nServices = struct.unpack("<Q", f.read(8))[0]
+ self.pchReserved = f.read(12)
+ self.ip = socket.inet_ntoa(f.read(4))
+ self.port = struct.unpack(">H", f.read(2))[0]
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<Q", self.nServices)
+ r += self.pchReserved
+ r += socket.inet_aton(self.ip)
+ r += struct.pack(">H", self.port)
+ return r
+
+ def __repr__(self):
+ return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
+ self.ip, self.port)
+
+MSG_WITNESS_FLAG = 1<<30
+
+class CInv(object):
+ typemap = {
+ 0: "Error",
+ 1: "TX",
+ 2: "Block",
+ 1|MSG_WITNESS_FLAG: "WitnessTx",
+ 2|MSG_WITNESS_FLAG : "WitnessBlock",
+ 4: "CompactBlock"
+ }
+
+ def __init__(self, t=0, h=0):
+ self.type = t
+ self.hash = h
+
+ def deserialize(self, f):
+ self.type = struct.unpack("<i", f.read(4))[0]
+ self.hash = deser_uint256(f)
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<i", self.type)
+ r += ser_uint256(self.hash)
+ return r
+
+ def __repr__(self):
+ return "CInv(type=%s hash=%064x)" \
+ % (self.typemap[self.type], self.hash)
+
+
+class CBlockLocator(object):
+ def __init__(self):
+ self.nVersion = MY_VERSION
+ self.vHave = []
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ self.vHave = deser_uint256_vector(f)
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<i", self.nVersion)
+ r += ser_uint256_vector(self.vHave)
+ return r
+
+ def __repr__(self):
+ return "CBlockLocator(nVersion=%i vHave=%s)" \
+ % (self.nVersion, repr(self.vHave))
+
+
+class COutPoint(object):
+ def __init__(self, hash=0, n=0):
+ self.hash = hash
+ self.n = n
+
+ def deserialize(self, f):
+ self.hash = deser_uint256(f)
+ self.n = struct.unpack("<I", f.read(4))[0]
+
+ def serialize(self):
+ r = b""
+ r += ser_uint256(self.hash)
+ r += struct.pack("<I", self.n)
+ return r
+
+ def __repr__(self):
+ return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
+
+
+class CTxIn(object):
+ def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
+ if outpoint is None:
+ self.prevout = COutPoint()
+ else:
+ self.prevout = outpoint
+ self.scriptSig = scriptSig
+ self.nSequence = nSequence
+
+ def deserialize(self, f):
+ self.prevout = COutPoint()
+ self.prevout.deserialize(f)
+ self.scriptSig = deser_string(f)
+ self.nSequence = struct.unpack("<I", f.read(4))[0]
+
+ def serialize(self):
+ r = b""
+ r += self.prevout.serialize()
+ r += ser_string(self.scriptSig)
+ r += struct.pack("<I", self.nSequence)
+ return r
+
+ def __repr__(self):
+ return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
+ % (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
+ self.nSequence)
+
+
+class CTxOut(object):
+ def __init__(self, nValue=0, scriptPubKey=b""):
+ self.nValue = nValue
+ self.scriptPubKey = scriptPubKey
+
+ def deserialize(self, f):
+ self.nValue = struct.unpack("<q", f.read(8))[0]
+ self.scriptPubKey = deser_string(f)
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<q", self.nValue)
+ r += ser_string(self.scriptPubKey)
+ return r
+
+ def __repr__(self):
+ return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
+ % (self.nValue // COIN, self.nValue % COIN,
+ bytes_to_hex_str(self.scriptPubKey))
+
+
+class CScriptWitness(object):
+ def __init__(self):
+ # stack is a vector of strings
+ self.stack = []
+
+ def __repr__(self):
+ return "CScriptWitness(%s)" % \
+ (",".join([bytes_to_hex_str(x) for x in self.stack]))
+
+ def is_null(self):
+ if self.stack:
+ return False
+ return True
+
+
+class CTxInWitness(object):
+ def __init__(self):
+ self.scriptWitness = CScriptWitness()
+
+ def deserialize(self, f):
+ self.scriptWitness.stack = deser_string_vector(f)
+
+ def serialize(self):
+ return ser_string_vector(self.scriptWitness.stack)
+
+ def __repr__(self):
+ return repr(self.scriptWitness)
+
+ def is_null(self):
+ return self.scriptWitness.is_null()
+
+
+class CTxWitness(object):
+ def __init__(self):
+ self.vtxinwit = []
+
+ def deserialize(self, f):
+ for i in range(len(self.vtxinwit)):
+ self.vtxinwit[i].deserialize(f)
+
+ def serialize(self):
+ r = b""
+ # This is different from the usual vector serialization --
+ # we omit the length of the vector, which is required to be
+ # the same length as the transaction's vin vector.
+ for x in self.vtxinwit:
+ r += x.serialize()
+ return r
+
+ def __repr__(self):
+ return "CTxWitness(%s)" % \
+ (';'.join([repr(x) for x in self.vtxinwit]))
+
+ def is_null(self):
+ for x in self.vtxinwit:
+ if not x.is_null():
+ return False
+ return True
+
+
+class CTransaction(object):
+ def __init__(self, tx=None):
+ if tx is None:
+ self.nVersion = 1
+ self.vin = []
+ self.vout = []
+ self.wit = CTxWitness()
+ self.nLockTime = 0
+ self.sha256 = None
+ self.hash = None
+ else:
+ self.nVersion = tx.nVersion
+ self.vin = copy.deepcopy(tx.vin)
+ self.vout = copy.deepcopy(tx.vout)
+ self.nLockTime = tx.nLockTime
+ self.sha256 = tx.sha256
+ self.hash = tx.hash
+ self.wit = copy.deepcopy(tx.wit)
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ self.vin = deser_vector(f, CTxIn)
+ flags = 0
+ if len(self.vin) == 0:
+ flags = struct.unpack("<B", f.read(1))[0]
+ # Not sure why flags can't be zero, but this
+ # matches the implementation in bitcoind
+ if (flags != 0):
+ self.vin = deser_vector(f, CTxIn)
+ self.vout = deser_vector(f, CTxOut)
+ else:
+ self.vout = deser_vector(f, CTxOut)
+ if flags != 0:
+ self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
+ self.wit.deserialize(f)
+ self.nLockTime = struct.unpack("<I", f.read(4))[0]
+ self.sha256 = None
+ self.hash = None
+
+ def serialize_without_witness(self):
+ r = b""
+ r += struct.pack("<i", self.nVersion)
+ r += ser_vector(self.vin)
+ r += ser_vector(self.vout)
+ r += struct.pack("<I", self.nLockTime)
+ return r
+
+ # Only serialize with witness when explicitly called for
+ def serialize_with_witness(self):
+ flags = 0
+ if not self.wit.is_null():
+ flags |= 1
+ r = b""
+ r += struct.pack("<i", self.nVersion)
+ if flags:
+ dummy = []
+ r += ser_vector(dummy)
+ r += struct.pack("<B", flags)
+ r += ser_vector(self.vin)
+ r += ser_vector(self.vout)
+ if flags & 1:
+ if (len(self.wit.vtxinwit) != len(self.vin)):
+ # vtxinwit must have the same length as vin
+ self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
+ for i in range(len(self.wit.vtxinwit), len(self.vin)):
+ self.wit.vtxinwit.append(CTxInWitness())
+ r += self.wit.serialize()
+ r += struct.pack("<I", self.nLockTime)
+ return r
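+
+ # The serialization above follows the BIP144 layout:
+ # [nVersion][marker=0x00][flag=0x01][vin][vout][witness stacks][nLockTime]
+ # where the marker byte comes from serializing an empty vin vector.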
+
+ # Regular serialization is without witness -- must explicitly
+ # call serialize_with_witness to include witness data.
+ def serialize(self):
+ return self.serialize_without_witness()
+
+ # Recalculate the txid (transaction hash without witness)
+ def rehash(self):
+ self.sha256 = None
+ self.calc_sha256()
+
+ # We will only cache the serialization without witness in
+ # self.sha256 and self.hash -- those are expected to be the txid.
+ def calc_sha256(self, with_witness=False):
+ if with_witness:
+ # Don't cache the result, just return it
+ return uint256_from_str(hash256(self.serialize_with_witness()))
+
+ if self.sha256 is None:
+ self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
+ self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
+
+ def is_valid(self):
+ self.calc_sha256()
+ for tout in self.vout:
+ if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
+ return False
+ return True
+
+ def __repr__(self):
+ return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
+ % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
+
+
+class CBlockHeader(object):
+ def __init__(self, header=None):
+ if header is None:
+ self.set_null()
+ else:
+ self.nVersion = header.nVersion
+ self.hashPrevBlock = header.hashPrevBlock
+ self.hashMerkleRoot = header.hashMerkleRoot
+ self.nTime = header.nTime
+ self.nBits = header.nBits
+ self.nNonce = header.nNonce
+ self.sha256 = header.sha256
+ self.hash = header.hash
+ self.calc_sha256()
+
+ def set_null(self):
+ self.nVersion = 1
+ self.hashPrevBlock = 0
+ self.hashMerkleRoot = 0
+ self.nTime = 0
+ self.nBits = 0
+ self.nNonce = 0
+ self.sha256 = None
+ self.hash = None
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ self.hashPrevBlock = deser_uint256(f)
+ self.hashMerkleRoot = deser_uint256(f)
+ self.nTime = struct.unpack("<I", f.read(4))[0]
+ self.nBits = struct.unpack("<I", f.read(4))[0]
+ self.nNonce = struct.unpack("<I", f.read(4))[0]
+ self.sha256 = None
+ self.hash = None
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<i", self.nVersion)
+ r += ser_uint256(self.hashPrevBlock)
+ r += ser_uint256(self.hashMerkleRoot)
+ r += struct.pack("<I", self.nTime)
+ r += struct.pack("<I", self.nBits)
+ r += struct.pack("<I", self.nNonce)
+ return r
+
+ def calc_sha256(self):
+ if self.sha256 is None:
+ r = b""
+ r += struct.pack("<i", self.nVersion)
+ r += ser_uint256(self.hashPrevBlock)
+ r += ser_uint256(self.hashMerkleRoot)
+ r += struct.pack("<I", self.nTime)
+ r += struct.pack("<I", self.nBits)
+ r += struct.pack("<I", self.nNonce)
+ self.sha256 = uint256_from_str(hash256(r))
+ self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
+
+ def rehash(self):
+ self.sha256 = None
+ self.calc_sha256()
+ return self.sha256
+
+ def __repr__(self):
+ return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
+ % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
+ time.ctime(self.nTime), self.nBits, self.nNonce)
+
+
+class CBlock(CBlockHeader):
+ def __init__(self, header=None):
+ super(CBlock, self).__init__(header)
+ self.vtx = []
+
+ def deserialize(self, f):
+ super(CBlock, self).deserialize(f)
+ self.vtx = deser_vector(f, CTransaction)
+
+ def serialize(self, with_witness=False):
+ r = b""
+ r += super(CBlock, self).serialize()
+ if with_witness:
+ r += ser_vector(self.vtx, "serialize_with_witness")
+ else:
+ r += ser_vector(self.vtx)
+ return r
+
+ # Calculate the merkle root given a vector of transaction hashes
+ def get_merkle_root(self, hashes):
+ while len(hashes) > 1:
+ newhashes = []
+ for i in range(0, len(hashes), 2):
+ i2 = min(i+1, len(hashes)-1)
+ newhashes.append(hash256(hashes[i] + hashes[i2]))
+ hashes = newhashes
+ return uint256_from_str(hashes[0])
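+
+ # Example (illustrative): with an odd number of hashes the last entry is
+ # paired with itself, so for [a, b, c]:
+ # root = hash256(hash256(a + b) + hash256(c + c))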
+
+ def calc_merkle_root(self):
+ hashes = []
+ for tx in self.vtx:
+ tx.calc_sha256()
+ hashes.append(ser_uint256(tx.sha256))
+ return self.get_merkle_root(hashes)
+
+ def calc_witness_merkle_root(self):
+ # For witness root purposes, the hash of the
+ # coinbase, with witness, is defined to be 0...0
+ hashes = [ser_uint256(0)]
+
+ for tx in self.vtx[1:]:
+ # Calculate the hashes with witness data
+ hashes.append(ser_uint256(tx.calc_sha256(True)))
+
+ return self.get_merkle_root(hashes)
+
+ def is_valid(self):
+ self.calc_sha256()
+ target = uint256_from_compact(self.nBits)
+ if self.sha256 > target:
+ return False
+ for tx in self.vtx:
+ if not tx.is_valid():
+ return False
+ if self.calc_merkle_root() != self.hashMerkleRoot:
+ return False
+ return True
+
+ def solve(self):
+ self.rehash()
+ target = uint256_from_compact(self.nBits)
+ while self.sha256 > target:
+ self.nNonce += 1
+ self.rehash()
+
+ def __repr__(self):
+ return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
+ % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
+ time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
+
+
+class CUnsignedAlert(object):
+ def __init__(self):
+ self.nVersion = 1
+ self.nRelayUntil = 0
+ self.nExpiration = 0
+ self.nID = 0
+ self.nCancel = 0
+ self.setCancel = []
+ self.nMinVer = 0
+ self.nMaxVer = 0
+ self.setSubVer = []
+ self.nPriority = 0
+ self.strComment = b""
+ self.strStatusBar = b""
+ self.strReserved = b""
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
+ self.nExpiration = struct.unpack("<q", f.read(8))[0]
+ self.nID = struct.unpack("<i", f.read(4))[0]
+ self.nCancel = struct.unpack("<i", f.read(4))[0]
+ self.setCancel = deser_int_vector(f)
+ self.nMinVer = struct.unpack("<i", f.read(4))[0]
+ self.nMaxVer = struct.unpack("<i", f.read(4))[0]
+ self.setSubVer = deser_string_vector(f)
+ self.nPriority = struct.unpack("<i", f.read(4))[0]
+ self.strComment = deser_string(f)
+ self.strStatusBar = deser_string(f)
+ self.strReserved = deser_string(f)
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<i", self.nVersion)
+ r += struct.pack("<q", self.nRelayUntil)
+ r += struct.pack("<q", self.nExpiration)
+ r += struct.pack("<i", self.nID)
+ r += struct.pack("<i", self.nCancel)
+ r += ser_int_vector(self.setCancel)
+ r += struct.pack("<i", self.nMinVer)
+ r += struct.pack("<i", self.nMaxVer)
+ r += ser_string_vector(self.setSubVer)
+ r += struct.pack("<i", self.nPriority)
+ r += ser_string(self.strComment)
+ r += ser_string(self.strStatusBar)
+ r += ser_string(self.strReserved)
+ return r
+
+ def __repr__(self):
+ return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
+ % (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
+ self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
+ self.strComment, self.strStatusBar, self.strReserved)
+
+
+class CAlert(object):
+ def __init__(self):
+ self.vchMsg = b""
+ self.vchSig = b""
+
+ def deserialize(self, f):
+ self.vchMsg = deser_string(f)
+ self.vchSig = deser_string(f)
+
+ def serialize(self):
+ r = b""
+ r += ser_string(self.vchMsg)
+ r += ser_string(self.vchSig)
+ return r
+
+ def __repr__(self):
+ return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
+ % (len(self.vchMsg), len(self.vchSig))
+
+
+class PrefilledTransaction(object):
+ def __init__(self, index=0, tx = None):
+ self.index = index
+ self.tx = tx
+
+ def deserialize(self, f):
+ self.index = deser_compact_size(f)
+ self.tx = CTransaction()
+ self.tx.deserialize(f)
+
+ def serialize(self, with_witness=False):
+ r = b""
+ r += ser_compact_size(self.index)
+ if with_witness:
+ r += self.tx.serialize_with_witness()
+ else:
+ r += self.tx.serialize_without_witness()
+ return r
+
+ def serialize_with_witness(self):
+ return self.serialize(with_witness=True)
+
+ def __repr__(self):
+ return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
+
+# This is what we send on the wire, in a cmpctblock message.
+class P2PHeaderAndShortIDs(object):
+ def __init__(self):
+ self.header = CBlockHeader()
+ self.nonce = 0
+ self.shortids_length = 0
+ self.shortids = []
+ self.prefilled_txn_length = 0
+ self.prefilled_txn = []
+
+ def deserialize(self, f):
+ self.header.deserialize(f)
+ self.nonce = struct.unpack("<Q", f.read(8))[0]
+ self.shortids_length = deser_compact_size(f)
+ for i in range(self.shortids_length):
+ # shortids are defined to be 6 bytes in the spec, so append
+ # two zero bytes and read it in as an 8-byte number
+ self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
+ self.prefilled_txn = deser_vector(f, PrefilledTransaction)
+ self.prefilled_txn_length = len(self.prefilled_txn)
+
+ # When using version 2 compact blocks, we must serialize with_witness.
+ def serialize(self, with_witness=False):
+ r = b""
+ r += self.header.serialize()
+ r += struct.pack("<Q", self.nonce)
+ r += ser_compact_size(self.shortids_length)
+ for x in self.shortids:
+ # We only want the first 6 bytes
+ r += struct.pack("<Q", x)[0:6]
+ if with_witness:
+ r += ser_vector(self.prefilled_txn, "serialize_with_witness")
+ else:
+ r += ser_vector(self.prefilled_txn)
+ return r
+
+ def __repr__(self):
+ return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s)" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
+
+# P2P version of the above that will use witness serialization (for compact
+# block version 2)
+class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
+ def serialize(self):
+ return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
+
+# Calculate the BIP 152-compact blocks shortid for a given transaction hash
+def calculate_shortid(k0, k1, tx_hash):
+ expected_shortid = siphash256(k0, k1, tx_hash)
+ expected_shortid &= 0x0000ffffffffffff
+ return expected_shortid
+
+# This version gets rid of the array lengths, and reinterprets the differential
+# encoding into indices that can be used for lookup.
+class HeaderAndShortIDs(object):
+ def __init__(self, p2pheaders_and_shortids = None):
+ self.header = CBlockHeader()
+ self.nonce = 0
+ self.shortids = []
+ self.prefilled_txn = []
+ self.use_witness = False
+
+ if p2pheaders_and_shortids != None:
+ self.header = p2pheaders_and_shortids.header
+ self.nonce = p2pheaders_and_shortids.nonce
+ self.shortids = p2pheaders_and_shortids.shortids
+ last_index = -1
+ for x in p2pheaders_and_shortids.prefilled_txn:
+ self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
+ last_index = self.prefilled_txn[-1].index
+
+ def to_p2p(self):
+ if self.use_witness:
+ ret = P2PHeaderAndShortWitnessIDs()
+ else:
+ ret = P2PHeaderAndShortIDs()
+ ret.header = self.header
+ ret.nonce = self.nonce
+ ret.shortids_length = len(self.shortids)
+ ret.shortids = self.shortids
+ ret.prefilled_txn_length = len(self.prefilled_txn)
+ ret.prefilled_txn = []
+ last_index = -1
+ for x in self.prefilled_txn:
+ ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
+ last_index = x.index
+ return ret
+
+ def get_siphash_keys(self):
+ header_nonce = self.header.serialize()
+ header_nonce += struct.pack("<Q", self.nonce)
+ hash_header_nonce_as_str = sha256(header_nonce)
+ key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
+ key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
+ return [ key0, key1 ]
+
+ # Version 2 compact blocks use wtxid in shortids (rather than txid)
+ def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
+ self.header = CBlockHeader(block)
+ self.nonce = nonce
+ self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
+ self.shortids = []
+ self.use_witness = use_witness
+ [k0, k1] = self.get_siphash_keys()
+ for i in range(len(block.vtx)):
+ if i not in prefill_list:
+ tx_hash = block.vtx[i].sha256
+ if use_witness:
+ tx_hash = block.vtx[i].calc_sha256(with_witness=True)
+ self.shortids.append(calculate_shortid(k0, k1, tx_hash))
+
+ def __repr__(self):
+ return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s)" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
+
+
+class BlockTransactionsRequest(object):
+
+ def __init__(self, blockhash=0, indexes = None):
+ self.blockhash = blockhash
+ self.indexes = indexes if indexes != None else []
+
+ def deserialize(self, f):
+ self.blockhash = deser_uint256(f)
+ indexes_length = deser_compact_size(f)
+ for i in range(indexes_length):
+ self.indexes.append(deser_compact_size(f))
+
+ def serialize(self):
+ r = b""
+ r += ser_uint256(self.blockhash)
+ r += ser_compact_size(len(self.indexes))
+ for x in self.indexes:
+ r += ser_compact_size(x)
+ return r
+
+ # helper to set the differentially encoded indexes from absolute ones
+ def from_absolute(self, absolute_indexes):
+ self.indexes = []
+ last_index = -1
+ for x in absolute_indexes:
+ self.indexes.append(x-last_index-1)
+ last_index = x
+
+ def to_absolute(self):
+ absolute_indexes = []
+ last_index = -1
+ for x in self.indexes:
+ absolute_indexes.append(x+last_index+1)
+ last_index = absolute_indexes[-1]
+ return absolute_indexes
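+
+ # Example (illustrative) of the differential encoding used on the wire:
+ # absolute indexes [1, 4, 6] are sent as [1, 2, 1], since each entry
+ # stores the gap (minus one) from the previous absolute index.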
+
+ def __repr__(self):
+ return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
+
+
+class BlockTransactions(object):
+
+ def __init__(self, blockhash=0, transactions = None):
+ self.blockhash = blockhash
+ self.transactions = transactions if transactions != None else []
+
+ def deserialize(self, f):
+ self.blockhash = deser_uint256(f)
+ self.transactions = deser_vector(f, CTransaction)
+
+ def serialize(self, with_witness=False):
+ r = b""
+ r += ser_uint256(self.blockhash)
+ if with_witness:
+ r += ser_vector(self.transactions, "serialize_with_witness")
+ else:
+ r += ser_vector(self.transactions)
+ return r
+
+ def __repr__(self):
+ return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
+
+
+# Objects that correspond to messages on the wire
+class msg_version(object):
+ command = b"version"
+
+ def __init__(self):
+ self.nVersion = MY_VERSION
+ self.nServices = 1
+ self.nTime = int(time.time())
+ self.addrTo = CAddress()
+ self.addrFrom = CAddress()
+ self.nNonce = random.getrandbits(64)
+ self.strSubVer = MY_SUBVERSION
+ self.nStartingHeight = -1
+ self.nRelay = MY_RELAY
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ if self.nVersion == 10300:
+ self.nVersion = 300
+ self.nServices = struct.unpack("<Q", f.read(8))[0]
+ self.nTime = struct.unpack("<q", f.read(8))[0]
+ self.addrTo = CAddress()
+ self.addrTo.deserialize(f)
+
+ if self.nVersion >= 106:
+ self.addrFrom = CAddress()
+ self.addrFrom.deserialize(f)
+ self.nNonce = struct.unpack("<Q", f.read(8))[0]
+ self.strSubVer = deser_string(f)
+ else:
+ self.addrFrom = None
+ self.nNonce = None
+ self.strSubVer = None
+ self.nStartingHeight = None
+
+ if self.nVersion >= 209:
+ self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
+ else:
+ self.nStartingHeight = None
+
+ if self.nVersion >= 70001:
+ # Relay field is optional for version 70001 onwards
+ try:
+ self.nRelay = struct.unpack("<b", f.read(1))[0]
+ except:
+ self.nRelay = 0
+ else:
+ self.nRelay = 0
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<i", self.nVersion)
+ r += struct.pack("<Q", self.nServices)
+ r += struct.pack("<q", self.nTime)
+ r += self.addrTo.serialize()
+ r += self.addrFrom.serialize()
+ r += struct.pack("<Q", self.nNonce)
+ r += ser_string(self.strSubVer)
+ r += struct.pack("<i", self.nStartingHeight)
+ r += struct.pack("<b", self.nRelay)
+ return r
+
+ def __repr__(self):
+ return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
+ % (self.nVersion, self.nServices, time.ctime(self.nTime),
+ repr(self.addrTo), repr(self.addrFrom), self.nNonce,
+ self.strSubVer, self.nStartingHeight, self.nRelay)
+
+
+class msg_verack(object):
+ command = b"verack"
+
+ def __init__(self):
+ pass
+
+ def deserialize(self, f):
+ pass
+
+ def serialize(self):
+ return b""
+
+ def __repr__(self):
+ return "msg_verack()"
+
+
+class msg_addr(object):
+ command = b"addr"
+
+ def __init__(self):
+ self.addrs = []
+
+ def deserialize(self, f):
+ self.addrs = deser_vector(f, CAddress)
+
+ def serialize(self):
+ return ser_vector(self.addrs)
+
+ def __repr__(self):
+ return "msg_addr(addrs=%s)" % (repr(self.addrs))
+
+
+class msg_alert(object):
+ command = b"alert"
+
+ def __init__(self):
+ self.alert = CAlert()
+
+ def deserialize(self, f):
+ self.alert = CAlert()
+ self.alert.deserialize(f)
+
+ def serialize(self):
+ r = b""
+ r += self.alert.serialize()
+ return r
+
+ def __repr__(self):
+ return "msg_alert(alert=%s)" % (repr(self.alert), )
+
+
+class msg_inv(object):
+ command = b"inv"
+
+ def __init__(self, inv=None):
+ if inv is None:
+ self.inv = []
+ else:
+ self.inv = inv
+
+ def deserialize(self, f):
+ self.inv = deser_vector(f, CInv)
+
+ def serialize(self):
+ return ser_vector(self.inv)
+
+ def __repr__(self):
+ return "msg_inv(inv=%s)" % (repr(self.inv))
+
+
+class msg_getdata(object):
+ command = b"getdata"
+
+ def __init__(self, inv=None):
+ self.inv = inv if inv != None else []
+
+ def deserialize(self, f):
+ self.inv = deser_vector(f, CInv)
+
+ def serialize(self):
+ return ser_vector(self.inv)
+
+ def __repr__(self):
+ return "msg_getdata(inv=%s)" % (repr(self.inv))
+
+
+class msg_getblocks(object):
+ command = b"getblocks"
+
+ def __init__(self):
+ self.locator = CBlockLocator()
+ self.hashstop = 0
+
+ def deserialize(self, f):
+ self.locator = CBlockLocator()
+ self.locator.deserialize(f)
+ self.hashstop = deser_uint256(f)
+
+ def serialize(self):
+ r = b""
+ r += self.locator.serialize()
+ r += ser_uint256(self.hashstop)
+ return r
+
+ def __repr__(self):
+ return "msg_getblocks(locator=%s hashstop=%064x)" \
+ % (repr(self.locator), self.hashstop)
+
+
+class msg_tx(object):
+ command = b"tx"
+
+ def __init__(self, tx=None):
+ # Avoid a mutable default argument: a shared default CTransaction
+ # would be mutated by deserialize() across all msg_tx instances.
+ self.tx = tx if tx is not None else CTransaction()
+
+ def deserialize(self, f):
+ self.tx.deserialize(f)
+
+ def serialize(self):
+ return self.tx.serialize_without_witness()
+
+ def __repr__(self):
+ return "msg_tx(tx=%s)" % (repr(self.tx))
+
+class msg_witness_tx(msg_tx):
+
+ def serialize(self):
+ return self.tx.serialize_with_witness()
+
+
+class msg_block(object):
+ command = b"block"
+
+ def __init__(self, block=None):
+ if block is None:
+ self.block = CBlock()
+ else:
+ self.block = block
+
+ def deserialize(self, f):
+ self.block.deserialize(f)
+
+ def serialize(self):
+ return self.block.serialize()
+
+ def __repr__(self):
+ return "msg_block(block=%s)" % (repr(self.block))
+
+# For cases where a user needs tighter control over what is sent over the wire.
+# Note that the user must supply the command name and the raw payload data.
+class msg_generic(object):
+ def __init__(self, command, data=None):
+ self.command = command
+ self.data = data
+
+ def serialize(self):
+ return self.data
+
+ def __repr__(self):
+ return "msg_generic()"
+
+class msg_witness_block(msg_block):
+
+ def serialize(self):
+ r = self.block.serialize(with_witness=True)
+ return r
+
+class msg_getaddr(object):
+ command = b"getaddr"
+
+ def __init__(self):
+ pass
+
+ def deserialize(self, f):
+ pass
+
+ def serialize(self):
+ return b""
+
+ def __repr__(self):
+ return "msg_getaddr()"
+
+
+class msg_ping_prebip31(object):
+ command = b"ping"
+
+ def __init__(self):
+ pass
+
+ def deserialize(self, f):
+ pass
+
+ def serialize(self):
+ return b""
+
+ def __repr__(self):
+ return "msg_ping() (pre-bip31)"
+
+
+class msg_ping(object):
+ command = b"ping"
+
+ def __init__(self, nonce=0):
+ self.nonce = nonce
+
+ def deserialize(self, f):
+ self.nonce = struct.unpack("<Q", f.read(8))[0]
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<Q", self.nonce)
+ return r
+
+ def __repr__(self):
+ return "msg_ping(nonce=%08x)" % self.nonce
+
+
+class msg_pong(object):
+ command = b"pong"
+
+ def __init__(self, nonce=0):
+ self.nonce = nonce
+
+ def deserialize(self, f):
+ self.nonce = struct.unpack("<Q", f.read(8))[0]
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<Q", self.nonce)
+ return r
+
+ def __repr__(self):
+ return "msg_pong(nonce=%08x)" % self.nonce
+
+
+class msg_mempool(object):
+ command = b"mempool"
+
+ def __init__(self):
+ pass
+
+ def deserialize(self, f):
+ pass
+
+ def serialize(self):
+ return b""
+
+ def __repr__(self):
+ return "msg_mempool()"
+
+class msg_sendheaders(object):
+ command = b"sendheaders"
+
+ def __init__(self):
+ pass
+
+ def deserialize(self, f):
+ pass
+
+ def serialize(self):
+ return b""
+
+ def __repr__(self):
+ return "msg_sendheaders()"
+
+
+# getheaders message has
+# number of entries
+# vector of hashes
+# hash_stop (hash of last desired block header, 0 to get as many as possible)
+class msg_getheaders(object):
+ command = b"getheaders"
+
+ def __init__(self):
+ self.locator = CBlockLocator()
+ self.hashstop = 0
+
+ def deserialize(self, f):
+ self.locator = CBlockLocator()
+ self.locator.deserialize(f)
+ self.hashstop = deser_uint256(f)
+
+ def serialize(self):
+ r = b""
+ r += self.locator.serialize()
+ r += ser_uint256(self.hashstop)
+ return r
+
+ def __repr__(self):
+ return "msg_getheaders(locator=%s, stop=%064x)" \
+ % (repr(self.locator), self.hashstop)
+
+
+# headers message has
+# <count> <vector of block headers>
+class msg_headers(object):
+ command = b"headers"
+
+ def __init__(self):
+ self.headers = []
+
+ def deserialize(self, f):
+ # comment in bitcoind indicates these should be deserialized as blocks
+ blocks = deser_vector(f, CBlock)
+ for x in blocks:
+ self.headers.append(CBlockHeader(x))
+
+ def serialize(self):
+ blocks = [CBlock(x) for x in self.headers]
+ return ser_vector(blocks)
+
+ def __repr__(self):
+ return "msg_headers(headers=%s)" % repr(self.headers)
+
+
+class msg_reject(object):
+ command = b"reject"
+ REJECT_MALFORMED = 1
+
+ def __init__(self):
+ self.message = b""
+ self.code = 0
+ self.reason = b""
+ self.data = 0
+
+ def deserialize(self, f):
+ self.message = deser_string(f)
+ self.code = struct.unpack("<B", f.read(1))[0]
+ self.reason = deser_string(f)
+ if (self.code != self.REJECT_MALFORMED and
+ (self.message == b"block" or self.message == b"tx")):
+ self.data = deser_uint256(f)
+
+ def serialize(self):
+ r = ser_string(self.message)
+ r += struct.pack("<B", self.code)
+ r += ser_string(self.reason)
+ if (self.code != self.REJECT_MALFORMED and
+ (self.message == b"block" or self.message == b"tx")):
+ r += ser_uint256(self.data)
+ return r
+
+ def __repr__(self):
+ return "msg_reject: %s %d %s [%064x]" \
+ % (self.message, self.code, self.reason, self.data)
+
+# Helper function: poll a predicate until it returns True, or until the
+# attempt/timeout budget is exhausted.
+def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf')):
+ attempt = 0
+ elapsed = 0
+
+ while attempt < attempts and elapsed < timeout:
+ with mininode_lock:
+ if predicate():
+ return True
+ attempt += 1
+ elapsed += 0.05
+ time.sleep(0.05)
+
+ return False
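+
+# Note that `elapsed` above is approximated by counting 0.05s sleeps rather
+# than reading a clock. A typical (illustrative) call from a test:
+#
+#   assert wait_until(lambda: node.verack_received, timeout=10)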
+
+class msg_feefilter(object):
+ command = b"feefilter"
+
+ def __init__(self, feerate=0):
+ self.feerate = feerate
+
+ def deserialize(self, f):
+ self.feerate = struct.unpack("<Q", f.read(8))[0]
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<Q", self.feerate)
+ return r
+
+ def __repr__(self):
+ return "msg_feefilter(feerate=%08x)" % self.feerate
+
+class msg_sendcmpct(object):
+ command = b"sendcmpct"
+
+ def __init__(self):
+ self.announce = False
+ self.version = 1
+
+ def deserialize(self, f):
+ self.announce = struct.unpack("<?", f.read(1))[0]
+ self.version = struct.unpack("<Q", f.read(8))[0]
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<?", self.announce)
+ r += struct.pack("<Q", self.version)
+ return r
+
+ def __repr__(self):
+ return "msg_sendcmpct(announce=%s, version=%d)" % (self.announce, self.version)
+
+class msg_cmpctblock(object):
+ command = b"cmpctblock"
+
+ def __init__(self, header_and_shortids = None):
+ self.header_and_shortids = header_and_shortids
+
+ def deserialize(self, f):
+ self.header_and_shortids = P2PHeaderAndShortIDs()
+ self.header_and_shortids.deserialize(f)
+
+ def serialize(self):
+ r = b""
+ r += self.header_and_shortids.serialize()
+ return r
+
+ def __repr__(self):
+ return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
+
+class msg_getblocktxn(object):
+ command = b"getblocktxn"
+
+ def __init__(self):
+ self.block_txn_request = None
+
+ def deserialize(self, f):
+ self.block_txn_request = BlockTransactionsRequest()
+ self.block_txn_request.deserialize(f)
+
+ def serialize(self):
+ r = b""
+ r += self.block_txn_request.serialize()
+ return r
+
+ def __repr__(self):
+ return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
+
+class msg_blocktxn(object):
+ command = b"blocktxn"
+
+ def __init__(self):
+ self.block_transactions = BlockTransactions()
+
+ def deserialize(self, f):
+ self.block_transactions.deserialize(f)
+
+ def serialize(self):
+ r = b""
+ r += self.block_transactions.serialize()
+ return r
+
+ def __repr__(self):
+ return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
+
+class msg_witness_blocktxn(msg_blocktxn):
+ def serialize(self):
+ r = b""
+ r += self.block_transactions.serialize(with_witness=True)
+ return r
+
+# This is what a callback should look like for NodeConn
+# Reimplement the on_* functions to provide handling for events
+class NodeConnCB(object):
+ def __init__(self):
+ self.verack_received = False
+ # deliver_sleep_time is helpful for debugging race conditions in p2p
+ # tests; it causes message delivery to sleep for the specified time
+ # before acquiring the global lock and delivering the next message.
+ self.deliver_sleep_time = None
+ # Remember the services our peer has advertised
+ self.peer_services = None
+
+ def set_deliver_sleep_time(self, value):
+ with mininode_lock:
+ self.deliver_sleep_time = value
+
+ def get_deliver_sleep_time(self):
+ with mininode_lock:
+ return self.deliver_sleep_time
+
+ # Spin until verack message is received from the node.
+ # Tests may want to use this as a signal that the test can begin.
+ # This can be called from the testing thread, so it needs to acquire the
+ # global lock.
+ def wait_for_verack(self):
+ while True:
+ with mininode_lock:
+ if self.verack_received:
+ return
+ time.sleep(0.05)
+
+ def deliver(self, conn, message):
+ deliver_sleep = self.get_deliver_sleep_time()
+ if deliver_sleep is not None:
+ time.sleep(deliver_sleep)
+ with mininode_lock:
+ try:
+ getattr(self, 'on_' + message.command.decode('ascii'))(conn, message)
+ except:
+ logger.exception("ERROR delivering %s" % repr(message))
+
+ def on_version(self, conn, message):
+ if message.nVersion >= 209:
+ conn.send_message(msg_verack())
+ conn.ver_send = min(MY_VERSION, message.nVersion)
+ if message.nVersion < 209:
+ conn.ver_recv = conn.ver_send
+ conn.nServices = message.nServices
+
+ def on_verack(self, conn, message):
+ conn.ver_recv = conn.ver_send
+ self.verack_received = True
+
+ def on_inv(self, conn, message):
+ want = msg_getdata()
+ for i in message.inv:
+ if i.type != 0:
+ want.inv.append(i)
+ if len(want.inv):
+ conn.send_message(want)
+
+ def on_addr(self, conn, message): pass
+ def on_alert(self, conn, message): pass
+ def on_getdata(self, conn, message): pass
+ def on_getblocks(self, conn, message): pass
+ def on_tx(self, conn, message): pass
+ def on_block(self, conn, message): pass
+ def on_getaddr(self, conn, message): pass
+ def on_headers(self, conn, message): pass
+ def on_getheaders(self, conn, message): pass
+ def on_ping(self, conn, message):
+ if conn.ver_send > BIP0031_VERSION:
+ conn.send_message(msg_pong(message.nonce))
+ def on_reject(self, conn, message): pass
+ def on_open(self, conn): pass
+ def on_close(self, conn): pass
+ def on_mempool(self, conn): pass
+ def on_pong(self, conn, message): pass
+ def on_feefilter(self, conn, message): pass
+ def on_sendheaders(self, conn, message): pass
+ def on_sendcmpct(self, conn, message): pass
+ def on_cmpctblock(self, conn, message): pass
+ def on_getblocktxn(self, conn, message): pass
+ def on_blocktxn(self, conn, message): pass
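+
+# A minimal subclass sketch (illustrative): collect every tx the node relays.
+#
+#   class TxCollector(NodeConnCB):
+#       def __init__(self):
+#           super().__init__()
+#           self.txs = []
+#       def on_tx(self, conn, message):
+#           self.txs.append(message.tx)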
+
+# More useful callbacks and functions for NodeConnCBs that have a single NodeConn
+class SingleNodeConnCB(NodeConnCB):
+ def __init__(self):
+ NodeConnCB.__init__(self)
+ self.connection = None
+ self.ping_counter = 1
+ self.last_pong = msg_pong()
+
+ def add_connection(self, conn):
+ self.connection = conn
+
+ # Wrapper for the NodeConn's send_message function
+ def send_message(self, message):
+ self.connection.send_message(message)
+
+ def send_and_ping(self, message):
+ self.send_message(message)
+ self.sync_with_ping()
+
+ def on_pong(self, conn, message):
+ self.last_pong = message
+
+ # Sync up with the node
+ def sync_with_ping(self, timeout=30):
+ def received_pong():
+ return (self.last_pong.nonce == self.ping_counter)
+ self.send_message(msg_ping(nonce=self.ping_counter))
+ success = wait_until(received_pong, timeout=timeout)
+ self.ping_counter += 1
+ return success
+
+# The actual NodeConn class
+# This class provides an interface for a p2p connection to a specified node
+class NodeConn(asyncore.dispatcher):
+ messagemap = {
+ b"version": msg_version,
+ b"verack": msg_verack,
+ b"addr": msg_addr,
+ b"alert": msg_alert,
+ b"inv": msg_inv,
+ b"getdata": msg_getdata,
+ b"getblocks": msg_getblocks,
+ b"tx": msg_tx,
+ b"block": msg_block,
+ b"getaddr": msg_getaddr,
+ b"ping": msg_ping,
+ b"pong": msg_pong,
+ b"headers": msg_headers,
+ b"getheaders": msg_getheaders,
+ b"reject": msg_reject,
+ b"mempool": msg_mempool,
+ b"feefilter": msg_feefilter,
+ b"sendheaders": msg_sendheaders,
+ b"sendcmpct": msg_sendcmpct,
+ b"cmpctblock": msg_cmpctblock,
+ b"getblocktxn": msg_getblocktxn,
+ b"blocktxn": msg_blocktxn
+ }
+ MAGIC_BYTES = {
+ "mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
+ "testnet3": b"\x0b\x11\x09\x07", # testnet3
+ "regtest": b"\xfa\xbf\xb5\xda", # regtest
+ }
+
+ def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
+ asyncore.dispatcher.__init__(self, map=mininode_socket_map)
+ self.dstaddr = dstaddr
+ self.dstport = dstport
+ self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.sendbuf = b""
+ self.recvbuf = b""
+ self.ver_send = 209
+ self.ver_recv = 209
+ self.last_sent = 0
+ self.state = "connecting"
+ self.network = net
+ self.cb = callback
+ self.disconnect = False
+ self.nServices = 0
+
+ if send_version:
+ # stuff version msg into sendbuf
+ vt = msg_version()
+ vt.nServices = services
+ vt.addrTo.ip = self.dstaddr
+ vt.addrTo.port = self.dstport
+ vt.addrFrom.ip = "0.0.0.0"
+ vt.addrFrom.port = 0
+ self.send_message(vt, True)
+
+ logger.info('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
+
+ try:
+ self.connect((dstaddr, dstport))
+ except:
+ self.handle_close()
+ self.rpc = rpc
+
+ def handle_connect(self):
+ if self.state != "connected":
+ logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
+ self.state = "connected"
+ self.cb.on_open(self)
+
+ def handle_close(self):
+ logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
+ self.state = "closed"
+ self.recvbuf = b""
+ self.sendbuf = b""
+ try:
+ self.close()
+ except:
+ pass
+ self.cb.on_close(self)
+
+ def handle_read(self):
+ try:
+ t = self.recv(8192)
+ if len(t) > 0:
+ self.recvbuf += t
+ self.got_data()
+ except:
+ pass
+
+ def readable(self):
+ return True
+
+ def writable(self):
+ with mininode_lock:
+ pre_connection = self.state == "connecting"
+ length = len(self.sendbuf)
+ return (length > 0 or pre_connection)
+
+ def handle_write(self):
+ with mininode_lock:
+ # asyncore does not expose the socket's connection state; it only
+ # reports the first read/write event, so we must check here to know
+ # when we have actually connected
+ if self.state == "connecting":
+ self.handle_connect()
+ if not self.writable():
+ return
+
+ try:
+ sent = self.send(self.sendbuf)
+ except:
+ self.handle_close()
+ return
+ self.sendbuf = self.sendbuf[sent:]
+
+ def got_data(self):
+ try:
+ while True:
+ if len(self.recvbuf) < 4:
+ return
+ if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
+ raise ValueError("got garbage %s" % repr(self.recvbuf))
+ if self.ver_recv < 209:
+ if len(self.recvbuf) < 4 + 12 + 4:
+ return
+ command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
+ msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
+ checksum = None
+ if len(self.recvbuf) < 4 + 12 + 4 + msglen:
+ return
+ msg = self.recvbuf[4+12+4:4+12+4+msglen]
+ self.recvbuf = self.recvbuf[4+12+4+msglen:]
+ else:
+ if len(self.recvbuf) < 4 + 12 + 4 + 4:
+ return
+ command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
+ msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
+ checksum = self.recvbuf[4+12+4:4+12+4+4]
+ if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
+ return
+ msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
+ th = sha256(msg)
+ h = sha256(th)
+ if checksum != h[:4]:
+ raise ValueError("got bad checksum " + repr(self.recvbuf))
+ self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
+ if command in self.messagemap:
+ f = BytesIO(msg)
+ t = self.messagemap[command]()
+ t.deserialize(f)
+ self.got_message(t)
+ else:
+ logger.warning("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
+ except Exception as e:
+ logger.exception('got_data: %s', repr(e))
+
+ def send_message(self, message, pushbuf=False):
+ if self.state != "connected" and not pushbuf:
+ raise IOError('Not connected, no pushbuf')
+ logger.debug("Send message to %s:%d: %s" % (self.dstaddr, self.dstport, repr(message)))
+ command = message.command
+ data = message.serialize()
+ tmsg = self.MAGIC_BYTES[self.network]
+ tmsg += command
+ tmsg += b"\x00" * (12 - len(command))
+ tmsg += struct.pack("<I", len(data))
+ if self.ver_send >= 209:
+ th = sha256(data)
+ h = sha256(th)
+ tmsg += h[:4]
+ tmsg += data
+ with mininode_lock:
+ self.sendbuf += tmsg
+ self.last_sent = time.time()
+
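+ # Editor's note: the envelope built by send_message() is the standard
+ # P2P frame (a sketch of the layout):
+ #
+ #   magic(4) | command(12, NUL-padded) | length(4, LE) | checksum(4) | payload
+ #
+ # where checksum = sha256(sha256(payload))[:4] (omitted for pre-209 versions).
+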
+ def got_message(self, message):
+ if message.command == b"version":
+ if message.nVersion <= BIP0031_VERSION:
+ self.messagemap[b'ping'] = msg_ping_prebip31
+ if self.last_sent + 30 * 60 < time.time():
+ self.send_message(self.messagemap[b'ping']())
+ logger.debug("Received message from %s:%d: %s" % (self.dstaddr, self.dstport, repr(message)))
+ self.cb.deliver(self, message)
+
+ def disconnect_node(self):
+ self.disconnect = True
+
+
+class NetworkThread(Thread):
+ def run(self):
+ while mininode_socket_map:
+ # We check for whether to disconnect outside of the asyncore
+ # loop to workaround the behavior of asyncore when using
+ # select
+ disconnected = []
+ for fd, obj in mininode_socket_map.items():
+ if obj.disconnect:
+ disconnected.append(obj)
+ [ obj.handle_close() for obj in disconnected ]
+ asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
+
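+# Editor's note: intended lifecycle (a sketch; `connections` is illustrative):
+#
+#     thread = NetworkThread()
+#     thread.start()                  # runs until mininode_socket_map empties
+#     ...                             # test body drives its NodeConn objects
+#     for conn in connections:
+#         conn.disconnect_node()      # flagged here, closed inside run()
+#     thread.join()
+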
+
+# An exception we can raise if we detect a potential disconnect
+# (p2p or rpc) before the test is complete
+class EarlyDisconnectError(Exception):
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return repr(self.value)
diff --git a/test/functional/test_framework/netutil.py b/test/functional/test_framework/netutil.py
new file mode 100644
index 0000000000..45d8e22d22
--- /dev/null
+++ b/test/functional/test_framework/netutil.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Linux network utilities.
+
+Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
+"""
+
+import sys
+import socket
+import fcntl
+import struct
+import array
+import os
+from binascii import unhexlify, hexlify
+
+STATE_ESTABLISHED = '01'
+STATE_SYN_SENT = '02'
+STATE_SYN_RECV = '03'
+STATE_FIN_WAIT1 = '04'
+STATE_FIN_WAIT2 = '05'
+STATE_TIME_WAIT = '06'
+STATE_CLOSE = '07'
+STATE_CLOSE_WAIT = '08'
+STATE_LAST_ACK = '09'
+STATE_LISTEN = '0A'
+STATE_CLOSING = '0B'
+
+def get_socket_inodes(pid):
+ '''
+ Get list of socket inodes for process pid.
+ '''
+ base = '/proc/%i/fd' % pid
+ inodes = []
+ for item in os.listdir(base):
+ target = os.readlink(os.path.join(base, item))
+ if target.startswith('socket:'):
+ inodes.append(int(target[8:-1]))
+ return inodes
+
+def _remove_empty(array):
+ return [x for x in array if x !='']
+
+def _convert_ip_port(array):
+ host,port = array.split(':')
+ # convert host from mangled-per-four-bytes form as used by kernel
+ host = unhexlify(host)
+ host_out = ''
+ for x in range(0, len(host) // 4):
+ (val,) = struct.unpack('=I', host[x*4:(x+1)*4])
+ host_out += '%08x' % val
+
+ return host_out,int(port,16)
+
+def netstat(typ='tcp'):
+ '''
+ Return a list with the status of TCP connections on Linux systems.
+ To get the pid of all network processes running on the system, this
+ script must be run as superuser.
+ '''
+ with open('/proc/net/'+typ,'r',encoding='utf8') as f:
+ content = f.readlines()
+ content.pop(0)
+ result = []
+ for line in content:
+ line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces.
+ tcp_id = line_array[0]
+ l_addr = _convert_ip_port(line_array[1])
+ r_addr = _convert_ip_port(line_array[2])
+ state = line_array[3]
+ inode = int(line_array[9]) # Need the inode to match with process pid.
+ nline = [tcp_id, l_addr, r_addr, state, inode]
+ result.append(nline)
+ return result
+
+def get_bind_addrs(pid):
+ '''
+ Get bind addresses as (host,port) tuples for process pid.
+ '''
+ inodes = get_socket_inodes(pid)
+ bind_addrs = []
+ for conn in netstat('tcp') + netstat('tcp6'):
+ if conn[3] == STATE_LISTEN and conn[4] in inodes:
+ bind_addrs.append(conn[1])
+ return bind_addrs
+
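+# Editor's note: illustrative check (a sketch; `pid` is the pid of a running
+# bitcoind, addr_to_hex() is defined below, p2p_port() comes from util.py):
+#
+#     assert (addr_to_hex('127.0.0.1'), p2p_port(0)) in get_bind_addrs(pid)
+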
+# from: http://code.activestate.com/recipes/439093/
+def all_interfaces():
+ '''
+ Return all interfaces that are up
+ '''
+ is_64bits = sys.maxsize > 2**32
+ struct_size = 40 if is_64bits else 32
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ max_possible = 8 # initial value
+ while True:
+ bytes = max_possible * struct_size
+ names = array.array('B', b'\0' * bytes)
+ outbytes = struct.unpack('iL', fcntl.ioctl(
+ s.fileno(),
+ 0x8912, # SIOCGIFCONF
+ struct.pack('iL', bytes, names.buffer_info()[0])
+ ))[0]
+ if outbytes == bytes:
+ max_possible *= 2
+ else:
+ break
+ namestr = names.tobytes()
+ return [(namestr[i:i+16].split(b'\0', 1)[0],
+ socket.inet_ntoa(namestr[i+20:i+24]))
+ for i in range(0, outbytes, struct_size)]
+
+def addr_to_hex(addr):
+ '''
+ Convert string IPv4 or IPv6 address to binary address as returned by
+ get_bind_addrs.
+ Very naive implementation that certainly doesn't work for all IPv6 variants.
+ '''
+ if '.' in addr: # IPv4
+ addr = [int(x) for x in addr.split('.')]
+ elif ':' in addr: # IPv6
+ sub = [[], []] # prefix, suffix
+ x = 0
+ addr = addr.split(':')
+ for i,comp in enumerate(addr):
+ if comp == '':
+ if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
+ continue
+ x += 1 # :: skips to suffix
+ assert(x < 2)
+ else: # two bytes per component
+ val = int(comp, 16)
+ sub[x].append(val >> 8)
+ sub[x].append(val & 0xff)
+ nullbytes = 16 - len(sub[0]) - len(sub[1])
+ assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
+ addr = sub[0] + ([0] * nullbytes) + sub[1]
+ else:
+ raise ValueError('Could not parse address %s' % addr)
+ return hexlify(bytearray(addr)).decode('ascii')
+
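+# Editor's note: expected conversions (editor's sketch):
+#
+#     addr_to_hex('127.0.0.1')  ->  '7f000001'
+#     addr_to_hex('::1')        ->  '00000000000000000000000000000001'
+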
+def test_ipv6_local():
+ '''
+ Check for (local) IPv6 support.
+ '''
+ import socket
+ # By using SOCK_DGRAM this will not actually make a connection, but it will
+ # fail if there is no route to IPv6 localhost.
+ have_ipv6 = True
+ try:
+ s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
+ s.connect(('::1', 0))
+ except socket.error:
+ have_ipv6 = False
+ return have_ipv6
diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py
new file mode 100644
index 0000000000..3d9572788e
--- /dev/null
+++ b/test/functional/test_framework/script.py
@@ -0,0 +1,939 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Functionality to build scripts, as well as SignatureHash().
+
+This file is modified from python-bitcoinlib.
+"""
+
+from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string
+from binascii import hexlify
+import hashlib
+
+import sys
+bchr = chr
+bord = ord
+if sys.version > '3':
+ long = int
+ bchr = lambda x: bytes([x])
+ bord = lambda x: x
+
+import struct
+
+from .bignum import bn2vch
+
+MAX_SCRIPT_SIZE = 10000
+MAX_SCRIPT_ELEMENT_SIZE = 520
+MAX_SCRIPT_OPCODES = 201
+
+OPCODE_NAMES = {}
+
+def hash160(s):
+ return hashlib.new('ripemd160', sha256(s)).digest()
+
+
+_opcode_instances = []
+class CScriptOp(int):
+ """A single script opcode"""
+ __slots__ = []
+
+ @staticmethod
+ def encode_op_pushdata(d):
+ """Encode a PUSHDATA op, returning bytes"""
+ if len(d) < 0x4c:
+ return b'' + bchr(len(d)) + d # OP_PUSHDATA
+ elif len(d) <= 0xff:
+ return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1
+ elif len(d) <= 0xffff:
+ return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
+ elif len(d) <= 0xffffffff:
+ return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
+ else:
+ raise ValueError("Data too long to encode in a PUSHDATA op")
+
+ @staticmethod
+ def encode_op_n(n):
+ """Encode a small integer op, returning an opcode"""
+ if not (0 <= n <= 16):
+ raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
+
+ if n == 0:
+ return OP_0
+ else:
+ return CScriptOp(OP_1 + n-1)
+
+ def decode_op_n(self):
+ """Decode a small integer opcode, returning an integer"""
+ if self == OP_0:
+ return 0
+
+ if not (self == OP_0 or OP_1 <= self <= OP_16):
+ raise ValueError('op %r is not an OP_N' % self)
+
+ return int(self - OP_1+1)
+
+ def is_small_int(self):
+ """Return true if the op pushes a small integer to the stack"""
+ if 0x51 <= self <= 0x60 or self == 0:
+ return True
+ else:
+ return False
+
+ def __str__(self):
+ return repr(self)
+
+ def __repr__(self):
+ if self in OPCODE_NAMES:
+ return OPCODE_NAMES[self]
+ else:
+ return 'CScriptOp(0x%x)' % self
+
+ def __new__(cls, n):
+ try:
+ return _opcode_instances[n]
+ except IndexError:
+ assert len(_opcode_instances) == n
+ _opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
+ return _opcode_instances[n]
+
+# Populate opcode instance table
+for n in range(0xff+1):
+ CScriptOp(n)
+
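+# Editor's note: quick illustration of the helpers above (editor's sketch):
+#
+#     CScriptOp.encode_op_n(5)                   ->  OP_5
+#     OP_5.decode_op_n()                         ->  5
+#     CScriptOp.encode_op_pushdata(b'\x01\x02')  ->  b'\x02\x01\x02'
+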
+
+# push value
+OP_0 = CScriptOp(0x00)
+OP_FALSE = OP_0
+OP_PUSHDATA1 = CScriptOp(0x4c)
+OP_PUSHDATA2 = CScriptOp(0x4d)
+OP_PUSHDATA4 = CScriptOp(0x4e)
+OP_1NEGATE = CScriptOp(0x4f)
+OP_RESERVED = CScriptOp(0x50)
+OP_1 = CScriptOp(0x51)
+OP_TRUE=OP_1
+OP_2 = CScriptOp(0x52)
+OP_3 = CScriptOp(0x53)
+OP_4 = CScriptOp(0x54)
+OP_5 = CScriptOp(0x55)
+OP_6 = CScriptOp(0x56)
+OP_7 = CScriptOp(0x57)
+OP_8 = CScriptOp(0x58)
+OP_9 = CScriptOp(0x59)
+OP_10 = CScriptOp(0x5a)
+OP_11 = CScriptOp(0x5b)
+OP_12 = CScriptOp(0x5c)
+OP_13 = CScriptOp(0x5d)
+OP_14 = CScriptOp(0x5e)
+OP_15 = CScriptOp(0x5f)
+OP_16 = CScriptOp(0x60)
+
+# control
+OP_NOP = CScriptOp(0x61)
+OP_VER = CScriptOp(0x62)
+OP_IF = CScriptOp(0x63)
+OP_NOTIF = CScriptOp(0x64)
+OP_VERIF = CScriptOp(0x65)
+OP_VERNOTIF = CScriptOp(0x66)
+OP_ELSE = CScriptOp(0x67)
+OP_ENDIF = CScriptOp(0x68)
+OP_VERIFY = CScriptOp(0x69)
+OP_RETURN = CScriptOp(0x6a)
+
+# stack ops
+OP_TOALTSTACK = CScriptOp(0x6b)
+OP_FROMALTSTACK = CScriptOp(0x6c)
+OP_2DROP = CScriptOp(0x6d)
+OP_2DUP = CScriptOp(0x6e)
+OP_3DUP = CScriptOp(0x6f)
+OP_2OVER = CScriptOp(0x70)
+OP_2ROT = CScriptOp(0x71)
+OP_2SWAP = CScriptOp(0x72)
+OP_IFDUP = CScriptOp(0x73)
+OP_DEPTH = CScriptOp(0x74)
+OP_DROP = CScriptOp(0x75)
+OP_DUP = CScriptOp(0x76)
+OP_NIP = CScriptOp(0x77)
+OP_OVER = CScriptOp(0x78)
+OP_PICK = CScriptOp(0x79)
+OP_ROLL = CScriptOp(0x7a)
+OP_ROT = CScriptOp(0x7b)
+OP_SWAP = CScriptOp(0x7c)
+OP_TUCK = CScriptOp(0x7d)
+
+# splice ops
+OP_CAT = CScriptOp(0x7e)
+OP_SUBSTR = CScriptOp(0x7f)
+OP_LEFT = CScriptOp(0x80)
+OP_RIGHT = CScriptOp(0x81)
+OP_SIZE = CScriptOp(0x82)
+
+# bit logic
+OP_INVERT = CScriptOp(0x83)
+OP_AND = CScriptOp(0x84)
+OP_OR = CScriptOp(0x85)
+OP_XOR = CScriptOp(0x86)
+OP_EQUAL = CScriptOp(0x87)
+OP_EQUALVERIFY = CScriptOp(0x88)
+OP_RESERVED1 = CScriptOp(0x89)
+OP_RESERVED2 = CScriptOp(0x8a)
+
+# numeric
+OP_1ADD = CScriptOp(0x8b)
+OP_1SUB = CScriptOp(0x8c)
+OP_2MUL = CScriptOp(0x8d)
+OP_2DIV = CScriptOp(0x8e)
+OP_NEGATE = CScriptOp(0x8f)
+OP_ABS = CScriptOp(0x90)
+OP_NOT = CScriptOp(0x91)
+OP_0NOTEQUAL = CScriptOp(0x92)
+
+OP_ADD = CScriptOp(0x93)
+OP_SUB = CScriptOp(0x94)
+OP_MUL = CScriptOp(0x95)
+OP_DIV = CScriptOp(0x96)
+OP_MOD = CScriptOp(0x97)
+OP_LSHIFT = CScriptOp(0x98)
+OP_RSHIFT = CScriptOp(0x99)
+
+OP_BOOLAND = CScriptOp(0x9a)
+OP_BOOLOR = CScriptOp(0x9b)
+OP_NUMEQUAL = CScriptOp(0x9c)
+OP_NUMEQUALVERIFY = CScriptOp(0x9d)
+OP_NUMNOTEQUAL = CScriptOp(0x9e)
+OP_LESSTHAN = CScriptOp(0x9f)
+OP_GREATERTHAN = CScriptOp(0xa0)
+OP_LESSTHANOREQUAL = CScriptOp(0xa1)
+OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
+OP_MIN = CScriptOp(0xa3)
+OP_MAX = CScriptOp(0xa4)
+
+OP_WITHIN = CScriptOp(0xa5)
+
+# crypto
+OP_RIPEMD160 = CScriptOp(0xa6)
+OP_SHA1 = CScriptOp(0xa7)
+OP_SHA256 = CScriptOp(0xa8)
+OP_HASH160 = CScriptOp(0xa9)
+OP_HASH256 = CScriptOp(0xaa)
+OP_CODESEPARATOR = CScriptOp(0xab)
+OP_CHECKSIG = CScriptOp(0xac)
+OP_CHECKSIGVERIFY = CScriptOp(0xad)
+OP_CHECKMULTISIG = CScriptOp(0xae)
+OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
+
+# expansion
+OP_NOP1 = CScriptOp(0xb0)
+OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
+OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
+OP_NOP4 = CScriptOp(0xb3)
+OP_NOP5 = CScriptOp(0xb4)
+OP_NOP6 = CScriptOp(0xb5)
+OP_NOP7 = CScriptOp(0xb6)
+OP_NOP8 = CScriptOp(0xb7)
+OP_NOP9 = CScriptOp(0xb8)
+OP_NOP10 = CScriptOp(0xb9)
+
+# template matching params
+OP_SMALLINTEGER = CScriptOp(0xfa)
+OP_PUBKEYS = CScriptOp(0xfb)
+OP_PUBKEYHASH = CScriptOp(0xfd)
+OP_PUBKEY = CScriptOp(0xfe)
+
+OP_INVALIDOPCODE = CScriptOp(0xff)
+
+VALID_OPCODES = {
+ OP_1NEGATE,
+ OP_RESERVED,
+ OP_1,
+ OP_2,
+ OP_3,
+ OP_4,
+ OP_5,
+ OP_6,
+ OP_7,
+ OP_8,
+ OP_9,
+ OP_10,
+ OP_11,
+ OP_12,
+ OP_13,
+ OP_14,
+ OP_15,
+ OP_16,
+
+ OP_NOP,
+ OP_VER,
+ OP_IF,
+ OP_NOTIF,
+ OP_VERIF,
+ OP_VERNOTIF,
+ OP_ELSE,
+ OP_ENDIF,
+ OP_VERIFY,
+ OP_RETURN,
+
+ OP_TOALTSTACK,
+ OP_FROMALTSTACK,
+ OP_2DROP,
+ OP_2DUP,
+ OP_3DUP,
+ OP_2OVER,
+ OP_2ROT,
+ OP_2SWAP,
+ OP_IFDUP,
+ OP_DEPTH,
+ OP_DROP,
+ OP_DUP,
+ OP_NIP,
+ OP_OVER,
+ OP_PICK,
+ OP_ROLL,
+ OP_ROT,
+ OP_SWAP,
+ OP_TUCK,
+
+ OP_CAT,
+ OP_SUBSTR,
+ OP_LEFT,
+ OP_RIGHT,
+ OP_SIZE,
+
+ OP_INVERT,
+ OP_AND,
+ OP_OR,
+ OP_XOR,
+ OP_EQUAL,
+ OP_EQUALVERIFY,
+ OP_RESERVED1,
+ OP_RESERVED2,
+
+ OP_1ADD,
+ OP_1SUB,
+ OP_2MUL,
+ OP_2DIV,
+ OP_NEGATE,
+ OP_ABS,
+ OP_NOT,
+ OP_0NOTEQUAL,
+
+ OP_ADD,
+ OP_SUB,
+ OP_MUL,
+ OP_DIV,
+ OP_MOD,
+ OP_LSHIFT,
+ OP_RSHIFT,
+
+ OP_BOOLAND,
+ OP_BOOLOR,
+ OP_NUMEQUAL,
+ OP_NUMEQUALVERIFY,
+ OP_NUMNOTEQUAL,
+ OP_LESSTHAN,
+ OP_GREATERTHAN,
+ OP_LESSTHANOREQUAL,
+ OP_GREATERTHANOREQUAL,
+ OP_MIN,
+ OP_MAX,
+
+ OP_WITHIN,
+
+ OP_RIPEMD160,
+ OP_SHA1,
+ OP_SHA256,
+ OP_HASH160,
+ OP_HASH256,
+ OP_CODESEPARATOR,
+ OP_CHECKSIG,
+ OP_CHECKSIGVERIFY,
+ OP_CHECKMULTISIG,
+ OP_CHECKMULTISIGVERIFY,
+
+ OP_NOP1,
+ OP_CHECKLOCKTIMEVERIFY,
+ OP_CHECKSEQUENCEVERIFY,
+ OP_NOP4,
+ OP_NOP5,
+ OP_NOP6,
+ OP_NOP7,
+ OP_NOP8,
+ OP_NOP9,
+ OP_NOP10,
+
+ OP_SMALLINTEGER,
+ OP_PUBKEYS,
+ OP_PUBKEYHASH,
+ OP_PUBKEY,
+}
+
+OPCODE_NAMES.update({
+ OP_0 : 'OP_0',
+ OP_PUSHDATA1 : 'OP_PUSHDATA1',
+ OP_PUSHDATA2 : 'OP_PUSHDATA2',
+ OP_PUSHDATA4 : 'OP_PUSHDATA4',
+ OP_1NEGATE : 'OP_1NEGATE',
+ OP_RESERVED : 'OP_RESERVED',
+ OP_1 : 'OP_1',
+ OP_2 : 'OP_2',
+ OP_3 : 'OP_3',
+ OP_4 : 'OP_4',
+ OP_5 : 'OP_5',
+ OP_6 : 'OP_6',
+ OP_7 : 'OP_7',
+ OP_8 : 'OP_8',
+ OP_9 : 'OP_9',
+ OP_10 : 'OP_10',
+ OP_11 : 'OP_11',
+ OP_12 : 'OP_12',
+ OP_13 : 'OP_13',
+ OP_14 : 'OP_14',
+ OP_15 : 'OP_15',
+ OP_16 : 'OP_16',
+ OP_NOP : 'OP_NOP',
+ OP_VER : 'OP_VER',
+ OP_IF : 'OP_IF',
+ OP_NOTIF : 'OP_NOTIF',
+ OP_VERIF : 'OP_VERIF',
+ OP_VERNOTIF : 'OP_VERNOTIF',
+ OP_ELSE : 'OP_ELSE',
+ OP_ENDIF : 'OP_ENDIF',
+ OP_VERIFY : 'OP_VERIFY',
+ OP_RETURN : 'OP_RETURN',
+ OP_TOALTSTACK : 'OP_TOALTSTACK',
+ OP_FROMALTSTACK : 'OP_FROMALTSTACK',
+ OP_2DROP : 'OP_2DROP',
+ OP_2DUP : 'OP_2DUP',
+ OP_3DUP : 'OP_3DUP',
+ OP_2OVER : 'OP_2OVER',
+ OP_2ROT : 'OP_2ROT',
+ OP_2SWAP : 'OP_2SWAP',
+ OP_IFDUP : 'OP_IFDUP',
+ OP_DEPTH : 'OP_DEPTH',
+ OP_DROP : 'OP_DROP',
+ OP_DUP : 'OP_DUP',
+ OP_NIP : 'OP_NIP',
+ OP_OVER : 'OP_OVER',
+ OP_PICK : 'OP_PICK',
+ OP_ROLL : 'OP_ROLL',
+ OP_ROT : 'OP_ROT',
+ OP_SWAP : 'OP_SWAP',
+ OP_TUCK : 'OP_TUCK',
+ OP_CAT : 'OP_CAT',
+ OP_SUBSTR : 'OP_SUBSTR',
+ OP_LEFT : 'OP_LEFT',
+ OP_RIGHT : 'OP_RIGHT',
+ OP_SIZE : 'OP_SIZE',
+ OP_INVERT : 'OP_INVERT',
+ OP_AND : 'OP_AND',
+ OP_OR : 'OP_OR',
+ OP_XOR : 'OP_XOR',
+ OP_EQUAL : 'OP_EQUAL',
+ OP_EQUALVERIFY : 'OP_EQUALVERIFY',
+ OP_RESERVED1 : 'OP_RESERVED1',
+ OP_RESERVED2 : 'OP_RESERVED2',
+ OP_1ADD : 'OP_1ADD',
+ OP_1SUB : 'OP_1SUB',
+ OP_2MUL : 'OP_2MUL',
+ OP_2DIV : 'OP_2DIV',
+ OP_NEGATE : 'OP_NEGATE',
+ OP_ABS : 'OP_ABS',
+ OP_NOT : 'OP_NOT',
+ OP_0NOTEQUAL : 'OP_0NOTEQUAL',
+ OP_ADD : 'OP_ADD',
+ OP_SUB : 'OP_SUB',
+ OP_MUL : 'OP_MUL',
+ OP_DIV : 'OP_DIV',
+ OP_MOD : 'OP_MOD',
+ OP_LSHIFT : 'OP_LSHIFT',
+ OP_RSHIFT : 'OP_RSHIFT',
+ OP_BOOLAND : 'OP_BOOLAND',
+ OP_BOOLOR : 'OP_BOOLOR',
+ OP_NUMEQUAL : 'OP_NUMEQUAL',
+ OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
+ OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
+ OP_LESSTHAN : 'OP_LESSTHAN',
+ OP_GREATERTHAN : 'OP_GREATERTHAN',
+ OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
+ OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
+ OP_MIN : 'OP_MIN',
+ OP_MAX : 'OP_MAX',
+ OP_WITHIN : 'OP_WITHIN',
+ OP_RIPEMD160 : 'OP_RIPEMD160',
+ OP_SHA1 : 'OP_SHA1',
+ OP_SHA256 : 'OP_SHA256',
+ OP_HASH160 : 'OP_HASH160',
+ OP_HASH256 : 'OP_HASH256',
+ OP_CODESEPARATOR : 'OP_CODESEPARATOR',
+ OP_CHECKSIG : 'OP_CHECKSIG',
+ OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
+ OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
+ OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
+ OP_NOP1 : 'OP_NOP1',
+ OP_CHECKLOCKTIMEVERIFY : 'OP_CHECKLOCKTIMEVERIFY',
+ OP_CHECKSEQUENCEVERIFY : 'OP_CHECKSEQUENCEVERIFY',
+ OP_NOP4 : 'OP_NOP4',
+ OP_NOP5 : 'OP_NOP5',
+ OP_NOP6 : 'OP_NOP6',
+ OP_NOP7 : 'OP_NOP7',
+ OP_NOP8 : 'OP_NOP8',
+ OP_NOP9 : 'OP_NOP9',
+ OP_NOP10 : 'OP_NOP10',
+ OP_SMALLINTEGER : 'OP_SMALLINTEGER',
+ OP_PUBKEYS : 'OP_PUBKEYS',
+ OP_PUBKEYHASH : 'OP_PUBKEYHASH',
+ OP_PUBKEY : 'OP_PUBKEY',
+ OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
+})
+
+OPCODES_BY_NAME = {
+ 'OP_0' : OP_0,
+ 'OP_PUSHDATA1' : OP_PUSHDATA1,
+ 'OP_PUSHDATA2' : OP_PUSHDATA2,
+ 'OP_PUSHDATA4' : OP_PUSHDATA4,
+ 'OP_1NEGATE' : OP_1NEGATE,
+ 'OP_RESERVED' : OP_RESERVED,
+ 'OP_1' : OP_1,
+ 'OP_2' : OP_2,
+ 'OP_3' : OP_3,
+ 'OP_4' : OP_4,
+ 'OP_5' : OP_5,
+ 'OP_6' : OP_6,
+ 'OP_7' : OP_7,
+ 'OP_8' : OP_8,
+ 'OP_9' : OP_9,
+ 'OP_10' : OP_10,
+ 'OP_11' : OP_11,
+ 'OP_12' : OP_12,
+ 'OP_13' : OP_13,
+ 'OP_14' : OP_14,
+ 'OP_15' : OP_15,
+ 'OP_16' : OP_16,
+ 'OP_NOP' : OP_NOP,
+ 'OP_VER' : OP_VER,
+ 'OP_IF' : OP_IF,
+ 'OP_NOTIF' : OP_NOTIF,
+ 'OP_VERIF' : OP_VERIF,
+ 'OP_VERNOTIF' : OP_VERNOTIF,
+ 'OP_ELSE' : OP_ELSE,
+ 'OP_ENDIF' : OP_ENDIF,
+ 'OP_VERIFY' : OP_VERIFY,
+ 'OP_RETURN' : OP_RETURN,
+ 'OP_TOALTSTACK' : OP_TOALTSTACK,
+ 'OP_FROMALTSTACK' : OP_FROMALTSTACK,
+ 'OP_2DROP' : OP_2DROP,
+ 'OP_2DUP' : OP_2DUP,
+ 'OP_3DUP' : OP_3DUP,
+ 'OP_2OVER' : OP_2OVER,
+ 'OP_2ROT' : OP_2ROT,
+ 'OP_2SWAP' : OP_2SWAP,
+ 'OP_IFDUP' : OP_IFDUP,
+ 'OP_DEPTH' : OP_DEPTH,
+ 'OP_DROP' : OP_DROP,
+ 'OP_DUP' : OP_DUP,
+ 'OP_NIP' : OP_NIP,
+ 'OP_OVER' : OP_OVER,
+ 'OP_PICK' : OP_PICK,
+ 'OP_ROLL' : OP_ROLL,
+ 'OP_ROT' : OP_ROT,
+ 'OP_SWAP' : OP_SWAP,
+ 'OP_TUCK' : OP_TUCK,
+ 'OP_CAT' : OP_CAT,
+ 'OP_SUBSTR' : OP_SUBSTR,
+ 'OP_LEFT' : OP_LEFT,
+ 'OP_RIGHT' : OP_RIGHT,
+ 'OP_SIZE' : OP_SIZE,
+ 'OP_INVERT' : OP_INVERT,
+ 'OP_AND' : OP_AND,
+ 'OP_OR' : OP_OR,
+ 'OP_XOR' : OP_XOR,
+ 'OP_EQUAL' : OP_EQUAL,
+ 'OP_EQUALVERIFY' : OP_EQUALVERIFY,
+ 'OP_RESERVED1' : OP_RESERVED1,
+ 'OP_RESERVED2' : OP_RESERVED2,
+ 'OP_1ADD' : OP_1ADD,
+ 'OP_1SUB' : OP_1SUB,
+ 'OP_2MUL' : OP_2MUL,
+ 'OP_2DIV' : OP_2DIV,
+ 'OP_NEGATE' : OP_NEGATE,
+ 'OP_ABS' : OP_ABS,
+ 'OP_NOT' : OP_NOT,
+ 'OP_0NOTEQUAL' : OP_0NOTEQUAL,
+ 'OP_ADD' : OP_ADD,
+ 'OP_SUB' : OP_SUB,
+ 'OP_MUL' : OP_MUL,
+ 'OP_DIV' : OP_DIV,
+ 'OP_MOD' : OP_MOD,
+ 'OP_LSHIFT' : OP_LSHIFT,
+ 'OP_RSHIFT' : OP_RSHIFT,
+ 'OP_BOOLAND' : OP_BOOLAND,
+ 'OP_BOOLOR' : OP_BOOLOR,
+ 'OP_NUMEQUAL' : OP_NUMEQUAL,
+ 'OP_NUMEQUALVERIFY' : OP_NUMEQUALVERIFY,
+ 'OP_NUMNOTEQUAL' : OP_NUMNOTEQUAL,
+ 'OP_LESSTHAN' : OP_LESSTHAN,
+ 'OP_GREATERTHAN' : OP_GREATERTHAN,
+ 'OP_LESSTHANOREQUAL' : OP_LESSTHANOREQUAL,
+ 'OP_GREATERTHANOREQUAL' : OP_GREATERTHANOREQUAL,
+ 'OP_MIN' : OP_MIN,
+ 'OP_MAX' : OP_MAX,
+ 'OP_WITHIN' : OP_WITHIN,
+ 'OP_RIPEMD160' : OP_RIPEMD160,
+ 'OP_SHA1' : OP_SHA1,
+ 'OP_SHA256' : OP_SHA256,
+ 'OP_HASH160' : OP_HASH160,
+ 'OP_HASH256' : OP_HASH256,
+ 'OP_CODESEPARATOR' : OP_CODESEPARATOR,
+ 'OP_CHECKSIG' : OP_CHECKSIG,
+ 'OP_CHECKSIGVERIFY' : OP_CHECKSIGVERIFY,
+ 'OP_CHECKMULTISIG' : OP_CHECKMULTISIG,
+ 'OP_CHECKMULTISIGVERIFY' : OP_CHECKMULTISIGVERIFY,
+ 'OP_NOP1' : OP_NOP1,
+ 'OP_CHECKLOCKTIMEVERIFY' : OP_CHECKLOCKTIMEVERIFY,
+ 'OP_CHECKSEQUENCEVERIFY' : OP_CHECKSEQUENCEVERIFY,
+ 'OP_NOP4' : OP_NOP4,
+ 'OP_NOP5' : OP_NOP5,
+ 'OP_NOP6' : OP_NOP6,
+ 'OP_NOP7' : OP_NOP7,
+ 'OP_NOP8' : OP_NOP8,
+ 'OP_NOP9' : OP_NOP9,
+ 'OP_NOP10' : OP_NOP10,
+ 'OP_SMALLINTEGER' : OP_SMALLINTEGER,
+ 'OP_PUBKEYS' : OP_PUBKEYS,
+ 'OP_PUBKEYHASH' : OP_PUBKEYHASH,
+ 'OP_PUBKEY' : OP_PUBKEY,
+}
+
+class CScriptInvalidError(Exception):
+ """Base class for CScript exceptions"""
+ pass
+
+class CScriptTruncatedPushDataError(CScriptInvalidError):
+ """Invalid pushdata due to truncation"""
+ def __init__(self, msg, data):
+ self.data = data
+ super(CScriptTruncatedPushDataError, self).__init__(msg)
+
+# This is used, e.g., for blockchain heights in coinbase scripts (BIP 34)
+class CScriptNum(object):
+ def __init__(self, d=0):
+ self.value = d
+
+ @staticmethod
+ def encode(obj):
+ r = bytearray(0)
+ if obj.value == 0:
+ return bytes(r)
+ neg = obj.value < 0
+ absvalue = -obj.value if neg else obj.value
+ while (absvalue):
+ r.append(absvalue & 0xff)
+ absvalue >>= 8
+ if r[-1] & 0x80:
+ r.append(0x80 if neg else 0)
+ elif neg:
+ r[-1] |= 0x80
+ return bytes(bchr(len(r)) + r)
+
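+# Editor's note: CScriptNum.encode() produces the minimal signed
+# little-endian form, length-prefixed as a push; expected outputs (sketch):
+#
+#     CScriptNum.encode(CScriptNum(0))    ->  b''
+#     CScriptNum.encode(CScriptNum(1))    ->  b'\x01\x01'
+#     CScriptNum.encode(CScriptNum(-1))   ->  b'\x01\x81'
+#     CScriptNum.encode(CScriptNum(128))  ->  b'\x02\x80\x00'
+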
+
+class CScript(bytes):
+ """Serialized script
+
+ A bytes subclass, so you can use this directly whenever bytes are accepted.
+ Note that this means that indexing does *not* work - you'll get an index by
+ byte rather than opcode. This format was chosen for efficiency so that the
+ general case would not require creating a lot of little CScriptOp objects.
+
+ iter(script) however does iterate by opcode.
+ """
+ @classmethod
+ def __coerce_instance(cls, other):
+ # Coerce other into bytes
+ if isinstance(other, CScriptOp):
+ other = bchr(other)
+ elif isinstance(other, CScriptNum):
+ if (other.value == 0):
+ other = bchr(CScriptOp(OP_0))
+ else:
+ other = CScriptNum.encode(other)
+ elif isinstance(other, int):
+ if 0 <= other <= 16:
+ other = bytes(bchr(CScriptOp.encode_op_n(other)))
+ elif other == -1:
+ other = bytes(bchr(OP_1NEGATE))
+ else:
+ other = CScriptOp.encode_op_pushdata(bn2vch(other))
+ elif isinstance(other, (bytes, bytearray)):
+ other = CScriptOp.encode_op_pushdata(other)
+ return other
+
+ def __add__(self, other):
+ # Do the coercion outside of the try block so that errors in it are
+ # noticed.
+ other = self.__coerce_instance(other)
+
+ try:
+ # bytes.__add__ always returns bytes instances unfortunately
+ return CScript(super(CScript, self).__add__(other))
+ except TypeError:
+ raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
+
+ def join(self, iterable):
+ # join makes no sense for a CScript()
+ raise NotImplementedError
+
+ def __new__(cls, value=b''):
+ if isinstance(value, bytes) or isinstance(value, bytearray):
+ return super(CScript, cls).__new__(cls, value)
+ else:
+ def coerce_iterable(iterable):
+ for instance in iterable:
+ yield cls.__coerce_instance(instance)
+ # Annoyingly on both python2 and python3 bytes.join() always
+ # returns a bytes instance even when subclassed.
+ return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
+
+ def raw_iter(self):
+ """Raw iteration
+
+ Yields tuples of (opcode, data, sop_idx) so that the different possible
+ PUSHDATA encodings can be accurately distinguished, as well as the
+ exact start-of-opcode byte indexes (sop_idx).
+ """
+ i = 0
+ while i < len(self):
+ sop_idx = i
+ opcode = bord(self[i])
+ i += 1
+
+ if opcode > OP_PUSHDATA4:
+ yield (opcode, None, sop_idx)
+ else:
+ datasize = None
+ pushdata_type = None
+ if opcode < OP_PUSHDATA1:
+ pushdata_type = 'PUSHDATA(%d)' % opcode
+ datasize = opcode
+
+ elif opcode == OP_PUSHDATA1:
+ pushdata_type = 'PUSHDATA1'
+ if i >= len(self):
+ raise CScriptInvalidError('PUSHDATA1: missing data length')
+ datasize = bord(self[i])
+ i += 1
+
+ elif opcode == OP_PUSHDATA2:
+ pushdata_type = 'PUSHDATA2'
+ if i + 1 >= len(self):
+ raise CScriptInvalidError('PUSHDATA2: missing data length')
+ datasize = bord(self[i]) + (bord(self[i+1]) << 8)
+ i += 2
+
+ elif opcode == OP_PUSHDATA4:
+ pushdata_type = 'PUSHDATA4'
+ if i + 3 >= len(self):
+ raise CScriptInvalidError('PUSHDATA4: missing data length')
+ datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
+ i += 4
+
+ else:
+ assert False # shouldn't happen
+
+
+ data = bytes(self[i:i+datasize])
+
+ # Check for truncation
+ if len(data) < datasize:
+ raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
+
+ i += datasize
+
+ yield (opcode, data, sop_idx)
+
+ def __iter__(self):
+ """'Cooked' iteration
+
+ Returns either a CScriptOp instance, an integer, or bytes, as
+ appropriate.
+
+ See raw_iter() if you need to distinguish the different possible
+ PUSHDATA encodings.
+ """
+ for (opcode, data, sop_idx) in self.raw_iter():
+ if data is not None:
+ yield data
+ else:
+ opcode = CScriptOp(opcode)
+
+ if opcode.is_small_int():
+ yield opcode.decode_op_n()
+ else:
+ yield opcode
+
+ def __repr__(self):
+ # Show byte strings in x('<hex>') form so the repr is stable and
+ # testcases don't need to change
+ def _repr(o):
+ if isinstance(o, bytes):
+ return "x('%s')" % hexlify(o).decode('ascii')
+ else:
+ return repr(o)
+
+ ops = []
+ i = iter(self)
+ while True:
+ op = None
+ try:
+ op = _repr(next(i))
+ except CScriptTruncatedPushDataError as err:
+ op = '%s...<ERROR: %s>' % (_repr(err.data), err)
+ break
+ except CScriptInvalidError as err:
+ op = '<ERROR: %s>' % err
+ break
+ except StopIteration:
+ break
+ finally:
+ if op is not None:
+ ops.append(op)
+
+ return "CScript([%s])" % ', '.join(ops)
+
+ def GetSigOpCount(self, fAccurate):
+ """Get the SigOp count.
+
+ fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
+
+ Note that this is consensus-critical.
+ """
+ n = 0
+ lastOpcode = OP_INVALIDOPCODE
+ for (opcode, data, sop_idx) in self.raw_iter():
+ if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
+ n += 1
+ elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
+ if fAccurate and (OP_1 <= lastOpcode <= OP_16):
+ n += opcode.decode_op_n()
+ else:
+ n += 20
+ lastOpcode = opcode
+ return n
+
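+# Editor's note: construction/iteration sketch using names from this file:
+#
+#     p2pkh = CScript([OP_DUP, OP_HASH160, b'\x00' * 20, OP_EQUALVERIFY,
+#                      OP_CHECKSIG])
+#     list(p2pkh)                           # opcodes, with pushes as bytes
+#     p2pkh.GetSigOpCount(fAccurate=False)  ->  1
+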
+
+SIGHASH_ALL = 1
+SIGHASH_NONE = 2
+SIGHASH_SINGLE = 3
+SIGHASH_ANYONECANPAY = 0x80
+
+def FindAndDelete(script, sig):
+ """Consensus critical, see FindAndDelete() in Satoshi codebase"""
+ r = b''
+ last_sop_idx = sop_idx = 0
+ skip = True
+ for (opcode, data, sop_idx) in script.raw_iter():
+ if not skip:
+ r += script[last_sop_idx:sop_idx]
+ last_sop_idx = sop_idx
+ if script[sop_idx:sop_idx + len(sig)] == sig:
+ skip = True
+ else:
+ skip = False
+ if not skip:
+ r += script[last_sop_idx:]
+ return CScript(r)
+
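+# Editor's note: behavioral sketch; matched pushes of `sig` are dropped:
+#
+#     s = CScript([OP_1, b'\xaa', OP_2])
+#     FindAndDelete(s, CScript([b'\xaa']))  ->  CScript([OP_1, OP_2])
+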
+
+def SignatureHash(script, txTo, inIdx, hashtype):
+ """Consensus-correct SignatureHash
+
+ Returns (hash, err) to precisely match the consensus-critical behavior of
+ the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
+ """
+ HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+
+ if inIdx >= len(txTo.vin):
+ return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
+ txtmp = CTransaction(txTo)
+
+ for txin in txtmp.vin:
+ txin.scriptSig = b''
+ txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
+
+ if (hashtype & 0x1f) == SIGHASH_NONE:
+ txtmp.vout = []
+
+ for i in range(len(txtmp.vin)):
+ if i != inIdx:
+ txtmp.vin[i].nSequence = 0
+
+ elif (hashtype & 0x1f) == SIGHASH_SINGLE:
+ outIdx = inIdx
+ if outIdx >= len(txtmp.vout):
+ return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
+
+ tmp = txtmp.vout[outIdx]
+ txtmp.vout = []
+ for i in range(outIdx):
+ txtmp.vout.append(CTxOut(-1))
+ txtmp.vout.append(tmp)
+
+ for i in range(len(txtmp.vin)):
+ if i != inIdx:
+ txtmp.vin[i].nSequence = 0
+
+ if hashtype & SIGHASH_ANYONECANPAY:
+ tmp = txtmp.vin[inIdx]
+ txtmp.vin = []
+ txtmp.vin.append(tmp)
+
+ s = txtmp.serialize()
+ s += struct.pack(b"<I", hashtype)
+
+ hash = hash256(s)
+
+ return (hash, None)
+
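+# Editor's note: a hedged usage sketch (assumes a CTransaction `tx` whose
+# input 0 spends an output locked by `prev_script`):
+#
+#     (sighash, err) = SignatureHash(prev_script, tx, 0, SIGHASH_ALL)
+#     assert err is None   # sighash is the 32-byte digest that gets signed
+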
+# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
+# Performance optimization probably not necessary for python tests, however.
+# Note that this corresponds to sigversion == 1 in EvalScript, which is used
+# for version 0 witnesses.
+def SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, amount):
+
+ hashPrevouts = 0
+ hashSequence = 0
+ hashOutputs = 0
+
+ if not (hashtype & SIGHASH_ANYONECANPAY):
+ serialize_prevouts = bytes()
+ for i in txTo.vin:
+ serialize_prevouts += i.prevout.serialize()
+ hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
+
+ if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
+ serialize_sequence = bytes()
+ for i in txTo.vin:
+ serialize_sequence += struct.pack("<I", i.nSequence)
+ hashSequence = uint256_from_str(hash256(serialize_sequence))
+
+ if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
+ serialize_outputs = bytes()
+ for o in txTo.vout:
+ serialize_outputs += o.serialize()
+ hashOutputs = uint256_from_str(hash256(serialize_outputs))
+ elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
+ serialize_outputs = txTo.vout[inIdx].serialize()
+ hashOutputs = uint256_from_str(hash256(serialize_outputs))
+
+ ss = bytes()
+ ss += struct.pack("<i", txTo.nVersion)
+ ss += ser_uint256(hashPrevouts)
+ ss += ser_uint256(hashSequence)
+ ss += txTo.vin[inIdx].prevout.serialize()
+ ss += ser_string(script)
+ ss += struct.pack("<q", amount)
+ ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
+ ss += ser_uint256(hashOutputs)
+ ss += struct.pack("<i", txTo.nLockTime)
+ ss += struct.pack("<I", hashtype)
+
+ return hash256(ss)
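+
+# Editor's note: the BIP143 digest above is used analogously (a sketch;
+# `script_code` and `amount` follow BIP143's definitions, amount in satoshis):
+#
+#     sighash = SegwitVersion1SignatureHash(script_code, tx, 0, SIGHASH_ALL,
+#                                           amount)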
diff --git a/test/functional/test_framework/siphash.py b/test/functional/test_framework/siphash.py
new file mode 100644
index 0000000000..f68ecad36b
--- /dev/null
+++ b/test/functional/test_framework/siphash.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Specialized SipHash-2-4 implementations.
+
+This implements SipHash-2-4 for 256-bit integers.
+"""
+
+def rotl64(n, b):
+ return n >> (64 - b) | (n & ((1 << (64 - b)) - 1)) << b
+
+def siphash_round(v0, v1, v2, v3):
+ v0 = (v0 + v1) & ((1 << 64) - 1)
+ v1 = rotl64(v1, 13)
+ v1 ^= v0
+ v0 = rotl64(v0, 32)
+ v2 = (v2 + v3) & ((1 << 64) - 1)
+ v3 = rotl64(v3, 16)
+ v3 ^= v2
+ v0 = (v0 + v3) & ((1 << 64) - 1)
+ v3 = rotl64(v3, 21)
+ v3 ^= v0
+ v2 = (v2 + v1) & ((1 << 64) - 1)
+ v1 = rotl64(v1, 17)
+ v1 ^= v2
+ v2 = rotl64(v2, 32)
+ return (v0, v1, v2, v3)
+
+def siphash256(k0, k1, h):
+ n0 = h & ((1 << 64) - 1)
+ n1 = (h >> 64) & ((1 << 64) - 1)
+ n2 = (h >> 128) & ((1 << 64) - 1)
+ n3 = (h >> 192) & ((1 << 64) - 1)
+ v0 = 0x736f6d6570736575 ^ k0
+ v1 = 0x646f72616e646f6d ^ k1
+ v2 = 0x6c7967656e657261 ^ k0
+ v3 = 0x7465646279746573 ^ k1 ^ n0
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= n0
+ v3 ^= n1
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= n1
+ v3 ^= n2
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= n2
+ v3 ^= n3
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= n3
+ v3 ^= 0x2000000000000000
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= 0x2000000000000000
+ v2 ^= 0xFF
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ return v0 ^ v1 ^ v2 ^ v3
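+
+# Editor's note: illustrative call (a sketch). BIP152 compact blocks take the
+# low 6 bytes of this function, keyed by values derived from the block header,
+# applied to a transaction id:
+#
+#     shortid = siphash256(k0, k1, txid_as_int) & 0xFFFFFFFFFFFF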
diff --git a/test/functional/test_framework/socks5.py b/test/functional/test_framework/socks5.py
new file mode 100644
index 0000000000..dd7624d454
--- /dev/null
+++ b/test/functional/test_framework/socks5.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Dummy Socks5 server for testing."""
+
+import socket, threading, queue
+import traceback, sys
+import logging
+
+logger = logging.getLogger("TestFramework.socks5")
+
+### Protocol constants
+class Command:
+ CONNECT = 0x01
+
+class AddressType:
+ IPV4 = 0x01
+ DOMAINNAME = 0x03
+ IPV6 = 0x04
+
+### Utility functions
+def recvall(s, n):
+ """Receive n bytes from a socket, or fail."""
+ rv = bytearray()
+ while n > 0:
+ d = s.recv(n)
+ if not d:
+ raise IOError('Unexpected end of stream')
+ rv.extend(d)
+ n -= len(d)
+ return rv
+
+### Implementation classes
+class Socks5Configuration(object):
+ """Proxy configuration."""
+ def __init__(self):
+ self.addr = None # Bind address (must be set)
+ self.af = socket.AF_INET # Bind address family
+ self.unauth = False # Support unauthenticated
+ self.auth = False # Support authentication
+
+class Socks5Command(object):
+ """Information about an incoming socks5 command."""
+ def __init__(self, cmd, atyp, addr, port, username, password):
+ self.cmd = cmd # Command (one of Command.*)
+ self.atyp = atyp # Address type (one of AddressType.*)
+ self.addr = addr # Address
+ self.port = port # Port to connect to
+ self.username = username
+ self.password = password
+ def __repr__(self):
+ return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password)
+
+class Socks5Connection(object):
+ def __init__(self, serv, conn, peer):
+ self.serv = serv
+ self.conn = conn
+ self.peer = peer
+
+ def handle(self):
+ """Handle socks5 request according to RFC192."""
+ try:
+ # Verify socks version
+ ver = recvall(self.conn, 1)[0]
+ if ver != 0x05:
+ raise IOError('Invalid socks version %i' % ver)
+ # Choose authentication method
+ nmethods = recvall(self.conn, 1)[0]
+ methods = bytearray(recvall(self.conn, nmethods))
+ method = None
+ if 0x02 in methods and self.serv.conf.auth:
+ method = 0x02 # username/password
+ elif 0x00 in methods and self.serv.conf.unauth:
+ method = 0x00 # unauthenticated
+ if method is None:
+ raise IOError('No supported authentication method was offered')
+ # Send response
+ self.conn.sendall(bytearray([0x05, method]))
+ # Read authentication (optional)
+ username = None
+ password = None
+ if method == 0x02:
+ ver = recvall(self.conn, 1)[0]
+ if ver != 0x01:
+ raise IOError('Invalid auth packet version %i' % ver)
+ ulen = recvall(self.conn, 1)[0]
+ username = str(recvall(self.conn, ulen))
+ plen = recvall(self.conn, 1)[0]
+ password = str(recvall(self.conn, plen))
+ # Send authentication response
+ self.conn.sendall(bytearray([0x01, 0x00]))
+
+ # Read connect request
+ (ver,cmd,rsv,atyp) = recvall(self.conn, 4)
+ if ver != 0x05:
+ raise IOError('Invalid socks version %i in connect request' % ver)
+ if cmd != Command.CONNECT:
+ raise IOError('Unhandled command %i in connect request' % cmd)
+
+ if atyp == AddressType.IPV4:
+ addr = recvall(self.conn, 4)
+ elif atyp == AddressType.DOMAINNAME:
+ n = recvall(self.conn, 1)[0]
+ addr = recvall(self.conn, n)
+ elif atyp == AddressType.IPV6:
+ addr = recvall(self.conn, 16)
+ else:
+ raise IOError('Unknown address type %i' % atyp)
+ port_hi,port_lo = recvall(self.conn, 2)
+ port = (port_hi << 8) | port_lo
+
+ # Send dummy response
+ self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))
+
+ cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
+ self.serv.queue.put(cmdin)
+ logger.info('Proxy: %s', cmdin)
+ # Fall through to disconnect
+ except Exception as e:
+ logger.exception("socks5 request handling failed.")
+ self.serv.queue.put(e)
+ finally:
+ self.conn.close()
+
+class Socks5Server(object):
+ def __init__(self, conf):
+ self.conf = conf
+ self.s = socket.socket(conf.af)
+ self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self.s.bind(conf.addr)
+ self.s.listen(5)
+ self.running = False
+ self.thread = None
+ self.queue = queue.Queue() # report connections and exceptions to client
+
+ def run(self):
+ while self.running:
+ (sockconn, peer) = self.s.accept()
+ if self.running:
+ conn = Socks5Connection(self, sockconn, peer)
+ thread = threading.Thread(None, conn.handle)
+ thread.daemon = True
+ thread.start()
+
+ def start(self):
+ assert(not self.running)
+ self.running = True
+ self.thread = threading.Thread(None, self.run)
+ self.thread.daemon = True
+ self.thread.start()
+
+ def stop(self):
+ self.running = False
+ # connect to self to end run loop
+ s = socket.socket(self.conf.af)
+ s.connect(self.conf.addr)
+ s.close()
+ self.thread.join()
+
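+# Editor's note: minimal harness sketch (address/port are illustrative):
+#
+#     conf = Socks5Configuration()
+#     conf.addr = ('127.0.0.1', 13000)
+#     conf.unauth = conf.auth = True
+#     serv = Socks5Server(conf)
+#     serv.start()
+#     ...                      # point bitcoind -proxy= at conf.addr
+#     cmd = serv.queue.get()   # a Socks5Command, or an exception on failure
+#     serv.stop()
+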
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
new file mode 100755
index 0000000000..d7072fa78d
--- /dev/null
+++ b/test/functional/test_framework/test_framework.py
@@ -0,0 +1,244 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Base class for RPC testing."""
+
+import logging
+import optparse
+import os
+import sys
+import shutil
+import tempfile
+import traceback
+
+from .util import (
+ initialize_chain,
+ start_nodes,
+ connect_nodes_bi,
+ sync_blocks,
+ sync_mempools,
+ stop_nodes,
+ stop_node,
+ enable_coverage,
+ check_json_precision,
+ initialize_chain_clean,
+ PortSeed,
+)
+from .authproxy import JSONRPCException
+
+
+class BitcoinTestFramework(object):
+
+ def __init__(self):
+ self.num_nodes = 4
+ self.setup_clean_chain = False
+ self.nodes = None
+
+ def run_test(self):
+ raise NotImplementedError
+
+ def add_options(self, parser):
+ pass
+
+ def setup_chain(self):
+ self.log.info("Initializing test directory "+self.options.tmpdir)
+ if self.setup_clean_chain:
+ initialize_chain_clean(self.options.tmpdir, self.num_nodes)
+ else:
+ initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
+
+ def stop_node(self, num_node):
+ stop_node(self.nodes[num_node], num_node)
+
+ def setup_nodes(self):
+ return start_nodes(self.num_nodes, self.options.tmpdir)
+
+ def setup_network(self, split = False):
+ self.nodes = self.setup_nodes()
+
+ # Connect the nodes as a "chain". This allows us
+ # to split the network between nodes 1 and 2 to get
+ # two halves that can work on competing chains.
+
+ # If we joined network halves, connect the nodes from the join
+ # point outward. This ensures that chains are properly reorganised.
+ if not split:
+ connect_nodes_bi(self.nodes, 1, 2)
+ sync_blocks(self.nodes[1:3])
+ sync_mempools(self.nodes[1:3])
+
+ connect_nodes_bi(self.nodes, 0, 1)
+ connect_nodes_bi(self.nodes, 2, 3)
+ self.is_network_split = split
+ self.sync_all()
+
+ def split_network(self):
+ """
+ Split the network of four nodes into nodes 0/1 and 2/3.
+ """
+ assert not self.is_network_split
+ stop_nodes(self.nodes)
+ self.setup_network(True)
+
+ def sync_all(self):
+ if self.is_network_split:
+ sync_blocks(self.nodes[:2])
+ sync_blocks(self.nodes[2:])
+ sync_mempools(self.nodes[:2])
+ sync_mempools(self.nodes[2:])
+ else:
+ sync_blocks(self.nodes)
+ sync_mempools(self.nodes)
+
+ def join_network(self):
+ """
+ Join the (previously split) network halves together.
+ """
+ assert self.is_network_split
+ stop_nodes(self.nodes)
+ self.setup_network(False)
+
+ def main(self):
+
+ parser = optparse.OptionParser(usage="%prog [options]")
+ parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
+ help="Leave bitcoinds and test.* datadir on exit or error")
+ parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
+ help="Don't stop bitcoinds after the test execution")
+ parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
+ help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
+ parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
+ help="Directory for caching pregenerated datadirs")
+ parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
+ help="Root directory for datadirs")
+ parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
+ help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
+ parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
+ help="Print out all RPC calls as they are made")
+ parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
+ help="The seed to use for assigning port numbers (default: current process id)")
+ parser.add_option("--coveragedir", dest="coveragedir",
+ help="Write tested RPC commands into this directory")
+ self.add_options(parser)
+ (self.options, self.args) = parser.parse_args()
+
+ # backup dir variable for removal at cleanup
+ self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
+
+ if self.options.coveragedir:
+ enable_coverage(self.options.coveragedir)
+
+ PortSeed.n = self.options.port_seed
+
+ os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
+
+ check_json_precision()
+
+ # Set up temp directory and start logging
+ os.makedirs(self.options.tmpdir, exist_ok=False)
+ self._start_logging()
+
+ success = False
+
+ try:
+ self.setup_chain()
+ self.setup_network()
+ self.run_test()
+ success = True
+ except JSONRPCException as e:
+ self.log.exception("JSONRPC error")
+ except AssertionError as e:
+ self.log.exception("Assertion failed")
+ except KeyError as e:
+ self.log.exception("Key error")
+ except Exception as e:
+ self.log.exception("Unexpected exception caught during testing")
+ except KeyboardInterrupt as e:
+ self.log.warning("Exiting after keyboard interrupt")
+
+ if not self.options.noshutdown:
+ self.log.info("Stopping nodes")
+ stop_nodes(self.nodes)
+ else:
+ self.log.info("Note: bitcoinds were not stopped and may still be running")
+
+ if not self.options.nocleanup and not self.options.noshutdown and success:
+ self.log.info("Cleaning up")
+ shutil.rmtree(self.options.tmpdir)
+ if not os.listdir(self.options.root):
+ os.rmdir(self.options.root)
+ else:
+ self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
+ if os.getenv("PYTHON_DEBUG", ""):
+ # Dump the end of the debug logs, to aid in debugging rare
+ # travis failures.
+ import glob
+ filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
+ MAX_LINES_TO_PRINT = 1000
+ for f in filenames:
+ print("From" , f, ":")
+ from collections import deque
+ print("".join(deque(open(f), MAX_LINES_TO_PRINT)))
+ if success:
+ self.log.info("Tests successful")
+ sys.exit(0)
+ else:
+ self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
+ logging.shutdown()
+ sys.exit(1)
+
+ def _start_logging(self):
+ # Add logger and logging handlers
+ self.log = logging.getLogger('TestFramework')
+ self.log.setLevel(logging.DEBUG)
+ # Create file handler to log all messages
+ fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
+ fh.setLevel(logging.DEBUG)
+ # Create console handler to log messages to stdout. By default this logs at INFO level and higher, but can be configured with --loglevel.
+ ch = logging.StreamHandler(sys.stdout)
+ # User can provide log level as a number or string (eg DEBUG). loglevel was passed in as a string, so try to convert it to an int
+ ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
+ ch.setLevel(ll)
+ # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
+ formatter = logging.Formatter(fmt = '%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
+ fh.setFormatter(formatter)
+ ch.setFormatter(formatter)
+ # add the handlers to the logger
+ self.log.addHandler(fh)
+ self.log.addHandler(ch)
+
+ if self.options.trace_rpc:
+ rpc_logger = logging.getLogger("BitcoinRPC")
+ rpc_logger.setLevel(logging.DEBUG)
+ rpc_handler = logging.StreamHandler(sys.stdout)
+ rpc_handler.setLevel(logging.DEBUG)
+ rpc_logger.addHandler(rpc_handler)
+
+# Test framework for doing p2p comparison testing, which sets up some bitcoind
+# binaries:
+# 1 binary: test binary
+# 2 binaries: 1 test binary, 1 ref binary
+# n>2 binaries: 1 test binary, n-1 ref binaries
+
+class ComparisonTestFramework(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 2
+ self.setup_clean_chain = True
+
+ def add_options(self, parser):
+ parser.add_option("--testbinary", dest="testbinary",
+ default=os.getenv("BITCOIND", "bitcoind"),
+ help="bitcoind binary to test")
+ parser.add_option("--refbinary", dest="refbinary",
+ default=os.getenv("BITCOIND", "bitcoind"),
+ help="bitcoind binary to use for reference nodes (if any)")
+
+ def setup_network(self):
+ self.nodes = start_nodes(
+ self.num_nodes, self.options.tmpdir,
+ extra_args=[['-whitelist=127.0.0.1']] * self.num_nodes,
+ binary=[self.options.testbinary] +
+ [self.options.refbinary]*(self.num_nodes-1))
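+
+# Editor's note: the canonical subclass shape (a sketch, not from this file):
+#
+#     class ExampleTest(BitcoinTestFramework):
+#         def __init__(self):
+#             super().__init__()
+#             self.num_nodes = 2
+#             self.setup_clean_chain = True
+#
+#         def run_test(self):
+#             self.nodes[0].generate(10)
+#             self.sync_all()
+#
+#     if __name__ == '__main__':
+#         ExampleTest().main()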
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
new file mode 100644
index 0000000000..23ac324510
--- /dev/null
+++ b/test/functional/test_framework/util.py
@@ -0,0 +1,670 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Helpful routines for regression testing."""
+
+import os
+import sys
+
+from binascii import hexlify, unhexlify
+from base64 import b64encode
+from decimal import Decimal, ROUND_DOWN
+import json
+import http.client
+import random
+import shutil
+import subprocess
+import tempfile
+import time
+import re
+import errno
+import logging
+
+from . import coverage
+from .authproxy import AuthServiceProxy, JSONRPCException
+
+COVERAGE_DIR = None
+
+logger = logging.getLogger("TestFramework.utils")
+
+# The maximum number of nodes a single test can spawn
+MAX_NODES = 8
+# Don't assign rpc or p2p ports lower than this
+PORT_MIN = 11000
+# The number of ports to "reserve" for p2p and rpc, each
+PORT_RANGE = 5000
+
+BITCOIND_PROC_WAIT_TIMEOUT = 60
+
+
+class PortSeed:
+ # Must be initialized with a unique integer for each process
+ n = None
+
+#Set Mocktime default to OFF.
+#MOCKTIME is only needed for scripts that use the
+#cached version of the blockchain. If the cached
+#version of the blockchain is used without MOCKTIME
+#then the mempools will not sync due to IBD.
+MOCKTIME = 0
+
+def enable_mocktime():
+ #For backward compatibility of the python scripts
+ #with previous versions of the cache, set MOCKTIME
+ #to Jan 1, 2014 + (201 * 10 * 60)
+ global MOCKTIME
+ MOCKTIME = 1388534400 + (201 * 10 * 60)
+
+def disable_mocktime():
+ global MOCKTIME
+ MOCKTIME = 0
+
+def get_mocktime():
+ return MOCKTIME
+
+def enable_coverage(dirname):
+ """Maintain a log of which RPC calls are made during testing."""
+ global COVERAGE_DIR
+ COVERAGE_DIR = dirname
+
+
+def get_rpc_proxy(url, node_number, timeout=None):
+ """
+ Args:
+ url (str): URL of the RPC server to call
+ node_number (int): the node number (or id) that this calls to
+
+ Kwargs:
+ timeout (int): HTTP timeout in seconds
+
+ Returns:
+ AuthServiceProxy. convenience object for making RPC calls.
+
+ """
+ proxy_kwargs = {}
+ if timeout is not None:
+ proxy_kwargs['timeout'] = timeout
+
+ proxy = AuthServiceProxy(url, **proxy_kwargs)
+ proxy.url = url # store URL on proxy for info
+
+ coverage_logfile = coverage.get_filename(
+ COVERAGE_DIR, node_number) if COVERAGE_DIR else None
+
+ return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
+
+
+def p2p_port(n):
+ assert(n <= MAX_NODES)
+ return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
+
+def rpc_port(n):
+ return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
+
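+# Editor's note: with PortSeed.n set per process, p2p and rpc ports land in
+# disjoint PORT_RANGE-wide bands above PORT_MIN; e.g. with PortSeed.n == 1
+# (editor's sketch):
+#
+#     p2p_port(0)  ->  11008
+#     rpc_port(0)  ->  16008
+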
+def check_json_precision():
+ """Make sure json library being used does not lose precision converting BTC values"""
+ n = Decimal("20000000.00000003")
+ satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
+ if satoshis != 2000000000000003:
+ raise RuntimeError("JSON encode/decode loses precision")
+
+def count_bytes(hex_string):
+ return len(bytearray.fromhex(hex_string))
+
+def bytes_to_hex_str(byte_str):
+ return hexlify(byte_str).decode('ascii')
+
+def hex_str_to_bytes(hex_str):
+ return unhexlify(hex_str.encode('ascii'))
+
+def str_to_b64str(string):
+ return b64encode(string.encode('utf-8')).decode('ascii')
+
+def sync_blocks(rpc_connections, *, wait=1, timeout=60):
+ """
+ Wait until everybody has the same tip.
+
+ sync_blocks needs to be called with an rpc_connections set that has at least
+ one node already synced to the latest, stable tip, otherwise there's a
+ chance it might return before all nodes are stably synced.
+ """
+ # Use getblockcount() instead of waitforblockheight() to determine the
+ # initial max height because the two RPCs look at different internal global
+ # variables (chainActive vs latestBlock) and the former gets updated
+ # earlier.
+ maxheight = max(x.getblockcount() for x in rpc_connections)
+ start_time = cur_time = time.time()
+ while cur_time <= start_time + timeout:
+ tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
+ if all(t["height"] == maxheight for t in tips):
+ if all(t["hash"] == tips[0]["hash"] for t in tips):
+ return
+ raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
+ "".join("\n {!r}".format(tip) for tip in tips)))
+ cur_time = time.time()
+ raise AssertionError("Block sync to height {} timed out:{}".format(
+ maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
+
+def sync_chain(rpc_connections, *, wait=1, timeout=60):
+ """
+ Wait until everybody has the same best block.
+ """
+ while timeout > 0:
+ best_hash = [x.getbestblockhash() for x in rpc_connections]
+ if best_hash == [best_hash[0]]*len(best_hash):
+ return
+ time.sleep(wait)
+ timeout -= wait
+ raise AssertionError("Chain sync failed: Best block hashes don't match")
+
+def sync_mempools(rpc_connections, *, wait=1, timeout=60):
+ """
+ Wait until everybody has the same transactions in their memory
+ pools
+ """
+ while timeout > 0:
+ pool = set(rpc_connections[0].getrawmempool())
+ num_match = 1
+ for i in range(1, len(rpc_connections)):
+ if set(rpc_connections[i].getrawmempool()) == pool:
+ num_match += 1
+ if num_match == len(rpc_connections):
+ return
+ time.sleep(wait)
+ timeout -= wait
+ raise AssertionError("Mempool sync failed")
+
+bitcoind_processes = {}
+
+def initialize_datadir(dirname, n):
+ datadir = os.path.join(dirname, "node"+str(n))
+ if not os.path.isdir(datadir):
+ os.makedirs(datadir)
+ rpc_u, rpc_p = rpc_auth_pair(n)
+ with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f:
+ f.write("regtest=1\n")
+ f.write("rpcuser=" + rpc_u + "\n")
+ f.write("rpcpassword=" + rpc_p + "\n")
+ f.write("port="+str(p2p_port(n))+"\n")
+ f.write("rpcport="+str(rpc_port(n))+"\n")
+ f.write("listenonion=0\n")
+ return datadir
+
+def rpc_auth_pair(n):
+ return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
+
+def rpc_url(i, rpchost=None):
+ rpc_u, rpc_p = rpc_auth_pair(i)
+ host = '127.0.0.1'
+ port = rpc_port(i)
+ if rpchost:
+ parts = rpchost.split(':')
+ if len(parts) == 2:
+ host, port = parts
+ else:
+ host = rpchost
+ return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
+
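+# Editor's note: example output (a sketch; with PortSeed.n == 1):
+#
+#     rpc_url(0)  ->  'http://rpcuser💻0:rpcpass🔑0@127.0.0.1:16008'
+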
+def wait_for_bitcoind_start(process, url, i):
+ '''
+ Wait for bitcoind to start. This means that RPC is accessible and fully initialized.
+ Raise an exception if bitcoind exits during initialization.
+ '''
+ while True:
+ if process.poll() is not None:
+ raise Exception('bitcoind exited with status %i during initialization' % process.returncode)
+ try:
+ rpc = get_rpc_proxy(url, i)
+ blocks = rpc.getblockcount()
+ break # break out of loop on success
+ except IOError as e:
+ if e.errno != errno.ECONNREFUSED: # Port not yet open?
+ raise # unknown IO error
+ except JSONRPCException as e: # Initialization phase
+ if e.error['code'] != -28: # RPC in warmup?
+ raise # unknown JSON RPC exception
+ time.sleep(0.25)
+
+def initialize_chain(test_dir, num_nodes, cachedir):
+ """
+ Create a cache of a 200-block-long chain (with wallet) for MAX_NODES.
+ Afterward, create num_nodes copies from the cache.
+ """
+
+ assert num_nodes <= MAX_NODES
+ create_cache = False
+ for i in range(MAX_NODES):
+ if not os.path.isdir(os.path.join(cachedir, 'node'+str(i))):
+ create_cache = True
+ break
+
+ if create_cache:
+ logger.debug("Creating data directories from cached datadir")
+
+ #find and delete old cache directories if any exist
+ for i in range(MAX_NODES):
+ if os.path.isdir(os.path.join(cachedir,"node"+str(i))):
+ shutil.rmtree(os.path.join(cachedir,"node"+str(i)))
+
+ # Create cache directories, run bitcoinds:
+ for i in range(MAX_NODES):
+ datadir=initialize_datadir(cachedir, i)
+ args = [ os.getenv("BITCOIND", "bitcoind"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
+ if i > 0:
+ args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
+ bitcoind_processes[i] = subprocess.Popen(args)
+ logger.debug("initialize_chain: bitcoind started, waiting for RPC to come up")
+ wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
+ logger.debug("initialize_chain: RPC successfully started")
+
+ rpcs = []
+ for i in range(MAX_NODES):
+ try:
+ rpcs.append(get_rpc_proxy(rpc_url(i), i))
+ except:
+ sys.stderr.write("Error connecting to "+url+"\n")
+ sys.exit(1)
+
+ # Create a 200-block-long chain; each of the first 4 nodes
+ # gets 25 mature blocks and 25 immature.
+ # Note: To preserve compatibility with older versions of
+ # initialize_chain, only 4 nodes will generate coins.
+ #
+ # blocks are created with timestamps 10 minutes apart
+ # starting from 2010 minutes in the past
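+ # (201 intervals of 10 minutes = 2010 minutes; the 200 blocks then land
+ # between -2010 and -20 minutes relative to mocktime, all safely in the past.)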
+ enable_mocktime()
+ block_time = get_mocktime() - (201 * 10 * 60)
+ for i in range(2):
+ for peer in range(4):
+ for j in range(25):
+ set_node_times(rpcs, block_time)
+ rpcs[peer].generate(1)
+ block_time += 10*60
+ # Must sync before next peer starts generating blocks
+ sync_blocks(rpcs)
+
+ # Shut them down, and clean up cache directories:
+ stop_nodes(rpcs)
+ disable_mocktime()
+ for i in range(MAX_NODES):
+ os.remove(log_filename(cachedir, i, "debug.log"))
+ os.remove(log_filename(cachedir, i, "db.log"))
+ os.remove(log_filename(cachedir, i, "peers.dat"))
+ os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
+
+ for i in range(num_nodes):
+ from_dir = os.path.join(cachedir, "node"+str(i))
+ to_dir = os.path.join(test_dir, "node"+str(i))
+ shutil.copytree(from_dir, to_dir)
+ initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
+
+def initialize_chain_clean(test_dir, num_nodes):
+ """
+ Create an empty blockchain and num_nodes wallets.
+ Useful if a test case wants complete control over initialization.
+ """
+ for i in range(num_nodes):
+ datadir = initialize_datadir(test_dir, i)
+
+
+def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
+ """
+ Start a bitcoind and return RPC connection to it
+ """
+ datadir = os.path.join(dirname, "node"+str(i))
+ if binary is None:
+ binary = os.getenv("BITCOIND", "bitcoind")
+ args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-mocktime="+str(get_mocktime()) ]
+ if extra_args is not None: args.extend(extra_args)
+ bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
+ logger.debug("initialize_chain: bitcoind started, waiting for RPC to come up")
+ url = rpc_url(i, rpchost)
+ wait_for_bitcoind_start(bitcoind_processes[i], url, i)
+ logger.debug("initialize_chain: RPC successfully started")
+ proxy = get_rpc_proxy(url, i, timeout=timewait)
+
+ if COVERAGE_DIR:
+ coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
+
+ return proxy
+
+def assert_start_raises_init_error(i, dirname, extra_args=None, expected_msg=None):
+ with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
+ try:
+ node = start_node(i, dirname, extra_args, stderr=log_stderr)
+ stop_node(node, i)
+ except Exception as e:
+ assert 'bitcoind exited' in str(e) # node must have shut down
+ if expected_msg is not None:
+ log_stderr.seek(0)
+ stderr = log_stderr.read().decode('utf-8')
+ if expected_msg not in stderr:
+ raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
+ else:
+ if expected_msg is None:
+ assert_msg = "bitcoind should have exited with an error"
+ else:
+ assert_msg = "bitcoind should have exited with expected error " + expected_msg
+ raise AssertionError(assert_msg)
+
+def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
+ """
+ Start multiple bitcoinds, return RPC connections to them
+ """
+ if extra_args is None: extra_args = [ None for _ in range(num_nodes) ]
+ if binary is None: binary = [ None for _ in range(num_nodes) ]
+ rpcs = []
+ try:
+ for i in range(num_nodes):
+ rpcs.append(start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i]))
+ except: # If one node failed to start, stop the others
+ stop_nodes(rpcs)
+ raise
+ return rpcs
+
+def log_filename(dirname, n_node, logname):
+ return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
+
+def stop_node(node, i):
+ logger.debug("Stopping node %d" % i)
+ try:
+ node.stop()
+ except http.client.CannotSendRequest as e:
+ logger.exception("Unable to stop node")
+ return_code = bitcoind_processes[i].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
+ assert_equal(return_code, 0)
+ del bitcoind_processes[i]
+
+def stop_nodes(nodes):
+ for i, node in enumerate(nodes):
+ stop_node(node, i)
+ assert not bitcoind_processes.values() # All bitcoind processes must be gone now
+
+def set_node_times(nodes, t):
+ for node in nodes:
+ node.setmocktime(t)
+
+def connect_nodes(from_connection, node_num):
+ ip_port = "127.0.0.1:"+str(p2p_port(node_num))
+ from_connection.addnode(ip_port, "onetry")
+ # poll until version handshake complete to avoid race conditions
+ # with transaction relaying
+ while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
+ time.sleep(0.1)
+
+def connect_nodes_bi(nodes, a, b):
+ connect_nodes(nodes[a], b)
+ connect_nodes(nodes[b], a)
+
+def find_output(node, txid, amount):
+ """
+ Return index to output of txid with value amount
+ Raises exception if there is none.
+ """
+ txdata = node.getrawtransaction(txid, 1)
+ for i in range(len(txdata["vout"])):
+ if txdata["vout"][i]["value"] == amount:
+ return i
+ raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
+
+
+def gather_inputs(from_node, amount_needed, confirmations_required=1):
+ """
+ Return a random set of unspent txouts that are enough to pay amount_needed
+ """
+ assert confirmations_required >= 0
+ utxo = from_node.listunspent(confirmations_required)
+ random.shuffle(utxo)
+ inputs = []
+ total_in = Decimal("0.00000000")
+ while total_in < amount_needed and len(utxo) > 0:
+ t = utxo.pop()
+ total_in += t["amount"]
+ inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
+ if total_in < amount_needed:
+ raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
+ return (total_in, inputs)
+
+def make_change(from_node, amount_in, amount_out, fee):
+ """
+ Create change output(s), return them
+ """
+ outputs = {}
+ amount = amount_out+fee
+ change = amount_in - amount
+ if change > amount*2:
+ # Create an extra change output to break up big inputs
+ change_address = from_node.getnewaddress()
+ # Split change in two, being careful of rounding:
+ outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
+ change = amount_in - amount - outputs[change_address]
+ if change > 0:
+ outputs[from_node.getnewaddress()] = change
+ return outputs
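+# Example with Decimal inputs (illustrative): make_change(node, 50, 10,
+# Decimal("0.001")) leaves 39.999 of change, which exceeds 2 * 10.001, so it
+# is split into two outputs of 19.9995 each, paid to fresh addresses.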
+
+def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
+ """
+ Create a random transaction.
+ Returns (txid, hex-encoded-transaction-data, fee)
+ """
+ from_node = random.choice(nodes)
+ to_node = random.choice(nodes)
+ fee = min_fee + fee_increment*random.randint(0,fee_variants)
+
+ (total_in, inputs) = gather_inputs(from_node, amount+fee)
+ outputs = make_change(from_node, total_in, amount, fee)
+ outputs[to_node.getnewaddress()] = float(amount)
+
+ rawtx = from_node.createrawtransaction(inputs, outputs)
+ signresult = from_node.signrawtransaction(rawtx)
+ txid = from_node.sendrawtransaction(signresult["hex"], True)
+
+ return (txid, signresult["hex"], fee)
+
+def assert_fee_amount(fee, tx_size, fee_per_kB):
+ """Assert the fee was in range"""
+ target_fee = tx_size * fee_per_kB / 1000
+ if fee < target_fee:
+ raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
+ # allow the wallet's estimation to be at most 2 bytes off
+ max_fee = (tx_size + 2) * fee_per_kB / 1000
+ if fee > max_fee:
+ raise AssertionError("Fee of %s BTC too high! (Should be at most %s BTC)"%(str(fee), str(max_fee)))
+
+def assert_equal(thing1, thing2, *args):
+ if thing1 != thing2 or any(thing1 != arg for arg in args):
+ raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
+
+def assert_greater_than(thing1, thing2):
+ if thing1 <= thing2:
+ raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
+
+def assert_greater_than_or_equal(thing1, thing2):
+ if thing1 < thing2:
+ raise AssertionError("%s < %s"%(str(thing1),str(thing2)))
+
+def assert_raises(exc, fun, *args, **kwds):
+ assert_raises_message(exc, None, fun, *args, **kwds)
+
+def assert_raises_message(exc, message, fun, *args, **kwds):
+ try:
+ fun(*args, **kwds)
+ except exc as e:
+ if message is not None and message not in e.error['message']:
+ raise AssertionError("Expected substring not found:"+e.error['message'])
+ except Exception as e:
+ raise AssertionError("Unexpected exception raised: "+type(e).__name__)
+ else:
+ raise AssertionError("No exception raised")
+
+def assert_raises_jsonrpc(code, message, fun, *args, **kwds):
+ """Run an RPC and verify that a specific JSONRPC exception code and message is raised.
+
+ Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
+ and verifies that the error code and message are as expected. Throws AssertionError if
+ no JSONRPCException was returned or if the error code/message are not as expected.
+
+ Args:
+ code (int), optional: the error code returned by the RPC call (defined
+ in src/rpc/protocol.h). Set to None if checking the error code is not required.
+ message (string), optional: [a substring of] the error string returned by the
+ RPC call. Set to None if checking the error string is not required
+ fun (function): the function to call. This should be the name of an RPC.
+ args*: positional arguments for the function.
+ kwds**: named arguments for the function.
+ """
+ try:
+ fun(*args, **kwds)
+ except JSONRPCException as e:
+ # JSONRPCException was thrown as expected. Check the code and message values are correct.
+ if (code is not None) and (code != e.error["code"]):
+ raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
+ if (message is not None) and (message not in e.error['message']):
+ raise AssertionError("Expected substring not found:"+e.error['message'])
+ except Exception as e:
+ raise AssertionError("Unexpected exception raised: "+type(e).__name__)
+ else:
+ raise AssertionError("No exception raised")
+
+def assert_is_hex_string(string):
+ try:
+ int(string, 16)
+ except Exception as e:
+ raise AssertionError(
+ "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
+
+def assert_is_hash_string(string, length=64):
+ if not isinstance(string, str):
+ raise AssertionError("Expected a string, got type %r" % type(string))
+ elif length and len(string) != length:
+ raise AssertionError(
+ "String of length %d expected; got %d" % (length, len(string)))
+ elif not re.match('[abcdef0-9]+$', string):
+ raise AssertionError(
+ "String %r contains invalid characters for a hash." % string)
+
+def assert_array_result(object_array, to_match, expected, should_not_find = False):
+ """
+ Pass in array of JSON objects, a dictionary with key/value pairs
+ to match against, and another dictionary with expected key/value
+ pairs.
+ If the should_not_find flag is true, to_match should not be found
+ in object_array
+ """
+ if should_not_find:
+ assert_equal(expected, { })
+ num_matched = 0
+ for item in object_array:
+ all_match = True
+ for key,value in to_match.items():
+ if item[key] != value:
+ all_match = False
+ if not all_match:
+ continue
+ elif should_not_find:
+ num_matched += 1
+ for key,value in expected.items():
+ if item[key] != value:
+ raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
+ num_matched += 1
+ if num_matched == 0 and not should_not_find:
+ raise AssertionError("No objects matched %s"%(str(to_match)))
+ if num_matched > 0 and should_not_find:
+ raise AssertionError("Objects were found %s"%(str(to_match)))
+
+def satoshi_round(amount):
+ return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
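+# Example (illustrative): satoshi_round(Decimal('0.123456789')) returns
+# Decimal('0.12345678') -- truncated, not rounded, to eight decimal places.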
+
+# Helper to create at least "count" utxos
+# Pass in a fee that is sufficient for relay and mining new transactions.
+def create_confirmed_utxos(fee, node, count):
+ node.generate(int(0.5*count)+101)
+ utxos = node.listunspent()
+ iterations = count - len(utxos)
+ addr1 = node.getnewaddress()
+ addr2 = node.getnewaddress()
+ if iterations <= 0:
+ return utxos
+ for i in range(iterations):
+ t = utxos.pop()
+ inputs = []
+ inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
+ outputs = {}
+ send_value = t['amount'] - fee
+ outputs[addr1] = satoshi_round(send_value/2)
+ outputs[addr2] = satoshi_round(send_value/2)
+ raw_tx = node.createrawtransaction(inputs, outputs)
+ signed_tx = node.signrawtransaction(raw_tx)["hex"]
+ txid = node.sendrawtransaction(signed_tx)
+
+ while node.getmempoolinfo()['size'] > 0:
+ node.generate(1)
+
+ utxos = node.listunspent()
+ assert(len(utxos) >= count)
+ return utxos
+
+# Create large OP_RETURN txouts that can be appended to a transaction
+# to make it large (helper for constructing large transactions).
+def gen_return_txouts():
+ # Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
+ # So we have big transactions (and therefore can't fit very many into each block)
+ # create one script_pubkey
+ script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
+ for i in range(512):
+ script_pubkey = script_pubkey + "01"
+ # concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
+ txouts = "81"
+ for k in range(128):
+ # add txout value
+ txouts = txouts + "0000000000000000"
+ # add length of script_pubkey
+ txouts = txouts + "fd0402"
+ # add script_pubkey
+ txouts = txouts + script_pubkey
+ return txouts
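+# (Illustrative arithmetic: each txout is 8 value bytes, 3 compact-size bytes
+# ("fd0402" = 516) and a 516-byte script, so the 128 txouts add roughly
+# 128 * 527 = 67,456 bytes -- the ~66 kB that mine_large_block relies on.)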
+
+def create_tx(node, coinbase, to_address, amount):
+ inputs = [{ "txid" : coinbase, "vout" : 0}]
+ outputs = { to_address : amount }
+ rawtx = node.createrawtransaction(inputs, outputs)
+ signresult = node.signrawtransaction(rawtx)
+ assert_equal(signresult["complete"], True)
+ return signresult["hex"]
+
+# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
+# transaction to make it large. See gen_return_txouts() above.
+def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
+ addr = node.getnewaddress()
+ txids = []
+ for _ in range(num):
+ t = utxos.pop()
+ inputs=[{ "txid" : t["txid"], "vout" : t["vout"]}]
+ outputs = {}
+ change = t['amount'] - fee
+ outputs[addr] = satoshi_round(change)
+ rawtx = node.createrawtransaction(inputs, outputs)
+ newtx = rawtx[0:92]
+ newtx = newtx + txouts
+ newtx = newtx + rawtx[94:]
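+ # (Illustrative: for this unsigned one-input tx, rawtx[0:92] covers the
+ # version, input count, outpoint, empty-scriptSig length byte and sequence;
+ # [92:94] is the output-count byte, which "txouts" replaces with 0x81 = 129
+ # outputs -- the 128 OP_RETURNs plus the original change output in rawtx[94:].)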
+ signresult = node.signrawtransaction(newtx, None, None, "NONE")
+ txid = node.sendrawtransaction(signresult["hex"], True)
+ txids.append(txid)
+ return txids
+
+def mine_large_block(node, utxos=None):
+ # generate 66 kB transactions;
+ # 14 of them come close to the 1 MB block limit
+ num = 14
+ txouts = gen_return_txouts()
+ utxos = utxos if utxos is not None else []
+ if len(utxos) < num:
+ utxos.clear()
+ utxos.extend(node.listunspent())
+ fee = 100 * node.getnetworkinfo()["relayfee"]
+ create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
+ node.generate(1)
+
+def get_bip9_status(node, key):
+ info = node.getblockchaininfo()
+ return info['bip9_softforks'][key]
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
new file mode 100755
index 0000000000..12eb92028f
--- /dev/null
+++ b/test/functional/test_runner.py
@@ -0,0 +1,393 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Run regression test suite.
+
+This module calls down into individual test cases via subprocess. It will
+forward all unrecognized arguments onto the individual test scripts.
+
+Functional tests are disabled on Windows by default. Use --force to run them anyway.
+
+For a description of arguments recognized by test scripts, see
+`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
+
+"""
+
+import argparse
+import configparser
+import os
+import time
+import shutil
+import sys
+import subprocess
+import tempfile
+import re
+
+BASE_SCRIPTS = [
+ # Scripts that are run by the travis build process.
+ # Longest test should go first, to favor running tests in parallel
+ 'wallet-hd.py',
+ 'walletbackup.py',
+ # vv Tests less than 5m vv
+ 'p2p-fullblocktest.py',
+ 'fundrawtransaction.py',
+ 'p2p-compactblocks.py',
+ 'segwit.py',
+ # vv Tests less than 2m vv
+ 'wallet.py',
+ 'wallet-accounts.py',
+ 'p2p-segwit.py',
+ 'wallet-dump.py',
+ 'listtransactions.py',
+ # vv Tests less than 60s vv
+ 'sendheaders.py',
+ 'zapwallettxes.py',
+ 'importmulti.py',
+ 'mempool_limit.py',
+ 'merkle_blocks.py',
+ 'receivedby.py',
+ 'abandonconflict.py',
+ 'bip68-112-113-p2p.py',
+ 'rawtransactions.py',
+ 'reindex.py',
+ # vv Tests less than 30s vv
+ 'mempool_resurrect_test.py',
+ 'txn_doublespend.py --mineblock',
+ 'txn_clone.py',
+ 'getchaintips.py',
+ 'rest.py',
+ 'mempool_spendcoinbase.py',
+ 'mempool_reorg.py',
+ 'httpbasics.py',
+ 'multi_rpc.py',
+ 'proxy_test.py',
+ 'signrawtransactions.py',
+ 'nodehandling.py',
+ 'decodescript.py',
+ 'blockchain.py',
+ 'disablewallet.py',
+ 'keypool.py',
+ 'p2p-mempool.py',
+ 'prioritise_transaction.py',
+ 'invalidblockrequest.py',
+ 'invalidtxrequest.py',
+ 'p2p-versionbits-warning.py',
+ 'preciousblock.py',
+ 'importprunedfunds.py',
+ 'signmessages.py',
+ 'nulldummy.py',
+ 'import-rescan.py',
+ 'bumpfee.py',
+ 'rpcnamedargs.py',
+ 'listsinceblock.py',
+ 'p2p-leaktests.py',
+]
+
+ZMQ_SCRIPTS = [
+ # ZMQ test can only be run if bitcoin was built with zmq enabled.
+ # call test_runner.py with --nozmq to explicitly exclude these tests.
+ "zmq_test.py"]
+
+EXTENDED_SCRIPTS = [
+ # These tests are not run by the travis build process.
+ # Longest test should go first, to favor running tests in parallel
+ 'pruning.py',
+ # vv Tests less than 20m vv
+ 'smartfees.py',
+ # vv Tests less than 5m vv
+ 'maxuploadtarget.py',
+ 'mempool_packages.py',
+ # vv Tests less than 2m vv
+ 'bip68-sequence.py',
+ 'getblocktemplate_longpoll.py',
+ 'p2p-timeouts.py',
+ # vv Tests less than 60s vv
+ 'bip9-softforks.py',
+ 'p2p-feefilter.py',
+ 'rpcbind_test.py',
+ # vv Tests less than 30s vv
+ 'bip65-cltv.py',
+ 'bip65-cltv-p2p.py',
+ 'bipdersig-p2p.py',
+ 'bipdersig.py',
+ 'getblocktemplate_proposals.py',
+ 'txn_doublespend.py',
+ 'txn_clone.py --mineblock',
+ 'forknotify.py',
+ 'invalidateblock.py',
+ 'maxblocksinflight.py',
+ 'p2p-acceptblock.py',
+ 'replace-by-fee.py',
+]
+
+ALL_SCRIPTS = BASE_SCRIPTS + ZMQ_SCRIPTS + EXTENDED_SCRIPTS
+
+def main():
+ # Parse arguments and pass through unrecognised args
+ parser = argparse.ArgumentParser(add_help=False,
+ usage='%(prog)s [test_runner.py options] [script options] [scripts]',
+ description=__doc__,
+ epilog='''
+ Help text and arguments for individual test script:''',
+ formatter_class=argparse.RawTextHelpFormatter)
+ parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
+ parser.add_argument('--exclude', '-x', help='specify a comma-separated list of scripts to exclude. Do not include the .py extension in the name.')
+ parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
+ parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
+ parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
+ parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
+ parser.add_argument('--nozmq', action='store_true', help='do not run the zmq tests')
+ args, unknown_args = parser.parse_known_args()
+
+ # Create a set to store arguments and create the passon string
+ tests = set(arg for arg in unknown_args if arg[:2] != "--")
+ passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
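+ # e.g. (illustrative) "test_runner.py wallet.py --tracerpc" selects wallet.py
+ # as a test and forwards --tracerpc to the test script itself.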
+
+ # Read config generated by configure.
+ config = configparser.ConfigParser()
+ config.read_file(open(os.path.dirname(__file__) + "/config.ini"))
+
+ enable_wallet = config["components"].getboolean("ENABLE_WALLET")
+ enable_utils = config["components"].getboolean("ENABLE_UTILS")
+ enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
+ enable_zmq = config["components"].getboolean("ENABLE_ZMQ") and not args.nozmq
+
+ if config["environment"]["EXEEXT"] == ".exe" and not args.force:
+ # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
+ # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
+ print("Tests currently disabled on Windows by default. Use --force option to enable")
+ sys.exit(0)
+
+ if not (enable_wallet and enable_utils and enable_bitcoind):
+ print("No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
+ print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
+ sys.exit(0)
+
+ # python3-zmq may not be installed. Handle this gracefully and with some helpful info
+ if enable_zmq:
+ try:
+ import zmq
+ except ImportError:
+ print("ERROR: \"import zmq\" failed. Use -nozmq to run without the ZMQ tests."
+ "To run zmq tests, see dependency info in /test/README.md.")
+ raise
+
+ # Build list of tests
+ if tests:
+ # Individual tests have been specified. Run specified tests that exist
+ # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
+ test_list = [t for t in ALL_SCRIPTS if
+ (t in tests or re.sub(".py$", "", t) in tests)]
+ else:
+ # No individual tests have been specified. Run base tests, and
+ # optionally ZMQ tests and extended tests.
+ test_list = BASE_SCRIPTS
+ if enable_zmq:
+ test_list += ZMQ_SCRIPTS
+ if args.extended:
+ test_list += EXTENDED_SCRIPTS
+ # TODO: BASE_SCRIPTS and EXTENDED_SCRIPTS are sorted by runtime
+ # (for parallel running efficiency). This combined list is no
+ # longer sorted.
+
+ # Remove the test cases that the user has explicitly asked to exclude.
+ if args.exclude:
+ for exclude_test in args.exclude.split(','):
+ if exclude_test + ".py" in test_list:
+ test_list.remove(exclude_test + ".py")
+
+ if not test_list:
+ print("No valid test scripts specified. Check that your test is in one "
+ "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
+ sys.exit(0)
+
+ if args.help:
+ # Print help for test_runner.py, then print help of the first script and exit.
+ parser.print_help()
+ subprocess.check_call((config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0]).split() + ['-h'])
+ sys.exit(0)
+
+ run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], args.jobs, args.coverage, passon_args)
+
+def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=False, args=None):
+ args = args or [] # avoid a mutable default argument
+ BOLD = ("","")
+ if os.name == 'posix':
+ # primitive formatting on supported
+ # terminals via ANSI escape sequences:
+ BOLD = ('\033[0m', '\033[1m')
+
+ # Set env vars
+ if "BITCOIND" not in os.environ:
+ os.environ["BITCOIND"] = build_dir + '/src/bitcoind' + exeext
+
+ tests_dir = src_dir + '/test/functional/'
+
+ flags = ["--srcdir={}/src".format(build_dir)] + args
+ flags.append("--cachedir=%s/test/cache" % build_dir)
+
+ if enable_coverage:
+ coverage = RPCCoverage()
+ flags.append(coverage.flag)
+ print("Initializing coverage directory at %s\n" % coverage.dir)
+ else:
+ coverage = None
+
+ if len(test_list) > 1 and jobs > 1:
+ # Populate cache
+ subprocess.check_output([tests_dir + 'create_cache.py'] + flags)
+
+ # Run tests
+ all_passed = True
+ time_sum = 0
+ time0 = time.time()
+
+ job_queue = TestHandler(jobs, tests_dir, test_list, flags)
+
+ max_len_name = len(max(test_list, key=len))
+ results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
+ for _ in range(len(test_list)):
+ (name, stdout, stderr, passed, duration) = job_queue.get_next()
+ all_passed = all_passed and passed
+ time_sum += duration
+
+ print('\n' + BOLD[1] + name + BOLD[0] + ":")
+ print('' if passed else stdout + '\n', end='')
+ print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
+ print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
+
+ results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
+
+ results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
+ print(results)
+ print("\nRuntime: %s s" % (int(time.time() - time0)))
+
+ if coverage:
+ coverage.report_rpc_coverage()
+
+ print("Cleaning up coverage data")
+ coverage.cleanup()
+
+ sys.exit(not all_passed)
+
+class TestHandler:
+ """
+ Trigger the testscrips passed in via the list.
+ """
+
+ def __init__(self, num_tests_parallel, tests_dir, test_list=None, flags=None):
+ assert num_tests_parallel >= 1
+ self.num_jobs = num_tests_parallel
+ self.tests_dir = tests_dir
+ self.test_list = test_list
+ self.flags = flags
+ self.num_running = 0
+ # In case there is a graveyard of zombie bitcoinds, we can apply a
+ # pseudorandom offset to hopefully jump over them.
+ # (625 is PORT_RANGE/MAX_NODES)
+ self.portseed_offset = int(time.time() * 1000) % 625
+ self.jobs = []
+
+ def get_next(self):
+ while self.num_running < self.num_jobs and self.test_list:
+ # Add tests
+ self.num_running += 1
+ t = self.test_list.pop(0)
+ port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
+ log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
+ log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
+ self.jobs.append((t,
+ time.time(),
+ subprocess.Popen((self.tests_dir + t).split() + self.flags + port_seed,
+ universal_newlines=True,
+ stdout=log_stdout,
+ stderr=log_stderr),
+ log_stdout,
+ log_stderr))
+ if not self.jobs:
+ raise IndexError('pop from empty list')
+ while True:
+ # Return first proc that finishes
+ time.sleep(.5)
+ for j in self.jobs:
+ (name, time0, proc, log_out, log_err) = j
+ if proc.poll() is not None:
+ log_out.seek(0)
+ log_err.seek(0)
+ [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
+ log_out.close()
+ log_err.close()
+ passed = stderr == "" and proc.returncode == 0
+ self.num_running -= 1
+ self.jobs.remove(j)
+ return name, stdout, stderr, passed, int(time.time() - time0)
+ print('.', end='', flush=True)
+
+
+class RPCCoverage(object):
+ """
+ Coverage reporting utilities for test_runner.
+
+ Coverage calculation works by having each test script subprocess write
+ coverage files into a particular directory. These files contain the RPC
+ commands invoked during testing, as well as a complete listing of RPC
+ commands per `bitcoin-cli help` (`rpc_interface.txt`).
+
+ After all tests complete, the commands run are combined and diff'd against
+ the complete list to calculate uncovered RPC commands.
+
+ See also: test/functional/test_framework/coverage.py
+
+ """
+ def __init__(self):
+ self.dir = tempfile.mkdtemp(prefix="coverage")
+ self.flag = '--coveragedir=%s' % self.dir
+
+ def report_rpc_coverage(self):
+ """
+ Print out RPC commands that were unexercised by tests.
+
+ """
+ uncovered = self._get_uncovered_rpc_commands()
+
+ if uncovered:
+ print("Uncovered RPC commands:")
+ print("".join((" - %s\n" % i) for i in sorted(uncovered)))
+ else:
+ print("All RPC commands covered.")
+
+ def cleanup(self):
+ return shutil.rmtree(self.dir)
+
+ def _get_uncovered_rpc_commands(self):
+ """
+ Return a set of currently untested RPC commands.
+
+ """
+ # This is shared from `test/functional/test_framework/coverage.py`
+ reference_filename = 'rpc_interface.txt'
+ coverage_file_prefix = 'coverage.'
+
+ coverage_ref_filename = os.path.join(self.dir, reference_filename)
+ coverage_filenames = set()
+ all_cmds = set()
+ covered_cmds = set()
+
+ if not os.path.isfile(coverage_ref_filename):
+ raise RuntimeError("No coverage reference found")
+
+ with open(coverage_ref_filename, 'r') as f:
+ all_cmds.update([i.strip() for i in f.readlines()])
+
+ for root, dirs, files in os.walk(self.dir):
+ for filename in files:
+ if filename.startswith(coverage_file_prefix):
+ coverage_filenames.add(os.path.join(root, filename))
+
+ for filename in coverage_filenames:
+ with open(filename, 'r') as f:
+ covered_cmds.update([i.strip() for i in f.readlines()])
+
+ return all_cmds - covered_cmds
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/functional/txn_clone.py b/test/functional/txn_clone.py
new file mode 100755
index 0000000000..7a3b8d3474
--- /dev/null
+++ b/test/functional/txn_clone.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class TxnMallTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 4
+ self.setup_clean_chain = False
+
+ def add_options(self, parser):
+ parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
+ help="Test double-spend of 1-confirmed transaction")
+
+ def setup_network(self):
+ # Start with split network:
+ return super(TxnMallTest, self).setup_network(True)
+
+ def run_test(self):
+ # All nodes should start with 1,250 BTC:
+ starting_balance = 1250
+ for i in range(4):
+ assert_equal(self.nodes[i].getbalance(), starting_balance)
+ self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
+
+ # Assign coins to foo and bar accounts:
+ self.nodes[0].settxfee(.001)
+
+ node0_address_foo = self.nodes[0].getnewaddress("foo")
+ fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
+ fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
+
+ node0_address_bar = self.nodes[0].getnewaddress("bar")
+ fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
+ fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
+
+ assert_equal(self.nodes[0].getbalance(""),
+ starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
+
+ # Coins are sent to node1_address
+ node1_address = self.nodes[1].getnewaddress("from0")
+
+ # Send tx1, and another transaction tx2 that won't be cloned
+ txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
+ txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
+
+ # Construct a clone of tx1, to be malleated
+ rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
+ clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
+ clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
+ rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
+ clone_locktime = rawtx1["locktime"]
+ clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
+
+ # createrawtransaction randomizes the order of its outputs, so swap them if necessary.
+ # output 0 is at version+#inputs+input+sigstub+sequence+#outputs
+ # 40 BTC serialized is 00286bee00000000
+ pos0 = 2*(4+1+36+1+4+1)
+ hex40 = "00286bee00000000"
+ output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
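+ # (Illustrative: pos0 = 2*(4+1+36+1+4+1) = 94 hex chars -- 4-byte version,
+ # 1-byte input count, 36-byte outpoint, 1-byte scriptSig stub, 4-byte
+ # sequence and 1-byte output count precede output 0. Each output is a
+ # 16-hex-char value, a 2-hex-char script length, and the script itself.)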
+ if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or
+ rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40):
+ output0 = clone_raw[pos0 : pos0 + output_len]
+ output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
+ clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
+
+ # Use a different signature hash type to sign. This creates an equivalent but malleated clone.
+ # Don't send the clone anywhere yet
+ tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
+ assert_equal(tx1_clone["complete"], True)
+
+ # Have node0 mine a block, if requested:
+ if (self.options.mine_block):
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes[0:2])
+
+ tx1 = self.nodes[0].gettransaction(txid1)
+ tx2 = self.nodes[0].gettransaction(txid2)
+
+ # Node0's balance should be starting balance, plus 50BTC for another
+ # matured block, minus tx1 and tx2 amounts, and minus transaction fees:
+ expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
+ if self.options.mine_block: expected += 50
+ expected += tx1["amount"] + tx1["fee"]
+ expected += tx2["amount"] + tx2["fee"]
+ assert_equal(self.nodes[0].getbalance(), expected)
+
+ # foo and bar accounts should be debited:
+ assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
+ assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
+
+ if self.options.mine_block:
+ assert_equal(tx1["confirmations"], 1)
+ assert_equal(tx2["confirmations"], 1)
+ # Node1's "from0" balance should be both transaction amounts:
+ assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
+ else:
+ assert_equal(tx1["confirmations"], 0)
+ assert_equal(tx2["confirmations"], 0)
+
+ # Send clone and its parent to miner
+ self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
+ txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
+ # ... mine a block...
+ self.nodes[2].generate(1)
+
+ # Reconnect the split network, and sync chain:
+ connect_nodes(self.nodes[1], 2)
+ self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
+ self.nodes[2].sendrawtransaction(tx2["hex"])
+ self.nodes[2].generate(1) # Mine another block to make sure we sync
+ sync_blocks(self.nodes)
+
+ # Re-fetch transaction info:
+ tx1 = self.nodes[0].gettransaction(txid1)
+ tx1_clone = self.nodes[0].gettransaction(txid1_clone)
+ tx2 = self.nodes[0].gettransaction(txid2)
+
+ # Verify expected confirmations
+ assert_equal(tx1["confirmations"], -2)
+ assert_equal(tx1_clone["confirmations"], 2)
+ assert_equal(tx2["confirmations"], 1)
+
+ # Check node0's total balance; should be the same as before the clone,
+ # plus 100 BTC for 2 matured blocks, less the possibly orphaned matured subsidy
+ expected += 100
+ if (self.options.mine_block):
+ expected -= 50
+ assert_equal(self.nodes[0].getbalance(), expected)
+ assert_equal(self.nodes[0].getbalance("*", 0), expected)
+
+ # Check node0's individual account balances.
+ # "foo" should have been debited by the equivalent clone of tx1
+ assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
+ # "bar" should have been debited by (possibly unconfirmed) tx2
+ assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
+ # "" should have starting balance, less funding txes, plus subsidies
+ assert_equal(self.nodes[0].getbalance("", 0), starting_balance
+ - 1219
+ + fund_foo_tx["fee"]
+ - 29
+ + fund_bar_tx["fee"]
+ + 100)
+
+ # Node1's "from0" account balance
+ assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
+
+if __name__ == '__main__':
+ TxnMallTest().main()
+
diff --git a/test/functional/txn_doublespend.py b/test/functional/txn_doublespend.py
new file mode 100755
index 0000000000..5b12cf4c29
--- /dev/null
+++ b/test/functional/txn_doublespend.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the wallet accounts properly when there is a double-spend conflict."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class TxnMallTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 4
+ self.setup_clean_chain = False
+
+ def add_options(self, parser):
+ parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
+ help="Test double-spend of 1-confirmed transaction")
+
+ def setup_network(self):
+ # Start with split network:
+ return super(TxnMallTest, self).setup_network(True)
+
+ def run_test(self):
+ # All nodes should start with 1,250 BTC:
+ starting_balance = 1250
+ for i in range(4):
+ assert_equal(self.nodes[i].getbalance(), starting_balance)
+ self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
+
+ # Assign coins to foo and bar accounts:
+ node0_address_foo = self.nodes[0].getnewaddress("foo")
+ fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
+ fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
+
+ node0_address_bar = self.nodes[0].getnewaddress("bar")
+ fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
+ fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
+
+ assert_equal(self.nodes[0].getbalance(""),
+ starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
+
+ # Coins are sent to node1_address
+ node1_address = self.nodes[1].getnewaddress("from0")
+
+ # First: use raw transaction API to send 1240 BTC to node1_address,
+ # but don't broadcast:
+ doublespend_fee = Decimal('-.02')
+ rawtx_input_0 = {}
+ rawtx_input_0["txid"] = fund_foo_txid
+ rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219)
+ rawtx_input_1 = {}
+ rawtx_input_1["txid"] = fund_bar_txid
+ rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29)
+ inputs = [rawtx_input_0, rawtx_input_1]
+ change_address = self.nodes[0].getnewaddress()
+ outputs = {}
+ outputs[node1_address] = 1240
+ outputs[change_address] = 1248 - 1240 + doublespend_fee
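+ # (Inputs total 1219 + 29 = 1248 BTC; 1240 BTC goes to node1 and
+ # 1248 - 1240 - 0.02 = 7.98 BTC returns as change, leaving a 0.02 BTC fee.)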
+ rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
+ doublespend = self.nodes[0].signrawtransaction(rawtx)
+ assert_equal(doublespend["complete"], True)
+
+ # Create two spends, each using one 50 BTC coin
+ txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
+ txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
+
+ # Have node0 mine a block:
+ if (self.options.mine_block):
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes[0:2])
+
+ tx1 = self.nodes[0].gettransaction(txid1)
+ tx2 = self.nodes[0].gettransaction(txid2)
+
+ # Node0's balance should be starting balance, plus 50BTC for another
+ # matured block, minus 40, minus 20, and minus transaction fees:
+ expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
+ if self.options.mine_block: expected += 50
+ expected += tx1["amount"] + tx1["fee"]
+ expected += tx2["amount"] + tx2["fee"]
+ assert_equal(self.nodes[0].getbalance(), expected)
+
+ # foo and bar accounts should be debited:
+ assert_equal(self.nodes[0].getbalance("foo", 0), 1219+tx1["amount"]+tx1["fee"])
+ assert_equal(self.nodes[0].getbalance("bar", 0), 29+tx2["amount"]+tx2["fee"])
+
+ if self.options.mine_block:
+ assert_equal(tx1["confirmations"], 1)
+ assert_equal(tx2["confirmations"], 1)
+ # Node1's "from0" balance should be both transaction amounts:
+ assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
+ else:
+ assert_equal(tx1["confirmations"], 0)
+ assert_equal(tx2["confirmations"], 0)
+
+ # Now give doublespend and its parents to miner:
+ self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
+ self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
+ doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
+ # ... mine a block...
+ self.nodes[2].generate(1)
+
+ # Reconnect the split network, and sync chain:
+ connect_nodes(self.nodes[1], 2)
+ self.nodes[2].generate(1) # Mine another block to make sure we sync
+ sync_blocks(self.nodes)
+ assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
+
+ # Re-fetch transaction info:
+ tx1 = self.nodes[0].gettransaction(txid1)
+ tx2 = self.nodes[0].gettransaction(txid2)
+
+ # Both transactions should be conflicted
+ assert_equal(tx1["confirmations"], -2)
+ assert_equal(tx2["confirmations"], -2)
+
+ # Node0's total balance should be starting balance, plus 100BTC for
+ # two more matured blocks, minus 1240 for the double-spend, plus fees (which are
+ # negative):
+ expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
+ assert_equal(self.nodes[0].getbalance(), expected)
+ assert_equal(self.nodes[0].getbalance("*"), expected)
+
+ # Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies +
+ # fees (which are negative)
+ assert_equal(self.nodes[0].getbalance("foo"), 1219)
+ assert_equal(self.nodes[0].getbalance("bar"), 29)
+ assert_equal(self.nodes[0].getbalance(""), starting_balance
+ -1219
+ - 29
+ -1240
+ + 100
+ + fund_foo_tx["fee"]
+ + fund_bar_tx["fee"]
+ + doublespend_fee)
+
+ # Node1's "from0" account balance should be just the doublespend:
+ assert_equal(self.nodes[1].getbalance("from0"), 1240)
+
+if __name__ == '__main__':
+ TxnMallTest().main()
+
diff --git a/test/functional/wallet-accounts.py b/test/functional/wallet-accounts.py
new file mode 100755
index 0000000000..ea12d4ec22
--- /dev/null
+++ b/test/functional/wallet-accounts.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test account RPCs.
+
+RPCs tested are:
+ - getaccountaddress
+ - getaddressesbyaccount
+ - setaccount
+ - sendfrom (with account arguments)
+ - move (with account arguments)
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ start_nodes,
+ assert_equal,
+)
+
+class WalletAccountsTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+ self.node_args = [[]]
+
+ def setup_network(self):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.node_args)
+ self.is_network_split = False
+
+ def run_test(self):
+ node = self.nodes[0]
+ # Check that there's no UTXO on any of the nodes
+ assert_equal(len(node.listunspent()), 0)
+
+ node.generate(101)
+
+ assert_equal(node.getbalance(), 50)
+
+ accounts = ["a","b","c","d","e"]
+ amount_to_send = 1.0
+ account_addresses = dict()
+ for account in accounts:
+ address = node.getaccountaddress(account)
+ account_addresses[account] = address
+
+ node.getnewaddress(account)
+ assert_equal(node.getaccount(address), account)
+ assert(address in node.getaddressesbyaccount(account))
+
+ node.sendfrom("", address, amount_to_send)
+
+ node.generate(1)
+
+ for i in range(len(accounts)):
+ from_account = accounts[i]
+ to_account = accounts[(i+1)%len(accounts)]
+ to_address = account_addresses[to_account]
+ node.sendfrom(from_account, to_address, amount_to_send)
+
+ node.generate(1)
+
+ for account in accounts:
+ address = node.getaccountaddress(account)
+ assert(address != account_addresses[account])
+ assert_equal(node.getreceivedbyaccount(account), 2)
+ node.move(account, "", node.getbalance(account))
+
+ node.generate(101)
+
+ expected_account_balances = {"": 5200}
+ for account in accounts:
+ expected_account_balances[account] = 0
+
+ assert_equal(node.listaccounts(), expected_account_balances)
+
+ assert_equal(node.getbalance(""), 5200)
+
+ for account in accounts:
+ address = node.getaccountaddress("")
+ node.setaccount(address, account)
+ assert(address in node.getaddressesbyaccount(account))
+ assert(address not in node.getaddressesbyaccount(""))
+
+ for account in accounts:
+ addresses = []
+ for x in range(10):
+ addresses.append(node.getnewaddress())
+ multisig_address = node.addmultisigaddress(5, addresses, account)
+ node.sendfrom("", multisig_address, 50)
+
+ node.generate(101)
+
+ for account in accounts:
+ assert_equal(node.getbalance(account), 50)
+
+if __name__ == '__main__':
+ WalletAccountsTest().main()
diff --git a/test/functional/wallet-dump.py b/test/functional/wallet-dump.py
new file mode 100755
index 0000000000..b819b72b75
--- /dev/null
+++ b/test/functional/wallet-dump.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the dumpwallet RPC."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (start_nodes, start_node, assert_equal, bitcoind_processes)
+
+
+def read_dump(file_name, addrs, hd_master_addr_old):
+ """
+ Read the given dump, count the addrs that match, count change and reserve.
+ Also check that the old hd_master is inactive
+ """
+ with open(file_name, encoding='utf8') as inputfile:
+ found_addr = 0
+ found_addr_chg = 0
+ found_addr_rsv = 0
+ hd_master_addr_ret = None
+ for line in inputfile:
+ # only read non-comment lines
+ if line[0] != "#" and len(line) > 10:
+ # split out some data
+ key_label, comment = line.split("#")
+ # key = key_label.split(" ")[0]
+ keytype = key_label.split(" ")[2]
+ if len(comment) > 1:
+ addr_keypath = comment.split(" addr=")[1]
+ addr = addr_keypath.split(" ")[0]
+ keypath = None
+ if keytype == "inactivehdmaster=1":
+ # ensure the old master is still available
+ assert(hd_master_addr_old == addr)
+ elif keytype == "hdmaster=1":
+ # ensure we have generated a new hd master key
+ assert(hd_master_addr_old != addr)
+ hd_master_addr_ret = addr
+ else:
+ keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
+
+ # count key types
+ for addrObj in addrs:
+ if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=":
+ found_addr += 1
+ break
+ elif keytype == "change=1":
+ found_addr_chg += 1
+ break
+ elif keytype == "reserve=1":
+ found_addr_rsv += 1
+ break
+ return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
+
+
+class WalletDumpTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = False
+ self.num_nodes = 1
+ self.extra_args = [["-keypool=90"]]
+
+ def setup_network(self, split=False):
+ # Use 1 minute timeout because the initial getnewaddress RPC can take
+ # longer than the default 30 seconds due to an expensive
+ # CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in
+ # the test often takes even longer.
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args, timewait=60)
+
+ def run_test(self):
+ tmpdir = self.options.tmpdir
+
+ # generate 20 addresses to compare against the dump
+ test_addr_count = 20
+ addrs = []
+ for i in range(test_addr_count):
+ addr = self.nodes[0].getnewaddress()
+ vaddr = self.nodes[0].validateaddress(addr) # required to get the hd keypath
+ addrs.append(vaddr)
+ # Should be a no-op:
+ self.nodes[0].keypoolrefill()
+
+ # dump unencrypted wallet
+ self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
+
+ found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
+ read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
+ assert_equal(found_addr, test_addr_count) # all keys must be in the dump
+ assert_equal(found_addr_chg, 50) # 50 blocks were mined
+ assert_equal(found_addr_rsv, 90 + 1) # keypool size (TODO: fix off-by-one)
+
+ # encrypt wallet, restart, unlock and dump
+ self.nodes[0].encryptwallet('test')
+ bitcoind_processes[0].wait()
+ self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
+ self.nodes[0].walletpassphrase('test', 10)
+ # Should be a no-op:
+ self.nodes[0].keypoolrefill()
+ self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
+
+ found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
+ read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
+ assert_equal(found_addr, test_addr_count)
+ assert_equal(found_addr_chg, 90 + 1 + 50) # old reserve keys are marked as change now
+ assert_equal(found_addr_rsv, 90 + 1) # keypool size (TODO: fix off-by-one)
+
+if __name__ == '__main__':
+ WalletDumpTest().main()
diff --git a/test/functional/wallet-hd.py b/test/functional/wallet-hd.py
new file mode 100755
index 0000000000..c40662dc3d
--- /dev/null
+++ b/test/functional/wallet-hd.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test Hierarchical Deterministic wallet function."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ start_nodes,
+ start_node,
+ assert_equal,
+ connect_nodes_bi,
+ assert_start_raises_init_error
+)
+import os
+import shutil
+
+
+class WalletHDTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 2
+ self.node_args = [['-usehd=0'], ['-usehd=1', '-keypool=0']]
+
+ def setup_network(self):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.node_args)
+ self.is_network_split = False
+ connect_nodes_bi(self.nodes, 0, 1)
+
+ def run_test(self):
+ tmpdir = self.options.tmpdir
+
+ # Make sure can't switch off usehd after wallet creation
+ self.stop_node(1)
+ assert_start_raises_init_error(1, self.options.tmpdir, ['-usehd=0'], 'already existing HD wallet')
+ self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
+ connect_nodes_bi(self.nodes, 0, 1)
+
+ # Make sure we use hd, keep masterkeyid
+ masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
+ assert_equal(len(masterkeyid), 40)
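+ # (40 hex characters correspond to the 160-bit key-ID hash of the HD master key.)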
+
+ # Import a non-HD private key in the HD wallet
+ non_hd_add = self.nodes[0].getnewaddress()
+ self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
+
+ # This should be enough to keep the master key and the non-HD key
+ self.nodes[1].backupwallet(tmpdir + "/hd.bak")
+ #self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
+
+ # Derive some HD addresses and remember the last one.
+ # Also send funds to each address.
+ self.nodes[0].generate(101)
+ hd_add = None
+ num_hd_adds = 300
+ for i in range(num_hd_adds):
+ hd_add = self.nodes[1].getnewaddress()
+ hd_info = self.nodes[1].validateaddress(hd_add)
+ assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i+1)+"'")
+ assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
+ self.nodes[0].sendtoaddress(hd_add, 1)
+ self.nodes[0].generate(1)
+ self.nodes[0].sendtoaddress(non_hd_add, 1)
+ self.nodes[0].generate(1)
+
+ self.sync_all()
+ assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
+
+ self.log.info("Restore backup ...")
+ self.stop_node(1)
+ os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
+ shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
+ self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
+ #connect_nodes_bi(self.nodes, 0, 1)
+
+ # Assert that derivation is deterministic
+ hd_add_2 = None
+ for i in range(num_hd_adds):
+ hd_add_2 = self.nodes[1].getnewaddress()
+ hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
+ assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(i+1)+"'")
+ assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
+ assert_equal(hd_add, hd_add_2)
+
+ # Needs rescan
+ self.stop_node(1)
+ self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1] + ['-rescan'])
+ #connect_nodes_bi(self.nodes, 0, 1)
+ assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
+
+
+if __name__ == '__main__':
+ WalletHDTest().main()
diff --git a/test/functional/wallet.py b/test/functional/wallet.py
new file mode 100755
index 0000000000..80f74fa108
--- /dev/null
+++ b/test/functional/wallet.py
@@ -0,0 +1,395 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the wallet."""
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+class WalletTest(BitcoinTestFramework):
+
+ def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
+ """Return curr_balance after asserting the fee was in range"""
+ fee = balance_with_fee - curr_balance
+ assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
+ return curr_balance
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 4
+ self.extra_args = [['-usehd={:d}'.format(i%2==0)] for i in range(4)]
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(3, self.options.tmpdir, self.extra_args[:3])
+ connect_nodes_bi(self.nodes,0,1)
+ connect_nodes_bi(self.nodes,1,2)
+ connect_nodes_bi(self.nodes,0,2)
+ self.is_network_split=False
+ self.sync_all()
+
+ def run_test(self):
+
+ # Check that there's no UTXO on any of the nodes
+ assert_equal(len(self.nodes[0].listunspent()), 0)
+ assert_equal(len(self.nodes[1].listunspent()), 0)
+ assert_equal(len(self.nodes[2].listunspent()), 0)
+
+ self.log.info("Mining blocks...")
+
+ self.nodes[0].generate(1)
+
+ walletinfo = self.nodes[0].getwalletinfo()
+ assert_equal(walletinfo['immature_balance'], 50)
+ assert_equal(walletinfo['balance'], 0)
+
+ self.sync_all()
+ self.nodes[1].generate(101)
+ self.sync_all()
+
+ assert_equal(self.nodes[0].getbalance(), 50)
+ assert_equal(self.nodes[1].getbalance(), 50)
+ assert_equal(self.nodes[2].getbalance(), 0)
+
+ # Check that only first and second nodes have UTXOs
+ assert_equal(len(self.nodes[0].listunspent()), 1)
+ assert_equal(len(self.nodes[1].listunspent()), 1)
+ assert_equal(len(self.nodes[2].listunspent()), 0)
+
+ # Send 21 BTC from 0 to 2 using sendtoaddress call.
+ self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
+ self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
+
+ walletinfo = self.nodes[0].getwalletinfo()
+ assert_equal(walletinfo['immature_balance'], 0)
+
+ # Have node0 mine a block, so it collects its own fee.
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ # Exercise locking of unspent outputs
+ unspent_0 = self.nodes[2].listunspent()[0]
+ unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
+ self.nodes[2].lockunspent(False, [unspent_0])
+ assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
+ assert_equal([unspent_0], self.nodes[2].listlockunspent())
+ self.nodes[2].lockunspent(True, [unspent_0])
+ assert_equal(len(self.nodes[2].listlockunspent()), 0)
+
+ # Have node1 generate 100 blocks (so node0 can recover the fee)
+ self.nodes[1].generate(100)
+ self.sync_all()
+
+ # node0 should end up with 100 BTC in block rewards plus fees,
+ # minus the 21 BTC (plus fees) sent to node2
+ assert_equal(self.nodes[0].getbalance(), 100-21)
+ assert_equal(self.nodes[2].getbalance(), 21)
+
+ # Node0 should have two unspent outputs.
+ # Create a couple of transactions to send them to node2, submit them through
+ # node1, and make sure both node0 and node2 pick them up properly:
+ node0utxos = self.nodes[0].listunspent(1)
+ assert_equal(len(node0utxos), 2)
+
+ # create both transactions
+ txns_to_send = []
+ for utxo in node0utxos:
+ inputs = []
+ outputs = {}
+ inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
+ outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"] - 3
+ raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
+ txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
+
+ # Have node 1 (miner) send the transactions
+ self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
+ self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
+
+ # Have node1 mine a block to confirm transactions:
+ self.nodes[1].generate(1)
+ self.sync_all()
+
+ assert_equal(self.nodes[0].getbalance(), 0)
+ assert_equal(self.nodes[2].getbalance(), 94)
+ assert_equal(self.nodes[2].getbalance("from1"), 94-21)
+
+ # Send 10 BTC normal
+ address = self.nodes[0].getnewaddress("test")
+ fee_per_byte = Decimal('0.001') / 1000
+ self.nodes[2].settxfee(fee_per_byte * 1000)
+ txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
+ self.nodes[2].generate(1)
+ self.sync_all()
+ node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('84'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
+ assert_equal(self.nodes[0].getbalance(), Decimal('10'))
+
+ # Send 10 BTC with subtract fee from amount
+ txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
+ self.nodes[2].generate(1)
+ self.sync_all()
+ node_2_bal -= Decimal('10')
+ assert_equal(self.nodes[2].getbalance(), node_2_bal)
+ node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
+
+ # Sendmany 10 BTC
+ txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [])
+ self.nodes[2].generate(1)
+ self.sync_all()
+ node_0_bal += Decimal('10')
+ node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
+ assert_equal(self.nodes[0].getbalance(), node_0_bal)
+
+ # Sendmany 10 BTC with subtract fee from amount
+ txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address])
+ self.nodes[2].generate(1)
+ self.sync_all()
+ node_2_bal -= Decimal('10')
+ assert_equal(self.nodes[2].getbalance(), node_2_bal)
+ node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
+
+ # Test ResendWalletTransactions:
+ # Create a couple of transactions, then start up a fourth
+ # node (nodes[3]) and ask nodes[0] to rebroadcast.
+ # EXPECT: nodes[3] should have those transactions in its mempool.
+ txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
+ txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+ sync_mempools(self.nodes)
+
+ self.nodes.append(start_node(3, self.options.tmpdir, self.extra_args[3]))
+ connect_nodes_bi(self.nodes, 0, 3)
+ sync_blocks(self.nodes)
+
+ relayed = self.nodes[0].resendwallettransactions()
+ assert_equal(set(relayed), {txid1, txid2})
+ sync_mempools(self.nodes)
+
+ assert(txid1 in self.nodes[3].getrawmempool())
+
+ # Exercise balance rpcs
+ assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
+ assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
+
+ # Check that we can list a zero-value tx as an available coin:
+ # 1. create a raw tx
+ # 2. hex-edit one output down to 0.0
+ # 3. sign and send
+ # 4. check that the recipient (node0) can list the zero-value tx
+ usp = self.nodes[1].listunspent()
+ inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
+ outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
+
+ rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") # zero out the 11.11 BTC output: 11.11 BTC = 1111000000 satoshis = 0x423883c0, serialized little-endian as c0833842 (the upper 4 bytes of the 8-byte amount are already zero)
+ decRawTx = self.nodes[1].decoderawtransaction(rawTx)
+ signedRawTx = self.nodes[1].signrawtransaction(rawTx)
+ decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
+ zeroValueTxid = decRawTx['txid']
+ sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
+
+ self.sync_all()
+ self.nodes[1].generate(1) #mine a block
+ self.sync_all()
+
+ unspentTxs = self.nodes[0].listunspent() # the zero-value tx must appear in listunspent output
+ found = False
+ for uTx in unspentTxs:
+ if uTx['txid'] == zeroValueTxid:
+ found = True
+ assert_equal(uTx['amount'], Decimal('0'))
+ assert(found)
+
+ #do some -walletbroadcast tests
+ stop_nodes(self.nodes)
+ self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
+ connect_nodes_bi(self.nodes,0,1)
+ connect_nodes_bi(self.nodes,1,2)
+ connect_nodes_bi(self.nodes,0,2)
+ self.sync_all()
+
+ txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
+ txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
+ self.nodes[1].generate(1) #mine a block, tx should not be in there
+ self.sync_all()
+ assert_equal(self.nodes[2].getbalance(), node_2_bal) # should be unchanged because the tx was not broadcast
+
+ #now broadcast from another node, mine a block, sync, and check the balance
+ self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
+ self.nodes[1].generate(1)
+ self.sync_all()
+ node_2_bal += 2
+ txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
+ assert_equal(self.nodes[2].getbalance(), node_2_bal)
+
+ #create another tx
+ txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
+
+ #restart the nodes with -walletbroadcast=1
+ stop_nodes(self.nodes)
+ self.nodes = start_nodes(3, self.options.tmpdir)
+ connect_nodes_bi(self.nodes,0,1)
+ connect_nodes_bi(self.nodes,1,2)
+ connect_nodes_bi(self.nodes,0,2)
+ sync_blocks(self.nodes)
+
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+ node_2_bal += 2
+
+ # the tx should now be reflected in the balance, because the restarted nodes rebroadcast it
+ assert_equal(self.nodes[2].getbalance(), node_2_bal)
+
+ # send a tx with the value given as a string (PR #6380)
+ txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
+ txObj = self.nodes[0].gettransaction(txId)
+ assert_equal(txObj['amount'], Decimal('-2'))
+
+ txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
+ txObj = self.nodes[0].gettransaction(txId)
+ assert_equal(txObj['amount'], Decimal('-0.0001'))
+
+ #check if JSON parser can handle scientific notation in strings
+ txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
+ txObj = self.nodes[0].gettransaction(txId)
+ assert_equal(txObj['amount'], Decimal('-0.0001'))
+
+ # This will raise an exception because the amount type is wrong
+ assert_raises_jsonrpc(-3, "Invalid amount", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "1f-4")
+
+ # This will raise an exception since generate does not accept a string
+ assert_raises_jsonrpc(-1, "not an integer", self.nodes[0].generate, "2")
+
+ # Import address and private key to check correct behavior of spendable unspents
+ # 1. Send some coins to generate new UTXO
+ address_to_import = self.nodes[2].getnewaddress()
+ txid = self.nodes[0].sendtoaddress(address_to_import, 1)
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ # 2. Import address from node2 to node1
+ self.nodes[1].importaddress(address_to_import)
+
+ # 3. Validate that the imported address is watch-only on node1
+ assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
+
+ # 4. Check that the unspents after import are not spendable
+ assert_array_result(self.nodes[1].listunspent(),
+ {"address": address_to_import},
+ {"spendable": False})
+
+ # 5. Import private key of the previously imported address on node1
+ priv_key = self.nodes[2].dumpprivkey(address_to_import)
+ self.nodes[1].importprivkey(priv_key)
+
+ # 6. Check that the unspents are now spendable on node1
+ assert_array_result(self.nodes[1].listunspent(),
+ {"address": address_to_import},
+ {"spendable": True})
+
+ # Mine a block from node0 to an address from node1
+ cbAddr = self.nodes[1].getnewaddress()
+ blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0]
+ cbTxId = self.nodes[0].getblock(blkHash)['tx'][0]
+ self.sync_all()
+
+ # Check that the txid and balance are found by node1
+ self.nodes[1].gettransaction(cbTxId)
+
+ # check if wallet or blockchain maintenance changes the balance
+ self.sync_all()
+ blocks = self.nodes[0].generate(2)
+ self.sync_all()
+ balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
+ block_count = self.nodes[0].getblockcount()
+
+ # Check modes:
+ # - True: unicode escaped as \u....
+ # - False: unicode directly as UTF-8
+ for mode in [True, False]:
+ self.nodes[0].ensure_ascii = mode
+ # unicode check: Basic Multilingual Plane, Supplementary Plane respectively
+ for s in [u'рыба', u'𝅘𝅥𝅯']:
+ addr = self.nodes[0].getaccountaddress(s)
+ label = self.nodes[0].getaccount(addr)
+ assert_equal(label, s)
+ assert(s in self.nodes[0].listaccounts().keys())
+ self.nodes[0].ensure_ascii = True # restore to default
+
+ # maintenance tests
+ maintenance = [
+ '-rescan',
+ '-reindex',
+ '-zapwallettxes=1',
+ '-zapwallettxes=2',
+ # disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463
+ # '-salvagewallet',
+ ]
+ chainlimit = 6
+ for m in maintenance:
+ self.log.info("check " + m)
+ stop_nodes(self.nodes)
+ # set lower ancestor limit for later
+ self.nodes = start_nodes(3, self.options.tmpdir, [[m, "-limitancestorcount="+str(chainlimit)]] * 3)
+ while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
+ # -reindex lets RPC come out of warm-up "early"; wait for the block count to catch up
+ time.sleep(0.1)
+ assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
+
+ # Exercise listsinceblock with the last two blocks
+ coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
+ assert_equal(coinbase_tx_1["lastblock"], blocks[1])
+ assert_equal(len(coinbase_tx_1["transactions"]), 1)
+ assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
+ assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
+
+ # == Check that the wallet prefers to use coins that don't exceed mempool limits ==
+
+ # Get all non-zero utxos together
+ chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()]
+ singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
+ self.nodes[0].generate(1)
+ node0_balance = self.nodes[0].getbalance()
+ # Split into two chains
+ rawtx = self.nodes[0].createrawtransaction([{"txid":singletxid, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), chain_addrs[1]:node0_balance/2-Decimal('0.01')})
+ signedtx = self.nodes[0].signrawtransaction(rawtx)
+ singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"])
+ self.nodes[0].generate(1)
+
+ # Make a long chain of unconfirmed payments without hitting mempool limit
+ # Each tx we make leaves only one change output, extending the chain by 1.
+ # Since the amount to send is always much less than the output value, coin
+ # selection only ever needs that one change output, so we should be able to
+ # generate exactly chainlimit txs for each original output.
+ sending_addr = self.nodes[1].getnewaddress()
+ txid_list = []
+ for i in range(chainlimit*2):
+ txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')))
+ assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit*2)
+ assert_equal(len(txid_list), chainlimit*2)
+
+ # Without walletrejectlongchains, we will still generate a txid
+ # The tx will be stored in the wallet but not accepted to the mempool
+ extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))
+ assert(extra_txid not in self.nodes[0].getrawmempool())
+ assert(extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()])
+ self.nodes[0].abandontransaction(extra_txid)
+ total_txs = len(self.nodes[0].listtransactions("*",99999))
+
+ # Try with walletrejectlongchains
+ # Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
+ stop_node(self.nodes[0],0)
+ self.nodes[0] = start_node(0, self.options.tmpdir, ["-walletrejectlongchains", "-limitancestorcount="+str(2*chainlimit)])
+
+ # wait for loadmempool
+ timeout = 10
+ while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit*2):
+ time.sleep(0.5)
+ timeout -= 0.5
+ assert_equal(len(self.nodes[0].getrawmempool()), chainlimit*2)
+
+ node0_balance = self.nodes[0].getbalance()
+ # With walletrejectlongchains we will not create the tx and store it in our wallet.
+ assert_raises_jsonrpc(-4, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
+
+ # Verify nothing new in wallet
+ assert_equal(total_txs, len(self.nodes[0].listtransactions("*",99999)))
+
+if __name__ == '__main__':
+ WalletTest().main()
diff --git a/test/functional/walletbackup.py b/test/functional/walletbackup.py
new file mode 100755
index 0000000000..af1718572f
--- /dev/null
+++ b/test/functional/walletbackup.py
@@ -0,0 +1,198 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the wallet backup features.
+
+Test case is:
+4 nodes. Nodes 1, 2 and 3 send transactions between each other;
+the fourth node is a miner.
+Nodes 1, 2 and 3 each mine a block to start, then the miner
+creates 100 blocks so nodes 1, 2 and 3 each have 50 mature
+coins to spend.
+Then 5 iterations of 1/2/3 sending coins amongst
+themselves to get transactions in the wallets,
+and the miner mining one block.
+
+Wallets are backed up using dumpwallet/backupwallet.
+Then 5 more iterations of transactions and mining a block.
+
+Miner then generates 101 more blocks, so any
+transaction fees paid mature.
+
+Sanity check:
+ Sum(1,2,3,4 balances) == 114*50
+
+Nodes 1/2/3 are shut down and their wallets erased.
+Then restore using the wallet.dat backup, and
+confirm the 1/2/3/4 balances are the same as before.
+
+Shut down again, restore using importwallet,
+and confirm again that the balances are correct.
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from random import randint
+
+class WalletBackupTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 4
+ # Nodes 1, 2 and 3 are spenders; give them each a keypool of 100
+ self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
+
+ # This mirrors how the network was set up in the bash test
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
+ connect_nodes(self.nodes[0], 3)
+ connect_nodes(self.nodes[1], 3)
+ connect_nodes(self.nodes[2], 3)
+ connect_nodes(self.nodes[2], 0)
+ self.is_network_split=False
+ self.sync_all()
+
+ def one_send(self, from_node, to_address):
+ if (randint(1,2) == 1):
+ amount = Decimal(randint(1,10)) / Decimal(10)
+ self.nodes[from_node].sendtoaddress(to_address, amount)
+
+ def do_one_round(self):
+ a0 = self.nodes[0].getnewaddress()
+ a1 = self.nodes[1].getnewaddress()
+ a2 = self.nodes[2].getnewaddress()
+
+ self.one_send(0, a1)
+ self.one_send(0, a2)
+ self.one_send(1, a0)
+ self.one_send(1, a2)
+ self.one_send(2, a0)
+ self.one_send(2, a1)
+
+ # Have the miner (node3) mine a block.
+ # Must sync mempools before mining.
+ sync_mempools(self.nodes)
+ self.nodes[3].generate(1)
+ sync_blocks(self.nodes)
+
+ # As above, this mirrors the original bash test.
+ def start_three(self):
+ self.nodes[0] = start_node(0, self.options.tmpdir)
+ self.nodes[1] = start_node(1, self.options.tmpdir)
+ self.nodes[2] = start_node(2, self.options.tmpdir)
+ connect_nodes(self.nodes[0], 3)
+ connect_nodes(self.nodes[1], 3)
+ connect_nodes(self.nodes[2], 3)
+ connect_nodes(self.nodes[2], 0)
+
+ def stop_three(self):
+ stop_node(self.nodes[0], 0)
+ stop_node(self.nodes[1], 1)
+ stop_node(self.nodes[2], 2)
+
+ def erase_three(self):
+ os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
+ os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
+ os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")
+
+ def run_test(self):
+ self.log.info("Generating initial blockchain")
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+ self.nodes[1].generate(1)
+ sync_blocks(self.nodes)
+ self.nodes[2].generate(1)
+ sync_blocks(self.nodes)
+ self.nodes[3].generate(100)
+ sync_blocks(self.nodes)
+
+ assert_equal(self.nodes[0].getbalance(), 50)
+ assert_equal(self.nodes[1].getbalance(), 50)
+ assert_equal(self.nodes[2].getbalance(), 50)
+ assert_equal(self.nodes[3].getbalance(), 0)
+
+ self.log.info("Creating transactions")
+ # Five rounds of sending each other transactions.
+ for i in range(5):
+ self.do_one_round()
+
+ self.log.info("Backing up")
+ tmpdir = self.options.tmpdir
+ self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
+ self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
+ self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
+ self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
+ self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
+ self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")
+
+ self.log.info("More transactions")
+ for i in range(5):
+ self.do_one_round()
+
+ # Generate 101 more blocks, so any fees paid mature
+ self.nodes[3].generate(101)
+ self.sync_all()
+
+ balance0 = self.nodes[0].getbalance()
+ balance1 = self.nodes[1].getbalance()
+ balance2 = self.nodes[2].getbalance()
+ balance3 = self.nodes[3].getbalance()
+ total = balance0 + balance1 + balance2 + balance3
+
+ # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
+ # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
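+ # (mature = 214 - 100: coinbase outputs need 100 confirmations before they
+ # are spendable)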
+ assert_equal(total, 5700)
+
+ ##
+ # Test restoring spender wallets from backups
+ ##
+ self.log.info("Restoring using wallet.dat")
+ self.stop_three()
+ self.erase_three()
+
+ # Start node2 with no chain
+ shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
+ shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
+
+ # Restore wallets from backup
+ shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
+ shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
+ shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
+
+ self.log.info("Re-starting nodes")
+ self.start_three()
+ sync_blocks(self.nodes)
+
+ assert_equal(self.nodes[0].getbalance(), balance0)
+ assert_equal(self.nodes[1].getbalance(), balance1)
+ assert_equal(self.nodes[2].getbalance(), balance2)
+
+ self.log.info("Restoring using dumped wallet")
+ self.stop_three()
+ self.erase_three()
+
+ # Start node2 with no chain
+ shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
+ shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
+
+ self.start_three()
+
+ assert_equal(self.nodes[0].getbalance(), 0)
+ assert_equal(self.nodes[1].getbalance(), 0)
+ assert_equal(self.nodes[2].getbalance(), 0)
+
+ self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
+ self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
+ self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")
+
+ sync_blocks(self.nodes)
+
+ assert_equal(self.nodes[0].getbalance(), balance0)
+ assert_equal(self.nodes[1].getbalance(), balance1)
+ assert_equal(self.nodes[2].getbalance(), balance2)
+
+
+if __name__ == '__main__':
+ WalletBackupTest().main()
diff --git a/test/functional/zapwallettxes.py b/test/functional/zapwallettxes.py
new file mode 100755
index 0000000000..ce446e44a3
--- /dev/null
+++ b/test/functional/zapwallettxes.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the zapwallettxes functionality.
+
+- start three bitcoind nodes
+- create four transactions on node 0 - two are confirmed and two are
+ unconfirmed.
+- restart node 0 and verify that both the confirmed and the unconfirmed
+  transactions are still available.
+- restart node 0 with -zapwallettxes=1 and verify that the confirmed
+  transactions are still available, but that the unconfirmed transactions
+  have been zapped.
+"""
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+
+
+class ZapWalletTXesTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 3
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
+ connect_nodes_bi(self.nodes,0,1)
+ connect_nodes_bi(self.nodes,1,2)
+ connect_nodes_bi(self.nodes,0,2)
+ self.is_network_split=False
+ self.sync_all()
+
+ def run_test(self):
+ self.log.info("Mining blocks...")
+ self.nodes[0].generate(1)
+ self.sync_all()
+ self.nodes[1].generate(101)
+ self.sync_all()
+
+ assert_equal(self.nodes[0].getbalance(), 50)
+
+ txid0 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
+ txid1 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
+ self.sync_all()
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ txid2 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
+ txid3 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
+
+ tx0 = self.nodes[0].gettransaction(txid0)
+ assert_equal(tx0['txid'], txid0) #tx0 must be available (confirmed)
+
+ tx1 = self.nodes[0].gettransaction(txid1)
+ assert_equal(tx1['txid'], txid1) #tx1 must be available (confirmed)
+
+ tx2 = self.nodes[0].gettransaction(txid2)
+ assert_equal(tx2['txid'], txid2) #tx2 must be available (unconfirmed)
+
+ tx3 = self.nodes[0].gettransaction(txid3)
+ assert_equal(tx3['txid'], txid3) #tx3 must be available (unconfirmed)
+
+ #restart bitcoind
+ self.nodes[0].stop()
+ bitcoind_processes[0].wait()
+ self.nodes[0] = start_node(0,self.options.tmpdir)
+
+ tx3 = self.nodes[0].gettransaction(txid3)
+ assert_equal(tx3['txid'], txid3) #tx3 must be available (unconfirmed)
+
+ self.nodes[0].stop()
+ bitcoind_processes[0].wait()
+
+ #restart bitcoind with zapwallettxes
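+ # -zapwallettxes deletes all wallet transactions on startup and recovers
+ # only those found in the blockchain via rescan (mode 1 keeps tx metadata,
+ # mode 2 drops it)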
+ self.nodes[0] = start_node(0,self.options.tmpdir, ["-zapwallettxes=1"])
+
+ assert_raises(JSONRPCException, self.nodes[0].gettransaction, [txid3])
+ # there must be an exception because the unconfirmed wallet tx (txid3) must be gone by now
+
+ tx0 = self.nodes[0].gettransaction(txid0)
+ assert_equal(tx0['txid'], txid0) #tx0 must still be available because it was confirmed
+
+
+if __name__ == '__main__':
+ ZapWalletTXesTest().main()
diff --git a/test/functional/zmq_test.py b/test/functional/zmq_test.py
new file mode 100755
index 0000000000..e6f18b0b93
--- /dev/null
+++ b/test/functional/zmq_test.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the ZMQ API."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+import zmq
+import struct
+
+class ZMQTest(BitcoinTestFramework):
+
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 4
+
+ port = 28332
+
+ def setup_nodes(self):
+ self.zmqContext = zmq.Context()
+ self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
+ self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
+ self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
+ self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % self.port)
+ return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=[
+ ['-zmqpubhashtx=tcp://127.0.0.1:'+str(self.port), '-zmqpubhashblock=tcp://127.0.0.1:'+str(self.port)],
+ [],
+ [],
+ []
+ ])
+
+ def run_test(self):
+ self.sync_all()
+
+ genhashes = self.nodes[0].generate(1)
+ self.sync_all()
+
+ self.log.info("listen...")
+ msg = self.zmqSubSocket.recv_multipart()
+ topic = msg[0]
+ assert_equal(topic, b"hashtx")
+ body = msg[1]
+ nseq = msg[2]
+ msgSequence = struct.unpack('<I', msg[-1])[-1]
+ assert_equal(msgSequence, 0) #must be sequence 0 on hashtx
+
+ msg = self.zmqSubSocket.recv_multipart()
+ topic = msg[0]
+ body = msg[1]
+ msgSequence = struct.unpack('<I', msg[-1])[-1]
+ assert_equal(msgSequence, 0) #must be sequence 0 on hashblock
+ blkhash = bytes_to_hex_str(body)
+
+ assert_equal(genhashes[0], blkhash) #blockhash from generate must be equal to the hash received over zmq
+
+ n = 10
+ genhashes = self.nodes[1].generate(n)
+ self.sync_all()
+
+ zmqHashes = []
+ blockcount = 0
+ for x in range(0,n*2):
+ msg = self.zmqSubSocket.recv_multipart()
+ topic = msg[0]
+ body = msg[1]
+ if topic == b"hashblock":
+ zmqHashes.append(bytes_to_hex_str(body))
+ msgSequence = struct.unpack('<I', msg[-1])[-1]
+ assert_equal(msgSequence, blockcount+1)
+ blockcount += 1
+
+ for x in range(0,n):
+ assert_equal(genhashes[x], zmqHashes[x]) #blockhash from generate must be equal to the hash received over zmq
+
+ #test tx from a second node
+ hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
+ self.sync_all()
+
+ # now we should receive a zmq msg because the tx was broadcast
+ msg = self.zmqSubSocket.recv_multipart()
+ topic = msg[0]
+ body = msg[1]
+ hashZMQ = ""
+ if topic == b"hashtx":
+ hashZMQ = bytes_to_hex_str(body)
+ msgSequence = struct.unpack('<I', msg[-1])[-1]
+ assert_equal(msgSequence, blockcount+1)
+
+ assert_equal(hashRPC, hashZMQ) #txid from sendtoaddress must be equal to the hash received over zmq
+
+
+if __name__ == '__main__':
+ ZMQTest().main()
diff --git a/test/util/bctest.py b/test/util/bctest.py
new file mode 100644
index 0000000000..dfe3a123d1
--- /dev/null
+++ b/test/util/bctest.py
@@ -0,0 +1,127 @@
+# Copyright 2014 BitPay Inc.
+# Copyright 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+from __future__ import division,print_function,unicode_literals
+import subprocess
+import os
+import json
+import sys
+import binascii
+import difflib
+import logging
+import pprint
+
+def parse_output(a, fmt):
+ """Parse the output according to specified format.
+
+ Raise an error if the output can't be parsed."""
+ if fmt == 'json': # json: compare parsed data
+ return json.loads(a)
+ elif fmt == 'hex': # hex: parse and compare binary data
+ return binascii.a2b_hex(a.strip())
+ else:
+ raise NotImplementedError("Don't know how to compare %s" % fmt)
+
+def bctest(testDir, testObj, buildenv):
+ """Runs a single test, comparing output and RC to expected output and RC.
+
+ Raises an error if input can't be read, executable fails, or output/RC
+ are not as expected. Error is caught by bctester() and reported.
+ """
+ # Get the exec names and arguments
+ execprog = buildenv.BUILDDIR + "/src/" + testObj['exec'] + buildenv.exeext
+ execargs = testObj['args']
+ execrun = [execprog] + execargs
+
+ # Read the input data (if there is any)
+ stdinCfg = None
+ inputData = None
+ if "input" in testObj:
+ filename = testDir + "/" + testObj['input']
+ inputData = open(filename).read()
+ stdinCfg = subprocess.PIPE
+
+ # Read the expected output data (if there is any)
+ outputFn = None
+ outputData = None
+ if "output_cmp" in testObj:
+ outputFn = testObj['output_cmp']
+ outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
+ try:
+ outputData = open(testDir + "/" + outputFn).read()
+ except:
+ logging.error("Output file " + outputFn + " can not be opened")
+ raise
+ if not outputData:
+ logging.error("Output data missing for " + outputFn)
+ raise Exception("Output data missing for " + outputFn)
+
+ # Run the test
+ proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
+ try:
+ outs = proc.communicate(input=inputData)
+ except OSError:
+ logging.error("OSError, Failed to execute " + execprog)
+ raise
+
+ if outputData:
+ data_mismatch, formatting_mismatch = False, False
+ # Parse command output and expected output
+ try:
+ a_parsed = parse_output(outs[0], outputType)
+ except Exception as e:
+ logging.error('Error parsing command output as %s: %s' % (outputType,e))
+ raise
+ try:
+ b_parsed = parse_output(outputData, outputType)
+ except Exception as e:
+ logging.error('Error parsing expected output %s as %s: %s' % (outputFn,outputType,e))
+ raise
+ # Compare data
+ if a_parsed != b_parsed:
+ logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
+ data_mismatch = True
+ # Compare formatting
+ if outs[0] != outputData:
+ error_message = "Output formatting mismatch for " + outputFn + ":\n"
+ error_message += "".join(difflib.context_diff(outputData.splitlines(True),
+ outs[0].splitlines(True),
+ fromfile=outputFn,
+ tofile="returned"))
+ logging.error(error_message)
+ formatting_mismatch = True
+
+ assert not data_mismatch and not formatting_mismatch
+
+ # Compare the return code to the expected return code
+ wantRC = 0
+ if "return_code" in testObj:
+ wantRC = testObj['return_code']
+ if proc.returncode != wantRC:
+ logging.error("Return code mismatch for " + outputFn)
+ raise Exception
+
+def bctester(testDir, input_basename, buildenv):
+ """ Loads and parses the input file, runs all tests and reports results"""
+ input_filename = testDir + "/" + input_basename
+ raw_data = open(input_filename).read()
+ input_data = json.loads(raw_data)
+
+ failed_testcases = []
+
+ for testObj in input_data:
+ try:
+ bctest(testDir, testObj, buildenv)
+ logging.info("PASSED: " + testObj["description"])
+ except:
+ logging.info("FAILED: " + testObj["description"])
+ failed_testcases.append(testObj["description"])
+
+ if failed_testcases:
+ error_message = "FAILED_TESTCASES:\n"
+ error_message += pprint.pformat(failed_testcases, width=400)
+ logging.error(error_message)
+ sys.exit(1)
+ else:
+ sys.exit(0)
diff --git a/test/util/bitcoin-util-test.py b/test/util/bitcoin-util-test.py
new file mode 100755
index 0000000000..e09a25159d
--- /dev/null
+++ b/test/util/bitcoin-util-test.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# Copyright 2014 BitPay Inc.
+# Copyright 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+from __future__ import division,print_function,unicode_literals
+import os
+import sys
+import argparse
+import logging
+
+help_text="""Test framework for bitcoin utils.
+
+Runs automatically during `make check`.
+
+Can also be run manually."""
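+# Example manual invocation (assuming an in-tree autotools build so that
+# buildenv.py has been generated next to this script):
+#     python test/util/bitcoin-util-test.py -v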
+
+if __name__ == '__main__':
+ sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+ import buildenv
+ import bctest
+
+ parser = argparse.ArgumentParser(description=help_text)
+ parser.add_argument('-v', '--verbose', action='store_true')
+ args = parser.parse_args()
+ verbose = args.verbose
+
+ if verbose:
+ level = logging.DEBUG
+ else:
+ level = logging.ERROR
+ formatter = '%(asctime)s - %(levelname)s - %(message)s'
+ # Add the format/level to the logger
+ logging.basicConfig(format=formatter, level=level)
+
+ bctest.bctester(buildenv.SRCDIR + "/test/util/data", "bitcoin-util-test.json", buildenv)
diff --git a/test/util/buildenv.py.in b/test/util/buildenv.py.in
new file mode 100644
index 0000000000..33030b0348
--- /dev/null
+++ b/test/util/buildenv.py.in
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+exeext="@EXEEXT@"
+SRCDIR="@abs_top_srcdir@"
+BUILDDIR="@abs_top_builddir@"
diff --git a/test/util/data/bitcoin-util-test.json b/test/util/data/bitcoin-util-test.json
new file mode 100644
index 0000000000..a80ab51901
--- /dev/null
+++ b/test/util/data/bitcoin-util-test.json
@@ -0,0 +1,356 @@
+[
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "nversion=1"],
+ "output_cmp": "blanktxv1.hex",
+ "description": "Creates a blank v1 transaction"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json","-create", "nversion=1"],
+ "output_cmp": "blanktxv1.json",
+ "description": "Creates a blank v1 transaction (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-"],
+ "input": "blanktxv2.hex",
+ "output_cmp": "blanktxv2.hex",
+ "description": "Creates a blank transaction when nothing is piped into bitcoin-tx"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json","-create"],
+ "output_cmp": "blanktxv2.json",
+ "description": "Creates a blank transaction (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json","-"],
+ "input": "blanktxv2.hex",
+ "output_cmp": "blanktxv2.json",
+ "description": "Creates a blank transaction when nothing is piped into bitcoin-tx (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-", "delin=1"],
+ "input": "tx394b54bb.hex",
+ "output_cmp": "tt-delin1-out.hex",
+ "description": "Deletes a single input from a transaction"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json", "-", "delin=1"],
+ "input": "tx394b54bb.hex",
+ "output_cmp": "tt-delin1-out.json",
+ "description": "Deletes a single input from a transaction (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-", "delin=31"],
+ "input": "tx394b54bb.hex",
+ "return_code": 1,
+ "description": "Attempts to delete an input with a bad index from a transaction. Expected to fail."
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-", "delout=1"],
+ "input": "tx394b54bb.hex",
+ "output_cmp": "tt-delout1-out.hex",
+ "description": "Deletes a single output from a transaction"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json", "-", "delout=1"],
+ "input": "tx394b54bb.hex",
+ "output_cmp": "tt-delout1-out.json",
+ "description": "Deletes a single output from a transaction (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-", "delout=2"],
+ "input": "tx394b54bb.hex",
+ "return_code": 1,
+ "description": "Attempts to delete an output with a bad index from a transaction. Expected to fail."
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-", "locktime=317000"],
+ "input": "tx394b54bb.hex",
+ "output_cmp": "tt-locktime317000-out.hex",
+ "description": "Adds an nlocktime to a transaction"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json", "-", "locktime=317000"],
+ "input": "tx394b54bb.hex",
+ "output_cmp": "tt-locktime317000-out.json",
+ "description": "Adds an nlocktime to a transaction (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0",
+ "in=bf829c6bcf84579331337659d31f89dfd138f7f7785802d5501c92333145ca7c:18",
+ "in=22a6f904655d53ae2ff70e701a0bbd90aa3975c0f40bfc6cc996a9049e31cdfc:1",
+ "outaddr=0.18:13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
+ "outaddr=4:1P8yWvZW8jVihP1bzHeqfE4aoXNX8AVa46"],
+ "output_cmp": "txcreate1.hex",
+ "description": "Creates a new transaction with three inputs and two outputs"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-json",
+ "-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0",
+ "in=bf829c6bcf84579331337659d31f89dfd138f7f7785802d5501c92333145ca7c:18",
+ "in=22a6f904655d53ae2ff70e701a0bbd90aa3975c0f40bfc6cc996a9049e31cdfc:1",
+ "outaddr=0.18:13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
+ "outaddr=4:1P8yWvZW8jVihP1bzHeqfE4aoXNX8AVa46"],
+ "output_cmp": "txcreate1.json",
+ "description": "Creates a new transaction with three inputs and two outputs (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outscript=0:"],
+ "output_cmp": "txcreate2.hex",
+ "description": "Creates a new transaction with a single empty output script"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json", "-create", "outscript=0:"],
+ "output_cmp": "txcreate2.json",
+ "description": "Creates a new transaction with a single empty output script (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["02000000000100000000000000000000000000"],
+ "output_cmp": "txcreate2.hex",
+ "description": "Parses a transation with no inputs and a single output script"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json", "02000000000100000000000000000000000000"],
+ "output_cmp": "txcreate2.json",
+ "description": "Parses a transation with no inputs and a single output script (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outscript=0:OP_DROP", "nversion=1"],
+ "output_cmp": "txcreatescript1.hex",
+ "description": "Create a new transaction with a single output script (OP_DROP)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json", "-create", "outscript=0:OP_DROP", "nversion=1"],
+ "output_cmp": "txcreatescript1.json",
+ "description": "Create a new transaction with a single output script (OP_DROP) (output as json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outscript=0:OP_DROP:S", "nversion=1"],
+ "output_cmp": "txcreatescript2.hex",
+ "description": "Create a new transaction with a single output script (OP_DROP) in a P2SH"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json", "-create", "outscript=0:OP_DROP:S", "nversion=1"],
+ "output_cmp": "txcreatescript2.json",
+ "description": "Create a new transaction with a single output script (OP_DROP) in a P2SH (output as json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outscript=0:OP_DROP:W", "nversion=1"],
+ "output_cmp": "txcreatescript3.hex",
+ "description": "Create a new transaction with a single output script (OP_DROP) in a P2WSH"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json", "-create", "outscript=0:OP_DROP:W", "nversion=1"],
+ "output_cmp": "txcreatescript3.json",
+ "description": "Create a new transaction with a single output script (OP_DROP) in a P2WSH (output as json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outscript=0:OP_DROP:WS", "nversion=1"],
+ "output_cmp": "txcreatescript4.hex",
+ "description": "Create a new transaction with a single output script (OP_DROP) in a P2WSH, wrapped in a P2SH"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json", "-create", "outscript=0:OP_DROP:WS", "nversion=1"],
+ "output_cmp": "txcreatescript4.json",
+ "description": "Create a new transaction with a single output script (OP_DROP) in a P2SH, wrapped in a P2SH (output as json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create", "nversion=1",
+ "in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0",
+ "set=privatekeys:[\"5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreAnchuDf\"]",
+ "set=prevtxs:[{\"txid\":\"4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485\",\"vout\":0,\"scriptPubKey\":\"76a91491b24bf9f5288532960ac687abb035127b1d28a588ac\"}]",
+ "sign=ALL",
+ "outaddr=0.001:193P6LtvS4nCnkDvM9uXn1gsSRqh4aDAz7"],
+ "output_cmp": "txcreatesignv1.hex",
+ "description": "Creates a new v1 transaction with a single input and a single output, and then signs the transaction"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-json",
+ "-create", "nversion=1",
+ "in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0",
+ "set=privatekeys:[\"5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreAnchuDf\"]",
+ "set=prevtxs:[{\"txid\":\"4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485\",\"vout\":0,\"scriptPubKey\":\"76a91491b24bf9f5288532960ac687abb035127b1d28a588ac\"}]",
+ "sign=ALL",
+ "outaddr=0.001:193P6LtvS4nCnkDvM9uXn1gsSRqh4aDAz7"],
+ "output_cmp": "txcreatesignv1.json",
+ "description": "Creates a new v1 transaction with a single input and a single output, and then signs the transaction (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
+ "in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0",
+ "set=privatekeys:[\"5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreAnchuDf\"]",
+ "set=prevtxs:[{\"txid\":\"4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485\",\"vout\":0,\"scriptPubKey\":\"76a91491b24bf9f5288532960ac687abb035127b1d28a588ac\"}]",
+ "sign=ALL",
+ "outaddr=0.001:193P6LtvS4nCnkDvM9uXn1gsSRqh4aDAz7"],
+ "output_cmp": "txcreatesignv2.hex",
+ "description": "Creates a new transaction with a single input and a single output, and then signs the transaction"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create", "outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397", "nversion=1"],
+ "output_cmp": "txcreateoutpubkey1.hex",
+ "description": "Creates a new transaction with a single pay-to-pubkey output"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-json", "-create", "outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397", "nversion=1"],
+ "output_cmp": "txcreateoutpubkey1.json",
+ "description": "Creates a new transaction with a single pay-to-pubkey output (output as json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create", "outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:W", "nversion=1"],
+ "output_cmp": "txcreateoutpubkey2.hex",
+ "description": "Creates a new transaction with a single pay-to-witness-pubkey output"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-json", "-create", "outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:W", "nversion=1"],
+ "output_cmp": "txcreateoutpubkey2.json",
+ "description": "Creates a new transaction with a single pay-to-witness-pubkey output (output as json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create", "outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:WS", "nversion=1"],
+ "output_cmp": "txcreateoutpubkey3.hex",
+ "description": "Creates a new transaction with a single pay-to-witness-pubkey, wrapped in P2SH output"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-json", "-create", "outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:WS", "nversion=1"],
+ "output_cmp": "txcreateoutpubkey3.json",
+ "description": "Creates a new transaction with a single pay-to-pub-key output, wrapped in P2SH (output as json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0",
+ "outdata=4:badhexdata"],
+ "return_code": 1,
+ "description": "Attempts to create a new transaction with one input and an output with malformed hex data. Expected to fail"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0",
+ "outdata=badhexdata"],
+ "return_code": 1,
+ "description": "Attempts to create a new transaction with one input and an output with no value and malformed hex data. Expected to fail"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0",
+ "outaddr=0.18:13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
+ "outdata=4:54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e"],
+ "output_cmp": "txcreatedata1.hex",
+ "description": "Creates a new transaction with one input, one address output and one data output"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-json",
+ "-create", "nversion=1",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0",
+ "outaddr=0.18:13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
+ "outdata=4:54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e"],
+ "output_cmp": "txcreatedata1.json",
+ "description": "Creates a new v1 transaction with one input, one address output and one data output (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0",
+ "outaddr=0.18:13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
+ "outdata=54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e"],
+ "output_cmp": "txcreatedata2.hex",
+ "description": "Creates a new transaction with one input, one address output and one data (zero value) output"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-json",
+ "-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0",
+ "outaddr=0.18:13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
+ "outdata=54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e"],
+ "output_cmp": "txcreatedata2.json",
+ "description": "Creates a new transaction with one input, one address output and one data (zero value) output (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:4294967293",
+ "outaddr=0.18:13tuJJDR2RgArmgfv6JScSdreahzgc4T6o"],
+ "output_cmp": "txcreatedata_seq0.hex",
+ "description": "Creates a new transaction with one input with sequence number and one address output"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-json",
+ "-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:4294967293",
+ "outaddr=0.18:13tuJJDR2RgArmgfv6JScSdreahzgc4T6o"],
+ "output_cmp": "txcreatedata_seq0.json",
+ "description": "Creates a new transaction with one input with sequence number and one address output (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff0180a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac00000000",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:1"],
+ "output_cmp": "txcreatedata_seq1.hex",
+ "description": "Adds a new input with sequence number to a transaction"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-json",
+ "01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff0180a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac00000000",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:1"],
+ "output_cmp": "txcreatedata_seq1.json",
+ "description": "Adds a new input with sequence number to a transaction (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485", "nversion=1"],
+ "output_cmp": "txcreatemultisig1.hex",
+ "description": "Creates a new transaction with a single 2-of-3 multisig output"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json", "-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485", "nversion=1"],
+ "output_cmp": "txcreatemultisig1.json",
+ "description": "Creates a new transaction with a single 2-of-3 multisig output (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:S", "nversion=1"],
+ "output_cmp": "txcreatemultisig2.hex",
+ "description": "Creates a new transaction with a single 2-of-3 multisig in a P2SH output"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json", "-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:S", "nversion=1"],
+ "output_cmp": "txcreatemultisig2.json",
+ "description": "Creates a new transaction with a single 2-of-3 multisig in a P2SH output (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:W", "nversion=1"],
+ "output_cmp": "txcreatemultisig3.hex",
+ "description": "Creates a new transaction with a single 2-of-3 multisig in a P2WSH output"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json", "-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:W", "nversion=1"],
+ "output_cmp": "txcreatemultisig3.json",
+ "description": "Creates a new transaction with a single 2-of-3 multisig in a P2WSH output (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:WS", "nversion=1"],
+ "output_cmp": "txcreatemultisig4.hex",
+ "description": "Creates a new transaction with a single 2-of-3 multisig in a P2WSH output, wrapped in P2SH"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-json", "-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:WS", "nversion=1"],
+ "output_cmp": "txcreatemultisig4.json",
+ "description": "Creates a new transaction with a single 2-of-3 multisig in a P2WSH output, wrapped in P2SH (output in json)"
+ }
+]
diff --git a/test/util/data/blanktxv1.hex b/test/util/data/blanktxv1.hex
new file mode 100644
index 0000000000..36b6f00fb6
--- /dev/null
+++ b/test/util/data/blanktxv1.hex
@@ -0,0 +1 @@
+01000000000000000000
diff --git a/test/util/data/blanktxv1.json b/test/util/data/blanktxv1.json
new file mode 100644
index 0000000000..51c25a5a98
--- /dev/null
+++ b/test/util/data/blanktxv1.json
@@ -0,0 +1,11 @@
+{
+ "txid": "d21633ba23f70118185227be58a63527675641ad37967e2aa461559f577aec43",
+ "hash": "d21633ba23f70118185227be58a63527675641ad37967e2aa461559f577aec43",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ ],
+ "hex": "01000000000000000000"
+}
diff --git a/test/util/data/blanktxv2.hex b/test/util/data/blanktxv2.hex
new file mode 100644
index 0000000000..22d830eda1
--- /dev/null
+++ b/test/util/data/blanktxv2.hex
@@ -0,0 +1 @@
+02000000000000000000
diff --git a/test/util/data/blanktxv2.json b/test/util/data/blanktxv2.json
new file mode 100644
index 0000000000..266919f445
--- /dev/null
+++ b/test/util/data/blanktxv2.json
@@ -0,0 +1,11 @@
+{
+ "txid": "4ebd325a4b394cff8c57e8317ccf5a8d0e2bdf1b8526f8aad6c8e43d8240621a",
+ "hash": "4ebd325a4b394cff8c57e8317ccf5a8d0e2bdf1b8526f8aad6c8e43d8240621a",
+ "version": 2,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ ],
+ "hex": "02000000000000000000"
+}
diff --git a/test/util/data/tt-delin1-out.hex b/test/util/data/tt-delin1-out.hex
new file mode 100644
index 0000000000..42ad840f43
--- /dev/null
+++ b/test/util/data/tt-delin1-out.hex
@@ -0,0 +1 @@
+0100000014fd5c23522d31761c50175453daa6edaabe47a602a592d39ce933d8271a1a87274c0100006c493046022100b4251ecd63778a3dde0155abe4cd162947620ae9ee45a874353551092325b116022100db307baf4ff3781ec520bd18f387948cedd15dc27bafe17c894b0fe6ffffcafa012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffc1b37ae964f605978022f94ce2f3f676d66a46d1aef7c2c17d6315b9697f2f75010000006a473044022079bd62ee09621a3be96b760c39e8ef78170101d46313923c6b07ae60a95c90670220238e51ea29fc70b04b65508450523caedbb11cb4dd5aa608c81487de798925ba0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34ffffffffedd005dc7790ef65c206abd1ab718e75252a40f4b1310e4102cd692eca9cacb0d10000006b48304502207722d6f9038673c86a1019b1c4de2d687ae246477cd4ca7002762be0299de385022100e594a11e3a313942595f7666dcf7078bcb14f1330f4206b95c917e7ec0e82fac012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffdf28d6e26fb7a85a1e6a229b972c1bae0edc1c11cb9ca51e4caf5e59fbea35a1000000006b483045022100a63a4788027b79b65c6f9d9e054f68cf3b4eed19efd82a2d53f70dcbe64683390220526f243671425b2bd05745fcf2729361f985cfe84ea80c7cfc817b93d8134374012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52ffffffffae2a2320a1582faa24469eff3024a6b98bfe00eb4f554d8a0b1421ba53bfd6a5010000006c493046022100b200ac6db16842f76dab9abe807ce423c992805879bc50abd46ed8275a59d9cf022100c0d518e85dd345b3c29dd4dc47b9a420d3ce817b18720e94966d2fe23413a408012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffb3cc5a12548aa1794b4d2bbf076838cfd7fbafb7716da51ee8221a4ff19c291b000000006b483045022100ededc441c3103a6f2bd6cab7639421af0f6ec5e60503bce1e603cf34f00aee1c02205cb75f3f519a13fb348783b21db3085cb5ec7552c59e394fdbc3e1feea43f967012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52ffffffff85145367313888d2cf2747274a32e20b2df074027bafd6f970003fcbcdf11d07150000006b483045022100d9eed5413d2a4b4b98625aa6e3169edc4fb4663e7862316d69224454e70cd8ca022061e506521d5ced51dd0ea36496e75904d756a4c4f9fb111568555075d5f68d9a012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff8292c11f6d35abab5bac3ebb627a4ff949e8ecd62d33ed137adf7aeb00e512b0090000006b48304502207e84b27139c4c19c828cb1e30c349bba88e4d9b59be97286960793b5ddc0a2af0221008cdc7a951e7f31c20953ed5635fbabf228e80b7047f32faaa0313e7693005177012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff883dcf9a86063db088ad064d0953258d4b0ff3425857402d2f3f839cee0f84581e0000006a4730440220426540dfed9c4ab5812e5f06df705b8bcf307dd7d20f7fa6512298b2a6314f420220064055096e3ca62f6c7352c66a5447767c53f946acdf35025ab3807ddb2fa404012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff6697dbb3ed98afe481b568459fa67e503f8a4254532465a670e54669d19c9fe6720000006a47304402200a5e673996f2fc88e21cc8613611f08a650bc0370338803591d85d0ec5663764022040b6664a0d1ec83a7f01975b8fde5232992b8ca58bf48af6725d2f92a936ab2e012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff023ffc2182517e1d3fa0896c5b0bd7b4d2ef8a1e42655abe2ced54f657125d59670000006c493046022100d93b30219c5735f673be5c3b4688366d96f545561c74cb62c6958c00f6960806022100ec8200adcb028f2184fa2a4f6faac7f8bb57cb4503bb7584ac11051fece31b3d012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff16f8c77166b0df3d7cc8b5b2ce825afbea9309ad7acd8e2461a255958f81fc06010000006b483045022100a13934e68d3f5b22b130c4cb33f4da468cffc52323a47fbfbe06b64858162246022047081e0a70ff770e64a2e2d31e5d520d9102268b57a47009a72fe73ec766901801210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cdffffffff197b96f3c87a3adfaa17f63fddc2a738a690ca665439f9431dbbd655816c41fb000000006c49304602210097f1f35d5bdc1a3a60390a1b015b8e7c4f916aa3847aafd969e04975e15bbe70022100a9052eb25517d481f1fda1b129eb1b534da50ea1a51f3ee012dca3601c11b86a0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34ffffffff20d9a261ee27aa1bd92e7db2fdca935909a40b648e974cd24a10d63b68b94039dd0000006b483045022012b3138c591bf7154b6fef457f2c4a3c7162225003788ac0024a99355865ff13022100b71b125ae1ffb2e1d1571f580cd3ebc8cd049a2d7a8a41f138ba94aeb982106f012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff50f179d5d16cd872f9a63c26c448464ae9bd95cd9421c0476113b5d314571b71010000006b483045022100f834ccc8b22ee72712a3e5e6ef4acb8b2fb791b5385b70e2cd4332674d6667f4022024fbda0a997e0c253503f217501f508a4d56edce2c813ecdd9ad796dbeba907401210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cdffffffff551b865d1568ac0a305e5f9c5dae6c540982334efbe789074318e0efc5b564631b0000006b48304502203b2fd1e39ae0e469d7a15768f262661b0de41470daf0fe8c4fd0c26542a0870002210081c57e331f9a2d214457d953e3542904727ee412c63028113635d7224da3dccc012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff57503e5a016189d407a721791459280875264f908ca2c5d4862c01386e7fb50b470400006b48304502206947a9c54f0664ece4430fd4ae999891dc50bb6126bc36b6a15a3189f29d25e9022100a86cfc4e2fdd9e39a20e305cfd1b76509c67b3e313e0f118229105caa0e823c9012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff3f16c1fb9d3e1a26d872933e955df85ee7f3f817711062b00b54a2144827349b250000006b483045022100c7128fe10b2d38744ae8177776054c29fc8ec13f07207723e70766ab7164847402201d2cf09009b9596de74c0183d1ab832e5edddb7a9965880bb400097e850850f8012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff4142a69d85b8498af214f0dd427b6ab29c240a0b8577e2944d37a7d8c05c6bb8140000006b48304502203b89a71628a28cc3703d170ca3be77786cff6b867e38a18b719705f8a326578f022100b2a9879e1acf621faa6466c207746a7f3eb4c8514c1482969aba3f2a957f1321012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff36e2feecc0a4bff7480015d42c12121932db389025ed0ac1d344ecee53230a3df20000006c493046022100ef794a8ef7fd6752d2a183c18866ff6e8dc0f5bd889a63e2c21cf303a6302461022100c1b09662d9e92988c3f9fcf17d1bcc79b5403647095d7212b9f8a1278a532d68012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff0260f73608000000001976a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac41420f00000000001976a9146c772e9cf96371bba3da8cb733da70a2fcf2007888ac00000000
diff --git a/test/util/data/tt-delin1-out.json b/test/util/data/tt-delin1-out.json
new file mode 100644
index 0000000000..712a2c27f8
--- /dev/null
+++ b/test/util/data/tt-delin1-out.json
@@ -0,0 +1,217 @@
+{
+ "txid": "81b2035be1da1abe745c6141174a73d151009ec17b3d5ebffa2e177408c50dfd",
+ "hash": "81b2035be1da1abe745c6141174a73d151009ec17b3d5ebffa2e177408c50dfd",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ {
+ "txid": "27871a1a27d833e99cd392a502a647beaaeda6da535417501c76312d52235cfd",
+ "vout": 332,
+ "scriptSig": {
+ "asm": "3046022100b4251ecd63778a3dde0155abe4cd162947620ae9ee45a874353551092325b116022100db307baf4ff3781ec520bd18f387948cedd15dc27bafe17c894b0fe6ffffcafa[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "493046022100b4251ecd63778a3dde0155abe4cd162947620ae9ee45a874353551092325b116022100db307baf4ff3781ec520bd18f387948cedd15dc27bafe17c894b0fe6ffffcafa012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "752f7f69b915637dc1c2f7aed1466ad676f6f3e24cf922809705f664e97ab3c1",
+ "vout": 1,
+ "scriptSig": {
+ "asm": "3044022079bd62ee09621a3be96b760c39e8ef78170101d46313923c6b07ae60a95c90670220238e51ea29fc70b04b65508450523caedbb11cb4dd5aa608c81487de798925ba[ALL] 027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34",
+ "hex": "473044022079bd62ee09621a3be96b760c39e8ef78170101d46313923c6b07ae60a95c90670220238e51ea29fc70b04b65508450523caedbb11cb4dd5aa608c81487de798925ba0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "b0ac9cca2e69cd02410e31b1f4402a25758e71abd1ab06c265ef9077dc05d0ed",
+ "vout": 209,
+ "scriptSig": {
+ "asm": "304502207722d6f9038673c86a1019b1c4de2d687ae246477cd4ca7002762be0299de385022100e594a11e3a313942595f7666dcf7078bcb14f1330f4206b95c917e7ec0e82fac[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "48304502207722d6f9038673c86a1019b1c4de2d687ae246477cd4ca7002762be0299de385022100e594a11e3a313942595f7666dcf7078bcb14f1330f4206b95c917e7ec0e82fac012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "a135eafb595eaf4c1ea59ccb111cdc0eae1b2c979b226a1e5aa8b76fe2d628df",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "3045022100a63a4788027b79b65c6f9d9e054f68cf3b4eed19efd82a2d53f70dcbe64683390220526f243671425b2bd05745fcf2729361f985cfe84ea80c7cfc817b93d8134374[ALL] 03a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52",
+ "hex": "483045022100a63a4788027b79b65c6f9d9e054f68cf3b4eed19efd82a2d53f70dcbe64683390220526f243671425b2bd05745fcf2729361f985cfe84ea80c7cfc817b93d8134374012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "a5d6bf53ba21140b8a4d554feb00fe8bb9a62430ff9e4624aa2f58a120232aae",
+ "vout": 1,
+ "scriptSig": {
+ "asm": "3046022100b200ac6db16842f76dab9abe807ce423c992805879bc50abd46ed8275a59d9cf022100c0d518e85dd345b3c29dd4dc47b9a420d3ce817b18720e94966d2fe23413a408[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "493046022100b200ac6db16842f76dab9abe807ce423c992805879bc50abd46ed8275a59d9cf022100c0d518e85dd345b3c29dd4dc47b9a420d3ce817b18720e94966d2fe23413a408012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "1b299cf14f1a22e81ea56d71b7affbd7cf386807bf2b4d4b79a18a54125accb3",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "3045022100ededc441c3103a6f2bd6cab7639421af0f6ec5e60503bce1e603cf34f00aee1c02205cb75f3f519a13fb348783b21db3085cb5ec7552c59e394fdbc3e1feea43f967[ALL] 03a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52",
+ "hex": "483045022100ededc441c3103a6f2bd6cab7639421af0f6ec5e60503bce1e603cf34f00aee1c02205cb75f3f519a13fb348783b21db3085cb5ec7552c59e394fdbc3e1feea43f967012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "071df1cdcb3f0070f9d6af7b0274f02d0be2324a274727cfd288383167531485",
+ "vout": 21,
+ "scriptSig": {
+ "asm": "3045022100d9eed5413d2a4b4b98625aa6e3169edc4fb4663e7862316d69224454e70cd8ca022061e506521d5ced51dd0ea36496e75904d756a4c4f9fb111568555075d5f68d9a[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "483045022100d9eed5413d2a4b4b98625aa6e3169edc4fb4663e7862316d69224454e70cd8ca022061e506521d5ced51dd0ea36496e75904d756a4c4f9fb111568555075d5f68d9a012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "b012e500eb7adf7a13ed332dd6ece849f94f7a62bb3eac5babab356d1fc19282",
+ "vout": 9,
+ "scriptSig": {
+ "asm": "304502207e84b27139c4c19c828cb1e30c349bba88e4d9b59be97286960793b5ddc0a2af0221008cdc7a951e7f31c20953ed5635fbabf228e80b7047f32faaa0313e7693005177[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "48304502207e84b27139c4c19c828cb1e30c349bba88e4d9b59be97286960793b5ddc0a2af0221008cdc7a951e7f31c20953ed5635fbabf228e80b7047f32faaa0313e7693005177012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "58840fee9c833f2f2d40575842f30f4b8d2553094d06ad88b03d06869acf3d88",
+ "vout": 30,
+ "scriptSig": {
+ "asm": "30440220426540dfed9c4ab5812e5f06df705b8bcf307dd7d20f7fa6512298b2a6314f420220064055096e3ca62f6c7352c66a5447767c53f946acdf35025ab3807ddb2fa404[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "4730440220426540dfed9c4ab5812e5f06df705b8bcf307dd7d20f7fa6512298b2a6314f420220064055096e3ca62f6c7352c66a5447767c53f946acdf35025ab3807ddb2fa404012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "e69f9cd16946e570a665245354428a3f507ea69f4568b581e4af98edb3db9766",
+ "vout": 114,
+ "scriptSig": {
+ "asm": "304402200a5e673996f2fc88e21cc8613611f08a650bc0370338803591d85d0ec5663764022040b6664a0d1ec83a7f01975b8fde5232992b8ca58bf48af6725d2f92a936ab2e[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "47304402200a5e673996f2fc88e21cc8613611f08a650bc0370338803591d85d0ec5663764022040b6664a0d1ec83a7f01975b8fde5232992b8ca58bf48af6725d2f92a936ab2e012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "595d1257f654ed2cbe5a65421e8aefd2b4d70b5b6c89a03f1d7e518221fc3f02",
+ "vout": 103,
+ "scriptSig": {
+ "asm": "3046022100d93b30219c5735f673be5c3b4688366d96f545561c74cb62c6958c00f6960806022100ec8200adcb028f2184fa2a4f6faac7f8bb57cb4503bb7584ac11051fece31b3d[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "493046022100d93b30219c5735f673be5c3b4688366d96f545561c74cb62c6958c00f6960806022100ec8200adcb028f2184fa2a4f6faac7f8bb57cb4503bb7584ac11051fece31b3d012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "06fc818f9555a261248ecd7aad0993eafb5a82ceb2b5c87c3ddfb06671c7f816",
+ "vout": 1,
+ "scriptSig": {
+ "asm": "3045022100a13934e68d3f5b22b130c4cb33f4da468cffc52323a47fbfbe06b64858162246022047081e0a70ff770e64a2e2d31e5d520d9102268b57a47009a72fe73ec7669018[ALL] 0234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cd",
+ "hex": "483045022100a13934e68d3f5b22b130c4cb33f4da468cffc52323a47fbfbe06b64858162246022047081e0a70ff770e64a2e2d31e5d520d9102268b57a47009a72fe73ec766901801210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cd"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "fb416c8155d6bb1d43f9395466ca90a638a7c2dd3ff617aadf3a7ac8f3967b19",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "304602210097f1f35d5bdc1a3a60390a1b015b8e7c4f916aa3847aafd969e04975e15bbe70022100a9052eb25517d481f1fda1b129eb1b534da50ea1a51f3ee012dca3601c11b86a[ALL] 027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34",
+ "hex": "49304602210097f1f35d5bdc1a3a60390a1b015b8e7c4f916aa3847aafd969e04975e15bbe70022100a9052eb25517d481f1fda1b129eb1b534da50ea1a51f3ee012dca3601c11b86a0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "3940b9683bd6104ad24c978e640ba4095993cafdb27d2ed91baa27ee61a2d920",
+ "vout": 221,
+ "scriptSig": {
+ "asm": "3045022012b3138c591bf7154b6fef457f2c4a3c7162225003788ac0024a99355865ff13022100b71b125ae1ffb2e1d1571f580cd3ebc8cd049a2d7a8a41f138ba94aeb982106f[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "483045022012b3138c591bf7154b6fef457f2c4a3c7162225003788ac0024a99355865ff13022100b71b125ae1ffb2e1d1571f580cd3ebc8cd049a2d7a8a41f138ba94aeb982106f012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "711b5714d3b5136147c02194cd95bde94a4648c4263ca6f972d86cd1d579f150",
+ "vout": 1,
+ "scriptSig": {
+ "asm": "3045022100f834ccc8b22ee72712a3e5e6ef4acb8b2fb791b5385b70e2cd4332674d6667f4022024fbda0a997e0c253503f217501f508a4d56edce2c813ecdd9ad796dbeba9074[ALL] 0234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cd",
+ "hex": "483045022100f834ccc8b22ee72712a3e5e6ef4acb8b2fb791b5385b70e2cd4332674d6667f4022024fbda0a997e0c253503f217501f508a4d56edce2c813ecdd9ad796dbeba907401210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cd"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "6364b5c5efe018430789e7fb4e338209546cae5d9c5f5e300aac68155d861b55",
+ "vout": 27,
+ "scriptSig": {
+ "asm": "304502203b2fd1e39ae0e469d7a15768f262661b0de41470daf0fe8c4fd0c26542a0870002210081c57e331f9a2d214457d953e3542904727ee412c63028113635d7224da3dccc[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "48304502203b2fd1e39ae0e469d7a15768f262661b0de41470daf0fe8c4fd0c26542a0870002210081c57e331f9a2d214457d953e3542904727ee412c63028113635d7224da3dccc012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "0bb57f6e38012c86d4c5a28c904f2675082859147921a707d48961015a3e5057",
+ "vout": 1095,
+ "scriptSig": {
+ "asm": "304502206947a9c54f0664ece4430fd4ae999891dc50bb6126bc36b6a15a3189f29d25e9022100a86cfc4e2fdd9e39a20e305cfd1b76509c67b3e313e0f118229105caa0e823c9[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "48304502206947a9c54f0664ece4430fd4ae999891dc50bb6126bc36b6a15a3189f29d25e9022100a86cfc4e2fdd9e39a20e305cfd1b76509c67b3e313e0f118229105caa0e823c9012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "9b34274814a2540bb062107117f8f3e75ef85d953e9372d8261a3e9dfbc1163f",
+ "vout": 37,
+ "scriptSig": {
+ "asm": "3045022100c7128fe10b2d38744ae8177776054c29fc8ec13f07207723e70766ab7164847402201d2cf09009b9596de74c0183d1ab832e5edddb7a9965880bb400097e850850f8[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "483045022100c7128fe10b2d38744ae8177776054c29fc8ec13f07207723e70766ab7164847402201d2cf09009b9596de74c0183d1ab832e5edddb7a9965880bb400097e850850f8012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "b86b5cc0d8a7374d94e277850b0a249cb26a7b42ddf014f28a49b8859da64241",
+ "vout": 20,
+ "scriptSig": {
+ "asm": "304502203b89a71628a28cc3703d170ca3be77786cff6b867e38a18b719705f8a326578f022100b2a9879e1acf621faa6466c207746a7f3eb4c8514c1482969aba3f2a957f1321[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "48304502203b89a71628a28cc3703d170ca3be77786cff6b867e38a18b719705f8a326578f022100b2a9879e1acf621faa6466c207746a7f3eb4c8514c1482969aba3f2a957f1321012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "3d0a2353eeec44d3c10aed259038db321912122cd4150048f7bfa4c0ecfee236",
+ "vout": 242,
+ "scriptSig": {
+ "asm": "3046022100ef794a8ef7fd6752d2a183c18866ff6e8dc0f5bd889a63e2c21cf303a6302461022100c1b09662d9e92988c3f9fcf17d1bcc79b5403647095d7212b9f8a1278a532d68[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "493046022100ef794a8ef7fd6752d2a183c18866ff6e8dc0f5bd889a63e2c21cf303a6302461022100c1b09662d9e92988c3f9fcf17d1bcc79b5403647095d7212b9f8a1278a532d68012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ }
+ ],
+ "vout": [
+ {
+ "value": 1.3782,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_DUP OP_HASH160 8fd139bb39ced713f231c58a4d07bf6954d1c201 OP_EQUALVERIFY OP_CHECKSIG",
+ "hex": "76a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac",
+ "reqSigs": 1,
+ "type": "pubkeyhash",
+ "addresses": [
+ "1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o"
+ ]
+ }
+ },
+ {
+ "value": 0.01000001,
+ "n": 1,
+ "scriptPubKey": {
+ "asm": "OP_DUP OP_HASH160 6c772e9cf96371bba3da8cb733da70a2fcf20078 OP_EQUALVERIFY OP_CHECKSIG",
+ "hex": "76a9146c772e9cf96371bba3da8cb733da70a2fcf2007888ac",
+ "reqSigs": 1,
+ "type": "pubkeyhash",
+ "addresses": [
+ "1AtWkdmfmYkErU16d3KYykJUbEp9MAj9Sb"
+ ]
+ }
+ }
+ ],
+ "hex": "0100000014fd5c23522d31761c50175453daa6edaabe47a602a592d39ce933d8271a1a87274c0100006c493046022100b4251ecd63778a3dde0155abe4cd162947620ae9ee45a874353551092325b116022100db307baf4ff3781ec520bd18f387948cedd15dc27bafe17c894b0fe6ffffcafa012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffc1b37ae964f605978022f94ce2f3f676d66a46d1aef7c2c17d6315b9697f2f75010000006a473044022079bd62ee09621a3be96b760c39e8ef78170101d46313923c6b07ae60a95c90670220238e51ea29fc70b04b65508450523caedbb11cb4dd5aa608c81487de798925ba0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34ffffffffedd005dc7790ef65c206abd1ab718e75252a40f4b1310e4102cd692eca9cacb0d10000006b48304502207722d6f9038673c86a1019b1c4de2d687ae246477cd4ca7002762be0299de385022100e594a11e3a313942595f7666dcf7078bcb14f1330f4206b95c917e7ec0e82fac012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffdf28d6e26fb7a85a1e6a229b972c1bae0edc1c11cb9ca51e4caf5e59fbea35a1000000006b483045022100a63a4788027b79b65c6f9d9e054f68cf3b4eed19efd82a2d53f70dcbe64683390220526f243671425b2bd05745fcf2729361f985cfe84ea80c7cfc817b93d8134374012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52ffffffffae2a2320a1582faa24469eff3024a6b98bfe00eb4f554d8a0b1421ba53bfd6a5010000006c493046022100b200ac6db16842f76dab9abe807ce423c992805879bc50abd46ed8275a59d9cf022100c0d518e85dd345b3c29dd4dc47b9a420d3ce817b18720e94966d2fe23413a408012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffb3cc5a12548aa1794b4d2bbf076838cfd7fbafb7716da51ee8221a4ff19c291b000000006b483045022100ededc441c3103a6f2bd6cab7639421af0f6ec5e60503bce1e603cf34f00aee1c02205cb75f3f519a13fb348783b21db3085cb5ec7552c59e394fdbc3e1feea43f967012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52ffffffff85145367313888d2cf2747274a32e20b2df074027bafd6f970003fcbcdf11d07150000006b483045022100d9eed5413d2a4b4b98625aa6e3169edc4fb4663e7862316d69224454e70cd8ca022061e506521d5ced51dd0ea36496e75904d756a4c4f9fb111568555075d5f68d9a012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff8292c11f6d35abab5bac3ebb627a4ff949e8ecd62d33ed137adf7aeb00e512b0090000006b48304502207e84b27139c4c19c828cb1e30c349bba88e4d9b59be97286960793b5ddc0a2af0221008cdc7a951e7f31c20953ed5635fbabf228e80b7047f32faaa0313e7693005177012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff883dcf9a86063db088ad064d0953258d4b0ff3425857402d2f3f839cee0f84581e0000006a4730440220426540dfed9c4ab5812e5f06df705b8bcf307dd7d20f7fa6512298b2a6314f420220064055096e3ca62f6c7352c66a5447767c53f946acdf35025ab3807ddb2fa404012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff6697dbb3ed98afe481b568459fa67e503f8a4254532465a670e54669d19c9fe6720000006a47304402200a5e673996f2fc88e21cc8613611f08a650bc0370338803591d85d0ec5663764022040b6664a0d1ec83a7f01975b8fde5232992b8ca58bf48af6725d2f92a936ab2e012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff023ffc2182517e1d3fa0896c5b0bd7b4d2ef8a1e42655abe2ced54f657125d59670000006c493046022100d93b30219c5735f673be5c3b4688366d96f545561c74cb62c6958c00f6960806022100ec8200adcb028f2184fa2a4f6faac7f8bb57cb4503bb7584ac11051fece31b3d012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff16f8c77166b0df3d7cc8b5b2ce825afbea9309ad7acd8e2461a255958f81fc06010000006b483045022100a13934e68d3f5b22b130c4cb33f4da468cffc52323a47fbfbe06b64858162246022047081e0a70ff770e64a2e2d31e5d520d9102268b57a47009a72fe73ec766901801210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a
3523fb4cdffffffff197b96f3c87a3adfaa17f63fddc2a738a690ca665439f9431dbbd655816c41fb000000006c49304602210097f1f35d5bdc1a3a60390a1b015b8e7c4f916aa3847aafd969e04975e15bbe70022100a9052eb25517d481f1fda1b129eb1b534da50ea1a51f3ee012dca3601c11b86a0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34ffffffff20d9a261ee27aa1bd92e7db2fdca935909a40b648e974cd24a10d63b68b94039dd0000006b483045022012b3138c591bf7154b6fef457f2c4a3c7162225003788ac0024a99355865ff13022100b71b125ae1ffb2e1d1571f580cd3ebc8cd049a2d7a8a41f138ba94aeb982106f012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff50f179d5d16cd872f9a63c26c448464ae9bd95cd9421c0476113b5d314571b71010000006b483045022100f834ccc8b22ee72712a3e5e6ef4acb8b2fb791b5385b70e2cd4332674d6667f4022024fbda0a997e0c253503f217501f508a4d56edce2c813ecdd9ad796dbeba907401210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cdffffffff551b865d1568ac0a305e5f9c5dae6c540982334efbe789074318e0efc5b564631b0000006b48304502203b2fd1e39ae0e469d7a15768f262661b0de41470daf0fe8c4fd0c26542a0870002210081c57e331f9a2d214457d953e3542904727ee412c63028113635d7224da3dccc012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff57503e5a016189d407a721791459280875264f908ca2c5d4862c01386e7fb50b470400006b48304502206947a9c54f0664ece4430fd4ae999891dc50bb6126bc36b6a15a3189f29d25e9022100a86cfc4e2fdd9e39a20e305cfd1b76509c67b3e313e0f118229105caa0e823c9012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff3f16c1fb9d3e1a26d872933e955df85ee7f3f817711062b00b54a2144827349b250000006b483045022100c7128fe10b2d38744ae8177776054c29fc8ec13f07207723e70766ab7164847402201d2cf09009b9596de74c0183d1ab832e5edddb7a9965880bb400097e850850f8012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff4142a69d85b8498af214f0dd427b6ab29c240a0b8577e2944d37a7d8c05c6bb8140000006b48304502203b89a71628a28cc3703d170ca3be77786cff6b867e38a18b719705f8a326578f022100b2a9879e1acf621faa6466c207746a7f3eb4c8514c1482969aba3f2a957f1321012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff36e2feecc0a4bff7480015d42c12121932db389025ed0ac1d344ecee53230a3df20000006c493046022100ef794a8ef7fd6752d2a183c18866ff6e8dc0f5bd889a63e2c21cf303a6302461022100c1b09662d9e92988c3f9fcf17d1bcc79b5403647095d7212b9f8a1278a532d68012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff0260f73608000000001976a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac41420f00000000001976a9146c772e9cf96371bba3da8cb733da70a2fcf2007888ac00000000"
+}
diff --git a/test/util/data/tt-delout1-out.hex b/test/util/data/tt-delout1-out.hex
new file mode 100644
index 0000000000..cc60c3fac6
--- /dev/null
+++ b/test/util/data/tt-delout1-out.hex
@@ -0,0 +1 @@
+0100000015fd5c23522d31761c50175453daa6edaabe47a602a592d39ce933d8271a1a87274c0100006c493046022100b4251ecd63778a3dde0155abe4cd162947620ae9ee45a874353551092325b116022100db307baf4ff3781ec520bd18f387948cedd15dc27bafe17c894b0fe6ffffcafa012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffcb4ed1baba3a1eb2171e00ddec8e5b72b346dd8c07f9c2b0d122d0d06bc92ea7000000006c493046022100a9b617843b68c284715d3e02fd120479cd0d96a6c43bf01e697fb0a460a21a3a022100ba0a12fbe8b993d4e7911fa3467615765dbe421ddf5c51b57a9c1ee19dcc00ba012103e633b4fa4ceb705c2da712390767199be8ef2448b3095dc01652e11b2b751505ffffffffc1b37ae964f605978022f94ce2f3f676d66a46d1aef7c2c17d6315b9697f2f75010000006a473044022079bd62ee09621a3be96b760c39e8ef78170101d46313923c6b07ae60a95c90670220238e51ea29fc70b04b65508450523caedbb11cb4dd5aa608c81487de798925ba0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34ffffffffedd005dc7790ef65c206abd1ab718e75252a40f4b1310e4102cd692eca9cacb0d10000006b48304502207722d6f9038673c86a1019b1c4de2d687ae246477cd4ca7002762be0299de385022100e594a11e3a313942595f7666dcf7078bcb14f1330f4206b95c917e7ec0e82fac012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffdf28d6e26fb7a85a1e6a229b972c1bae0edc1c11cb9ca51e4caf5e59fbea35a1000000006b483045022100a63a4788027b79b65c6f9d9e054f68cf3b4eed19efd82a2d53f70dcbe64683390220526f243671425b2bd05745fcf2729361f985cfe84ea80c7cfc817b93d8134374012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52ffffffffae2a2320a1582faa24469eff3024a6b98bfe00eb4f554d8a0b1421ba53bfd6a5010000006c493046022100b200ac6db16842f76dab9abe807ce423c992805879bc50abd46ed8275a59d9cf022100c0d518e85dd345b3c29dd4dc47b9a420d3ce817b18720e94966d2fe23413a408012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffb3cc5a12548aa1794b4d2bbf076838cfd7fbafb7716da51ee8221a4ff19c291b000000006b483045022100ededc441c3103a6f2bd6cab7639421af0f6ec5e60503bce1e603cf34f00aee1c02205cb75f3f519a13fb348783b21db3085cb5ec7552c59e394fdbc3e1feea43f967012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52ffffffff85145367313888d2cf2747274a32e20b2df074027bafd6f970003fcbcdf11d07150000006b483045022100d9eed5413d2a4b4b98625aa6e3169edc4fb4663e7862316d69224454e70cd8ca022061e506521d5ced51dd0ea36496e75904d756a4c4f9fb111568555075d5f68d9a012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff8292c11f6d35abab5bac3ebb627a4ff949e8ecd62d33ed137adf7aeb00e512b0090000006b48304502207e84b27139c4c19c828cb1e30c349bba88e4d9b59be97286960793b5ddc0a2af0221008cdc7a951e7f31c20953ed5635fbabf228e80b7047f32faaa0313e7693005177012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff883dcf9a86063db088ad064d0953258d4b0ff3425857402d2f3f839cee0f84581e0000006a4730440220426540dfed9c4ab5812e5f06df705b8bcf307dd7d20f7fa6512298b2a6314f420220064055096e3ca62f6c7352c66a5447767c53f946acdf35025ab3807ddb2fa404012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff6697dbb3ed98afe481b568459fa67e503f8a4254532465a670e54669d19c9fe6720000006a47304402200a5e673996f2fc88e21cc8613611f08a650bc0370338803591d85d0ec5663764022040b6664a0d1ec83a7f01975b8fde5232992b8ca58bf48af6725d2f92a936ab2e012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff023ffc2182517e1d3fa0896c5b0bd7b4d2ef8a1e42655abe2ced54f657125d59670000006c493046022100d93b30219c5735f673be5c3b4688366d96f545561c74cb62c6958c00f6960806022100ec8200adcb028f2184fa2a4f6faac7f8bb57cb4503bb7584ac11051fece31b3d012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff16f8c77166b0df3d7cc8b5b2ce825afbea9309ad7acd8e2461a255958f81fc06010000006b483045022100a13934e68d3f5b22b130c4cb33f4da468cffc52323a47fbfbe06b64858162246022047081e0a70ff770e64a2e2d31e5d520d9102268b57a47009a72fe73ec766901801210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cdffffffff197b96f3c87a3adfaa17f63fddc2a738a690ca665439f9431dbbd655816c41fb000000006c49304602210097f1f35d5bdc1a3a60390a1b015b8e7c4f916aa3847aafd969e04975e15bbe70022100a9052eb25517d481f1fda1b129eb1b534da50ea1a51f3ee012dca3601c11b86a0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34ffffffff20d9a261ee27aa1bd92e7db2fdca935909a40b648e974cd24a10d63b68b94039dd0000006b483045022012b3138c591bf7154b6fef457f2c4a3c7162225003788ac0024a99355865ff13022100b71b125ae1ffb2e1d1571f580cd3ebc8cd049a2d7a8a41f138ba94aeb982106f012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff50f179d5d16cd872f9a63c26c448464ae9bd95cd9421c0476113b5d314571b71010000006b483045022100f834ccc8b22ee72712a3e5e6ef4acb8b2fb791b5385b70e2cd4332674d6667f4022024fbda0a997e0c253503f217501f508a4d56edce2c813ecdd9ad796dbeba907401210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cdffffffff551b865d1568ac0a305e5f9c5dae6c540982334efbe789074318e0efc5b564631b0000006b48304502203b2fd1e39ae0e469d7a15768f262661b0de41470daf0fe8c4fd0c26542a0870002210081c57e331f9a2d214457d953e3542904727ee412c63028113635d7224da3dccc012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff57503e5a016189d407a721791459280875264f908ca2c5d4862c01386e7fb50b470400006b48304502206947a9c54f0664ece4430fd4ae999891dc50bb6126bc36b6a15a3189f29d25e9022100a86cfc4e2fdd9e39a20e305cfd1b76509c67b3e313e0f118229105caa0e823c9012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff3f16c1fb9d3e1a26d872933e955df85ee7f3f817711062b00b54a2144827349b250000006b483045022100c7128fe10b2d38744ae8177776054c29fc8ec13f07207723e70766ab7164847402201d2cf09009b9596de74c0183d1ab832e5edddb7a9965880bb400097e850850f8012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff4142a69d85b8498af214f0dd427b6ab29c240a0b8577e2944d37a7d8c05c6bb8140000006b48304502203b89a71628a28cc3703d170ca3be77786cff6b867e38a18b719705f8a326578f022100b2a9879e1acf621faa6466c207746a7f3eb4c8514c1482969aba3f2a957f1321012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff36e2feecc0a4bff7480015d42c12121932db389025ed0ac1d344ecee53230a3df20000006c493046022100ef794a8ef7fd6752d2a183c18866ff6e8dc0f5bd889a63e2c21cf303a6302461022100c1b09662d9e92988c3f9fcf17d1bcc79b5403647095d7212b9f8a1278a532d68012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff0160f73608000000001976a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac00000000
diff --git a/test/util/data/tt-delout1-out.json b/test/util/data/tt-delout1-out.json
new file mode 100644
index 0000000000..afc4e95762
--- /dev/null
+++ b/test/util/data/tt-delout1-out.json
@@ -0,0 +1,213 @@
+{
+ "txid": "c46ccd75b5050e942b2e86a3648f843f525fe6fc000bf0534ba5973063354493",
+ "hash": "c46ccd75b5050e942b2e86a3648f843f525fe6fc000bf0534ba5973063354493",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ {
+ "txid": "27871a1a27d833e99cd392a502a647beaaeda6da535417501c76312d52235cfd",
+ "vout": 332,
+ "scriptSig": {
+ "asm": "3046022100b4251ecd63778a3dde0155abe4cd162947620ae9ee45a874353551092325b116022100db307baf4ff3781ec520bd18f387948cedd15dc27bafe17c894b0fe6ffffcafa[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "493046022100b4251ecd63778a3dde0155abe4cd162947620ae9ee45a874353551092325b116022100db307baf4ff3781ec520bd18f387948cedd15dc27bafe17c894b0fe6ffffcafa012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "a72ec96bd0d022d1b0c2f9078cdd46b3725b8eecdd001e17b21e3ababad14ecb",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "3046022100a9b617843b68c284715d3e02fd120479cd0d96a6c43bf01e697fb0a460a21a3a022100ba0a12fbe8b993d4e7911fa3467615765dbe421ddf5c51b57a9c1ee19dcc00ba[ALL] 03e633b4fa4ceb705c2da712390767199be8ef2448b3095dc01652e11b2b751505",
+ "hex": "493046022100a9b617843b68c284715d3e02fd120479cd0d96a6c43bf01e697fb0a460a21a3a022100ba0a12fbe8b993d4e7911fa3467615765dbe421ddf5c51b57a9c1ee19dcc00ba012103e633b4fa4ceb705c2da712390767199be8ef2448b3095dc01652e11b2b751505"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "752f7f69b915637dc1c2f7aed1466ad676f6f3e24cf922809705f664e97ab3c1",
+ "vout": 1,
+ "scriptSig": {
+ "asm": "3044022079bd62ee09621a3be96b760c39e8ef78170101d46313923c6b07ae60a95c90670220238e51ea29fc70b04b65508450523caedbb11cb4dd5aa608c81487de798925ba[ALL] 027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34",
+ "hex": "473044022079bd62ee09621a3be96b760c39e8ef78170101d46313923c6b07ae60a95c90670220238e51ea29fc70b04b65508450523caedbb11cb4dd5aa608c81487de798925ba0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "b0ac9cca2e69cd02410e31b1f4402a25758e71abd1ab06c265ef9077dc05d0ed",
+ "vout": 209,
+ "scriptSig": {
+ "asm": "304502207722d6f9038673c86a1019b1c4de2d687ae246477cd4ca7002762be0299de385022100e594a11e3a313942595f7666dcf7078bcb14f1330f4206b95c917e7ec0e82fac[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "48304502207722d6f9038673c86a1019b1c4de2d687ae246477cd4ca7002762be0299de385022100e594a11e3a313942595f7666dcf7078bcb14f1330f4206b95c917e7ec0e82fac012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "a135eafb595eaf4c1ea59ccb111cdc0eae1b2c979b226a1e5aa8b76fe2d628df",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "3045022100a63a4788027b79b65c6f9d9e054f68cf3b4eed19efd82a2d53f70dcbe64683390220526f243671425b2bd05745fcf2729361f985cfe84ea80c7cfc817b93d8134374[ALL] 03a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52",
+ "hex": "483045022100a63a4788027b79b65c6f9d9e054f68cf3b4eed19efd82a2d53f70dcbe64683390220526f243671425b2bd05745fcf2729361f985cfe84ea80c7cfc817b93d8134374012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "a5d6bf53ba21140b8a4d554feb00fe8bb9a62430ff9e4624aa2f58a120232aae",
+ "vout": 1,
+ "scriptSig": {
+ "asm": "3046022100b200ac6db16842f76dab9abe807ce423c992805879bc50abd46ed8275a59d9cf022100c0d518e85dd345b3c29dd4dc47b9a420d3ce817b18720e94966d2fe23413a408[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "493046022100b200ac6db16842f76dab9abe807ce423c992805879bc50abd46ed8275a59d9cf022100c0d518e85dd345b3c29dd4dc47b9a420d3ce817b18720e94966d2fe23413a408012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "1b299cf14f1a22e81ea56d71b7affbd7cf386807bf2b4d4b79a18a54125accb3",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "3045022100ededc441c3103a6f2bd6cab7639421af0f6ec5e60503bce1e603cf34f00aee1c02205cb75f3f519a13fb348783b21db3085cb5ec7552c59e394fdbc3e1feea43f967[ALL] 03a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52",
+ "hex": "483045022100ededc441c3103a6f2bd6cab7639421af0f6ec5e60503bce1e603cf34f00aee1c02205cb75f3f519a13fb348783b21db3085cb5ec7552c59e394fdbc3e1feea43f967012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "071df1cdcb3f0070f9d6af7b0274f02d0be2324a274727cfd288383167531485",
+ "vout": 21,
+ "scriptSig": {
+ "asm": "3045022100d9eed5413d2a4b4b98625aa6e3169edc4fb4663e7862316d69224454e70cd8ca022061e506521d5ced51dd0ea36496e75904d756a4c4f9fb111568555075d5f68d9a[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "483045022100d9eed5413d2a4b4b98625aa6e3169edc4fb4663e7862316d69224454e70cd8ca022061e506521d5ced51dd0ea36496e75904d756a4c4f9fb111568555075d5f68d9a012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "b012e500eb7adf7a13ed332dd6ece849f94f7a62bb3eac5babab356d1fc19282",
+ "vout": 9,
+ "scriptSig": {
+ "asm": "304502207e84b27139c4c19c828cb1e30c349bba88e4d9b59be97286960793b5ddc0a2af0221008cdc7a951e7f31c20953ed5635fbabf228e80b7047f32faaa0313e7693005177[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "48304502207e84b27139c4c19c828cb1e30c349bba88e4d9b59be97286960793b5ddc0a2af0221008cdc7a951e7f31c20953ed5635fbabf228e80b7047f32faaa0313e7693005177012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "58840fee9c833f2f2d40575842f30f4b8d2553094d06ad88b03d06869acf3d88",
+ "vout": 30,
+ "scriptSig": {
+ "asm": "30440220426540dfed9c4ab5812e5f06df705b8bcf307dd7d20f7fa6512298b2a6314f420220064055096e3ca62f6c7352c66a5447767c53f946acdf35025ab3807ddb2fa404[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "4730440220426540dfed9c4ab5812e5f06df705b8bcf307dd7d20f7fa6512298b2a6314f420220064055096e3ca62f6c7352c66a5447767c53f946acdf35025ab3807ddb2fa404012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "e69f9cd16946e570a665245354428a3f507ea69f4568b581e4af98edb3db9766",
+ "vout": 114,
+ "scriptSig": {
+ "asm": "304402200a5e673996f2fc88e21cc8613611f08a650bc0370338803591d85d0ec5663764022040b6664a0d1ec83a7f01975b8fde5232992b8ca58bf48af6725d2f92a936ab2e[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "47304402200a5e673996f2fc88e21cc8613611f08a650bc0370338803591d85d0ec5663764022040b6664a0d1ec83a7f01975b8fde5232992b8ca58bf48af6725d2f92a936ab2e012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "595d1257f654ed2cbe5a65421e8aefd2b4d70b5b6c89a03f1d7e518221fc3f02",
+ "vout": 103,
+ "scriptSig": {
+ "asm": "3046022100d93b30219c5735f673be5c3b4688366d96f545561c74cb62c6958c00f6960806022100ec8200adcb028f2184fa2a4f6faac7f8bb57cb4503bb7584ac11051fece31b3d[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "493046022100d93b30219c5735f673be5c3b4688366d96f545561c74cb62c6958c00f6960806022100ec8200adcb028f2184fa2a4f6faac7f8bb57cb4503bb7584ac11051fece31b3d012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "06fc818f9555a261248ecd7aad0993eafb5a82ceb2b5c87c3ddfb06671c7f816",
+ "vout": 1,
+ "scriptSig": {
+ "asm": "3045022100a13934e68d3f5b22b130c4cb33f4da468cffc52323a47fbfbe06b64858162246022047081e0a70ff770e64a2e2d31e5d520d9102268b57a47009a72fe73ec7669018[ALL] 0234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cd",
+ "hex": "483045022100a13934e68d3f5b22b130c4cb33f4da468cffc52323a47fbfbe06b64858162246022047081e0a70ff770e64a2e2d31e5d520d9102268b57a47009a72fe73ec766901801210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cd"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "fb416c8155d6bb1d43f9395466ca90a638a7c2dd3ff617aadf3a7ac8f3967b19",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "304602210097f1f35d5bdc1a3a60390a1b015b8e7c4f916aa3847aafd969e04975e15bbe70022100a9052eb25517d481f1fda1b129eb1b534da50ea1a51f3ee012dca3601c11b86a[ALL] 027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34",
+ "hex": "49304602210097f1f35d5bdc1a3a60390a1b015b8e7c4f916aa3847aafd969e04975e15bbe70022100a9052eb25517d481f1fda1b129eb1b534da50ea1a51f3ee012dca3601c11b86a0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "3940b9683bd6104ad24c978e640ba4095993cafdb27d2ed91baa27ee61a2d920",
+ "vout": 221,
+ "scriptSig": {
+ "asm": "3045022012b3138c591bf7154b6fef457f2c4a3c7162225003788ac0024a99355865ff13022100b71b125ae1ffb2e1d1571f580cd3ebc8cd049a2d7a8a41f138ba94aeb982106f[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "483045022012b3138c591bf7154b6fef457f2c4a3c7162225003788ac0024a99355865ff13022100b71b125ae1ffb2e1d1571f580cd3ebc8cd049a2d7a8a41f138ba94aeb982106f012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "711b5714d3b5136147c02194cd95bde94a4648c4263ca6f972d86cd1d579f150",
+ "vout": 1,
+ "scriptSig": {
+ "asm": "3045022100f834ccc8b22ee72712a3e5e6ef4acb8b2fb791b5385b70e2cd4332674d6667f4022024fbda0a997e0c253503f217501f508a4d56edce2c813ecdd9ad796dbeba9074[ALL] 0234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cd",
+ "hex": "483045022100f834ccc8b22ee72712a3e5e6ef4acb8b2fb791b5385b70e2cd4332674d6667f4022024fbda0a997e0c253503f217501f508a4d56edce2c813ecdd9ad796dbeba907401210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cd"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "6364b5c5efe018430789e7fb4e338209546cae5d9c5f5e300aac68155d861b55",
+ "vout": 27,
+ "scriptSig": {
+ "asm": "304502203b2fd1e39ae0e469d7a15768f262661b0de41470daf0fe8c4fd0c26542a0870002210081c57e331f9a2d214457d953e3542904727ee412c63028113635d7224da3dccc[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "48304502203b2fd1e39ae0e469d7a15768f262661b0de41470daf0fe8c4fd0c26542a0870002210081c57e331f9a2d214457d953e3542904727ee412c63028113635d7224da3dccc012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "0bb57f6e38012c86d4c5a28c904f2675082859147921a707d48961015a3e5057",
+ "vout": 1095,
+ "scriptSig": {
+ "asm": "304502206947a9c54f0664ece4430fd4ae999891dc50bb6126bc36b6a15a3189f29d25e9022100a86cfc4e2fdd9e39a20e305cfd1b76509c67b3e313e0f118229105caa0e823c9[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "48304502206947a9c54f0664ece4430fd4ae999891dc50bb6126bc36b6a15a3189f29d25e9022100a86cfc4e2fdd9e39a20e305cfd1b76509c67b3e313e0f118229105caa0e823c9012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "9b34274814a2540bb062107117f8f3e75ef85d953e9372d8261a3e9dfbc1163f",
+ "vout": 37,
+ "scriptSig": {
+ "asm": "3045022100c7128fe10b2d38744ae8177776054c29fc8ec13f07207723e70766ab7164847402201d2cf09009b9596de74c0183d1ab832e5edddb7a9965880bb400097e850850f8[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "483045022100c7128fe10b2d38744ae8177776054c29fc8ec13f07207723e70766ab7164847402201d2cf09009b9596de74c0183d1ab832e5edddb7a9965880bb400097e850850f8012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "b86b5cc0d8a7374d94e277850b0a249cb26a7b42ddf014f28a49b8859da64241",
+ "vout": 20,
+ "scriptSig": {
+ "asm": "304502203b89a71628a28cc3703d170ca3be77786cff6b867e38a18b719705f8a326578f022100b2a9879e1acf621faa6466c207746a7f3eb4c8514c1482969aba3f2a957f1321[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "48304502203b89a71628a28cc3703d170ca3be77786cff6b867e38a18b719705f8a326578f022100b2a9879e1acf621faa6466c207746a7f3eb4c8514c1482969aba3f2a957f1321012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "3d0a2353eeec44d3c10aed259038db321912122cd4150048f7bfa4c0ecfee236",
+ "vout": 242,
+ "scriptSig": {
+ "asm": "3046022100ef794a8ef7fd6752d2a183c18866ff6e8dc0f5bd889a63e2c21cf303a6302461022100c1b09662d9e92988c3f9fcf17d1bcc79b5403647095d7212b9f8a1278a532d68[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "493046022100ef794a8ef7fd6752d2a183c18866ff6e8dc0f5bd889a63e2c21cf303a6302461022100c1b09662d9e92988c3f9fcf17d1bcc79b5403647095d7212b9f8a1278a532d68012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ }
+ ],
+ "vout": [
+ {
+ "value": 1.3782,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_DUP OP_HASH160 8fd139bb39ced713f231c58a4d07bf6954d1c201 OP_EQUALVERIFY OP_CHECKSIG",
+ "hex": "76a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac",
+ "reqSigs": 1,
+ "type": "pubkeyhash",
+ "addresses": [
+ "1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o"
+ ]
+ }
+ }
+ ],
+ "hex": "0100000015fd5c23522d31761c50175453daa6edaabe47a602a592d39ce933d8271a1a87274c0100006c493046022100b4251ecd63778a3dde0155abe4cd162947620ae9ee45a874353551092325b116022100db307baf4ff3781ec520bd18f387948cedd15dc27bafe17c894b0fe6ffffcafa012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffcb4ed1baba3a1eb2171e00ddec8e5b72b346dd8c07f9c2b0d122d0d06bc92ea7000000006c493046022100a9b617843b68c284715d3e02fd120479cd0d96a6c43bf01e697fb0a460a21a3a022100ba0a12fbe8b993d4e7911fa3467615765dbe421ddf5c51b57a9c1ee19dcc00ba012103e633b4fa4ceb705c2da712390767199be8ef2448b3095dc01652e11b2b751505ffffffffc1b37ae964f605978022f94ce2f3f676d66a46d1aef7c2c17d6315b9697f2f75010000006a473044022079bd62ee09621a3be96b760c39e8ef78170101d46313923c6b07ae60a95c90670220238e51ea29fc70b04b65508450523caedbb11cb4dd5aa608c81487de798925ba0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34ffffffffedd005dc7790ef65c206abd1ab718e75252a40f4b1310e4102cd692eca9cacb0d10000006b48304502207722d6f9038673c86a1019b1c4de2d687ae246477cd4ca7002762be0299de385022100e594a11e3a313942595f7666dcf7078bcb14f1330f4206b95c917e7ec0e82fac012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffdf28d6e26fb7a85a1e6a229b972c1bae0edc1c11cb9ca51e4caf5e59fbea35a1000000006b483045022100a63a4788027b79b65c6f9d9e054f68cf3b4eed19efd82a2d53f70dcbe64683390220526f243671425b2bd05745fcf2729361f985cfe84ea80c7cfc817b93d8134374012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52ffffffffae2a2320a1582faa24469eff3024a6b98bfe00eb4f554d8a0b1421ba53bfd6a5010000006c493046022100b200ac6db16842f76dab9abe807ce423c992805879bc50abd46ed8275a59d9cf022100c0d518e85dd345b3c29dd4dc47b9a420d3ce817b18720e94966d2fe23413a408012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffb3cc5a12548aa1794b4d2bbf076838cfd7fbafb7716da51ee8221a4ff19c291b000000006b483045022100ededc441c3103a6f2bd6cab7639421af0f6ec5e60503bce1e603cf34f00aee1c02205cb75f3f519a13fb348783b21db3085cb5ec7552c59e394fdbc3e1feea43f967012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52ffffffff85145367313888d2cf2747274a32e20b2df074027bafd6f970003fcbcdf11d07150000006b483045022100d9eed5413d2a4b4b98625aa6e3169edc4fb4663e7862316d69224454e70cd8ca022061e506521d5ced51dd0ea36496e75904d756a4c4f9fb111568555075d5f68d9a012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff8292c11f6d35abab5bac3ebb627a4ff949e8ecd62d33ed137adf7aeb00e512b0090000006b48304502207e84b27139c4c19c828cb1e30c349bba88e4d9b59be97286960793b5ddc0a2af0221008cdc7a951e7f31c20953ed5635fbabf228e80b7047f32faaa0313e7693005177012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff883dcf9a86063db088ad064d0953258d4b0ff3425857402d2f3f839cee0f84581e0000006a4730440220426540dfed9c4ab5812e5f06df705b8bcf307dd7d20f7fa6512298b2a6314f420220064055096e3ca62f6c7352c66a5447767c53f946acdf35025ab3807ddb2fa404012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff6697dbb3ed98afe481b568459fa67e503f8a4254532465a670e54669d19c9fe6720000006a47304402200a5e673996f2fc88e21cc8613611f08a650bc0370338803591d85d0ec5663764022040b6664a0d1ec83a7f01975b8fde5232992b8ca58bf48af6725d2f92a936ab2e012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff023ffc2182517e1d3fa0896c5b0bd7b4d2ef8a1e42655abe2ced54f657125d59670000006c493046022100d93b30219c5735f673be5c3b4688366d96f545561c74cb62c6958c00f6960806022100ec8200adcb028f2184fa2a4f6faac7f8bb57cb4503bb7584ac11051fece31b3d012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b
0cac9a93adcffffffff16f8c77166b0df3d7cc8b5b2ce825afbea9309ad7acd8e2461a255958f81fc06010000006b483045022100a13934e68d3f5b22b130c4cb33f4da468cffc52323a47fbfbe06b64858162246022047081e0a70ff770e64a2e2d31e5d520d9102268b57a47009a72fe73ec766901801210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cdffffffff197b96f3c87a3adfaa17f63fddc2a738a690ca665439f9431dbbd655816c41fb000000006c49304602210097f1f35d5bdc1a3a60390a1b015b8e7c4f916aa3847aafd969e04975e15bbe70022100a9052eb25517d481f1fda1b129eb1b534da50ea1a51f3ee012dca3601c11b86a0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34ffffffff20d9a261ee27aa1bd92e7db2fdca935909a40b648e974cd24a10d63b68b94039dd0000006b483045022012b3138c591bf7154b6fef457f2c4a3c7162225003788ac0024a99355865ff13022100b71b125ae1ffb2e1d1571f580cd3ebc8cd049a2d7a8a41f138ba94aeb982106f012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff50f179d5d16cd872f9a63c26c448464ae9bd95cd9421c0476113b5d314571b71010000006b483045022100f834ccc8b22ee72712a3e5e6ef4acb8b2fb791b5385b70e2cd4332674d6667f4022024fbda0a997e0c253503f217501f508a4d56edce2c813ecdd9ad796dbeba907401210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cdffffffff551b865d1568ac0a305e5f9c5dae6c540982334efbe789074318e0efc5b564631b0000006b48304502203b2fd1e39ae0e469d7a15768f262661b0de41470daf0fe8c4fd0c26542a0870002210081c57e331f9a2d214457d953e3542904727ee412c63028113635d7224da3dccc012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff57503e5a016189d407a721791459280875264f908ca2c5d4862c01386e7fb50b470400006b48304502206947a9c54f0664ece4430fd4ae999891dc50bb6126bc36b6a15a3189f29d25e9022100a86cfc4e2fdd9e39a20e305cfd1b76509c67b3e313e0f118229105caa0e823c9012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff3f16c1fb9d3e1a26d872933e955df85ee7f3f817711062b00b54a2144827349b250000006b483045022100c7128fe10b2d38744ae8177776054c29fc8ec13f07207723e70766ab7164847402201d2cf09009b9596de74c0183d1ab832e5edddb7a9965880bb400097e850850f8012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff4142a69d85b8498af214f0dd427b6ab29c240a0b8577e2944d37a7d8c05c6bb8140000006b48304502203b89a71628a28cc3703d170ca3be77786cff6b867e38a18b719705f8a326578f022100b2a9879e1acf621faa6466c207746a7f3eb4c8514c1482969aba3f2a957f1321012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff36e2feecc0a4bff7480015d42c12121932db389025ed0ac1d344ecee53230a3df20000006c493046022100ef794a8ef7fd6752d2a183c18866ff6e8dc0f5bd889a63e2c21cf303a6302461022100c1b09662d9e92988c3f9fcf17d1bcc79b5403647095d7212b9f8a1278a532d68012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff0160f73608000000001976a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac00000000"
+}
diff --git a/test/util/data/tt-locktime317000-out.hex b/test/util/data/tt-locktime317000-out.hex
new file mode 100644
index 0000000000..287f420a40
--- /dev/null
+++ b/test/util/data/tt-locktime317000-out.hex
@@ -0,0 +1 @@
+0100000015fd5c23522d31761c50175453daa6edaabe47a602a592d39ce933d8271a1a87274c0100006c493046022100b4251ecd63778a3dde0155abe4cd162947620ae9ee45a874353551092325b116022100db307baf4ff3781ec520bd18f387948cedd15dc27bafe17c894b0fe6ffffcafa012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffcb4ed1baba3a1eb2171e00ddec8e5b72b346dd8c07f9c2b0d122d0d06bc92ea7000000006c493046022100a9b617843b68c284715d3e02fd120479cd0d96a6c43bf01e697fb0a460a21a3a022100ba0a12fbe8b993d4e7911fa3467615765dbe421ddf5c51b57a9c1ee19dcc00ba012103e633b4fa4ceb705c2da712390767199be8ef2448b3095dc01652e11b2b751505ffffffffc1b37ae964f605978022f94ce2f3f676d66a46d1aef7c2c17d6315b9697f2f75010000006a473044022079bd62ee09621a3be96b760c39e8ef78170101d46313923c6b07ae60a95c90670220238e51ea29fc70b04b65508450523caedbb11cb4dd5aa608c81487de798925ba0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34ffffffffedd005dc7790ef65c206abd1ab718e75252a40f4b1310e4102cd692eca9cacb0d10000006b48304502207722d6f9038673c86a1019b1c4de2d687ae246477cd4ca7002762be0299de385022100e594a11e3a313942595f7666dcf7078bcb14f1330f4206b95c917e7ec0e82fac012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffdf28d6e26fb7a85a1e6a229b972c1bae0edc1c11cb9ca51e4caf5e59fbea35a1000000006b483045022100a63a4788027b79b65c6f9d9e054f68cf3b4eed19efd82a2d53f70dcbe64683390220526f243671425b2bd05745fcf2729361f985cfe84ea80c7cfc817b93d8134374012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52ffffffffae2a2320a1582faa24469eff3024a6b98bfe00eb4f554d8a0b1421ba53bfd6a5010000006c493046022100b200ac6db16842f76dab9abe807ce423c992805879bc50abd46ed8275a59d9cf022100c0d518e85dd345b3c29dd4dc47b9a420d3ce817b18720e94966d2fe23413a408012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffb3cc5a12548aa1794b4d2bbf076838cfd7fbafb7716da51ee8221a4ff19c291b000000006b483045022100ededc441c3103a6f2bd6cab7639421af0f6ec5e60503bce1e603cf34f00aee1c02205cb75f3f519a13fb348783b21db3085cb5ec7552c59e394fdbc3e1feea43f967012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52ffffffff85145367313888d2cf2747274a32e20b2df074027bafd6f970003fcbcdf11d07150000006b483045022100d9eed5413d2a4b4b98625aa6e3169edc4fb4663e7862316d69224454e70cd8ca022061e506521d5ced51dd0ea36496e75904d756a4c4f9fb111568555075d5f68d9a012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff8292c11f6d35abab5bac3ebb627a4ff949e8ecd62d33ed137adf7aeb00e512b0090000006b48304502207e84b27139c4c19c828cb1e30c349bba88e4d9b59be97286960793b5ddc0a2af0221008cdc7a951e7f31c20953ed5635fbabf228e80b7047f32faaa0313e7693005177012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff883dcf9a86063db088ad064d0953258d4b0ff3425857402d2f3f839cee0f84581e0000006a4730440220426540dfed9c4ab5812e5f06df705b8bcf307dd7d20f7fa6512298b2a6314f420220064055096e3ca62f6c7352c66a5447767c53f946acdf35025ab3807ddb2fa404012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff6697dbb3ed98afe481b568459fa67e503f8a4254532465a670e54669d19c9fe6720000006a47304402200a5e673996f2fc88e21cc8613611f08a650bc0370338803591d85d0ec5663764022040b6664a0d1ec83a7f01975b8fde5232992b8ca58bf48af6725d2f92a936ab2e012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff023ffc2182517e1d3fa0896c5b0bd7b4d2ef8a1e42655abe2ced54f657125d59670000006c493046022100d93b30219c5735f673be5c3b4688366d96f545561c74cb62c6958c00f6960806022100ec8200adcb028f2184fa2a4f6faac7f8bb57cb4503bb7584ac11051fece31b3d012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff16f8c77166b0df3d7cc8b5b2ce825afbea9309ad7acd8e2461a255958f81fc06010000006b483045022100a13934e68d3f5b22b130c4cb33f4da468cffc52323a47fbfbe06b64858162246022047081e0a70ff770e64a2e2d31e5d520d9102268b57a47009a72fe73ec766901801210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cdffffffff197b96f3c87a3adfaa17f63fddc2a738a690ca665439f9431dbbd655816c41fb000000006c49304602210097f1f35d5bdc1a3a60390a1b015b8e7c4f916aa3847aafd969e04975e15bbe70022100a9052eb25517d481f1fda1b129eb1b534da50ea1a51f3ee012dca3601c11b86a0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34ffffffff20d9a261ee27aa1bd92e7db2fdca935909a40b648e974cd24a10d63b68b94039dd0000006b483045022012b3138c591bf7154b6fef457f2c4a3c7162225003788ac0024a99355865ff13022100b71b125ae1ffb2e1d1571f580cd3ebc8cd049a2d7a8a41f138ba94aeb982106f012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff50f179d5d16cd872f9a63c26c448464ae9bd95cd9421c0476113b5d314571b71010000006b483045022100f834ccc8b22ee72712a3e5e6ef4acb8b2fb791b5385b70e2cd4332674d6667f4022024fbda0a997e0c253503f217501f508a4d56edce2c813ecdd9ad796dbeba907401210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cdffffffff551b865d1568ac0a305e5f9c5dae6c540982334efbe789074318e0efc5b564631b0000006b48304502203b2fd1e39ae0e469d7a15768f262661b0de41470daf0fe8c4fd0c26542a0870002210081c57e331f9a2d214457d953e3542904727ee412c63028113635d7224da3dccc012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff57503e5a016189d407a721791459280875264f908ca2c5d4862c01386e7fb50b470400006b48304502206947a9c54f0664ece4430fd4ae999891dc50bb6126bc36b6a15a3189f29d25e9022100a86cfc4e2fdd9e39a20e305cfd1b76509c67b3e313e0f118229105caa0e823c9012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff3f16c1fb9d3e1a26d872933e955df85ee7f3f817711062b00b54a2144827349b250000006b483045022100c7128fe10b2d38744ae8177776054c29fc8ec13f07207723e70766ab7164847402201d2cf09009b9596de74c0183d1ab832e5edddb7a9965880bb400097e850850f8012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff4142a69d85b8498af214f0dd427b6ab29c240a0b8577e2944d37a7d8c05c6bb8140000006b48304502203b89a71628a28cc3703d170ca3be77786cff6b867e38a18b719705f8a326578f022100b2a9879e1acf621faa6466c207746a7f3eb4c8514c1482969aba3f2a957f1321012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff36e2feecc0a4bff7480015d42c12121932db389025ed0ac1d344ecee53230a3df20000006c493046022100ef794a8ef7fd6752d2a183c18866ff6e8dc0f5bd889a63e2c21cf303a6302461022100c1b09662d9e92988c3f9fcf17d1bcc79b5403647095d7212b9f8a1278a532d68012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff0260f73608000000001976a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac41420f00000000001976a9146c772e9cf96371bba3da8cb733da70a2fcf2007888ac48d60400
diff --git a/test/util/data/tt-locktime317000-out.json b/test/util/data/tt-locktime317000-out.json
new file mode 100644
index 0000000000..2b9075f8ac
--- /dev/null
+++ b/test/util/data/tt-locktime317000-out.json
@@ -0,0 +1,226 @@
+{
+ "txid": "aded538f642c17e15f4d3306b8be7e1a4d1ae0c4616d641ab51ea09ba65e5cb5",
+ "hash": "aded538f642c17e15f4d3306b8be7e1a4d1ae0c4616d641ab51ea09ba65e5cb5",
+ "version": 1,
+ "locktime": 317000,
+ "vin": [
+ {
+ "txid": "27871a1a27d833e99cd392a502a647beaaeda6da535417501c76312d52235cfd",
+ "vout": 332,
+ "scriptSig": {
+ "asm": "3046022100b4251ecd63778a3dde0155abe4cd162947620ae9ee45a874353551092325b116022100db307baf4ff3781ec520bd18f387948cedd15dc27bafe17c894b0fe6ffffcafa[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "493046022100b4251ecd63778a3dde0155abe4cd162947620ae9ee45a874353551092325b116022100db307baf4ff3781ec520bd18f387948cedd15dc27bafe17c894b0fe6ffffcafa012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "a72ec96bd0d022d1b0c2f9078cdd46b3725b8eecdd001e17b21e3ababad14ecb",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "3046022100a9b617843b68c284715d3e02fd120479cd0d96a6c43bf01e697fb0a460a21a3a022100ba0a12fbe8b993d4e7911fa3467615765dbe421ddf5c51b57a9c1ee19dcc00ba[ALL] 03e633b4fa4ceb705c2da712390767199be8ef2448b3095dc01652e11b2b751505",
+ "hex": "493046022100a9b617843b68c284715d3e02fd120479cd0d96a6c43bf01e697fb0a460a21a3a022100ba0a12fbe8b993d4e7911fa3467615765dbe421ddf5c51b57a9c1ee19dcc00ba012103e633b4fa4ceb705c2da712390767199be8ef2448b3095dc01652e11b2b751505"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "752f7f69b915637dc1c2f7aed1466ad676f6f3e24cf922809705f664e97ab3c1",
+ "vout": 1,
+ "scriptSig": {
+ "asm": "3044022079bd62ee09621a3be96b760c39e8ef78170101d46313923c6b07ae60a95c90670220238e51ea29fc70b04b65508450523caedbb11cb4dd5aa608c81487de798925ba[ALL] 027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34",
+ "hex": "473044022079bd62ee09621a3be96b760c39e8ef78170101d46313923c6b07ae60a95c90670220238e51ea29fc70b04b65508450523caedbb11cb4dd5aa608c81487de798925ba0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "b0ac9cca2e69cd02410e31b1f4402a25758e71abd1ab06c265ef9077dc05d0ed",
+ "vout": 209,
+ "scriptSig": {
+ "asm": "304502207722d6f9038673c86a1019b1c4de2d687ae246477cd4ca7002762be0299de385022100e594a11e3a313942595f7666dcf7078bcb14f1330f4206b95c917e7ec0e82fac[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "48304502207722d6f9038673c86a1019b1c4de2d687ae246477cd4ca7002762be0299de385022100e594a11e3a313942595f7666dcf7078bcb14f1330f4206b95c917e7ec0e82fac012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "a135eafb595eaf4c1ea59ccb111cdc0eae1b2c979b226a1e5aa8b76fe2d628df",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "3045022100a63a4788027b79b65c6f9d9e054f68cf3b4eed19efd82a2d53f70dcbe64683390220526f243671425b2bd05745fcf2729361f985cfe84ea80c7cfc817b93d8134374[ALL] 03a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52",
+ "hex": "483045022100a63a4788027b79b65c6f9d9e054f68cf3b4eed19efd82a2d53f70dcbe64683390220526f243671425b2bd05745fcf2729361f985cfe84ea80c7cfc817b93d8134374012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "a5d6bf53ba21140b8a4d554feb00fe8bb9a62430ff9e4624aa2f58a120232aae",
+ "vout": 1,
+ "scriptSig": {
+ "asm": "3046022100b200ac6db16842f76dab9abe807ce423c992805879bc50abd46ed8275a59d9cf022100c0d518e85dd345b3c29dd4dc47b9a420d3ce817b18720e94966d2fe23413a408[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "493046022100b200ac6db16842f76dab9abe807ce423c992805879bc50abd46ed8275a59d9cf022100c0d518e85dd345b3c29dd4dc47b9a420d3ce817b18720e94966d2fe23413a408012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "1b299cf14f1a22e81ea56d71b7affbd7cf386807bf2b4d4b79a18a54125accb3",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "3045022100ededc441c3103a6f2bd6cab7639421af0f6ec5e60503bce1e603cf34f00aee1c02205cb75f3f519a13fb348783b21db3085cb5ec7552c59e394fdbc3e1feea43f967[ALL] 03a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52",
+ "hex": "483045022100ededc441c3103a6f2bd6cab7639421af0f6ec5e60503bce1e603cf34f00aee1c02205cb75f3f519a13fb348783b21db3085cb5ec7552c59e394fdbc3e1feea43f967012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "071df1cdcb3f0070f9d6af7b0274f02d0be2324a274727cfd288383167531485",
+ "vout": 21,
+ "scriptSig": {
+ "asm": "3045022100d9eed5413d2a4b4b98625aa6e3169edc4fb4663e7862316d69224454e70cd8ca022061e506521d5ced51dd0ea36496e75904d756a4c4f9fb111568555075d5f68d9a[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "483045022100d9eed5413d2a4b4b98625aa6e3169edc4fb4663e7862316d69224454e70cd8ca022061e506521d5ced51dd0ea36496e75904d756a4c4f9fb111568555075d5f68d9a012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "b012e500eb7adf7a13ed332dd6ece849f94f7a62bb3eac5babab356d1fc19282",
+ "vout": 9,
+ "scriptSig": {
+ "asm": "304502207e84b27139c4c19c828cb1e30c349bba88e4d9b59be97286960793b5ddc0a2af0221008cdc7a951e7f31c20953ed5635fbabf228e80b7047f32faaa0313e7693005177[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "48304502207e84b27139c4c19c828cb1e30c349bba88e4d9b59be97286960793b5ddc0a2af0221008cdc7a951e7f31c20953ed5635fbabf228e80b7047f32faaa0313e7693005177012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "58840fee9c833f2f2d40575842f30f4b8d2553094d06ad88b03d06869acf3d88",
+ "vout": 30,
+ "scriptSig": {
+ "asm": "30440220426540dfed9c4ab5812e5f06df705b8bcf307dd7d20f7fa6512298b2a6314f420220064055096e3ca62f6c7352c66a5447767c53f946acdf35025ab3807ddb2fa404[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "4730440220426540dfed9c4ab5812e5f06df705b8bcf307dd7d20f7fa6512298b2a6314f420220064055096e3ca62f6c7352c66a5447767c53f946acdf35025ab3807ddb2fa404012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "e69f9cd16946e570a665245354428a3f507ea69f4568b581e4af98edb3db9766",
+ "vout": 114,
+ "scriptSig": {
+ "asm": "304402200a5e673996f2fc88e21cc8613611f08a650bc0370338803591d85d0ec5663764022040b6664a0d1ec83a7f01975b8fde5232992b8ca58bf48af6725d2f92a936ab2e[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "47304402200a5e673996f2fc88e21cc8613611f08a650bc0370338803591d85d0ec5663764022040b6664a0d1ec83a7f01975b8fde5232992b8ca58bf48af6725d2f92a936ab2e012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "595d1257f654ed2cbe5a65421e8aefd2b4d70b5b6c89a03f1d7e518221fc3f02",
+ "vout": 103,
+ "scriptSig": {
+ "asm": "3046022100d93b30219c5735f673be5c3b4688366d96f545561c74cb62c6958c00f6960806022100ec8200adcb028f2184fa2a4f6faac7f8bb57cb4503bb7584ac11051fece31b3d[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "493046022100d93b30219c5735f673be5c3b4688366d96f545561c74cb62c6958c00f6960806022100ec8200adcb028f2184fa2a4f6faac7f8bb57cb4503bb7584ac11051fece31b3d012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "06fc818f9555a261248ecd7aad0993eafb5a82ceb2b5c87c3ddfb06671c7f816",
+ "vout": 1,
+ "scriptSig": {
+ "asm": "3045022100a13934e68d3f5b22b130c4cb33f4da468cffc52323a47fbfbe06b64858162246022047081e0a70ff770e64a2e2d31e5d520d9102268b57a47009a72fe73ec7669018[ALL] 0234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cd",
+ "hex": "483045022100a13934e68d3f5b22b130c4cb33f4da468cffc52323a47fbfbe06b64858162246022047081e0a70ff770e64a2e2d31e5d520d9102268b57a47009a72fe73ec766901801210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cd"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "fb416c8155d6bb1d43f9395466ca90a638a7c2dd3ff617aadf3a7ac8f3967b19",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "304602210097f1f35d5bdc1a3a60390a1b015b8e7c4f916aa3847aafd969e04975e15bbe70022100a9052eb25517d481f1fda1b129eb1b534da50ea1a51f3ee012dca3601c11b86a[ALL] 027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34",
+ "hex": "49304602210097f1f35d5bdc1a3a60390a1b015b8e7c4f916aa3847aafd969e04975e15bbe70022100a9052eb25517d481f1fda1b129eb1b534da50ea1a51f3ee012dca3601c11b86a0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "3940b9683bd6104ad24c978e640ba4095993cafdb27d2ed91baa27ee61a2d920",
+ "vout": 221,
+ "scriptSig": {
+ "asm": "3045022012b3138c591bf7154b6fef457f2c4a3c7162225003788ac0024a99355865ff13022100b71b125ae1ffb2e1d1571f580cd3ebc8cd049a2d7a8a41f138ba94aeb982106f[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "483045022012b3138c591bf7154b6fef457f2c4a3c7162225003788ac0024a99355865ff13022100b71b125ae1ffb2e1d1571f580cd3ebc8cd049a2d7a8a41f138ba94aeb982106f012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "711b5714d3b5136147c02194cd95bde94a4648c4263ca6f972d86cd1d579f150",
+ "vout": 1,
+ "scriptSig": {
+ "asm": "3045022100f834ccc8b22ee72712a3e5e6ef4acb8b2fb791b5385b70e2cd4332674d6667f4022024fbda0a997e0c253503f217501f508a4d56edce2c813ecdd9ad796dbeba9074[ALL] 0234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cd",
+ "hex": "483045022100f834ccc8b22ee72712a3e5e6ef4acb8b2fb791b5385b70e2cd4332674d6667f4022024fbda0a997e0c253503f217501f508a4d56edce2c813ecdd9ad796dbeba907401210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cd"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "6364b5c5efe018430789e7fb4e338209546cae5d9c5f5e300aac68155d861b55",
+ "vout": 27,
+ "scriptSig": {
+ "asm": "304502203b2fd1e39ae0e469d7a15768f262661b0de41470daf0fe8c4fd0c26542a0870002210081c57e331f9a2d214457d953e3542904727ee412c63028113635d7224da3dccc[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "48304502203b2fd1e39ae0e469d7a15768f262661b0de41470daf0fe8c4fd0c26542a0870002210081c57e331f9a2d214457d953e3542904727ee412c63028113635d7224da3dccc012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "0bb57f6e38012c86d4c5a28c904f2675082859147921a707d48961015a3e5057",
+ "vout": 1095,
+ "scriptSig": {
+ "asm": "304502206947a9c54f0664ece4430fd4ae999891dc50bb6126bc36b6a15a3189f29d25e9022100a86cfc4e2fdd9e39a20e305cfd1b76509c67b3e313e0f118229105caa0e823c9[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "48304502206947a9c54f0664ece4430fd4ae999891dc50bb6126bc36b6a15a3189f29d25e9022100a86cfc4e2fdd9e39a20e305cfd1b76509c67b3e313e0f118229105caa0e823c9012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "9b34274814a2540bb062107117f8f3e75ef85d953e9372d8261a3e9dfbc1163f",
+ "vout": 37,
+ "scriptSig": {
+ "asm": "3045022100c7128fe10b2d38744ae8177776054c29fc8ec13f07207723e70766ab7164847402201d2cf09009b9596de74c0183d1ab832e5edddb7a9965880bb400097e850850f8[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "483045022100c7128fe10b2d38744ae8177776054c29fc8ec13f07207723e70766ab7164847402201d2cf09009b9596de74c0183d1ab832e5edddb7a9965880bb400097e850850f8012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "b86b5cc0d8a7374d94e277850b0a249cb26a7b42ddf014f28a49b8859da64241",
+ "vout": 20,
+ "scriptSig": {
+ "asm": "304502203b89a71628a28cc3703d170ca3be77786cff6b867e38a18b719705f8a326578f022100b2a9879e1acf621faa6466c207746a7f3eb4c8514c1482969aba3f2a957f1321[ALL] 03f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c",
+ "hex": "48304502203b89a71628a28cc3703d170ca3be77786cff6b867e38a18b719705f8a326578f022100b2a9879e1acf621faa6466c207746a7f3eb4c8514c1482969aba3f2a957f1321012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64c"
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "3d0a2353eeec44d3c10aed259038db321912122cd4150048f7bfa4c0ecfee236",
+ "vout": 242,
+ "scriptSig": {
+ "asm": "3046022100ef794a8ef7fd6752d2a183c18866ff6e8dc0f5bd889a63e2c21cf303a6302461022100c1b09662d9e92988c3f9fcf17d1bcc79b5403647095d7212b9f8a1278a532d68[ALL] 03091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc",
+ "hex": "493046022100ef794a8ef7fd6752d2a183c18866ff6e8dc0f5bd889a63e2c21cf303a6302461022100c1b09662d9e92988c3f9fcf17d1bcc79b5403647095d7212b9f8a1278a532d68012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adc"
+ },
+ "sequence": 4294967295
+ }
+ ],
+ "vout": [
+ {
+ "value": 1.3782,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_DUP OP_HASH160 8fd139bb39ced713f231c58a4d07bf6954d1c201 OP_EQUALVERIFY OP_CHECKSIG",
+ "hex": "76a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac",
+ "reqSigs": 1,
+ "type": "pubkeyhash",
+ "addresses": [
+ "1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o"
+ ]
+ }
+ },
+ {
+ "value": 0.01000001,
+ "n": 1,
+ "scriptPubKey": {
+ "asm": "OP_DUP OP_HASH160 6c772e9cf96371bba3da8cb733da70a2fcf20078 OP_EQUALVERIFY OP_CHECKSIG",
+ "hex": "76a9146c772e9cf96371bba3da8cb733da70a2fcf2007888ac",
+ "reqSigs": 1,
+ "type": "pubkeyhash",
+ "addresses": [
+ "1AtWkdmfmYkErU16d3KYykJUbEp9MAj9Sb"
+ ]
+ }
+ }
+ ],
+ "hex": "0100000015fd5c23522d31761c50175453daa6edaabe47a602a592d39ce933d8271a1a87274c0100006c493046022100b4251ecd63778a3dde0155abe4cd162947620ae9ee45a874353551092325b116022100db307baf4ff3781ec520bd18f387948cedd15dc27bafe17c894b0fe6ffffcafa012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffcb4ed1baba3a1eb2171e00ddec8e5b72b346dd8c07f9c2b0d122d0d06bc92ea7000000006c493046022100a9b617843b68c284715d3e02fd120479cd0d96a6c43bf01e697fb0a460a21a3a022100ba0a12fbe8b993d4e7911fa3467615765dbe421ddf5c51b57a9c1ee19dcc00ba012103e633b4fa4ceb705c2da712390767199be8ef2448b3095dc01652e11b2b751505ffffffffc1b37ae964f605978022f94ce2f3f676d66a46d1aef7c2c17d6315b9697f2f75010000006a473044022079bd62ee09621a3be96b760c39e8ef78170101d46313923c6b07ae60a95c90670220238e51ea29fc70b04b65508450523caedbb11cb4dd5aa608c81487de798925ba0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34ffffffffedd005dc7790ef65c206abd1ab718e75252a40f4b1310e4102cd692eca9cacb0d10000006b48304502207722d6f9038673c86a1019b1c4de2d687ae246477cd4ca7002762be0299de385022100e594a11e3a313942595f7666dcf7078bcb14f1330f4206b95c917e7ec0e82fac012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffdf28d6e26fb7a85a1e6a229b972c1bae0edc1c11cb9ca51e4caf5e59fbea35a1000000006b483045022100a63a4788027b79b65c6f9d9e054f68cf3b4eed19efd82a2d53f70dcbe64683390220526f243671425b2bd05745fcf2729361f985cfe84ea80c7cfc817b93d8134374012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52ffffffffae2a2320a1582faa24469eff3024a6b98bfe00eb4f554d8a0b1421ba53bfd6a5010000006c493046022100b200ac6db16842f76dab9abe807ce423c992805879bc50abd46ed8275a59d9cf022100c0d518e85dd345b3c29dd4dc47b9a420d3ce817b18720e94966d2fe23413a408012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffb3cc5a12548aa1794b4d2bbf076838cfd7fbafb7716da51ee8221a4ff19c291b000000006b483045022100ededc441c3103a6f2bd6cab7639421af0f6ec5e60503bce1e603cf34f00aee1c02205cb75f3f519a13fb348783b21db3085cb5ec7552c59e394fdbc3e1feea43f967012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52ffffffff85145367313888d2cf2747274a32e20b2df074027bafd6f970003fcbcdf11d07150000006b483045022100d9eed5413d2a4b4b98625aa6e3169edc4fb4663e7862316d69224454e70cd8ca022061e506521d5ced51dd0ea36496e75904d756a4c4f9fb111568555075d5f68d9a012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff8292c11f6d35abab5bac3ebb627a4ff949e8ecd62d33ed137adf7aeb00e512b0090000006b48304502207e84b27139c4c19c828cb1e30c349bba88e4d9b59be97286960793b5ddc0a2af0221008cdc7a951e7f31c20953ed5635fbabf228e80b7047f32faaa0313e7693005177012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff883dcf9a86063db088ad064d0953258d4b0ff3425857402d2f3f839cee0f84581e0000006a4730440220426540dfed9c4ab5812e5f06df705b8bcf307dd7d20f7fa6512298b2a6314f420220064055096e3ca62f6c7352c66a5447767c53f946acdf35025ab3807ddb2fa404012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff6697dbb3ed98afe481b568459fa67e503f8a4254532465a670e54669d19c9fe6720000006a47304402200a5e673996f2fc88e21cc8613611f08a650bc0370338803591d85d0ec5663764022040b6664a0d1ec83a7f01975b8fde5232992b8ca58bf48af6725d2f92a936ab2e012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff023ffc2182517e1d3fa0896c5b0bd7b4d2ef8a1e42655abe2ced54f657125d59670000006c493046022100d93b30219c5735f673be5c3b4688366d96f545561c74cb62c6958c00f6960806022100ec8200adcb028f2184fa2a4f6faac7f8bb57cb4503bb7584ac11051fece31b3d012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b
0cac9a93adcffffffff16f8c77166b0df3d7cc8b5b2ce825afbea9309ad7acd8e2461a255958f81fc06010000006b483045022100a13934e68d3f5b22b130c4cb33f4da468cffc52323a47fbfbe06b64858162246022047081e0a70ff770e64a2e2d31e5d520d9102268b57a47009a72fe73ec766901801210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cdffffffff197b96f3c87a3adfaa17f63fddc2a738a690ca665439f9431dbbd655816c41fb000000006c49304602210097f1f35d5bdc1a3a60390a1b015b8e7c4f916aa3847aafd969e04975e15bbe70022100a9052eb25517d481f1fda1b129eb1b534da50ea1a51f3ee012dca3601c11b86a0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34ffffffff20d9a261ee27aa1bd92e7db2fdca935909a40b648e974cd24a10d63b68b94039dd0000006b483045022012b3138c591bf7154b6fef457f2c4a3c7162225003788ac0024a99355865ff13022100b71b125ae1ffb2e1d1571f580cd3ebc8cd049a2d7a8a41f138ba94aeb982106f012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff50f179d5d16cd872f9a63c26c448464ae9bd95cd9421c0476113b5d314571b71010000006b483045022100f834ccc8b22ee72712a3e5e6ef4acb8b2fb791b5385b70e2cd4332674d6667f4022024fbda0a997e0c253503f217501f508a4d56edce2c813ecdd9ad796dbeba907401210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cdffffffff551b865d1568ac0a305e5f9c5dae6c540982334efbe789074318e0efc5b564631b0000006b48304502203b2fd1e39ae0e469d7a15768f262661b0de41470daf0fe8c4fd0c26542a0870002210081c57e331f9a2d214457d953e3542904727ee412c63028113635d7224da3dccc012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff57503e5a016189d407a721791459280875264f908ca2c5d4862c01386e7fb50b470400006b48304502206947a9c54f0664ece4430fd4ae999891dc50bb6126bc36b6a15a3189f29d25e9022100a86cfc4e2fdd9e39a20e305cfd1b76509c67b3e313e0f118229105caa0e823c9012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff3f16c1fb9d3e1a26d872933e955df85ee7f3f817711062b00b54a2144827349b250000006b483045022100c7128fe10b2d38744ae8177776054c29fc8ec13f07207723e70766ab7164847402201d2cf09009b9596de74c0183d1ab832e5edddb7a9965880bb400097e850850f8012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff4142a69d85b8498af214f0dd427b6ab29c240a0b8577e2944d37a7d8c05c6bb8140000006b48304502203b89a71628a28cc3703d170ca3be77786cff6b867e38a18b719705f8a326578f022100b2a9879e1acf621faa6466c207746a7f3eb4c8514c1482969aba3f2a957f1321012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff36e2feecc0a4bff7480015d42c12121932db389025ed0ac1d344ecee53230a3df20000006c493046022100ef794a8ef7fd6752d2a183c18866ff6e8dc0f5bd889a63e2c21cf303a6302461022100c1b09662d9e92988c3f9fcf17d1bcc79b5403647095d7212b9f8a1278a532d68012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff0260f73608000000001976a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac41420f00000000001976a9146c772e9cf96371bba3da8cb733da70a2fcf2007888ac48d60400"
+}
diff --git a/test/util/data/tx394b54bb.hex b/test/util/data/tx394b54bb.hex
new file mode 100644
index 0000000000..33f26cb4d6
--- /dev/null
+++ b/test/util/data/tx394b54bb.hex
@@ -0,0 +1 @@
+0100000015fd5c23522d31761c50175453daa6edaabe47a602a592d39ce933d8271a1a87274c0100006c493046022100b4251ecd63778a3dde0155abe4cd162947620ae9ee45a874353551092325b116022100db307baf4ff3781ec520bd18f387948cedd15dc27bafe17c894b0fe6ffffcafa012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffcb4ed1baba3a1eb2171e00ddec8e5b72b346dd8c07f9c2b0d122d0d06bc92ea7000000006c493046022100a9b617843b68c284715d3e02fd120479cd0d96a6c43bf01e697fb0a460a21a3a022100ba0a12fbe8b993d4e7911fa3467615765dbe421ddf5c51b57a9c1ee19dcc00ba012103e633b4fa4ceb705c2da712390767199be8ef2448b3095dc01652e11b2b751505ffffffffc1b37ae964f605978022f94ce2f3f676d66a46d1aef7c2c17d6315b9697f2f75010000006a473044022079bd62ee09621a3be96b760c39e8ef78170101d46313923c6b07ae60a95c90670220238e51ea29fc70b04b65508450523caedbb11cb4dd5aa608c81487de798925ba0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34ffffffffedd005dc7790ef65c206abd1ab718e75252a40f4b1310e4102cd692eca9cacb0d10000006b48304502207722d6f9038673c86a1019b1c4de2d687ae246477cd4ca7002762be0299de385022100e594a11e3a313942595f7666dcf7078bcb14f1330f4206b95c917e7ec0e82fac012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffdf28d6e26fb7a85a1e6a229b972c1bae0edc1c11cb9ca51e4caf5e59fbea35a1000000006b483045022100a63a4788027b79b65c6f9d9e054f68cf3b4eed19efd82a2d53f70dcbe64683390220526f243671425b2bd05745fcf2729361f985cfe84ea80c7cfc817b93d8134374012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52ffffffffae2a2320a1582faa24469eff3024a6b98bfe00eb4f554d8a0b1421ba53bfd6a5010000006c493046022100b200ac6db16842f76dab9abe807ce423c992805879bc50abd46ed8275a59d9cf022100c0d518e85dd345b3c29dd4dc47b9a420d3ce817b18720e94966d2fe23413a408012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffffb3cc5a12548aa1794b4d2bbf076838cfd7fbafb7716da51ee8221a4ff19c291b000000006b483045022100ededc441c3103a6f2bd6cab7639421af0f6ec5e60503bce1e603cf34f00aee1c02205cb75f3f519a13fb348783b21db3085cb5ec7552c59e394fdbc3e1feea43f967012103a621f08be22d1bbdcbe4e527ee4927006aa555fc65e2aafa767d4ea2fe9dfa52ffffffff85145367313888d2cf2747274a32e20b2df074027bafd6f970003fcbcdf11d07150000006b483045022100d9eed5413d2a4b4b98625aa6e3169edc4fb4663e7862316d69224454e70cd8ca022061e506521d5ced51dd0ea36496e75904d756a4c4f9fb111568555075d5f68d9a012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff8292c11f6d35abab5bac3ebb627a4ff949e8ecd62d33ed137adf7aeb00e512b0090000006b48304502207e84b27139c4c19c828cb1e30c349bba88e4d9b59be97286960793b5ddc0a2af0221008cdc7a951e7f31c20953ed5635fbabf228e80b7047f32faaa0313e7693005177012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff883dcf9a86063db088ad064d0953258d4b0ff3425857402d2f3f839cee0f84581e0000006a4730440220426540dfed9c4ab5812e5f06df705b8bcf307dd7d20f7fa6512298b2a6314f420220064055096e3ca62f6c7352c66a5447767c53f946acdf35025ab3807ddb2fa404012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff6697dbb3ed98afe481b568459fa67e503f8a4254532465a670e54669d19c9fe6720000006a47304402200a5e673996f2fc88e21cc8613611f08a650bc0370338803591d85d0ec5663764022040b6664a0d1ec83a7f01975b8fde5232992b8ca58bf48af6725d2f92a936ab2e012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff023ffc2182517e1d3fa0896c5b0bd7b4d2ef8a1e42655abe2ced54f657125d59670000006c493046022100d93b30219c5735f673be5c3b4688366d96f545561c74cb62c6958c00f6960806022100ec8200adcb028f2184fa2a4f6faac7f8bb57cb4503bb7584ac11051fece31b3d012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff16f8c77166b0df3d7cc8b5b2ce825afbea9309ad7acd8e2461a255958f81fc06010000006b483045022100a13934e68d3f5b22b130c4cb33f4da468cffc52323a47fbfbe06b64858162246022047081e0a70ff770e64a2e2d31e5d520d9102268b57a47009a72fe73ec766901801210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cdffffffff197b96f3c87a3adfaa17f63fddc2a738a690ca665439f9431dbbd655816c41fb000000006c49304602210097f1f35d5bdc1a3a60390a1b015b8e7c4f916aa3847aafd969e04975e15bbe70022100a9052eb25517d481f1fda1b129eb1b534da50ea1a51f3ee012dca3601c11b86a0121027a759be8df971a6a04fafcb4f6babf75dc811c5cdaa0734cddbe9b942ce75b34ffffffff20d9a261ee27aa1bd92e7db2fdca935909a40b648e974cd24a10d63b68b94039dd0000006b483045022012b3138c591bf7154b6fef457f2c4a3c7162225003788ac0024a99355865ff13022100b71b125ae1ffb2e1d1571f580cd3ebc8cd049a2d7a8a41f138ba94aeb982106f012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff50f179d5d16cd872f9a63c26c448464ae9bd95cd9421c0476113b5d314571b71010000006b483045022100f834ccc8b22ee72712a3e5e6ef4acb8b2fb791b5385b70e2cd4332674d6667f4022024fbda0a997e0c253503f217501f508a4d56edce2c813ecdd9ad796dbeba907401210234b9d9413f247bb78cd3293b7b65a2c38018ba5621ea9ee737f3a6a3523fb4cdffffffff551b865d1568ac0a305e5f9c5dae6c540982334efbe789074318e0efc5b564631b0000006b48304502203b2fd1e39ae0e469d7a15768f262661b0de41470daf0fe8c4fd0c26542a0870002210081c57e331f9a2d214457d953e3542904727ee412c63028113635d7224da3dccc012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff57503e5a016189d407a721791459280875264f908ca2c5d4862c01386e7fb50b470400006b48304502206947a9c54f0664ece4430fd4ae999891dc50bb6126bc36b6a15a3189f29d25e9022100a86cfc4e2fdd9e39a20e305cfd1b76509c67b3e313e0f118229105caa0e823c9012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff3f16c1fb9d3e1a26d872933e955df85ee7f3f817711062b00b54a2144827349b250000006b483045022100c7128fe10b2d38744ae8177776054c29fc8ec13f07207723e70766ab7164847402201d2cf09009b9596de74c0183d1ab832e5edddb7a9965880bb400097e850850f8012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff4142a69d85b8498af214f0dd427b6ab29c240a0b8577e2944d37a7d8c05c6bb8140000006b48304502203b89a71628a28cc3703d170ca3be77786cff6b867e38a18b719705f8a326578f022100b2a9879e1acf621faa6466c207746a7f3eb4c8514c1482969aba3f2a957f1321012103f1575d6124ac78be398c25b31146d08313c6072d23a4d7df5ac6a9f87346c64cffffffff36e2feecc0a4bff7480015d42c12121932db389025ed0ac1d344ecee53230a3df20000006c493046022100ef794a8ef7fd6752d2a183c18866ff6e8dc0f5bd889a63e2c21cf303a6302461022100c1b09662d9e92988c3f9fcf17d1bcc79b5403647095d7212b9f8a1278a532d68012103091137f3ef23f4acfc19a5953a68b2074fae942ad3563ef28c33b0cac9a93adcffffffff0260f73608000000001976a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac41420f00000000001976a9146c772e9cf96371bba3da8cb733da70a2fcf2007888ac00000000
diff --git a/test/util/data/txcreate1.hex b/test/util/data/txcreate1.hex
new file mode 100644
index 0000000000..9ec6ee3531
--- /dev/null
+++ b/test/util/data/txcreate1.hex
@@ -0,0 +1 @@
+02000000031f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff7cca453133921c50d5025878f7f738d1df891fd359763331935784cf6b9c82bf1200000000fffffffffccd319e04a996c96cfc0bf4c07539aa90bd0b1a700ef72fae535d6504f9a6220100000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0084d717000000001976a914f2d4db28cad6502226ee484ae24505c2885cb12d88ac00000000
diff --git a/test/util/data/txcreate1.json b/test/util/data/txcreate1.json
new file mode 100644
index 0000000000..f83e036f33
--- /dev/null
+++ b/test/util/data/txcreate1.json
@@ -0,0 +1,64 @@
+{
+ "txid": "fe7d174f42dce0cffa7a527e9bc8368956057619ec817648f6138b98f2533e8f",
+ "hash": "fe7d174f42dce0cffa7a527e9bc8368956057619ec817648f6138b98f2533e8f",
+ "version": 2,
+ "locktime": 0,
+ "vin": [
+ {
+ "txid": "5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "",
+ "hex": ""
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "bf829c6bcf84579331337659d31f89dfd138f7f7785802d5501c92333145ca7c",
+ "vout": 18,
+ "scriptSig": {
+ "asm": "",
+ "hex": ""
+ },
+ "sequence": 4294967295
+ },
+ {
+ "txid": "22a6f904655d53ae2ff70e701a0bbd90aa3975c0f40bfc6cc996a9049e31cdfc",
+ "vout": 1,
+ "scriptSig": {
+ "asm": "",
+ "hex": ""
+ },
+ "sequence": 4294967295
+ }
+ ],
+ "vout": [
+ {
+ "value": 0.18,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
+ "reqSigs": 1,
+ "type": "pubkeyhash",
+ "addresses": [
+ "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o"
+ ]
+ }
+ },
+ {
+ "value": 4.00,
+ "n": 1,
+ "scriptPubKey": {
+ "asm": "OP_DUP OP_HASH160 f2d4db28cad6502226ee484ae24505c2885cb12d OP_EQUALVERIFY OP_CHECKSIG",
+ "hex": "76a914f2d4db28cad6502226ee484ae24505c2885cb12d88ac",
+ "reqSigs": 1,
+ "type": "pubkeyhash",
+ "addresses": [
+ "1P8yWvZW8jVihP1bzHeqfE4aoXNX8AVa46"
+ ]
+ }
+ }
+ ],
+ "hex": "02000000031f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff7cca453133921c50d5025878f7f738d1df891fd359763331935784cf6b9c82bf1200000000fffffffffccd319e04a996c96cfc0bf4c07539aa90bd0b1a700ef72fae535d6504f9a6220100000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0084d717000000001976a914f2d4db28cad6502226ee484ae24505c2885cb12d88ac00000000"
+}
diff --git a/test/util/data/txcreate2.hex b/test/util/data/txcreate2.hex
new file mode 100644
index 0000000000..38bb7b1046
--- /dev/null
+++ b/test/util/data/txcreate2.hex
@@ -0,0 +1 @@
+02000000000100000000000000000000000000
diff --git a/test/util/data/txcreate2.json b/test/util/data/txcreate2.json
new file mode 100644
index 0000000000..fb5e177db7
--- /dev/null
+++ b/test/util/data/txcreate2.json
@@ -0,0 +1,20 @@
+{
+ "txid": "0481afb29931341d0d7861d8a2f6f26456fa042abf54a23e96440ed7946e0715",
+ "hash": "0481afb29931341d0d7861d8a2f6f26456fa042abf54a23e96440ed7946e0715",
+ "version": 2,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ {
+ "value": 0.00,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "",
+ "hex": "",
+ "type": "nonstandard"
+ }
+ }
+ ],
+ "hex": "02000000000100000000000000000000000000"
+}
diff --git a/test/util/data/txcreatedata1.hex b/test/util/data/txcreatedata1.hex
new file mode 100644
index 0000000000..cefd1a05a6
--- /dev/null
+++ b/test/util/data/txcreatedata1.hex
@@ -0,0 +1 @@
+02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0084d71700000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000
diff --git a/test/util/data/txcreatedata1.json b/test/util/data/txcreatedata1.json
new file mode 100644
index 0000000000..760518d30a
--- /dev/null
+++ b/test/util/data/txcreatedata1.json
@@ -0,0 +1,42 @@
+{
+ "txid": "07894b4d12fe7853dd911402db1620920d261b9627c447f931417d330c25f06e",
+ "hash": "07894b4d12fe7853dd911402db1620920d261b9627c447f931417d330c25f06e",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ {
+ "txid": "5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "",
+ "hex": ""
+ },
+ "sequence": 4294967295
+ }
+ ],
+ "vout": [
+ {
+ "value": 0.18,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
+ "reqSigs": 1,
+ "type": "pubkeyhash",
+ "addresses": [
+ "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o"
+ ]
+ }
+ },
+ {
+ "value": 4.00,
+ "n": 1,
+ "scriptPubKey": {
+ "asm": "OP_RETURN 54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e",
+ "hex": "6a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e",
+ "type": "nulldata"
+ }
+ }
+ ],
+ "hex": "01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0084d71700000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000"
+}
diff --git a/test/util/data/txcreatedata2.hex b/test/util/data/txcreatedata2.hex
new file mode 100644
index 0000000000..d69cf58ba1
--- /dev/null
+++ b/test/util/data/txcreatedata2.hex
@@ -0,0 +1 @@
+02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0000000000000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000
diff --git a/test/util/data/txcreatedata2.json b/test/util/data/txcreatedata2.json
new file mode 100644
index 0000000000..3c6da40f90
--- /dev/null
+++ b/test/util/data/txcreatedata2.json
@@ -0,0 +1,42 @@
+{
+ "txid": "c14b007fa3a6c1e7765919c1d14c1cfc2b8642c3a5d3be4b1fa8c4ccfec98bb0",
+ "hash": "c14b007fa3a6c1e7765919c1d14c1cfc2b8642c3a5d3be4b1fa8c4ccfec98bb0",
+ "version": 2,
+ "locktime": 0,
+ "vin": [
+ {
+ "txid": "5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "",
+ "hex": ""
+ },
+ "sequence": 4294967295
+ }
+ ],
+ "vout": [
+ {
+ "value": 0.18,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
+ "reqSigs": 1,
+ "type": "pubkeyhash",
+ "addresses": [
+ "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o"
+ ]
+ }
+ },
+ {
+ "value": 0.00,
+ "n": 1,
+ "scriptPubKey": {
+ "asm": "OP_RETURN 54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e",
+ "hex": "6a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e",
+ "type": "nulldata"
+ }
+ }
+ ],
+ "hex": "02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0000000000000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000"
+}
diff --git a/test/util/data/txcreatedata_seq0.hex b/test/util/data/txcreatedata_seq0.hex
new file mode 100644
index 0000000000..54b89d2381
--- /dev/null
+++ b/test/util/data/txcreatedata_seq0.hex
@@ -0,0 +1 @@
+02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff0180a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac00000000
diff --git a/test/util/data/txcreatedata_seq0.json b/test/util/data/txcreatedata_seq0.json
new file mode 100644
index 0000000000..d272a4c447
--- /dev/null
+++ b/test/util/data/txcreatedata_seq0.json
@@ -0,0 +1,33 @@
+{
+ "txid": "8df6ed527472542dd5e137c242a7c5a9f337ac34f7b257ae4af886aeaebb51b0",
+ "hash": "8df6ed527472542dd5e137c242a7c5a9f337ac34f7b257ae4af886aeaebb51b0",
+ "version": 2,
+ "locktime": 0,
+ "vin": [
+ {
+ "txid": "5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "",
+ "hex": ""
+ },
+ "sequence": 4294967293
+ }
+ ],
+ "vout": [
+ {
+ "value": 0.18,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
+ "reqSigs": 1,
+ "type": "pubkeyhash",
+ "addresses": [
+ "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o"
+ ]
+ }
+ }
+ ],
+ "hex": "02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff0180a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac00000000"
+}
diff --git a/test/util/data/txcreatedata_seq1.hex b/test/util/data/txcreatedata_seq1.hex
new file mode 100644
index 0000000000..4cedcd975c
--- /dev/null
+++ b/test/util/data/txcreatedata_seq1.hex
@@ -0,0 +1 @@
+01000000021f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff1f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000010000000180a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac00000000
diff --git a/test/util/data/txcreatedata_seq1.json b/test/util/data/txcreatedata_seq1.json
new file mode 100644
index 0000000000..d323255418
--- /dev/null
+++ b/test/util/data/txcreatedata_seq1.json
@@ -0,0 +1,42 @@
+{
+ "txid": "c4dea671b0d7b48f8ab10bc46650e8329d3c5766931f548f513847a19f5ba75b",
+ "hash": "c4dea671b0d7b48f8ab10bc46650e8329d3c5766931f548f513847a19f5ba75b",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ {
+ "txid": "5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "",
+ "hex": ""
+ },
+ "sequence": 4294967293
+ },
+ {
+ "txid": "5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "",
+ "hex": ""
+ },
+ "sequence": 1
+ }
+ ],
+ "vout": [
+ {
+ "value": 0.18,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
+ "reqSigs": 1,
+ "type": "pubkeyhash",
+ "addresses": [
+ "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o"
+ ]
+ }
+ }
+ ],
+ "hex": "01000000021f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff1f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000010000000180a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac00000000"
+}
diff --git a/test/util/data/txcreatemultisig1.hex b/test/util/data/txcreatemultisig1.hex
new file mode 100644
index 0000000000..9c00004d38
--- /dev/null
+++ b/test/util/data/txcreatemultisig1.hex
@@ -0,0 +1 @@
+01000000000100e1f5050000000069522102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff39721021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d2102df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb48553ae00000000
diff --git a/test/util/data/txcreatemultisig1.json b/test/util/data/txcreatemultisig1.json
new file mode 100644
index 0000000000..f6ce43c202
--- /dev/null
+++ b/test/util/data/txcreatemultisig1.json
@@ -0,0 +1,26 @@
+{
+ "txid": "0d1d4edfc217d9db3ab6a9298f26a52eae3c52f55a6cb8ccbc14f7c727572894",
+ "hash": "0d1d4edfc217d9db3ab6a9298f26a52eae3c52f55a6cb8ccbc14f7c727572894",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ {
+ "value": 1.00,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "2 02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397 021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d 02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485 3 OP_CHECKMULTISIG",
+ "hex": "522102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff39721021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d2102df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb48553ae",
+ "reqSigs": 2,
+ "type": "multisig",
+ "addresses": [
+ "1FoG2386FG2tAJS9acMuiDsKy67aGg9MKz",
+ "1FXtz9KU8JNmQDyHdiEm5HDiALuP3zdHvV",
+ "14LuavcBbXZYJ6Tsz3cAUQj9SuQoL2xCQX"
+ ]
+ }
+ }
+ ],
+ "hex": "01000000000100e1f5050000000069522102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff39721021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d2102df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb48553ae00000000"
+}
diff --git a/test/util/data/txcreatemultisig2.hex b/test/util/data/txcreatemultisig2.hex
new file mode 100644
index 0000000000..07835c54d3
--- /dev/null
+++ b/test/util/data/txcreatemultisig2.hex
@@ -0,0 +1 @@
+01000000000100e1f5050000000017a9141c6fbaf46d64221e80cbae182c33ddf81b9294ac8700000000
diff --git a/test/util/data/txcreatemultisig2.json b/test/util/data/txcreatemultisig2.json
new file mode 100644
index 0000000000..e09d22060f
--- /dev/null
+++ b/test/util/data/txcreatemultisig2.json
@@ -0,0 +1,24 @@
+{
+ "txid": "0d861f278a3b7bce7cb5a88d71e6e6a903336f95ad5a2c29b295b63835b6eee3",
+ "hash": "0d861f278a3b7bce7cb5a88d71e6e6a903336f95ad5a2c29b295b63835b6eee3",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ {
+ "value": 1.00,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_HASH160 1c6fbaf46d64221e80cbae182c33ddf81b9294ac OP_EQUAL",
+ "hex": "a9141c6fbaf46d64221e80cbae182c33ddf81b9294ac87",
+ "reqSigs": 1,
+ "type": "scripthash",
+ "addresses": [
+ "34HNh57oBCRKkxNyjTuWAJkTbuGh6jg2Ms"
+ ]
+ }
+ }
+ ],
+ "hex": "01000000000100e1f5050000000017a9141c6fbaf46d64221e80cbae182c33ddf81b9294ac8700000000"
+}
diff --git a/test/util/data/txcreatemultisig3.hex b/test/util/data/txcreatemultisig3.hex
new file mode 100644
index 0000000000..8d34f28f87
--- /dev/null
+++ b/test/util/data/txcreatemultisig3.hex
@@ -0,0 +1 @@
+01000000000100e1f50500000000220020e15a86a23178f433d514dbbce042e87d72662b8b5edcacfd2e37ab7a2d135f0500000000
diff --git a/test/util/data/txcreatemultisig3.json b/test/util/data/txcreatemultisig3.json
new file mode 100644
index 0000000000..88e32bd310
--- /dev/null
+++ b/test/util/data/txcreatemultisig3.json
@@ -0,0 +1,20 @@
+{
+ "txid": "ccc552220b46a3b5140048b03395987ce4f0fa1ddf8c635bba1fa44e0f8c1d7f",
+ "hash": "ccc552220b46a3b5140048b03395987ce4f0fa1ddf8c635bba1fa44e0f8c1d7f",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ {
+ "value": 1.00,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "0 e15a86a23178f433d514dbbce042e87d72662b8b5edcacfd2e37ab7a2d135f05",
+ "hex": "0020e15a86a23178f433d514dbbce042e87d72662b8b5edcacfd2e37ab7a2d135f05",
+ "type": "witness_v0_scripthash"
+ }
+ }
+ ],
+ "hex": "01000000000100e1f50500000000220020e15a86a23178f433d514dbbce042e87d72662b8b5edcacfd2e37ab7a2d135f0500000000"
+}
diff --git a/test/util/data/txcreatemultisig4.hex b/test/util/data/txcreatemultisig4.hex
new file mode 100644
index 0000000000..7da54366c7
--- /dev/null
+++ b/test/util/data/txcreatemultisig4.hex
@@ -0,0 +1 @@
+01000000000100e1f5050000000017a9146edf12858999f0dae74f9c692e6694ee3621b2ac8700000000
diff --git a/test/util/data/txcreatemultisig4.json b/test/util/data/txcreatemultisig4.json
new file mode 100644
index 0000000000..fc69c7269c
--- /dev/null
+++ b/test/util/data/txcreatemultisig4.json
@@ -0,0 +1,24 @@
+{
+ "txid": "5e8b1cc73234e208d4b7ca9075f136b908c34101be7a048df4ba9ac758b61567",
+ "hash": "5e8b1cc73234e208d4b7ca9075f136b908c34101be7a048df4ba9ac758b61567",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ {
+ "value": 1.00,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_HASH160 6edf12858999f0dae74f9c692e6694ee3621b2ac OP_EQUAL",
+ "hex": "a9146edf12858999f0dae74f9c692e6694ee3621b2ac87",
+ "reqSigs": 1,
+ "type": "scripthash",
+ "addresses": [
+ "3BoFUz1StqcNcgUTZE5cC1eFhuYFzj3fGH"
+ ]
+ }
+ }
+ ],
+ "hex": "01000000000100e1f5050000000017a9146edf12858999f0dae74f9c692e6694ee3621b2ac8700000000"
+}
diff --git a/test/util/data/txcreateoutpubkey1.hex b/test/util/data/txcreateoutpubkey1.hex
new file mode 100644
index 0000000000..4a08244b2f
--- /dev/null
+++ b/test/util/data/txcreateoutpubkey1.hex
@@ -0,0 +1 @@
+0100000000010000000000000000232102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397ac00000000
diff --git a/test/util/data/txcreateoutpubkey1.json b/test/util/data/txcreateoutpubkey1.json
new file mode 100644
index 0000000000..6019fa2dcd
--- /dev/null
+++ b/test/util/data/txcreateoutpubkey1.json
@@ -0,0 +1,24 @@
+{
+ "txid": "f42b38ac12e3fafc96ba1a9ba70cbfe326744aef75df5fb9db5d6e2855ca415f",
+ "hash": "f42b38ac12e3fafc96ba1a9ba70cbfe326744aef75df5fb9db5d6e2855ca415f",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ {
+ "value": 0.00,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397 OP_CHECKSIG",
+ "hex": "2102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397ac",
+ "reqSigs": 1,
+ "type": "pubkey",
+ "addresses": [
+ "1FoG2386FG2tAJS9acMuiDsKy67aGg9MKz"
+ ]
+ }
+ }
+ ],
+ "hex": "0100000000010000000000000000232102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397ac00000000"
+}
diff --git a/test/util/data/txcreateoutpubkey2.hex b/test/util/data/txcreateoutpubkey2.hex
new file mode 100644
index 0000000000..8283c722ab
--- /dev/null
+++ b/test/util/data/txcreateoutpubkey2.hex
@@ -0,0 +1 @@
+0100000000010000000000000000160014a2516e770582864a6a56ed21a102044e388c62e300000000
diff --git a/test/util/data/txcreateoutpubkey2.json b/test/util/data/txcreateoutpubkey2.json
new file mode 100644
index 0000000000..6fc3d57527
--- /dev/null
+++ b/test/util/data/txcreateoutpubkey2.json
@@ -0,0 +1,20 @@
+{
+ "txid": "70f2a088cde460e677415fa1fb71895e90c231e6ed38ed203a35b6f848e9cc73",
+ "hash": "70f2a088cde460e677415fa1fb71895e90c231e6ed38ed203a35b6f848e9cc73",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ {
+ "value": 0.00,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "0 a2516e770582864a6a56ed21a102044e388c62e3",
+ "hex": "0014a2516e770582864a6a56ed21a102044e388c62e3",
+ "type": "witness_v0_keyhash"
+ }
+ }
+ ],
+ "hex": "0100000000010000000000000000160014a2516e770582864a6a56ed21a102044e388c62e300000000"
+}
diff --git a/test/util/data/txcreateoutpubkey3.hex b/test/util/data/txcreateoutpubkey3.hex
new file mode 100644
index 0000000000..84adff4d89
--- /dev/null
+++ b/test/util/data/txcreateoutpubkey3.hex
@@ -0,0 +1 @@
+010000000001000000000000000017a914a5ab14c9804d0d8bf02f1aea4e82780733ad0a838700000000
diff --git a/test/util/data/txcreateoutpubkey3.json b/test/util/data/txcreateoutpubkey3.json
new file mode 100644
index 0000000000..a1a25fc834
--- /dev/null
+++ b/test/util/data/txcreateoutpubkey3.json
@@ -0,0 +1,24 @@
+{
+ "txid": "bfc7e898ee9f6a9652d7b8cca147e2da134502e2ada0f279ed634fc8cf833f8c",
+ "hash": "bfc7e898ee9f6a9652d7b8cca147e2da134502e2ada0f279ed634fc8cf833f8c",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ {
+ "value": 0.00,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_HASH160 a5ab14c9804d0d8bf02f1aea4e82780733ad0a83 OP_EQUAL",
+ "hex": "a914a5ab14c9804d0d8bf02f1aea4e82780733ad0a8387",
+ "reqSigs": 1,
+ "type": "scripthash",
+ "addresses": [
+ "3GnzN8FqgvYGYdhj8NW6UNxxVv3Uj1ApQn"
+ ]
+ }
+ }
+ ],
+ "hex": "010000000001000000000000000017a914a5ab14c9804d0d8bf02f1aea4e82780733ad0a838700000000"
+}
diff --git a/test/util/data/txcreatescript1.hex b/test/util/data/txcreatescript1.hex
new file mode 100644
index 0000000000..0adce270fb
--- /dev/null
+++ b/test/util/data/txcreatescript1.hex
@@ -0,0 +1 @@
+0100000000010000000000000000017500000000
diff --git a/test/util/data/txcreatescript1.json b/test/util/data/txcreatescript1.json
new file mode 100644
index 0000000000..8ffecba411
--- /dev/null
+++ b/test/util/data/txcreatescript1.json
@@ -0,0 +1,20 @@
+{
+ "txid": "f0851b68202f736b792649cfc960259c2374badcb644ab20cac726b5f72f61c9",
+ "hash": "f0851b68202f736b792649cfc960259c2374badcb644ab20cac726b5f72f61c9",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ {
+ "value": 0.00,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_DROP",
+ "hex": "75",
+ "type": "nonstandard"
+ }
+ }
+ ],
+ "hex": "0100000000010000000000000000017500000000"
+}
diff --git a/test/util/data/txcreatescript2.hex b/test/util/data/txcreatescript2.hex
new file mode 100644
index 0000000000..5afe8786e3
--- /dev/null
+++ b/test/util/data/txcreatescript2.hex
@@ -0,0 +1 @@
+010000000001000000000000000017a91471ed53322d470bb96657deb786b94f97dd46fb158700000000
diff --git a/test/util/data/txcreatescript2.json b/test/util/data/txcreatescript2.json
new file mode 100644
index 0000000000..41eb69f1af
--- /dev/null
+++ b/test/util/data/txcreatescript2.json
@@ -0,0 +1,24 @@
+{
+ "txid": "6e07a7cc075e0703f32ee8c4e5373fe654bfbc315148fda364e1be286ff290d0",
+ "hash": "6e07a7cc075e0703f32ee8c4e5373fe654bfbc315148fda364e1be286ff290d0",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ {
+ "value": 0.00,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_HASH160 71ed53322d470bb96657deb786b94f97dd46fb15 OP_EQUAL",
+ "hex": "a91471ed53322d470bb96657deb786b94f97dd46fb1587",
+ "reqSigs": 1,
+ "type": "scripthash",
+ "addresses": [
+ "3C5QarEGh9feKbDJ3QbMf2YNjnMoiPDhNp"
+ ]
+ }
+ }
+ ],
+ "hex": "010000000001000000000000000017a91471ed53322d470bb96657deb786b94f97dd46fb158700000000"
+}
diff --git a/test/util/data/txcreatescript3.hex b/test/util/data/txcreatescript3.hex
new file mode 100644
index 0000000000..8a2b973bf0
--- /dev/null
+++ b/test/util/data/txcreatescript3.hex
@@ -0,0 +1 @@
+01000000000100000000000000002200200bfe935e70c321c7ca3afc75ce0d0ca2f98b5422e008bb31c00c6d7f1f1c0ad600000000
diff --git a/test/util/data/txcreatescript3.json b/test/util/data/txcreatescript3.json
new file mode 100644
index 0000000000..90e7e27f9f
--- /dev/null
+++ b/test/util/data/txcreatescript3.json
@@ -0,0 +1,20 @@
+{
+ "txid": "8a234037b088e987c877030efc83374a07441c321bf9dc6dd2f206bc26507df8",
+ "hash": "8a234037b088e987c877030efc83374a07441c321bf9dc6dd2f206bc26507df8",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ {
+ "value": 0.00,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "0 0bfe935e70c321c7ca3afc75ce0d0ca2f98b5422e008bb31c00c6d7f1f1c0ad6",
+ "hex": "00200bfe935e70c321c7ca3afc75ce0d0ca2f98b5422e008bb31c00c6d7f1f1c0ad6",
+ "type": "witness_v0_scripthash"
+ }
+ }
+ ],
+ "hex": "01000000000100000000000000002200200bfe935e70c321c7ca3afc75ce0d0ca2f98b5422e008bb31c00c6d7f1f1c0ad600000000"
+}
diff --git a/test/util/data/txcreatescript4.hex b/test/util/data/txcreatescript4.hex
new file mode 100644
index 0000000000..b4cfe58f42
--- /dev/null
+++ b/test/util/data/txcreatescript4.hex
@@ -0,0 +1 @@
+010000000001000000000000000017a9146a2c482f4985f57e702f325816c90e3723ca81ae8700000000
diff --git a/test/util/data/txcreatescript4.json b/test/util/data/txcreatescript4.json
new file mode 100644
index 0000000000..11783751a4
--- /dev/null
+++ b/test/util/data/txcreatescript4.json
@@ -0,0 +1,24 @@
+{
+ "txid": "24225cf5e9391100d6b218134b9f03383ca4c880a1f634ac12990cf28b66adbc",
+ "hash": "24225cf5e9391100d6b218134b9f03383ca4c880a1f634ac12990cf28b66adbc",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ {
+ "value": 0.00,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_HASH160 6a2c482f4985f57e702f325816c90e3723ca81ae OP_EQUAL",
+ "hex": "a9146a2c482f4985f57e702f325816c90e3723ca81ae87",
+ "reqSigs": 1,
+ "type": "scripthash",
+ "addresses": [
+ "3BNQbeFeJJGMAyDxPwWPuqxPMrjsFLjk3f"
+ ]
+ }
+ }
+ ],
+ "hex": "010000000001000000000000000017a9146a2c482f4985f57e702f325816c90e3723ca81ae8700000000"
+}
diff --git a/test/util/data/txcreatesignv1.hex b/test/util/data/txcreatesignv1.hex
new file mode 100644
index 0000000000..a46fcc88cb
--- /dev/null
+++ b/test/util/data/txcreatesignv1.hex
@@ -0,0 +1 @@
+01000000018594c5bdcaec8f06b78b596f31cd292a294fd031e24eec716f43dac91ea7494d000000008b48304502210096a75056c9e2cc62b7214777b3d2a592cfda7092520126d4ebfcd6d590c99bd8022051bb746359cf98c0603f3004477eac68701132380db8facba19c89dc5ab5c5e201410479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8ffffffff01a0860100000000001976a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac00000000
diff --git a/test/util/data/txcreatesignv1.json b/test/util/data/txcreatesignv1.json
new file mode 100644
index 0000000000..ff39e71b40
--- /dev/null
+++ b/test/util/data/txcreatesignv1.json
@@ -0,0 +1,33 @@
+{
+ "txid": "977e7cd286cb72cd470d539ba6cb48400f8f387d97451d45cdb8819437a303af",
+ "hash": "977e7cd286cb72cd470d539ba6cb48400f8f387d97451d45cdb8819437a303af",
+ "version": 1,
+ "locktime": 0,
+ "vin": [
+ {
+ "txid": "4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485",
+ "vout": 0,
+ "scriptSig": {
+ "asm": "304502210096a75056c9e2cc62b7214777b3d2a592cfda7092520126d4ebfcd6d590c99bd8022051bb746359cf98c0603f3004477eac68701132380db8facba19c89dc5ab5c5e2[ALL] 0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8",
+ "hex": "48304502210096a75056c9e2cc62b7214777b3d2a592cfda7092520126d4ebfcd6d590c99bd8022051bb746359cf98c0603f3004477eac68701132380db8facba19c89dc5ab5c5e201410479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8"
+ },
+ "sequence": 4294967295
+ }
+ ],
+ "vout": [
+ {
+ "value": 0.001,
+ "n": 0,
+ "scriptPubKey": {
+ "asm": "OP_DUP OP_HASH160 5834479edbbe0539b31ffd3a8f8ebadc2165ed01 OP_EQUALVERIFY OP_CHECKSIG",
+ "hex": "76a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac",
+ "reqSigs": 1,
+ "type": "pubkeyhash",
+ "addresses": [
+ "193P6LtvS4nCnkDvM9uXn1gsSRqh4aDAz7"
+ ]
+ }
+ }
+ ],
+ "hex": "01000000018594c5bdcaec8f06b78b596f31cd292a294fd031e24eec716f43dac91ea7494d000000008b48304502210096a75056c9e2cc62b7214777b3d2a592cfda7092520126d4ebfcd6d590c99bd8022051bb746359cf98c0603f3004477eac68701132380db8facba19c89dc5ab5c5e201410479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8ffffffff01a0860100000000001976a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac00000000"
+}
diff --git a/test/util/data/txcreatesignv2.hex b/test/util/data/txcreatesignv2.hex
new file mode 100644
index 0000000000..ee425cd98c
--- /dev/null
+++ b/test/util/data/txcreatesignv2.hex
@@ -0,0 +1 @@
+02000000018594c5bdcaec8f06b78b596f31cd292a294fd031e24eec716f43dac91ea7494d000000008a473044022079c7aa014177a2e973caf6df7c7b8f15399083b91eba370ea1e19c4caed9181e02205f8f8763505ce8e6cbdd2cd28fab3fd407a75003e7d0dc04e6bebb0a3c89e7cb01410479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8ffffffff01a0860100000000001976a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac00000000