-rw-r--r--   .travis.yml                            1
-rw-r--r--   .tx/config                             2
-rw-r--r--   build-aux/m4/bitcoin_qt.m4             1
-rw-r--r--   configure.ac                           2
-rw-r--r--   doc/REST-interface.md                 12
-rwxr-xr-x   qa/pull-tester/rpc-tests.sh            2
-rw-r--r--   qa/rpc-tests/bignum.py               102
-rwxr-xr-x   qa/rpc-tests/bipdersig-p2p.py        183
-rw-r--r--   qa/rpc-tests/blockstore.py           127
-rw-r--r--   qa/rpc-tests/blocktools.py            65
-rwxr-xr-x   qa/rpc-tests/comptool.py             341
-rwxr-xr-x   qa/rpc-tests/invalidblockrequest.py  115
-rwxr-xr-x   qa/rpc-tests/maxblocksinflight.py    101
-rwxr-xr-x   qa/rpc-tests/mininode.py            1256
-rw-r--r--   qa/rpc-tests/script.py               896
-rwxr-xr-x   qa/rpc-tests/script_test.py          253
-rwxr-xr-x   qa/rpc-tests/test_framework.py        44
-rw-r--r--   qa/rpc-tests/util.py                  19
-rw-r--r--   src/bitcoin-cli.cpp                    2
-rw-r--r--   src/bitcoin-tx.cpp                     2
-rw-r--r--   src/bitcoind.cpp                       1
-rw-r--r--   src/bloom.cpp                         83
-rw-r--r--   src/bloom.h                           28
-rw-r--r--   src/chainparams.cpp                   29
-rw-r--r--   src/chainparams.h                     10
-rw-r--r--   src/init.cpp                           2
-rw-r--r--   src/main.cpp                          19
-rw-r--r--   src/mruset.h                          36
-rw-r--r--   src/net.cpp                           29
-rw-r--r--   src/net.h                              6
-rw-r--r--   src/netbase.cpp                        4
-rw-r--r--   src/pow.cpp                           17
-rw-r--r--   src/pow.h                              3
-rw-r--r--   src/qt/bitcoin.cpp                     2
-rw-r--r--   src/qt/transactiondesc.cpp             1
-rw-r--r--   src/rpcclient.cpp                      1
-rw-r--r--   src/test/bloom_tests.cpp              78
-rw-r--r--   src/test/data/script_invalid.json     20
-rw-r--r--   src/test/mruset_tests.cpp            126
-rw-r--r--   src/test/pow_tests.cpp                24
-rw-r--r--   src/test/test_bitcoin.cpp              2
-rw-r--r--   src/ui_interface.h                    13
-rw-r--r--   src/util.cpp                           5
-rw-r--r--   src/util.h                            20
-rw-r--r--   src/wallet/rpcwallet.cpp               2
-rw-r--r--   src/wallet/wallet.cpp                 19
46 files changed, 3911 insertions(+), 195 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index 44ea7b62d7..e6578ee078 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,6 +16,7 @@ env:
- CCACHE_COMPRESS=1
- BASE_OUTDIR=$TRAVIS_BUILD_DIR/out
- SDK_URL=https://bitcoincore.org/depends-sources/sdks
+ - PYTHON_DEBUG=1
cache:
apt: true
directories:
diff --git a/.tx/config b/.tx/config
index 472d27b46f..6c534f06e4 100644
--- a/.tx/config
+++ b/.tx/config
@@ -1,7 +1,7 @@
[main]
host = https://www.transifex.com
-[bitcoin.qt-translation-010x]
+[bitcoin.qt-translation-011x]
file_filter = src/qt/locale/bitcoin_<lang>.ts
source_file = src/qt/locale/bitcoin_en.ts
source_lang = en
diff --git a/build-aux/m4/bitcoin_qt.m4 b/build-aux/m4/bitcoin_qt.m4
index 2a72262653..570ccb8b6f 100644
--- a/build-aux/m4/bitcoin_qt.m4
+++ b/build-aux/m4/bitcoin_qt.m4
@@ -6,6 +6,7 @@ AC_DEFUN([BITCOIN_QT_FAIL],[
AC_MSG_WARN([$1; bitcoin-qt frontend will not be built])
fi
bitcoin_enable_qt=no
+ bitcoin_enable_qt_test=no
else
AC_MSG_ERROR([$1])
fi
diff --git a/configure.ac b/configure.ac
index 2c918218bb..7c0a9f4a94 100644
--- a/configure.ac
+++ b/configure.ac
@@ -6,7 +6,7 @@ define(_CLIENT_VERSION_REVISION, 99)
define(_CLIENT_VERSION_BUILD, 0)
define(_CLIENT_VERSION_IS_RELEASE, false)
define(_COPYRIGHT_YEAR, 2015)
-AC_INIT([Bitcoin Core],[_CLIENT_VERSION_MAJOR._CLIENT_VERSION_MINOR._CLIENT_VERSION_REVISION],[info@bitcoin.org],[bitcoin])
+AC_INIT([Bitcoin Core],[_CLIENT_VERSION_MAJOR._CLIENT_VERSION_MINOR._CLIENT_VERSION_REVISION],[https://github.com/bitcoin/bitcoin/issues],[bitcoin])
AC_CONFIG_SRCDIR([src/main.cpp])
AC_CONFIG_HEADERS([src/config/bitcoin-config.h])
AC_CONFIG_AUX_DIR([build-aux])
diff --git a/doc/REST-interface.md b/doc/REST-interface.md
index f14aed7287..68bf7187fa 100644
--- a/doc/REST-interface.md
+++ b/doc/REST-interface.md
@@ -5,6 +5,8 @@ The REST API can be enabled with the `-rest` option.
Supported API
-------------
+
+####Transactions
`GET /rest/tx/TX-HASH.{bin|hex|json}`
Given a transaction hash,
@@ -12,6 +14,7 @@ Returns a transaction, in binary, hex-encoded binary or JSON formats.
For full TX query capability, one must enable the transaction index via "txindex=1" command line / configuration option.
+####Blocks
`GET /rest/block/BLOCK-HASH.{bin|hex|json}`
`GET /rest/block/notxdetails/BLOCK-HASH.{bin|hex|json}`
@@ -22,6 +25,15 @@ The HTTP request and response are both handled entirely in-memory, thus making m
With the /notxdetails/ option JSON response will only contain the transaction hash instead of the complete transaction details. The option only affects the JSON response.
+####Blockheaders
+`GET /rest/headers/<COUNT>/<BLOCK-HASH>.<bin|hex>`
+
+Given a block hash,
+Returns <COUNT> block headers in upward direction (towards the tip).
+
+JSON is not supported.
+
+####Chaininfos
`GET /rest/chaininfo.json`
Returns various state info regarding block chain processing.
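
For illustration only (not part of this patch), a minimal Python sketch of querying the new headers endpoint; it assumes a node started with -rest, reachable on the default mainnet RPC port 8332, and a placeholder block hash:

    import urllib2  # Python 2, to match the test framework added in this patch

    block_hash = "<hex block hash>"  # hypothetical placeholder
    # Ask for up to 5 serialized headers starting from block_hash, hex-encoded.
    url = "http://127.0.0.1:8332/rest/headers/5/%s.hex" % block_hash
    print(urllib2.urlopen(url).read())
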
diff --git a/qa/pull-tester/rpc-tests.sh b/qa/pull-tester/rpc-tests.sh
index dd2f8d4e5e..ae27a94b8d 100755
--- a/qa/pull-tester/rpc-tests.sh
+++ b/qa/pull-tester/rpc-tests.sh
@@ -30,6 +30,8 @@ testScripts=(
'proxy_test.py'
'merkle_blocks.py'
# 'forknotify.py'
+ 'maxblocksinflight.py'
+ 'invalidblockrequest.py'
);
if [ "x${ENABLE_BITCOIND}${ENABLE_UTILS}${ENABLE_WALLET}" = "x111" ]; then
for (( i = 0; i < ${#testScripts[@]}; i++ ))
diff --git a/qa/rpc-tests/bignum.py b/qa/rpc-tests/bignum.py
new file mode 100644
index 0000000000..b0c58ccd47
--- /dev/null
+++ b/qa/rpc-tests/bignum.py
@@ -0,0 +1,102 @@
+#
+#
+# bignum.py
+#
+# This file is copied from python-bitcoinlib.
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+"""Bignum routines"""
+
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import struct
+
+
+# generic big endian MPI format
+
+def bn_bytes(v, have_ext=False):
+ ext = 0
+ if have_ext:
+ ext = 1
+ return ((v.bit_length()+7)//8) + ext
+
+def bn2bin(v):
+ s = bytearray()
+ i = bn_bytes(v)
+ while i > 0:
+ s.append((v >> ((i-1) * 8)) & 0xff)
+ i -= 1
+ return s
+
+def bin2bn(s):
+ l = 0
+ for ch in s:
+ l = (l << 8) | ch
+ return l
+
+def bn2mpi(v):
+ have_ext = False
+ if v.bit_length() > 0:
+ have_ext = (v.bit_length() & 0x07) == 0
+
+ neg = False
+ if v < 0:
+ neg = True
+ v = -v
+
+ s = struct.pack(b">I", bn_bytes(v, have_ext))
+ ext = bytearray()
+ if have_ext:
+ ext.append(0)
+ v_bin = bn2bin(v)
+ if neg:
+ if have_ext:
+ ext[0] |= 0x80
+ else:
+ v_bin[0] |= 0x80
+ return s + ext + v_bin
+
+def mpi2bn(s):
+ if len(s) < 4:
+ return None
+ s_size = bytes(s[:4])
+ v_len = struct.unpack(b">I", s_size)[0]
+ if len(s) != (v_len + 4):
+ return None
+ if v_len == 0:
+ return 0
+
+ v_str = bytearray(s[4:])
+ neg = False
+ i = v_str[0]
+ if i & 0x80:
+ neg = True
+ i &= ~0x80
+ v_str[0] = i
+
+ v = bin2bn(v_str)
+
+ if neg:
+ return -v
+ return v
+
+# bitcoin-specific little endian format, with implicit size
+def mpi2vch(s):
+ r = s[4:] # strip size
+ r = r[::-1] # reverse string, converting BE->LE
+ return r
+
+def bn2vch(v):
+ return bytes(mpi2vch(bn2mpi(v)))
+
+def vch2mpi(s):
+ r = struct.pack(b">I", len(s)) # size
+ r += s[::-1] # reverse string, converting LE->BE
+ return r
+
+def vch2bn(s):
+ return mpi2bn(vch2mpi(s))
+
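
For orientation (illustration only): the "vch" form is the little-endian, implicitly-sized byte string used in Bitcoin scripts, while MPI is a 4-byte big-endian length followed by a big-endian magnitude whose top bit carries the sign. A couple of values worked by hand through the routines above:

    # 128 needs a leading 0x00 in MPI form so the top (sign) bit stays clear:
    #   MPI: 00 00 00 02 | 00 80    vch (little-endian, no size prefix): 80 00
    assert mpi2bn(b"\x00\x00\x00\x02\x00\x80") == 128
    assert vch2bn(b"\x80\x00") == 128
    # A set top bit in the most significant byte marks a negative number:
    assert vch2bn(b"\x81") == -1
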
diff --git a/qa/rpc-tests/bipdersig-p2p.py b/qa/rpc-tests/bipdersig-p2p.py
new file mode 100755
index 0000000000..ff0c878898
--- /dev/null
+++ b/qa/rpc-tests/bipdersig-p2p.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python2
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+from test_framework import ComparisonTestFramework
+from util import *
+from mininode import CTransaction, NetworkThread
+from blocktools import create_coinbase, create_block
+from binascii import hexlify, unhexlify
+import cStringIO
+from comptool import TestInstance, TestManager
+from script import CScript
+import time
+
+# A canonical signature consists of:
+# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
+def unDERify(tx):
+ '''
+ Make the signature in vin 0 of a tx non-DER-compliant,
+ by adding padding after the S-value.
+ '''
+ scriptSig = CScript(tx.vin[0].scriptSig)
+ newscript = []
+ for i in scriptSig:
+ if (len(newscript) == 0):
+ newscript.append(i[0:-1] + '\0' + i[-1])
+ else:
+ newscript.append(i)
+ tx.vin[0].scriptSig = CScript(newscript)
+
+'''
+This test is meant to exercise BIP66 (DER SIG).
+Connect to a single node.
+Mine 2 (version 2) blocks (save the coinbases for later).
+Generate 98 more version 2 blocks, verify the node accepts.
+Mine 749 version 3 blocks, verify the node accepts.
+Check that the new DERSIG rules are not enforced on the 750th version 3 block.
+Check that the new DERSIG rules are enforced on the 751st version 3 block.
+Mine 199 new version blocks.
+Mine 1 old-version block.
+Mine 1 new version block.
+Mine 1 old version block, see that the node rejects.
+'''
+
+class BIP66Test(ComparisonTestFramework):
+
+ def __init__(self):
+ self.num_nodes = 1
+
+ def setup_network(self):
+ # Must set the blockversion for this test
+ self.nodes = start_nodes(1, self.options.tmpdir,
+ extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=2']],
+ binary=[self.options.testbinary])
+
+ def run_test(self):
+ test = TestManager(self, self.options.tmpdir)
+ test.add_all_connections(self.nodes)
+ NetworkThread().start() # Start up network handling in another thread
+ test.run()
+
+ def create_transaction(self, node, coinbase, to_address, amount):
+ from_txid = node.getblock(coinbase)['tx'][0]
+ inputs = [{ "txid" : from_txid, "vout" : 0}]
+ outputs = { to_address : amount }
+ rawtx = node.createrawtransaction(inputs, outputs)
+ signresult = node.signrawtransaction(rawtx)
+ tx = CTransaction()
+ f = cStringIO.StringIO(unhexlify(signresult['hex']))
+ tx.deserialize(f)
+ return tx
+
+ def get_tests(self):
+
+ self.coinbase_blocks = self.nodes[0].generate(2)
+ self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
+ self.nodeaddress = self.nodes[0].getnewaddress()
+ self.last_block_time = time.time()
+
+ ''' 98 more version 2 blocks '''
+ test_blocks = []
+ for i in xrange(98):
+ block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
+ block.nVersion = 2
+ block.rehash()
+ block.solve()
+ test_blocks.append([block, True])
+ self.last_block_time += 1
+ self.tip = block.sha256
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ ''' Mine 749 version 3 blocks '''
+ test_blocks = []
+ for i in xrange(749):
+ block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
+ block.nVersion = 3
+ block.rehash()
+ block.solve()
+ test_blocks.append([block, True])
+ self.last_block_time += 1
+ self.tip = block.sha256
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ '''
+ Check that the new DERSIG rules are not enforced in the 750th
+ version 3 block.
+ '''
+ spendtx = self.create_transaction(self.nodes[0],
+ self.coinbase_blocks[0], self.nodeaddress, 1.0)
+ unDERify(spendtx)
+ spendtx.rehash()
+
+ block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
+ block.nVersion = 3
+ block.vtx.append(spendtx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+
+ self.last_block_time += 1
+ self.tip = block.sha256
+ yield TestInstance([[block, True]])
+
+ '''
+ Check that the new DERSIG rules are enforced in the 751st version 3
+ block.
+ '''
+ spendtx = self.create_transaction(self.nodes[0],
+ self.coinbase_blocks[1], self.nodeaddress, 1.0)
+ unDERify(spendtx)
+ spendtx.rehash()
+
+ block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
+ block.nVersion = 3
+ block.vtx.append(spendtx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ yield TestInstance([[block, False]])
+
+ ''' Mine 199 new version blocks on last valid tip '''
+ test_blocks = []
+ for i in xrange(199):
+ block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
+ block.nVersion = 3
+ block.rehash()
+ block.solve()
+ test_blocks.append([block, True])
+ self.last_block_time += 1
+ self.tip = block.sha256
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ ''' Mine 1 old version block '''
+ block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
+ block.nVersion = 2
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ self.tip = block.sha256
+ yield TestInstance([[block, True]])
+
+ ''' Mine 1 new version block '''
+ block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
+ block.nVersion = 3
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ self.tip = block.sha256
+ yield TestInstance([[block, True]])
+
+ ''' Mine 1 old version block, should be invalid '''
+ block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
+ block.nVersion = 2
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ yield TestInstance([[block, False]])
+
+if __name__ == '__main__':
+ BIP66Test().main()
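
A side note on the hash handling above (illustration only): RPC calls such as getbestblockhash return big-endian hex strings, while the mininode framework added later in this patch stores hashes as plain Python integers; the int("0x" + ... + "L", 0) idiom bridges the two. Hypothetical helpers, equivalent in effect:

    def rpc_hash_to_int(h):
        # same value the test builds with int("0x" + h + "L", 0)
        return int(h, 16)

    def int_to_rpc_hash(i):
        # inverse mapping; matches the "%064x" formatting used in mininode reprs
        return "%064x" % i
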
diff --git a/qa/rpc-tests/blockstore.py b/qa/rpc-tests/blockstore.py
new file mode 100644
index 0000000000..c57b6df81b
--- /dev/null
+++ b/qa/rpc-tests/blockstore.py
@@ -0,0 +1,127 @@
+# BlockStore: a helper class that keeps a map of blocks and implements
+# helper functions for responding to getheaders and getdata,
+# and for constructing a getheaders message
+#
+
+from mininode import *
+import dbm
+
+class BlockStore(object):
+ def __init__(self, datadir):
+ self.blockDB = dbm.open(datadir + "/blocks", 'c')
+ self.currentBlock = 0L
+
+ def close(self):
+ self.blockDB.close()
+
+ def get(self, blockhash):
+ serialized_block = None
+ try:
+ serialized_block = self.blockDB[repr(blockhash)]
+ except KeyError:
+ return None
+ f = cStringIO.StringIO(serialized_block)
+ ret = CBlock()
+ ret.deserialize(f)
+ ret.calc_sha256()
+ return ret
+
+ # Note: this pulls full blocks out of the database just to retrieve
+ # the headers -- perhaps we could keep a separate data structure
+ # to avoid this overhead.
+ def headers_for(self, locator, hash_stop, current_tip=None):
+ if current_tip is None:
+ current_tip = self.currentBlock
+ current_block = self.get(current_tip)
+ if current_block is None:
+ return None
+
+ response = msg_headers()
+ headersList = [ CBlockHeader(current_block) ]
+ maxheaders = 2000
+ while (headersList[0].sha256 not in locator.vHave):
+ prevBlockHash = headersList[0].hashPrevBlock
+ prevBlock = self.get(prevBlockHash)
+ if prevBlock is not None:
+ headersList.insert(0, CBlockHeader(prevBlock))
+ else:
+ break
+ headersList = headersList[:maxheaders] # truncate if we have too many
+ hashList = [x.sha256 for x in headersList]
+ index = len(headersList)
+ if (hash_stop in hashList):
+ index = hashList.index(hash_stop)+1
+ response.headers = headersList[:index]
+ return response
+
+ def add_block(self, block):
+ block.calc_sha256()
+ try:
+ self.blockDB[repr(block.sha256)] = bytes(block.serialize())
+ except TypeError as e:
+ print "Unexpected error: ", sys.exc_info()[0], e.args
+ self.currentBlock = block.sha256
+
+ def get_blocks(self, inv):
+ responses = []
+ for i in inv:
+ if (i.type == 2): # MSG_BLOCK
+ block = self.get(i.hash)
+ if block is not None:
+ responses.append(msg_block(block))
+ return responses
+
+ def get_locator(self, current_tip=None):
+ if current_tip is None:
+ current_tip = self.currentBlock
+ r = []
+ counter = 0
+ step = 1
+ lastBlock = self.get(current_tip)
+ while lastBlock is not None:
+ r.append(lastBlock.hashPrevBlock)
+ for i in range(step):
+ lastBlock = self.get(lastBlock.hashPrevBlock)
+ if lastBlock is None:
+ break
+ counter += 1
+ if counter > 10:
+ step *= 2
+ locator = CBlockLocator()
+ locator.vHave = r
+ return locator
+
+class TxStore(object):
+ def __init__(self, datadir):
+ self.txDB = dbm.open(datadir + "/transactions", 'c')
+
+ def close(self):
+ self.txDB.close()
+
+ def get(self, txhash):
+ serialized_tx = None
+ try:
+ serialized_tx = self.txDB[repr(txhash)]
+ except KeyError:
+ return None
+ f = cStringIO.StringIO(serialized_tx)
+ ret = CTransaction()
+ ret.deserialize(f)
+ ret.calc_sha256()
+ return ret
+
+ def add_transaction(self, tx):
+ tx.calc_sha256()
+ try:
+ self.txDB[repr(tx.sha256)] = bytes(tx.serialize())
+ except TypeError as e:
+ print "Unexpected error: ", sys.exc_info()[0], e.args
+
+ def get_transactions(self, inv):
+ responses = []
+ for i in inv:
+ if (i.type == 1): # MSG_TX
+ tx = self.get(i.hash)
+ if tx is not None:
+ responses.append(msg_tx(tx))
+ return responses
diff --git a/qa/rpc-tests/blocktools.py b/qa/rpc-tests/blocktools.py
new file mode 100644
index 0000000000..f397fe7cd6
--- /dev/null
+++ b/qa/rpc-tests/blocktools.py
@@ -0,0 +1,65 @@
+# blocktools.py - utilities for manipulating blocks and transactions
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+from mininode import *
+from script import CScript, CScriptOp
+
+# Create a block (with regtest difficulty)
+def create_block(hashprev, coinbase, nTime=None):
+ block = CBlock()
+ if nTime is None:
+ import time
+ block.nTime = int(time.time()+600)
+ else:
+ block.nTime = nTime
+ block.hashPrevBlock = hashprev
+ block.nBits = 0x207fffff # Will break after a difficulty adjustment...
+ block.vtx.append(coinbase)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.calc_sha256()
+ return block
+
+def serialize_script_num(value):
+ r = bytearray(0)
+ if value == 0:
+ return r
+ neg = value < 0
+ absvalue = -value if neg else value
+ while (absvalue):
+ r.append(chr(absvalue & 0xff))
+ absvalue >>= 8
+ if r[-1] & 0x80:
+ r.append(0x80 if neg else 0)
+ elif neg:
+ r[-1] |= 0x80
+ return r
+
+counter=1
+# Create an anyone-can-spend coinbase transaction, assuming no miner fees
+def create_coinbase(heightAdjust = 0):
+ global counter
+ coinbase = CTransaction()
+ coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
+ ser_string(serialize_script_num(counter+heightAdjust)), 0xffffffff))
+ counter += 1
+ coinbaseoutput = CTxOut()
+ coinbaseoutput.nValue = 50*100000000
+ halvings = int((counter+heightAdjust)/150) # regtest
+ coinbaseoutput.nValue >>= halvings
+ coinbaseoutput.scriptPubKey = ""
+ coinbase.vout = [ coinbaseoutput ]
+ coinbase.calc_sha256()
+ return coinbase
+
+# Create a transaction with an anyone-can-spend output, that spends the
+# nth output of prevtx.
+def create_transaction(prevtx, n, sig, value):
+ tx = CTransaction()
+ assert(n < len(prevtx.vout))
+ tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
+ tx.vout.append(CTxOut(value, ""))
+ tx.calc_sha256()
+ return tx
diff --git a/qa/rpc-tests/comptool.py b/qa/rpc-tests/comptool.py
new file mode 100755
index 0000000000..23a979250c
--- /dev/null
+++ b/qa/rpc-tests/comptool.py
@@ -0,0 +1,341 @@
+#!/usr/bin/env python2
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+from mininode import *
+from blockstore import BlockStore, TxStore
+from util import p2p_port
+
+'''
+This is a tool for comparing two or more bitcoinds to each other
+using a script provided.
+
+To use, create a class that implements get_tests(), and pass it in
+as the test generator to TestManager. get_tests() should be a python
+generator that returns TestInstance objects. See below for definition.
+'''
+
+# TestNode behaves as follows:
+# Configure with a BlockStore and TxStore
+# on_inv: log the message but don't request
+# on_headers: log the chain tip
+# on_pong: update ping response map (for synchronization)
+# on_getheaders: provide headers via BlockStore
+# on_getdata: provide blocks via BlockStore
+
+global mininode_lock
+
+class TestNode(NodeConnCB):
+
+ def __init__(self, block_store, tx_store):
+ NodeConnCB.__init__(self)
+ self.create_callback_map()
+ self.conn = None
+ self.bestblockhash = None
+ self.block_store = block_store
+ self.block_request_map = {}
+ self.tx_store = tx_store
+ self.tx_request_map = {}
+
+ # When the pingmap is non-empty we're waiting for
+ # a response
+ self.pingMap = {}
+ self.lastInv = []
+
+ def add_connection(self, conn):
+ self.conn = conn
+
+ def on_headers(self, conn, message):
+ if len(message.headers) > 0:
+ best_header = message.headers[-1]
+ best_header.calc_sha256()
+ self.bestblockhash = best_header.sha256
+
+ def on_getheaders(self, conn, message):
+ response = self.block_store.headers_for(message.locator, message.hashstop)
+ if response is not None:
+ conn.send_message(response)
+
+ def on_getdata(self, conn, message):
+ [conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
+ [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
+
+ for i in message.inv:
+ if i.type == 1:
+ self.tx_request_map[i.hash] = True
+ elif i.type == 2:
+ self.block_request_map[i.hash] = True
+
+ def on_inv(self, conn, message):
+ self.lastInv = [x.hash for x in message.inv]
+
+ def on_pong(self, conn, message):
+ try:
+ del self.pingMap[message.nonce]
+ except KeyError:
+ raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
+
+ def send_inv(self, obj):
+ mtype = 2 if isinstance(obj, CBlock) else 1
+ self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
+
+ def send_getheaders(self):
+ # We ask for headers from their last tip.
+ m = msg_getheaders()
+ m.locator = self.block_store.get_locator(self.bestblockhash)
+ self.conn.send_message(m)
+
+ # This assumes BIP31
+ def send_ping(self, nonce):
+ self.pingMap[nonce] = True
+ self.conn.send_message(msg_ping(nonce))
+
+ def received_ping_response(self, nonce):
+ return nonce not in self.pingMap
+
+ def send_mempool(self):
+ self.lastInv = []
+ self.conn.send_message(msg_mempool())
+
+# TestInstance:
+#
+# Instances of these are generated by the test generator, and fed into the
+# comptool.
+#
+# "blocks_and_transactions" should be an array of [obj, True/False/None]:
+# - obj is either a CBlock or a CTransaction, and
+# - the second value indicates whether the object should be accepted
+# into the blockchain or mempool (for tests where we expect a certain
+# answer), or "None" if we don't expect a certain answer and are just
+# comparing the behavior of the nodes being tested.
+# sync_every_block: if True, then each block will be inv'ed, synced, and
+# nodes will be tested based on the outcome for the block. If False,
+# then inv's accumulate until all blocks are processed (or max inv size
+# is reached) and then sent out in one inv message. Then the final block
+# will be synced across all connections, and the outcome of the final
+# block will be tested.
+# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
+# on the final tx is None, then contents of entire mempool are compared
+# across all connections. (If outcome of final tx is specified as true
+# or false, then only the last tx is tested against outcome.)
+
+class TestInstance(object):
+ def __init__(self, objects=[], sync_every_block=True, sync_every_tx=False):
+ self.blocks_and_transactions = objects
+ self.sync_every_block = sync_every_block
+ self.sync_every_tx = sync_every_tx
+
+class TestManager(object):
+
+ def __init__(self, testgen, datadir):
+ self.test_generator = testgen
+ self.connections = []
+ self.block_store = BlockStore(datadir)
+ self.tx_store = TxStore(datadir)
+ self.ping_counter = 1
+
+ def add_all_connections(self, nodes):
+ for i in range(len(nodes)):
+ # Create a p2p connection to each node
+ self.connections.append(NodeConn('127.0.0.1', p2p_port(i),
+ nodes[i], TestNode(self.block_store, self.tx_store)))
+ # Make sure the TestNode (callback class) has a reference to its
+ # associated NodeConn
+ self.connections[-1].cb.add_connection(self.connections[-1])
+
+ def wait_for_verack(self):
+ sleep_time = 0.05
+ max_tries = 10 / sleep_time # Wait at most 10 seconds
+ while max_tries > 0:
+ done = True
+ with mininode_lock:
+ for c in self.connections:
+ if c.cb.verack_received is False:
+ done = False
+ break
+ if done:
+ break
+ time.sleep(sleep_time)
+
+ def wait_for_pings(self, counter):
+ received_pongs = False
+ while received_pongs is not True:
+ time.sleep(0.05)
+ received_pongs = True
+ with mininode_lock:
+ for c in self.connections:
+ if c.cb.received_ping_response(counter) is not True:
+ received_pongs = False
+ break
+
+ # sync_blocks: Wait for all connections to request the blockhash given
+ # then send get_headers to find out the tip of each node, and synchronize
+ # the response by using a ping (and waiting for pong with same nonce).
+ def sync_blocks(self, blockhash, num_blocks):
+ # Wait for nodes to request block (50ms sleep * 20 tries * num_blocks)
+ max_tries = 20*num_blocks
+ while max_tries > 0:
+ with mininode_lock:
+ results = [ blockhash in c.cb.block_request_map and
+ c.cb.block_request_map[blockhash] for c in self.connections ]
+ if False not in results:
+ break
+ time.sleep(0.05)
+ max_tries -= 1
+
+ # --> error if not requested
+ if max_tries == 0:
+ # print [ c.cb.block_request_map for c in self.connections ]
+ raise AssertionError("Not all nodes requested block")
+ # --> Answer request (we did this inline!)
+
+ # Send getheaders message
+ [ c.cb.send_getheaders() for c in self.connections ]
+
+ # Send ping and wait for response -- synchronization hack
+ [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
+ self.wait_for_pings(self.ping_counter)
+ self.ping_counter += 1
+
+    # Analogous to sync_blocks (see above)
+ def sync_transaction(self, txhash, num_events):
+ # Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
+ max_tries = 20*num_events
+ while max_tries > 0:
+ with mininode_lock:
+ results = [ txhash in c.cb.tx_request_map and
+ c.cb.tx_request_map[txhash] for c in self.connections ]
+ if False not in results:
+ break
+ time.sleep(0.05)
+ max_tries -= 1
+
+ # --> error if not requested
+ if max_tries == 0:
+ # print [ c.cb.tx_request_map for c in self.connections ]
+ raise AssertionError("Not all nodes requested transaction")
+ # --> Answer request (we did this inline!)
+
+ # Get the mempool
+ [ c.cb.send_mempool() for c in self.connections ]
+
+ # Send ping and wait for response -- synchronization hack
+ [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
+ self.wait_for_pings(self.ping_counter)
+ self.ping_counter += 1
+
+ # Sort inv responses from each node
+ with mininode_lock:
+ [ c.cb.lastInv.sort() for c in self.connections ]
+
+ # Verify that the tip of each connection all agree with each other, and
+ # with the expected outcome (if given)
+ def check_results(self, blockhash, outcome):
+ with mininode_lock:
+ for c in self.connections:
+ if outcome is None:
+ if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
+ return False
+ elif ((c.cb.bestblockhash == blockhash) != outcome):
+ # print c.cb.bestblockhash, blockhash, outcome
+ return False
+ return True
+
+ # Either check that the mempools all agree with each other, or that
+ # txhash's presence in the mempool matches the outcome specified.
+ # This is somewhat of a strange comparison, in that we're either comparing
+ # a particular tx to an outcome, or the entire mempools altogether;
+ # perhaps it would be useful to add the ability to check explicitly that
+ # a particular tx's existence in the mempool is the same across all nodes.
+ def check_mempool(self, txhash, outcome):
+ with mininode_lock:
+ for c in self.connections:
+ if outcome is None:
+ # Make sure the mempools agree with each other
+ if c.cb.lastInv != self.connections[0].cb.lastInv:
+ # print c.rpc.getrawmempool()
+ return False
+ elif ((txhash in c.cb.lastInv) != outcome):
+ # print c.rpc.getrawmempool(), c.cb.lastInv
+ return False
+ return True
+
+ def run(self):
+ # Wait until verack is received
+ self.wait_for_verack()
+
+ test_number = 1
+ for test_instance in self.test_generator.get_tests():
+ # We use these variables to keep track of the last block
+ # and last transaction in the tests, which are used
+ # if we're not syncing on every block or every tx.
+ [ block, block_outcome ] = [ None, None ]
+ [ tx, tx_outcome ] = [ None, None ]
+ invqueue = []
+
+ for b_or_t, outcome in test_instance.blocks_and_transactions:
+ # Determine if we're dealing with a block or tx
+ if isinstance(b_or_t, CBlock): # Block test runner
+ block = b_or_t
+ block_outcome = outcome
+ # Add to shared block_store, set as current block
+ with mininode_lock:
+ self.block_store.add_block(block)
+ for c in self.connections:
+ c.cb.block_request_map[block.sha256] = False
+ # Either send inv's to each node and sync, or add
+ # to invqueue for later inv'ing.
+ if (test_instance.sync_every_block):
+ [ c.cb.send_inv(block) for c in self.connections ]
+ self.sync_blocks(block.sha256, 1)
+ if (not self.check_results(block.sha256, outcome)):
+ raise AssertionError("Test failed at test %d" % test_number)
+ else:
+ invqueue.append(CInv(2, block.sha256))
+ else: # Tx test runner
+ assert(isinstance(b_or_t, CTransaction))
+ tx = b_or_t
+ tx_outcome = outcome
+ # Add to shared tx store and clear map entry
+ with mininode_lock:
+ self.tx_store.add_transaction(tx)
+ for c in self.connections:
+ c.cb.tx_request_map[tx.sha256] = False
+ # Again, either inv to all nodes or save for later
+ if (test_instance.sync_every_tx):
+ [ c.cb.send_inv(tx) for c in self.connections ]
+ self.sync_transaction(tx.sha256, 1)
+ if (not self.check_mempool(tx.sha256, outcome)):
+ raise AssertionError("Test failed at test %d" % test_number)
+ else:
+ invqueue.append(CInv(1, tx.sha256))
+ # Ensure we're not overflowing the inv queue
+ if len(invqueue) == MAX_INV_SZ:
+ [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ invqueue = []
+
+ # Do final sync if we weren't syncing on every block or every tx.
+ if (not test_instance.sync_every_block and block is not None):
+ if len(invqueue) > 0:
+ [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ invqueue = []
+ self.sync_blocks(block.sha256,
+ len(test_instance.blocks_and_transactions))
+ if (not self.check_results(block.sha256, block_outcome)):
+ raise AssertionError("Block test failed at test %d" % test_number)
+ if (not test_instance.sync_every_tx and tx is not None):
+ if len(invqueue) > 0:
+ [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ invqueue = []
+ self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
+ if (not self.check_mempool(tx.sha256, tx_outcome)):
+ raise AssertionError("Mempool test failed at test %d" % test_number)
+
+ print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ]
+ test_number += 1
+
+ self.block_store.close()
+ self.tx_store.close()
+ [ c.disconnect_node() for c in self.connections ]
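
Tying together the TestInstance/TestManager conventions documented above, a minimal, hypothetical comparison test could look like the sketch below; it assumes the same imports the real tests in this patch use (test_framework, blocktools, mininode, time). The actual tests in this change, bipdersig-p2p.py and invalidblockrequest.py, follow exactly this shape.

    class ExampleComparisonTest(ComparisonTestFramework):
        def run_test(self):
            test = TestManager(self, self.options.tmpdir)
            test.add_all_connections(self.nodes)
            NetworkThread().start()   # network handling runs in its own thread
            test.run()

        def get_tests(self):
            # Build one block on the current tip and expect the node to accept it.
            tip = int("0x" + self.nodes[0].getbestblockhash() + "L", 0)
            block = create_block(tip, create_coinbase(), int(time.time()) + 1)
            block.solve()
            yield TestInstance([[block, True]])
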
diff --git a/qa/rpc-tests/invalidblockrequest.py b/qa/rpc-tests/invalidblockrequest.py
new file mode 100755
index 0000000000..8b685ed9b2
--- /dev/null
+++ b/qa/rpc-tests/invalidblockrequest.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python2
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+from test_framework import ComparisonTestFramework
+from util import *
+from comptool import TestManager, TestInstance
+from mininode import *
+from blocktools import *
+import logging
+import copy
+import time
+
+
+'''
+In this test we connect to one node over p2p, and test block requests:
+1) Valid blocks should be requested and become chain tip.
+2) Invalid block with duplicated transaction should be re-requested.
+3) Invalid block with bad coinbase value should be rejected and not
+re-requested.
+'''
+
+# Use the ComparisonTestFramework with 1 node: only use --testbinary.
+class InvalidBlockRequestTest(ComparisonTestFramework):
+
+    ''' Can either run this test with one node and expected answers, or with two nodes and compare them.
+        Set the "outcome" entries of each TestInstance to None to only do the comparison. '''
+ def __init__(self):
+ self.num_nodes = 1
+
+ def run_test(self):
+ test = TestManager(self, self.options.tmpdir)
+ test.add_all_connections(self.nodes)
+ self.tip = None
+ self.block_time = None
+ NetworkThread().start() # Start up network handling in another thread
+ test.run()
+
+ def get_tests(self):
+ if self.tip is None:
+ self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
+ self.block_time = int(time.time())+1
+
+ '''
+ Create a new block with an anyone-can-spend coinbase
+ '''
+ block = create_block(self.tip, create_coinbase(), self.block_time)
+ self.block_time += 1
+ block.solve()
+ # Save the coinbase for later
+ self.block1 = block
+ self.tip = block.sha256
+ yield TestInstance([[block, True]])
+
+ '''
+ Now we need that block to mature so we can spend the coinbase.
+ '''
+ test = TestInstance(sync_every_block=False)
+ for i in xrange(100):
+ block = create_block(self.tip, create_coinbase(), self.block_time)
+ block.solve()
+ self.tip = block.sha256
+ self.block_time += 1
+ test.blocks_and_transactions.append([block, True])
+ yield test
+
+ '''
+ Now we use merkle-root malleability to generate an invalid block with
+ same blockheader.
+ Manufacture a block with 3 transactions (coinbase, spend of prior
+ coinbase, spend of that spend). Duplicate the 3rd transaction to
+ leave merkle root and blockheader unchanged but invalidate the block.
+ '''
+ block2 = create_block(self.tip, create_coinbase(), self.block_time)
+ self.block_time += 1
+
+ # chr(81) is OP_TRUE
+ tx1 = create_transaction(self.block1.vtx[0], 0, chr(81), 50*100000000)
+ tx2 = create_transaction(tx1, 0, chr(81), 50*100000000)
+
+ block2.vtx.extend([tx1, tx2])
+ block2.hashMerkleRoot = block2.calc_merkle_root()
+ block2.rehash()
+ block2.solve()
+ orig_hash = block2.sha256
+ block2_orig = copy.deepcopy(block2)
+
+ # Mutate block 2
+ block2.vtx.append(tx2)
+ assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
+ assert_equal(orig_hash, block2.rehash())
+ assert(block2_orig.vtx != block2.vtx)
+
+ self.tip = block2.sha256
+ yield TestInstance([[block2, False], [block2_orig, True]])
+
+ '''
+ Make sure that a totally screwed up block is not valid.
+ '''
+ block3 = create_block(self.tip, create_coinbase(), self.block_time)
+ self.block_time += 1
+ block3.vtx[0].vout[0].nValue = 100*100000000 # Too high!
+ block3.vtx[0].sha256=None
+ block3.vtx[0].calc_sha256()
+ block3.hashMerkleRoot = block3.calc_merkle_root()
+ block3.rehash()
+ block3.solve()
+
+ yield TestInstance([[block3, False]])
+
+
+if __name__ == '__main__':
+ InvalidBlockRequestTest().main()
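
The merkle-malleability trick above works because calc_merkle_root (in mininode.py) pairs an odd trailing entry with itself, so [a, b, c] and [a, b, c, c] yield the same root. A standalone illustration with hypothetical CTransaction objects:

    b = CBlock()
    b.vtx = [tx_a, tx_b, tx_c]            # any three CTransaction objects (hypothetical)
    root = b.calc_merkle_root()
    b.vtx.append(tx_c)                    # duplicate the last transaction
    assert b.calc_merkle_root() == root   # same header, but the block is now invalid
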
diff --git a/qa/rpc-tests/maxblocksinflight.py b/qa/rpc-tests/maxblocksinflight.py
new file mode 100755
index 0000000000..87c80cd97e
--- /dev/null
+++ b/qa/rpc-tests/maxblocksinflight.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python2
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+from mininode import *
+from test_framework import BitcoinTestFramework
+from util import *
+import logging
+
+'''
+In this test we connect to one node over p2p, send it numerous inv's, and
+compare the resulting number of getdata requests to a max allowed value. We
+test for exceeding 128 blocks in flight, which is the limit a 0.9 client would
+reach. [0.10 clients shouldn't request more than 16 from a single peer.]
+'''
+MAX_REQUESTS = 128
+
+class TestManager(NodeConnCB):
+ # set up NodeConnCB callbacks, overriding base class
+ def on_getdata(self, conn, message):
+ self.log.debug("got getdata %s" % repr(message))
+ # Log the requests
+ for inv in message.inv:
+ if inv.hash not in self.blockReqCounts:
+ self.blockReqCounts[inv.hash] = 0
+ self.blockReqCounts[inv.hash] += 1
+
+ def on_close(self, conn):
+ if not self.disconnectOkay:
+ raise EarlyDisconnectError(0)
+
+ def __init__(self):
+ NodeConnCB.__init__(self)
+ self.log = logging.getLogger("BlockRelayTest")
+ self.create_callback_map()
+
+ def add_new_connection(self, connection):
+ self.connection = connection
+ self.blockReqCounts = {}
+ self.disconnectOkay = False
+
+ def run(self):
+ try:
+ fail = False
+ self.connection.rpc.generate(1) # Leave IBD
+
+ numBlocksToGenerate = [ 8, 16, 128, 1024 ]
+ for count in range(len(numBlocksToGenerate)):
+ current_invs = []
+ for i in range(numBlocksToGenerate[count]):
+ current_invs.append(CInv(2, random.randrange(0, 1<<256)))
+ if len(current_invs) >= 50000:
+ self.connection.send_message(msg_inv(current_invs))
+ current_invs = []
+ if len(current_invs) > 0:
+ self.connection.send_message(msg_inv(current_invs))
+
+ # Wait and see how many blocks were requested
+ time.sleep(2)
+
+ total_requests = 0
+ with mininode_lock:
+ for key in self.blockReqCounts:
+ total_requests += self.blockReqCounts[key]
+ if self.blockReqCounts[key] > 1:
+ raise AssertionError("Error, test failed: block %064x requested more than once" % key)
+ if total_requests > MAX_REQUESTS:
+ raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
+ print "Round %d: success (total requests: %d)" % (count, total_requests)
+ except AssertionError as e:
+ print "TEST FAILED: ", e.args
+
+ self.disconnectOkay = True
+ self.connection.disconnect_node()
+
+
+class MaxBlocksInFlightTest(BitcoinTestFramework):
+ def add_options(self, parser):
+ parser.add_option("--testbinary", dest="testbinary",
+ default=os.getenv("BITCOIND", "bitcoind"),
+ help="Binary to test max block requests behavior")
+
+ def setup_chain(self):
+ print "Initializing test directory "+self.options.tmpdir
+ initialize_chain_clean(self.options.tmpdir, 1)
+
+ def setup_network(self):
+ self.nodes = start_nodes(1, self.options.tmpdir,
+ extra_args=[['-debug', '-whitelist=127.0.0.1']],
+ binary=[self.options.testbinary])
+
+ def run_test(self):
+ test = TestManager()
+ test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
+ NetworkThread().start() # Start up network handling in another thread
+ test.run()
+
+if __name__ == '__main__':
+ MaxBlocksInFlightTest().main()
diff --git a/qa/rpc-tests/mininode.py b/qa/rpc-tests/mininode.py
new file mode 100755
index 0000000000..b7d78e74fa
--- /dev/null
+++ b/qa/rpc-tests/mininode.py
@@ -0,0 +1,1256 @@
+# mininode.py - Bitcoin P2P network half-a-node
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+# This python code was modified from ArtForz' public domain half-a-node, as
+# found in the mini-node branch of http://github.com/jgarzik/pynode.
+#
+# NodeConn: an object which manages p2p connectivity to a bitcoin node
+# NodeConnCB: a base class that describes the interface for receiving
+# callbacks with network messages from a NodeConn
+# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
+# data structures that should map to corresponding structures in
+# bitcoin/primitives
+# msg_block, msg_tx, msg_headers, etc.:
+# data structures that represent network messages
+# ser_*, deser_*: functions that handle serialization/deserialization
+
+
+import struct
+import socket
+import asyncore
+import binascii
+import time
+import sys
+import random
+import cStringIO
+import hashlib
+from threading import RLock
+from threading import Thread
+import logging
+import copy
+
+BIP0031_VERSION = 60000
+MY_VERSION = 60001 # past bip-31 for ping/pong
+MY_SUBVERSION = "/python-mininode-tester:0.0.1/"
+
+MAX_INV_SZ = 50000
+
+# Keep our own socket map for asyncore, so that we can track disconnects
+# ourselves (to workaround an issue with closing an asyncore socket when
+# using select)
+mininode_socket_map = dict()
+
+# One lock for synchronizing all data access between the networking thread (see
+# NetworkThread below) and the thread running the test logic. For simplicity,
+# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
+# and whenever adding anything to the send buffer (in send_message()). This
+# lock should be acquired in the thread running the test logic to synchronize
+# access to any data shared with the NodeConnCB or NodeConn.
+mininode_lock = RLock()
+
+# Serialization/deserialization tools
+def sha256(s):
+ return hashlib.new('sha256', s).digest()
+
+
+def hash256(s):
+ return sha256(sha256(s))
+
+
+def deser_string(f):
+ nit = struct.unpack("<B", f.read(1))[0]
+ if nit == 253:
+ nit = struct.unpack("<H", f.read(2))[0]
+ elif nit == 254:
+ nit = struct.unpack("<I", f.read(4))[0]
+ elif nit == 255:
+ nit = struct.unpack("<Q", f.read(8))[0]
+ return f.read(nit)
+
+
+def ser_string(s):
+ if len(s) < 253:
+ return chr(len(s)) + s
+ elif len(s) < 0x10000:
+ return chr(253) + struct.pack("<H", len(s)) + s
+ elif len(s) < 0x100000000L:
+ return chr(254) + struct.pack("<I", len(s)) + s
+ return chr(255) + struct.pack("<Q", len(s)) + s
+
+
+def deser_uint256(f):
+ r = 0L
+ for i in xrange(8):
+ t = struct.unpack("<I", f.read(4))[0]
+ r += t << (i * 32)
+ return r
+
+
+def ser_uint256(u):
+ rs = ""
+ for i in xrange(8):
+ rs += struct.pack("<I", u & 0xFFFFFFFFL)
+ u >>= 32
+ return rs
+
+
+def uint256_from_str(s):
+ r = 0L
+ t = struct.unpack("<IIIIIIII", s[:32])
+ for i in xrange(8):
+ r += t[i] << (i * 32)
+ return r
+
+
+def uint256_from_compact(c):
+ nbytes = (c >> 24) & 0xFF
+ v = (c & 0xFFFFFFL) << (8 * (nbytes - 3))
+ return v
+
+
+def deser_vector(f, c):
+ nit = struct.unpack("<B", f.read(1))[0]
+ if nit == 253:
+ nit = struct.unpack("<H", f.read(2))[0]
+ elif nit == 254:
+ nit = struct.unpack("<I", f.read(4))[0]
+ elif nit == 255:
+ nit = struct.unpack("<Q", f.read(8))[0]
+ r = []
+ for i in xrange(nit):
+ t = c()
+ t.deserialize(f)
+ r.append(t)
+ return r
+
+
+def ser_vector(l):
+ r = ""
+ if len(l) < 253:
+ r = chr(len(l))
+ elif len(l) < 0x10000:
+ r = chr(253) + struct.pack("<H", len(l))
+ elif len(l) < 0x100000000L:
+ r = chr(254) + struct.pack("<I", len(l))
+ else:
+ r = chr(255) + struct.pack("<Q", len(l))
+ for i in l:
+ r += i.serialize()
+ return r
+
+
+def deser_uint256_vector(f):
+ nit = struct.unpack("<B", f.read(1))[0]
+ if nit == 253:
+ nit = struct.unpack("<H", f.read(2))[0]
+ elif nit == 254:
+ nit = struct.unpack("<I", f.read(4))[0]
+ elif nit == 255:
+ nit = struct.unpack("<Q", f.read(8))[0]
+ r = []
+ for i in xrange(nit):
+ t = deser_uint256(f)
+ r.append(t)
+ return r
+
+
+def ser_uint256_vector(l):
+ r = ""
+ if len(l) < 253:
+ r = chr(len(l))
+ elif len(l) < 0x10000:
+ r = chr(253) + struct.pack("<H", len(l))
+ elif len(l) < 0x100000000L:
+ r = chr(254) + struct.pack("<I", len(l))
+ else:
+ r = chr(255) + struct.pack("<Q", len(l))
+ for i in l:
+ r += ser_uint256(i)
+ return r
+
+
+def deser_string_vector(f):
+ nit = struct.unpack("<B", f.read(1))[0]
+ if nit == 253:
+ nit = struct.unpack("<H", f.read(2))[0]
+ elif nit == 254:
+ nit = struct.unpack("<I", f.read(4))[0]
+ elif nit == 255:
+ nit = struct.unpack("<Q", f.read(8))[0]
+ r = []
+ for i in xrange(nit):
+ t = deser_string(f)
+ r.append(t)
+ return r
+
+
+def ser_string_vector(l):
+ r = ""
+ if len(l) < 253:
+ r = chr(len(l))
+ elif len(l) < 0x10000:
+ r = chr(253) + struct.pack("<H", len(l))
+ elif len(l) < 0x100000000L:
+ r = chr(254) + struct.pack("<I", len(l))
+ else:
+ r = chr(255) + struct.pack("<Q", len(l))
+ for sv in l:
+ r += ser_string(sv)
+ return r
+
+
+def deser_int_vector(f):
+ nit = struct.unpack("<B", f.read(1))[0]
+ if nit == 253:
+ nit = struct.unpack("<H", f.read(2))[0]
+ elif nit == 254:
+ nit = struct.unpack("<I", f.read(4))[0]
+ elif nit == 255:
+ nit = struct.unpack("<Q", f.read(8))[0]
+ r = []
+ for i in xrange(nit):
+ t = struct.unpack("<i", f.read(4))[0]
+ r.append(t)
+ return r
+
+
+def ser_int_vector(l):
+ r = ""
+ if len(l) < 253:
+ r = chr(len(l))
+ elif len(l) < 0x10000:
+ r = chr(253) + struct.pack("<H", len(l))
+ elif len(l) < 0x100000000L:
+ r = chr(254) + struct.pack("<I", len(l))
+ else:
+ r = chr(255) + struct.pack("<Q", len(l))
+ for i in l:
+ r += struct.pack("<i", i)
+ return r
+
+
+# Objects that map to bitcoind objects, which can be serialized/deserialized
+
+class CAddress(object):
+ def __init__(self):
+ self.nServices = 1
+ self.pchReserved = "\x00" * 10 + "\xff" * 2
+ self.ip = "0.0.0.0"
+ self.port = 0
+
+ def deserialize(self, f):
+ self.nServices = struct.unpack("<Q", f.read(8))[0]
+ self.pchReserved = f.read(12)
+ self.ip = socket.inet_ntoa(f.read(4))
+ self.port = struct.unpack(">H", f.read(2))[0]
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<Q", self.nServices)
+ r += self.pchReserved
+ r += socket.inet_aton(self.ip)
+ r += struct.pack(">H", self.port)
+ return r
+
+ def __repr__(self):
+ return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
+ self.ip, self.port)
+
+
+class CInv(object):
+ typemap = {
+ 0: "Error",
+ 1: "TX",
+ 2: "Block"}
+
+ def __init__(self, t=0, h=0L):
+ self.type = t
+ self.hash = h
+
+ def deserialize(self, f):
+ self.type = struct.unpack("<i", f.read(4))[0]
+ self.hash = deser_uint256(f)
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<i", self.type)
+ r += ser_uint256(self.hash)
+ return r
+
+ def __repr__(self):
+ return "CInv(type=%s hash=%064x)" \
+ % (self.typemap[self.type], self.hash)
+
+
+class CBlockLocator(object):
+ def __init__(self):
+ self.nVersion = MY_VERSION
+ self.vHave = []
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ self.vHave = deser_uint256_vector(f)
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<i", self.nVersion)
+ r += ser_uint256_vector(self.vHave)
+ return r
+
+ def __repr__(self):
+ return "CBlockLocator(nVersion=%i vHave=%s)" \
+ % (self.nVersion, repr(self.vHave))
+
+
+class COutPoint(object):
+ def __init__(self, hash=0, n=0):
+ self.hash = hash
+ self.n = n
+
+ def deserialize(self, f):
+ self.hash = deser_uint256(f)
+ self.n = struct.unpack("<I", f.read(4))[0]
+
+ def serialize(self):
+ r = ""
+ r += ser_uint256(self.hash)
+ r += struct.pack("<I", self.n)
+ return r
+
+ def __repr__(self):
+ return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
+
+
+class CTxIn(object):
+ def __init__(self, outpoint=None, scriptSig="", nSequence=0):
+ if outpoint is None:
+ self.prevout = COutPoint()
+ else:
+ self.prevout = outpoint
+ self.scriptSig = scriptSig
+ self.nSequence = nSequence
+
+ def deserialize(self, f):
+ self.prevout = COutPoint()
+ self.prevout.deserialize(f)
+ self.scriptSig = deser_string(f)
+ self.nSequence = struct.unpack("<I", f.read(4))[0]
+
+ def serialize(self):
+ r = ""
+ r += self.prevout.serialize()
+ r += ser_string(self.scriptSig)
+ r += struct.pack("<I", self.nSequence)
+ return r
+
+ def __repr__(self):
+ return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
+ % (repr(self.prevout), binascii.hexlify(self.scriptSig),
+ self.nSequence)
+
+
+class CTxOut(object):
+ def __init__(self, nValue=0, scriptPubKey=""):
+ self.nValue = nValue
+ self.scriptPubKey = scriptPubKey
+
+ def deserialize(self, f):
+ self.nValue = struct.unpack("<q", f.read(8))[0]
+ self.scriptPubKey = deser_string(f)
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<q", self.nValue)
+ r += ser_string(self.scriptPubKey)
+ return r
+
+ def __repr__(self):
+ return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
+ % (self.nValue // 100000000, self.nValue % 100000000,
+ binascii.hexlify(self.scriptPubKey))
+
+
+class CTransaction(object):
+ def __init__(self, tx=None):
+ if tx is None:
+ self.nVersion = 1
+ self.vin = []
+ self.vout = []
+ self.nLockTime = 0
+ self.sha256 = None
+ self.hash = None
+ else:
+ self.nVersion = tx.nVersion
+ self.vin = copy.deepcopy(tx.vin)
+ self.vout = copy.deepcopy(tx.vout)
+ self.nLockTime = tx.nLockTime
+ self.sha256 = None
+ self.hash = None
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ self.vin = deser_vector(f, CTxIn)
+ self.vout = deser_vector(f, CTxOut)
+ self.nLockTime = struct.unpack("<I", f.read(4))[0]
+ self.sha256 = None
+ self.hash = None
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<i", self.nVersion)
+ r += ser_vector(self.vin)
+ r += ser_vector(self.vout)
+ r += struct.pack("<I", self.nLockTime)
+ return r
+
+ def rehash(self):
+ self.sha256 = None
+ self.calc_sha256()
+
+ def calc_sha256(self):
+ if self.sha256 is None:
+ self.sha256 = uint256_from_str(hash256(self.serialize()))
+ self.hash = hash256(self.serialize())[::-1].encode('hex_codec')
+
+ def is_valid(self):
+ self.calc_sha256()
+ for tout in self.vout:
+ if tout.nValue < 0 or tout.nValue > 21000000L * 100000000L:
+ return False
+ return True
+
+ def __repr__(self):
+ return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
+ % (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
+
+
+class CBlockHeader(object):
+ def __init__(self, header=None):
+ if header is None:
+ self.set_null()
+ else:
+ self.nVersion = header.nVersion
+ self.hashPrevBlock = header.hashPrevBlock
+ self.hashMerkleRoot = header.hashMerkleRoot
+ self.nTime = header.nTime
+ self.nBits = header.nBits
+ self.nNonce = header.nNonce
+ self.sha256 = header.sha256
+ self.hash = header.hash
+ self.calc_sha256()
+
+ def set_null(self):
+ self.nVersion = 1
+ self.hashPrevBlock = 0
+ self.hashMerkleRoot = 0
+ self.nTime = 0
+ self.nBits = 0
+ self.nNonce = 0
+ self.sha256 = None
+ self.hash = None
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ self.hashPrevBlock = deser_uint256(f)
+ self.hashMerkleRoot = deser_uint256(f)
+ self.nTime = struct.unpack("<I", f.read(4))[0]
+ self.nBits = struct.unpack("<I", f.read(4))[0]
+ self.nNonce = struct.unpack("<I", f.read(4))[0]
+ self.sha256 = None
+ self.hash = None
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<i", self.nVersion)
+ r += ser_uint256(self.hashPrevBlock)
+ r += ser_uint256(self.hashMerkleRoot)
+ r += struct.pack("<I", self.nTime)
+ r += struct.pack("<I", self.nBits)
+ r += struct.pack("<I", self.nNonce)
+ return r
+
+ def calc_sha256(self):
+ if self.sha256 is None:
+ r = ""
+ r += struct.pack("<i", self.nVersion)
+ r += ser_uint256(self.hashPrevBlock)
+ r += ser_uint256(self.hashMerkleRoot)
+ r += struct.pack("<I", self.nTime)
+ r += struct.pack("<I", self.nBits)
+ r += struct.pack("<I", self.nNonce)
+ self.sha256 = uint256_from_str(hash256(r))
+ self.hash = hash256(r)[::-1].encode('hex_codec')
+
+ def rehash(self):
+ self.sha256 = None
+ self.calc_sha256()
+ return self.sha256
+
+ def __repr__(self):
+ return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
+ % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
+ time.ctime(self.nTime), self.nBits, self.nNonce)
+
+
+class CBlock(CBlockHeader):
+ def __init__(self, header=None):
+ super(CBlock, self).__init__(header)
+ self.vtx = []
+
+ def deserialize(self, f):
+ super(CBlock, self).deserialize(f)
+ self.vtx = deser_vector(f, CTransaction)
+
+ def serialize(self):
+ r = ""
+ r += super(CBlock, self).serialize()
+ r += ser_vector(self.vtx)
+ return r
+
+ def calc_merkle_root(self):
+ hashes = []
+ for tx in self.vtx:
+ tx.calc_sha256()
+ hashes.append(ser_uint256(tx.sha256))
+ while len(hashes) > 1:
+ newhashes = []
+ for i in xrange(0, len(hashes), 2):
+ i2 = min(i+1, len(hashes)-1)
+ newhashes.append(hash256(hashes[i] + hashes[i2]))
+ hashes = newhashes
+ return uint256_from_str(hashes[0])
+
+ def is_valid(self):
+ self.calc_sha256()
+ target = uint256_from_compact(self.nBits)
+ if self.sha256 > target:
+ return False
+ for tx in self.vtx:
+ if not tx.is_valid():
+ return False
+ if self.calc_merkle_root() != self.hashMerkleRoot:
+ return False
+ return True
+
+ def solve(self):
+ self.calc_sha256()
+ target = uint256_from_compact(self.nBits)
+ while self.sha256 > target:
+ self.nNonce += 1
+ self.rehash()
+
+ def __repr__(self):
+ return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
+ % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
+ time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
+
+
+class CUnsignedAlert(object):
+ def __init__(self):
+ self.nVersion = 1
+ self.nRelayUntil = 0
+ self.nExpiration = 0
+ self.nID = 0
+ self.nCancel = 0
+ self.setCancel = []
+ self.nMinVer = 0
+ self.nMaxVer = 0
+ self.setSubVer = []
+ self.nPriority = 0
+ self.strComment = ""
+ self.strStatusBar = ""
+ self.strReserved = ""
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
+ self.nExpiration = struct.unpack("<q", f.read(8))[0]
+ self.nID = struct.unpack("<i", f.read(4))[0]
+ self.nCancel = struct.unpack("<i", f.read(4))[0]
+ self.setCancel = deser_int_vector(f)
+ self.nMinVer = struct.unpack("<i", f.read(4))[0]
+ self.nMaxVer = struct.unpack("<i", f.read(4))[0]
+ self.setSubVer = deser_string_vector(f)
+ self.nPriority = struct.unpack("<i", f.read(4))[0]
+ self.strComment = deser_string(f)
+ self.strStatusBar = deser_string(f)
+ self.strReserved = deser_string(f)
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<i", self.nVersion)
+ r += struct.pack("<q", self.nRelayUntil)
+ r += struct.pack("<q", self.nExpiration)
+ r += struct.pack("<i", self.nID)
+ r += struct.pack("<i", self.nCancel)
+ r += ser_int_vector(self.setCancel)
+ r += struct.pack("<i", self.nMinVer)
+ r += struct.pack("<i", self.nMaxVer)
+ r += ser_string_vector(self.setSubVer)
+ r += struct.pack("<i", self.nPriority)
+ r += ser_string(self.strComment)
+ r += ser_string(self.strStatusBar)
+ r += ser_string(self.strReserved)
+ return r
+
+ def __repr__(self):
+ return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
+ % (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
+ self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
+ self.strComment, self.strStatusBar, self.strReserved)
+
+
+class CAlert(object):
+ def __init__(self):
+ self.vchMsg = ""
+ self.vchSig = ""
+
+ def deserialize(self, f):
+ self.vchMsg = deser_string(f)
+ self.vchSig = deser_string(f)
+
+ def serialize(self):
+ r = ""
+ r += ser_string(self.vchMsg)
+ r += ser_string(self.vchSig)
+ return r
+
+ def __repr__(self):
+ return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
+ % (len(self.vchMsg), len(self.vchSig))
+
+
+# Objects that correspond to messages on the wire
+class msg_version(object):
+ command = "version"
+
+ def __init__(self):
+ self.nVersion = MY_VERSION
+ self.nServices = 1
+ self.nTime = time.time()
+ self.addrTo = CAddress()
+ self.addrFrom = CAddress()
+ self.nNonce = random.getrandbits(64)
+ self.strSubVer = MY_SUBVERSION
+ self.nStartingHeight = -1
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ if self.nVersion == 10300:
+ self.nVersion = 300
+ self.nServices = struct.unpack("<Q", f.read(8))[0]
+ self.nTime = struct.unpack("<q", f.read(8))[0]
+ self.addrTo = CAddress()
+ self.addrTo.deserialize(f)
+ if self.nVersion >= 106:
+ self.addrFrom = CAddress()
+ self.addrFrom.deserialize(f)
+ self.nNonce = struct.unpack("<Q", f.read(8))[0]
+ self.strSubVer = deser_string(f)
+ if self.nVersion >= 209:
+ self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
+ else:
+ self.nStartingHeight = None
+ else:
+ self.addrFrom = None
+ self.nNonce = None
+ self.strSubVer = None
+ self.nStartingHeight = None
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<i", self.nVersion)
+ r += struct.pack("<Q", self.nServices)
+ r += struct.pack("<q", self.nTime)
+ r += self.addrTo.serialize()
+ r += self.addrFrom.serialize()
+ r += struct.pack("<Q", self.nNonce)
+ r += ser_string(self.strSubVer)
+ r += struct.pack("<i", self.nStartingHeight)
+ return r
+
+ def __repr__(self):
+ return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
+ % (self.nVersion, self.nServices, time.ctime(self.nTime),
+ repr(self.addrTo), repr(self.addrFrom), self.nNonce,
+ self.strSubVer, self.nStartingHeight)
+
+
+class msg_verack(object):
+ command = "verack"
+
+ def __init__(self):
+ pass
+
+ def deserialize(self, f):
+ pass
+
+ def serialize(self):
+ return ""
+
+ def __repr__(self):
+ return "msg_verack()"
+
+
+class msg_addr(object):
+ command = "addr"
+
+ def __init__(self):
+ self.addrs = []
+
+ def deserialize(self, f):
+ self.addrs = deser_vector(f, CAddress)
+
+ def serialize(self):
+ return ser_vector(self.addrs)
+
+ def __repr__(self):
+ return "msg_addr(addrs=%s)" % (repr(self.addrs))
+
+
+class msg_alert(object):
+ command = "alert"
+
+ def __init__(self):
+ self.alert = CAlert()
+
+ def deserialize(self, f):
+ self.alert = CAlert()
+ self.alert.deserialize(f)
+
+ def serialize(self):
+ r = ""
+ r += self.alert.serialize()
+ return r
+
+ def __repr__(self):
+ return "msg_alert(alert=%s)" % (repr(self.alert), )
+
+
+class msg_inv(object):
+ command = "inv"
+
+ def __init__(self, inv=None):
+ if inv is None:
+ self.inv = []
+ else:
+ self.inv = inv
+
+ def deserialize(self, f):
+ self.inv = deser_vector(f, CInv)
+
+ def serialize(self):
+ return ser_vector(self.inv)
+
+ def __repr__(self):
+ return "msg_inv(inv=%s)" % (repr(self.inv))
+
+
+class msg_getdata(object):
+ command = "getdata"
+
+ def __init__(self):
+ self.inv = []
+
+ def deserialize(self, f):
+ self.inv = deser_vector(f, CInv)
+
+ def serialize(self):
+ return ser_vector(self.inv)
+
+ def __repr__(self):
+ return "msg_getdata(inv=%s)" % (repr(self.inv))
+
+
+class msg_getblocks(object):
+ command = "getblocks"
+
+ def __init__(self):
+ self.locator = CBlockLocator()
+ self.hashstop = 0L
+
+ def deserialize(self, f):
+ self.locator = CBlockLocator()
+ self.locator.deserialize(f)
+ self.hashstop = deser_uint256(f)
+
+ def serialize(self):
+ r = ""
+ r += self.locator.serialize()
+ r += ser_uint256(self.hashstop)
+ return r
+
+ def __repr__(self):
+ return "msg_getblocks(locator=%s hashstop=%064x)" \
+ % (repr(self.locator), self.hashstop)
+
+
+class msg_tx(object):
+ command = "tx"
+
+ def __init__(self, tx=CTransaction()):
+ self.tx = tx
+
+ def deserialize(self, f):
+ self.tx.deserialize(f)
+
+ def serialize(self):
+ return self.tx.serialize()
+
+ def __repr__(self):
+ return "msg_tx(tx=%s)" % (repr(self.tx))
+
+
+class msg_block(object):
+ command = "block"
+
+ def __init__(self, block=None):
+ if block is None:
+ self.block = CBlock()
+ else:
+ self.block = block
+
+ def deserialize(self, f):
+ self.block.deserialize(f)
+
+ def serialize(self):
+ return self.block.serialize()
+
+ def __repr__(self):
+ return "msg_block(block=%s)" % (repr(self.block))
+
+
+class msg_getaddr(object):
+ command = "getaddr"
+
+ def __init__(self):
+ pass
+
+ def deserialize(self, f):
+ pass
+
+ def serialize(self):
+ return ""
+
+ def __repr__(self):
+ return "msg_getaddr()"
+
+
+class msg_ping_prebip31(object):
+ command = "ping"
+
+ def __init__(self):
+ pass
+
+ def deserialize(self, f):
+ pass
+
+ def serialize(self):
+ return ""
+
+ def __repr__(self):
+ return "msg_ping() (pre-bip31)"
+
+
+class msg_ping(object):
+ command = "ping"
+
+ def __init__(self, nonce=0L):
+ self.nonce = nonce
+
+ def deserialize(self, f):
+ self.nonce = struct.unpack("<Q", f.read(8))[0]
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<Q", self.nonce)
+ return r
+
+ def __repr__(self):
+ return "msg_ping(nonce=%08x)" % self.nonce
+
+
+class msg_pong(object):
+ command = "pong"
+
+ def __init__(self, nonce=0L):
+ self.nonce = nonce
+
+ def deserialize(self, f):
+ self.nonce = struct.unpack("<Q", f.read(8))[0]
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<Q", self.nonce)
+ return r
+
+ def __repr__(self):
+ return "msg_pong(nonce=%08x)" % self.nonce
+
+
+class msg_mempool(object):
+ command = "mempool"
+
+ def __init__(self):
+ pass
+
+ def deserialize(self, f):
+ pass
+
+ def serialize(self):
+ return ""
+
+ def __repr__(self):
+ return "msg_mempool()"
+
+
+# getheaders message has
+# number of entries
+# vector of hashes
+# hash_stop (hash of last desired block header, 0 to get as many as possible)
+class msg_getheaders(object):
+ command = "getheaders"
+
+ def __init__(self):
+ self.locator = CBlockLocator()
+ self.hashstop = 0L
+
+ def deserialize(self, f):
+ self.locator = CBlockLocator()
+ self.locator.deserialize(f)
+ self.hashstop = deser_uint256(f)
+
+ def serialize(self):
+ r = ""
+ r += self.locator.serialize()
+ r += ser_uint256(self.hashstop)
+ return r
+
+ def __repr__(self):
+ return "msg_getheaders(locator=%s, stop=%064x)" \
+ % (repr(self.locator), self.hashstop)
+
+
+# headers message has
+# <count> <vector of block headers>
+class msg_headers(object):
+ command = "headers"
+
+ def __init__(self):
+ self.headers = []
+
+ def deserialize(self, f):
+ # comment in bitcoind indicates these should be deserialized as blocks
+ blocks = deser_vector(f, CBlock)
+ for x in blocks:
+ self.headers.append(CBlockHeader(x))
+
+ def serialize(self):
+ blocks = [CBlock(x) for x in self.headers]
+ return ser_vector(blocks)
+
+ def __repr__(self):
+ return "msg_headers(headers=%s)" % repr(self.headers)
+
+
+class msg_reject(object):
+ command = "reject"
+
+ def __init__(self):
+ self.message = ""
+ self.code = ""
+ self.reason = ""
+ self.data = 0L
+
+ def deserialize(self, f):
+ self.message = deser_string(f)
+ self.code = struct.unpack("<B", f.read(1))[0]
+ self.reason = deser_string(f)
+ if (self.message == "block" or self.message == "tx"):
+ self.data = deser_uint256(f)
+
+ def serialize(self):
+ r = ser_string(self.message)
+ r += struct.pack("<B", self.code)
+ r += ser_string(self.reason)
+ if (self.message == "block" or self.message == "tx"):
+ r += ser_uint256(self.data)
+ return r
+
+ def __repr__(self):
+ return "msg_reject: %s %d %s [%064x]" \
+ % (self.message, self.code, self.reason, self.data)
+
+
+# This is what a callback should look like for NodeConn
+# Reimplement the on_* functions to provide handling for events
+class NodeConnCB(object):
+ def __init__(self):
+ self.verack_received = False
+
+ # Derived classes should call this function once to set the message map
+ # which associates the derived classes' functions to incoming messages
+ def create_callback_map(self):
+ self.cbmap = {
+ "version": self.on_version,
+ "verack": self.on_verack,
+ "addr": self.on_addr,
+ "alert": self.on_alert,
+ "inv": self.on_inv,
+ "getdata": self.on_getdata,
+ "getblocks": self.on_getblocks,
+ "tx": self.on_tx,
+ "block": self.on_block,
+ "getaddr": self.on_getaddr,
+ "ping": self.on_ping,
+ "pong": self.on_pong,
+ "headers": self.on_headers,
+ "getheaders": self.on_getheaders,
+ "reject": self.on_reject,
+ "mempool": self.on_mempool
+ }
+
+ def deliver(self, conn, message):
+ with mininode_lock:
+ try:
+ self.cbmap[message.command](conn, message)
+ except:
+ print "ERROR delivering %s (%s)" % (repr(message),
+ sys.exc_info()[0])
+
+ def on_version(self, conn, message):
+ if message.nVersion >= 209:
+ conn.send_message(msg_verack())
+ conn.ver_send = min(MY_VERSION, message.nVersion)
+ if message.nVersion < 209:
+ conn.ver_recv = conn.ver_send
+
+ def on_verack(self, conn, message):
+ conn.ver_recv = conn.ver_send
+ self.verack_received = True
+
+ def on_inv(self, conn, message):
+ want = msg_getdata()
+ for i in message.inv:
+ if i.type != 0:
+ want.inv.append(i)
+ if len(want.inv):
+ conn.send_message(want)
+
+ def on_addr(self, conn, message): pass
+ def on_alert(self, conn, message): pass
+ def on_getdata(self, conn, message): pass
+ def on_getblocks(self, conn, message): pass
+ def on_tx(self, conn, message): pass
+ def on_block(self, conn, message): pass
+ def on_getaddr(self, conn, message): pass
+ def on_headers(self, conn, message): pass
+ def on_getheaders(self, conn, message): pass
+ def on_ping(self, conn, message):
+ if conn.ver_send > BIP0031_VERSION:
+ conn.send_message(msg_pong(message.nonce))
+ def on_reject(self, conn, message): pass
+ def on_close(self, conn): pass
+ def on_mempool(self, conn): pass
+ def on_pong(self, conn, message): pass
+
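
The intended subclassing pattern, sketched (Python 2; the class name is hypothetical): override the on_* handlers of interest and call create_callback_map() once so deliver() can route incoming messages, while the base class keeps answering version, verack, inv and ping.

    from mininode import NodeConnCB

    class InvCountingCB(NodeConnCB):
        def __init__(self):
            NodeConnCB.__init__(self)
            self.create_callback_map()
            self.inv_count = 0

        def on_inv(self, conn, message):
            self.inv_count += len(message.inv)
            NodeConnCB.on_inv(self, conn, message)   # still request the announced items
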
+
+# The actual NodeConn class
+# This class provides an interface for a p2p connection to a specified node
+class NodeConn(asyncore.dispatcher):
+ messagemap = {
+ "version": msg_version,
+ "verack": msg_verack,
+ "addr": msg_addr,
+ "alert": msg_alert,
+ "inv": msg_inv,
+ "getdata": msg_getdata,
+ "getblocks": msg_getblocks,
+ "tx": msg_tx,
+ "block": msg_block,
+ "getaddr": msg_getaddr,
+ "ping": msg_ping,
+ "pong": msg_pong,
+ "headers": msg_headers,
+ "getheaders": msg_getheaders,
+ "reject": msg_reject,
+ "mempool": msg_mempool
+ }
+ MAGIC_BYTES = {
+ "mainnet": "\xf9\xbe\xb4\xd9", # mainnet
+ "testnet3": "\x0b\x11\x09\x07", # testnet3
+ "regtest": "\xfa\xbf\xb5\xda" # regtest
+ }
+
+ def __init__(self, dstaddr, dstport, rpc, callback, net="regtest"):
+ asyncore.dispatcher.__init__(self, map=mininode_socket_map)
+ self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
+ self.dstaddr = dstaddr
+ self.dstport = dstport
+ self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.sendbuf = ""
+ self.recvbuf = ""
+ self.ver_send = 209
+ self.ver_recv = 209
+ self.last_sent = 0
+ self.state = "connecting"
+ self.network = net
+ self.cb = callback
+ self.disconnect = False
+
+ # stuff version msg into sendbuf
+ vt = msg_version()
+ vt.addrTo.ip = self.dstaddr
+ vt.addrTo.port = self.dstport
+ vt.addrFrom.ip = "0.0.0.0"
+ vt.addrFrom.port = 0
+ self.send_message(vt, True)
+ print 'MiniNode: Connecting to Bitcoin Node IP # ' + dstaddr + ':' \
+ + str(dstport)
+
+ try:
+ self.connect((dstaddr, dstport))
+ except:
+ self.handle_close()
+ self.rpc = rpc
+
+ def show_debug_msg(self, msg):
+ self.log.debug(msg)
+
+ def handle_connect(self):
+ self.show_debug_msg("MiniNode: Connected & Listening: \n")
+ self.state = "connected"
+
+ def handle_close(self):
+ self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
+ % (self.dstaddr, self.dstport))
+ self.state = "closed"
+ self.recvbuf = ""
+ self.sendbuf = ""
+ try:
+ self.close()
+ except:
+ pass
+ self.cb.on_close(self)
+
+ def handle_read(self):
+ try:
+ t = self.recv(8192)
+ if len(t) > 0:
+ self.recvbuf += t
+ self.got_data()
+ except:
+ pass
+
+ def readable(self):
+ return True
+
+ def writable(self):
+ with mininode_lock:
+ length = len(self.sendbuf)
+ return (length > 0)
+
+ def handle_write(self):
+ with mininode_lock:
+ try:
+ sent = self.send(self.sendbuf)
+ except:
+ self.handle_close()
+ return
+ self.sendbuf = self.sendbuf[sent:]
+
+ def got_data(self):
+ while True:
+ if len(self.recvbuf) < 4:
+ return
+ if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
+ raise ValueError("got garbage %s" % repr(self.recvbuf))
+ if self.ver_recv < 209:
+ if len(self.recvbuf) < 4 + 12 + 4:
+ return
+ command = self.recvbuf[4:4+12].split("\x00", 1)[0]
+ msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
+ checksum = None
+ if len(self.recvbuf) < 4 + 12 + 4 + msglen:
+ return
+ msg = self.recvbuf[4+12+4:4+12+4+msglen]
+ self.recvbuf = self.recvbuf[4+12+4+msglen:]
+ else:
+ if len(self.recvbuf) < 4 + 12 + 4 + 4:
+ return
+ command = self.recvbuf[4:4+12].split("\x00", 1)[0]
+ msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
+ checksum = self.recvbuf[4+12+4:4+12+4+4]
+ if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
+ return
+ msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
+ th = sha256(msg)
+ h = sha256(th)
+ if checksum != h[:4]:
+ raise ValueError("got bad checksum " + repr(self.recvbuf))
+ self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
+ if command in self.messagemap:
+ f = cStringIO.StringIO(msg)
+ t = self.messagemap[command]()
+ t.deserialize(f)
+ self.got_message(t)
+ else:
+ self.show_debug_msg("Unknown command: '" + command + "' " +
+ repr(msg))
+
+ def send_message(self, message, pushbuf=False):
+ if self.state != "connected" and not pushbuf:
+ return
+ self.show_debug_msg("Send %s" % repr(message))
+ command = message.command
+ data = message.serialize()
+ tmsg = self.MAGIC_BYTES[self.network]
+ tmsg += command
+ tmsg += "\x00" * (12 - len(command))
+ tmsg += struct.pack("<I", len(data))
+ if self.ver_send >= 209:
+ th = sha256(data)
+ h = sha256(th)
+ tmsg += h[:4]
+ tmsg += data
+ with mininode_lock:
+ self.sendbuf += tmsg
+ self.last_sent = time.time()
+
+ def got_message(self, message):
+ if message.command == "version":
+ if message.nVersion <= BIP0031_VERSION:
+ self.messagemap['ping'] = msg_ping_prebip31
+ if self.last_sent + 30 * 60 < time.time():
+ self.send_message(self.messagemap['ping']())
+ self.show_debug_msg("Recv %s" % repr(message))
+ self.cb.deliver(self, message)
+
+ def disconnect_node(self):
+ self.disconnect = True
+
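
The wire envelope that send_message() builds and got_data() parses, sketched by hand for a regtest verack (Python 2; sha256 is the helper defined earlier in mininode.py):

    import struct
    from mininode import sha256, msg_verack

    payload = msg_verack().serialize()                    # empty body
    envelope = "\xfa\xbf\xb5\xda"                         # regtest magic bytes
    envelope += "verack" + "\x00" * (12 - len("verack"))  # command, NUL-padded to 12 bytes
    envelope += struct.pack("<I", len(payload))           # payload length
    envelope += sha256(sha256(payload))[:4]               # double-SHA256 checksum
    envelope += payload
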
+
+class NetworkThread(Thread):
+ def run(self):
+ while mininode_socket_map:
+ # We check for whether to disconnect outside of the asyncore
+            # loop to work around the behavior of asyncore when using
+ # select
+ disconnected = []
+ for fd, obj in mininode_socket_map.items():
+ if obj.disconnect:
+ disconnected.append(obj)
+ [ obj.handle_close() for obj in disconnected ]
+ asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
+
+
+# An exception we can raise if we detect a potential disconnect
+# (p2p or rpc) before the test is complete
+class EarlyDisconnectError(Exception):
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return repr(self.value)
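
How the pieces above are typically wired together in a test, sketched under Python 2 (assumes a regtest bitcoind already listening on its default p2p port 18444; real tests pass the node's RPC proxy where None is used here):

    from mininode import NodeConn, NodeConnCB, NetworkThread

    cb = NodeConnCB()
    cb.create_callback_map()
    conn = NodeConn('127.0.0.1', 18444, None, cb)   # real tests pass the node's RPC proxy
    NetworkThread().start()                         # drives asyncore for every NodeConn
    # ... exchange messages via conn.send_message() and the on_* callbacks ...
    conn.disconnect_node()                          # NetworkThread notices and closes it
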
diff --git a/qa/rpc-tests/script.py b/qa/rpc-tests/script.py
new file mode 100644
index 0000000000..03695b8635
--- /dev/null
+++ b/qa/rpc-tests/script.py
@@ -0,0 +1,896 @@
+#
+# script.py
+#
+# This file is modified from python-bitcoinlib.
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+"""Scripts
+
+Functionality to build scripts, as well as SignatureHash().
+"""
+
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+from mininode import CTransaction, CTxOut, hash256
+
+import sys
+bchr = chr
+bord = ord
+if sys.version > '3':
+ long = int
+ bchr = lambda x: bytes([x])
+ bord = lambda x: x
+
+import copy
+import struct
+
+import bignum
+
+MAX_SCRIPT_SIZE = 10000
+MAX_SCRIPT_ELEMENT_SIZE = 520
+MAX_SCRIPT_OPCODES = 201
+
+OPCODE_NAMES = {}
+
+_opcode_instances = []
+class CScriptOp(int):
+ """A single script opcode"""
+ __slots__ = []
+
+ @staticmethod
+ def encode_op_pushdata(d):
+ """Encode a PUSHDATA op, returning bytes"""
+ if len(d) < 0x4c:
+ return b'' + bchr(len(d)) + d # OP_PUSHDATA
+ elif len(d) <= 0xff:
+ return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1
+ elif len(d) <= 0xffff:
+ return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
+ elif len(d) <= 0xffffffff:
+ return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
+ else:
+ raise ValueError("Data too long to encode in a PUSHDATA op")
+
+ @staticmethod
+ def encode_op_n(n):
+ """Encode a small integer op, returning an opcode"""
+ if not (0 <= n <= 16):
+ raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
+
+ if n == 0:
+ return OP_0
+ else:
+ return CScriptOp(OP_1 + n-1)
+
+ def decode_op_n(self):
+ """Decode a small integer opcode, returning an integer"""
+ if self == OP_0:
+ return 0
+
+ if not (self == OP_0 or OP_1 <= self <= OP_16):
+ raise ValueError('op %r is not an OP_N' % self)
+
+ return int(self - OP_1+1)
+
+ def is_small_int(self):
+ """Return true if the op pushes a small integer to the stack"""
+ if 0x51 <= self <= 0x60 or self == 0:
+ return True
+ else:
+ return False
+
+ def __str__(self):
+ return repr(self)
+
+ def __repr__(self):
+ if self in OPCODE_NAMES:
+ return OPCODE_NAMES[self]
+ else:
+ return 'CScriptOp(0x%x)' % self
+
+ def __new__(cls, n):
+ try:
+ return _opcode_instances[n]
+ except IndexError:
+ assert len(_opcode_instances) == n
+ _opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
+ return _opcode_instances[n]
+
+# Populate opcode instance table
+for n in range(0xff+1):
+ CScriptOp(n)
+
+
+# push value
+OP_0 = CScriptOp(0x00)
+OP_FALSE = OP_0
+OP_PUSHDATA1 = CScriptOp(0x4c)
+OP_PUSHDATA2 = CScriptOp(0x4d)
+OP_PUSHDATA4 = CScriptOp(0x4e)
+OP_1NEGATE = CScriptOp(0x4f)
+OP_RESERVED = CScriptOp(0x50)
+OP_1 = CScriptOp(0x51)
+OP_TRUE=OP_1
+OP_2 = CScriptOp(0x52)
+OP_3 = CScriptOp(0x53)
+OP_4 = CScriptOp(0x54)
+OP_5 = CScriptOp(0x55)
+OP_6 = CScriptOp(0x56)
+OP_7 = CScriptOp(0x57)
+OP_8 = CScriptOp(0x58)
+OP_9 = CScriptOp(0x59)
+OP_10 = CScriptOp(0x5a)
+OP_11 = CScriptOp(0x5b)
+OP_12 = CScriptOp(0x5c)
+OP_13 = CScriptOp(0x5d)
+OP_14 = CScriptOp(0x5e)
+OP_15 = CScriptOp(0x5f)
+OP_16 = CScriptOp(0x60)
+
+# control
+OP_NOP = CScriptOp(0x61)
+OP_VER = CScriptOp(0x62)
+OP_IF = CScriptOp(0x63)
+OP_NOTIF = CScriptOp(0x64)
+OP_VERIF = CScriptOp(0x65)
+OP_VERNOTIF = CScriptOp(0x66)
+OP_ELSE = CScriptOp(0x67)
+OP_ENDIF = CScriptOp(0x68)
+OP_VERIFY = CScriptOp(0x69)
+OP_RETURN = CScriptOp(0x6a)
+
+# stack ops
+OP_TOALTSTACK = CScriptOp(0x6b)
+OP_FROMALTSTACK = CScriptOp(0x6c)
+OP_2DROP = CScriptOp(0x6d)
+OP_2DUP = CScriptOp(0x6e)
+OP_3DUP = CScriptOp(0x6f)
+OP_2OVER = CScriptOp(0x70)
+OP_2ROT = CScriptOp(0x71)
+OP_2SWAP = CScriptOp(0x72)
+OP_IFDUP = CScriptOp(0x73)
+OP_DEPTH = CScriptOp(0x74)
+OP_DROP = CScriptOp(0x75)
+OP_DUP = CScriptOp(0x76)
+OP_NIP = CScriptOp(0x77)
+OP_OVER = CScriptOp(0x78)
+OP_PICK = CScriptOp(0x79)
+OP_ROLL = CScriptOp(0x7a)
+OP_ROT = CScriptOp(0x7b)
+OP_SWAP = CScriptOp(0x7c)
+OP_TUCK = CScriptOp(0x7d)
+
+# splice ops
+OP_CAT = CScriptOp(0x7e)
+OP_SUBSTR = CScriptOp(0x7f)
+OP_LEFT = CScriptOp(0x80)
+OP_RIGHT = CScriptOp(0x81)
+OP_SIZE = CScriptOp(0x82)
+
+# bit logic
+OP_INVERT = CScriptOp(0x83)
+OP_AND = CScriptOp(0x84)
+OP_OR = CScriptOp(0x85)
+OP_XOR = CScriptOp(0x86)
+OP_EQUAL = CScriptOp(0x87)
+OP_EQUALVERIFY = CScriptOp(0x88)
+OP_RESERVED1 = CScriptOp(0x89)
+OP_RESERVED2 = CScriptOp(0x8a)
+
+# numeric
+OP_1ADD = CScriptOp(0x8b)
+OP_1SUB = CScriptOp(0x8c)
+OP_2MUL = CScriptOp(0x8d)
+OP_2DIV = CScriptOp(0x8e)
+OP_NEGATE = CScriptOp(0x8f)
+OP_ABS = CScriptOp(0x90)
+OP_NOT = CScriptOp(0x91)
+OP_0NOTEQUAL = CScriptOp(0x92)
+
+OP_ADD = CScriptOp(0x93)
+OP_SUB = CScriptOp(0x94)
+OP_MUL = CScriptOp(0x95)
+OP_DIV = CScriptOp(0x96)
+OP_MOD = CScriptOp(0x97)
+OP_LSHIFT = CScriptOp(0x98)
+OP_RSHIFT = CScriptOp(0x99)
+
+OP_BOOLAND = CScriptOp(0x9a)
+OP_BOOLOR = CScriptOp(0x9b)
+OP_NUMEQUAL = CScriptOp(0x9c)
+OP_NUMEQUALVERIFY = CScriptOp(0x9d)
+OP_NUMNOTEQUAL = CScriptOp(0x9e)
+OP_LESSTHAN = CScriptOp(0x9f)
+OP_GREATERTHAN = CScriptOp(0xa0)
+OP_LESSTHANOREQUAL = CScriptOp(0xa1)
+OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
+OP_MIN = CScriptOp(0xa3)
+OP_MAX = CScriptOp(0xa4)
+
+OP_WITHIN = CScriptOp(0xa5)
+
+# crypto
+OP_RIPEMD160 = CScriptOp(0xa6)
+OP_SHA1 = CScriptOp(0xa7)
+OP_SHA256 = CScriptOp(0xa8)
+OP_HASH160 = CScriptOp(0xa9)
+OP_HASH256 = CScriptOp(0xaa)
+OP_CODESEPARATOR = CScriptOp(0xab)
+OP_CHECKSIG = CScriptOp(0xac)
+OP_CHECKSIGVERIFY = CScriptOp(0xad)
+OP_CHECKMULTISIG = CScriptOp(0xae)
+OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
+
+# expansion
+OP_NOP1 = CScriptOp(0xb0)
+OP_NOP2 = CScriptOp(0xb1)
+OP_NOP3 = CScriptOp(0xb2)
+OP_NOP4 = CScriptOp(0xb3)
+OP_NOP5 = CScriptOp(0xb4)
+OP_NOP6 = CScriptOp(0xb5)
+OP_NOP7 = CScriptOp(0xb6)
+OP_NOP8 = CScriptOp(0xb7)
+OP_NOP9 = CScriptOp(0xb8)
+OP_NOP10 = CScriptOp(0xb9)
+
+# template matching params
+OP_SMALLINTEGER = CScriptOp(0xfa)
+OP_PUBKEYS = CScriptOp(0xfb)
+OP_PUBKEYHASH = CScriptOp(0xfd)
+OP_PUBKEY = CScriptOp(0xfe)
+
+OP_INVALIDOPCODE = CScriptOp(0xff)
+
+VALID_OPCODES = {
+ OP_1NEGATE,
+ OP_RESERVED,
+ OP_1,
+ OP_2,
+ OP_3,
+ OP_4,
+ OP_5,
+ OP_6,
+ OP_7,
+ OP_8,
+ OP_9,
+ OP_10,
+ OP_11,
+ OP_12,
+ OP_13,
+ OP_14,
+ OP_15,
+ OP_16,
+
+ OP_NOP,
+ OP_VER,
+ OP_IF,
+ OP_NOTIF,
+ OP_VERIF,
+ OP_VERNOTIF,
+ OP_ELSE,
+ OP_ENDIF,
+ OP_VERIFY,
+ OP_RETURN,
+
+ OP_TOALTSTACK,
+ OP_FROMALTSTACK,
+ OP_2DROP,
+ OP_2DUP,
+ OP_3DUP,
+ OP_2OVER,
+ OP_2ROT,
+ OP_2SWAP,
+ OP_IFDUP,
+ OP_DEPTH,
+ OP_DROP,
+ OP_DUP,
+ OP_NIP,
+ OP_OVER,
+ OP_PICK,
+ OP_ROLL,
+ OP_ROT,
+ OP_SWAP,
+ OP_TUCK,
+
+ OP_CAT,
+ OP_SUBSTR,
+ OP_LEFT,
+ OP_RIGHT,
+ OP_SIZE,
+
+ OP_INVERT,
+ OP_AND,
+ OP_OR,
+ OP_XOR,
+ OP_EQUAL,
+ OP_EQUALVERIFY,
+ OP_RESERVED1,
+ OP_RESERVED2,
+
+ OP_1ADD,
+ OP_1SUB,
+ OP_2MUL,
+ OP_2DIV,
+ OP_NEGATE,
+ OP_ABS,
+ OP_NOT,
+ OP_0NOTEQUAL,
+
+ OP_ADD,
+ OP_SUB,
+ OP_MUL,
+ OP_DIV,
+ OP_MOD,
+ OP_LSHIFT,
+ OP_RSHIFT,
+
+ OP_BOOLAND,
+ OP_BOOLOR,
+ OP_NUMEQUAL,
+ OP_NUMEQUALVERIFY,
+ OP_NUMNOTEQUAL,
+ OP_LESSTHAN,
+ OP_GREATERTHAN,
+ OP_LESSTHANOREQUAL,
+ OP_GREATERTHANOREQUAL,
+ OP_MIN,
+ OP_MAX,
+
+ OP_WITHIN,
+
+ OP_RIPEMD160,
+ OP_SHA1,
+ OP_SHA256,
+ OP_HASH160,
+ OP_HASH256,
+ OP_CODESEPARATOR,
+ OP_CHECKSIG,
+ OP_CHECKSIGVERIFY,
+ OP_CHECKMULTISIG,
+ OP_CHECKMULTISIGVERIFY,
+
+ OP_NOP1,
+ OP_NOP2,
+ OP_NOP3,
+ OP_NOP4,
+ OP_NOP5,
+ OP_NOP6,
+ OP_NOP7,
+ OP_NOP8,
+ OP_NOP9,
+ OP_NOP10,
+
+ OP_SMALLINTEGER,
+ OP_PUBKEYS,
+ OP_PUBKEYHASH,
+ OP_PUBKEY,
+}
+
+OPCODE_NAMES.update({
+ OP_0 : 'OP_0',
+ OP_PUSHDATA1 : 'OP_PUSHDATA1',
+ OP_PUSHDATA2 : 'OP_PUSHDATA2',
+ OP_PUSHDATA4 : 'OP_PUSHDATA4',
+ OP_1NEGATE : 'OP_1NEGATE',
+ OP_RESERVED : 'OP_RESERVED',
+ OP_1 : 'OP_1',
+ OP_2 : 'OP_2',
+ OP_3 : 'OP_3',
+ OP_4 : 'OP_4',
+ OP_5 : 'OP_5',
+ OP_6 : 'OP_6',
+ OP_7 : 'OP_7',
+ OP_8 : 'OP_8',
+ OP_9 : 'OP_9',
+ OP_10 : 'OP_10',
+ OP_11 : 'OP_11',
+ OP_12 : 'OP_12',
+ OP_13 : 'OP_13',
+ OP_14 : 'OP_14',
+ OP_15 : 'OP_15',
+ OP_16 : 'OP_16',
+ OP_NOP : 'OP_NOP',
+ OP_VER : 'OP_VER',
+ OP_IF : 'OP_IF',
+ OP_NOTIF : 'OP_NOTIF',
+ OP_VERIF : 'OP_VERIF',
+ OP_VERNOTIF : 'OP_VERNOTIF',
+ OP_ELSE : 'OP_ELSE',
+ OP_ENDIF : 'OP_ENDIF',
+ OP_VERIFY : 'OP_VERIFY',
+ OP_RETURN : 'OP_RETURN',
+ OP_TOALTSTACK : 'OP_TOALTSTACK',
+ OP_FROMALTSTACK : 'OP_FROMALTSTACK',
+ OP_2DROP : 'OP_2DROP',
+ OP_2DUP : 'OP_2DUP',
+ OP_3DUP : 'OP_3DUP',
+ OP_2OVER : 'OP_2OVER',
+ OP_2ROT : 'OP_2ROT',
+ OP_2SWAP : 'OP_2SWAP',
+ OP_IFDUP : 'OP_IFDUP',
+ OP_DEPTH : 'OP_DEPTH',
+ OP_DROP : 'OP_DROP',
+ OP_DUP : 'OP_DUP',
+ OP_NIP : 'OP_NIP',
+ OP_OVER : 'OP_OVER',
+ OP_PICK : 'OP_PICK',
+ OP_ROLL : 'OP_ROLL',
+ OP_ROT : 'OP_ROT',
+ OP_SWAP : 'OP_SWAP',
+ OP_TUCK : 'OP_TUCK',
+ OP_CAT : 'OP_CAT',
+ OP_SUBSTR : 'OP_SUBSTR',
+ OP_LEFT : 'OP_LEFT',
+ OP_RIGHT : 'OP_RIGHT',
+ OP_SIZE : 'OP_SIZE',
+ OP_INVERT : 'OP_INVERT',
+ OP_AND : 'OP_AND',
+ OP_OR : 'OP_OR',
+ OP_XOR : 'OP_XOR',
+ OP_EQUAL : 'OP_EQUAL',
+ OP_EQUALVERIFY : 'OP_EQUALVERIFY',
+ OP_RESERVED1 : 'OP_RESERVED1',
+ OP_RESERVED2 : 'OP_RESERVED2',
+ OP_1ADD : 'OP_1ADD',
+ OP_1SUB : 'OP_1SUB',
+ OP_2MUL : 'OP_2MUL',
+ OP_2DIV : 'OP_2DIV',
+ OP_NEGATE : 'OP_NEGATE',
+ OP_ABS : 'OP_ABS',
+ OP_NOT : 'OP_NOT',
+ OP_0NOTEQUAL : 'OP_0NOTEQUAL',
+ OP_ADD : 'OP_ADD',
+ OP_SUB : 'OP_SUB',
+ OP_MUL : 'OP_MUL',
+ OP_DIV : 'OP_DIV',
+ OP_MOD : 'OP_MOD',
+ OP_LSHIFT : 'OP_LSHIFT',
+ OP_RSHIFT : 'OP_RSHIFT',
+ OP_BOOLAND : 'OP_BOOLAND',
+ OP_BOOLOR : 'OP_BOOLOR',
+ OP_NUMEQUAL : 'OP_NUMEQUAL',
+ OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
+ OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
+ OP_LESSTHAN : 'OP_LESSTHAN',
+ OP_GREATERTHAN : 'OP_GREATERTHAN',
+ OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
+ OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
+ OP_MIN : 'OP_MIN',
+ OP_MAX : 'OP_MAX',
+ OP_WITHIN : 'OP_WITHIN',
+ OP_RIPEMD160 : 'OP_RIPEMD160',
+ OP_SHA1 : 'OP_SHA1',
+ OP_SHA256 : 'OP_SHA256',
+ OP_HASH160 : 'OP_HASH160',
+ OP_HASH256 : 'OP_HASH256',
+ OP_CODESEPARATOR : 'OP_CODESEPARATOR',
+ OP_CHECKSIG : 'OP_CHECKSIG',
+ OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
+ OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
+ OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
+ OP_NOP1 : 'OP_NOP1',
+ OP_NOP2 : 'OP_NOP2',
+ OP_NOP3 : 'OP_NOP3',
+ OP_NOP4 : 'OP_NOP4',
+ OP_NOP5 : 'OP_NOP5',
+ OP_NOP6 : 'OP_NOP6',
+ OP_NOP7 : 'OP_NOP7',
+ OP_NOP8 : 'OP_NOP8',
+ OP_NOP9 : 'OP_NOP9',
+ OP_NOP10 : 'OP_NOP10',
+ OP_SMALLINTEGER : 'OP_SMALLINTEGER',
+ OP_PUBKEYS : 'OP_PUBKEYS',
+ OP_PUBKEYHASH : 'OP_PUBKEYHASH',
+ OP_PUBKEY : 'OP_PUBKEY',
+ OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
+})
+
+OPCODES_BY_NAME = {
+ 'OP_0' : OP_0,
+ 'OP_PUSHDATA1' : OP_PUSHDATA1,
+ 'OP_PUSHDATA2' : OP_PUSHDATA2,
+ 'OP_PUSHDATA4' : OP_PUSHDATA4,
+ 'OP_1NEGATE' : OP_1NEGATE,
+ 'OP_RESERVED' : OP_RESERVED,
+ 'OP_1' : OP_1,
+ 'OP_2' : OP_2,
+ 'OP_3' : OP_3,
+ 'OP_4' : OP_4,
+ 'OP_5' : OP_5,
+ 'OP_6' : OP_6,
+ 'OP_7' : OP_7,
+ 'OP_8' : OP_8,
+ 'OP_9' : OP_9,
+ 'OP_10' : OP_10,
+ 'OP_11' : OP_11,
+ 'OP_12' : OP_12,
+ 'OP_13' : OP_13,
+ 'OP_14' : OP_14,
+ 'OP_15' : OP_15,
+ 'OP_16' : OP_16,
+ 'OP_NOP' : OP_NOP,
+ 'OP_VER' : OP_VER,
+ 'OP_IF' : OP_IF,
+ 'OP_NOTIF' : OP_NOTIF,
+ 'OP_VERIF' : OP_VERIF,
+ 'OP_VERNOTIF' : OP_VERNOTIF,
+ 'OP_ELSE' : OP_ELSE,
+ 'OP_ENDIF' : OP_ENDIF,
+ 'OP_VERIFY' : OP_VERIFY,
+ 'OP_RETURN' : OP_RETURN,
+ 'OP_TOALTSTACK' : OP_TOALTSTACK,
+ 'OP_FROMALTSTACK' : OP_FROMALTSTACK,
+ 'OP_2DROP' : OP_2DROP,
+ 'OP_2DUP' : OP_2DUP,
+ 'OP_3DUP' : OP_3DUP,
+ 'OP_2OVER' : OP_2OVER,
+ 'OP_2ROT' : OP_2ROT,
+ 'OP_2SWAP' : OP_2SWAP,
+ 'OP_IFDUP' : OP_IFDUP,
+ 'OP_DEPTH' : OP_DEPTH,
+ 'OP_DROP' : OP_DROP,
+ 'OP_DUP' : OP_DUP,
+ 'OP_NIP' : OP_NIP,
+ 'OP_OVER' : OP_OVER,
+ 'OP_PICK' : OP_PICK,
+ 'OP_ROLL' : OP_ROLL,
+ 'OP_ROT' : OP_ROT,
+ 'OP_SWAP' : OP_SWAP,
+ 'OP_TUCK' : OP_TUCK,
+ 'OP_CAT' : OP_CAT,
+ 'OP_SUBSTR' : OP_SUBSTR,
+ 'OP_LEFT' : OP_LEFT,
+ 'OP_RIGHT' : OP_RIGHT,
+ 'OP_SIZE' : OP_SIZE,
+ 'OP_INVERT' : OP_INVERT,
+ 'OP_AND' : OP_AND,
+ 'OP_OR' : OP_OR,
+ 'OP_XOR' : OP_XOR,
+ 'OP_EQUAL' : OP_EQUAL,
+ 'OP_EQUALVERIFY' : OP_EQUALVERIFY,
+ 'OP_RESERVED1' : OP_RESERVED1,
+ 'OP_RESERVED2' : OP_RESERVED2,
+ 'OP_1ADD' : OP_1ADD,
+ 'OP_1SUB' : OP_1SUB,
+ 'OP_2MUL' : OP_2MUL,
+ 'OP_2DIV' : OP_2DIV,
+ 'OP_NEGATE' : OP_NEGATE,
+ 'OP_ABS' : OP_ABS,
+ 'OP_NOT' : OP_NOT,
+ 'OP_0NOTEQUAL' : OP_0NOTEQUAL,
+ 'OP_ADD' : OP_ADD,
+ 'OP_SUB' : OP_SUB,
+ 'OP_MUL' : OP_MUL,
+ 'OP_DIV' : OP_DIV,
+ 'OP_MOD' : OP_MOD,
+ 'OP_LSHIFT' : OP_LSHIFT,
+ 'OP_RSHIFT' : OP_RSHIFT,
+ 'OP_BOOLAND' : OP_BOOLAND,
+ 'OP_BOOLOR' : OP_BOOLOR,
+ 'OP_NUMEQUAL' : OP_NUMEQUAL,
+ 'OP_NUMEQUALVERIFY' : OP_NUMEQUALVERIFY,
+ 'OP_NUMNOTEQUAL' : OP_NUMNOTEQUAL,
+ 'OP_LESSTHAN' : OP_LESSTHAN,
+ 'OP_GREATERTHAN' : OP_GREATERTHAN,
+ 'OP_LESSTHANOREQUAL' : OP_LESSTHANOREQUAL,
+ 'OP_GREATERTHANOREQUAL' : OP_GREATERTHANOREQUAL,
+ 'OP_MIN' : OP_MIN,
+ 'OP_MAX' : OP_MAX,
+ 'OP_WITHIN' : OP_WITHIN,
+ 'OP_RIPEMD160' : OP_RIPEMD160,
+ 'OP_SHA1' : OP_SHA1,
+ 'OP_SHA256' : OP_SHA256,
+ 'OP_HASH160' : OP_HASH160,
+ 'OP_HASH256' : OP_HASH256,
+ 'OP_CODESEPARATOR' : OP_CODESEPARATOR,
+ 'OP_CHECKSIG' : OP_CHECKSIG,
+ 'OP_CHECKSIGVERIFY' : OP_CHECKSIGVERIFY,
+ 'OP_CHECKMULTISIG' : OP_CHECKMULTISIG,
+ 'OP_CHECKMULTISIGVERIFY' : OP_CHECKMULTISIGVERIFY,
+ 'OP_NOP1' : OP_NOP1,
+ 'OP_NOP2' : OP_NOP2,
+ 'OP_NOP3' : OP_NOP3,
+ 'OP_NOP4' : OP_NOP4,
+ 'OP_NOP5' : OP_NOP5,
+ 'OP_NOP6' : OP_NOP6,
+ 'OP_NOP7' : OP_NOP7,
+ 'OP_NOP8' : OP_NOP8,
+ 'OP_NOP9' : OP_NOP9,
+ 'OP_NOP10' : OP_NOP10,
+ 'OP_SMALLINTEGER' : OP_SMALLINTEGER,
+ 'OP_PUBKEYS' : OP_PUBKEYS,
+ 'OP_PUBKEYHASH' : OP_PUBKEYHASH,
+ 'OP_PUBKEY' : OP_PUBKEY,
+}
+
+class CScriptInvalidError(Exception):
+ """Base class for CScript exceptions"""
+ pass
+
+class CScriptTruncatedPushDataError(CScriptInvalidError):
+ """Invalid pushdata due to truncation"""
+ def __init__(self, msg, data):
+ self.data = data
+ super(CScriptTruncatedPushDataError, self).__init__(msg)
+
+# This is used, e.g., for blockchain heights in coinbase scripts (BIP34)
+class CScriptNum(object):
+ def __init__(self, d=0):
+ self.value = d
+
+ @staticmethod
+ def encode(obj):
+ r = bytearray(0)
+ if obj.value == 0:
+ return bytes(r)
+ neg = obj.value < 0
+ absvalue = -obj.value if neg else obj.value
+ while (absvalue):
+ r.append(chr(absvalue & 0xff))
+ absvalue >>= 8
+ if r[-1] & 0x80:
+ r.append(0x80 if neg else 0)
+ elif neg:
+ r[-1] |= 0x80
+ return bytes(bchr(len(r)) + r)
+
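
A hand-worked illustration of the encoding CScriptNum targets (plain Python arithmetic, not a call into the class): the BIP34 height 350000 serializes to three little-endian bytes plus a push-length prefix.

    value = 350000                       # e.g. a BIP34 coinbase height
    encoded = []
    while value:
        encoded.append(value & 0xff)
        value >>= 8
    # encoded == [0xb0, 0x57, 0x05]; the top byte 0x05 has its sign bit clear and
    # the number is positive, so no 0x00/0x80 padding byte is appended.
    # Pushed in a script this becomes: 03 b0 57 05 (length prefix, then the bytes).
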
+
+class CScript(bytes):
+ """Serialized script
+
+ A bytes subclass, so you can use this directly whenever bytes are accepted.
+ Note that this means that indexing does *not* work - you'll get an index by
+ byte rather than opcode. This format was chosen for efficiency so that the
+    general case would not require creating a lot of little CScriptOp objects.
+
+ iter(script) however does iterate by opcode.
+ """
+ @classmethod
+ def __coerce_instance(cls, other):
+ # Coerce other into bytes
+ if isinstance(other, CScriptOp):
+ other = bchr(other)
+ elif isinstance(other, CScriptNum):
+ if (other.value == 0):
+ other = bchr(CScriptOp(OP_0))
+ else:
+ other = CScriptNum.encode(other)
+ elif isinstance(other, (int, long)):
+ if 0 <= other <= 16:
+ other = bytes(bchr(CScriptOp.encode_op_n(other)))
+ elif other == -1:
+ other = bytes(bchr(OP_1NEGATE))
+ else:
+ other = CScriptOp.encode_op_pushdata(bignum.bn2vch(other))
+ elif isinstance(other, (bytes, bytearray)):
+ other = CScriptOp.encode_op_pushdata(other)
+ return other
+
+ def __add__(self, other):
+ # Do the coercion outside of the try block so that errors in it are
+ # noticed.
+ other = self.__coerce_instance(other)
+
+ try:
+ # bytes.__add__ always returns bytes instances unfortunately
+ return CScript(super(CScript, self).__add__(other))
+ except TypeError:
+ raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
+
+ def join(self, iterable):
+ # join makes no sense for a CScript()
+ raise NotImplementedError
+
+ def __new__(cls, value=b''):
+ if isinstance(value, bytes) or isinstance(value, bytearray):
+ return super(CScript, cls).__new__(cls, value)
+ else:
+ def coerce_iterable(iterable):
+ for instance in iterable:
+ yield cls.__coerce_instance(instance)
+ # Annoyingly on both python2 and python3 bytes.join() always
+ # returns a bytes instance even when subclassed.
+ return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
+
+ def raw_iter(self):
+ """Raw iteration
+
+ Yields tuples of (opcode, data, sop_idx) so that the different possible
+        PUSHDATA encodings can be accurately distinguished, as well as
+        the exact byte index of each opcode (sop_idx).
+ """
+ i = 0
+ while i < len(self):
+ sop_idx = i
+ opcode = bord(self[i])
+ i += 1
+
+ if opcode > OP_PUSHDATA4:
+ yield (opcode, None, sop_idx)
+ else:
+ datasize = None
+ pushdata_type = None
+ if opcode < OP_PUSHDATA1:
+ pushdata_type = 'PUSHDATA(%d)' % opcode
+ datasize = opcode
+
+ elif opcode == OP_PUSHDATA1:
+ pushdata_type = 'PUSHDATA1'
+ if i >= len(self):
+ raise CScriptInvalidError('PUSHDATA1: missing data length')
+ datasize = bord(self[i])
+ i += 1
+
+ elif opcode == OP_PUSHDATA2:
+ pushdata_type = 'PUSHDATA2'
+ if i + 1 >= len(self):
+ raise CScriptInvalidError('PUSHDATA2: missing data length')
+ datasize = bord(self[i]) + (bord(self[i+1]) << 8)
+ i += 2
+
+ elif opcode == OP_PUSHDATA4:
+ pushdata_type = 'PUSHDATA4'
+ if i + 3 >= len(self):
+ raise CScriptInvalidError('PUSHDATA4: missing data length')
+ datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
+ i += 4
+
+ else:
+ assert False # shouldn't happen
+
+
+ data = bytes(self[i:i+datasize])
+
+ # Check for truncation
+ if len(data) < datasize:
+ raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
+
+ i += datasize
+
+ yield (opcode, data, sop_idx)
+
+ def __iter__(self):
+ """'Cooked' iteration
+
+        Returns either a CScriptOp instance, an integer, or bytes, as
+ appropriate.
+
+ See raw_iter() if you need to distinguish the different possible
+ PUSHDATA encodings.
+ """
+ for (opcode, data, sop_idx) in self.raw_iter():
+ if data is not None:
+ yield data
+ else:
+ opcode = CScriptOp(opcode)
+
+ if opcode.is_small_int():
+ yield opcode.decode_op_n()
+ else:
+ yield CScriptOp(opcode)
+
+ def __repr__(self):
+ # For Python3 compatibility add b before strings so testcases don't
+ # need to change
+ def _repr(o):
+ if isinstance(o, bytes):
+ return "x('%s')" % binascii.hexlify(o).decode('utf8')
+ else:
+ return repr(o)
+
+ ops = []
+ i = iter(self)
+ while True:
+ op = None
+ try:
+ op = _repr(next(i))
+ except CScriptTruncatedPushDataError as err:
+ op = '%s...<ERROR: %s>' % (_repr(err.data), err)
+ break
+ except CScriptInvalidError as err:
+ op = '<ERROR: %s>' % err
+ break
+ except StopIteration:
+ break
+ finally:
+ if op is not None:
+ ops.append(op)
+
+ return "CScript([%s])" % ', '.join(ops)
+
+ def GetSigOpCount(self, fAccurate):
+ """Get the SigOp count.
+
+ fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
+
+ Note that this is consensus-critical.
+ """
+ n = 0
+ lastOpcode = OP_INVALIDOPCODE
+ for (opcode, data, sop_idx) in self.raw_iter():
+ if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
+ n += 1
+ elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
+ if fAccurate and (OP_1 <= lastOpcode <= OP_16):
+ n += opcode.decode_op_n()
+ else:
+ n += 20
+ lastOpcode = opcode
+ return n
+
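
Building and walking a script, sketched under Python 2 (the 20-byte hash below is a placeholder): pushes are coerced from ints and byte strings, and iteration yields opcodes and data per opcode rather than per byte, as the class docstring notes.

    from script import CScript, OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG

    keyhash = b'\x00' * 20                 # placeholder hash160
    p2pkh = CScript([OP_DUP, OP_HASH160, keyhash, OP_EQUALVERIFY, OP_CHECKSIG])
    assert list(p2pkh) == [OP_DUP, OP_HASH160, keyhash, OP_EQUALVERIFY, OP_CHECKSIG]
    assert p2pkh.GetSigOpCount(True) == 1  # one OP_CHECKSIG
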
+
+SIGHASH_ALL = 1
+SIGHASH_NONE = 2
+SIGHASH_SINGLE = 3
+SIGHASH_ANYONECANPAY = 0x80
+
+def FindAndDelete(script, sig):
+ """Consensus critical, see FindAndDelete() in Satoshi codebase"""
+ r = b''
+ last_sop_idx = sop_idx = 0
+ skip = True
+ for (opcode, data, sop_idx) in script.raw_iter():
+ if not skip:
+ r += script[last_sop_idx:sop_idx]
+ last_sop_idx = sop_idx
+ if script[sop_idx:sop_idx + len(sig)] == sig:
+ skip = True
+ else:
+ skip = False
+ if not skip:
+ r += script[last_sop_idx:]
+ return CScript(r)
+
+
+def SignatureHash(script, txTo, inIdx, hashtype):
+ """Consensus-correct SignatureHash
+
+ Returns (hash, err) to precisely match the consensus-critical behavior of
+ the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
+ """
+ HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+
+ if inIdx >= len(txTo.vin):
+ return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
+ txtmp = CTransaction(txTo)
+
+ for txin in txtmp.vin:
+ txin.scriptSig = b''
+ txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
+
+ if (hashtype & 0x1f) == SIGHASH_NONE:
+ txtmp.vout = []
+
+ for i in range(len(txtmp.vin)):
+ if i != inIdx:
+ txtmp.vin[i].nSequence = 0
+
+ elif (hashtype & 0x1f) == SIGHASH_SINGLE:
+ outIdx = inIdx
+ if outIdx >= len(txtmp.vout):
+ return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
+
+ tmp = txtmp.vout[outIdx]
+ txtmp.vout = []
+ for i in range(outIdx):
+ txtmp.vout.append(CTxOut())
+ txtmp.vout.append(tmp)
+
+ for i in range(len(txtmp.vin)):
+ if i != inIdx:
+ txtmp.vin[i].nSequence = 0
+
+ if hashtype & SIGHASH_ANYONECANPAY:
+ tmp = txtmp.vin[inIdx]
+ txtmp.vin = []
+ txtmp.vin.append(tmp)
+
+ s = txtmp.serialize()
+ s += struct.pack(b"<I", hashtype)
+
+ hash = hash256(s)
+
+ return (hash, None)
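
A sketch of calling SignatureHash() on a one-input, one-output transaction (assumes Python 2 and the CTxIn/COutPoint constructors defined earlier in mininode.py, which are not shown in this hunk); the (hash, err) pair mirrors the consensus behavior documented above.

    from mininode import CTransaction, CTxIn, CTxOut, COutPoint
    from script import CScript, OP_TRUE, SignatureHash, SIGHASH_ALL

    spend = CTransaction()
    spend.vin.append(CTxIn(COutPoint(0, 0)))      # dummy prevout
    spend.vout.append(CTxOut(0, CScript()))

    (sighash, err) = SignatureHash(CScript([OP_TRUE]), spend, 0, SIGHASH_ALL)
    assert err is None and len(sighash) == 32     # 32-byte double-SHA256 digest
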
diff --git a/qa/rpc-tests/script_test.py b/qa/rpc-tests/script_test.py
new file mode 100755
index 0000000000..1ba3a478a8
--- /dev/null
+++ b/qa/rpc-tests/script_test.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env python2
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+'''
+Test notes:
+This test uses the script_valid and script_invalid tests from the unittest
+framework to do end-to-end testing where we compare that two nodes agree on
+whether blocks containing a given test script are valid.
+
+We generally ignore the script flags associated with each test (since we lack
+the precision to test each script using those flags in this framework), but
+for tests with SCRIPT_VERIFY_P2SH, we can use a block time after the BIP16
+switchover date to try to test with that flag enabled (and for tests without
+that flag, we use a block time before the switchover date).
+
+NOTE: This test is very slow and may take more than 40 minutes to run.
+'''
+
+from test_framework import ComparisonTestFramework
+from util import *
+from comptool import TestInstance, TestManager
+from mininode import *
+from blocktools import *
+from script import *
+import logging
+import copy
+import json
+
+script_valid_file = "../../src/test/data/script_valid.json"
+script_invalid_file = "../../src/test/data/script_invalid.json"
+
+# Pass in a set of json files to open.
+class ScriptTestFile(object):
+
+ def __init__(self, files):
+ self.files = files
+ self.index = -1
+ self.data = []
+
+ def load_files(self):
+ for f in self.files:
+ self.data.extend(json.loads(open(f).read()))
+
+ # Skip over records that are not long enough to be tests
+ def get_records(self):
+ while (self.index < len(self.data)):
+ if len(self.data[self.index]) >= 3:
+ yield self.data[self.index]
+ self.index += 1
+
+
+# Helper for parsing the flags specified in the .json files
+SCRIPT_VERIFY_NONE = 0
+SCRIPT_VERIFY_P2SH = 1
+SCRIPT_VERIFY_STRICTENC = 1 << 1
+SCRIPT_VERIFY_DERSIG = 1 << 2
+SCRIPT_VERIFY_LOW_S = 1 << 3
+SCRIPT_VERIFY_NULLDUMMY = 1 << 4
+SCRIPT_VERIFY_SIGPUSHONLY = 1 << 5
+SCRIPT_VERIFY_MINIMALDATA = 1 << 6
+SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_NOPS = 1 << 7
+SCRIPT_VERIFY_CLEANSTACK = 1 << 8
+
+flag_map = {
+ "": SCRIPT_VERIFY_NONE,
+ "NONE": SCRIPT_VERIFY_NONE,
+ "P2SH": SCRIPT_VERIFY_P2SH,
+ "STRICTENC": SCRIPT_VERIFY_STRICTENC,
+ "DERSIG": SCRIPT_VERIFY_DERSIG,
+ "LOW_S": SCRIPT_VERIFY_LOW_S,
+ "NULLDUMMY": SCRIPT_VERIFY_NULLDUMMY,
+ "SIGPUSHONLY": SCRIPT_VERIFY_SIGPUSHONLY,
+ "MINIMALDATA": SCRIPT_VERIFY_MINIMALDATA,
+ "DISCOURAGE_UPGRADABLE_NOPS": SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_NOPS,
+ "CLEANSTACK": SCRIPT_VERIFY_CLEANSTACK,
+}
+
+def ParseScriptFlags(flag_string):
+ flags = 0
+ for x in flag_string.split(","):
+ if x in flag_map:
+ flags |= flag_map[x]
+ else:
+ print "Error: unrecognized script flag: ", x
+ return flags
+
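
Usage sketch: the flags column of the .json test data is a comma-separated list of the names above, so for example

    assert ParseScriptFlags("P2SH,STRICTENC") == (SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_STRICTENC)
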
+'''
+Given a string that is a scriptsig or scriptpubkey from the .json files above,
+convert it to a CScript()
+'''
+# Replicates behavior from core_read.cpp
+def ParseScript(json_script):
+ script = json_script.split(" ")
+ parsed_script = CScript()
+ for x in script:
+ if len(x) == 0:
+ # Empty string, ignore.
+ pass
+ elif x.isdigit() or (len(x) >= 1 and x[0] == "-" and x[1:].isdigit()):
+ # Number
+ n = int(x, 0)
+ if (n == -1) or (n >= 1 and n <= 16):
+ parsed_script = CScript(bytes(parsed_script) + bytes(CScript([n])))
+ else:
+ parsed_script += CScriptNum(int(x, 0))
+ elif x.startswith("0x"):
+ # Raw hex data, inserted NOT pushed onto stack:
+ for i in xrange(2, len(x), 2):
+ parsed_script = CScript(bytes(parsed_script) + bytes(chr(int(x[i:i+2],16))))
+ elif x.startswith("'") and x.endswith("'") and len(x) >= 2:
+ # Single-quoted string, pushed as data.
+ parsed_script += CScript([x[1:-1]])
+ else:
+ # opcode, e.g. OP_ADD or ADD:
+ tryopname = "OP_" + x
+ if tryopname in OPCODES_BY_NAME:
+ parsed_script += CScriptOp(OPCODES_BY_NAME["OP_" + x])
+ else:
+ print "ParseScript: error parsing '%s'" % x
+ return ""
+ return parsed_script
+
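
A sketch of what ParseScript() produces for a simple test vector (names are in scope in this file via `from script import *`): small numbers become OP_1..OP_16, 0x-prefixed hex is inserted raw rather than pushed, and bare names are looked up as OP_<name>.

    s = ParseScript("1 0x51 EQUAL")
    assert s == CScript([OP_1, OP_1, OP_EQUAL])   # 0x51 is the raw byte for OP_1
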
+class TestBuilder(object):
+ def create_credit_tx(self, scriptPubKey):
+ # self.tx1 is a coinbase transaction, modeled after the one created by script_tests.cpp
+ # This allows us to reuse signatures created in the unit test framework.
+ self.tx1 = create_coinbase() # this has a bip34 scriptsig,
+ self.tx1.vin[0].scriptSig = CScript([0, 0]) # but this matches the unit tests
+ self.tx1.vout[0].nValue = 0
+ self.tx1.vout[0].scriptPubKey = scriptPubKey
+ self.tx1.rehash()
+ def create_spend_tx(self, scriptSig):
+ self.tx2 = create_transaction(self.tx1, 0, CScript(), 0)
+ self.tx2.vin[0].scriptSig = scriptSig
+ self.tx2.vout[0].scriptPubKey = CScript()
+ self.tx2.rehash()
+ def rehash(self):
+ self.tx1.rehash()
+ self.tx2.rehash()
+
+# This test uses the (default) two nodes provided by ComparisonTestFramework,
+# specified on the command line with --testbinary and --refbinary.
+# See comptool.py
+class ScriptTest(ComparisonTestFramework):
+
+ def run_test(self):
+ # Set up the comparison tool TestManager
+ test = TestManager(self, self.options.tmpdir)
+ test.add_all_connections(self.nodes)
+
+ # Load scripts
+ self.scripts = ScriptTestFile([script_valid_file, script_invalid_file])
+ self.scripts.load_files()
+
+ # Some variables we re-use between test instances (to build blocks)
+ self.tip = None
+ self.block_time = None
+
+ NetworkThread().start() # Start up network handling in another thread
+ test.run()
+
+ def generate_test_instance(self, pubkeystring, scriptsigstring):
+ scriptpubkey = ParseScript(pubkeystring)
+ scriptsig = ParseScript(scriptsigstring)
+
+ test = TestInstance(sync_every_block=False)
+ test_build = TestBuilder()
+ test_build.create_credit_tx(scriptpubkey)
+ test_build.create_spend_tx(scriptsig)
+ test_build.rehash()
+
+ block = create_block(self.tip, test_build.tx1, self.block_time)
+ self.block_time += 1
+ block.solve()
+ self.tip = block.sha256
+ test.blocks_and_transactions = [[block, True]]
+
+ for i in xrange(100):
+ block = create_block(self.tip, create_coinbase(), self.block_time)
+ self.block_time += 1
+ block.solve()
+ self.tip = block.sha256
+ test.blocks_and_transactions.append([block, True])
+
+ block = create_block(self.tip, create_coinbase(), self.block_time)
+ self.block_time += 1
+ block.vtx.append(test_build.tx2)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+ test.blocks_and_transactions.append([block, None])
+ return test
+
+ # This generates the tests for TestManager.
+ def get_tests(self):
+ self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
+ self.block_time = 1333230000 # before the BIP16 switchover
+
+ '''
+ Create a new block with an anyone-can-spend coinbase
+ '''
+ block = create_block(self.tip, create_coinbase(), self.block_time)
+ self.block_time += 1
+ block.solve()
+ self.tip = block.sha256
+ yield TestInstance(objects=[[block, True]])
+
+ '''
+ Build out to 100 blocks total, maturing the coinbase.
+ '''
+ test = TestInstance(objects=[], sync_every_block=False, sync_every_tx=False)
+ for i in xrange(100):
+ b = create_block(self.tip, create_coinbase(), self.block_time)
+ b.solve()
+ test.blocks_and_transactions.append([b, True])
+ self.tip = b.sha256
+ self.block_time += 1
+ yield test
+
+ ''' Iterate through script tests. '''
+ counter = 0
+ for script_test in self.scripts.get_records():
+ ''' Reset the blockchain to genesis block + 100 blocks. '''
+ if self.nodes[0].getblockcount() > 101:
+ self.nodes[0].invalidateblock(self.nodes[0].getblockhash(102))
+ self.nodes[1].invalidateblock(self.nodes[1].getblockhash(102))
+
+ self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
+
+ [scriptsig, scriptpubkey, flags] = script_test[0:3]
+ flags = ParseScriptFlags(flags)
+
+ # We can use block time to determine whether the nodes should be
+ # enforcing BIP16.
+ #
+ # We intentionally let the block time grow by 1 each time.
+ # This forces the block hashes to differ between tests, so that
+ # a call to invalidateblock doesn't interfere with a later test.
+ if (flags & SCRIPT_VERIFY_P2SH):
+ self.block_time = 1333238400 + counter # Advance to enforcing BIP16
+ else:
+ self.block_time = 1333230000 + counter # Before the BIP16 switchover
+
+ print "Script test: [%s]" % script_test
+
+ yield self.generate_test_instance(scriptpubkey, scriptsig)
+ counter += 1
+
+if __name__ == '__main__':
+ ScriptTest().main()
diff --git a/qa/rpc-tests/test_framework.py b/qa/rpc-tests/test_framework.py
index 4c8a11b821..15a357a340 100755
--- a/qa/rpc-tests/test_framework.py
+++ b/qa/rpc-tests/test_framework.py
@@ -89,8 +89,10 @@ class BitcoinTestFramework(object):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
+ parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
+ help="Don't stop bitcoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
- help="Source directory containing bitcoind/bitcoin-cli (default: %default%)")
+ help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
@@ -128,10 +130,15 @@ class BitcoinTestFramework(object):
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
- if not self.options.nocleanup:
- print("Cleaning up")
+ if not self.options.noshutdown:
+ print("Stopping nodes")
stop_nodes(self.nodes)
wait_bitcoinds()
+ else:
+ print("Note: bitcoinds were not stopped and may still be running")
+
+ if not self.options.nocleanup and not self.options.noshutdown:
+ print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if success:
@@ -140,3 +147,34 @@ class BitcoinTestFramework(object):
else:
print("Failed")
sys.exit(1)
+
+
+# Test framework for doing p2p comparison testing, which sets up some bitcoind
+# binaries:
+# 1 binary: test binary
+# 2 binaries: 1 test binary, 1 ref binary
+# n>2 binaries: 1 test binary, n-1 ref binaries
+
+class ComparisonTestFramework(BitcoinTestFramework):
+
+ # Can override the num_nodes variable to indicate how many nodes to run.
+ def __init__(self):
+ self.num_nodes = 2
+
+ def add_options(self, parser):
+ parser.add_option("--testbinary", dest="testbinary",
+ default=os.getenv("BITCOIND", "bitcoind"),
+ help="bitcoind binary to test")
+ parser.add_option("--refbinary", dest="refbinary",
+ default=os.getenv("BITCOIND", "bitcoind"),
+ help="bitcoind binary to use for reference nodes (if any)")
+
+ def setup_chain(self):
+ print "Initializing test directory "+self.options.tmpdir
+ initialize_chain_clean(self.options.tmpdir, self.num_nodes)
+
+ def setup_network(self):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
+ extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
+ binary=[self.options.testbinary] +
+ [self.options.refbinary]*(self.num_nodes-1))
diff --git a/qa/rpc-tests/util.py b/qa/rpc-tests/util.py
index 297f2d8b03..3b4a10e46b 100644
--- a/qa/rpc-tests/util.py
+++ b/qa/rpc-tests/util.py
@@ -88,8 +88,12 @@ def initialize_chain(test_dir):
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
+ if os.getenv("PYTHON_DEBUG", ""):
+ print "initialize_chain: bitcoind started, calling bitcoin-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
+ if os.getenv("PYTHON_DEBUG", ""):
+ print "initialize_chain: bitcoin-cli -rpcwait getblockcount completed"
devnull.close()
rpcs = []
for i in range(4):
@@ -158,18 +162,24 @@ def _rpchost_to_args(rpchost):
rv += ['-rpcport=' + rpcport]
return rv
-def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None):
+def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a bitcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
- args = [ os.getenv("BITCOIND", "bitcoind"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
+ if binary is None:
+ binary = os.getenv("BITCOIND", "bitcoind")
+ args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
+ if os.getenv("PYTHON_DEBUG", ""):
+ print "start_node: bitcoind started, calling bitcoin-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
+ if os.getenv("PYTHON_DEBUG", ""):
+ print "start_node: calling bitcoin-cli -rpcwait getblockcount returned"
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
if timewait is not None:
@@ -179,12 +189,13 @@ def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None):
proxy.url = url # store URL on proxy for info
return proxy
-def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
+def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple bitcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
- return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
+ if binary is None: binary = [ None for i in range(num_nodes) ]
+ return [ start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index 2fa8de6fd8..1269d7a119 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -12,8 +12,6 @@
#include <boost/filesystem/operations.hpp>
-#define _(x) std::string(x) /* Keep the _() around in case gettext or such will be used later to translate non-UI */
-
using namespace std;
using namespace json_spirit;
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index f1c1c0ff8b..d024b48020 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -11,7 +11,6 @@
#include "primitives/transaction.h"
#include "script/script.h"
#include "script/sign.h"
-#include "ui_interface.h" // for _(...)
#include "univalue/univalue.h"
#include "util.h"
#include "utilmoneystr.h"
@@ -26,7 +25,6 @@ using namespace std;
static bool fCreateBlank;
static map<string,UniValue> registers;
-CClientUIInterface uiInterface;
static bool AppInitRawTx(int argc, char* argv[])
{
diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp
index 2172f4a21b..eeca8655c9 100644
--- a/src/bitcoind.cpp
+++ b/src/bitcoind.cpp
@@ -8,7 +8,6 @@
#include "init.h"
#include "main.h"
#include "noui.h"
-#include "ui_interface.h"
#include "util.h"
#include <boost/algorithm/string/predicate.hpp>
diff --git a/src/bloom.cpp b/src/bloom.cpp
index e60576f4b4..36cba491c4 100644
--- a/src/bloom.cpp
+++ b/src/bloom.cpp
@@ -21,22 +21,33 @@
using namespace std;
CBloomFilter::CBloomFilter(unsigned int nElements, double nFPRate, unsigned int nTweakIn, unsigned char nFlagsIn) :
-/**
- * The ideal size for a bloom filter with a given number of elements and false positive rate is:
- * - nElements * log(fp rate) / ln(2)^2
- * We ignore filter parameters which will create a bloom filter larger than the protocol limits
- */
-vData(min((unsigned int)(-1 / LN2SQUARED * nElements * log(nFPRate)), MAX_BLOOM_FILTER_SIZE * 8) / 8),
-/**
- * The ideal number of hash functions is filter size * ln(2) / number of elements
- * Again, we ignore filter parameters which will create a bloom filter with more hash functions than the protocol limits
- * See https://en.wikipedia.org/wiki/Bloom_filter for an explanation of these formulas
- */
-isFull(false),
-isEmpty(false),
-nHashFuncs(min((unsigned int)(vData.size() * 8 / nElements * LN2), MAX_HASH_FUNCS)),
-nTweak(nTweakIn),
-nFlags(nFlagsIn)
+ /**
+ * The ideal size for a bloom filter with a given number of elements and false positive rate is:
+ * - nElements * log(fp rate) / ln(2)^2
+ * We ignore filter parameters which will create a bloom filter larger than the protocol limits
+ */
+ vData(min((unsigned int)(-1 / LN2SQUARED * nElements * log(nFPRate)), MAX_BLOOM_FILTER_SIZE * 8) / 8),
+ /**
+ * The ideal number of hash functions is filter size * ln(2) / number of elements
+ * Again, we ignore filter parameters which will create a bloom filter with more hash functions than the protocol limits
+ * See https://en.wikipedia.org/wiki/Bloom_filter for an explanation of these formulas
+ */
+ isFull(false),
+ isEmpty(false),
+ nHashFuncs(min((unsigned int)(vData.size() * 8 / nElements * LN2), MAX_HASH_FUNCS)),
+ nTweak(nTweakIn),
+ nFlags(nFlagsIn)
+{
+}
+
+// Private constructor used by CRollingBloomFilter
+CBloomFilter::CBloomFilter(unsigned int nElements, double nFPRate, unsigned int nTweakIn) :
+ vData((unsigned int)(-1 / LN2SQUARED * nElements * log(nFPRate)) / 8),
+ isFull(false),
+ isEmpty(true),
+ nHashFuncs((unsigned int)(vData.size() * 8 / nElements * LN2)),
+ nTweak(nTweakIn),
+ nFlags(BLOOM_UPDATE_NONE)
{
}
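
Plugging numbers into the two formulas in the comments above (a Python sketch; LN2, LN2SQUARED and the protocol limits MAX_BLOOM_FILTER_SIZE / MAX_HASH_FUNCS are restated here as plain constants):

    import math

    n_elements, fp_rate = 10000, 0.0001
    LN2, LN2SQUARED = math.log(2), math.log(2) ** 2
    MAX_BLOOM_FILTER_SIZE, MAX_HASH_FUNCS = 36000, 50     # bytes / hash functions

    n_bits = min(-1 / LN2SQUARED * n_elements * math.log(fp_rate),
                 MAX_BLOOM_FILTER_SIZE * 8)
    n_bytes = int(n_bits / 8)                             # ~23962 bytes for these inputs
    n_hash_funcs = min(int(n_bytes * 8 / n_elements * LN2), MAX_HASH_FUNCS)   # 13 here
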
@@ -197,3 +208,43 @@ void CBloomFilter::UpdateEmptyFull()
isFull = full;
isEmpty = empty;
}
+
+CRollingBloomFilter::CRollingBloomFilter(unsigned int nElements, double fpRate, unsigned int nTweak) :
+ b1(nElements * 2, fpRate, nTweak), b2(nElements * 2, fpRate, nTweak)
+{
+ // Implemented using two bloom filters of 2 * nElements each.
+ // We fill them up, and clear them, staggered, every nElements
+ // inserted, so at least one always contains the last nElements
+ // inserted.
+ nBloomSize = nElements * 2;
+ nInsertions = 0;
+}
+
+void CRollingBloomFilter::insert(const std::vector<unsigned char>& vKey)
+{
+ if (nInsertions == 0) {
+ b1.clear();
+ } else if (nInsertions == nBloomSize / 2) {
+ b2.clear();
+ }
+ b1.insert(vKey);
+ b2.insert(vKey);
+ if (++nInsertions == nBloomSize) {
+ nInsertions = 0;
+ }
+}
+
+bool CRollingBloomFilter::contains(const std::vector<unsigned char>& vKey) const
+{
+ if (nInsertions < nBloomSize / 2) {
+ return b2.contains(vKey);
+ }
+ return b1.contains(vKey);
+}
+
+void CRollingBloomFilter::clear()
+{
+ b1.clear();
+ b2.clear();
+ nInsertions = 0;
+}
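
A small Python model of the staggering described in the constructor comment above (an illustration of the idea using plain sets, not the C++ class): both halves receive every insert and are cleared alternately every n insertions, so at least one of them always holds the last n items.

    class RollingSetModel(object):
        def __init__(self, n):
            self.n, self.count = n, 0
            self.s1, self.s2 = set(), set()

        def insert(self, key):
            if self.count == 0:
                self.s1.clear()
            elif self.count == self.n:
                self.s2.clear()
            self.s1.add(key)
            self.s2.add(key)
            self.count = (self.count + 1) % (2 * self.n)

        def contains(self, key):
            # mirror CRollingBloomFilter::contains: query the half that is
            # guaranteed to cover the last n insertions
            return key in (self.s2 if self.count < self.n else self.s1)
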
diff --git a/src/bloom.h b/src/bloom.h
index 15bc312c4b..bb17f59c86 100644
--- a/src/bloom.h
+++ b/src/bloom.h
@@ -53,6 +53,10 @@ private:
unsigned int Hash(unsigned int nHashNum, const std::vector<unsigned char>& vDataToHash) const;
+ // Private constructor for CRollingBloomFilter, no restrictions on size
+ CBloomFilter(unsigned int nElements, double nFPRate, unsigned int nTweak);
+ friend class CRollingBloomFilter;
+
public:
/**
* Creates a new bloom filter which will provide the given fp rate when filled with the given number of elements
@@ -97,4 +101,28 @@ public:
void UpdateEmptyFull();
};
+/**
+ * RollingBloomFilter is a probabilistic "keep track of most recently inserted" set.
+ * Construct it with the number of items to keep track of, and a false-positive rate.
+ *
+ * contains(item) will always return true if item was one of the last N things
+ * insert()'ed ... but may also return true for items that were not inserted.
+ */
+class CRollingBloomFilter
+{
+public:
+ CRollingBloomFilter(unsigned int nElements, double nFPRate, unsigned int nTweak);
+
+ void insert(const std::vector<unsigned char>& vKey);
+ bool contains(const std::vector<unsigned char>& vKey) const;
+
+ void clear();
+
+private:
+ unsigned int nBloomSize;
+ unsigned int nInsertions;
+ CBloomFilter b1, b2;
+};
+
+
#endif // BITCOIN_BLOOM_H
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index a9dd4c257a..1e044ad491 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -5,7 +5,6 @@
#include "chainparams.h"
-#include "random.h"
#include "util.h"
#include "utilstrencodings.h"
@@ -15,35 +14,11 @@
using namespace std;
-struct SeedSpec6 {
- uint8_t addr[16];
- uint16_t port;
-};
-
#include "chainparamsseeds.h"
/**
* Main network
*/
-
-//! Convert the pnSeeds6 array into usable address objects.
-static void convertSeed6(std::vector<CAddress> &vSeedsOut, const SeedSpec6 *data, unsigned int count)
-{
- // It'll only connect to one or two seed nodes because once it connects,
- // it'll get a pile of addresses with newer timestamps.
- // Seed nodes are given a random 'last seen time' of between one and two
- // weeks ago.
- const int64_t nOneWeek = 7*24*60*60;
- for (unsigned int i = 0; i < count; i++)
- {
- struct in6_addr ip;
- memcpy(&ip, data[i].addr, sizeof(ip));
- CAddress addr(CService(ip, data[i].port));
- addr.nTime = GetTime() - GetRand(nOneWeek) - nOneWeek;
- vSeedsOut.push_back(addr);
- }
-}
-
/**
* What makes a good checkpoint block?
* + Is surrounded by blocks with reasonable timestamps
@@ -165,7 +140,7 @@ public:
base58Prefixes[EXT_PUBLIC_KEY] = boost::assign::list_of(0x04)(0x88)(0xB2)(0x1E).convert_to_container<std::vector<unsigned char> >();
base58Prefixes[EXT_SECRET_KEY] = boost::assign::list_of(0x04)(0x88)(0xAD)(0xE4).convert_to_container<std::vector<unsigned char> >();
- convertSeed6(vFixedSeeds, pnSeed6_main, ARRAYLEN(pnSeed6_main));
+ vFixedSeeds = std::vector<SeedSpec6>(pnSeed6_main, pnSeed6_main + ARRAYLEN(pnSeed6_main));
fRequireRPCPassword = true;
fMiningRequiresPeers = true;
@@ -221,7 +196,7 @@ public:
base58Prefixes[EXT_PUBLIC_KEY] = boost::assign::list_of(0x04)(0x35)(0x87)(0xCF).convert_to_container<std::vector<unsigned char> >();
base58Prefixes[EXT_SECRET_KEY] = boost::assign::list_of(0x04)(0x35)(0x83)(0x94).convert_to_container<std::vector<unsigned char> >();
- convertSeed6(vFixedSeeds, pnSeed6_test, ARRAYLEN(pnSeed6_test));
+ vFixedSeeds = std::vector<SeedSpec6>(pnSeed6_test, pnSeed6_test + ARRAYLEN(pnSeed6_test));
fRequireRPCPassword = true;
fMiningRequiresPeers = true;
diff --git a/src/chainparams.h b/src/chainparams.h
index 1b03900990..bfefe242b7 100644
--- a/src/chainparams.h
+++ b/src/chainparams.h
@@ -19,6 +19,12 @@ struct CDNSSeedData {
CDNSSeedData(const std::string &strName, const std::string &strHost) : name(strName), host(strHost) {}
};
+struct SeedSpec6 {
+ uint8_t addr[16];
+ uint16_t port;
+};
+
+
/**
* CChainParams defines various tweakable parameters of a given instance of the
* Bitcoin system. There are three: the main network on which people trade goods
@@ -67,7 +73,7 @@ public:
std::string NetworkIDString() const { return strNetworkID; }
const std::vector<CDNSSeedData>& DNSSeeds() const { return vSeeds; }
const std::vector<unsigned char>& Base58Prefix(Base58Type type) const { return base58Prefixes[type]; }
- const std::vector<CAddress>& FixedSeeds() const { return vFixedSeeds; }
+ const std::vector<SeedSpec6>& FixedSeeds() const { return vFixedSeeds; }
virtual const Checkpoints::CCheckpointData& Checkpoints() const = 0;
protected:
CChainParams() {}
@@ -83,7 +89,7 @@ protected:
std::vector<unsigned char> base58Prefixes[MAX_BASE58_TYPES];
std::string strNetworkID;
CBlock genesis;
- std::vector<CAddress> vFixedSeeds;
+ std::vector<SeedSpec6> vFixedSeeds;
bool fRequireRPCPassword;
bool fMiningRequiresPeers;
bool fDefaultConsistencyChecks;
diff --git a/src/init.cpp b/src/init.cpp
index 6f852fcaa5..f4caa4717f 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -68,7 +68,7 @@ enum BindFlags {
};
static const char* FEE_ESTIMATES_FILENAME="fee_estimates.dat";
-CClientUIInterface uiInterface;
+CClientUIInterface uiInterface; // Declared but not defined in ui_interface.h
//////////////////////////////////////////////////////////////////////////////
//
diff --git a/src/main.cpp b/src/main.cpp
index 07156a9af2..7d7e670119 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -3676,7 +3676,6 @@ bool static AlreadyHave(const CInv& inv)
return true;
}
-
void static ProcessGetData(CNode* pfrom)
{
std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
@@ -3704,11 +3703,13 @@ void static ProcessGetData(CNode* pfrom)
if (chainActive.Contains(mi->second)) {
send = true;
} else {
+ static const int nOneMonth = 30 * 24 * 60 * 60;
// To prevent fingerprinting attacks, only send blocks outside of the active
- // chain if they are valid, and no more than a month older than the best header
- // chain we know about.
+ // chain if they are valid, and no more than a month older (both in time, and in
+ // best equivalent proof of work) than the best header chain we know about.
send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
- (mi->second->GetBlockTime() > pindexBestHeader->GetBlockTime() - 30 * 24 * 60 * 60);
+ (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
+ (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, Params().GetConsensus()) < nOneMonth);
if (!send) {
LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
}
@@ -3995,7 +3996,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
{
LOCK(cs_vNodes);
// Use deterministic randomness to send to the same nodes for 24 hours
- // at a time so the setAddrKnowns of the chosen nodes prevent repeats
+ // at a time so the addrKnowns of the chosen nodes prevent repeats
static uint256 hashSalt;
if (hashSalt.IsNull())
hashSalt = GetRandHash();
@@ -4779,9 +4780,9 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
{
- // Periodically clear setAddrKnown to allow refresh broadcasts
+ // Periodically clear addrKnown to allow refresh broadcasts
if (nLastRebroadcast)
- pnode->setAddrKnown.clear();
+ pnode->addrKnown.clear();
// Rebroadcast our address
AdvertizeLocal(pnode);
@@ -4799,9 +4800,9 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
vAddr.reserve(pto->vAddrToSend.size());
BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
{
- // returns true if wasn't already contained in the set
- if (pto->setAddrKnown.insert(addr).second)
+ if (!pto->addrKnown.contains(addr.GetKey()))
{
+ pto->addrKnown.insert(addr.GetKey());
vAddr.push_back(addr);
// receiver rejects addr messages larger than 1000
if (vAddr.size() >= 1000)
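
The block-serving hunk above tightens the anti-fingerprinting rule: a block off the active chain is served only if it is valid and within roughly a month of the best known header, measured both by timestamp and by proof-of-work-equivalent time. A toy sketch of the combined predicate follows; it takes the two deltas as plain integers rather than computing them from CBlockIndex, so WithinAMonth is a hypothetical helper, not code from the patch.

// Toy sketch of the two-sided "about a month old" check in the hunk above.
// The real code derives both deltas from CBlockIndex and
// GetBlockProofEquivalentTime(); here they are supplied by the caller.
#include <cstdint>
#include <cstdio>

static const int64_t nOneMonth = 30 * 24 * 60 * 60;

bool WithinAMonth(int64_t timeDelta, int64_t workEquivalentDelta)
{
    // Serve the block only if it is recent both in wall-clock time and in
    // the time it would take to redo the missing work at the tip's difficulty.
    return timeDelta < nOneMonth && workEquivalentDelta < nOneMonth;
}

int main()
{
    std::printf("%d\n", WithinAMonth(10 * 24 * 60 * 60, 12 * 24 * 60 * 60)); // 1
    std::printf("%d\n", WithinAMonth(10 * 24 * 60 * 60, 45 * 24 * 60 * 60)); // 0
}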
diff --git a/src/mruset.h b/src/mruset.h
index 1969f419cb..398aa173bf 100644
--- a/src/mruset.h
+++ b/src/mruset.h
@@ -1,12 +1,12 @@
-// Copyright (c) 2012 The Bitcoin Core developers
+// Copyright (c) 2012-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_MRUSET_H
#define BITCOIN_MRUSET_H
-#include <deque>
#include <set>
+#include <vector>
#include <utility>
/** STL-like set container that only keeps the most recent N elements. */
@@ -22,11 +22,13 @@ public:
protected:
std::set<T> set;
- std::deque<T> queue;
- size_type nMaxSize;
+ std::vector<iterator> order;
+ size_type first_used;
+ size_type first_unused;
+ const size_type nMaxSize;
public:
- mruset(size_type nMaxSizeIn = 0) { nMaxSize = nMaxSizeIn; }
+ mruset(size_type nMaxSizeIn = 1) : nMaxSize(nMaxSizeIn) { clear(); }
iterator begin() const { return set.begin(); }
iterator end() const { return set.end(); }
size_type size() const { return set.size(); }
@@ -36,7 +38,9 @@ public:
void clear()
{
set.clear();
- queue.clear();
+ order.assign(nMaxSize, set.end());
+ first_used = 0;
+ first_unused = 0;
}
bool inline friend operator==(const mruset<T>& a, const mruset<T>& b) { return a.set == b.set; }
bool inline friend operator==(const mruset<T>& a, const std::set<T>& b) { return a.set == b; }
@@ -45,25 +49,17 @@ public:
{
std::pair<iterator, bool> ret = set.insert(x);
if (ret.second) {
- if (nMaxSize && queue.size() == nMaxSize) {
- set.erase(queue.front());
- queue.pop_front();
+ if (set.size() == nMaxSize + 1) {
+ set.erase(order[first_used]);
+ order[first_used] = set.end();
+ if (++first_used == nMaxSize) first_used = 0;
}
- queue.push_back(x);
+ order[first_unused] = ret.first;
+ if (++first_unused == nMaxSize) first_unused = 0;
}
return ret;
}
size_type max_size() const { return nMaxSize; }
- size_type max_size(size_type s)
- {
- if (s)
- while (queue.size() > s) {
- set.erase(queue.front());
- queue.pop_front();
- }
- nMaxSize = s;
- return nMaxSize;
- }
};
#endif // BITCOIN_MRUSET_H
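
The rewritten mruset above keeps a std::set for membership plus a fixed-size ring of set iterators (first_used/first_unused) recording insertion order, so evicting the oldest element is a direct erase-by-iterator rather than a second lookup through a deque of copies. The sketch below reimplements that idea as a small standalone template; mru_sketch is an illustrative name and the interface is trimmed to insert/contains/size.

// Self-contained sketch of the iterator-ring technique in the new mruset.
#include <cassert>
#include <set>
#include <vector>

template <typename T>
class mru_sketch {
    std::set<T> set;
    std::vector<typename std::set<T>::iterator> order; // ring of insertion order
    std::size_t first_used = 0;    // slot holding the oldest live iterator
    std::size_t first_unused = 0;  // slot the next insertion will use
    const std::size_t nMaxSize;

public:
    explicit mru_sketch(std::size_t nMaxSizeIn) : order(nMaxSizeIn), nMaxSize(nMaxSizeIn) {}

    bool contains(const T& x) const { return set.count(x) != 0; }
    std::size_t size() const { return set.size(); }

    void insert(const T& x)
    {
        std::pair<typename std::set<T>::iterator, bool> ret = set.insert(x);
        if (!ret.second) return;                 // duplicate: order unchanged
        if (set.size() == nMaxSize + 1) {        // over capacity: evict oldest
            set.erase(order[first_used]);
            if (++first_used == nMaxSize) first_used = 0;
        }
        order[first_unused] = ret.first;         // remember the new element's slot
        if (++first_unused == nMaxSize) first_unused = 0;
    }
};

int main()
{
    mru_sketch<int> mru(3);
    for (int i = 0; i < 5; i++) mru.insert(i);   // inserts 0..4 with capacity 3
    assert(!mru.contains(0) && !mru.contains(1));
    assert(mru.contains(2) && mru.contains(3) && mru.contains(4));
    assert(mru.size() == 3);
}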
diff --git a/src/net.cpp b/src/net.cpp
index 45a06a105f..2de04fc574 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -142,6 +142,27 @@ bool GetLocal(CService& addr, const CNetAddr *paddrPeer)
return nBestScore >= 0;
}
+//! Convert the pnSeeds6 array into usable address objects.
+static std::vector<CAddress> convertSeed6(const std::vector<SeedSpec6> &vSeedsIn)
+{
+ // It'll only connect to one or two seed nodes because once it connects,
+ // it'll get a pile of addresses with newer timestamps.
+ // Seed nodes are given a random 'last seen time' of between one and two
+ // weeks ago.
+ const int64_t nOneWeek = 7*24*60*60;
+ std::vector<CAddress> vSeedsOut;
+ vSeedsOut.reserve(vSeedsIn.size());
+ for (std::vector<SeedSpec6>::const_iterator i(vSeedsIn.begin()); i != vSeedsIn.end(); ++i)
+ {
+ struct in6_addr ip;
+ memcpy(&ip, i->addr, sizeof(ip));
+ CAddress addr(CService(ip, i->port));
+ addr.nTime = GetTime() - GetRand(nOneWeek) - nOneWeek;
+ vSeedsOut.push_back(addr);
+ }
+ return vSeedsOut;
+}
+
// get best local address for a particular peer as a CAddress
// Otherwise, return the unroutable 0.0.0.0 but filled in with
// the normal parameters, since the IP may be changed to a useful
@@ -1195,7 +1216,7 @@ void ThreadOpenConnections()
static bool done = false;
if (!done) {
LogPrintf("Adding fixed seed nodes as DNS doesn't seem to be available.\n");
- addrman.Add(Params().FixedSeeds(), CNetAddr("127.0.0.1"));
+ addrman.Add(convertSeed6(Params().FixedSeeds()), CNetAddr("127.0.0.1"));
done = true;
}
}
@@ -1884,7 +1905,10 @@ bool CAddrDB::Read(CAddrMan& addr)
unsigned int ReceiveFloodSize() { return 1000*GetArg("-maxreceivebuffer", 5*1000); }
unsigned int SendBufferSize() { return 1000*GetArg("-maxsendbuffer", 1*1000); }
-CNode::CNode(SOCKET hSocketIn, CAddress addrIn, std::string addrNameIn, bool fInboundIn) : ssSend(SER_NETWORK, INIT_PROTO_VERSION), setAddrKnown(5000)
+CNode::CNode(SOCKET hSocketIn, CAddress addrIn, std::string addrNameIn, bool fInboundIn) :
+ ssSend(SER_NETWORK, INIT_PROTO_VERSION),
+ addrKnown(5000, 0.001, insecure_rand()),
+ setInventoryKnown(SendBufferSize() / 1000)
{
nServices = 0;
hSocket = hSocketIn;
@@ -1913,7 +1937,6 @@ CNode::CNode(SOCKET hSocketIn, CAddress addrIn, std::string addrNameIn, bool fIn
nStartingHeight = -1;
fGetAddr = false;
fRelayTxes = false;
- setInventoryKnown.max_size(SendBufferSize() / 1000);
pfilter = new CBloomFilter();
nPingNonceSent = 0;
nPingUsecStart = 0;
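
convertSeed6 above stamps every hard-coded seed with a pretend "last seen" time between one and two weeks in the past, so addresses learned from live peers, which carry fresher timestamps, win out. The sketch below reproduces just that timestamp arithmetic, with the standard <random> facilities standing in for GetRand; the output values are illustrative only.

// Sketch of the seed-timestamp trick in convertSeed6(): each fixed seed gets a
// 'last seen' time uniformly between one and two weeks ago.
#include <cstdint>
#include <cstdio>
#include <ctime>
#include <random>

int main()
{
    const int64_t nOneWeek = 7 * 24 * 60 * 60;
    std::mt19937_64 rng(std::random_device{}());
    std::uniform_int_distribution<int64_t> dist(0, nOneWeek - 1); // stands in for GetRand(nOneWeek)

    const int64_t now = static_cast<int64_t>(std::time(nullptr));
    for (int i = 0; i < 3; i++) {
        int64_t nTime = now - dist(rng) - nOneWeek;   // between 1 and 2 weeks ago
        std::printf("seed %d last seen %lld seconds ago\n", i, (long long)(now - nTime));
    }
}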
diff --git a/src/net.h b/src/net.h
index 5ce011b667..7c61a2be6c 100644
--- a/src/net.h
+++ b/src/net.h
@@ -300,7 +300,7 @@ public:
// flood relay
std::vector<CAddress> vAddrToSend;
- mruset<CAddress> setAddrKnown;
+ CRollingBloomFilter addrKnown;
bool fGetAddr;
std::set<uint256> setKnown;
@@ -380,7 +380,7 @@ public:
void AddAddressKnown(const CAddress& addr)
{
- setAddrKnown.insert(addr);
+ addrKnown.insert(addr.GetKey());
}
void PushAddress(const CAddress& addr)
@@ -388,7 +388,7 @@ public:
// Known checking here is only to save space from duplicates.
// SendMessages will filter it again for knowns that were added
// after addresses were pushed.
- if (addr.IsValid() && !setAddrKnown.count(addr)) {
+ if (addr.IsValid() && !addrKnown.contains(addr.GetKey())) {
if (vAddrToSend.size() >= MAX_ADDR_TO_SEND) {
vAddrToSend[insecure_rand() % vAddrToSend.size()] = addr;
} else {
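
Here setAddrKnown, an mruset of full CAddress copies, becomes a CRollingBloomFilter keyed on addr.GetKey(): a probabilistic "seen recently" structure with bounded memory and a small false-positive rate. The sketch below is not the bloom-filter implementation; it uses two alternating hash sets to get a similar rolling behaviour and the same contains-then-insert calling pattern, with RollingSeenSet as a made-up name.

// Conceptual sketch (not Bitcoin Core's CRollingBloomFilter) of a structure
// that answers "have I seen this key recently?" for roughly the last N keys.
#include <cassert>
#include <string>
#include <unordered_set>
#include <vector>

class RollingSeenSet {
    std::unordered_set<std::string> current, previous;
    const std::size_t nPerGeneration;

public:
    explicit RollingSeenSet(std::size_t n) : nPerGeneration(n) {}

    bool contains(const std::string& key) const
    {
        return current.count(key) || previous.count(key);
    }

    void insert(const std::string& key)
    {
        if (current.size() >= nPerGeneration) {   // rotate generations
            previous.swap(current);
            current.clear();
        }
        current.insert(key);
    }

    void clear() { current.clear(); previous.clear(); }
};

int main()
{
    RollingSeenSet addrKnown(2);
    std::vector<std::string> keys = {"a", "b", "c", "d", "e"};
    for (const std::string& k : keys) {
        if (!addrKnown.contains(k)) {             // same pattern as SendMessages()
            addrKnown.insert(k);
        }
    }
    assert(addrKnown.contains("e") && addrKnown.contains("d"));
    assert(!addrKnown.contains("a"));             // old entries age out
}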
diff --git a/src/netbase.cpp b/src/netbase.cpp
index 1837cfa9c3..2015d0271a 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -293,7 +293,7 @@ struct ProxyCredentials
};
/** Connect using SOCKS5 (as described in RFC1928) */
-bool static Socks5(string strDest, int port, const ProxyCredentials *auth, SOCKET& hSocket)
+static bool Socks5(const std::string& strDest, int port, const ProxyCredentials *auth, SOCKET& hSocket)
{
LogPrintf("SOCKS5 connecting %s\n", strDest);
if (strDest.size() > 255) {
@@ -558,7 +558,7 @@ bool IsProxy(const CNetAddr &addr) {
return false;
}
-static bool ConnectThroughProxy(const proxyType &proxy, const std::string strDest, int port, SOCKET& hSocketRet, int nTimeout, bool *outProxyConnectionFailed)
+static bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int port, SOCKET& hSocketRet, int nTimeout, bool *outProxyConnectionFailed)
{
SOCKET hSocket = INVALID_SOCKET;
// first connect to proxy server
diff --git a/src/pow.cpp b/src/pow.cpp
index fc6ed4f3d1..bb53ad204b 100644
--- a/src/pow.cpp
+++ b/src/pow.cpp
@@ -114,3 +114,20 @@ arith_uint256 GetBlockProof(const CBlockIndex& block)
// or ~bnTarget / (nTarget+1) + 1.
return (~bnTarget / (bnTarget + 1)) + 1;
}
+
+int64_t GetBlockProofEquivalentTime(const CBlockIndex& to, const CBlockIndex& from, const CBlockIndex& tip, const Consensus::Params& params)
+{
+ arith_uint256 r;
+ int sign = 1;
+ if (to.nChainWork > from.nChainWork) {
+ r = to.nChainWork - from.nChainWork;
+ } else {
+ r = from.nChainWork - to.nChainWork;
+ sign = -1;
+ }
+ r = r * arith_uint256(params.nPowTargetSpacing) / GetBlockProof(tip);
+ if (r.bits() > 63) {
+ return sign * std::numeric_limits<int64_t>::max();
+ }
+ return sign * r.GetLow64();
+}
diff --git a/src/pow.h b/src/pow.h
index a5d32db178..e864a474cc 100644
--- a/src/pow.h
+++ b/src/pow.h
@@ -22,4 +22,7 @@ unsigned int CalculateNextWorkRequired(const CBlockIndex* pindexLast, int64_t nF
bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params&);
arith_uint256 GetBlockProof(const CBlockIndex& block);
+/** Return the time it would take to redo the work difference between from and to, assuming the current hashrate corresponds to the difficulty at tip, in seconds. */
+int64_t GetBlockProofEquivalentTime(const CBlockIndex& to, const CBlockIndex& from, const CBlockIndex& tip, const Consensus::Params&);
+
#endif // BITCOIN_POW_H
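
GetBlockProofEquivalentTime scales the chain-work difference by nPowTargetSpacing and divides by the work of one block at the tip, i.e. it answers "how long would redoing this much work take at the tip's difficulty". A worked example with plain integers (assuming constant difficulty and arbitrary work units) follows.

// Worked example of the conversion in GetBlockProofEquivalentTime(), using
// plain integers instead of arith_uint256.
#include <cstdint>
#include <cstdio>

int main()
{
    const int64_t nPowTargetSpacing = 600;   // 10-minute blocks (mainnet value)
    const uint64_t workPerBlockAtTip = 1000; // arbitrary units
    const uint64_t workDelta = 144 * workPerBlockAtTip; // 'to' leads 'from' by 144 blocks

    // r = (to.nChainWork - from.nChainWork) * nPowTargetSpacing / GetBlockProof(tip)
    int64_t equivalentSeconds = int64_t(workDelta) * nPowTargetSpacing / int64_t(workPerBlockAtTip);
    std::printf("%lld seconds (= one day)\n", (long long)equivalentSeconds); // 86400
}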
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index 069601ab67..018169cfdc 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -532,7 +532,7 @@ int main(int argc, char *argv[])
// Now that QSettings are accessible, initialize translations
QTranslator qtTranslatorBase, qtTranslator, translatorBase, translator;
initTranslations(qtTranslatorBase, qtTranslator, translatorBase, translator);
- uiInterface.Translate.connect(Translate);
+ translationInterface.Translate.connect(Translate);
// Show help message immediately after parsing command-line options (for "-lang") and setting locale,
// but before showing splash screen.
diff --git a/src/qt/transactiondesc.cpp b/src/qt/transactiondesc.cpp
index 7214249435..4fffd03adf 100644
--- a/src/qt/transactiondesc.cpp
+++ b/src/qt/transactiondesc.cpp
@@ -14,7 +14,6 @@
#include "main.h"
#include "script/script.h"
#include "timedata.h"
-#include "ui_interface.h"
#include "util.h"
#include "wallet/db.h"
#include "wallet/wallet.h"
diff --git a/src/rpcclient.cpp b/src/rpcclient.cpp
index ad676f9edc..4b576b3707 100644
--- a/src/rpcclient.cpp
+++ b/src/rpcclient.cpp
@@ -7,7 +7,6 @@
#include "rpcprotocol.h"
#include "util.h"
-#include "ui_interface.h"
#include <set>
#include <stdint.h>
diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp
index 73a146f05c..1bda8a7ea1 100644
--- a/src/test/bloom_tests.cpp
+++ b/src/test/bloom_tests.cpp
@@ -8,6 +8,7 @@
#include "clientversion.h"
#include "key.h"
#include "merkleblock.h"
+#include "random.h"
#include "serialize.h"
#include "streams.h"
#include "uint256.h"
@@ -459,4 +460,81 @@ BOOST_AUTO_TEST_CASE(merkle_block_4_test_update_none)
BOOST_CHECK(!filter.contains(COutPoint(uint256S("0x02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041"), 0)));
}
+static std::vector<unsigned char> RandomData()
+{
+ uint256 r = GetRandHash();
+ return std::vector<unsigned char>(r.begin(), r.end());
+}
+
+BOOST_AUTO_TEST_CASE(rolling_bloom)
+{
+ // last-100-entry, 1% false positive:
+ CRollingBloomFilter rb1(100, 0.01, 0);
+
+ // Overfill:
+ static const int DATASIZE=399;
+ std::vector<unsigned char> data[DATASIZE];
+ for (int i = 0; i < DATASIZE; i++) {
+ data[i] = RandomData();
+ rb1.insert(data[i]);
+ }
+ // Last 100 guaranteed to be remembered:
+ for (int i = 299; i < DATASIZE; i++) {
+ BOOST_CHECK(rb1.contains(data[i]));
+ }
+
+ // false positive rate is 1%, so we should get about 100 hits if
+ // testing 10,000 random keys. We get worst-case false positive
+ // behavior when the filter is as full as possible, which is
+ // when we've inserted one minus an integer multiple of nElements*2.
+ unsigned int nHits = 0;
+ for (int i = 0; i < 10000; i++) {
+ if (rb1.contains(RandomData()))
+ ++nHits;
+ }
+ // Run test_bitcoin with --log_level=message to see BOOST_TEST_MESSAGEs:
+ BOOST_TEST_MESSAGE("RollingBloomFilter got " << nHits << " false positives (~100 expected)");
+
+ // Insanely unlikely to get a fp count outside this range:
+ BOOST_CHECK(nHits > 25);
+ BOOST_CHECK(nHits < 175);
+
+ BOOST_CHECK(rb1.contains(data[DATASIZE-1]));
+ rb1.clear();
+ BOOST_CHECK(!rb1.contains(data[DATASIZE-1]));
+
+ // Now roll through data, make sure last 100 entries
+ // are always remembered:
+ for (int i = 0; i < DATASIZE; i++) {
+ if (i >= 100)
+ BOOST_CHECK(rb1.contains(data[i-100]));
+ rb1.insert(data[i]);
+ }
+
+ // Insert 999 more random entries:
+ for (int i = 0; i < 999; i++) {
+ rb1.insert(RandomData());
+ }
+ // Sanity check to make sure the filter isn't just filling up:
+ nHits = 0;
+ for (int i = 0; i < DATASIZE; i++) {
+ if (rb1.contains(data[i]))
+ ++nHits;
+ }
+ // Expect about 5 false positives; more than 100 means
+ // something is definitely broken.
+ BOOST_TEST_MESSAGE("RollingBloomFilter got " << nHits << " false positives (~5 expected)");
+ BOOST_CHECK(nHits < 100);
+
+ // last-1000-entry, 0.01% false positive:
+ CRollingBloomFilter rb2(1000, 0.001, 0);
+ for (int i = 0; i < DATASIZE; i++) {
+ rb2.insert(data[i]);
+ }
+ // ... room for all of them:
+ for (int i = 0; i < DATASIZE; i++) {
+ BOOST_CHECK(rb2.contains(data[i]));
+ }
+}
+
BOOST_AUTO_TEST_SUITE_END()
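
The 25..175 acceptance band in rolling_bloom above is deliberately loose: 10,000 lookups at a 1% false-positive rate give an expected 100 hits with a standard deviation near 10, so the bounds sit roughly 7.5 standard deviations away. The short check below just evaluates those numbers.

// Quick check of the margins used in the rolling_bloom test above: expected
// false positives and standard deviation for 10,000 lookups at a 1% fp rate.
#include <cmath>
#include <cstdio>

int main()
{
    const double n = 10000.0, p = 0.01;
    const double mean = n * p;                       // 100
    const double sigma = std::sqrt(n * p * (1 - p)); // ~9.95
    std::printf("mean=%.1f sigma=%.2f lower=%.1f-sigma upper=%.1f-sigma\n",
                mean, sigma, (mean - 25) / sigma, (175 - mean) / sigma);
}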
diff --git a/src/test/data/script_invalid.json b/src/test/data/script_invalid.json
index 271bc70f73..7afa2abf49 100644
--- a/src/test/data/script_invalid.json
+++ b/src/test/data/script_invalid.json
@@ -141,6 +141,8 @@
["2 2 0 IF LSHIFT ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "LSHIFT disabled"],
["2 2 0 IF RSHIFT ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "RSHIFT disabled"],
+["", "EQUAL NOT", "P2SH,STRICTENC", "EQUAL must error when there are no stack items"],
+["0", "EQUAL NOT", "P2SH,STRICTENC", "EQUAL must error when there are not 2 stack items"],
["0 1","EQUAL", "P2SH,STRICTENC"],
["1 1 ADD", "0 EQUAL", "P2SH,STRICTENC"],
["11 1 ADD 12 SUB", "11 EQUAL", "P2SH,STRICTENC"],
@@ -368,6 +370,16 @@
["NOP", "HASH160 1", "P2SH,STRICTENC"],
["NOP", "HASH256 1", "P2SH,STRICTENC"],
+["Increase CHECKSIG and CHECKMULTISIG negative test coverage"],
+["", "CHECKSIG NOT", "STRICTENC", "CHECKSIG must error when there are no stack items"],
+["0", "CHECKSIG NOT", "STRICTENC", "CHECKSIG must error when there are not 2 stack items"],
+["", "CHECKMULTISIG NOT", "STRICTENC", "CHECKMULTISIG must error when there are no stack items"],
+["", "-1 CHECKMULTISIG NOT", "STRICTENC", "CHECKMULTISIG must error when the specified number of pubkeys is negative"],
+["", "1 CHECKMULTISIG NOT", "STRICTENC", "CHECKMULTISIG must error when there are not enough pubkeys on the stack"],
+["", "-1 0 CHECKMULTISIG NOT", "STRICTENC", "CHECKMULTISIG must error when the specified number of signatures is negative"],
+["", "1 'pk1' 1 CHECKMULTISIG NOT", "STRICTENC", "CHECKMULTISIG must error when there are not enough signatures on the stack"],
+["", "'dummy' 'sig1' 1 'pk1' 1 CHECKMULTISIG IF 1 ENDIF", "", "CHECKMULTISIG must push false to stack when signature is invalid when NOT in strict enc mode"],
+
["",
"0 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 
CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG",
"P2SH,STRICTENC",
@@ -426,7 +438,7 @@
["0x4d 0xFF00 0x111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", "DROP 1", "MINIMALDATA",
"PUSHDATA2 of 255 bytes minimally represented by PUSHDATA1"],
-["0x4f 0x00100000 0x11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", "DROP 1", "MINIMALDATA",
+["0x4e 0x00010000 0x11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", "DROP 1", "MINIMALDATA",
"PUSHDATA4 of 256 bytes minimally represented by PUSHDATA2"],
@@ -780,6 +792,12 @@
"P2SH(P2PK) with non-push scriptSig"
],
[
+ "0 0x47 0x304402205451ce65ad844dbb978b8bdedf5082e33b43cae8279c30f2c74d9e9ee49a94f802203fe95a7ccf74da7a232ee523ef4a53cb4d14bdd16289680cdb97a63819b8f42f01 0x46 0x304402205451ce65ad844dbb978b8bdedf5082e33b43cae8279c30f2c74d9e9ee49a94f802203fe95a7ccf74da7a232ee523ef4a53cb4d14bdd16289680cdb97a63819b8f42f",
+ "2 0x21 0x02a673638cb9587cb68ea08dbef685c6f2d2a751a8b3c6f2a7e9a4999e6e4bfaf5 0x21 0x02a673638cb9587cb68ea08dbef685c6f2d2a751a8b3c6f2a7e9a4999e6e4bfaf5 0x21 0x02a673638cb9587cb68ea08dbef685c6f2d2a751a8b3c6f2a7e9a4999e6e4bfaf5 3 CHECKMULTISIG",
+ "P2SH,STRICTENC",
+ "2-of-3 with one valid and one invalid signature due to parse error, nSigs > validSigs"
+],
+[
"11 0x47 0x304402200a5c6163f07b8d3b013c4d1d6dba25e780b39658d79ba37af7057a3b7f15ffa102201fd9b4eaa9943f734928b99a83592c2e7bf342ea2680f6a2bb705167966b742001",
"0x41 0x0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8 CHECKSIG",
"CLEANSTACK,P2SH",
diff --git a/src/test/mruset_tests.cpp b/src/test/mruset_tests.cpp
index bd4e9c1d38..2b68f8899e 100644
--- a/src/test/mruset_tests.cpp
+++ b/src/test/mruset_tests.cpp
@@ -17,83 +17,65 @@
using namespace std;
-class mrutester
-{
-private:
- mruset<int> mru;
- std::set<int> set;
-
-public:
- mrutester() { mru.max_size(MAX_SIZE); }
- int size() const { return set.size(); }
-
- void insert(int n)
- {
- mru.insert(n);
- set.insert(n);
- BOOST_CHECK(mru == set);
- }
-};
-
BOOST_FIXTURE_TEST_SUITE(mruset_tests, BasicTestingSetup)
-// Test that an mruset behaves like a set, as long as no more than MAX_SIZE elements are in it
-BOOST_AUTO_TEST_CASE(mruset_like_set)
-{
-
- for (int nTest=0; nTest<NUM_TESTS; nTest++)
- {
- mrutester tester;
- while (tester.size() < MAX_SIZE)
- tester.insert(GetRandInt(2 * MAX_SIZE));
- }
-
-}
-
-// Test that an mruset's size never exceeds its max_size
-BOOST_AUTO_TEST_CASE(mruset_limited_size)
+BOOST_AUTO_TEST_CASE(mruset_test)
{
- for (int nTest=0; nTest<NUM_TESTS; nTest++)
- {
- mruset<int> mru(MAX_SIZE);
- for (int nAction=0; nAction<3*MAX_SIZE; nAction++)
- {
- int n = GetRandInt(2 * MAX_SIZE);
- mru.insert(n);
- BOOST_CHECK(mru.size() <= MAX_SIZE);
+ // The mruset being tested.
+ mruset<int> mru(5000);
+
+ // Run the test 10 times.
+ for (int test = 0; test < 10; test++) {
+ // Reset mru.
+ mru.clear();
+
+ // A deque + set to simulate the mruset.
+ std::deque<int> rep;
+ std::set<int> all;
+
+ // Insert 10000 random integers below 15000.
+ for (int j=0; j<10000; j++) {
+ int add = GetRandInt(15000);
+ mru.insert(add);
+
+ // Add the number to rep/all as well.
+ if (all.count(add) == 0) {
+ all.insert(add);
+ rep.push_back(add);
+ if (all.size() == 5001) {
+ all.erase(rep.front());
+ rep.pop_front();
+ }
+ }
+
+ // Do a full comparison between mru and the simulated mru every 1000 and every 5001 elements.
+ if (j % 1000 == 0 || j % 5001 == 0) {
+ mruset<int> mru2 = mru; // Also try making a copy
+
+ // Check that all elements that should be in there, are in there.
+ BOOST_FOREACH(int x, rep) {
+ BOOST_CHECK(mru.count(x));
+ BOOST_CHECK(mru2.count(x));
+ }
+
+ // Check that all elements that are in there, should be in there.
+ BOOST_FOREACH(int x, mru) {
+ BOOST_CHECK(all.count(x));
+ }
+
+ // Same membership check for the copy, mru2.
+ BOOST_FOREACH(int x, mru2) {
+ BOOST_CHECK(all.count(x));
+ }
+
+ for (int t = 0; t < 10; t++) {
+ int r = GetRandInt(15000);
+ BOOST_CHECK(all.count(r) == mru.count(r));
+ BOOST_CHECK(all.count(r) == mru2.count(r));
+ }
+ }
}
}
}
-// 16-bit permutation function
-int static permute(int n)
-{
- // hexadecimals of pi; verified to be linearly independent
- static const int table[16] = {0x243F, 0x6A88, 0x85A3, 0x08D3, 0x1319, 0x8A2E, 0x0370, 0x7344,
- 0xA409, 0x3822, 0x299F, 0x31D0, 0x082E, 0xFA98, 0xEC4E, 0x6C89};
-
- int ret = 0;
- for (int bit=0; bit<16; bit++)
- if (n & (1<<bit))
- ret ^= table[bit];
-
- return ret;
-}
-
-// Test that an mruset acts like a moving window, if no duplicate elements are added
-BOOST_AUTO_TEST_CASE(mruset_window)
-{
- mruset<int> mru(MAX_SIZE);
- for (int n=0; n<10*MAX_SIZE; n++)
- {
- mru.insert(permute(n));
-
- set<int> tester;
- for (int m=max(0,n-MAX_SIZE+1); m<=n; m++)
- tester.insert(permute(m));
-
- BOOST_CHECK(mru == tester);
- }
-}
-
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/pow_tests.cpp b/src/test/pow_tests.cpp
index 4ce1591c35..a436749287 100644
--- a/src/test/pow_tests.cpp
+++ b/src/test/pow_tests.cpp
@@ -69,4 +69,28 @@ BOOST_AUTO_TEST_CASE(get_next_work_upper_limit_actual)
BOOST_CHECK_EQUAL(CalculateNextWorkRequired(&pindexLast, nLastRetargetTime, params), 0x1d00e1fd);
}
+BOOST_AUTO_TEST_CASE(GetBlockProofEquivalentTime_test)
+{
+ SelectParams(CBaseChainParams::MAIN);
+ const Consensus::Params& params = Params().GetConsensus();
+
+ std::vector<CBlockIndex> blocks(10000);
+ for (int i = 0; i < 10000; i++) {
+ blocks[i].pprev = i ? &blocks[i - 1] : NULL;
+ blocks[i].nHeight = i;
+ blocks[i].nTime = 1269211443 + i * params.nPowTargetSpacing;
+ blocks[i].nBits = 0x207fffff; /* target 0x7fffff000... */
+ blocks[i].nChainWork = i ? blocks[i - 1].nChainWork + GetBlockProof(blocks[i - 1]) : arith_uint256(0);
+ }
+
+ for (int j = 0; j < 1000; j++) {
+ CBlockIndex *p1 = &blocks[GetRand(10000)];
+ CBlockIndex *p2 = &blocks[GetRand(10000)];
+ CBlockIndex *p3 = &blocks[GetRand(10000)];
+
+ int64_t tdiff = GetBlockProofEquivalentTime(*p1, *p2, *p3, params);
+ BOOST_CHECK_EQUAL(tdiff, p1->GetBlockTime() - p2->GetBlockTime());
+ }
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp
index a2cb78c989..4057eccbed 100644
--- a/src/test/test_bitcoin.cpp
+++ b/src/test/test_bitcoin.cpp
@@ -20,7 +20,7 @@
#include <boost/test/unit_test.hpp>
#include <boost/thread.hpp>
-CClientUIInterface uiInterface;
+CClientUIInterface uiInterface; // Declared but not defined in ui_interface.h
CWallet* pwalletMain;
extern bool fPrintToConsole;
diff --git a/src/ui_interface.h b/src/ui_interface.h
index 3f11a1ddab..32a92a4b81 100644
--- a/src/ui_interface.h
+++ b/src/ui_interface.h
@@ -78,9 +78,6 @@ public:
/** Progress message during initialization. */
boost::signals2::signal<void (const std::string &message)> InitMessage;
- /** Translate a message to the native language of the user. */
- boost::signals2::signal<std::string (const char* psz)> Translate;
-
/** Number of network connections changed. */
boost::signals2::signal<void (int newNumConnections)> NotifyNumConnectionsChanged;
@@ -102,14 +99,4 @@ public:
extern CClientUIInterface uiInterface;
-/**
- * Translation function: Call Translate signal on UI interface, which returns a boost::optional result.
- * If no translation slot is registered, nothing is returned, and simply return the input.
- */
-inline std::string _(const char* psz)
-{
- boost::optional<std::string> rv = uiInterface.Translate(psz);
- return rv ? (*rv) : psz;
-}
-
#endif // BITCOIN_UI_INTERFACE_H
diff --git a/src/util.cpp b/src/util.cpp
index 1bb7df7085..c9e8242d47 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -7,7 +7,7 @@
#include "config/bitcoin-config.h"
#endif
-#if (defined(__FreeBSD__) || defined(__OpenBSD__))
+#if (defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__))
#include <pthread.h>
#include <pthread_np.h>
#endif
@@ -109,6 +109,7 @@ string strMiscWarning;
bool fLogTimestamps = false;
bool fLogIPs = false;
volatile bool fReopenDebugLog = false;
+CTranslationInterface translationInterface;
/** Init OpenSSL library multithreading support */
static CCriticalSection** ppmutexOpenSSL;
@@ -712,7 +713,7 @@ void RenameThread(const char* name)
#if defined(PR_SET_NAME)
// Only the first 15 characters are used (16 - NUL terminator)
::prctl(PR_SET_NAME, name, 0, 0, 0);
-#elif (defined(__FreeBSD__) || defined(__OpenBSD__))
+#elif (defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__))
pthread_set_name_np(pthread_self(), name);
#elif defined(MAC_OSX)
diff --git a/src/util.h b/src/util.h
index 9b5a4153dd..483d9d7858 100644
--- a/src/util.h
+++ b/src/util.h
@@ -25,8 +25,17 @@
#include <vector>
#include <boost/filesystem/path.hpp>
+#include <boost/signals2/signal.hpp>
#include <boost/thread/exceptions.hpp>
+/** Signals for translation. */
+class CTranslationInterface
+{
+public:
+ /** Translate a message to the native language of the user. */
+ boost::signals2::signal<std::string (const char* psz)> Translate;
+};
+
extern std::map<std::string, std::string> mapArgs;
extern std::map<std::string, std::vector<std::string> > mapMultiArgs;
extern bool fDebug;
@@ -37,6 +46,17 @@ extern std::string strMiscWarning;
extern bool fLogTimestamps;
extern bool fLogIPs;
extern volatile bool fReopenDebugLog;
+extern CTranslationInterface translationInterface;
+
+/**
+ * Translation function: calls the Translate signal on the translation interface,
+ * which returns a boost::optional result. If no translation slot is registered,
+ * the optional is empty and the input string is returned unchanged.
+ */
+inline std::string _(const char* psz)
+{
+ boost::optional<std::string> rv = translationInterface.Translate(psz);
+ return rv ? (*rv) : psz;
+}
void SetupEnvironment();
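
The Translate signal moves from CClientUIInterface into the new CTranslationInterface in util.h, so _() no longer pulls in the UI headers. The sketch below shows the pattern in isolation: an unconnected signal yields an empty boost::optional and _() falls back to the input, while a connected slot (a toy lambda here, standing in for the Qt translator) supplies the translation.

// Minimal sketch of the translation-signal pattern; not the real header.
#include <boost/optional.hpp>
#include <boost/signals2/signal.hpp>
#include <iostream>
#include <string>

class CTranslationInterface {
public:
    boost::signals2::signal<std::string (const char* psz)> Translate;
};

CTranslationInterface translationInterface;

inline std::string _(const char* psz)
{
    // The default combiner returns an empty optional when no slot is connected.
    boost::optional<std::string> rv = translationInterface.Translate(psz);
    return rv ? (*rv) : psz;
}

int main()
{
    std::cout << _("hello") << "\n";              // no slot: returns the input
    translationInterface.Translate.connect([](const char* s) {
        return std::string("[translated] ") + s;  // toy slot standing in for Qt
    });
    std::cout << _("hello") << "\n";              // slot result is used
}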
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index c31c09d922..dd5240e3c0 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -2330,7 +2330,7 @@ Value listunspent(const Array& params, bool fHelp)
if (pk.IsPayToScriptHash()) {
CTxDestination address;
if (ExtractDestination(pk, address)) {
- const CScriptID& hash = boost::get<const CScriptID&>(address);
+ const CScriptID& hash = boost::get<CScriptID>(address);
CScript redeemScript;
if (pwalletMain->GetCScript(hash, redeemScript))
entry.push_back(Pair("redeemScript", HexStr(redeemScript.begin(), redeemScript.end())));
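
The listunspent fix above asks boost::get for the alternative type itself (CScriptID) rather than a reference-qualified type; the result can still be bound to a const reference at the call site. A self-contained sketch with stand-in types (NoDestination/ScriptHash instead of the real CTxDestination alternatives) follows.

// Sketch of the boost::get fix above, on stand-in variant types.
#include <boost/variant.hpp>
#include <cstdio>
#include <string>

struct NoDestination {};
struct ScriptHash { std::string hex; };
typedef boost::variant<NoDestination, ScriptHash> Destination;

int main()
{
    Destination address = ScriptHash{"a914..."};
    // Request the alternative by its plain type; the returned reference can
    // still be bound to a const reference at the call site.
    const ScriptHash& hash = boost::get<ScriptHash>(address);
    std::printf("%s\n", hash.hex.c_str());
}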
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index b57955dae2..cb20998d26 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -1101,6 +1101,9 @@ void CWallet::ReacceptWalletTransactions()
if (!fBroadcastTransactions)
return;
LOCK2(cs_main, cs_wallet);
+ std::map<int64_t, CWalletTx*> mapSorted;
+
+ // Sort pending wallet transactions based on their initial wallet insertion order
BOOST_FOREACH(PAIRTYPE(const uint256, CWalletTx)& item, mapWallet)
{
const uint256& wtxid = item.first;
@@ -1109,13 +1112,19 @@ void CWallet::ReacceptWalletTransactions()
int nDepth = wtx.GetDepthInMainChain();
- if (!wtx.IsCoinBase() && nDepth < 0)
- {
- // Try to add to memory pool
- LOCK(mempool.cs);
- wtx.AcceptToMemoryPool(false);
+ if (!wtx.IsCoinBase() && nDepth < 0) {
+ mapSorted.insert(std::make_pair(wtx.nOrderPos, &wtx));
}
}
+
+ // Try to add wallet transactions to memory pool
+ BOOST_FOREACH(PAIRTYPE(const int64_t, CWalletTx*)& item, mapSorted)
+ {
+ CWalletTx& wtx = *(item.second);
+
+ LOCK(mempool.cs);
+ wtx.AcceptToMemoryPool(false);
+ }
}
bool CWalletTx::RelayWalletTransaction()
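
ReacceptWalletTransactions now stages the unconfirmed, non-coinbase transactions in a std::map keyed by nOrderPos and resubmits them in that order, so transactions re-enter the mempool in their original wallet insertion order (parents before children, assuming parents were recorded first). The sketch below shows the sort-via-map pattern with a stand-in Tx struct instead of CWalletTx.

// Sketch of the reordering pattern above: collect items into a std::map keyed
// by their original insertion order, then iterate; std::map yields keys ascending.
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct Tx {
    int64_t nOrderPos;
    std::string name;
};

int main()
{
    std::vector<Tx> wallet = { {7, "child"}, {3, "parent"}, {9, "unrelated"} };

    std::map<int64_t, Tx*> mapSorted;
    for (Tx& tx : wallet)
        mapSorted.insert(std::make_pair(tx.nOrderPos, &tx));

    // Resubmission would happen here, oldest insertion first.
    for (std::map<int64_t, Tx*>::const_iterator it = mapSorted.begin(); it != mapSorted.end(); ++it)
        std::printf("%lld %s\n", (long long)it->first, it->second->name.c_str());
}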