Diffstat (limited to 'qa/rpc-tests/test_framework')
-rw-r--r-- | qa/rpc-tests/test_framework/__init__.py | 0
-rw-r--r-- | qa/rpc-tests/test_framework/bignum.py | 102
-rw-r--r-- | qa/rpc-tests/test_framework/blockstore.py | 127
-rw-r--r-- | qa/rpc-tests/test_framework/blocktools.py | 65
-rwxr-xr-x | qa/rpc-tests/test_framework/comptool.py | 341
-rwxr-xr-x | qa/rpc-tests/test_framework/mininode.py | 1256
-rw-r--r-- | qa/rpc-tests/test_framework/netutil.py | 139
-rw-r--r-- | qa/rpc-tests/test_framework/python-bitcoinrpc/bitcoinrpc/.gitignore | 1
-rw-r--r-- | qa/rpc-tests/test_framework/python-bitcoinrpc/bitcoinrpc/__init__.py | 0
-rw-r--r-- | qa/rpc-tests/test_framework/python-bitcoinrpc/bitcoinrpc/authproxy.py | 156
-rw-r--r-- | qa/rpc-tests/test_framework/python-bitcoinrpc/setup.py | 15
-rw-r--r-- | qa/rpc-tests/test_framework/script.py | 896
-rw-r--r-- | qa/rpc-tests/test_framework/socks5.py | 160
-rwxr-xr-x | qa/rpc-tests/test_framework/test_framework.py | 180
-rw-r--r-- | qa/rpc-tests/test_framework/util.py | 357
15 files changed, 3795 insertions, 0 deletions
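
As a rough orientation for the modules listed above -- a hedged sketch, not code from this change -- a comparison-style test typically wires them together as follows: blocktools builds test blocks, comptool drives them over mininode P2P connections, and the BlockStore/TxStore keep what has been served to the nodes. The helper function name (run_comparison_test) and its arguments are illustrative; a real test would obtain its nodes and datadir from test_framework.py/util.py.

    # Illustrative glue code only; mirrors how comptool and mininode are
    # intended to be combined, using the flat imports the framework itself uses.
    from comptool import TestManager
    from mininode import NetworkThread

    def run_comparison_test(test_generator, nodes, datadir):
        test = TestManager(test_generator, datadir)  # shared BlockStore/TxStore live in datadir
        test.add_all_connections(nodes)              # one NodeConn + TestNode per bitcoind under test
        NetworkThread().start()                      # start the asyncore networking loop
        test.run()                                   # feed TestInstances and compare node behavior
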
diff --git a/qa/rpc-tests/test_framework/__init__.py b/qa/rpc-tests/test_framework/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/qa/rpc-tests/test_framework/__init__.py diff --git a/qa/rpc-tests/test_framework/bignum.py b/qa/rpc-tests/test_framework/bignum.py new file mode 100644 index 0000000000..b0c58ccd47 --- /dev/null +++ b/qa/rpc-tests/test_framework/bignum.py @@ -0,0 +1,102 @@ +# +# +# bignum.py +# +# This file is copied from python-bitcoinlib. +# +# Distributed under the MIT/X11 software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +# + +"""Bignum routines""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +import struct + + +# generic big endian MPI format + +def bn_bytes(v, have_ext=False): + ext = 0 + if have_ext: + ext = 1 + return ((v.bit_length()+7)//8) + ext + +def bn2bin(v): + s = bytearray() + i = bn_bytes(v) + while i > 0: + s.append((v >> ((i-1) * 8)) & 0xff) + i -= 1 + return s + +def bin2bn(s): + l = 0 + for ch in s: + l = (l << 8) | ch + return l + +def bn2mpi(v): + have_ext = False + if v.bit_length() > 0: + have_ext = (v.bit_length() & 0x07) == 0 + + neg = False + if v < 0: + neg = True + v = -v + + s = struct.pack(b">I", bn_bytes(v, have_ext)) + ext = bytearray() + if have_ext: + ext.append(0) + v_bin = bn2bin(v) + if neg: + if have_ext: + ext[0] |= 0x80 + else: + v_bin[0] |= 0x80 + return s + ext + v_bin + +def mpi2bn(s): + if len(s) < 4: + return None + s_size = bytes(s[:4]) + v_len = struct.unpack(b">I", s_size)[0] + if len(s) != (v_len + 4): + return None + if v_len == 0: + return 0 + + v_str = bytearray(s[4:]) + neg = False + i = v_str[0] + if i & 0x80: + neg = True + i &= ~0x80 + v_str[0] = i + + v = bin2bn(v_str) + + if neg: + return -v + return v + +# bitcoin-specific little endian format, with implicit size +def mpi2vch(s): + r = s[4:] # strip size + r = r[::-1] # reverse string, converting BE->LE + return r + +def bn2vch(v): + return bytes(mpi2vch(bn2mpi(v))) + +def vch2mpi(s): + r = struct.pack(b">I", len(s)) # size + r += s[::-1] # reverse string, converting LE->BE + return r + +def vch2bn(s): + return mpi2bn(vch2mpi(s)) + diff --git a/qa/rpc-tests/test_framework/blockstore.py b/qa/rpc-tests/test_framework/blockstore.py new file mode 100644 index 0000000000..c57b6df81b --- /dev/null +++ b/qa/rpc-tests/test_framework/blockstore.py @@ -0,0 +1,127 @@ +# BlockStore: a helper class that keeps a map of blocks and implements +# helper functions for responding to getheaders and getdata, +# and for constructing a getheaders message +# + +from mininode import * +import dbm + +class BlockStore(object): + def __init__(self, datadir): + self.blockDB = dbm.open(datadir + "/blocks", 'c') + self.currentBlock = 0L + + def close(self): + self.blockDB.close() + + def get(self, blockhash): + serialized_block = None + try: + serialized_block = self.blockDB[repr(blockhash)] + except KeyError: + return None + f = cStringIO.StringIO(serialized_block) + ret = CBlock() + ret.deserialize(f) + ret.calc_sha256() + return ret + + # Note: this pulls full blocks out of the database just to retrieve + # the headers -- perhaps we could keep a separate data structure + # to avoid this overhead. 
+ def headers_for(self, locator, hash_stop, current_tip=None): + if current_tip is None: + current_tip = self.currentBlock + current_block = self.get(current_tip) + if current_block is None: + return None + + response = msg_headers() + headersList = [ CBlockHeader(current_block) ] + maxheaders = 2000 + while (headersList[0].sha256 not in locator.vHave): + prevBlockHash = headersList[0].hashPrevBlock + prevBlock = self.get(prevBlockHash) + if prevBlock is not None: + headersList.insert(0, CBlockHeader(prevBlock)) + else: + break + headersList = headersList[:maxheaders] # truncate if we have too many + hashList = [x.sha256 for x in headersList] + index = len(headersList) + if (hash_stop in hashList): + index = hashList.index(hash_stop)+1 + response.headers = headersList[:index] + return response + + def add_block(self, block): + block.calc_sha256() + try: + self.blockDB[repr(block.sha256)] = bytes(block.serialize()) + except TypeError as e: + print "Unexpected error: ", sys.exc_info()[0], e.args + self.currentBlock = block.sha256 + + def get_blocks(self, inv): + responses = [] + for i in inv: + if (i.type == 2): # MSG_BLOCK + block = self.get(i.hash) + if block is not None: + responses.append(msg_block(block)) + return responses + + def get_locator(self, current_tip=None): + if current_tip is None: + current_tip = self.currentBlock + r = [] + counter = 0 + step = 1 + lastBlock = self.get(current_tip) + while lastBlock is not None: + r.append(lastBlock.hashPrevBlock) + for i in range(step): + lastBlock = self.get(lastBlock.hashPrevBlock) + if lastBlock is None: + break + counter += 1 + if counter > 10: + step *= 2 + locator = CBlockLocator() + locator.vHave = r + return locator + +class TxStore(object): + def __init__(self, datadir): + self.txDB = dbm.open(datadir + "/transactions", 'c') + + def close(self): + self.txDB.close() + + def get(self, txhash): + serialized_tx = None + try: + serialized_tx = self.txDB[repr(txhash)] + except KeyError: + return None + f = cStringIO.StringIO(serialized_tx) + ret = CTransaction() + ret.deserialize(f) + ret.calc_sha256() + return ret + + def add_transaction(self, tx): + tx.calc_sha256() + try: + self.txDB[repr(tx.sha256)] = bytes(tx.serialize()) + except TypeError as e: + print "Unexpected error: ", sys.exc_info()[0], e.args + + def get_transactions(self, inv): + responses = [] + for i in inv: + if (i.type == 1): # MSG_TX + tx = self.get(i.hash) + if tx is not None: + responses.append(msg_tx(tx)) + return responses diff --git a/qa/rpc-tests/test_framework/blocktools.py b/qa/rpc-tests/test_framework/blocktools.py new file mode 100644 index 0000000000..f397fe7cd6 --- /dev/null +++ b/qa/rpc-tests/test_framework/blocktools.py @@ -0,0 +1,65 @@ +# blocktools.py - utilities for manipulating blocks and transactions +# +# Distributed under the MIT/X11 software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +# + +from mininode import * +from script import CScript, CScriptOp + +# Create a block (with regtest difficulty) +def create_block(hashprev, coinbase, nTime=None): + block = CBlock() + if nTime is None: + import time + block.nTime = int(time.time()+600) + else: + block.nTime = nTime + block.hashPrevBlock = hashprev + block.nBits = 0x207fffff # Will break after a difficulty adjustment... 
+ block.vtx.append(coinbase) + block.hashMerkleRoot = block.calc_merkle_root() + block.calc_sha256() + return block + +def serialize_script_num(value): + r = bytearray(0) + if value == 0: + return r + neg = value < 0 + absvalue = -value if neg else value + while (absvalue): + r.append(chr(absvalue & 0xff)) + absvalue >>= 8 + if r[-1] & 0x80: + r.append(0x80 if neg else 0) + elif neg: + r[-1] |= 0x80 + return r + +counter=1 +# Create an anyone-can-spend coinbase transaction, assuming no miner fees +def create_coinbase(heightAdjust = 0): + global counter + coinbase = CTransaction() + coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), + ser_string(serialize_script_num(counter+heightAdjust)), 0xffffffff)) + counter += 1 + coinbaseoutput = CTxOut() + coinbaseoutput.nValue = 50*100000000 + halvings = int((counter+heightAdjust)/150) # regtest + coinbaseoutput.nValue >>= halvings + coinbaseoutput.scriptPubKey = "" + coinbase.vout = [ coinbaseoutput ] + coinbase.calc_sha256() + return coinbase + +# Create a transaction with an anyone-can-spend output, that spends the +# nth output of prevtx. +def create_transaction(prevtx, n, sig, value): + tx = CTransaction() + assert(n < len(prevtx.vout)) + tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff)) + tx.vout.append(CTxOut(value, "")) + tx.calc_sha256() + return tx diff --git a/qa/rpc-tests/test_framework/comptool.py b/qa/rpc-tests/test_framework/comptool.py new file mode 100755 index 0000000000..23a979250c --- /dev/null +++ b/qa/rpc-tests/test_framework/comptool.py @@ -0,0 +1,341 @@ +#!/usr/bin/env python2 +# +# Distributed under the MIT/X11 software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +# + +from mininode import * +from blockstore import BlockStore, TxStore +from util import p2p_port + +''' +This is a tool for comparing two or more bitcoinds to each other +using a script provided. + +To use, create a class that implements get_tests(), and pass it in +as the test generator to TestManager. get_tests() should be a python +generator that returns TestInstance objects. See below for definition. 
+''' + +# TestNode behaves as follows: +# Configure with a BlockStore and TxStore +# on_inv: log the message but don't request +# on_headers: log the chain tip +# on_pong: update ping response map (for synchronization) +# on_getheaders: provide headers via BlockStore +# on_getdata: provide blocks via BlockStore + +global mininode_lock + +class TestNode(NodeConnCB): + + def __init__(self, block_store, tx_store): + NodeConnCB.__init__(self) + self.create_callback_map() + self.conn = None + self.bestblockhash = None + self.block_store = block_store + self.block_request_map = {} + self.tx_store = tx_store + self.tx_request_map = {} + + # When the pingmap is non-empty we're waiting for + # a response + self.pingMap = {} + self.lastInv = [] + + def add_connection(self, conn): + self.conn = conn + + def on_headers(self, conn, message): + if len(message.headers) > 0: + best_header = message.headers[-1] + best_header.calc_sha256() + self.bestblockhash = best_header.sha256 + + def on_getheaders(self, conn, message): + response = self.block_store.headers_for(message.locator, message.hashstop) + if response is not None: + conn.send_message(response) + + def on_getdata(self, conn, message): + [conn.send_message(r) for r in self.block_store.get_blocks(message.inv)] + [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)] + + for i in message.inv: + if i.type == 1: + self.tx_request_map[i.hash] = True + elif i.type == 2: + self.block_request_map[i.hash] = True + + def on_inv(self, conn, message): + self.lastInv = [x.hash for x in message.inv] + + def on_pong(self, conn, message): + try: + del self.pingMap[message.nonce] + except KeyError: + raise AssertionError("Got pong for unknown ping [%s]" % repr(message)) + + def send_inv(self, obj): + mtype = 2 if isinstance(obj, CBlock) else 1 + self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)])) + + def send_getheaders(self): + # We ask for headers from their last tip. + m = msg_getheaders() + m.locator = self.block_store.get_locator(self.bestblockhash) + self.conn.send_message(m) + + # This assumes BIP31 + def send_ping(self, nonce): + self.pingMap[nonce] = True + self.conn.send_message(msg_ping(nonce)) + + def received_ping_response(self, nonce): + return nonce not in self.pingMap + + def send_mempool(self): + self.lastInv = [] + self.conn.send_message(msg_mempool()) + +# TestInstance: +# +# Instances of these are generated by the test generator, and fed into the +# comptool. +# +# "blocks_and_transactions" should be an array of [obj, True/False/None]: +# - obj is either a CBlock or a CTransaction, and +# - the second value indicates whether the object should be accepted +# into the blockchain or mempool (for tests where we expect a certain +# answer), or "None" if we don't expect a certain answer and are just +# comparing the behavior of the nodes being tested. +# sync_every_block: if True, then each block will be inv'ed, synced, and +# nodes will be tested based on the outcome for the block. If False, +# then inv's accumulate until all blocks are processed (or max inv size +# is reached) and then sent out in one inv message. Then the final block +# will be synced across all connections, and the outcome of the final +# block will be tested. +# sync_every_tx: analagous to behavior for sync_every_block, except if outcome +# on the final tx is None, then contents of entire mempool are compared +# across all connections. (If outcome of final tx is specified as true +# or false, then only the last tx is tested against outcome.) 
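
To make the TestInstance contract above concrete, here is a hypothetical get_tests() generator of the kind the comptool docstring asks for -- a sketch only, where self.tip, self.block_time and spend_tx are placeholders and create_block/create_coinbase come from blocktools.py:

    # Illustrative test generator (not part of this diff).
    class ExampleConformanceTest(object):
        def get_tests(self):
            # A block we expect every node to accept.
            block = create_block(self.tip, create_coinbase(), self.block_time)
            block.solve()
            yield TestInstance([[block, True]])
            # A transaction whose acceptance we merely compare across nodes
            # (outcome None), syncing the mempool after each tx.
            yield TestInstance([[spend_tx, None]], sync_every_tx=True)
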
+ +class TestInstance(object): + def __init__(self, objects=[], sync_every_block=True, sync_every_tx=False): + self.blocks_and_transactions = objects + self.sync_every_block = sync_every_block + self.sync_every_tx = sync_every_tx + +class TestManager(object): + + def __init__(self, testgen, datadir): + self.test_generator = testgen + self.connections = [] + self.block_store = BlockStore(datadir) + self.tx_store = TxStore(datadir) + self.ping_counter = 1 + + def add_all_connections(self, nodes): + for i in range(len(nodes)): + # Create a p2p connection to each node + self.connections.append(NodeConn('127.0.0.1', p2p_port(i), + nodes[i], TestNode(self.block_store, self.tx_store))) + # Make sure the TestNode (callback class) has a reference to its + # associated NodeConn + self.connections[-1].cb.add_connection(self.connections[-1]) + + def wait_for_verack(self): + sleep_time = 0.05 + max_tries = 10 / sleep_time # Wait at most 10 seconds + while max_tries > 0: + done = True + with mininode_lock: + for c in self.connections: + if c.cb.verack_received is False: + done = False + break + if done: + break + time.sleep(sleep_time) + + def wait_for_pings(self, counter): + received_pongs = False + while received_pongs is not True: + time.sleep(0.05) + received_pongs = True + with mininode_lock: + for c in self.connections: + if c.cb.received_ping_response(counter) is not True: + received_pongs = False + break + + # sync_blocks: Wait for all connections to request the blockhash given + # then send get_headers to find out the tip of each node, and synchronize + # the response by using a ping (and waiting for pong with same nonce). + def sync_blocks(self, blockhash, num_blocks): + # Wait for nodes to request block (50ms sleep * 20 tries * num_blocks) + max_tries = 20*num_blocks + while max_tries > 0: + with mininode_lock: + results = [ blockhash in c.cb.block_request_map and + c.cb.block_request_map[blockhash] for c in self.connections ] + if False not in results: + break + time.sleep(0.05) + max_tries -= 1 + + # --> error if not requested + if max_tries == 0: + # print [ c.cb.block_request_map for c in self.connections ] + raise AssertionError("Not all nodes requested block") + # --> Answer request (we did this inline!) + + # Send getheaders message + [ c.cb.send_getheaders() for c in self.connections ] + + # Send ping and wait for response -- synchronization hack + [ c.cb.send_ping(self.ping_counter) for c in self.connections ] + self.wait_for_pings(self.ping_counter) + self.ping_counter += 1 + + # Analogous to sync_block (see above) + def sync_transaction(self, txhash, num_events): + # Wait for nodes to request transaction (50ms sleep * 20 tries * num_events) + max_tries = 20*num_events + while max_tries > 0: + with mininode_lock: + results = [ txhash in c.cb.tx_request_map and + c.cb.tx_request_map[txhash] for c in self.connections ] + if False not in results: + break + time.sleep(0.05) + max_tries -= 1 + + # --> error if not requested + if max_tries == 0: + # print [ c.cb.tx_request_map for c in self.connections ] + raise AssertionError("Not all nodes requested transaction") + # --> Answer request (we did this inline!) 
+ + # Get the mempool + [ c.cb.send_mempool() for c in self.connections ] + + # Send ping and wait for response -- synchronization hack + [ c.cb.send_ping(self.ping_counter) for c in self.connections ] + self.wait_for_pings(self.ping_counter) + self.ping_counter += 1 + + # Sort inv responses from each node + with mininode_lock: + [ c.cb.lastInv.sort() for c in self.connections ] + + # Verify that the tip of each connection all agree with each other, and + # with the expected outcome (if given) + def check_results(self, blockhash, outcome): + with mininode_lock: + for c in self.connections: + if outcome is None: + if c.cb.bestblockhash != self.connections[0].cb.bestblockhash: + return False + elif ((c.cb.bestblockhash == blockhash) != outcome): + # print c.cb.bestblockhash, blockhash, outcome + return False + return True + + # Either check that the mempools all agree with each other, or that + # txhash's presence in the mempool matches the outcome specified. + # This is somewhat of a strange comparison, in that we're either comparing + # a particular tx to an outcome, or the entire mempools altogether; + # perhaps it would be useful to add the ability to check explicitly that + # a particular tx's existence in the mempool is the same across all nodes. + def check_mempool(self, txhash, outcome): + with mininode_lock: + for c in self.connections: + if outcome is None: + # Make sure the mempools agree with each other + if c.cb.lastInv != self.connections[0].cb.lastInv: + # print c.rpc.getrawmempool() + return False + elif ((txhash in c.cb.lastInv) != outcome): + # print c.rpc.getrawmempool(), c.cb.lastInv + return False + return True + + def run(self): + # Wait until verack is received + self.wait_for_verack() + + test_number = 1 + for test_instance in self.test_generator.get_tests(): + # We use these variables to keep track of the last block + # and last transaction in the tests, which are used + # if we're not syncing on every block or every tx. + [ block, block_outcome ] = [ None, None ] + [ tx, tx_outcome ] = [ None, None ] + invqueue = [] + + for b_or_t, outcome in test_instance.blocks_and_transactions: + # Determine if we're dealing with a block or tx + if isinstance(b_or_t, CBlock): # Block test runner + block = b_or_t + block_outcome = outcome + # Add to shared block_store, set as current block + with mininode_lock: + self.block_store.add_block(block) + for c in self.connections: + c.cb.block_request_map[block.sha256] = False + # Either send inv's to each node and sync, or add + # to invqueue for later inv'ing. 
+ if (test_instance.sync_every_block): + [ c.cb.send_inv(block) for c in self.connections ] + self.sync_blocks(block.sha256, 1) + if (not self.check_results(block.sha256, outcome)): + raise AssertionError("Test failed at test %d" % test_number) + else: + invqueue.append(CInv(2, block.sha256)) + else: # Tx test runner + assert(isinstance(b_or_t, CTransaction)) + tx = b_or_t + tx_outcome = outcome + # Add to shared tx store and clear map entry + with mininode_lock: + self.tx_store.add_transaction(tx) + for c in self.connections: + c.cb.tx_request_map[tx.sha256] = False + # Again, either inv to all nodes or save for later + if (test_instance.sync_every_tx): + [ c.cb.send_inv(tx) for c in self.connections ] + self.sync_transaction(tx.sha256, 1) + if (not self.check_mempool(tx.sha256, outcome)): + raise AssertionError("Test failed at test %d" % test_number) + else: + invqueue.append(CInv(1, tx.sha256)) + # Ensure we're not overflowing the inv queue + if len(invqueue) == MAX_INV_SZ: + [ c.send_message(msg_inv(invqueue)) for c in self.connections ] + invqueue = [] + + # Do final sync if we weren't syncing on every block or every tx. + if (not test_instance.sync_every_block and block is not None): + if len(invqueue) > 0: + [ c.send_message(msg_inv(invqueue)) for c in self.connections ] + invqueue = [] + self.sync_blocks(block.sha256, + len(test_instance.blocks_and_transactions)) + if (not self.check_results(block.sha256, block_outcome)): + raise AssertionError("Block test failed at test %d" % test_number) + if (not test_instance.sync_every_tx and tx is not None): + if len(invqueue) > 0: + [ c.send_message(msg_inv(invqueue)) for c in self.connections ] + invqueue = [] + self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions)) + if (not self.check_mempool(tx.sha256, tx_outcome)): + raise AssertionError("Mempool test failed at test %d" % test_number) + + print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ] + test_number += 1 + + self.block_store.close() + self.tx_store.close() + [ c.disconnect_node() for c in self.connections ] diff --git a/qa/rpc-tests/test_framework/mininode.py b/qa/rpc-tests/test_framework/mininode.py new file mode 100755 index 0000000000..b7d78e74fa --- /dev/null +++ b/qa/rpc-tests/test_framework/mininode.py @@ -0,0 +1,1256 @@ +# mininode.py - Bitcoin P2P network half-a-node +# +# Distributed under the MIT/X11 software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +# +# This python code was modified from ArtForz' public domain half-a-node, as +# found in the mini-node branch of http://github.com/jgarzik/pynode. 
+# +# NodeConn: an object which manages p2p connectivity to a bitcoin node +# NodeConnCB: a base class that describes the interface for receiving +# callbacks with network messages from a NodeConn +# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....: +# data structures that should map to corresponding structures in +# bitcoin/primitives +# msg_block, msg_tx, msg_headers, etc.: +# data structures that represent network messages +# ser_*, deser_*: functions that handle serialization/deserialization + + +import struct +import socket +import asyncore +import binascii +import time +import sys +import random +import cStringIO +import hashlib +from threading import RLock +from threading import Thread +import logging +import copy + +BIP0031_VERSION = 60000 +MY_VERSION = 60001 # past bip-31 for ping/pong +MY_SUBVERSION = "/python-mininode-tester:0.0.1/" + +MAX_INV_SZ = 50000 + +# Keep our own socket map for asyncore, so that we can track disconnects +# ourselves (to workaround an issue with closing an asyncore socket when +# using select) +mininode_socket_map = dict() + +# One lock for synchronizing all data access between the networking thread (see +# NetworkThread below) and the thread running the test logic. For simplicity, +# NodeConn acquires this lock whenever delivering a message to to a NodeConnCB, +# and whenever adding anything to the send buffer (in send_message()). This +# lock should be acquired in the thread running the test logic to synchronize +# access to any data shared with the NodeConnCB or NodeConn. +mininode_lock = RLock() + +# Serialization/deserialization tools +def sha256(s): + return hashlib.new('sha256', s).digest() + + +def hash256(s): + return sha256(sha256(s)) + + +def deser_string(f): + nit = struct.unpack("<B", f.read(1))[0] + if nit == 253: + nit = struct.unpack("<H", f.read(2))[0] + elif nit == 254: + nit = struct.unpack("<I", f.read(4))[0] + elif nit == 255: + nit = struct.unpack("<Q", f.read(8))[0] + return f.read(nit) + + +def ser_string(s): + if len(s) < 253: + return chr(len(s)) + s + elif len(s) < 0x10000: + return chr(253) + struct.pack("<H", len(s)) + s + elif len(s) < 0x100000000L: + return chr(254) + struct.pack("<I", len(s)) + s + return chr(255) + struct.pack("<Q", len(s)) + s + + +def deser_uint256(f): + r = 0L + for i in xrange(8): + t = struct.unpack("<I", f.read(4))[0] + r += t << (i * 32) + return r + + +def ser_uint256(u): + rs = "" + for i in xrange(8): + rs += struct.pack("<I", u & 0xFFFFFFFFL) + u >>= 32 + return rs + + +def uint256_from_str(s): + r = 0L + t = struct.unpack("<IIIIIIII", s[:32]) + for i in xrange(8): + r += t[i] << (i * 32) + return r + + +def uint256_from_compact(c): + nbytes = (c >> 24) & 0xFF + v = (c & 0xFFFFFFL) << (8 * (nbytes - 3)) + return v + + +def deser_vector(f, c): + nit = struct.unpack("<B", f.read(1))[0] + if nit == 253: + nit = struct.unpack("<H", f.read(2))[0] + elif nit == 254: + nit = struct.unpack("<I", f.read(4))[0] + elif nit == 255: + nit = struct.unpack("<Q", f.read(8))[0] + r = [] + for i in xrange(nit): + t = c() + t.deserialize(f) + r.append(t) + return r + + +def ser_vector(l): + r = "" + if len(l) < 253: + r = chr(len(l)) + elif len(l) < 0x10000: + r = chr(253) + struct.pack("<H", len(l)) + elif len(l) < 0x100000000L: + r = chr(254) + struct.pack("<I", len(l)) + else: + r = chr(255) + struct.pack("<Q", len(l)) + for i in l: + r += i.serialize() + return r + + +def deser_uint256_vector(f): + nit = struct.unpack("<B", f.read(1))[0] + if nit == 253: + nit = struct.unpack("<H", f.read(2))[0] + 
elif nit == 254: + nit = struct.unpack("<I", f.read(4))[0] + elif nit == 255: + nit = struct.unpack("<Q", f.read(8))[0] + r = [] + for i in xrange(nit): + t = deser_uint256(f) + r.append(t) + return r + + +def ser_uint256_vector(l): + r = "" + if len(l) < 253: + r = chr(len(l)) + elif len(l) < 0x10000: + r = chr(253) + struct.pack("<H", len(l)) + elif len(l) < 0x100000000L: + r = chr(254) + struct.pack("<I", len(l)) + else: + r = chr(255) + struct.pack("<Q", len(l)) + for i in l: + r += ser_uint256(i) + return r + + +def deser_string_vector(f): + nit = struct.unpack("<B", f.read(1))[0] + if nit == 253: + nit = struct.unpack("<H", f.read(2))[0] + elif nit == 254: + nit = struct.unpack("<I", f.read(4))[0] + elif nit == 255: + nit = struct.unpack("<Q", f.read(8))[0] + r = [] + for i in xrange(nit): + t = deser_string(f) + r.append(t) + return r + + +def ser_string_vector(l): + r = "" + if len(l) < 253: + r = chr(len(l)) + elif len(l) < 0x10000: + r = chr(253) + struct.pack("<H", len(l)) + elif len(l) < 0x100000000L: + r = chr(254) + struct.pack("<I", len(l)) + else: + r = chr(255) + struct.pack("<Q", len(l)) + for sv in l: + r += ser_string(sv) + return r + + +def deser_int_vector(f): + nit = struct.unpack("<B", f.read(1))[0] + if nit == 253: + nit = struct.unpack("<H", f.read(2))[0] + elif nit == 254: + nit = struct.unpack("<I", f.read(4))[0] + elif nit == 255: + nit = struct.unpack("<Q", f.read(8))[0] + r = [] + for i in xrange(nit): + t = struct.unpack("<i", f.read(4))[0] + r.append(t) + return r + + +def ser_int_vector(l): + r = "" + if len(l) < 253: + r = chr(len(l)) + elif len(l) < 0x10000: + r = chr(253) + struct.pack("<H", len(l)) + elif len(l) < 0x100000000L: + r = chr(254) + struct.pack("<I", len(l)) + else: + r = chr(255) + struct.pack("<Q", len(l)) + for i in l: + r += struct.pack("<i", i) + return r + + +# Objects that map to bitcoind objects, which can be serialized/deserialized + +class CAddress(object): + def __init__(self): + self.nServices = 1 + self.pchReserved = "\x00" * 10 + "\xff" * 2 + self.ip = "0.0.0.0" + self.port = 0 + + def deserialize(self, f): + self.nServices = struct.unpack("<Q", f.read(8))[0] + self.pchReserved = f.read(12) + self.ip = socket.inet_ntoa(f.read(4)) + self.port = struct.unpack(">H", f.read(2))[0] + + def serialize(self): + r = "" + r += struct.pack("<Q", self.nServices) + r += self.pchReserved + r += socket.inet_aton(self.ip) + r += struct.pack(">H", self.port) + return r + + def __repr__(self): + return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices, + self.ip, self.port) + + +class CInv(object): + typemap = { + 0: "Error", + 1: "TX", + 2: "Block"} + + def __init__(self, t=0, h=0L): + self.type = t + self.hash = h + + def deserialize(self, f): + self.type = struct.unpack("<i", f.read(4))[0] + self.hash = deser_uint256(f) + + def serialize(self): + r = "" + r += struct.pack("<i", self.type) + r += ser_uint256(self.hash) + return r + + def __repr__(self): + return "CInv(type=%s hash=%064x)" \ + % (self.typemap[self.type], self.hash) + + +class CBlockLocator(object): + def __init__(self): + self.nVersion = MY_VERSION + self.vHave = [] + + def deserialize(self, f): + self.nVersion = struct.unpack("<i", f.read(4))[0] + self.vHave = deser_uint256_vector(f) + + def serialize(self): + r = "" + r += struct.pack("<i", self.nVersion) + r += ser_uint256_vector(self.vHave) + return r + + def __repr__(self): + return "CBlockLocator(nVersion=%i vHave=%s)" \ + % (self.nVersion, repr(self.vHave)) + + +class COutPoint(object): + def __init__(self, 
hash=0, n=0): + self.hash = hash + self.n = n + + def deserialize(self, f): + self.hash = deser_uint256(f) + self.n = struct.unpack("<I", f.read(4))[0] + + def serialize(self): + r = "" + r += ser_uint256(self.hash) + r += struct.pack("<I", self.n) + return r + + def __repr__(self): + return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n) + + +class CTxIn(object): + def __init__(self, outpoint=None, scriptSig="", nSequence=0): + if outpoint is None: + self.prevout = COutPoint() + else: + self.prevout = outpoint + self.scriptSig = scriptSig + self.nSequence = nSequence + + def deserialize(self, f): + self.prevout = COutPoint() + self.prevout.deserialize(f) + self.scriptSig = deser_string(f) + self.nSequence = struct.unpack("<I", f.read(4))[0] + + def serialize(self): + r = "" + r += self.prevout.serialize() + r += ser_string(self.scriptSig) + r += struct.pack("<I", self.nSequence) + return r + + def __repr__(self): + return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \ + % (repr(self.prevout), binascii.hexlify(self.scriptSig), + self.nSequence) + + +class CTxOut(object): + def __init__(self, nValue=0, scriptPubKey=""): + self.nValue = nValue + self.scriptPubKey = scriptPubKey + + def deserialize(self, f): + self.nValue = struct.unpack("<q", f.read(8))[0] + self.scriptPubKey = deser_string(f) + + def serialize(self): + r = "" + r += struct.pack("<q", self.nValue) + r += ser_string(self.scriptPubKey) + return r + + def __repr__(self): + return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \ + % (self.nValue // 100000000, self.nValue % 100000000, + binascii.hexlify(self.scriptPubKey)) + + +class CTransaction(object): + def __init__(self, tx=None): + if tx is None: + self.nVersion = 1 + self.vin = [] + self.vout = [] + self.nLockTime = 0 + self.sha256 = None + self.hash = None + else: + self.nVersion = tx.nVersion + self.vin = copy.deepcopy(tx.vin) + self.vout = copy.deepcopy(tx.vout) + self.nLockTime = tx.nLockTime + self.sha256 = None + self.hash = None + + def deserialize(self, f): + self.nVersion = struct.unpack("<i", f.read(4))[0] + self.vin = deser_vector(f, CTxIn) + self.vout = deser_vector(f, CTxOut) + self.nLockTime = struct.unpack("<I", f.read(4))[0] + self.sha256 = None + self.hash = None + + def serialize(self): + r = "" + r += struct.pack("<i", self.nVersion) + r += ser_vector(self.vin) + r += ser_vector(self.vout) + r += struct.pack("<I", self.nLockTime) + return r + + def rehash(self): + self.sha256 = None + self.calc_sha256() + + def calc_sha256(self): + if self.sha256 is None: + self.sha256 = uint256_from_str(hash256(self.serialize())) + self.hash = hash256(self.serialize())[::-1].encode('hex_codec') + + def is_valid(self): + self.calc_sha256() + for tout in self.vout: + if tout.nValue < 0 or tout.nValue > 21000000L * 100000000L: + return False + return True + + def __repr__(self): + return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \ + % (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime) + + +class CBlockHeader(object): + def __init__(self, header=None): + if header is None: + self.set_null() + else: + self.nVersion = header.nVersion + self.hashPrevBlock = header.hashPrevBlock + self.hashMerkleRoot = header.hashMerkleRoot + self.nTime = header.nTime + self.nBits = header.nBits + self.nNonce = header.nNonce + self.sha256 = header.sha256 + self.hash = header.hash + self.calc_sha256() + + def set_null(self): + self.nVersion = 1 + self.hashPrevBlock = 0 + self.hashMerkleRoot = 0 + self.nTime = 0 + self.nBits = 0 + self.nNonce = 0 + self.sha256 = None + 
self.hash = None + + def deserialize(self, f): + self.nVersion = struct.unpack("<i", f.read(4))[0] + self.hashPrevBlock = deser_uint256(f) + self.hashMerkleRoot = deser_uint256(f) + self.nTime = struct.unpack("<I", f.read(4))[0] + self.nBits = struct.unpack("<I", f.read(4))[0] + self.nNonce = struct.unpack("<I", f.read(4))[0] + self.sha256 = None + self.hash = None + + def serialize(self): + r = "" + r += struct.pack("<i", self.nVersion) + r += ser_uint256(self.hashPrevBlock) + r += ser_uint256(self.hashMerkleRoot) + r += struct.pack("<I", self.nTime) + r += struct.pack("<I", self.nBits) + r += struct.pack("<I", self.nNonce) + return r + + def calc_sha256(self): + if self.sha256 is None: + r = "" + r += struct.pack("<i", self.nVersion) + r += ser_uint256(self.hashPrevBlock) + r += ser_uint256(self.hashMerkleRoot) + r += struct.pack("<I", self.nTime) + r += struct.pack("<I", self.nBits) + r += struct.pack("<I", self.nNonce) + self.sha256 = uint256_from_str(hash256(r)) + self.hash = hash256(r)[::-1].encode('hex_codec') + + def rehash(self): + self.sha256 = None + self.calc_sha256() + return self.sha256 + + def __repr__(self): + return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \ + % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, + time.ctime(self.nTime), self.nBits, self.nNonce) + + +class CBlock(CBlockHeader): + def __init__(self, header=None): + super(CBlock, self).__init__(header) + self.vtx = [] + + def deserialize(self, f): + super(CBlock, self).deserialize(f) + self.vtx = deser_vector(f, CTransaction) + + def serialize(self): + r = "" + r += super(CBlock, self).serialize() + r += ser_vector(self.vtx) + return r + + def calc_merkle_root(self): + hashes = [] + for tx in self.vtx: + tx.calc_sha256() + hashes.append(ser_uint256(tx.sha256)) + while len(hashes) > 1: + newhashes = [] + for i in xrange(0, len(hashes), 2): + i2 = min(i+1, len(hashes)-1) + newhashes.append(hash256(hashes[i] + hashes[i2])) + hashes = newhashes + return uint256_from_str(hashes[0]) + + def is_valid(self): + self.calc_sha256() + target = uint256_from_compact(self.nBits) + if self.sha256 > target: + return False + for tx in self.vtx: + if not tx.is_valid(): + return False + if self.calc_merkle_root() != self.hashMerkleRoot: + return False + return True + + def solve(self): + self.calc_sha256() + target = uint256_from_compact(self.nBits) + while self.sha256 > target: + self.nNonce += 1 + self.rehash() + + def __repr__(self): + return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \ + % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, + time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx)) + + +class CUnsignedAlert(object): + def __init__(self): + self.nVersion = 1 + self.nRelayUntil = 0 + self.nExpiration = 0 + self.nID = 0 + self.nCancel = 0 + self.setCancel = [] + self.nMinVer = 0 + self.nMaxVer = 0 + self.setSubVer = [] + self.nPriority = 0 + self.strComment = "" + self.strStatusBar = "" + self.strReserved = "" + + def deserialize(self, f): + self.nVersion = struct.unpack("<i", f.read(4))[0] + self.nRelayUntil = struct.unpack("<q", f.read(8))[0] + self.nExpiration = struct.unpack("<q", f.read(8))[0] + self.nID = struct.unpack("<i", f.read(4))[0] + self.nCancel = struct.unpack("<i", f.read(4))[0] + self.setCancel = deser_int_vector(f) + self.nMinVer = struct.unpack("<i", f.read(4))[0] + self.nMaxVer = struct.unpack("<i", f.read(4))[0] + self.setSubVer = deser_string_vector(f) 
+ self.nPriority = struct.unpack("<i", f.read(4))[0] + self.strComment = deser_string(f) + self.strStatusBar = deser_string(f) + self.strReserved = deser_string(f) + + def serialize(self): + r = "" + r += struct.pack("<i", self.nVersion) + r += struct.pack("<q", self.nRelayUntil) + r += struct.pack("<q", self.nExpiration) + r += struct.pack("<i", self.nID) + r += struct.pack("<i", self.nCancel) + r += ser_int_vector(self.setCancel) + r += struct.pack("<i", self.nMinVer) + r += struct.pack("<i", self.nMaxVer) + r += ser_string_vector(self.setSubVer) + r += struct.pack("<i", self.nPriority) + r += ser_string(self.strComment) + r += ser_string(self.strStatusBar) + r += ser_string(self.strReserved) + return r + + def __repr__(self): + return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \ + % (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID, + self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority, + self.strComment, self.strStatusBar, self.strReserved) + + +class CAlert(object): + def __init__(self): + self.vchMsg = "" + self.vchSig = "" + + def deserialize(self, f): + self.vchMsg = deser_string(f) + self.vchSig = deser_string(f) + + def serialize(self): + r = "" + r += ser_string(self.vchMsg) + r += ser_string(self.vchSig) + return r + + def __repr__(self): + return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \ + % (len(self.vchMsg), len(self.vchSig)) + + +# Objects that correspond to messages on the wire +class msg_version(object): + command = "version" + + def __init__(self): + self.nVersion = MY_VERSION + self.nServices = 1 + self.nTime = time.time() + self.addrTo = CAddress() + self.addrFrom = CAddress() + self.nNonce = random.getrandbits(64) + self.strSubVer = MY_SUBVERSION + self.nStartingHeight = -1 + + def deserialize(self, f): + self.nVersion = struct.unpack("<i", f.read(4))[0] + if self.nVersion == 10300: + self.nVersion = 300 + self.nServices = struct.unpack("<Q", f.read(8))[0] + self.nTime = struct.unpack("<q", f.read(8))[0] + self.addrTo = CAddress() + self.addrTo.deserialize(f) + if self.nVersion >= 106: + self.addrFrom = CAddress() + self.addrFrom.deserialize(f) + self.nNonce = struct.unpack("<Q", f.read(8))[0] + self.strSubVer = deser_string(f) + if self.nVersion >= 209: + self.nStartingHeight = struct.unpack("<i", f.read(4))[0] + else: + self.nStartingHeight = None + else: + self.addrFrom = None + self.nNonce = None + self.strSubVer = None + self.nStartingHeight = None + + def serialize(self): + r = "" + r += struct.pack("<i", self.nVersion) + r += struct.pack("<Q", self.nServices) + r += struct.pack("<q", self.nTime) + r += self.addrTo.serialize() + r += self.addrFrom.serialize() + r += struct.pack("<Q", self.nNonce) + r += ser_string(self.strSubVer) + r += struct.pack("<i", self.nStartingHeight) + return r + + def __repr__(self): + return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \ + % (self.nVersion, self.nServices, time.ctime(self.nTime), + repr(self.addrTo), repr(self.addrFrom), self.nNonce, + self.strSubVer, self.nStartingHeight) + + +class msg_verack(object): + command = "verack" + + def __init__(self): + pass + + def deserialize(self, f): + pass + + def serialize(self): + return "" + + def __repr__(self): + return "msg_verack()" + + +class msg_addr(object): + command = "addr" + + def __init__(self): + self.addrs = [] + + def deserialize(self, f): + self.addrs = 
deser_vector(f, CAddress) + + def serialize(self): + return ser_vector(self.addrs) + + def __repr__(self): + return "msg_addr(addrs=%s)" % (repr(self.addrs)) + + +class msg_alert(object): + command = "alert" + + def __init__(self): + self.alert = CAlert() + + def deserialize(self, f): + self.alert = CAlert() + self.alert.deserialize(f) + + def serialize(self): + r = "" + r += self.alert.serialize() + return r + + def __repr__(self): + return "msg_alert(alert=%s)" % (repr(self.alert), ) + + +class msg_inv(object): + command = "inv" + + def __init__(self, inv=None): + if inv is None: + self.inv = [] + else: + self.inv = inv + + def deserialize(self, f): + self.inv = deser_vector(f, CInv) + + def serialize(self): + return ser_vector(self.inv) + + def __repr__(self): + return "msg_inv(inv=%s)" % (repr(self.inv)) + + +class msg_getdata(object): + command = "getdata" + + def __init__(self): + self.inv = [] + + def deserialize(self, f): + self.inv = deser_vector(f, CInv) + + def serialize(self): + return ser_vector(self.inv) + + def __repr__(self): + return "msg_getdata(inv=%s)" % (repr(self.inv)) + + +class msg_getblocks(object): + command = "getblocks" + + def __init__(self): + self.locator = CBlockLocator() + self.hashstop = 0L + + def deserialize(self, f): + self.locator = CBlockLocator() + self.locator.deserialize(f) + self.hashstop = deser_uint256(f) + + def serialize(self): + r = "" + r += self.locator.serialize() + r += ser_uint256(self.hashstop) + return r + + def __repr__(self): + return "msg_getblocks(locator=%s hashstop=%064x)" \ + % (repr(self.locator), self.hashstop) + + +class msg_tx(object): + command = "tx" + + def __init__(self, tx=CTransaction()): + self.tx = tx + + def deserialize(self, f): + self.tx.deserialize(f) + + def serialize(self): + return self.tx.serialize() + + def __repr__(self): + return "msg_tx(tx=%s)" % (repr(self.tx)) + + +class msg_block(object): + command = "block" + + def __init__(self, block=None): + if block is None: + self.block = CBlock() + else: + self.block = block + + def deserialize(self, f): + self.block.deserialize(f) + + def serialize(self): + return self.block.serialize() + + def __repr__(self): + return "msg_block(block=%s)" % (repr(self.block)) + + +class msg_getaddr(object): + command = "getaddr" + + def __init__(self): + pass + + def deserialize(self, f): + pass + + def serialize(self): + return "" + + def __repr__(self): + return "msg_getaddr()" + + +class msg_ping_prebip31(object): + command = "ping" + + def __init__(self): + pass + + def deserialize(self, f): + pass + + def serialize(self): + return "" + + def __repr__(self): + return "msg_ping() (pre-bip31)" + + +class msg_ping(object): + command = "ping" + + def __init__(self, nonce=0L): + self.nonce = nonce + + def deserialize(self, f): + self.nonce = struct.unpack("<Q", f.read(8))[0] + + def serialize(self): + r = "" + r += struct.pack("<Q", self.nonce) + return r + + def __repr__(self): + return "msg_ping(nonce=%08x)" % self.nonce + + +class msg_pong(object): + command = "pong" + + def __init__(self, nonce=0L): + self.nonce = nonce + + def deserialize(self, f): + self.nonce = struct.unpack("<Q", f.read(8))[0] + + def serialize(self): + r = "" + r += struct.pack("<Q", self.nonce) + return r + + def __repr__(self): + return "msg_pong(nonce=%08x)" % self.nonce + + +class msg_mempool(object): + command = "mempool" + + def __init__(self): + pass + + def deserialize(self, f): + pass + + def serialize(self): + return "" + + def __repr__(self): + return "msg_mempool()" + + +# getheaders 
message has +# number of entries +# vector of hashes +# hash_stop (hash of last desired block header, 0 to get as many as possible) +class msg_getheaders(object): + command = "getheaders" + + def __init__(self): + self.locator = CBlockLocator() + self.hashstop = 0L + + def deserialize(self, f): + self.locator = CBlockLocator() + self.locator.deserialize(f) + self.hashstop = deser_uint256(f) + + def serialize(self): + r = "" + r += self.locator.serialize() + r += ser_uint256(self.hashstop) + return r + + def __repr__(self): + return "msg_getheaders(locator=%s, stop=%064x)" \ + % (repr(self.locator), self.hashstop) + + +# headers message has +# <count> <vector of block headers> +class msg_headers(object): + command = "headers" + + def __init__(self): + self.headers = [] + + def deserialize(self, f): + # comment in bitcoind indicates these should be deserialized as blocks + blocks = deser_vector(f, CBlock) + for x in blocks: + self.headers.append(CBlockHeader(x)) + + def serialize(self): + blocks = [CBlock(x) for x in self.headers] + return ser_vector(blocks) + + def __repr__(self): + return "msg_headers(headers=%s)" % repr(self.headers) + + +class msg_reject(object): + command = "reject" + + def __init__(self): + self.message = "" + self.code = "" + self.reason = "" + self.data = 0L + + def deserialize(self, f): + self.message = deser_string(f) + self.code = struct.unpack("<B", f.read(1))[0] + self.reason = deser_string(f) + if (self.message == "block" or self.message == "tx"): + self.data = deser_uint256(f) + + def serialize(self): + r = ser_string(self.message) + r += struct.pack("<B", self.code) + r += ser_string(self.reason) + if (self.message == "block" or self.message == "tx"): + r += ser_uint256(self.data) + return r + + def __repr__(self): + return "msg_reject: %s %d %s [%064x]" \ + % (self.message, self.code, self.reason, self.data) + + +# This is what a callback should look like for NodeConn +# Reimplement the on_* functions to provide handling for events +class NodeConnCB(object): + def __init__(self): + self.verack_received = False + + # Derived classes should call this function once to set the message map + # which associates the derived classes' functions to incoming messages + def create_callback_map(self): + self.cbmap = { + "version": self.on_version, + "verack": self.on_verack, + "addr": self.on_addr, + "alert": self.on_alert, + "inv": self.on_inv, + "getdata": self.on_getdata, + "getblocks": self.on_getblocks, + "tx": self.on_tx, + "block": self.on_block, + "getaddr": self.on_getaddr, + "ping": self.on_ping, + "pong": self.on_pong, + "headers": self.on_headers, + "getheaders": self.on_getheaders, + "reject": self.on_reject, + "mempool": self.on_mempool + } + + def deliver(self, conn, message): + with mininode_lock: + try: + self.cbmap[message.command](conn, message) + except: + print "ERROR delivering %s (%s)" % (repr(message), + sys.exc_info()[0]) + + def on_version(self, conn, message): + if message.nVersion >= 209: + conn.send_message(msg_verack()) + conn.ver_send = min(MY_VERSION, message.nVersion) + if message.nVersion < 209: + conn.ver_recv = conn.ver_send + + def on_verack(self, conn, message): + conn.ver_recv = conn.ver_send + self.verack_received = True + + def on_inv(self, conn, message): + want = msg_getdata() + for i in message.inv: + if i.type != 0: + want.inv.append(i) + if len(want.inv): + conn.send_message(want) + + def on_addr(self, conn, message): pass + def on_alert(self, conn, message): pass + def on_getdata(self, conn, message): pass + def 
on_getblocks(self, conn, message): pass + def on_tx(self, conn, message): pass + def on_block(self, conn, message): pass + def on_getaddr(self, conn, message): pass + def on_headers(self, conn, message): pass + def on_getheaders(self, conn, message): pass + def on_ping(self, conn, message): + if conn.ver_send > BIP0031_VERSION: + conn.send_message(msg_pong(message.nonce)) + def on_reject(self, conn, message): pass + def on_close(self, conn): pass + def on_mempool(self, conn): pass + def on_pong(self, conn, message): pass + + +# The actual NodeConn class +# This class provides an interface for a p2p connection to a specified node +class NodeConn(asyncore.dispatcher): + messagemap = { + "version": msg_version, + "verack": msg_verack, + "addr": msg_addr, + "alert": msg_alert, + "inv": msg_inv, + "getdata": msg_getdata, + "getblocks": msg_getblocks, + "tx": msg_tx, + "block": msg_block, + "getaddr": msg_getaddr, + "ping": msg_ping, + "pong": msg_pong, + "headers": msg_headers, + "getheaders": msg_getheaders, + "reject": msg_reject, + "mempool": msg_mempool + } + MAGIC_BYTES = { + "mainnet": "\xf9\xbe\xb4\xd9", # mainnet + "testnet3": "\x0b\x11\x09\x07", # testnet3 + "regtest": "\xfa\xbf\xb5\xda" # regtest + } + + def __init__(self, dstaddr, dstport, rpc, callback, net="regtest"): + asyncore.dispatcher.__init__(self, map=mininode_socket_map) + self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport)) + self.dstaddr = dstaddr + self.dstport = dstport + self.create_socket(socket.AF_INET, socket.SOCK_STREAM) + self.sendbuf = "" + self.recvbuf = "" + self.ver_send = 209 + self.ver_recv = 209 + self.last_sent = 0 + self.state = "connecting" + self.network = net + self.cb = callback + self.disconnect = False + + # stuff version msg into sendbuf + vt = msg_version() + vt.addrTo.ip = self.dstaddr + vt.addrTo.port = self.dstport + vt.addrFrom.ip = "0.0.0.0" + vt.addrFrom.port = 0 + self.send_message(vt, True) + print 'MiniNode: Connecting to Bitcoin Node IP # ' + dstaddr + ':' \ + + str(dstport) + + try: + self.connect((dstaddr, dstport)) + except: + self.handle_close() + self.rpc = rpc + + def show_debug_msg(self, msg): + self.log.debug(msg) + + def handle_connect(self): + self.show_debug_msg("MiniNode: Connected & Listening: \n") + self.state = "connected" + + def handle_close(self): + self.show_debug_msg("MiniNode: Closing Connection to %s:%d... 
" + % (self.dstaddr, self.dstport)) + self.state = "closed" + self.recvbuf = "" + self.sendbuf = "" + try: + self.close() + except: + pass + self.cb.on_close(self) + + def handle_read(self): + try: + t = self.recv(8192) + if len(t) > 0: + self.recvbuf += t + self.got_data() + except: + pass + + def readable(self): + return True + + def writable(self): + with mininode_lock: + length = len(self.sendbuf) + return (length > 0) + + def handle_write(self): + with mininode_lock: + try: + sent = self.send(self.sendbuf) + except: + self.handle_close() + return + self.sendbuf = self.sendbuf[sent:] + + def got_data(self): + while True: + if len(self.recvbuf) < 4: + return + if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]: + raise ValueError("got garbage %s" % repr(self.recvbuf)) + if self.ver_recv < 209: + if len(self.recvbuf) < 4 + 12 + 4: + return + command = self.recvbuf[4:4+12].split("\x00", 1)[0] + msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0] + checksum = None + if len(self.recvbuf) < 4 + 12 + 4 + msglen: + return + msg = self.recvbuf[4+12+4:4+12+4+msglen] + self.recvbuf = self.recvbuf[4+12+4+msglen:] + else: + if len(self.recvbuf) < 4 + 12 + 4 + 4: + return + command = self.recvbuf[4:4+12].split("\x00", 1)[0] + msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0] + checksum = self.recvbuf[4+12+4:4+12+4+4] + if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen: + return + msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen] + th = sha256(msg) + h = sha256(th) + if checksum != h[:4]: + raise ValueError("got bad checksum " + repr(self.recvbuf)) + self.recvbuf = self.recvbuf[4+12+4+4+msglen:] + if command in self.messagemap: + f = cStringIO.StringIO(msg) + t = self.messagemap[command]() + t.deserialize(f) + self.got_message(t) + else: + self.show_debug_msg("Unknown command: '" + command + "' " + + repr(msg)) + + def send_message(self, message, pushbuf=False): + if self.state != "connected" and not pushbuf: + return + self.show_debug_msg("Send %s" % repr(message)) + command = message.command + data = message.serialize() + tmsg = self.MAGIC_BYTES[self.network] + tmsg += command + tmsg += "\x00" * (12 - len(command)) + tmsg += struct.pack("<I", len(data)) + if self.ver_send >= 209: + th = sha256(data) + h = sha256(th) + tmsg += h[:4] + tmsg += data + with mininode_lock: + self.sendbuf += tmsg + self.last_sent = time.time() + + def got_message(self, message): + if message.command == "version": + if message.nVersion <= BIP0031_VERSION: + self.messagemap['ping'] = msg_ping_prebip31 + if self.last_sent + 30 * 60 < time.time(): + self.send_message(self.messagemap['ping']()) + self.show_debug_msg("Recv %s" % repr(message)) + self.cb.deliver(self, message) + + def disconnect_node(self): + self.disconnect = True + + +class NetworkThread(Thread): + def run(self): + while mininode_socket_map: + # We check for whether to disconnect outside of the asyncore + # loop to workaround the behavior of asyncore when using + # select + disconnected = [] + for fd, obj in mininode_socket_map.items(): + if obj.disconnect: + disconnected.append(obj) + [ obj.handle_close() for obj in disconnected ] + asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1) + + +# An exception we can raise if we detect a potential disconnect +# (p2p or rpc) before the test is complete +class EarlyDisconnectError(Exception): + def __init__(self, value): + self.value = value + + def __str__(self): + return repr(self.value) diff --git a/qa/rpc-tests/test_framework/netutil.py b/qa/rpc-tests/test_framework/netutil.py new file 
mode 100644 index 0000000000..b30a88a4f7 --- /dev/null +++ b/qa/rpc-tests/test_framework/netutil.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python2 +# Copyright (c) 2014 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +# Linux network utilities +import sys +import socket +import fcntl +import struct +import array +import os +import binascii + +# Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal +STATE_ESTABLISHED = '01' +STATE_SYN_SENT = '02' +STATE_SYN_RECV = '03' +STATE_FIN_WAIT1 = '04' +STATE_FIN_WAIT2 = '05' +STATE_TIME_WAIT = '06' +STATE_CLOSE = '07' +STATE_CLOSE_WAIT = '08' +STATE_LAST_ACK = '09' +STATE_LISTEN = '0A' +STATE_CLOSING = '0B' + +def get_socket_inodes(pid): + ''' + Get list of socket inodes for process pid. + ''' + base = '/proc/%i/fd' % pid + inodes = [] + for item in os.listdir(base): + target = os.readlink(os.path.join(base, item)) + if target.startswith('socket:'): + inodes.append(int(target[8:-1])) + return inodes + +def _remove_empty(array): + return [x for x in array if x !=''] + +def _convert_ip_port(array): + host,port = array.split(':') + # convert host from mangled-per-four-bytes form as used by kernel + host = binascii.unhexlify(host) + host_out = '' + for x in range(0, len(host)/4): + (val,) = struct.unpack('=I', host[x*4:(x+1)*4]) + host_out += '%08x' % val + + return host_out,int(port,16) + +def netstat(typ='tcp'): + ''' + Function to return a list with status of tcp connections at linux systems + To get pid of all network process running on system, you must run this script + as superuser + ''' + with open('/proc/net/'+typ,'r') as f: + content = f.readlines() + content.pop(0) + result = [] + for line in content: + line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces. + tcp_id = line_array[0] + l_addr = _convert_ip_port(line_array[1]) + r_addr = _convert_ip_port(line_array[2]) + state = line_array[3] + inode = int(line_array[9]) # Need the inode to match with process pid. + nline = [tcp_id, l_addr, r_addr, state, inode] + result.append(nline) + return result + +def get_bind_addrs(pid): + ''' + Get bind addresses as (host,port) tuples for process pid. + ''' + inodes = get_socket_inodes(pid) + bind_addrs = [] + for conn in netstat('tcp') + netstat('tcp6'): + if conn[3] == STATE_LISTEN and conn[4] in inodes: + bind_addrs.append(conn[1]) + return bind_addrs + +# from: http://code.activestate.com/recipes/439093/ +def all_interfaces(): + ''' + Return all interfaces that are up + ''' + is_64bits = sys.maxsize > 2**32 + struct_size = 40 if is_64bits else 32 + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + max_possible = 8 # initial value + while True: + bytes = max_possible * struct_size + names = array.array('B', '\0' * bytes) + outbytes = struct.unpack('iL', fcntl.ioctl( + s.fileno(), + 0x8912, # SIOCGIFCONF + struct.pack('iL', bytes, names.buffer_info()[0]) + ))[0] + if outbytes == bytes: + max_possible *= 2 + else: + break + namestr = names.tostring() + return [(namestr[i:i+16].split('\0', 1)[0], + socket.inet_ntoa(namestr[i+20:i+24])) + for i in range(0, outbytes, struct_size)] + +def addr_to_hex(addr): + ''' + Convert string IPv4 or IPv6 address to binary address as returned by + get_bind_addrs. + Very naive implementation that certainly doesn't work for all IPv6 variants. + ''' + if '.' 
in addr: # IPv4 + addr = [int(x) for x in addr.split('.')] + elif ':' in addr: # IPv6 + sub = [[], []] # prefix, suffix + x = 0 + addr = addr.split(':') + for i,comp in enumerate(addr): + if comp == '': + if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end + continue + x += 1 # :: skips to suffix + assert(x < 2) + else: # two bytes per component + val = int(comp, 16) + sub[x].append(val >> 8) + sub[x].append(val & 0xff) + nullbytes = 16 - len(sub[0]) - len(sub[1]) + assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)) + addr = sub[0] + ([0] * nullbytes) + sub[1] + else: + raise ValueError('Could not parse address %s' % addr) + return binascii.hexlify(bytearray(addr)) diff --git a/qa/rpc-tests/test_framework/python-bitcoinrpc/bitcoinrpc/.gitignore b/qa/rpc-tests/test_framework/python-bitcoinrpc/bitcoinrpc/.gitignore new file mode 100644 index 0000000000..0d20b6487c --- /dev/null +++ b/qa/rpc-tests/test_framework/python-bitcoinrpc/bitcoinrpc/.gitignore @@ -0,0 +1 @@ +*.pyc diff --git a/qa/rpc-tests/test_framework/python-bitcoinrpc/bitcoinrpc/__init__.py b/qa/rpc-tests/test_framework/python-bitcoinrpc/bitcoinrpc/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/qa/rpc-tests/test_framework/python-bitcoinrpc/bitcoinrpc/__init__.py diff --git a/qa/rpc-tests/test_framework/python-bitcoinrpc/bitcoinrpc/authproxy.py b/qa/rpc-tests/test_framework/python-bitcoinrpc/bitcoinrpc/authproxy.py new file mode 100644 index 0000000000..bc7d655fdf --- /dev/null +++ b/qa/rpc-tests/test_framework/python-bitcoinrpc/bitcoinrpc/authproxy.py @@ -0,0 +1,156 @@ + +""" + Copyright 2011 Jeff Garzik + + AuthServiceProxy has the following improvements over python-jsonrpc's + ServiceProxy class: + + - HTTP connections persist for the life of the AuthServiceProxy object + (if server supports HTTP/1.1) + - sends protocol 'version', per JSON-RPC 1.1 + - sends proper, incrementing 'id' + - sends Basic HTTP authentication headers + - parses all JSON numbers that look like floats as Decimal + - uses standard Python json lib + + Previous copyright, from python-jsonrpc/jsonrpc/proxy.py: + + Copyright (c) 2007 Jan-Klaas Kollhof + + This file is part of jsonrpc. + + jsonrpc is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + This software is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public License + along with this software; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +try: + import http.client as httplib +except ImportError: + import httplib +import base64 +import decimal +import json +import logging +try: + import urllib.parse as urlparse +except ImportError: + import urlparse + +USER_AGENT = "AuthServiceProxy/0.1" + +HTTP_TIMEOUT = 30 + +log = logging.getLogger("BitcoinRPC") + +class JSONRPCException(Exception): + def __init__(self, rpc_error): + Exception.__init__(self) + self.error = rpc_error + + +def EncodeDecimal(o): + if isinstance(o, decimal.Decimal): + return round(o, 8) + raise TypeError(repr(o) + " is not JSON serializable") + +class AuthServiceProxy(object): + __id_count = 0 + + def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None): + self.__service_url = service_url + self.__service_name = service_name + self.__url = urlparse.urlparse(service_url) + if self.__url.port is None: + port = 80 + else: + port = self.__url.port + (user, passwd) = (self.__url.username, self.__url.password) + try: + user = user.encode('utf8') + except AttributeError: + pass + try: + passwd = passwd.encode('utf8') + except AttributeError: + pass + authpair = user + b':' + passwd + self.__auth_header = b'Basic ' + base64.b64encode(authpair) + + if connection: + # Callables re-use the connection of the original proxy + self.__conn = connection + elif self.__url.scheme == 'https': + self.__conn = httplib.HTTPSConnection(self.__url.hostname, port, + None, None, False, + timeout) + else: + self.__conn = httplib.HTTPConnection(self.__url.hostname, port, + False, timeout) + + def __getattr__(self, name): + if name.startswith('__') and name.endswith('__'): + # Python internal stuff + raise AttributeError + if self.__service_name is not None: + name = "%s.%s" % (self.__service_name, name) + return AuthServiceProxy(self.__service_url, name, connection=self.__conn) + + def __call__(self, *args): + AuthServiceProxy.__id_count += 1 + + log.debug("-%s-> %s %s"%(AuthServiceProxy.__id_count, self.__service_name, + json.dumps(args, default=EncodeDecimal))) + postdata = json.dumps({'version': '1.1', + 'method': self.__service_name, + 'params': args, + 'id': AuthServiceProxy.__id_count}, default=EncodeDecimal) + self.__conn.request('POST', self.__url.path, postdata, + {'Host': self.__url.hostname, + 'User-Agent': USER_AGENT, + 'Authorization': self.__auth_header, + 'Content-type': 'application/json'}) + + response = self._get_response() + if response['error'] is not None: + raise JSONRPCException(response['error']) + elif 'result' not in response: + raise JSONRPCException({ + 'code': -343, 'message': 'missing JSON-RPC result'}) + else: + return response['result'] + + def _batch(self, rpc_call_list): + postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal) + log.debug("--> "+postdata) + self.__conn.request('POST', self.__url.path, postdata, + {'Host': self.__url.hostname, + 'User-Agent': USER_AGENT, + 'Authorization': self.__auth_header, + 'Content-type': 'application/json'}) + + return self._get_response() + + def _get_response(self): + http_response = self.__conn.getresponse() + if http_response is None: + raise JSONRPCException({ + 'code': -342, 'message': 'missing HTTP response from server'}) + + responsedata = http_response.read().decode('utf8') + response = json.loads(responsedata, parse_float=decimal.Decimal) + if 
"error" in response and response["error"] is None: + log.debug("<-%s- %s"%(response["id"], json.dumps(response["result"], default=EncodeDecimal))) + else: + log.debug("<-- "+responsedata) + return response diff --git a/qa/rpc-tests/test_framework/python-bitcoinrpc/setup.py b/qa/rpc-tests/test_framework/python-bitcoinrpc/setup.py new file mode 100644 index 0000000000..43cdb1c038 --- /dev/null +++ b/qa/rpc-tests/test_framework/python-bitcoinrpc/setup.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python2 + +from distutils.core import setup + +setup(name='python-bitcoinrpc', + version='0.1', + description='Enhanced version of python-jsonrpc for use with Bitcoin', + long_description=open('README').read(), + author='Jeff Garzik', + author_email='<jgarzik@exmulti.com>', + maintainer='Jeff Garzik', + maintainer_email='<jgarzik@exmulti.com>', + url='http://www.github.com/jgarzik/python-bitcoinrpc', + packages=['bitcoinrpc'], + classifiers=['License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)', 'Operating System :: OS Independent']) diff --git a/qa/rpc-tests/test_framework/script.py b/qa/rpc-tests/test_framework/script.py new file mode 100644 index 0000000000..e37ab5d45a --- /dev/null +++ b/qa/rpc-tests/test_framework/script.py @@ -0,0 +1,896 @@ +# +# script.py +# +# This file is modified from python-bitcoinlib. +# +# Distributed under the MIT/X11 software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +# + +"""Scripts + +Functionality to build scripts, as well as SignatureHash(). +""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +from test_framework.mininode import CTransaction, CTxOut, hash256 + +import sys +bchr = chr +bord = ord +if sys.version > '3': + long = int + bchr = lambda x: bytes([x]) + bord = lambda x: x + +import copy +import struct + +import test_framework.bignum + +MAX_SCRIPT_SIZE = 10000 +MAX_SCRIPT_ELEMENT_SIZE = 520 +MAX_SCRIPT_OPCODES = 201 + +OPCODE_NAMES = {} + +_opcode_instances = [] +class CScriptOp(int): + """A single script opcode""" + __slots__ = [] + + @staticmethod + def encode_op_pushdata(d): + """Encode a PUSHDATA op, returning bytes""" + if len(d) < 0x4c: + return b'' + bchr(len(d)) + d # OP_PUSHDATA + elif len(d) <= 0xff: + return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1 + elif len(d) <= 0xffff: + return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2 + elif len(d) <= 0xffffffff: + return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4 + else: + raise ValueError("Data too long to encode in a PUSHDATA op") + + @staticmethod + def encode_op_n(n): + """Encode a small integer op, returning an opcode""" + if not (0 <= n <= 16): + raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n) + + if n == 0: + return OP_0 + else: + return CScriptOp(OP_1 + n-1) + + def decode_op_n(self): + """Decode a small integer opcode, returning an integer""" + if self == OP_0: + return 0 + + if not (self == OP_0 or OP_1 <= self <= OP_16): + raise ValueError('op %r is not an OP_N' % self) + + return int(self - OP_1+1) + + def is_small_int(self): + """Return true if the op pushes a small integer to the stack""" + if 0x51 <= self <= 0x60 or self == 0: + return True + else: + return False + + def __str__(self): + return repr(self) + + def __repr__(self): + if self in OPCODE_NAMES: + return OPCODE_NAMES[self] + else: + return 'CScriptOp(0x%x)' % self + + def __new__(cls, n): + try: + return _opcode_instances[n] + except IndexError: + assert 
len(_opcode_instances) == n + _opcode_instances.append(super(CScriptOp, cls).__new__(cls, n)) + return _opcode_instances[n] + +# Populate opcode instance table +for n in range(0xff+1): + CScriptOp(n) + + +# push value +OP_0 = CScriptOp(0x00) +OP_FALSE = OP_0 +OP_PUSHDATA1 = CScriptOp(0x4c) +OP_PUSHDATA2 = CScriptOp(0x4d) +OP_PUSHDATA4 = CScriptOp(0x4e) +OP_1NEGATE = CScriptOp(0x4f) +OP_RESERVED = CScriptOp(0x50) +OP_1 = CScriptOp(0x51) +OP_TRUE=OP_1 +OP_2 = CScriptOp(0x52) +OP_3 = CScriptOp(0x53) +OP_4 = CScriptOp(0x54) +OP_5 = CScriptOp(0x55) +OP_6 = CScriptOp(0x56) +OP_7 = CScriptOp(0x57) +OP_8 = CScriptOp(0x58) +OP_9 = CScriptOp(0x59) +OP_10 = CScriptOp(0x5a) +OP_11 = CScriptOp(0x5b) +OP_12 = CScriptOp(0x5c) +OP_13 = CScriptOp(0x5d) +OP_14 = CScriptOp(0x5e) +OP_15 = CScriptOp(0x5f) +OP_16 = CScriptOp(0x60) + +# control +OP_NOP = CScriptOp(0x61) +OP_VER = CScriptOp(0x62) +OP_IF = CScriptOp(0x63) +OP_NOTIF = CScriptOp(0x64) +OP_VERIF = CScriptOp(0x65) +OP_VERNOTIF = CScriptOp(0x66) +OP_ELSE = CScriptOp(0x67) +OP_ENDIF = CScriptOp(0x68) +OP_VERIFY = CScriptOp(0x69) +OP_RETURN = CScriptOp(0x6a) + +# stack ops +OP_TOALTSTACK = CScriptOp(0x6b) +OP_FROMALTSTACK = CScriptOp(0x6c) +OP_2DROP = CScriptOp(0x6d) +OP_2DUP = CScriptOp(0x6e) +OP_3DUP = CScriptOp(0x6f) +OP_2OVER = CScriptOp(0x70) +OP_2ROT = CScriptOp(0x71) +OP_2SWAP = CScriptOp(0x72) +OP_IFDUP = CScriptOp(0x73) +OP_DEPTH = CScriptOp(0x74) +OP_DROP = CScriptOp(0x75) +OP_DUP = CScriptOp(0x76) +OP_NIP = CScriptOp(0x77) +OP_OVER = CScriptOp(0x78) +OP_PICK = CScriptOp(0x79) +OP_ROLL = CScriptOp(0x7a) +OP_ROT = CScriptOp(0x7b) +OP_SWAP = CScriptOp(0x7c) +OP_TUCK = CScriptOp(0x7d) + +# splice ops +OP_CAT = CScriptOp(0x7e) +OP_SUBSTR = CScriptOp(0x7f) +OP_LEFT = CScriptOp(0x80) +OP_RIGHT = CScriptOp(0x81) +OP_SIZE = CScriptOp(0x82) + +# bit logic +OP_INVERT = CScriptOp(0x83) +OP_AND = CScriptOp(0x84) +OP_OR = CScriptOp(0x85) +OP_XOR = CScriptOp(0x86) +OP_EQUAL = CScriptOp(0x87) +OP_EQUALVERIFY = CScriptOp(0x88) +OP_RESERVED1 = CScriptOp(0x89) +OP_RESERVED2 = CScriptOp(0x8a) + +# numeric +OP_1ADD = CScriptOp(0x8b) +OP_1SUB = CScriptOp(0x8c) +OP_2MUL = CScriptOp(0x8d) +OP_2DIV = CScriptOp(0x8e) +OP_NEGATE = CScriptOp(0x8f) +OP_ABS = CScriptOp(0x90) +OP_NOT = CScriptOp(0x91) +OP_0NOTEQUAL = CScriptOp(0x92) + +OP_ADD = CScriptOp(0x93) +OP_SUB = CScriptOp(0x94) +OP_MUL = CScriptOp(0x95) +OP_DIV = CScriptOp(0x96) +OP_MOD = CScriptOp(0x97) +OP_LSHIFT = CScriptOp(0x98) +OP_RSHIFT = CScriptOp(0x99) + +OP_BOOLAND = CScriptOp(0x9a) +OP_BOOLOR = CScriptOp(0x9b) +OP_NUMEQUAL = CScriptOp(0x9c) +OP_NUMEQUALVERIFY = CScriptOp(0x9d) +OP_NUMNOTEQUAL = CScriptOp(0x9e) +OP_LESSTHAN = CScriptOp(0x9f) +OP_GREATERTHAN = CScriptOp(0xa0) +OP_LESSTHANOREQUAL = CScriptOp(0xa1) +OP_GREATERTHANOREQUAL = CScriptOp(0xa2) +OP_MIN = CScriptOp(0xa3) +OP_MAX = CScriptOp(0xa4) + +OP_WITHIN = CScriptOp(0xa5) + +# crypto +OP_RIPEMD160 = CScriptOp(0xa6) +OP_SHA1 = CScriptOp(0xa7) +OP_SHA256 = CScriptOp(0xa8) +OP_HASH160 = CScriptOp(0xa9) +OP_HASH256 = CScriptOp(0xaa) +OP_CODESEPARATOR = CScriptOp(0xab) +OP_CHECKSIG = CScriptOp(0xac) +OP_CHECKSIGVERIFY = CScriptOp(0xad) +OP_CHECKMULTISIG = CScriptOp(0xae) +OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf) + +# expansion +OP_NOP1 = CScriptOp(0xb0) +OP_NOP2 = CScriptOp(0xb1) +OP_NOP3 = CScriptOp(0xb2) +OP_NOP4 = CScriptOp(0xb3) +OP_NOP5 = CScriptOp(0xb4) +OP_NOP6 = CScriptOp(0xb5) +OP_NOP7 = CScriptOp(0xb6) +OP_NOP8 = CScriptOp(0xb7) +OP_NOP9 = CScriptOp(0xb8) +OP_NOP10 = CScriptOp(0xb9) + +# template matching params +OP_SMALLINTEGER = 
CScriptOp(0xfa) +OP_PUBKEYS = CScriptOp(0xfb) +OP_PUBKEYHASH = CScriptOp(0xfd) +OP_PUBKEY = CScriptOp(0xfe) + +OP_INVALIDOPCODE = CScriptOp(0xff) + +VALID_OPCODES = { + OP_1NEGATE, + OP_RESERVED, + OP_1, + OP_2, + OP_3, + OP_4, + OP_5, + OP_6, + OP_7, + OP_8, + OP_9, + OP_10, + OP_11, + OP_12, + OP_13, + OP_14, + OP_15, + OP_16, + + OP_NOP, + OP_VER, + OP_IF, + OP_NOTIF, + OP_VERIF, + OP_VERNOTIF, + OP_ELSE, + OP_ENDIF, + OP_VERIFY, + OP_RETURN, + + OP_TOALTSTACK, + OP_FROMALTSTACK, + OP_2DROP, + OP_2DUP, + OP_3DUP, + OP_2OVER, + OP_2ROT, + OP_2SWAP, + OP_IFDUP, + OP_DEPTH, + OP_DROP, + OP_DUP, + OP_NIP, + OP_OVER, + OP_PICK, + OP_ROLL, + OP_ROT, + OP_SWAP, + OP_TUCK, + + OP_CAT, + OP_SUBSTR, + OP_LEFT, + OP_RIGHT, + OP_SIZE, + + OP_INVERT, + OP_AND, + OP_OR, + OP_XOR, + OP_EQUAL, + OP_EQUALVERIFY, + OP_RESERVED1, + OP_RESERVED2, + + OP_1ADD, + OP_1SUB, + OP_2MUL, + OP_2DIV, + OP_NEGATE, + OP_ABS, + OP_NOT, + OP_0NOTEQUAL, + + OP_ADD, + OP_SUB, + OP_MUL, + OP_DIV, + OP_MOD, + OP_LSHIFT, + OP_RSHIFT, + + OP_BOOLAND, + OP_BOOLOR, + OP_NUMEQUAL, + OP_NUMEQUALVERIFY, + OP_NUMNOTEQUAL, + OP_LESSTHAN, + OP_GREATERTHAN, + OP_LESSTHANOREQUAL, + OP_GREATERTHANOREQUAL, + OP_MIN, + OP_MAX, + + OP_WITHIN, + + OP_RIPEMD160, + OP_SHA1, + OP_SHA256, + OP_HASH160, + OP_HASH256, + OP_CODESEPARATOR, + OP_CHECKSIG, + OP_CHECKSIGVERIFY, + OP_CHECKMULTISIG, + OP_CHECKMULTISIGVERIFY, + + OP_NOP1, + OP_NOP2, + OP_NOP3, + OP_NOP4, + OP_NOP5, + OP_NOP6, + OP_NOP7, + OP_NOP8, + OP_NOP9, + OP_NOP10, + + OP_SMALLINTEGER, + OP_PUBKEYS, + OP_PUBKEYHASH, + OP_PUBKEY, +} + +OPCODE_NAMES.update({ + OP_0 : 'OP_0', + OP_PUSHDATA1 : 'OP_PUSHDATA1', + OP_PUSHDATA2 : 'OP_PUSHDATA2', + OP_PUSHDATA4 : 'OP_PUSHDATA4', + OP_1NEGATE : 'OP_1NEGATE', + OP_RESERVED : 'OP_RESERVED', + OP_1 : 'OP_1', + OP_2 : 'OP_2', + OP_3 : 'OP_3', + OP_4 : 'OP_4', + OP_5 : 'OP_5', + OP_6 : 'OP_6', + OP_7 : 'OP_7', + OP_8 : 'OP_8', + OP_9 : 'OP_9', + OP_10 : 'OP_10', + OP_11 : 'OP_11', + OP_12 : 'OP_12', + OP_13 : 'OP_13', + OP_14 : 'OP_14', + OP_15 : 'OP_15', + OP_16 : 'OP_16', + OP_NOP : 'OP_NOP', + OP_VER : 'OP_VER', + OP_IF : 'OP_IF', + OP_NOTIF : 'OP_NOTIF', + OP_VERIF : 'OP_VERIF', + OP_VERNOTIF : 'OP_VERNOTIF', + OP_ELSE : 'OP_ELSE', + OP_ENDIF : 'OP_ENDIF', + OP_VERIFY : 'OP_VERIFY', + OP_RETURN : 'OP_RETURN', + OP_TOALTSTACK : 'OP_TOALTSTACK', + OP_FROMALTSTACK : 'OP_FROMALTSTACK', + OP_2DROP : 'OP_2DROP', + OP_2DUP : 'OP_2DUP', + OP_3DUP : 'OP_3DUP', + OP_2OVER : 'OP_2OVER', + OP_2ROT : 'OP_2ROT', + OP_2SWAP : 'OP_2SWAP', + OP_IFDUP : 'OP_IFDUP', + OP_DEPTH : 'OP_DEPTH', + OP_DROP : 'OP_DROP', + OP_DUP : 'OP_DUP', + OP_NIP : 'OP_NIP', + OP_OVER : 'OP_OVER', + OP_PICK : 'OP_PICK', + OP_ROLL : 'OP_ROLL', + OP_ROT : 'OP_ROT', + OP_SWAP : 'OP_SWAP', + OP_TUCK : 'OP_TUCK', + OP_CAT : 'OP_CAT', + OP_SUBSTR : 'OP_SUBSTR', + OP_LEFT : 'OP_LEFT', + OP_RIGHT : 'OP_RIGHT', + OP_SIZE : 'OP_SIZE', + OP_INVERT : 'OP_INVERT', + OP_AND : 'OP_AND', + OP_OR : 'OP_OR', + OP_XOR : 'OP_XOR', + OP_EQUAL : 'OP_EQUAL', + OP_EQUALVERIFY : 'OP_EQUALVERIFY', + OP_RESERVED1 : 'OP_RESERVED1', + OP_RESERVED2 : 'OP_RESERVED2', + OP_1ADD : 'OP_1ADD', + OP_1SUB : 'OP_1SUB', + OP_2MUL : 'OP_2MUL', + OP_2DIV : 'OP_2DIV', + OP_NEGATE : 'OP_NEGATE', + OP_ABS : 'OP_ABS', + OP_NOT : 'OP_NOT', + OP_0NOTEQUAL : 'OP_0NOTEQUAL', + OP_ADD : 'OP_ADD', + OP_SUB : 'OP_SUB', + OP_MUL : 'OP_MUL', + OP_DIV : 'OP_DIV', + OP_MOD : 'OP_MOD', + OP_LSHIFT : 'OP_LSHIFT', + OP_RSHIFT : 'OP_RSHIFT', + OP_BOOLAND : 'OP_BOOLAND', + OP_BOOLOR : 'OP_BOOLOR', + OP_NUMEQUAL : 'OP_NUMEQUAL', + 
OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY', + OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL', + OP_LESSTHAN : 'OP_LESSTHAN', + OP_GREATERTHAN : 'OP_GREATERTHAN', + OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL', + OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL', + OP_MIN : 'OP_MIN', + OP_MAX : 'OP_MAX', + OP_WITHIN : 'OP_WITHIN', + OP_RIPEMD160 : 'OP_RIPEMD160', + OP_SHA1 : 'OP_SHA1', + OP_SHA256 : 'OP_SHA256', + OP_HASH160 : 'OP_HASH160', + OP_HASH256 : 'OP_HASH256', + OP_CODESEPARATOR : 'OP_CODESEPARATOR', + OP_CHECKSIG : 'OP_CHECKSIG', + OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY', + OP_CHECKMULTISIG : 'OP_CHECKMULTISIG', + OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY', + OP_NOP1 : 'OP_NOP1', + OP_NOP2 : 'OP_NOP2', + OP_NOP3 : 'OP_NOP3', + OP_NOP4 : 'OP_NOP4', + OP_NOP5 : 'OP_NOP5', + OP_NOP6 : 'OP_NOP6', + OP_NOP7 : 'OP_NOP7', + OP_NOP8 : 'OP_NOP8', + OP_NOP9 : 'OP_NOP9', + OP_NOP10 : 'OP_NOP10', + OP_SMALLINTEGER : 'OP_SMALLINTEGER', + OP_PUBKEYS : 'OP_PUBKEYS', + OP_PUBKEYHASH : 'OP_PUBKEYHASH', + OP_PUBKEY : 'OP_PUBKEY', + OP_INVALIDOPCODE : 'OP_INVALIDOPCODE', +}) + +OPCODES_BY_NAME = { + 'OP_0' : OP_0, + 'OP_PUSHDATA1' : OP_PUSHDATA1, + 'OP_PUSHDATA2' : OP_PUSHDATA2, + 'OP_PUSHDATA4' : OP_PUSHDATA4, + 'OP_1NEGATE' : OP_1NEGATE, + 'OP_RESERVED' : OP_RESERVED, + 'OP_1' : OP_1, + 'OP_2' : OP_2, + 'OP_3' : OP_3, + 'OP_4' : OP_4, + 'OP_5' : OP_5, + 'OP_6' : OP_6, + 'OP_7' : OP_7, + 'OP_8' : OP_8, + 'OP_9' : OP_9, + 'OP_10' : OP_10, + 'OP_11' : OP_11, + 'OP_12' : OP_12, + 'OP_13' : OP_13, + 'OP_14' : OP_14, + 'OP_15' : OP_15, + 'OP_16' : OP_16, + 'OP_NOP' : OP_NOP, + 'OP_VER' : OP_VER, + 'OP_IF' : OP_IF, + 'OP_NOTIF' : OP_NOTIF, + 'OP_VERIF' : OP_VERIF, + 'OP_VERNOTIF' : OP_VERNOTIF, + 'OP_ELSE' : OP_ELSE, + 'OP_ENDIF' : OP_ENDIF, + 'OP_VERIFY' : OP_VERIFY, + 'OP_RETURN' : OP_RETURN, + 'OP_TOALTSTACK' : OP_TOALTSTACK, + 'OP_FROMALTSTACK' : OP_FROMALTSTACK, + 'OP_2DROP' : OP_2DROP, + 'OP_2DUP' : OP_2DUP, + 'OP_3DUP' : OP_3DUP, + 'OP_2OVER' : OP_2OVER, + 'OP_2ROT' : OP_2ROT, + 'OP_2SWAP' : OP_2SWAP, + 'OP_IFDUP' : OP_IFDUP, + 'OP_DEPTH' : OP_DEPTH, + 'OP_DROP' : OP_DROP, + 'OP_DUP' : OP_DUP, + 'OP_NIP' : OP_NIP, + 'OP_OVER' : OP_OVER, + 'OP_PICK' : OP_PICK, + 'OP_ROLL' : OP_ROLL, + 'OP_ROT' : OP_ROT, + 'OP_SWAP' : OP_SWAP, + 'OP_TUCK' : OP_TUCK, + 'OP_CAT' : OP_CAT, + 'OP_SUBSTR' : OP_SUBSTR, + 'OP_LEFT' : OP_LEFT, + 'OP_RIGHT' : OP_RIGHT, + 'OP_SIZE' : OP_SIZE, + 'OP_INVERT' : OP_INVERT, + 'OP_AND' : OP_AND, + 'OP_OR' : OP_OR, + 'OP_XOR' : OP_XOR, + 'OP_EQUAL' : OP_EQUAL, + 'OP_EQUALVERIFY' : OP_EQUALVERIFY, + 'OP_RESERVED1' : OP_RESERVED1, + 'OP_RESERVED2' : OP_RESERVED2, + 'OP_1ADD' : OP_1ADD, + 'OP_1SUB' : OP_1SUB, + 'OP_2MUL' : OP_2MUL, + 'OP_2DIV' : OP_2DIV, + 'OP_NEGATE' : OP_NEGATE, + 'OP_ABS' : OP_ABS, + 'OP_NOT' : OP_NOT, + 'OP_0NOTEQUAL' : OP_0NOTEQUAL, + 'OP_ADD' : OP_ADD, + 'OP_SUB' : OP_SUB, + 'OP_MUL' : OP_MUL, + 'OP_DIV' : OP_DIV, + 'OP_MOD' : OP_MOD, + 'OP_LSHIFT' : OP_LSHIFT, + 'OP_RSHIFT' : OP_RSHIFT, + 'OP_BOOLAND' : OP_BOOLAND, + 'OP_BOOLOR' : OP_BOOLOR, + 'OP_NUMEQUAL' : OP_NUMEQUAL, + 'OP_NUMEQUALVERIFY' : OP_NUMEQUALVERIFY, + 'OP_NUMNOTEQUAL' : OP_NUMNOTEQUAL, + 'OP_LESSTHAN' : OP_LESSTHAN, + 'OP_GREATERTHAN' : OP_GREATERTHAN, + 'OP_LESSTHANOREQUAL' : OP_LESSTHANOREQUAL, + 'OP_GREATERTHANOREQUAL' : OP_GREATERTHANOREQUAL, + 'OP_MIN' : OP_MIN, + 'OP_MAX' : OP_MAX, + 'OP_WITHIN' : OP_WITHIN, + 'OP_RIPEMD160' : OP_RIPEMD160, + 'OP_SHA1' : OP_SHA1, + 'OP_SHA256' : OP_SHA256, + 'OP_HASH160' : OP_HASH160, + 'OP_HASH256' : OP_HASH256, + 'OP_CODESEPARATOR' : OP_CODESEPARATOR, + 'OP_CHECKSIG' : 
OP_CHECKSIG, + 'OP_CHECKSIGVERIFY' : OP_CHECKSIGVERIFY, + 'OP_CHECKMULTISIG' : OP_CHECKMULTISIG, + 'OP_CHECKMULTISIGVERIFY' : OP_CHECKMULTISIGVERIFY, + 'OP_NOP1' : OP_NOP1, + 'OP_NOP2' : OP_NOP2, + 'OP_NOP3' : OP_NOP3, + 'OP_NOP4' : OP_NOP4, + 'OP_NOP5' : OP_NOP5, + 'OP_NOP6' : OP_NOP6, + 'OP_NOP7' : OP_NOP7, + 'OP_NOP8' : OP_NOP8, + 'OP_NOP9' : OP_NOP9, + 'OP_NOP10' : OP_NOP10, + 'OP_SMALLINTEGER' : OP_SMALLINTEGER, + 'OP_PUBKEYS' : OP_PUBKEYS, + 'OP_PUBKEYHASH' : OP_PUBKEYHASH, + 'OP_PUBKEY' : OP_PUBKEY, +} + +class CScriptInvalidError(Exception): + """Base class for CScript exceptions""" + pass + +class CScriptTruncatedPushDataError(CScriptInvalidError): + """Invalid pushdata due to truncation""" + def __init__(self, msg, data): + self.data = data + super(CScriptTruncatedPushDataError, self).__init__(msg) + +# This is used, eg, for blockchain heights in coinbase scripts (bip34) +class CScriptNum(object): + def __init__(self, d=0): + self.value = d + + @staticmethod + def encode(obj): + r = bytearray(0) + if obj.value == 0: + return bytes(r) + neg = obj.value < 0 + absvalue = -obj.value if neg else obj.value + while (absvalue): + r.append(chr(absvalue & 0xff)) + absvalue >>= 8 + if r[-1] & 0x80: + r.append(0x80 if neg else 0) + elif neg: + r[-1] |= 0x80 + return bytes(bchr(len(r)) + r) + + +class CScript(bytes): + """Serialized script + + A bytes subclass, so you can use this directly whenever bytes are accepted. + Note that this means that indexing does *not* work - you'll get an index by + byte rather than opcode. This format was chosen for efficiency so that the + general case would not require creating a lot of little CScriptOP objects. + + iter(script) however does iterate by opcode. + """ + @classmethod + def __coerce_instance(cls, other): + # Coerce other into bytes + if isinstance(other, CScriptOp): + other = bchr(other) + elif isinstance(other, CScriptNum): + if (other.value == 0): + other = bchr(CScriptOp(OP_0)) + else: + other = CScriptNum.encode(other) + elif isinstance(other, (int, long)): + if 0 <= other <= 16: + other = bytes(bchr(CScriptOp.encode_op_n(other))) + elif other == -1: + other = bytes(bchr(OP_1NEGATE)) + else: + other = CScriptOp.encode_op_pushdata(bignum.bn2vch(other)) + elif isinstance(other, (bytes, bytearray)): + other = CScriptOp.encode_op_pushdata(other) + return other + + def __add__(self, other): + # Do the coercion outside of the try block so that errors in it are + # noticed. + other = self.__coerce_instance(other) + + try: + # bytes.__add__ always returns bytes instances unfortunately + return CScript(super(CScript, self).__add__(other)) + except TypeError: + raise TypeError('Can not add a %r instance to a CScript' % other.__class__) + + def join(self, iterable): + # join makes no sense for a CScript() + raise NotImplementedError + + def __new__(cls, value=b''): + if isinstance(value, bytes) or isinstance(value, bytearray): + return super(CScript, cls).__new__(cls, value) + else: + def coerce_iterable(iterable): + for instance in iterable: + yield cls.__coerce_instance(instance) + # Annoyingly on both python2 and python3 bytes.join() always + # returns a bytes instance even when subclassed. + return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value))) + + def raw_iter(self): + """Raw iteration + + Yields tuples of (opcode, data, sop_idx) so that the different possible + PUSHDATA encodings can be accurately distinguished, as well as + determining the exact opcode byte indexes. 
(sop_idx) + """ + i = 0 + while i < len(self): + sop_idx = i + opcode = bord(self[i]) + i += 1 + + if opcode > OP_PUSHDATA4: + yield (opcode, None, sop_idx) + else: + datasize = None + pushdata_type = None + if opcode < OP_PUSHDATA1: + pushdata_type = 'PUSHDATA(%d)' % opcode + datasize = opcode + + elif opcode == OP_PUSHDATA1: + pushdata_type = 'PUSHDATA1' + if i >= len(self): + raise CScriptInvalidError('PUSHDATA1: missing data length') + datasize = bord(self[i]) + i += 1 + + elif opcode == OP_PUSHDATA2: + pushdata_type = 'PUSHDATA2' + if i + 1 >= len(self): + raise CScriptInvalidError('PUSHDATA2: missing data length') + datasize = bord(self[i]) + (bord(self[i+1]) << 8) + i += 2 + + elif opcode == OP_PUSHDATA4: + pushdata_type = 'PUSHDATA4' + if i + 3 >= len(self): + raise CScriptInvalidError('PUSHDATA4: missing data length') + datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24) + i += 4 + + else: + assert False # shouldn't happen + + + data = bytes(self[i:i+datasize]) + + # Check for truncation + if len(data) < datasize: + raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data) + + i += datasize + + yield (opcode, data, sop_idx) + + def __iter__(self): + """'Cooked' iteration + + Returns either a CScriptOP instance, an integer, or bytes, as + appropriate. + + See raw_iter() if you need to distinguish the different possible + PUSHDATA encodings. + """ + for (opcode, data, sop_idx) in self.raw_iter(): + if data is not None: + yield data + else: + opcode = CScriptOp(opcode) + + if opcode.is_small_int(): + yield opcode.decode_op_n() + else: + yield CScriptOp(opcode) + + def __repr__(self): + # For Python3 compatibility add b before strings so testcases don't + # need to change + def _repr(o): + if isinstance(o, bytes): + return "x('%s')" % binascii.hexlify(o).decode('utf8') + else: + return repr(o) + + ops = [] + i = iter(self) + while True: + op = None + try: + op = _repr(next(i)) + except CScriptTruncatedPushDataError as err: + op = '%s...<ERROR: %s>' % (_repr(err.data), err) + break + except CScriptInvalidError as err: + op = '<ERROR: %s>' % err + break + except StopIteration: + break + finally: + if op is not None: + ops.append(op) + + return "CScript([%s])" % ', '.join(ops) + + def GetSigOpCount(self, fAccurate): + """Get the SigOp count. + + fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details. + + Note that this is consensus-critical. + """ + n = 0 + lastOpcode = OP_INVALIDOPCODE + for (opcode, data, sop_idx) in self.raw_iter(): + if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY): + n += 1 + elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): + if fAccurate and (OP_1 <= lastOpcode <= OP_16): + n += opcode.decode_op_n() + else: + n += 20 + lastOpcode = opcode + return n + + +SIGHASH_ALL = 1 +SIGHASH_NONE = 2 +SIGHASH_SINGLE = 3 +SIGHASH_ANYONECANPAY = 0x80 + +def FindAndDelete(script, sig): + """Consensus critical, see FindAndDelete() in Satoshi codebase""" + r = b'' + last_sop_idx = sop_idx = 0 + skip = True + for (opcode, data, sop_idx) in script.raw_iter(): + if not skip: + r += script[last_sop_idx:sop_idx] + last_sop_idx = sop_idx + if script[sop_idx:sop_idx + len(sig)] == sig: + skip = True + else: + skip = False + if not skip: + r += script[last_sop_idx:] + return CScript(r) + + +def SignatureHash(script, txTo, inIdx, hashtype): + """Consensus-correct SignatureHash + + Returns (hash, err) to precisely match the consensus-critical behavior of + the SIGHASH_SINGLE bug. 
(inIdx is *not* checked for validity) + """ + HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + + if inIdx >= len(txTo.vin): + return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin))) + txtmp = CTransaction(txTo) + + for txin in txtmp.vin: + txin.scriptSig = b'' + txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR])) + + if (hashtype & 0x1f) == SIGHASH_NONE: + txtmp.vout = [] + + for i in range(len(txtmp.vin)): + if i != inIdx: + txtmp.vin[i].nSequence = 0 + + elif (hashtype & 0x1f) == SIGHASH_SINGLE: + outIdx = inIdx + if outIdx >= len(txtmp.vout): + return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout))) + + tmp = txtmp.vout[outIdx] + txtmp.vout = [] + for i in range(outIdx): + txtmp.vout.append(CTxOut()) + txtmp.vout.append(tmp) + + for i in range(len(txtmp.vin)): + if i != inIdx: + txtmp.vin[i].nSequence = 0 + + if hashtype & SIGHASH_ANYONECANPAY: + tmp = txtmp.vin[inIdx] + txtmp.vin = [] + txtmp.vin.append(tmp) + + s = txtmp.serialize() + s += struct.pack(b"<I", hashtype) + + hash = hash256(s) + + return (hash, None) diff --git a/qa/rpc-tests/test_framework/socks5.py b/qa/rpc-tests/test_framework/socks5.py new file mode 100644 index 0000000000..1dbfb98d5d --- /dev/null +++ b/qa/rpc-tests/test_framework/socks5.py @@ -0,0 +1,160 @@ +# Copyright (c) 2015 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +''' +Dummy Socks5 server for testing. +''' +from __future__ import print_function, division, unicode_literals +import socket, threading, Queue +import traceback, sys + +### Protocol constants +class Command: + CONNECT = 0x01 + +class AddressType: + IPV4 = 0x01 + DOMAINNAME = 0x03 + IPV6 = 0x04 + +### Utility functions +def recvall(s, n): + '''Receive n bytes from a socket, or fail''' + rv = bytearray() + while n > 0: + d = s.recv(n) + if not d: + raise IOError('Unexpected end of stream') + rv.extend(d) + n -= len(d) + return rv + +### Implementation classes +class Socks5Configuration(object): + '''Proxy configuration''' + def __init__(self): + self.addr = None # Bind address (must be set) + self.af = socket.AF_INET # Bind address family + self.unauth = False # Support unauthenticated + self.auth = False # Support authentication + +class Socks5Command(object): + '''Information about an incoming socks5 command''' + def __init__(self, cmd, atyp, addr, port, username, password): + self.cmd = cmd # Command (one of Command.*) + self.atyp = atyp # Address type (one of AddressType.*) + self.addr = addr # Address + self.port = port # Port to connect to + self.username = username + self.password = password + def __repr__(self): + return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password) + +class Socks5Connection(object): + def __init__(self, serv, conn, peer): + self.serv = serv + self.conn = conn + self.peer = peer + + def handle(self): + ''' + Handle socks5 request according to RFC1928 + ''' + try: + # Verify socks version + ver = recvall(self.conn, 1)[0] + if ver != 0x05: + raise IOError('Invalid socks version %i' % ver) + # Choose authentication method + nmethods = recvall(self.conn, 1)[0] + methods = bytearray(recvall(self.conn, nmethods)) + method = None + if 0x02 in methods and self.serv.conf.auth: + method = 0x02 # username/password + elif 0x00 in methods and 
self.serv.conf.unauth: + method = 0x00 # unauthenticated + if method is None: + raise IOError('No supported authentication method was offered') + # Send response + self.conn.sendall(bytearray([0x05, method])) + # Read authentication (optional) + username = None + password = None + if method == 0x02: + ver = recvall(self.conn, 1)[0] + if ver != 0x01: + raise IOError('Invalid auth packet version %i' % ver) + ulen = recvall(self.conn, 1)[0] + username = str(recvall(self.conn, ulen)) + plen = recvall(self.conn, 1)[0] + password = str(recvall(self.conn, plen)) + # Send authentication response + self.conn.sendall(bytearray([0x01, 0x00])) + + # Read connect request + (ver,cmd,rsv,atyp) = recvall(self.conn, 4) + if ver != 0x05: + raise IOError('Invalid socks version %i in connect request' % ver) + if cmd != Command.CONNECT: + raise IOError('Unhandled command %i in connect request' % cmd) + + if atyp == AddressType.IPV4: + addr = recvall(self.conn, 4) + elif atyp == AddressType.DOMAINNAME: + n = recvall(self.conn, 1)[0] + addr = str(recvall(self.conn, n)) + elif atyp == AddressType.IPV6: + addr = recvall(self.conn, 16) + else: + raise IOError('Unknown address type %i' % atyp) + port_hi,port_lo = recvall(self.conn, 2) + port = (port_hi << 8) | port_lo + + # Send dummy response + self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])) + + cmdin = Socks5Command(cmd, atyp, addr, port, username, password) + self.serv.queue.put(cmdin) + print('Proxy: ', cmdin) + # Fall through to disconnect + except Exception,e: + traceback.print_exc(file=sys.stderr) + self.serv.queue.put(e) + finally: + self.conn.close() + +class Socks5Server(object): + def __init__(self, conf): + self.conf = conf + self.s = socket.socket(conf.af) + self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.s.bind(conf.addr) + self.s.listen(5) + self.running = False + self.thread = None + self.queue = Queue.Queue() # report connections and exceptions to client + + def run(self): + while self.running: + (sockconn, peer) = self.s.accept() + if self.running: + conn = Socks5Connection(self, sockconn, peer) + thread = threading.Thread(None, conn.handle) + thread.daemon = True + thread.start() + + def start(self): + assert(not self.running) + self.running = True + self.thread = threading.Thread(None, self.run) + self.thread.daemon = True + self.thread.start() + + def stop(self): + self.running = False + # connect to self to end run loop + s = socket.socket(self.conf.af) + s.connect(self.conf.addr) + s.close() + self.thread.join() + diff --git a/qa/rpc-tests/test_framework/test_framework.py b/qa/rpc-tests/test_framework/test_framework.py new file mode 100755 index 0000000000..15a357a340 --- /dev/null +++ b/qa/rpc-tests/test_framework/test_framework.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python2 +# Copyright (c) 2014 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +# Base class for RPC testing + +# Add python-bitcoinrpc to module search path: +import os +import sys +sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc")) + +import shutil +import tempfile +import traceback + +from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException +from util import * + + +class BitcoinTestFramework(object): + + # These may be over-ridden by subclasses: + def run_test(self): + for node in self.nodes: + assert_equal(node.getblockcount(), 200) + assert_equal(node.getbalance(), 25*50) + + def add_options(self, parser): + pass + + def setup_chain(self): + print("Initializing test directory "+self.options.tmpdir) + initialize_chain(self.options.tmpdir) + + def setup_nodes(self): + return start_nodes(4, self.options.tmpdir) + + def setup_network(self, split = False): + self.nodes = self.setup_nodes() + + # Connect the nodes as a "chain". This allows us + # to split the network between nodes 1 and 2 to get + # two halves that can work on competing chains. + + # If we joined network halves, connect the nodes from the joint + # on outward. This ensures that chains are properly reorganised. + if not split: + connect_nodes_bi(self.nodes, 1, 2) + sync_blocks(self.nodes[1:3]) + sync_mempools(self.nodes[1:3]) + + connect_nodes_bi(self.nodes, 0, 1) + connect_nodes_bi(self.nodes, 2, 3) + self.is_network_split = split + self.sync_all() + + def split_network(self): + """ + Split the network of four nodes into nodes 0/1 and 2/3. + """ + assert not self.is_network_split + stop_nodes(self.nodes) + wait_bitcoinds() + self.setup_network(True) + + def sync_all(self): + if self.is_network_split: + sync_blocks(self.nodes[:2]) + sync_blocks(self.nodes[2:]) + sync_mempools(self.nodes[:2]) + sync_mempools(self.nodes[2:]) + else: + sync_blocks(self.nodes) + sync_mempools(self.nodes) + + def join_network(self): + """ + Join the (previously split) network halves together. 
+ """ + assert self.is_network_split + stop_nodes(self.nodes) + wait_bitcoinds() + self.setup_network(False) + + def main(self): + import optparse + + parser = optparse.OptionParser(usage="%prog [options]") + parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true", + help="Leave bitcoinds and test.* datadir on exit or error") + parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true", + help="Don't stop bitcoinds after the test execution") + parser.add_option("--srcdir", dest="srcdir", default="../../src", + help="Source directory containing bitcoind/bitcoin-cli (default: %default)") + parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"), + help="Root directory for datadirs") + parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true", + help="Print out all RPC calls as they are made") + self.add_options(parser) + (self.options, self.args) = parser.parse_args() + + if self.options.trace_rpc: + import logging + logging.basicConfig(level=logging.DEBUG) + + os.environ['PATH'] = self.options.srcdir+":"+os.environ['PATH'] + + check_json_precision() + + success = False + try: + if not os.path.isdir(self.options.tmpdir): + os.makedirs(self.options.tmpdir) + self.setup_chain() + + self.setup_network() + + self.run_test() + + success = True + + except JSONRPCException as e: + print("JSONRPC error: "+e.error['message']) + traceback.print_tb(sys.exc_info()[2]) + except AssertionError as e: + print("Assertion failed: "+e.message) + traceback.print_tb(sys.exc_info()[2]) + except Exception as e: + print("Unexpected exception caught during testing: "+str(e)) + traceback.print_tb(sys.exc_info()[2]) + + if not self.options.noshutdown: + print("Stopping nodes") + stop_nodes(self.nodes) + wait_bitcoinds() + else: + print("Note: bitcoinds were not stopped and may still be running") + + if not self.options.nocleanup and not self.options.noshutdown: + print("Cleaning up") + shutil.rmtree(self.options.tmpdir) + + if success: + print("Tests successful") + sys.exit(0) + else: + print("Failed") + sys.exit(1) + + +# Test framework for doing p2p comparison testing, which sets up some bitcoind +# binaries: +# 1 binary: test binary +# 2 binaries: 1 test binary, 1 ref binary +# n>2 binaries: 1 test binary, n-1 ref binaries + +class ComparisonTestFramework(BitcoinTestFramework): + + # Can override the num_nodes variable to indicate how many nodes to run. 
+ def __init__(self): + self.num_nodes = 2 + + def add_options(self, parser): + parser.add_option("--testbinary", dest="testbinary", + default=os.getenv("BITCOIND", "bitcoind"), + help="bitcoind binary to test") + parser.add_option("--refbinary", dest="refbinary", + default=os.getenv("BITCOIND", "bitcoind"), + help="bitcoind binary to use for reference nodes (if any)") + + def setup_chain(self): + print "Initializing test directory "+self.options.tmpdir + initialize_chain_clean(self.options.tmpdir, self.num_nodes) + + def setup_network(self): + self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, + extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes, + binary=[self.options.testbinary] + + [self.options.refbinary]*(self.num_nodes-1)) diff --git a/qa/rpc-tests/test_framework/util.py b/qa/rpc-tests/test_framework/util.py new file mode 100644 index 0000000000..997bbcc373 --- /dev/null +++ b/qa/rpc-tests/test_framework/util.py @@ -0,0 +1,357 @@ +# Copyright (c) 2014 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +# +# Helpful routines for regression testing +# + +# Add python-bitcoinrpc to module search path: +import os +import sys +sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc")) + +from decimal import Decimal, ROUND_DOWN +import json +import random +import shutil +import subprocess +import time +import re + +from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException +from util import * + +def p2p_port(n): + return 11000 + n + os.getpid()%999 +def rpc_port(n): + return 12000 + n + os.getpid()%999 + +def check_json_precision(): + """Make sure json library being used does not lose precision converting BTC values""" + n = Decimal("20000000.00000003") + satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) + if satoshis != 2000000000000003: + raise RuntimeError("JSON encode/decode loses precision") + +def sync_blocks(rpc_connections, wait=1): + """ + Wait until everybody has the same block count + """ + while True: + counts = [ x.getblockcount() for x in rpc_connections ] + if counts == [ counts[0] ]*len(counts): + break + time.sleep(wait) + +def sync_mempools(rpc_connections, wait=1): + """ + Wait until everybody has the same transactions in their memory + pools + """ + while True: + pool = set(rpc_connections[0].getrawmempool()) + num_match = 1 + for i in range(1, len(rpc_connections)): + if set(rpc_connections[i].getrawmempool()) == pool: + num_match = num_match+1 + if num_match == len(rpc_connections): + break + time.sleep(wait) + +bitcoind_processes = {} + +def initialize_datadir(dirname, n): + datadir = os.path.join(dirname, "node"+str(n)) + if not os.path.isdir(datadir): + os.makedirs(datadir) + with open(os.path.join(datadir, "bitcoin.conf"), 'w') as f: + f.write("regtest=1\n"); + f.write("rpcuser=rt\n"); + f.write("rpcpassword=rt\n"); + f.write("port="+str(p2p_port(n))+"\n"); + f.write("rpcport="+str(rpc_port(n))+"\n"); + return datadir + +def initialize_chain(test_dir): + """ + Create (or copy from cache) a 200-block-long chain and + 4 wallets. + bitcoind and bitcoin-cli must be in search path. 
+ """ + + if not os.path.isdir(os.path.join("cache", "node0")): + devnull = open("/dev/null", "w+") + # Create cache directories, run bitcoinds: + for i in range(4): + datadir=initialize_datadir("cache", i) + args = [ os.getenv("BITCOIND", "bitcoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ] + if i > 0: + args.append("-connect=127.0.0.1:"+str(p2p_port(0))) + bitcoind_processes[i] = subprocess.Popen(args) + if os.getenv("PYTHON_DEBUG", ""): + print "initialize_chain: bitcoind started, calling bitcoin-cli -rpcwait getblockcount" + subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir, + "-rpcwait", "getblockcount"], stdout=devnull) + if os.getenv("PYTHON_DEBUG", ""): + print "initialize_chain: bitcoin-cli -rpcwait getblockcount completed" + devnull.close() + rpcs = [] + for i in range(4): + try: + url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),) + rpcs.append(AuthServiceProxy(url)) + except: + sys.stderr.write("Error connecting to "+url+"\n") + sys.exit(1) + + # Create a 200-block-long chain; each of the 4 nodes + # gets 25 mature blocks and 25 immature. + # blocks are created with timestamps 10 minutes apart, starting + # at 1 Jan 2014 + block_time = 1388534400 + for i in range(2): + for peer in range(4): + for j in range(25): + set_node_times(rpcs, block_time) + rpcs[peer].generate(1) + block_time += 10*60 + # Must sync before next peer starts generating blocks + sync_blocks(rpcs) + + # Shut them down, and clean up cache directories: + stop_nodes(rpcs) + wait_bitcoinds() + for i in range(4): + os.remove(log_filename("cache", i, "debug.log")) + os.remove(log_filename("cache", i, "db.log")) + os.remove(log_filename("cache", i, "peers.dat")) + os.remove(log_filename("cache", i, "fee_estimates.dat")) + + for i in range(4): + from_dir = os.path.join("cache", "node"+str(i)) + to_dir = os.path.join(test_dir, "node"+str(i)) + shutil.copytree(from_dir, to_dir) + initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf + +def initialize_chain_clean(test_dir, num_nodes): + """ + Create an empty blockchain and num_nodes wallets. + Useful if a test case wants complete control over initialization. + """ + for i in range(num_nodes): + datadir=initialize_datadir(test_dir, i) + + +def _rpchost_to_args(rpchost): + '''Convert optional IP:port spec to rpcconnect/rpcport args''' + if rpchost is None: + return [] + + match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost) + if not match: + raise ValueError('Invalid RPC host spec ' + rpchost) + + rpcconnect = match.group(1) + rpcport = match.group(2) + + if rpcconnect.startswith('['): # remove IPv6 [...] 
wrapping + rpcconnect = rpcconnect[1:-1] + + rv = ['-rpcconnect=' + rpcconnect] + if rpcport: + rv += ['-rpcport=' + rpcport] + return rv + +def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None): + """ + Start a bitcoind and return RPC connection to it + """ + datadir = os.path.join(dirname, "node"+str(i)) + if binary is None: + binary = os.getenv("BITCOIND", "bitcoind") + args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ] + if extra_args is not None: args.extend(extra_args) + bitcoind_processes[i] = subprocess.Popen(args) + devnull = open("/dev/null", "w+") + if os.getenv("PYTHON_DEBUG", ""): + print "start_node: bitcoind started, calling bitcoin-cli -rpcwait getblockcount" + subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir] + + _rpchost_to_args(rpchost) + + ["-rpcwait", "getblockcount"], stdout=devnull) + if os.getenv("PYTHON_DEBUG", ""): + print "start_node: calling bitcoin-cli -rpcwait getblockcount returned" + devnull.close() + url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i)) + if timewait is not None: + proxy = AuthServiceProxy(url, timeout=timewait) + else: + proxy = AuthServiceProxy(url) + proxy.url = url # store URL on proxy for info + return proxy + +def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None): + """ + Start multiple bitcoinds, return RPC connections to them + """ + if extra_args is None: extra_args = [ None for i in range(num_nodes) ] + if binary is None: binary = [ None for i in range(num_nodes) ] + return [ start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]) for i in range(num_nodes) ] + +def log_filename(dirname, n_node, logname): + return os.path.join(dirname, "node"+str(n_node), "regtest", logname) + +def stop_node(node, i): + node.stop() + bitcoind_processes[i].wait() + del bitcoind_processes[i] + +def stop_nodes(nodes): + for node in nodes: + node.stop() + del nodes[:] # Emptying array closes connections as a side effect + +def set_node_times(nodes, t): + for node in nodes: + node.setmocktime(t) + +def wait_bitcoinds(): + # Wait for all bitcoinds to cleanly exit + for bitcoind in bitcoind_processes.values(): + bitcoind.wait() + bitcoind_processes.clear() + +def connect_nodes(from_connection, node_num): + ip_port = "127.0.0.1:"+str(p2p_port(node_num)) + from_connection.addnode(ip_port, "onetry") + # poll until version handshake complete to avoid race conditions + # with transaction relaying + while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()): + time.sleep(0.1) + +def connect_nodes_bi(nodes, a, b): + connect_nodes(nodes[a], b) + connect_nodes(nodes[b], a) + +def find_output(node, txid, amount): + """ + Return index to output of txid with value amount + Raises exception if there is none. 
+ """ + txdata = node.getrawtransaction(txid, 1) + for i in range(len(txdata["vout"])): + if txdata["vout"][i]["value"] == amount: + return i + raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount))) + + +def gather_inputs(from_node, amount_needed, confirmations_required=1): + """ + Return a random set of unspent txouts that are enough to pay amount_needed + """ + assert(confirmations_required >=0) + utxo = from_node.listunspent(confirmations_required) + random.shuffle(utxo) + inputs = [] + total_in = Decimal("0.00000000") + while total_in < amount_needed and len(utxo) > 0: + t = utxo.pop() + total_in += t["amount"] + inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } ) + if total_in < amount_needed: + raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in)) + return (total_in, inputs) + +def make_change(from_node, amount_in, amount_out, fee): + """ + Create change output(s), return them + """ + outputs = {} + amount = amount_out+fee + change = amount_in - amount + if change > amount*2: + # Create an extra change output to break up big inputs + change_address = from_node.getnewaddress() + # Split change in two, being careful of rounding: + outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) + change = amount_in - amount - outputs[change_address] + if change > 0: + outputs[from_node.getnewaddress()] = change + return outputs + +def send_zeropri_transaction(from_node, to_node, amount, fee): + """ + Create&broadcast a zero-priority transaction. + Returns (txid, hex-encoded-txdata) + Ensures transaction is zero-priority by first creating a send-to-self, + then using its output + """ + + # Create a send-to-self with confirmed inputs: + self_address = from_node.getnewaddress() + (total_in, inputs) = gather_inputs(from_node, amount+fee*2) + outputs = make_change(from_node, total_in, amount+fee, fee) + outputs[self_address] = float(amount+fee) + + self_rawtx = from_node.createrawtransaction(inputs, outputs) + self_signresult = from_node.signrawtransaction(self_rawtx) + self_txid = from_node.sendrawtransaction(self_signresult["hex"], True) + + vout = find_output(from_node, self_txid, amount+fee) + # Now immediately spend the output to create a 1-input, 1-output + # zero-priority transaction: + inputs = [ { "txid" : self_txid, "vout" : vout } ] + outputs = { to_node.getnewaddress() : float(amount) } + + rawtx = from_node.createrawtransaction(inputs, outputs) + signresult = from_node.signrawtransaction(rawtx) + txid = from_node.sendrawtransaction(signresult["hex"], True) + + return (txid, signresult["hex"]) + +def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants): + """ + Create a random zero-priority transaction. + Returns (txid, hex-encoded-transaction-data, fee) + """ + from_node = random.choice(nodes) + to_node = random.choice(nodes) + fee = min_fee + fee_increment*random.randint(0,fee_variants) + (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee) + return (txid, txhex, fee) + +def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants): + """ + Create a random transaction. 
+ Returns (txid, hex-encoded-transaction-data, fee) + """ + from_node = random.choice(nodes) + to_node = random.choice(nodes) + fee = min_fee + fee_increment*random.randint(0,fee_variants) + + (total_in, inputs) = gather_inputs(from_node, amount+fee) + outputs = make_change(from_node, total_in, amount, fee) + outputs[to_node.getnewaddress()] = float(amount) + + rawtx = from_node.createrawtransaction(inputs, outputs) + signresult = from_node.signrawtransaction(rawtx) + txid = from_node.sendrawtransaction(signresult["hex"], True) + + return (txid, signresult["hex"], fee) + +def assert_equal(thing1, thing2): + if thing1 != thing2: + raise AssertionError("%s != %s"%(str(thing1),str(thing2))) + +def assert_greater_than(thing1, thing2): + if thing1 <= thing2: + raise AssertionError("%s <= %s"%(str(thing1),str(thing2))) + +def assert_raises(exc, fun, *args, **kwds): + try: + fun(*args, **kwds) + except exc: + pass + except Exception as e: + raise AssertionError("Unexpected exception raised: "+type(e).__name__) + else: + raise AssertionError("No exception raised") |
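
The BitcoinTestFramework class added in test_framework.py above is meant to be subclassed by individual test scripts, which override run_test() (and optionally setup_chain(), setup_network(), add_options()) and then call main(). A minimal sketch of such a script follows; it assumes the script lives in qa/rpc-tests/ next to the test_framework package, and the class name ExampleTest plus its exact assertions are illustrative, not part of this diff.

#!/usr/bin/env python2
# Hypothetical example test built on the framework introduced in this diff.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal

class ExampleTest(BitcoinTestFramework):
    def run_test(self):
        # setup_chain()/setup_network() have already started four connected
        # regtest nodes on top of the cached 200-block chain.
        assert_equal(self.nodes[0].getblockcount(), 200)
        # Mine one block on node 0 and check that it propagates everywhere.
        self.nodes[0].generate(1)
        self.sync_all()
        for node in self.nodes:
            assert_equal(node.getblockcount(), 201)

if __name__ == '__main__':
    ExampleTest().main()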
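
AuthServiceProxy in authproxy.py maps attribute access onto JSON-RPC method calls over a persistent HTTP connection, parses float-looking numbers as Decimal, and raises JSONRPCException when the server returns an error. A short usage sketch, assuming a regtest node listening with the rt:rt credentials that util.py writes into bitcoin.conf (the port 12001 is only an example value):

# Hypothetical direct use of AuthServiceProxy, outside the test framework.
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException

proxy = AuthServiceProxy("http://rt:rt@127.0.0.1:12001")
try:
    count = proxy.getblockcount()    # attribute access becomes the RPC method name
    balance = proxy.getbalance()     # JSON floats come back as decimal.Decimal
    print("height %d, balance %s" % (count, balance))
except JSONRPCException as e:
    print("RPC error: " + e.error['message'])  # e.error is the JSON-RPC error object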