diff options
Diffstat (limited to 'qa/rpc-tests/test_framework')
-rw-r--r-- | qa/rpc-tests/test_framework/__init__.py | 0 | ||||
-rw-r--r-- | qa/rpc-tests/test_framework/address.py | 69 | ||||
-rw-r--r-- | qa/rpc-tests/test_framework/authproxy.py | 190 | ||||
-rw-r--r-- | qa/rpc-tests/test_framework/bignum.py | 97 | ||||
-rw-r--r-- | qa/rpc-tests/test_framework/blockstore.py | 170 | ||||
-rw-r--r-- | qa/rpc-tests/test_framework/blocktools.py | 105 | ||||
-rwxr-xr-x | qa/rpc-tests/test_framework/comptool.py | 410 | ||||
-rw-r--r-- | qa/rpc-tests/test_framework/coverage.py | 103 | ||||
-rw-r--r-- | qa/rpc-tests/test_framework/key.py | 232 | ||||
-rwxr-xr-x | qa/rpc-tests/test_framework/mininode.py | 1797 | ||||
-rw-r--r-- | qa/rpc-tests/test_framework/netutil.py | 156 | ||||
-rw-r--r-- | qa/rpc-tests/test_framework/script.py | 939 | ||||
-rw-r--r-- | qa/rpc-tests/test_framework/siphash.py | 63 | ||||
-rw-r--r-- | qa/rpc-tests/test_framework/socks5.py | 160 | ||||
-rwxr-xr-x | qa/rpc-tests/test_framework/test_framework.py | 244 | ||||
-rw-r--r-- | qa/rpc-tests/test_framework/util.py | 670 |
16 files changed, 0 insertions, 5405 deletions
diff --git a/qa/rpc-tests/test_framework/__init__.py b/qa/rpc-tests/test_framework/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 --- a/qa/rpc-tests/test_framework/__init__.py +++ /dev/null diff --git a/qa/rpc-tests/test_framework/address.py b/qa/rpc-tests/test_framework/address.py deleted file mode 100644 index 96bebe1ea1..0000000000 --- a/qa/rpc-tests/test_framework/address.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Encode and decode BASE58, P2PKH and P2SH addresses.""" - -from .script import hash256, hash160, sha256, CScript, OP_0 -from .util import bytes_to_hex_str, hex_str_to_bytes - -chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' - -def byte_to_base58(b, version): - result = '' - str = bytes_to_hex_str(b) - str = bytes_to_hex_str(chr(version).encode('latin-1')) + str - checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(str))) - str += checksum[:8] - value = int('0x'+str,0) - while value > 0: - result = chars[value % 58] + result - value //= 58 - while (str[:2] == '00'): - result = chars[0] + result - str = str[2:] - return result - -# TODO: def base58_decode - -def keyhash_to_p2pkh(hash, main = False): - assert (len(hash) == 20) - version = 0 if main else 111 - return byte_to_base58(hash, version) - -def scripthash_to_p2sh(hash, main = False): - assert (len(hash) == 20) - version = 5 if main else 196 - return byte_to_base58(hash, version) - -def key_to_p2pkh(key, main = False): - key = check_key(key) - return keyhash_to_p2pkh(hash160(key), main) - -def script_to_p2sh(script, main = False): - script = check_script(script) - return scripthash_to_p2sh(hash160(script), main) - -def key_to_p2sh_p2wpkh(key, main = False): - key = check_key(key) - p2shscript = CScript([OP_0, hash160(key)]) - return 
script_to_p2sh(p2shscript, main) - -def script_to_p2sh_p2wsh(script, main = False): - script = check_script(script) - p2shscript = CScript([OP_0, sha256(script)]) - return script_to_p2sh(p2shscript, main) - -def check_key(key): - if (type(key) is str): - key = hex_str_to_bytes(key) # Assuming this is hex string - if (type(key) is bytes and (len(key) == 33 or len(key) == 65)): - return key - assert(False) - -def check_script(script): - if (type(script) is str): - script = hex_str_to_bytes(script) # Assuming this is hex string - if (type(script) is bytes or type(script) is CScript): - return script - assert(False) diff --git a/qa/rpc-tests/test_framework/authproxy.py b/qa/rpc-tests/test_framework/authproxy.py deleted file mode 100644 index 9ab3094b06..0000000000 --- a/qa/rpc-tests/test_framework/authproxy.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright (c) 2011 Jeff Garzik -# -# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py: -# -# Copyright (c) 2007 Jan-Klaas Kollhof -# -# This file is part of jsonrpc. -# -# jsonrpc is free software; you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published by -# the Free Software Foundation; either version 2.1 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this software; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -"""HTTP proxy for opening RPC connection to bitcoind. 
- -AuthServiceProxy has the following improvements over python-jsonrpc's -ServiceProxy class: - -- HTTP connections persist for the life of the AuthServiceProxy object - (if server supports HTTP/1.1) -- sends protocol 'version', per JSON-RPC 1.1 -- sends proper, incrementing 'id' -- sends Basic HTTP authentication headers -- parses all JSON numbers that look like floats as Decimal -- uses standard Python json lib -""" - -try: - import http.client as httplib -except ImportError: - import httplib -import base64 -import decimal -import json -import logging -import socket -try: - import urllib.parse as urlparse -except ImportError: - import urlparse - -USER_AGENT = "AuthServiceProxy/0.1" - -HTTP_TIMEOUT = 30 - -log = logging.getLogger("BitcoinRPC") - -class JSONRPCException(Exception): - def __init__(self, rpc_error): - try: - errmsg = '%(message)s (%(code)i)' % rpc_error - except (KeyError, TypeError): - errmsg = '' - Exception.__init__(self, errmsg) - self.error = rpc_error - - -def EncodeDecimal(o): - if isinstance(o, decimal.Decimal): - return str(o) - raise TypeError(repr(o) + " is not JSON serializable") - -class AuthServiceProxy(object): - __id_count = 0 - - # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps - def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True): - self.__service_url = service_url - self._service_name = service_name - self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests - self.__url = urlparse.urlparse(service_url) - if self.__url.port is None: - port = 80 - else: - port = self.__url.port - (user, passwd) = (self.__url.username, self.__url.password) - try: - user = user.encode('utf8') - except AttributeError: - pass - try: - passwd = passwd.encode('utf8') - except AttributeError: - pass - authpair = user + b':' + passwd - self.__auth_header = b'Basic ' + base64.b64encode(authpair) - - if connection: - # Callables re-use the connection of the original proxy - 
self.__conn = connection - elif self.__url.scheme == 'https': - self.__conn = httplib.HTTPSConnection(self.__url.hostname, port, - timeout=timeout) - else: - self.__conn = httplib.HTTPConnection(self.__url.hostname, port, - timeout=timeout) - - def __getattr__(self, name): - if name.startswith('__') and name.endswith('__'): - # Python internal stuff - raise AttributeError - if self._service_name is not None: - name = "%s.%s" % (self._service_name, name) - return AuthServiceProxy(self.__service_url, name, connection=self.__conn) - - def _request(self, method, path, postdata): - ''' - Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout). - This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5. - ''' - headers = {'Host': self.__url.hostname, - 'User-Agent': USER_AGENT, - 'Authorization': self.__auth_header, - 'Content-type': 'application/json'} - try: - self.__conn.request(method, path, postdata, headers) - return self._get_response() - except httplib.BadStatusLine as e: - if e.line == "''": # if connection was closed, try again - self.__conn.close() - self.__conn.request(method, path, postdata, headers) - return self._get_response() - else: - raise - except (BrokenPipeError,ConnectionResetError): - # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset - # ConnectionResetError happens on FreeBSD with Python 3.4 - self.__conn.close() - self.__conn.request(method, path, postdata, headers) - return self._get_response() - - def __call__(self, *args, **argsn): - AuthServiceProxy.__id_count += 1 - - log.debug("-%s-> %s %s"%(AuthServiceProxy.__id_count, self._service_name, - json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) - if args and argsn: - raise ValueError('Cannot handle both named and positional arguments') - postdata = json.dumps({'version': '1.1', - 'method': self._service_name, - 'params': args or argsn, - 'id': AuthServiceProxy.__id_count}, 
default=EncodeDecimal, ensure_ascii=self.ensure_ascii) - response = self._request('POST', self.__url.path, postdata.encode('utf-8')) - if response['error'] is not None: - raise JSONRPCException(response['error']) - elif 'result' not in response: - raise JSONRPCException({ - 'code': -343, 'message': 'missing JSON-RPC result'}) - else: - return response['result'] - - def _batch(self, rpc_call_list): - postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) - log.debug("--> "+postdata) - return self._request('POST', self.__url.path, postdata.encode('utf-8')) - - def _get_response(self): - try: - http_response = self.__conn.getresponse() - except socket.timeout as e: - raise JSONRPCException({ - 'code': -344, - 'message': '%r RPC took longer than %f seconds. Consider ' - 'using larger timeout for calls that take ' - 'longer to return.' % (self._service_name, - self.__conn.timeout)}) - if http_response is None: - raise JSONRPCException({ - 'code': -342, 'message': 'missing HTTP response from server'}) - - content_type = http_response.getheader('Content-Type') - if content_type != 'application/json': - raise JSONRPCException({ - 'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)}) - - responsedata = http_response.read().decode('utf8') - response = json.loads(responsedata, parse_float=decimal.Decimal) - if "error" in response and response["error"] is None: - log.debug("<-%s- %s"%(response["id"], json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) - else: - log.debug("<-- "+responsedata) - return response diff --git a/qa/rpc-tests/test_framework/bignum.py b/qa/rpc-tests/test_framework/bignum.py deleted file mode 100644 index 024611da6e..0000000000 --- a/qa/rpc-tests/test_framework/bignum.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python3 -# -# Distributed under the MIT software license, see the accompanying -# file 
COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Big number routines. - -This file is copied from python-bitcoinlib. -""" - -import struct - - -# generic big endian MPI format - -def bn_bytes(v, have_ext=False): - ext = 0 - if have_ext: - ext = 1 - return ((v.bit_length()+7)//8) + ext - -def bn2bin(v): - s = bytearray() - i = bn_bytes(v) - while i > 0: - s.append((v >> ((i-1) * 8)) & 0xff) - i -= 1 - return s - -def bin2bn(s): - l = 0 - for ch in s: - l = (l << 8) | ch - return l - -def bn2mpi(v): - have_ext = False - if v.bit_length() > 0: - have_ext = (v.bit_length() & 0x07) == 0 - - neg = False - if v < 0: - neg = True - v = -v - - s = struct.pack(b">I", bn_bytes(v, have_ext)) - ext = bytearray() - if have_ext: - ext.append(0) - v_bin = bn2bin(v) - if neg: - if have_ext: - ext[0] |= 0x80 - else: - v_bin[0] |= 0x80 - return s + ext + v_bin - -def mpi2bn(s): - if len(s) < 4: - return None - s_size = bytes(s[:4]) - v_len = struct.unpack(b">I", s_size)[0] - if len(s) != (v_len + 4): - return None - if v_len == 0: - return 0 - - v_str = bytearray(s[4:]) - neg = False - i = v_str[0] - if i & 0x80: - neg = True - i &= ~0x80 - v_str[0] = i - - v = bin2bn(v_str) - - if neg: - return -v - return v - -# bitcoin-specific little endian format, with implicit size -def mpi2vch(s): - r = s[4:] # strip size - r = r[::-1] # reverse string, converting BE->LE - return r - -def bn2vch(v): - return bytes(mpi2vch(bn2mpi(v))) - -def vch2mpi(s): - r = struct.pack(b">I", len(s)) # size - r += s[::-1] # reverse string, converting LE->BE - return r - -def vch2bn(s): - return mpi2bn(vch2mpi(s)) - diff --git a/qa/rpc-tests/test_framework/blockstore.py b/qa/rpc-tests/test_framework/blockstore.py deleted file mode 100644 index 4cfd682bb5..0000000000 --- a/qa/rpc-tests/test_framework/blockstore.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# 
file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""BlockStore and TxStore helper classes.""" - -from .mininode import * -from io import BytesIO -import dbm.dumb as dbmd - -logger = logging.getLogger("TestFramework.blockstore") - -class BlockStore(object): - """BlockStore helper class. - - BlockStore keeps a map of blocks and implements helper functions for - responding to getheaders and getdata, and for constructing a getheaders - message. - """ - - def __init__(self, datadir): - self.blockDB = dbmd.open(datadir + "/blocks", 'c') - self.currentBlock = 0 - self.headers_map = dict() - - def close(self): - self.blockDB.close() - - def erase(self, blockhash): - del self.blockDB[repr(blockhash)] - - # lookup an entry and return the item as raw bytes - def get(self, blockhash): - value = None - try: - value = self.blockDB[repr(blockhash)] - except KeyError: - return None - return value - - # lookup an entry and return it as a CBlock - def get_block(self, blockhash): - ret = None - serialized_block = self.get(blockhash) - if serialized_block is not None: - f = BytesIO(serialized_block) - ret = CBlock() - ret.deserialize(f) - ret.calc_sha256() - return ret - - def get_header(self, blockhash): - try: - return self.headers_map[blockhash] - except KeyError: - return None - - # Note: this pulls full blocks out of the database just to retrieve - # the headers -- perhaps we could keep a separate data structure - # to avoid this overhead. 
- def headers_for(self, locator, hash_stop, current_tip=None): - if current_tip is None: - current_tip = self.currentBlock - current_block_header = self.get_header(current_tip) - if current_block_header is None: - return None - - response = msg_headers() - headersList = [ current_block_header ] - maxheaders = 2000 - while (headersList[0].sha256 not in locator.vHave): - prevBlockHash = headersList[0].hashPrevBlock - prevBlockHeader = self.get_header(prevBlockHash) - if prevBlockHeader is not None: - headersList.insert(0, prevBlockHeader) - else: - break - headersList = headersList[:maxheaders] # truncate if we have too many - hashList = [x.sha256 for x in headersList] - index = len(headersList) - if (hash_stop in hashList): - index = hashList.index(hash_stop)+1 - response.headers = headersList[:index] - return response - - def add_block(self, block): - block.calc_sha256() - try: - self.blockDB[repr(block.sha256)] = bytes(block.serialize()) - except TypeError as e: - logger.exception("Unexpected error") - self.currentBlock = block.sha256 - self.headers_map[block.sha256] = CBlockHeader(block) - - def add_header(self, header): - self.headers_map[header.sha256] = header - - # lookup the hashes in "inv", and return p2p messages for delivering - # blocks found. 
- def get_blocks(self, inv): - responses = [] - for i in inv: - if (i.type == 2): # MSG_BLOCK - data = self.get(i.hash) - if data is not None: - # Use msg_generic to avoid re-serialization - responses.append(msg_generic(b"block", data)) - return responses - - def get_locator(self, current_tip=None): - if current_tip is None: - current_tip = self.currentBlock - r = [] - counter = 0 - step = 1 - lastBlock = self.get_block(current_tip) - while lastBlock is not None: - r.append(lastBlock.hashPrevBlock) - for i in range(step): - lastBlock = self.get_block(lastBlock.hashPrevBlock) - if lastBlock is None: - break - counter += 1 - if counter > 10: - step *= 2 - locator = CBlockLocator() - locator.vHave = r - return locator - -class TxStore(object): - def __init__(self, datadir): - self.txDB = dbmd.open(datadir + "/transactions", 'c') - - def close(self): - self.txDB.close() - - # lookup an entry and return the item as raw bytes - def get(self, txhash): - value = None - try: - value = self.txDB[repr(txhash)] - except KeyError: - return None - return value - - def get_transaction(self, txhash): - ret = None - serialized_tx = self.get(txhash) - if serialized_tx is not None: - f = BytesIO(serialized_tx) - ret = CTransaction() - ret.deserialize(f) - ret.calc_sha256() - return ret - - def add_transaction(self, tx): - tx.calc_sha256() - try: - self.txDB[repr(tx.sha256)] = bytes(tx.serialize()) - except TypeError as e: - logger.exception("Unexpected error") - - def get_transactions(self, inv): - responses = [] - for i in inv: - if (i.type == 1): # MSG_TX - tx = self.get(i.hash) - if tx is not None: - responses.append(msg_generic(b"tx", tx)) - return responses diff --git a/qa/rpc-tests/test_framework/blocktools.py b/qa/rpc-tests/test_framework/blocktools.py deleted file mode 100644 index 2c9a0857df..0000000000 --- a/qa/rpc-tests/test_framework/blocktools.py +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers -# Distributed 
under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Utilities for manipulating blocks and transactions.""" - -from .mininode import * -from .script import CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN - -# Create a block (with regtest difficulty) -def create_block(hashprev, coinbase, nTime=None): - block = CBlock() - if nTime is None: - import time - block.nTime = int(time.time()+600) - else: - block.nTime = nTime - block.hashPrevBlock = hashprev - block.nBits = 0x207fffff # Will break after a difficulty adjustment... - block.vtx.append(coinbase) - block.hashMerkleRoot = block.calc_merkle_root() - block.calc_sha256() - return block - -# From BIP141 -WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed" - -# According to BIP141, blocks with witness rules active must commit to the -# hash of all in-block transactions including witness. -def add_witness_commitment(block, nonce=0): - # First calculate the merkle root of the block's - # transactions, with witnesses. - witness_nonce = nonce - witness_root = block.calc_witness_merkle_root() - witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(witness_nonce))) - # witness_nonce should go to coinbase witness. 
- block.vtx[0].wit.vtxinwit = [CTxInWitness()] - block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)] - - # witness commitment is the last OP_RETURN output in coinbase - output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment) - block.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, output_data]))) - block.vtx[0].rehash() - block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() - - -def serialize_script_num(value): - r = bytearray(0) - if value == 0: - return r - neg = value < 0 - absvalue = -value if neg else value - while (absvalue): - r.append(int(absvalue & 0xff)) - absvalue >>= 8 - if r[-1] & 0x80: - r.append(0x80 if neg else 0) - elif neg: - r[-1] |= 0x80 - return r - -# Create a coinbase transaction, assuming no miner fees. -# If pubkey is passed in, the coinbase output will be a P2PK output; -# otherwise an anyone-can-spend output. -def create_coinbase(height, pubkey = None): - coinbase = CTransaction() - coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), - ser_string(serialize_script_num(height)), 0xffffffff)) - coinbaseoutput = CTxOut() - coinbaseoutput.nValue = 50 * COIN - halvings = int(height/150) # regtest - coinbaseoutput.nValue >>= halvings - if (pubkey != None): - coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG]) - else: - coinbaseoutput.scriptPubKey = CScript([OP_TRUE]) - coinbase.vout = [ coinbaseoutput ] - coinbase.calc_sha256() - return coinbase - -# Create a transaction. -# If the scriptPubKey is not specified, make it anyone-can-spend. 
-def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()): - tx = CTransaction() - assert(n < len(prevtx.vout)) - tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff)) - tx.vout.append(CTxOut(value, scriptPubKey)) - tx.calc_sha256() - return tx - -def get_legacy_sigopcount_block(block, fAccurate=True): - count = 0 - for tx in block.vtx: - count += get_legacy_sigopcount_tx(tx, fAccurate) - return count - -def get_legacy_sigopcount_tx(tx, fAccurate=True): - count = 0 - for i in tx.vout: - count += i.scriptPubKey.GetSigOpCount(fAccurate) - for j in tx.vin: - # scriptSig might be of type bytes, so convert to CScript for the moment - count += CScript(j.scriptSig).GetSigOpCount(fAccurate) - return count diff --git a/qa/rpc-tests/test_framework/comptool.py b/qa/rpc-tests/test_framework/comptool.py deleted file mode 100755 index 70d1d700ef..0000000000 --- a/qa/rpc-tests/test_framework/comptool.py +++ /dev/null @@ -1,410 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Compare two or more bitcoinds to each other. - -To use, create a class that implements get_tests(), and pass it in -as the test generator to TestManager. get_tests() should be a python -generator that returns TestInstance objects. See below for definition. 
- -TestNode behaves as follows: - Configure with a BlockStore and TxStore - on_inv: log the message but don't request - on_headers: log the chain tip - on_pong: update ping response map (for synchronization) - on_getheaders: provide headers via BlockStore - on_getdata: provide blocks via BlockStore -""" - -from .mininode import * -from .blockstore import BlockStore, TxStore -from .util import p2p_port - -import logging - -logger=logging.getLogger("TestFramework.comptool") - -global mininode_lock - -class RejectResult(object): - """Outcome that expects rejection of a transaction or block.""" - def __init__(self, code, reason=b''): - self.code = code - self.reason = reason - def match(self, other): - if self.code != other.code: - return False - return other.reason.startswith(self.reason) - def __repr__(self): - return '%i:%s' % (self.code,self.reason or '*') - -class TestNode(NodeConnCB): - - def __init__(self, block_store, tx_store): - NodeConnCB.__init__(self) - self.conn = None - self.bestblockhash = None - self.block_store = block_store - self.block_request_map = {} - self.tx_store = tx_store - self.tx_request_map = {} - self.block_reject_map = {} - self.tx_reject_map = {} - - # When the pingmap is non-empty we're waiting for - # a response - self.pingMap = {} - self.lastInv = [] - self.closed = False - - def on_close(self, conn): - self.closed = True - - def add_connection(self, conn): - self.conn = conn - - def on_headers(self, conn, message): - if len(message.headers) > 0: - best_header = message.headers[-1] - best_header.calc_sha256() - self.bestblockhash = best_header.sha256 - - def on_getheaders(self, conn, message): - response = self.block_store.headers_for(message.locator, message.hashstop) - if response is not None: - conn.send_message(response) - - def on_getdata(self, conn, message): - [conn.send_message(r) for r in self.block_store.get_blocks(message.inv)] - [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)] - - for i in 
message.inv: - if i.type == 1: - self.tx_request_map[i.hash] = True - elif i.type == 2: - self.block_request_map[i.hash] = True - - def on_inv(self, conn, message): - self.lastInv = [x.hash for x in message.inv] - - def on_pong(self, conn, message): - try: - del self.pingMap[message.nonce] - except KeyError: - raise AssertionError("Got pong for unknown ping [%s]" % repr(message)) - - def on_reject(self, conn, message): - if message.message == b'tx': - self.tx_reject_map[message.data] = RejectResult(message.code, message.reason) - if message.message == b'block': - self.block_reject_map[message.data] = RejectResult(message.code, message.reason) - - def send_inv(self, obj): - mtype = 2 if isinstance(obj, CBlock) else 1 - self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)])) - - def send_getheaders(self): - # We ask for headers from their last tip. - m = msg_getheaders() - m.locator = self.block_store.get_locator(self.bestblockhash) - self.conn.send_message(m) - - def send_header(self, header): - m = msg_headers() - m.headers.append(header) - self.conn.send_message(m) - - # This assumes BIP31 - def send_ping(self, nonce): - self.pingMap[nonce] = True - self.conn.send_message(msg_ping(nonce)) - - def received_ping_response(self, nonce): - return nonce not in self.pingMap - - def send_mempool(self): - self.lastInv = [] - self.conn.send_message(msg_mempool()) - -# TestInstance: -# -# Instances of these are generated by the test generator, and fed into the -# comptool. -# -# "blocks_and_transactions" should be an array of -# [obj, True/False/None, hash/None]: -# - obj is either a CBlock, CBlockHeader, or a CTransaction, and -# - the second value indicates whether the object should be accepted -# into the blockchain or mempool (for tests where we expect a certain -# answer), or "None" if we don't expect a certain answer and are just -# comparing the behavior of the nodes being tested. 
-# - the third value is the hash to test the tip against (if None or omitted, -# use the hash of the block) -# - NOTE: if a block header, no test is performed; instead the header is -# just added to the block_store. This is to facilitate block delivery -# when communicating with headers-first clients (when withholding an -# intermediate block). -# sync_every_block: if True, then each block will be inv'ed, synced, and -# nodes will be tested based on the outcome for the block. If False, -# then inv's accumulate until all blocks are processed (or max inv size -# is reached) and then sent out in one inv message. Then the final block -# will be synced across all connections, and the outcome of the final -# block will be tested. -# sync_every_tx: analogous to behavior for sync_every_block, except if outcome -# on the final tx is None, then contents of entire mempool are compared -# across all connections. (If outcome of final tx is specified as true -# or false, then only the last tx is tested against outcome.) 
- -class TestInstance(object): - def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False): - self.blocks_and_transactions = objects if objects else [] - self.sync_every_block = sync_every_block - self.sync_every_tx = sync_every_tx - -class TestManager(object): - - def __init__(self, testgen, datadir): - self.test_generator = testgen - self.connections = [] - self.test_nodes = [] - self.block_store = BlockStore(datadir) - self.tx_store = TxStore(datadir) - self.ping_counter = 1 - - def add_all_connections(self, nodes): - for i in range(len(nodes)): - # Create a p2p connection to each node - test_node = TestNode(self.block_store, self.tx_store) - self.test_nodes.append(test_node) - self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node)) - # Make sure the TestNode (callback class) has a reference to its - # associated NodeConn - test_node.add_connection(self.connections[-1]) - - def clear_all_connections(self): - self.connections = [] - self.test_nodes = [] - - def wait_for_disconnections(self): - def disconnected(): - return all(node.closed for node in self.test_nodes) - return wait_until(disconnected, timeout=10) - - def wait_for_verack(self): - def veracked(): - return all(node.verack_received for node in self.test_nodes) - return wait_until(veracked, timeout=10) - - def wait_for_pings(self, counter): - def received_pongs(): - return all(node.received_ping_response(counter) for node in self.test_nodes) - return wait_until(received_pongs) - - # sync_blocks: Wait for all connections to request the blockhash given - # then send get_headers to find out the tip of each node, and synchronize - # the response by using a ping (and waiting for pong with same nonce). 
- def sync_blocks(self, blockhash, num_blocks): - def blocks_requested(): - return all( - blockhash in node.block_request_map and node.block_request_map[blockhash] - for node in self.test_nodes - ) - - # --> error if not requested - if not wait_until(blocks_requested, attempts=20*num_blocks): - raise AssertionError("Not all nodes requested block") - - # Send getheaders message - [ c.cb.send_getheaders() for c in self.connections ] - - # Send ping and wait for response -- synchronization hack - [ c.cb.send_ping(self.ping_counter) for c in self.connections ] - self.wait_for_pings(self.ping_counter) - self.ping_counter += 1 - - # Analogous to sync_block (see above) - def sync_transaction(self, txhash, num_events): - # Wait for nodes to request transaction (50ms sleep * 20 tries * num_events) - def transaction_requested(): - return all( - txhash in node.tx_request_map and node.tx_request_map[txhash] - for node in self.test_nodes - ) - - # --> error if not requested - if not wait_until(transaction_requested, attempts=20*num_events): - raise AssertionError("Not all nodes requested transaction") - - # Get the mempool - [ c.cb.send_mempool() for c in self.connections ] - - # Send ping and wait for response -- synchronization hack - [ c.cb.send_ping(self.ping_counter) for c in self.connections ] - self.wait_for_pings(self.ping_counter) - self.ping_counter += 1 - - # Sort inv responses from each node - with mininode_lock: - [ c.cb.lastInv.sort() for c in self.connections ] - - # Verify that the tip of each connection all agree with each other, and - # with the expected outcome (if given) - def check_results(self, blockhash, outcome): - with mininode_lock: - for c in self.connections: - if outcome is None: - if c.cb.bestblockhash != self.connections[0].cb.bestblockhash: - return False - elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code - if c.cb.bestblockhash == blockhash: - return False - if blockhash not in c.cb.block_reject_map: - 
logger.error('Block not in reject map: %064x' % (blockhash)) - return False - if not outcome.match(c.cb.block_reject_map[blockhash]): - logger.error('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash)) - return False - elif ((c.cb.bestblockhash == blockhash) != outcome): - return False - return True - - # Either check that the mempools all agree with each other, or that - # txhash's presence in the mempool matches the outcome specified. - # This is somewhat of a strange comparison, in that we're either comparing - # a particular tx to an outcome, or the entire mempools altogether; - # perhaps it would be useful to add the ability to check explicitly that - # a particular tx's existence in the mempool is the same across all nodes. - def check_mempool(self, txhash, outcome): - with mininode_lock: - for c in self.connections: - if outcome is None: - # Make sure the mempools agree with each other - if c.cb.lastInv != self.connections[0].cb.lastInv: - return False - elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code - if txhash in c.cb.lastInv: - return False - if txhash not in c.cb.tx_reject_map: - logger.error('Tx not in reject map: %064x' % (txhash)) - return False - if not outcome.match(c.cb.tx_reject_map[txhash]): - logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash)) - return False - elif ((txhash in c.cb.lastInv) != outcome): - return False - return True - - def run(self): - # Wait until verack is received - self.wait_for_verack() - - test_number = 1 - for test_instance in self.test_generator.get_tests(): - # We use these variables to keep track of the last block - # and last transaction in the tests, which are used - # if we're not syncing on every block or every tx. 
- [ block, block_outcome, tip ] = [ None, None, None ] - [ tx, tx_outcome ] = [ None, None ] - invqueue = [] - - for test_obj in test_instance.blocks_and_transactions: - b_or_t = test_obj[0] - outcome = test_obj[1] - # Determine if we're dealing with a block or tx - if isinstance(b_or_t, CBlock): # Block test runner - block = b_or_t - block_outcome = outcome - tip = block.sha256 - # each test_obj can have an optional third argument - # to specify the tip we should compare with - # (default is to use the block being tested) - if len(test_obj) >= 3: - tip = test_obj[2] - - # Add to shared block_store, set as current block - # If there was an open getdata request for the block - # previously, and we didn't have an entry in the - # block_store, then immediately deliver, because the - # node wouldn't send another getdata request while - # the earlier one is outstanding. - first_block_with_hash = True - if self.block_store.get(block.sha256) is not None: - first_block_with_hash = False - with mininode_lock: - self.block_store.add_block(block) - for c in self.connections: - if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True: - # There was a previous request for this block hash - # Most likely, we delivered a header for this block - # but never had the block to respond to the getdata - c.send_message(msg_block(block)) - else: - c.cb.block_request_map[block.sha256] = False - # Either send inv's to each node and sync, or add - # to invqueue for later inv'ing. - if (test_instance.sync_every_block): - # if we expect success, send inv and sync every block - # if we expect failure, just push the block and see what happens. 
- if outcome == True: - [ c.cb.send_inv(block) for c in self.connections ] - self.sync_blocks(block.sha256, 1) - else: - [ c.send_message(msg_block(block)) for c in self.connections ] - [ c.cb.send_ping(self.ping_counter) for c in self.connections ] - self.wait_for_pings(self.ping_counter) - self.ping_counter += 1 - if (not self.check_results(tip, outcome)): - raise AssertionError("Test failed at test %d" % test_number) - else: - invqueue.append(CInv(2, block.sha256)) - elif isinstance(b_or_t, CBlockHeader): - block_header = b_or_t - self.block_store.add_header(block_header) - [ c.cb.send_header(block_header) for c in self.connections ] - - else: # Tx test runner - assert(isinstance(b_or_t, CTransaction)) - tx = b_or_t - tx_outcome = outcome - # Add to shared tx store and clear map entry - with mininode_lock: - self.tx_store.add_transaction(tx) - for c in self.connections: - c.cb.tx_request_map[tx.sha256] = False - # Again, either inv to all nodes or save for later - if (test_instance.sync_every_tx): - [ c.cb.send_inv(tx) for c in self.connections ] - self.sync_transaction(tx.sha256, 1) - if (not self.check_mempool(tx.sha256, outcome)): - raise AssertionError("Test failed at test %d" % test_number) - else: - invqueue.append(CInv(1, tx.sha256)) - # Ensure we're not overflowing the inv queue - if len(invqueue) == MAX_INV_SZ: - [ c.send_message(msg_inv(invqueue)) for c in self.connections ] - invqueue = [] - - # Do final sync if we weren't syncing on every block or every tx. 
- if (not test_instance.sync_every_block and block is not None): - if len(invqueue) > 0: - [ c.send_message(msg_inv(invqueue)) for c in self.connections ] - invqueue = [] - self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions)) - if (not self.check_results(tip, block_outcome)): - raise AssertionError("Block test failed at test %d" % test_number) - if (not test_instance.sync_every_tx and tx is not None): - if len(invqueue) > 0: - [ c.send_message(msg_inv(invqueue)) for c in self.connections ] - invqueue = [] - self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions)) - if (not self.check_mempool(tx.sha256, tx_outcome)): - raise AssertionError("Mempool test failed at test %d" % test_number) - - logger.info("Test %d: PASS" % test_number) - test_number += 1 - - [ c.disconnect_node() for c in self.connections ] - self.wait_for_disconnections() - self.block_store.close() - self.tx_store.close() diff --git a/qa/rpc-tests/test_framework/coverage.py b/qa/rpc-tests/test_framework/coverage.py deleted file mode 100644 index 3f87ef91f6..0000000000 --- a/qa/rpc-tests/test_framework/coverage.py +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Utilities for doing coverage analysis on the RPC interface. - -Provides a way to track which RPC commands are exercised during -testing. -""" - -import os - - -REFERENCE_FILENAME = 'rpc_interface.txt' - - -class AuthServiceProxyWrapper(object): - """ - An object that wraps AuthServiceProxy to record specific RPC calls. - - """ - def __init__(self, auth_service_proxy_instance, coverage_logfile=None): - """ - Kwargs: - auth_service_proxy_instance (AuthServiceProxy): the instance - being wrapped. - coverage_logfile (str): if specified, write each service_name - out to a file when called. 
- - """ - self.auth_service_proxy_instance = auth_service_proxy_instance - self.coverage_logfile = coverage_logfile - - def __getattr__(self, *args, **kwargs): - return_val = self.auth_service_proxy_instance.__getattr__( - *args, **kwargs) - - return AuthServiceProxyWrapper(return_val, self.coverage_logfile) - - def __call__(self, *args, **kwargs): - """ - Delegates to AuthServiceProxy, then writes the particular RPC method - called to a file. - - """ - return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs) - rpc_method = self.auth_service_proxy_instance._service_name - - if self.coverage_logfile: - with open(self.coverage_logfile, 'a+', encoding='utf8') as f: - f.write("%s\n" % rpc_method) - - return return_val - - @property - def url(self): - return self.auth_service_proxy_instance.url - - -def get_filename(dirname, n_node): - """ - Get a filename unique to the test process ID and node. - - This file will contain a list of RPC commands covered. - """ - pid = str(os.getpid()) - return os.path.join( - dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node))) - - -def write_all_rpc_commands(dirname, node): - """ - Write out a list of all RPC functions available in `bitcoin-cli` for - coverage comparison. This will only happen once per coverage - directory. - - Args: - dirname (str): temporary test dir - node (AuthServiceProxy): client - - Returns: - bool. if the RPC interface file was written. 
- - """ - filename = os.path.join(dirname, REFERENCE_FILENAME) - - if os.path.isfile(filename): - return False - - help_output = node.help().split('\n') - commands = set() - - for line in help_output: - line = line.strip() - - # Ignore blanks and headers - if line and not line.startswith('='): - commands.add("%s\n" % line.split()[0]) - - with open(filename, 'w', encoding='utf8') as f: - f.writelines(list(commands)) - - return True diff --git a/qa/rpc-tests/test_framework/key.py b/qa/rpc-tests/test_framework/key.py deleted file mode 100644 index 85a6158a2f..0000000000 --- a/qa/rpc-tests/test_framework/key.py +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright (c) 2011 Sam Rushing -"""ECC secp256k1 OpenSSL wrapper. - -WARNING: This module does not mlock() secrets; your private keys may end up on -disk in swap! Use with caution! - -This file is modified from python-bitcoinlib. -""" - -import ctypes -import ctypes.util -import hashlib -import sys - -ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32') - -ssl.BN_new.restype = ctypes.c_void_p -ssl.BN_new.argtypes = [] - -ssl.BN_bin2bn.restype = ctypes.c_void_p -ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p] - -ssl.BN_CTX_free.restype = None -ssl.BN_CTX_free.argtypes = [ctypes.c_void_p] - -ssl.BN_CTX_new.restype = ctypes.c_void_p -ssl.BN_CTX_new.argtypes = [] - -ssl.ECDH_compute_key.restype = ctypes.c_int -ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p] - -ssl.ECDSA_sign.restype = ctypes.c_int -ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] - -ssl.ECDSA_verify.restype = ctypes.c_int -ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p] - -ssl.EC_KEY_free.restype = None -ssl.EC_KEY_free.argtypes = [ctypes.c_void_p] - -ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p 
-ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int] - -ssl.EC_KEY_get0_group.restype = ctypes.c_void_p -ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p] - -ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p -ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p] - -ssl.EC_KEY_set_private_key.restype = ctypes.c_int -ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p] - -ssl.EC_KEY_set_conv_form.restype = None -ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int] - -ssl.EC_KEY_set_public_key.restype = ctypes.c_int -ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p] - -ssl.i2o_ECPublicKey.restype = ctypes.c_void_p -ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p] - -ssl.EC_POINT_new.restype = ctypes.c_void_p -ssl.EC_POINT_new.argtypes = [ctypes.c_void_p] - -ssl.EC_POINT_free.restype = None -ssl.EC_POINT_free.argtypes = [ctypes.c_void_p] - -ssl.EC_POINT_mul.restype = ctypes.c_int -ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] - -# this specifies the curve used with ECDSA. -NID_secp256k1 = 714 # from openssl/obj_mac.h - -SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141 -SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2 - -# Thx to Sam Devlin for the ctypes magic 64-bit fix. 
-def _check_result(val, func, args): - if val == 0: - raise ValueError - else: - return ctypes.c_void_p (val) - -ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p -ssl.EC_KEY_new_by_curve_name.errcheck = _check_result - -class CECKey(object): - """Wrapper around OpenSSL's EC_KEY""" - - POINT_CONVERSION_COMPRESSED = 2 - POINT_CONVERSION_UNCOMPRESSED = 4 - - def __init__(self): - self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1) - - def __del__(self): - if ssl: - ssl.EC_KEY_free(self.k) - self.k = None - - def set_secretbytes(self, secret): - priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new()) - group = ssl.EC_KEY_get0_group(self.k) - pub_key = ssl.EC_POINT_new(group) - ctx = ssl.BN_CTX_new() - if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx): - raise ValueError("Could not derive public key from the supplied secret.") - ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx) - ssl.EC_KEY_set_private_key(self.k, priv_key) - ssl.EC_KEY_set_public_key(self.k, pub_key) - ssl.EC_POINT_free(pub_key) - ssl.BN_CTX_free(ctx) - return self.k - - def set_privkey(self, key): - self.mb = ctypes.create_string_buffer(key) - return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key)) - - def set_pubkey(self, key): - self.mb = ctypes.create_string_buffer(key) - return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key)) - - def get_privkey(self): - size = ssl.i2d_ECPrivateKey(self.k, 0) - mb_pri = ctypes.create_string_buffer(size) - ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri))) - return mb_pri.raw - - def get_pubkey(self): - size = ssl.i2o_ECPublicKey(self.k, 0) - mb = ctypes.create_string_buffer(size) - ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb))) - return mb.raw - - def get_raw_ecdh_key(self, other_pubkey): - ecdh_keybuffer = ctypes.create_string_buffer(32) - r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32, - 
ssl.EC_KEY_get0_public_key(other_pubkey.k), - self.k, 0) - if r != 32: - raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed') - return ecdh_keybuffer.raw - - def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()): - # FIXME: be warned it's not clear what the kdf should be as a default - r = self.get_raw_ecdh_key(other_pubkey) - return kdf(r) - - def sign(self, hash, low_s = True): - # FIXME: need unit tests for below cases - if not isinstance(hash, bytes): - raise TypeError('Hash must be bytes instance; got %r' % hash.__class__) - if len(hash) != 32: - raise ValueError('Hash must be exactly 32 bytes long') - - sig_size0 = ctypes.c_uint32() - sig_size0.value = ssl.ECDSA_size(self.k) - mb_sig = ctypes.create_string_buffer(sig_size0.value) - result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k) - assert 1 == result - assert mb_sig.raw[0] == 0x30 - assert mb_sig.raw[1] == sig_size0.value - 2 - total_size = mb_sig.raw[1] - assert mb_sig.raw[2] == 2 - r_size = mb_sig.raw[3] - assert mb_sig.raw[4 + r_size] == 2 - s_size = mb_sig.raw[5 + r_size] - s_value = int.from_bytes(mb_sig.raw[6+r_size:6+r_size+s_size], byteorder='big') - if (not low_s) or s_value <= SECP256K1_ORDER_HALF: - return mb_sig.raw[:sig_size0.value] - else: - low_s_value = SECP256K1_ORDER - s_value - low_s_bytes = (low_s_value).to_bytes(33, byteorder='big') - while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80: - low_s_bytes = low_s_bytes[1:] - new_s_size = len(low_s_bytes) - new_total_size_byte = (total_size + new_s_size - s_size).to_bytes(1,byteorder='big') - new_s_size_byte = (new_s_size).to_bytes(1,byteorder='big') - return b'\x30' + new_total_size_byte + mb_sig.raw[2:5+r_size] + new_s_size_byte + low_s_bytes - - def verify(self, hash, sig): - """Verify a DER signature""" - return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1 - - def set_compressed(self, compressed): - if compressed: - form = 
self.POINT_CONVERSION_COMPRESSED - else: - form = self.POINT_CONVERSION_UNCOMPRESSED - ssl.EC_KEY_set_conv_form(self.k, form) - - -class CPubKey(bytes): - """An encapsulated public key - - Attributes: - - is_valid - Corresponds to CPubKey.IsValid() - is_fullyvalid - Corresponds to CPubKey.IsFullyValid() - is_compressed - Corresponds to CPubKey.IsCompressed() - """ - - def __new__(cls, buf, _cec_key=None): - self = super(CPubKey, cls).__new__(cls, buf) - if _cec_key is None: - _cec_key = CECKey() - self._cec_key = _cec_key - self.is_fullyvalid = _cec_key.set_pubkey(self) != 0 - return self - - @property - def is_valid(self): - return len(self) > 0 - - @property - def is_compressed(self): - return len(self) == 33 - - def verify(self, hash, sig): - return self._cec_key.verify(hash, sig) - - def __str__(self): - return repr(self) - - def __repr__(self): - # Always have represent as b'<secret>' so test cases don't have to - # change for py2/3 - if sys.version > '3': - return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__()) - else: - return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__()) - diff --git a/qa/rpc-tests/test_framework/mininode.py b/qa/rpc-tests/test_framework/mininode.py deleted file mode 100755 index aace17a043..0000000000 --- a/qa/rpc-tests/test_framework/mininode.py +++ /dev/null @@ -1,1797 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2010 ArtForz -- public domain half-a-node -# Copyright (c) 2012 Jeff Garzik -# Copyright (c) 2010-2016 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Bitcoin P2P network half-a-node. - -This python code was modified from ArtForz' public domain half-a-node, as -found in the mini-node branch of http://github.com/jgarzik/pynode. 
- -NodeConn: an object which manages p2p connectivity to a bitcoin node -NodeConnCB: a base class that describes the interface for receiving - callbacks with network messages from a NodeConn -CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....: - data structures that should map to corresponding structures in - bitcoin/primitives -msg_block, msg_tx, msg_headers, etc.: - data structures that represent network messages -ser_*, deser_*: functions that handle serialization/deserialization -""" - -import struct -import socket -import asyncore -import time -import sys -import random -from .util import hex_str_to_bytes, bytes_to_hex_str -from io import BytesIO -from codecs import encode -import hashlib -from threading import RLock -from threading import Thread -import logging -import copy -from test_framework.siphash import siphash256 - -BIP0031_VERSION = 60000 -MY_VERSION = 70014 # past bip-31 for ping/pong -MY_SUBVERSION = b"/python-mininode-tester:0.0.3/" -MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37) - -MAX_INV_SZ = 50000 -MAX_BLOCK_BASE_SIZE = 1000000 - -COIN = 100000000 # 1 btc in satoshis - -NODE_NETWORK = (1 << 0) -NODE_GETUTXO = (1 << 1) -NODE_BLOOM = (1 << 2) -NODE_WITNESS = (1 << 3) - -logger = logging.getLogger("TestFramework.mininode") - -# Keep our own socket map for asyncore, so that we can track disconnects -# ourselves (to workaround an issue with closing an asyncore socket when -# using select) -mininode_socket_map = dict() - -# One lock for synchronizing all data access between the networking thread (see -# NetworkThread below) and the thread running the test logic. For simplicity, -# NodeConn acquires this lock whenever delivering a message to to a NodeConnCB, -# and whenever adding anything to the send buffer (in send_message()). This -# lock should be acquired in the thread running the test logic to synchronize -# access to any data shared with the NodeConnCB or NodeConn. 
-mininode_lock = RLock() - -# Serialization/deserialization tools -def sha256(s): - return hashlib.new('sha256', s).digest() - -def ripemd160(s): - return hashlib.new('ripemd160', s).digest() - -def hash256(s): - return sha256(sha256(s)) - -def ser_compact_size(l): - r = b"" - if l < 253: - r = struct.pack("B", l) - elif l < 0x10000: - r = struct.pack("<BH", 253, l) - elif l < 0x100000000: - r = struct.pack("<BI", 254, l) - else: - r = struct.pack("<BQ", 255, l) - return r - -def deser_compact_size(f): - nit = struct.unpack("<B", f.read(1))[0] - if nit == 253: - nit = struct.unpack("<H", f.read(2))[0] - elif nit == 254: - nit = struct.unpack("<I", f.read(4))[0] - elif nit == 255: - nit = struct.unpack("<Q", f.read(8))[0] - return nit - -def deser_string(f): - nit = deser_compact_size(f) - return f.read(nit) - -def ser_string(s): - return ser_compact_size(len(s)) + s - -def deser_uint256(f): - r = 0 - for i in range(8): - t = struct.unpack("<I", f.read(4))[0] - r += t << (i * 32) - return r - - -def ser_uint256(u): - rs = b"" - for i in range(8): - rs += struct.pack("<I", u & 0xFFFFFFFF) - u >>= 32 - return rs - - -def uint256_from_str(s): - r = 0 - t = struct.unpack("<IIIIIIII", s[:32]) - for i in range(8): - r += t[i] << (i * 32) - return r - - -def uint256_from_compact(c): - nbytes = (c >> 24) & 0xFF - v = (c & 0xFFFFFF) << (8 * (nbytes - 3)) - return v - - -def deser_vector(f, c): - nit = deser_compact_size(f) - r = [] - for i in range(nit): - t = c() - t.deserialize(f) - r.append(t) - return r - - -# ser_function_name: Allow for an alternate serialization function on the -# entries in the vector (we use this for serializing the vector of transactions -# for a witness block). 
-def ser_vector(l, ser_function_name=None): - r = ser_compact_size(len(l)) - for i in l: - if ser_function_name: - r += getattr(i, ser_function_name)() - else: - r += i.serialize() - return r - - -def deser_uint256_vector(f): - nit = deser_compact_size(f) - r = [] - for i in range(nit): - t = deser_uint256(f) - r.append(t) - return r - - -def ser_uint256_vector(l): - r = ser_compact_size(len(l)) - for i in l: - r += ser_uint256(i) - return r - - -def deser_string_vector(f): - nit = deser_compact_size(f) - r = [] - for i in range(nit): - t = deser_string(f) - r.append(t) - return r - - -def ser_string_vector(l): - r = ser_compact_size(len(l)) - for sv in l: - r += ser_string(sv) - return r - - -def deser_int_vector(f): - nit = deser_compact_size(f) - r = [] - for i in range(nit): - t = struct.unpack("<i", f.read(4))[0] - r.append(t) - return r - - -def ser_int_vector(l): - r = ser_compact_size(len(l)) - for i in l: - r += struct.pack("<i", i) - return r - -# Deserialize from a hex string representation (eg from RPC) -def FromHex(obj, hex_string): - obj.deserialize(BytesIO(hex_str_to_bytes(hex_string))) - return obj - -# Convert a binary-serializable object to hex (eg for submission via RPC) -def ToHex(obj): - return bytes_to_hex_str(obj.serialize()) - -# Objects that map to bitcoind objects, which can be serialized/deserialized - -class CAddress(object): - def __init__(self): - self.nServices = 1 - self.pchReserved = b"\x00" * 10 + b"\xff" * 2 - self.ip = "0.0.0.0" - self.port = 0 - - def deserialize(self, f): - self.nServices = struct.unpack("<Q", f.read(8))[0] - self.pchReserved = f.read(12) - self.ip = socket.inet_ntoa(f.read(4)) - self.port = struct.unpack(">H", f.read(2))[0] - - def serialize(self): - r = b"" - r += struct.pack("<Q", self.nServices) - r += self.pchReserved - r += socket.inet_aton(self.ip) - r += struct.pack(">H", self.port) - return r - - def __repr__(self): - return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices, - self.ip, 
self.port) - -MSG_WITNESS_FLAG = 1<<30 - -class CInv(object): - typemap = { - 0: "Error", - 1: "TX", - 2: "Block", - 1|MSG_WITNESS_FLAG: "WitnessTx", - 2|MSG_WITNESS_FLAG : "WitnessBlock", - 4: "CompactBlock" - } - - def __init__(self, t=0, h=0): - self.type = t - self.hash = h - - def deserialize(self, f): - self.type = struct.unpack("<i", f.read(4))[0] - self.hash = deser_uint256(f) - - def serialize(self): - r = b"" - r += struct.pack("<i", self.type) - r += ser_uint256(self.hash) - return r - - def __repr__(self): - return "CInv(type=%s hash=%064x)" \ - % (self.typemap[self.type], self.hash) - - -class CBlockLocator(object): - def __init__(self): - self.nVersion = MY_VERSION - self.vHave = [] - - def deserialize(self, f): - self.nVersion = struct.unpack("<i", f.read(4))[0] - self.vHave = deser_uint256_vector(f) - - def serialize(self): - r = b"" - r += struct.pack("<i", self.nVersion) - r += ser_uint256_vector(self.vHave) - return r - - def __repr__(self): - return "CBlockLocator(nVersion=%i vHave=%s)" \ - % (self.nVersion, repr(self.vHave)) - - -class COutPoint(object): - def __init__(self, hash=0, n=0): - self.hash = hash - self.n = n - - def deserialize(self, f): - self.hash = deser_uint256(f) - self.n = struct.unpack("<I", f.read(4))[0] - - def serialize(self): - r = b"" - r += ser_uint256(self.hash) - r += struct.pack("<I", self.n) - return r - - def __repr__(self): - return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n) - - -class CTxIn(object): - def __init__(self, outpoint=None, scriptSig=b"", nSequence=0): - if outpoint is None: - self.prevout = COutPoint() - else: - self.prevout = outpoint - self.scriptSig = scriptSig - self.nSequence = nSequence - - def deserialize(self, f): - self.prevout = COutPoint() - self.prevout.deserialize(f) - self.scriptSig = deser_string(f) - self.nSequence = struct.unpack("<I", f.read(4))[0] - - def serialize(self): - r = b"" - r += self.prevout.serialize() - r += ser_string(self.scriptSig) - r += struct.pack("<I", 
self.nSequence) - return r - - def __repr__(self): - return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \ - % (repr(self.prevout), bytes_to_hex_str(self.scriptSig), - self.nSequence) - - -class CTxOut(object): - def __init__(self, nValue=0, scriptPubKey=b""): - self.nValue = nValue - self.scriptPubKey = scriptPubKey - - def deserialize(self, f): - self.nValue = struct.unpack("<q", f.read(8))[0] - self.scriptPubKey = deser_string(f) - - def serialize(self): - r = b"" - r += struct.pack("<q", self.nValue) - r += ser_string(self.scriptPubKey) - return r - - def __repr__(self): - return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \ - % (self.nValue // COIN, self.nValue % COIN, - bytes_to_hex_str(self.scriptPubKey)) - - -class CScriptWitness(object): - def __init__(self): - # stack is a vector of strings - self.stack = [] - - def __repr__(self): - return "CScriptWitness(%s)" % \ - (",".join([bytes_to_hex_str(x) for x in self.stack])) - - def is_null(self): - if self.stack: - return False - return True - - -class CTxInWitness(object): - def __init__(self): - self.scriptWitness = CScriptWitness() - - def deserialize(self, f): - self.scriptWitness.stack = deser_string_vector(f) - - def serialize(self): - return ser_string_vector(self.scriptWitness.stack) - - def __repr__(self): - return repr(self.scriptWitness) - - def is_null(self): - return self.scriptWitness.is_null() - - -class CTxWitness(object): - def __init__(self): - self.vtxinwit = [] - - def deserialize(self, f): - for i in range(len(self.vtxinwit)): - self.vtxinwit[i].deserialize(f) - - def serialize(self): - r = b"" - # This is different than the usual vector serialization -- - # we omit the length of the vector, which is required to be - # the same length as the transaction's vin vector. 
- for x in self.vtxinwit: - r += x.serialize() - return r - - def __repr__(self): - return "CTxWitness(%s)" % \ - (';'.join([repr(x) for x in self.vtxinwit])) - - def is_null(self): - for x in self.vtxinwit: - if not x.is_null(): - return False - return True - - -class CTransaction(object): - def __init__(self, tx=None): - if tx is None: - self.nVersion = 1 - self.vin = [] - self.vout = [] - self.wit = CTxWitness() - self.nLockTime = 0 - self.sha256 = None - self.hash = None - else: - self.nVersion = tx.nVersion - self.vin = copy.deepcopy(tx.vin) - self.vout = copy.deepcopy(tx.vout) - self.nLockTime = tx.nLockTime - self.sha256 = tx.sha256 - self.hash = tx.hash - self.wit = copy.deepcopy(tx.wit) - - def deserialize(self, f): - self.nVersion = struct.unpack("<i", f.read(4))[0] - self.vin = deser_vector(f, CTxIn) - flags = 0 - if len(self.vin) == 0: - flags = struct.unpack("<B", f.read(1))[0] - # Not sure why flags can't be zero, but this - # matches the implementation in bitcoind - if (flags != 0): - self.vin = deser_vector(f, CTxIn) - self.vout = deser_vector(f, CTxOut) - else: - self.vout = deser_vector(f, CTxOut) - if flags != 0: - self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))] - self.wit.deserialize(f) - self.nLockTime = struct.unpack("<I", f.read(4))[0] - self.sha256 = None - self.hash = None - - def serialize_without_witness(self): - r = b"" - r += struct.pack("<i", self.nVersion) - r += ser_vector(self.vin) - r += ser_vector(self.vout) - r += struct.pack("<I", self.nLockTime) - return r - - # Only serialize with witness when explicitly called for - def serialize_with_witness(self): - flags = 0 - if not self.wit.is_null(): - flags |= 1 - r = b"" - r += struct.pack("<i", self.nVersion) - if flags: - dummy = [] - r += ser_vector(dummy) - r += struct.pack("<B", flags) - r += ser_vector(self.vin) - r += ser_vector(self.vout) - if flags & 1: - if (len(self.wit.vtxinwit) != len(self.vin)): - # vtxinwit must have the same length as vin - 
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)] - for i in range(len(self.wit.vtxinwit), len(self.vin)): - self.wit.vtxinwit.append(CTxInWitness()) - r += self.wit.serialize() - r += struct.pack("<I", self.nLockTime) - return r - - # Regular serialization is without witness -- must explicitly - # call serialize_with_witness to include witness data. - def serialize(self): - return self.serialize_without_witness() - - # Recalculate the txid (transaction hash without witness) - def rehash(self): - self.sha256 = None - self.calc_sha256() - - # We will only cache the serialization without witness in - # self.sha256 and self.hash -- those are expected to be the txid. - def calc_sha256(self, with_witness=False): - if with_witness: - # Don't cache the result, just return it - return uint256_from_str(hash256(self.serialize_with_witness())) - - if self.sha256 is None: - self.sha256 = uint256_from_str(hash256(self.serialize_without_witness())) - self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii') - - def is_valid(self): - self.calc_sha256() - for tout in self.vout: - if tout.nValue < 0 or tout.nValue > 21000000 * COIN: - return False - return True - - def __repr__(self): - return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \ - % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime) - - -class CBlockHeader(object): - def __init__(self, header=None): - if header is None: - self.set_null() - else: - self.nVersion = header.nVersion - self.hashPrevBlock = header.hashPrevBlock - self.hashMerkleRoot = header.hashMerkleRoot - self.nTime = header.nTime - self.nBits = header.nBits - self.nNonce = header.nNonce - self.sha256 = header.sha256 - self.hash = header.hash - self.calc_sha256() - - def set_null(self): - self.nVersion = 1 - self.hashPrevBlock = 0 - self.hashMerkleRoot = 0 - self.nTime = 0 - self.nBits = 0 - self.nNonce = 0 - self.sha256 = None - self.hash = None - - def deserialize(self, f): - 
self.nVersion = struct.unpack("<i", f.read(4))[0] - self.hashPrevBlock = deser_uint256(f) - self.hashMerkleRoot = deser_uint256(f) - self.nTime = struct.unpack("<I", f.read(4))[0] - self.nBits = struct.unpack("<I", f.read(4))[0] - self.nNonce = struct.unpack("<I", f.read(4))[0] - self.sha256 = None - self.hash = None - - def serialize(self): - r = b"" - r += struct.pack("<i", self.nVersion) - r += ser_uint256(self.hashPrevBlock) - r += ser_uint256(self.hashMerkleRoot) - r += struct.pack("<I", self.nTime) - r += struct.pack("<I", self.nBits) - r += struct.pack("<I", self.nNonce) - return r - - def calc_sha256(self): - if self.sha256 is None: - r = b"" - r += struct.pack("<i", self.nVersion) - r += ser_uint256(self.hashPrevBlock) - r += ser_uint256(self.hashMerkleRoot) - r += struct.pack("<I", self.nTime) - r += struct.pack("<I", self.nBits) - r += struct.pack("<I", self.nNonce) - self.sha256 = uint256_from_str(hash256(r)) - self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii') - - def rehash(self): - self.sha256 = None - self.calc_sha256() - return self.sha256 - - def __repr__(self): - return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \ - % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, - time.ctime(self.nTime), self.nBits, self.nNonce) - - -class CBlock(CBlockHeader): - def __init__(self, header=None): - super(CBlock, self).__init__(header) - self.vtx = [] - - def deserialize(self, f): - super(CBlock, self).deserialize(f) - self.vtx = deser_vector(f, CTransaction) - - def serialize(self, with_witness=False): - r = b"" - r += super(CBlock, self).serialize() - if with_witness: - r += ser_vector(self.vtx, "serialize_with_witness") - else: - r += ser_vector(self.vtx) - return r - - # Calculate the merkle root given a vector of transaction hashes - def get_merkle_root(self, hashes): - while len(hashes) > 1: - newhashes = [] - for i in range(0, len(hashes), 2): - i2 = min(i+1, len(hashes)-1) 
- newhashes.append(hash256(hashes[i] + hashes[i2])) - hashes = newhashes - return uint256_from_str(hashes[0]) - - def calc_merkle_root(self): - hashes = [] - for tx in self.vtx: - tx.calc_sha256() - hashes.append(ser_uint256(tx.sha256)) - return self.get_merkle_root(hashes) - - def calc_witness_merkle_root(self): - # For witness root purposes, the hash of the - # coinbase, with witness, is defined to be 0...0 - hashes = [ser_uint256(0)] - - for tx in self.vtx[1:]: - # Calculate the hashes with witness data - hashes.append(ser_uint256(tx.calc_sha256(True))) - - return self.get_merkle_root(hashes) - - def is_valid(self): - self.calc_sha256() - target = uint256_from_compact(self.nBits) - if self.sha256 > target: - return False - for tx in self.vtx: - if not tx.is_valid(): - return False - if self.calc_merkle_root() != self.hashMerkleRoot: - return False - return True - - def solve(self): - self.rehash() - target = uint256_from_compact(self.nBits) - while self.sha256 > target: - self.nNonce += 1 - self.rehash() - - def __repr__(self): - return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \ - % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, - time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx)) - - -class CUnsignedAlert(object): - def __init__(self): - self.nVersion = 1 - self.nRelayUntil = 0 - self.nExpiration = 0 - self.nID = 0 - self.nCancel = 0 - self.setCancel = [] - self.nMinVer = 0 - self.nMaxVer = 0 - self.setSubVer = [] - self.nPriority = 0 - self.strComment = b"" - self.strStatusBar = b"" - self.strReserved = b"" - - def deserialize(self, f): - self.nVersion = struct.unpack("<i", f.read(4))[0] - self.nRelayUntil = struct.unpack("<q", f.read(8))[0] - self.nExpiration = struct.unpack("<q", f.read(8))[0] - self.nID = struct.unpack("<i", f.read(4))[0] - self.nCancel = struct.unpack("<i", f.read(4))[0] - self.setCancel = deser_int_vector(f) - self.nMinVer = struct.unpack("<i", 
f.read(4))[0] - self.nMaxVer = struct.unpack("<i", f.read(4))[0] - self.setSubVer = deser_string_vector(f) - self.nPriority = struct.unpack("<i", f.read(4))[0] - self.strComment = deser_string(f) - self.strStatusBar = deser_string(f) - self.strReserved = deser_string(f) - - def serialize(self): - r = b"" - r += struct.pack("<i", self.nVersion) - r += struct.pack("<q", self.nRelayUntil) - r += struct.pack("<q", self.nExpiration) - r += struct.pack("<i", self.nID) - r += struct.pack("<i", self.nCancel) - r += ser_int_vector(self.setCancel) - r += struct.pack("<i", self.nMinVer) - r += struct.pack("<i", self.nMaxVer) - r += ser_string_vector(self.setSubVer) - r += struct.pack("<i", self.nPriority) - r += ser_string(self.strComment) - r += ser_string(self.strStatusBar) - r += ser_string(self.strReserved) - return r - - def __repr__(self): - return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \ - % (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID, - self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority, - self.strComment, self.strStatusBar, self.strReserved) - - -class CAlert(object): - def __init__(self): - self.vchMsg = b"" - self.vchSig = b"" - - def deserialize(self, f): - self.vchMsg = deser_string(f) - self.vchSig = deser_string(f) - - def serialize(self): - r = b"" - r += ser_string(self.vchMsg) - r += ser_string(self.vchSig) - return r - - def __repr__(self): - return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \ - % (len(self.vchMsg), len(self.vchSig)) - - -class PrefilledTransaction(object): - def __init__(self, index=0, tx = None): - self.index = index - self.tx = tx - - def deserialize(self, f): - self.index = deser_compact_size(f) - self.tx = CTransaction() - self.tx.deserialize(f) - - def serialize(self, with_witness=False): - r = b"" - r += ser_compact_size(self.index) - if with_witness: - r += self.tx.serialize_with_witness() 
- else: - r += self.tx.serialize_without_witness() - return r - - def serialize_with_witness(self): - return self.serialize(with_witness=True) - - def __repr__(self): - return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx)) - -# This is what we send on the wire, in a cmpctblock message. -class P2PHeaderAndShortIDs(object): - def __init__(self): - self.header = CBlockHeader() - self.nonce = 0 - self.shortids_length = 0 - self.shortids = [] - self.prefilled_txn_length = 0 - self.prefilled_txn = [] - - def deserialize(self, f): - self.header.deserialize(f) - self.nonce = struct.unpack("<Q", f.read(8))[0] - self.shortids_length = deser_compact_size(f) - for i in range(self.shortids_length): - # shortids are defined to be 6 bytes in the spec, so append - # two zero bytes and read it in as an 8-byte number - self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0]) - self.prefilled_txn = deser_vector(f, PrefilledTransaction) - self.prefilled_txn_length = len(self.prefilled_txn) - - # When using version 2 compact blocks, we must serialize with_witness. 
- def serialize(self, with_witness=False): - r = b"" - r += self.header.serialize() - r += struct.pack("<Q", self.nonce) - r += ser_compact_size(self.shortids_length) - for x in self.shortids: - # We only want the first 6 bytes - r += struct.pack("<Q", x)[0:6] - if with_witness: - r += ser_vector(self.prefilled_txn, "serialize_with_witness") - else: - r += ser_vector(self.prefilled_txn) - return r - - def __repr__(self): - return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn)) - -# P2P version of the above that will use witness serialization (for compact -# block version 2) -class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs): - def serialize(self): - return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True) - -# Calculate the BIP 152-compact blocks shortid for a given transaction hash -def calculate_shortid(k0, k1, tx_hash): - expected_shortid = siphash256(k0, k1, tx_hash) - expected_shortid &= 0x0000ffffffffffff - return expected_shortid - -# This version gets rid of the array lengths, and reinterprets the differential -# encoding into indices that can be used for lookup. 
-class HeaderAndShortIDs(object): - def __init__(self, p2pheaders_and_shortids = None): - self.header = CBlockHeader() - self.nonce = 0 - self.shortids = [] - self.prefilled_txn = [] - self.use_witness = False - - if p2pheaders_and_shortids != None: - self.header = p2pheaders_and_shortids.header - self.nonce = p2pheaders_and_shortids.nonce - self.shortids = p2pheaders_and_shortids.shortids - last_index = -1 - for x in p2pheaders_and_shortids.prefilled_txn: - self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx)) - last_index = self.prefilled_txn[-1].index - - def to_p2p(self): - if self.use_witness: - ret = P2PHeaderAndShortWitnessIDs() - else: - ret = P2PHeaderAndShortIDs() - ret.header = self.header - ret.nonce = self.nonce - ret.shortids_length = len(self.shortids) - ret.shortids = self.shortids - ret.prefilled_txn_length = len(self.prefilled_txn) - ret.prefilled_txn = [] - last_index = -1 - for x in self.prefilled_txn: - ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx)) - last_index = x.index - return ret - - def get_siphash_keys(self): - header_nonce = self.header.serialize() - header_nonce += struct.pack("<Q", self.nonce) - hash_header_nonce_as_str = sha256(header_nonce) - key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0] - key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0] - return [ key0, key1 ] - - # Version 2 compact blocks use wtxid in shortids (rather than txid) - def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False): - self.header = CBlockHeader(block) - self.nonce = nonce - self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ] - self.shortids = [] - self.use_witness = use_witness - [k0, k1] = self.get_siphash_keys() - for i in range(len(block.vtx)): - if i not in prefill_list: - tx_hash = block.vtx[i].sha256 - if use_witness: - tx_hash = block.vtx[i].calc_sha256(with_witness=True) - 
self.shortids.append(calculate_shortid(k0, k1, tx_hash)) - - def __repr__(self): - return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn)) - - -class BlockTransactionsRequest(object): - - def __init__(self, blockhash=0, indexes = None): - self.blockhash = blockhash - self.indexes = indexes if indexes != None else [] - - def deserialize(self, f): - self.blockhash = deser_uint256(f) - indexes_length = deser_compact_size(f) - for i in range(indexes_length): - self.indexes.append(deser_compact_size(f)) - - def serialize(self): - r = b"" - r += ser_uint256(self.blockhash) - r += ser_compact_size(len(self.indexes)) - for x in self.indexes: - r += ser_compact_size(x) - return r - - # helper to set the differentially encoded indexes from absolute ones - def from_absolute(self, absolute_indexes): - self.indexes = [] - last_index = -1 - for x in absolute_indexes: - self.indexes.append(x-last_index-1) - last_index = x - - def to_absolute(self): - absolute_indexes = [] - last_index = -1 - for x in self.indexes: - absolute_indexes.append(x+last_index+1) - last_index = absolute_indexes[-1] - return absolute_indexes - - def __repr__(self): - return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes)) - - -class BlockTransactions(object): - - def __init__(self, blockhash=0, transactions = None): - self.blockhash = blockhash - self.transactions = transactions if transactions != None else [] - - def deserialize(self, f): - self.blockhash = deser_uint256(f) - self.transactions = deser_vector(f, CTransaction) - - def serialize(self, with_witness=False): - r = b"" - r += ser_uint256(self.blockhash) - if with_witness: - r += ser_vector(self.transactions, "serialize_with_witness") - else: - r += ser_vector(self.transactions) - return r - - def __repr__(self): - return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, 
repr(self.transactions)) - - -# Objects that correspond to messages on the wire -class msg_version(object): - command = b"version" - - def __init__(self): - self.nVersion = MY_VERSION - self.nServices = 1 - self.nTime = int(time.time()) - self.addrTo = CAddress() - self.addrFrom = CAddress() - self.nNonce = random.getrandbits(64) - self.strSubVer = MY_SUBVERSION - self.nStartingHeight = -1 - self.nRelay = MY_RELAY - - def deserialize(self, f): - self.nVersion = struct.unpack("<i", f.read(4))[0] - if self.nVersion == 10300: - self.nVersion = 300 - self.nServices = struct.unpack("<Q", f.read(8))[0] - self.nTime = struct.unpack("<q", f.read(8))[0] - self.addrTo = CAddress() - self.addrTo.deserialize(f) - - if self.nVersion >= 106: - self.addrFrom = CAddress() - self.addrFrom.deserialize(f) - self.nNonce = struct.unpack("<Q", f.read(8))[0] - self.strSubVer = deser_string(f) - else: - self.addrFrom = None - self.nNonce = None - self.strSubVer = None - self.nStartingHeight = None - - if self.nVersion >= 209: - self.nStartingHeight = struct.unpack("<i", f.read(4))[0] - else: - self.nStartingHeight = None - - if self.nVersion >= 70001: - # Relay field is optional for version 70001 onwards - try: - self.nRelay = struct.unpack("<b", f.read(1))[0] - except: - self.nRelay = 0 - else: - self.nRelay = 0 - - def serialize(self): - r = b"" - r += struct.pack("<i", self.nVersion) - r += struct.pack("<Q", self.nServices) - r += struct.pack("<q", self.nTime) - r += self.addrTo.serialize() - r += self.addrFrom.serialize() - r += struct.pack("<Q", self.nNonce) - r += ser_string(self.strSubVer) - r += struct.pack("<i", self.nStartingHeight) - r += struct.pack("<b", self.nRelay) - return r - - def __repr__(self): - return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \ - % (self.nVersion, self.nServices, time.ctime(self.nTime), - repr(self.addrTo), repr(self.addrFrom), self.nNonce, - self.strSubVer, 
self.nStartingHeight, self.nRelay) - - -class msg_verack(object): - command = b"verack" - - def __init__(self): - pass - - def deserialize(self, f): - pass - - def serialize(self): - return b"" - - def __repr__(self): - return "msg_verack()" - - -class msg_addr(object): - command = b"addr" - - def __init__(self): - self.addrs = [] - - def deserialize(self, f): - self.addrs = deser_vector(f, CAddress) - - def serialize(self): - return ser_vector(self.addrs) - - def __repr__(self): - return "msg_addr(addrs=%s)" % (repr(self.addrs)) - - -class msg_alert(object): - command = b"alert" - - def __init__(self): - self.alert = CAlert() - - def deserialize(self, f): - self.alert = CAlert() - self.alert.deserialize(f) - - def serialize(self): - r = b"" - r += self.alert.serialize() - return r - - def __repr__(self): - return "msg_alert(alert=%s)" % (repr(self.alert), ) - - -class msg_inv(object): - command = b"inv" - - def __init__(self, inv=None): - if inv is None: - self.inv = [] - else: - self.inv = inv - - def deserialize(self, f): - self.inv = deser_vector(f, CInv) - - def serialize(self): - return ser_vector(self.inv) - - def __repr__(self): - return "msg_inv(inv=%s)" % (repr(self.inv)) - - -class msg_getdata(object): - command = b"getdata" - - def __init__(self, inv=None): - self.inv = inv if inv != None else [] - - def deserialize(self, f): - self.inv = deser_vector(f, CInv) - - def serialize(self): - return ser_vector(self.inv) - - def __repr__(self): - return "msg_getdata(inv=%s)" % (repr(self.inv)) - - -class msg_getblocks(object): - command = b"getblocks" - - def __init__(self): - self.locator = CBlockLocator() - self.hashstop = 0 - - def deserialize(self, f): - self.locator = CBlockLocator() - self.locator.deserialize(f) - self.hashstop = deser_uint256(f) - - def serialize(self): - r = b"" - r += self.locator.serialize() - r += ser_uint256(self.hashstop) - return r - - def __repr__(self): - return "msg_getblocks(locator=%s hashstop=%064x)" \ - % 
(repr(self.locator), self.hashstop) - - -class msg_tx(object): - command = b"tx" - - def __init__(self, tx=CTransaction()): - self.tx = tx - - def deserialize(self, f): - self.tx.deserialize(f) - - def serialize(self): - return self.tx.serialize_without_witness() - - def __repr__(self): - return "msg_tx(tx=%s)" % (repr(self.tx)) - -class msg_witness_tx(msg_tx): - - def serialize(self): - return self.tx.serialize_with_witness() - - -class msg_block(object): - command = b"block" - - def __init__(self, block=None): - if block is None: - self.block = CBlock() - else: - self.block = block - - def deserialize(self, f): - self.block.deserialize(f) - - def serialize(self): - return self.block.serialize() - - def __repr__(self): - return "msg_block(block=%s)" % (repr(self.block)) - -# for cases where a user needs tighter control over what is sent over the wire -# note that the user must supply the name of the command, and the data -class msg_generic(object): - def __init__(self, command, data=None): - self.command = command - self.data = data - - def serialize(self): - return self.data - - def __repr__(self): - return "msg_generic()" - -class msg_witness_block(msg_block): - - def serialize(self): - r = self.block.serialize(with_witness=True) - return r - -class msg_getaddr(object): - command = b"getaddr" - - def __init__(self): - pass - - def deserialize(self, f): - pass - - def serialize(self): - return b"" - - def __repr__(self): - return "msg_getaddr()" - - -class msg_ping_prebip31(object): - command = b"ping" - - def __init__(self): - pass - - def deserialize(self, f): - pass - - def serialize(self): - return b"" - - def __repr__(self): - return "msg_ping() (pre-bip31)" - - -class msg_ping(object): - command = b"ping" - - def __init__(self, nonce=0): - self.nonce = nonce - - def deserialize(self, f): - self.nonce = struct.unpack("<Q", f.read(8))[0] - - def serialize(self): - r = b"" - r += struct.pack("<Q", self.nonce) - return r - - def __repr__(self): - return 
"msg_ping(nonce=%08x)" % self.nonce - - -class msg_pong(object): - command = b"pong" - - def __init__(self, nonce=0): - self.nonce = nonce - - def deserialize(self, f): - self.nonce = struct.unpack("<Q", f.read(8))[0] - - def serialize(self): - r = b"" - r += struct.pack("<Q", self.nonce) - return r - - def __repr__(self): - return "msg_pong(nonce=%08x)" % self.nonce - - -class msg_mempool(object): - command = b"mempool" - - def __init__(self): - pass - - def deserialize(self, f): - pass - - def serialize(self): - return b"" - - def __repr__(self): - return "msg_mempool()" - -class msg_sendheaders(object): - command = b"sendheaders" - - def __init__(self): - pass - - def deserialize(self, f): - pass - - def serialize(self): - return b"" - - def __repr__(self): - return "msg_sendheaders()" - - -# getheaders message has -# number of entries -# vector of hashes -# hash_stop (hash of last desired block header, 0 to get as many as possible) -class msg_getheaders(object): - command = b"getheaders" - - def __init__(self): - self.locator = CBlockLocator() - self.hashstop = 0 - - def deserialize(self, f): - self.locator = CBlockLocator() - self.locator.deserialize(f) - self.hashstop = deser_uint256(f) - - def serialize(self): - r = b"" - r += self.locator.serialize() - r += ser_uint256(self.hashstop) - return r - - def __repr__(self): - return "msg_getheaders(locator=%s, stop=%064x)" \ - % (repr(self.locator), self.hashstop) - - -# headers message has -# <count> <vector of block headers> -class msg_headers(object): - command = b"headers" - - def __init__(self): - self.headers = [] - - def deserialize(self, f): - # comment in bitcoind indicates these should be deserialized as blocks - blocks = deser_vector(f, CBlock) - for x in blocks: - self.headers.append(CBlockHeader(x)) - - def serialize(self): - blocks = [CBlock(x) for x in self.headers] - return ser_vector(blocks) - - def __repr__(self): - return "msg_headers(headers=%s)" % repr(self.headers) - - -class 
msg_reject(object): - command = b"reject" - REJECT_MALFORMED = 1 - - def __init__(self): - self.message = b"" - self.code = 0 - self.reason = b"" - self.data = 0 - - def deserialize(self, f): - self.message = deser_string(f) - self.code = struct.unpack("<B", f.read(1))[0] - self.reason = deser_string(f) - if (self.code != self.REJECT_MALFORMED and - (self.message == b"block" or self.message == b"tx")): - self.data = deser_uint256(f) - - def serialize(self): - r = ser_string(self.message) - r += struct.pack("<B", self.code) - r += ser_string(self.reason) - if (self.code != self.REJECT_MALFORMED and - (self.message == b"block" or self.message == b"tx")): - r += ser_uint256(self.data) - return r - - def __repr__(self): - return "msg_reject: %s %d %s [%064x]" \ - % (self.message, self.code, self.reason, self.data) - -# Helper function -def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf')): - attempt = 0 - elapsed = 0 - - while attempt < attempts and elapsed < timeout: - with mininode_lock: - if predicate(): - return True - attempt += 1 - elapsed += 0.05 - time.sleep(0.05) - - return False - -class msg_feefilter(object): - command = b"feefilter" - - def __init__(self, feerate=0): - self.feerate = feerate - - def deserialize(self, f): - self.feerate = struct.unpack("<Q", f.read(8))[0] - - def serialize(self): - r = b"" - r += struct.pack("<Q", self.feerate) - return r - - def __repr__(self): - return "msg_feefilter(feerate=%08x)" % self.feerate - -class msg_sendcmpct(object): - command = b"sendcmpct" - - def __init__(self): - self.announce = False - self.version = 1 - - def deserialize(self, f): - self.announce = struct.unpack("<?", f.read(1))[0] - self.version = struct.unpack("<Q", f.read(8))[0] - - def serialize(self): - r = b"" - r += struct.pack("<?", self.announce) - r += struct.pack("<Q", self.version) - return r - - def __repr__(self): - return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version) - -class 
msg_cmpctblock(object): - command = b"cmpctblock" - - def __init__(self, header_and_shortids = None): - self.header_and_shortids = header_and_shortids - - def deserialize(self, f): - self.header_and_shortids = P2PHeaderAndShortIDs() - self.header_and_shortids.deserialize(f) - - def serialize(self): - r = b"" - r += self.header_and_shortids.serialize() - return r - - def __repr__(self): - return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids) - -class msg_getblocktxn(object): - command = b"getblocktxn" - - def __init__(self): - self.block_txn_request = None - - def deserialize(self, f): - self.block_txn_request = BlockTransactionsRequest() - self.block_txn_request.deserialize(f) - - def serialize(self): - r = b"" - r += self.block_txn_request.serialize() - return r - - def __repr__(self): - return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request)) - -class msg_blocktxn(object): - command = b"blocktxn" - - def __init__(self): - self.block_transactions = BlockTransactions() - - def deserialize(self, f): - self.block_transactions.deserialize(f) - - def serialize(self): - r = b"" - r += self.block_transactions.serialize() - return r - - def __repr__(self): - return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions)) - -class msg_witness_blocktxn(msg_blocktxn): - def serialize(self): - r = b"" - r += self.block_transactions.serialize(with_witness=True) - return r - -# This is what a callback should look like for NodeConn -# Reimplement the on_* functions to provide handling for events -class NodeConnCB(object): - def __init__(self): - self.verack_received = False - # deliver_sleep_time is helpful for debugging race conditions in p2p - # tests; it causes message delivery to sleep for the specified time - # before acquiring the global lock and delivering the next message. 
- self.deliver_sleep_time = None - # Remember the services our peer has advertised - self.peer_services = None - - def set_deliver_sleep_time(self, value): - with mininode_lock: - self.deliver_sleep_time = value - - def get_deliver_sleep_time(self): - with mininode_lock: - return self.deliver_sleep_time - - # Spin until verack message is received from the node. - # Tests may want to use this as a signal that the test can begin. - # This can be called from the testing thread, so it needs to acquire the - # global lock. - def wait_for_verack(self): - while True: - with mininode_lock: - if self.verack_received: - return - time.sleep(0.05) - - def deliver(self, conn, message): - deliver_sleep = self.get_deliver_sleep_time() - if deliver_sleep is not None: - time.sleep(deliver_sleep) - with mininode_lock: - try: - getattr(self, 'on_' + message.command.decode('ascii'))(conn, message) - except: - logger.exception("ERROR delivering %s" % repr(message)) - - def on_version(self, conn, message): - if message.nVersion >= 209: - conn.send_message(msg_verack()) - conn.ver_send = min(MY_VERSION, message.nVersion) - if message.nVersion < 209: - conn.ver_recv = conn.ver_send - conn.nServices = message.nServices - - def on_verack(self, conn, message): - conn.ver_recv = conn.ver_send - self.verack_received = True - - def on_inv(self, conn, message): - want = msg_getdata() - for i in message.inv: - if i.type != 0: - want.inv.append(i) - if len(want.inv): - conn.send_message(want) - - def on_addr(self, conn, message): pass - def on_alert(self, conn, message): pass - def on_getdata(self, conn, message): pass - def on_getblocks(self, conn, message): pass - def on_tx(self, conn, message): pass - def on_block(self, conn, message): pass - def on_getaddr(self, conn, message): pass - def on_headers(self, conn, message): pass - def on_getheaders(self, conn, message): pass - def on_ping(self, conn, message): - if conn.ver_send > BIP0031_VERSION: - conn.send_message(msg_pong(message.nonce)) - 
def on_reject(self, conn, message): pass - def on_open(self, conn): pass - def on_close(self, conn): pass - def on_mempool(self, conn): pass - def on_pong(self, conn, message): pass - def on_feefilter(self, conn, message): pass - def on_sendheaders(self, conn, message): pass - def on_sendcmpct(self, conn, message): pass - def on_cmpctblock(self, conn, message): pass - def on_getblocktxn(self, conn, message): pass - def on_blocktxn(self, conn, message): pass - -# More useful callbacks and functions for NodeConnCB's which have a single NodeConn -class SingleNodeConnCB(NodeConnCB): - def __init__(self): - NodeConnCB.__init__(self) - self.connection = None - self.ping_counter = 1 - self.last_pong = msg_pong() - - def add_connection(self, conn): - self.connection = conn - - # Wrapper for the NodeConn's send_message function - def send_message(self, message): - self.connection.send_message(message) - - def send_and_ping(self, message): - self.send_message(message) - self.sync_with_ping() - - def on_pong(self, conn, message): - self.last_pong = message - - # Sync up with the node - def sync_with_ping(self, timeout=30): - def received_pong(): - return (self.last_pong.nonce == self.ping_counter) - self.send_message(msg_ping(nonce=self.ping_counter)) - success = wait_until(received_pong, timeout=timeout) - self.ping_counter += 1 - return success - -# The actual NodeConn class -# This class provides an interface for a p2p connection to a specified node -class NodeConn(asyncore.dispatcher): - messagemap = { - b"version": msg_version, - b"verack": msg_verack, - b"addr": msg_addr, - b"alert": msg_alert, - b"inv": msg_inv, - b"getdata": msg_getdata, - b"getblocks": msg_getblocks, - b"tx": msg_tx, - b"block": msg_block, - b"getaddr": msg_getaddr, - b"ping": msg_ping, - b"pong": msg_pong, - b"headers": msg_headers, - b"getheaders": msg_getheaders, - b"reject": msg_reject, - b"mempool": msg_mempool, - b"feefilter": msg_feefilter, - b"sendheaders": msg_sendheaders, - b"sendcmpct": 
msg_sendcmpct, - b"cmpctblock": msg_cmpctblock, - b"getblocktxn": msg_getblocktxn, - b"blocktxn": msg_blocktxn - } - MAGIC_BYTES = { - "mainnet": b"\xf9\xbe\xb4\xd9", # mainnet - "testnet3": b"\x0b\x11\x09\x07", # testnet3 - "regtest": b"\xfa\xbf\xb5\xda", # regtest - } - - def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True): - asyncore.dispatcher.__init__(self, map=mininode_socket_map) - self.dstaddr = dstaddr - self.dstport = dstport - self.create_socket(socket.AF_INET, socket.SOCK_STREAM) - self.sendbuf = b"" - self.recvbuf = b"" - self.ver_send = 209 - self.ver_recv = 209 - self.last_sent = 0 - self.state = "connecting" - self.network = net - self.cb = callback - self.disconnect = False - self.nServices = 0 - - if send_version: - # stuff version msg into sendbuf - vt = msg_version() - vt.nServices = services - vt.addrTo.ip = self.dstaddr - vt.addrTo.port = self.dstport - vt.addrFrom.ip = "0.0.0.0" - vt.addrFrom.port = 0 - self.send_message(vt, True) - - logger.info('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport)) - - try: - self.connect((dstaddr, dstport)) - except: - self.handle_close() - self.rpc = rpc - - def handle_connect(self): - if self.state != "connected": - logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport)) - self.state = "connected" - self.cb.on_open(self) - - def handle_close(self): - logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport)) - self.state = "closed" - self.recvbuf = b"" - self.sendbuf = b"" - try: - self.close() - except: - pass - self.cb.on_close(self) - - def handle_read(self): - try: - t = self.recv(8192) - if len(t) > 0: - self.recvbuf += t - self.got_data() - except: - pass - - def readable(self): - return True - - def writable(self): - with mininode_lock: - pre_connection = self.state == "connecting" - length = len(self.sendbuf) - return (length > 0 or pre_connection) - - def handle_write(self): - with 
mininode_lock: - # asyncore does not expose socket connection, only the first read/write - # event, thus we must check connection manually here to know when we - # actually connect - if self.state == "connecting": - self.handle_connect() - if not self.writable(): - return - - try: - sent = self.send(self.sendbuf) - except: - self.handle_close() - return - self.sendbuf = self.sendbuf[sent:] - - def got_data(self): - try: - while True: - if len(self.recvbuf) < 4: - return - if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]: - raise ValueError("got garbage %s" % repr(self.recvbuf)) - if self.ver_recv < 209: - if len(self.recvbuf) < 4 + 12 + 4: - return - command = self.recvbuf[4:4+12].split(b"\x00", 1)[0] - msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0] - checksum = None - if len(self.recvbuf) < 4 + 12 + 4 + msglen: - return - msg = self.recvbuf[4+12+4:4+12+4+msglen] - self.recvbuf = self.recvbuf[4+12+4+msglen:] - else: - if len(self.recvbuf) < 4 + 12 + 4 + 4: - return - command = self.recvbuf[4:4+12].split(b"\x00", 1)[0] - msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0] - checksum = self.recvbuf[4+12+4:4+12+4+4] - if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen: - return - msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen] - th = sha256(msg) - h = sha256(th) - if checksum != h[:4]: - raise ValueError("got bad checksum " + repr(self.recvbuf)) - self.recvbuf = self.recvbuf[4+12+4+4+msglen:] - if command in self.messagemap: - f = BytesIO(msg) - t = self.messagemap[command]() - t.deserialize(f) - self.got_message(t) - else: - logger.warning("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg))) - except Exception as e: - logger.exception('got_data:', repr(e)) - - def send_message(self, message, pushbuf=False): - if self.state != "connected" and not pushbuf: - raise IOError('Not connected, no pushbuf') - logger.debug("Send message to %s:%d: %s" % (self.dstaddr, self.dstport, repr(message))) - command = 
message.command - data = message.serialize() - tmsg = self.MAGIC_BYTES[self.network] - tmsg += command - tmsg += b"\x00" * (12 - len(command)) - tmsg += struct.pack("<I", len(data)) - if self.ver_send >= 209: - th = sha256(data) - h = sha256(th) - tmsg += h[:4] - tmsg += data - with mininode_lock: - self.sendbuf += tmsg - self.last_sent = time.time() - - def got_message(self, message): - if message.command == b"version": - if message.nVersion <= BIP0031_VERSION: - self.messagemap[b'ping'] = msg_ping_prebip31 - if self.last_sent + 30 * 60 < time.time(): - self.send_message(self.messagemap[b'ping']()) - logger.debug("Received message from %s:%d: %s" % (self.dstaddr, self.dstport, repr(message))) - self.cb.deliver(self, message) - - def disconnect_node(self): - self.disconnect = True - - -class NetworkThread(Thread): - def run(self): - while mininode_socket_map: - # We check for whether to disconnect outside of the asyncore - # loop to workaround the behavior of asyncore when using - # select - disconnected = [] - for fd, obj in mininode_socket_map.items(): - if obj.disconnect: - disconnected.append(obj) - [ obj.handle_close() for obj in disconnected ] - asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1) - - -# An exception we can raise if we detect a potential disconnect -# (p2p or rpc) before the test is complete -class EarlyDisconnectError(Exception): - def __init__(self, value): - self.value = value - - def __str__(self): - return repr(self.value) diff --git a/qa/rpc-tests/test_framework/netutil.py b/qa/rpc-tests/test_framework/netutil.py deleted file mode 100644 index 45d8e22d22..0000000000 --- a/qa/rpc-tests/test_framework/netutil.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Linux network utilities. 
- -Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal -""" - -import sys -import socket -import fcntl -import struct -import array -import os -from binascii import unhexlify, hexlify - -STATE_ESTABLISHED = '01' -STATE_SYN_SENT = '02' -STATE_SYN_RECV = '03' -STATE_FIN_WAIT1 = '04' -STATE_FIN_WAIT2 = '05' -STATE_TIME_WAIT = '06' -STATE_CLOSE = '07' -STATE_CLOSE_WAIT = '08' -STATE_LAST_ACK = '09' -STATE_LISTEN = '0A' -STATE_CLOSING = '0B' - -def get_socket_inodes(pid): - ''' - Get list of socket inodes for process pid. - ''' - base = '/proc/%i/fd' % pid - inodes = [] - for item in os.listdir(base): - target = os.readlink(os.path.join(base, item)) - if target.startswith('socket:'): - inodes.append(int(target[8:-1])) - return inodes - -def _remove_empty(array): - return [x for x in array if x !=''] - -def _convert_ip_port(array): - host,port = array.split(':') - # convert host from mangled-per-four-bytes form as used by kernel - host = unhexlify(host) - host_out = '' - for x in range(0, len(host) // 4): - (val,) = struct.unpack('=I', host[x*4:(x+1)*4]) - host_out += '%08x' % val - - return host_out,int(port,16) - -def netstat(typ='tcp'): - ''' - Function to return a list with status of tcp connections at linux systems - To get pid of all network process running on system, you must run this script - as superuser - ''' - with open('/proc/net/'+typ,'r',encoding='utf8') as f: - content = f.readlines() - content.pop(0) - result = [] - for line in content: - line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces. - tcp_id = line_array[0] - l_addr = _convert_ip_port(line_array[1]) - r_addr = _convert_ip_port(line_array[2]) - state = line_array[3] - inode = int(line_array[9]) # Need the inode to match with process pid. 
- nline = [tcp_id, l_addr, r_addr, state, inode] - result.append(nline) - return result - -def get_bind_addrs(pid): - ''' - Get bind addresses as (host,port) tuples for process pid. - ''' - inodes = get_socket_inodes(pid) - bind_addrs = [] - for conn in netstat('tcp') + netstat('tcp6'): - if conn[3] == STATE_LISTEN and conn[4] in inodes: - bind_addrs.append(conn[1]) - return bind_addrs - -# from: http://code.activestate.com/recipes/439093/ -def all_interfaces(): - ''' - Return all interfaces that are up - ''' - is_64bits = sys.maxsize > 2**32 - struct_size = 40 if is_64bits else 32 - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - max_possible = 8 # initial value - while True: - bytes = max_possible * struct_size - names = array.array('B', b'\0' * bytes) - outbytes = struct.unpack('iL', fcntl.ioctl( - s.fileno(), - 0x8912, # SIOCGIFCONF - struct.pack('iL', bytes, names.buffer_info()[0]) - ))[0] - if outbytes == bytes: - max_possible *= 2 - else: - break - namestr = names.tostring() - return [(namestr[i:i+16].split(b'\0', 1)[0], - socket.inet_ntoa(namestr[i+20:i+24])) - for i in range(0, outbytes, struct_size)] - -def addr_to_hex(addr): - ''' - Convert string IPv4 or IPv6 address to binary address as returned by - get_bind_addrs. - Very naive implementation that certainly doesn't work for all IPv6 variants. - ''' - if '.' 
in addr: # IPv4 - addr = [int(x) for x in addr.split('.')] - elif ':' in addr: # IPv6 - sub = [[], []] # prefix, suffix - x = 0 - addr = addr.split(':') - for i,comp in enumerate(addr): - if comp == '': - if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end - continue - x += 1 # :: skips to suffix - assert(x < 2) - else: # two bytes per component - val = int(comp, 16) - sub[x].append(val >> 8) - sub[x].append(val & 0xff) - nullbytes = 16 - len(sub[0]) - len(sub[1]) - assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)) - addr = sub[0] + ([0] * nullbytes) + sub[1] - else: - raise ValueError('Could not parse address %s' % addr) - return hexlify(bytearray(addr)).decode('ascii') - -def test_ipv6_local(): - ''' - Check for (local) IPv6 support. - ''' - import socket - # By using SOCK_DGRAM this will not actually make a connection, but it will - # fail if there is no route to IPv6 localhost. - have_ipv6 = True - try: - s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) - s.connect(('::1', 0)) - except socket.error: - have_ipv6 = False - return have_ipv6 diff --git a/qa/rpc-tests/test_framework/script.py b/qa/rpc-tests/test_framework/script.py deleted file mode 100644 index 3d9572788e..0000000000 --- a/qa/rpc-tests/test_framework/script.py +++ /dev/null @@ -1,939 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Functionality to build scripts, as well as SignatureHash(). - -This file is modified from python-bitcoinlib. 
-""" - -from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string -from binascii import hexlify -import hashlib - -import sys -bchr = chr -bord = ord -if sys.version > '3': - long = int - bchr = lambda x: bytes([x]) - bord = lambda x: x - -import struct - -from .bignum import bn2vch - -MAX_SCRIPT_SIZE = 10000 -MAX_SCRIPT_ELEMENT_SIZE = 520 -MAX_SCRIPT_OPCODES = 201 - -OPCODE_NAMES = {} - -def hash160(s): - return hashlib.new('ripemd160', sha256(s)).digest() - - -_opcode_instances = [] -class CScriptOp(int): - """A single script opcode""" - __slots__ = [] - - @staticmethod - def encode_op_pushdata(d): - """Encode a PUSHDATA op, returning bytes""" - if len(d) < 0x4c: - return b'' + bchr(len(d)) + d # OP_PUSHDATA - elif len(d) <= 0xff: - return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1 - elif len(d) <= 0xffff: - return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2 - elif len(d) <= 0xffffffff: - return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4 - else: - raise ValueError("Data too long to encode in a PUSHDATA op") - - @staticmethod - def encode_op_n(n): - """Encode a small integer op, returning an opcode""" - if not (0 <= n <= 16): - raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n) - - if n == 0: - return OP_0 - else: - return CScriptOp(OP_1 + n-1) - - def decode_op_n(self): - """Decode a small integer opcode, returning an integer""" - if self == OP_0: - return 0 - - if not (self == OP_0 or OP_1 <= self <= OP_16): - raise ValueError('op %r is not an OP_N' % self) - - return int(self - OP_1+1) - - def is_small_int(self): - """Return true if the op pushes a small integer to the stack""" - if 0x51 <= self <= 0x60 or self == 0: - return True - else: - return False - - def __str__(self): - return repr(self) - - def __repr__(self): - if self in OPCODE_NAMES: - return OPCODE_NAMES[self] - else: - return 'CScriptOp(0x%x)' % self - - def __new__(cls, n): - try: - return _opcode_instances[n] 
- except IndexError: - assert len(_opcode_instances) == n - _opcode_instances.append(super(CScriptOp, cls).__new__(cls, n)) - return _opcode_instances[n] - -# Populate opcode instance table -for n in range(0xff+1): - CScriptOp(n) - - -# push value -OP_0 = CScriptOp(0x00) -OP_FALSE = OP_0 -OP_PUSHDATA1 = CScriptOp(0x4c) -OP_PUSHDATA2 = CScriptOp(0x4d) -OP_PUSHDATA4 = CScriptOp(0x4e) -OP_1NEGATE = CScriptOp(0x4f) -OP_RESERVED = CScriptOp(0x50) -OP_1 = CScriptOp(0x51) -OP_TRUE=OP_1 -OP_2 = CScriptOp(0x52) -OP_3 = CScriptOp(0x53) -OP_4 = CScriptOp(0x54) -OP_5 = CScriptOp(0x55) -OP_6 = CScriptOp(0x56) -OP_7 = CScriptOp(0x57) -OP_8 = CScriptOp(0x58) -OP_9 = CScriptOp(0x59) -OP_10 = CScriptOp(0x5a) -OP_11 = CScriptOp(0x5b) -OP_12 = CScriptOp(0x5c) -OP_13 = CScriptOp(0x5d) -OP_14 = CScriptOp(0x5e) -OP_15 = CScriptOp(0x5f) -OP_16 = CScriptOp(0x60) - -# control -OP_NOP = CScriptOp(0x61) -OP_VER = CScriptOp(0x62) -OP_IF = CScriptOp(0x63) -OP_NOTIF = CScriptOp(0x64) -OP_VERIF = CScriptOp(0x65) -OP_VERNOTIF = CScriptOp(0x66) -OP_ELSE = CScriptOp(0x67) -OP_ENDIF = CScriptOp(0x68) -OP_VERIFY = CScriptOp(0x69) -OP_RETURN = CScriptOp(0x6a) - -# stack ops -OP_TOALTSTACK = CScriptOp(0x6b) -OP_FROMALTSTACK = CScriptOp(0x6c) -OP_2DROP = CScriptOp(0x6d) -OP_2DUP = CScriptOp(0x6e) -OP_3DUP = CScriptOp(0x6f) -OP_2OVER = CScriptOp(0x70) -OP_2ROT = CScriptOp(0x71) -OP_2SWAP = CScriptOp(0x72) -OP_IFDUP = CScriptOp(0x73) -OP_DEPTH = CScriptOp(0x74) -OP_DROP = CScriptOp(0x75) -OP_DUP = CScriptOp(0x76) -OP_NIP = CScriptOp(0x77) -OP_OVER = CScriptOp(0x78) -OP_PICK = CScriptOp(0x79) -OP_ROLL = CScriptOp(0x7a) -OP_ROT = CScriptOp(0x7b) -OP_SWAP = CScriptOp(0x7c) -OP_TUCK = CScriptOp(0x7d) - -# splice ops -OP_CAT = CScriptOp(0x7e) -OP_SUBSTR = CScriptOp(0x7f) -OP_LEFT = CScriptOp(0x80) -OP_RIGHT = CScriptOp(0x81) -OP_SIZE = CScriptOp(0x82) - -# bit logic -OP_INVERT = CScriptOp(0x83) -OP_AND = CScriptOp(0x84) -OP_OR = CScriptOp(0x85) -OP_XOR = CScriptOp(0x86) -OP_EQUAL = CScriptOp(0x87) 
-OP_EQUALVERIFY = CScriptOp(0x88) -OP_RESERVED1 = CScriptOp(0x89) -OP_RESERVED2 = CScriptOp(0x8a) - -# numeric -OP_1ADD = CScriptOp(0x8b) -OP_1SUB = CScriptOp(0x8c) -OP_2MUL = CScriptOp(0x8d) -OP_2DIV = CScriptOp(0x8e) -OP_NEGATE = CScriptOp(0x8f) -OP_ABS = CScriptOp(0x90) -OP_NOT = CScriptOp(0x91) -OP_0NOTEQUAL = CScriptOp(0x92) - -OP_ADD = CScriptOp(0x93) -OP_SUB = CScriptOp(0x94) -OP_MUL = CScriptOp(0x95) -OP_DIV = CScriptOp(0x96) -OP_MOD = CScriptOp(0x97) -OP_LSHIFT = CScriptOp(0x98) -OP_RSHIFT = CScriptOp(0x99) - -OP_BOOLAND = CScriptOp(0x9a) -OP_BOOLOR = CScriptOp(0x9b) -OP_NUMEQUAL = CScriptOp(0x9c) -OP_NUMEQUALVERIFY = CScriptOp(0x9d) -OP_NUMNOTEQUAL = CScriptOp(0x9e) -OP_LESSTHAN = CScriptOp(0x9f) -OP_GREATERTHAN = CScriptOp(0xa0) -OP_LESSTHANOREQUAL = CScriptOp(0xa1) -OP_GREATERTHANOREQUAL = CScriptOp(0xa2) -OP_MIN = CScriptOp(0xa3) -OP_MAX = CScriptOp(0xa4) - -OP_WITHIN = CScriptOp(0xa5) - -# crypto -OP_RIPEMD160 = CScriptOp(0xa6) -OP_SHA1 = CScriptOp(0xa7) -OP_SHA256 = CScriptOp(0xa8) -OP_HASH160 = CScriptOp(0xa9) -OP_HASH256 = CScriptOp(0xaa) -OP_CODESEPARATOR = CScriptOp(0xab) -OP_CHECKSIG = CScriptOp(0xac) -OP_CHECKSIGVERIFY = CScriptOp(0xad) -OP_CHECKMULTISIG = CScriptOp(0xae) -OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf) - -# expansion -OP_NOP1 = CScriptOp(0xb0) -OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1) -OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2) -OP_NOP4 = CScriptOp(0xb3) -OP_NOP5 = CScriptOp(0xb4) -OP_NOP6 = CScriptOp(0xb5) -OP_NOP7 = CScriptOp(0xb6) -OP_NOP8 = CScriptOp(0xb7) -OP_NOP9 = CScriptOp(0xb8) -OP_NOP10 = CScriptOp(0xb9) - -# template matching params -OP_SMALLINTEGER = CScriptOp(0xfa) -OP_PUBKEYS = CScriptOp(0xfb) -OP_PUBKEYHASH = CScriptOp(0xfd) -OP_PUBKEY = CScriptOp(0xfe) - -OP_INVALIDOPCODE = CScriptOp(0xff) - -VALID_OPCODES = { - OP_1NEGATE, - OP_RESERVED, - OP_1, - OP_2, - OP_3, - OP_4, - OP_5, - OP_6, - OP_7, - OP_8, - OP_9, - OP_10, - OP_11, - OP_12, - OP_13, - OP_14, - OP_15, - OP_16, - - OP_NOP, - OP_VER, - OP_IF, - OP_NOTIF, - 
OP_VERIF, - OP_VERNOTIF, - OP_ELSE, - OP_ENDIF, - OP_VERIFY, - OP_RETURN, - - OP_TOALTSTACK, - OP_FROMALTSTACK, - OP_2DROP, - OP_2DUP, - OP_3DUP, - OP_2OVER, - OP_2ROT, - OP_2SWAP, - OP_IFDUP, - OP_DEPTH, - OP_DROP, - OP_DUP, - OP_NIP, - OP_OVER, - OP_PICK, - OP_ROLL, - OP_ROT, - OP_SWAP, - OP_TUCK, - - OP_CAT, - OP_SUBSTR, - OP_LEFT, - OP_RIGHT, - OP_SIZE, - - OP_INVERT, - OP_AND, - OP_OR, - OP_XOR, - OP_EQUAL, - OP_EQUALVERIFY, - OP_RESERVED1, - OP_RESERVED2, - - OP_1ADD, - OP_1SUB, - OP_2MUL, - OP_2DIV, - OP_NEGATE, - OP_ABS, - OP_NOT, - OP_0NOTEQUAL, - - OP_ADD, - OP_SUB, - OP_MUL, - OP_DIV, - OP_MOD, - OP_LSHIFT, - OP_RSHIFT, - - OP_BOOLAND, - OP_BOOLOR, - OP_NUMEQUAL, - OP_NUMEQUALVERIFY, - OP_NUMNOTEQUAL, - OP_LESSTHAN, - OP_GREATERTHAN, - OP_LESSTHANOREQUAL, - OP_GREATERTHANOREQUAL, - OP_MIN, - OP_MAX, - - OP_WITHIN, - - OP_RIPEMD160, - OP_SHA1, - OP_SHA256, - OP_HASH160, - OP_HASH256, - OP_CODESEPARATOR, - OP_CHECKSIG, - OP_CHECKSIGVERIFY, - OP_CHECKMULTISIG, - OP_CHECKMULTISIGVERIFY, - - OP_NOP1, - OP_CHECKLOCKTIMEVERIFY, - OP_CHECKSEQUENCEVERIFY, - OP_NOP4, - OP_NOP5, - OP_NOP6, - OP_NOP7, - OP_NOP8, - OP_NOP9, - OP_NOP10, - - OP_SMALLINTEGER, - OP_PUBKEYS, - OP_PUBKEYHASH, - OP_PUBKEY, -} - -OPCODE_NAMES.update({ - OP_0 : 'OP_0', - OP_PUSHDATA1 : 'OP_PUSHDATA1', - OP_PUSHDATA2 : 'OP_PUSHDATA2', - OP_PUSHDATA4 : 'OP_PUSHDATA4', - OP_1NEGATE : 'OP_1NEGATE', - OP_RESERVED : 'OP_RESERVED', - OP_1 : 'OP_1', - OP_2 : 'OP_2', - OP_3 : 'OP_3', - OP_4 : 'OP_4', - OP_5 : 'OP_5', - OP_6 : 'OP_6', - OP_7 : 'OP_7', - OP_8 : 'OP_8', - OP_9 : 'OP_9', - OP_10 : 'OP_10', - OP_11 : 'OP_11', - OP_12 : 'OP_12', - OP_13 : 'OP_13', - OP_14 : 'OP_14', - OP_15 : 'OP_15', - OP_16 : 'OP_16', - OP_NOP : 'OP_NOP', - OP_VER : 'OP_VER', - OP_IF : 'OP_IF', - OP_NOTIF : 'OP_NOTIF', - OP_VERIF : 'OP_VERIF', - OP_VERNOTIF : 'OP_VERNOTIF', - OP_ELSE : 'OP_ELSE', - OP_ENDIF : 'OP_ENDIF', - OP_VERIFY : 'OP_VERIFY', - OP_RETURN : 'OP_RETURN', - OP_TOALTSTACK : 'OP_TOALTSTACK', - 
OP_FROMALTSTACK : 'OP_FROMALTSTACK', - OP_2DROP : 'OP_2DROP', - OP_2DUP : 'OP_2DUP', - OP_3DUP : 'OP_3DUP', - OP_2OVER : 'OP_2OVER', - OP_2ROT : 'OP_2ROT', - OP_2SWAP : 'OP_2SWAP', - OP_IFDUP : 'OP_IFDUP', - OP_DEPTH : 'OP_DEPTH', - OP_DROP : 'OP_DROP', - OP_DUP : 'OP_DUP', - OP_NIP : 'OP_NIP', - OP_OVER : 'OP_OVER', - OP_PICK : 'OP_PICK', - OP_ROLL : 'OP_ROLL', - OP_ROT : 'OP_ROT', - OP_SWAP : 'OP_SWAP', - OP_TUCK : 'OP_TUCK', - OP_CAT : 'OP_CAT', - OP_SUBSTR : 'OP_SUBSTR', - OP_LEFT : 'OP_LEFT', - OP_RIGHT : 'OP_RIGHT', - OP_SIZE : 'OP_SIZE', - OP_INVERT : 'OP_INVERT', - OP_AND : 'OP_AND', - OP_OR : 'OP_OR', - OP_XOR : 'OP_XOR', - OP_EQUAL : 'OP_EQUAL', - OP_EQUALVERIFY : 'OP_EQUALVERIFY', - OP_RESERVED1 : 'OP_RESERVED1', - OP_RESERVED2 : 'OP_RESERVED2', - OP_1ADD : 'OP_1ADD', - OP_1SUB : 'OP_1SUB', - OP_2MUL : 'OP_2MUL', - OP_2DIV : 'OP_2DIV', - OP_NEGATE : 'OP_NEGATE', - OP_ABS : 'OP_ABS', - OP_NOT : 'OP_NOT', - OP_0NOTEQUAL : 'OP_0NOTEQUAL', - OP_ADD : 'OP_ADD', - OP_SUB : 'OP_SUB', - OP_MUL : 'OP_MUL', - OP_DIV : 'OP_DIV', - OP_MOD : 'OP_MOD', - OP_LSHIFT : 'OP_LSHIFT', - OP_RSHIFT : 'OP_RSHIFT', - OP_BOOLAND : 'OP_BOOLAND', - OP_BOOLOR : 'OP_BOOLOR', - OP_NUMEQUAL : 'OP_NUMEQUAL', - OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY', - OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL', - OP_LESSTHAN : 'OP_LESSTHAN', - OP_GREATERTHAN : 'OP_GREATERTHAN', - OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL', - OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL', - OP_MIN : 'OP_MIN', - OP_MAX : 'OP_MAX', - OP_WITHIN : 'OP_WITHIN', - OP_RIPEMD160 : 'OP_RIPEMD160', - OP_SHA1 : 'OP_SHA1', - OP_SHA256 : 'OP_SHA256', - OP_HASH160 : 'OP_HASH160', - OP_HASH256 : 'OP_HASH256', - OP_CODESEPARATOR : 'OP_CODESEPARATOR', - OP_CHECKSIG : 'OP_CHECKSIG', - OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY', - OP_CHECKMULTISIG : 'OP_CHECKMULTISIG', - OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY', - OP_NOP1 : 'OP_NOP1', - OP_CHECKLOCKTIMEVERIFY : 'OP_CHECKLOCKTIMEVERIFY', - OP_CHECKSEQUENCEVERIFY : 'OP_CHECKSEQUENCEVERIFY', 
- OP_NOP4 : 'OP_NOP4', - OP_NOP5 : 'OP_NOP5', - OP_NOP6 : 'OP_NOP6', - OP_NOP7 : 'OP_NOP7', - OP_NOP8 : 'OP_NOP8', - OP_NOP9 : 'OP_NOP9', - OP_NOP10 : 'OP_NOP10', - OP_SMALLINTEGER : 'OP_SMALLINTEGER', - OP_PUBKEYS : 'OP_PUBKEYS', - OP_PUBKEYHASH : 'OP_PUBKEYHASH', - OP_PUBKEY : 'OP_PUBKEY', - OP_INVALIDOPCODE : 'OP_INVALIDOPCODE', -}) - -OPCODES_BY_NAME = { - 'OP_0' : OP_0, - 'OP_PUSHDATA1' : OP_PUSHDATA1, - 'OP_PUSHDATA2' : OP_PUSHDATA2, - 'OP_PUSHDATA4' : OP_PUSHDATA4, - 'OP_1NEGATE' : OP_1NEGATE, - 'OP_RESERVED' : OP_RESERVED, - 'OP_1' : OP_1, - 'OP_2' : OP_2, - 'OP_3' : OP_3, - 'OP_4' : OP_4, - 'OP_5' : OP_5, - 'OP_6' : OP_6, - 'OP_7' : OP_7, - 'OP_8' : OP_8, - 'OP_9' : OP_9, - 'OP_10' : OP_10, - 'OP_11' : OP_11, - 'OP_12' : OP_12, - 'OP_13' : OP_13, - 'OP_14' : OP_14, - 'OP_15' : OP_15, - 'OP_16' : OP_16, - 'OP_NOP' : OP_NOP, - 'OP_VER' : OP_VER, - 'OP_IF' : OP_IF, - 'OP_NOTIF' : OP_NOTIF, - 'OP_VERIF' : OP_VERIF, - 'OP_VERNOTIF' : OP_VERNOTIF, - 'OP_ELSE' : OP_ELSE, - 'OP_ENDIF' : OP_ENDIF, - 'OP_VERIFY' : OP_VERIFY, - 'OP_RETURN' : OP_RETURN, - 'OP_TOALTSTACK' : OP_TOALTSTACK, - 'OP_FROMALTSTACK' : OP_FROMALTSTACK, - 'OP_2DROP' : OP_2DROP, - 'OP_2DUP' : OP_2DUP, - 'OP_3DUP' : OP_3DUP, - 'OP_2OVER' : OP_2OVER, - 'OP_2ROT' : OP_2ROT, - 'OP_2SWAP' : OP_2SWAP, - 'OP_IFDUP' : OP_IFDUP, - 'OP_DEPTH' : OP_DEPTH, - 'OP_DROP' : OP_DROP, - 'OP_DUP' : OP_DUP, - 'OP_NIP' : OP_NIP, - 'OP_OVER' : OP_OVER, - 'OP_PICK' : OP_PICK, - 'OP_ROLL' : OP_ROLL, - 'OP_ROT' : OP_ROT, - 'OP_SWAP' : OP_SWAP, - 'OP_TUCK' : OP_TUCK, - 'OP_CAT' : OP_CAT, - 'OP_SUBSTR' : OP_SUBSTR, - 'OP_LEFT' : OP_LEFT, - 'OP_RIGHT' : OP_RIGHT, - 'OP_SIZE' : OP_SIZE, - 'OP_INVERT' : OP_INVERT, - 'OP_AND' : OP_AND, - 'OP_OR' : OP_OR, - 'OP_XOR' : OP_XOR, - 'OP_EQUAL' : OP_EQUAL, - 'OP_EQUALVERIFY' : OP_EQUALVERIFY, - 'OP_RESERVED1' : OP_RESERVED1, - 'OP_RESERVED2' : OP_RESERVED2, - 'OP_1ADD' : OP_1ADD, - 'OP_1SUB' : OP_1SUB, - 'OP_2MUL' : OP_2MUL, - 'OP_2DIV' : OP_2DIV, - 'OP_NEGATE' : OP_NEGATE, - 
'OP_ABS' : OP_ABS, - 'OP_NOT' : OP_NOT, - 'OP_0NOTEQUAL' : OP_0NOTEQUAL, - 'OP_ADD' : OP_ADD, - 'OP_SUB' : OP_SUB, - 'OP_MUL' : OP_MUL, - 'OP_DIV' : OP_DIV, - 'OP_MOD' : OP_MOD, - 'OP_LSHIFT' : OP_LSHIFT, - 'OP_RSHIFT' : OP_RSHIFT, - 'OP_BOOLAND' : OP_BOOLAND, - 'OP_BOOLOR' : OP_BOOLOR, - 'OP_NUMEQUAL' : OP_NUMEQUAL, - 'OP_NUMEQUALVERIFY' : OP_NUMEQUALVERIFY, - 'OP_NUMNOTEQUAL' : OP_NUMNOTEQUAL, - 'OP_LESSTHAN' : OP_LESSTHAN, - 'OP_GREATERTHAN' : OP_GREATERTHAN, - 'OP_LESSTHANOREQUAL' : OP_LESSTHANOREQUAL, - 'OP_GREATERTHANOREQUAL' : OP_GREATERTHANOREQUAL, - 'OP_MIN' : OP_MIN, - 'OP_MAX' : OP_MAX, - 'OP_WITHIN' : OP_WITHIN, - 'OP_RIPEMD160' : OP_RIPEMD160, - 'OP_SHA1' : OP_SHA1, - 'OP_SHA256' : OP_SHA256, - 'OP_HASH160' : OP_HASH160, - 'OP_HASH256' : OP_HASH256, - 'OP_CODESEPARATOR' : OP_CODESEPARATOR, - 'OP_CHECKSIG' : OP_CHECKSIG, - 'OP_CHECKSIGVERIFY' : OP_CHECKSIGVERIFY, - 'OP_CHECKMULTISIG' : OP_CHECKMULTISIG, - 'OP_CHECKMULTISIGVERIFY' : OP_CHECKMULTISIGVERIFY, - 'OP_NOP1' : OP_NOP1, - 'OP_CHECKLOCKTIMEVERIFY' : OP_CHECKLOCKTIMEVERIFY, - 'OP_CHECKSEQUENCEVERIFY' : OP_CHECKSEQUENCEVERIFY, - 'OP_NOP4' : OP_NOP4, - 'OP_NOP5' : OP_NOP5, - 'OP_NOP6' : OP_NOP6, - 'OP_NOP7' : OP_NOP7, - 'OP_NOP8' : OP_NOP8, - 'OP_NOP9' : OP_NOP9, - 'OP_NOP10' : OP_NOP10, - 'OP_SMALLINTEGER' : OP_SMALLINTEGER, - 'OP_PUBKEYS' : OP_PUBKEYS, - 'OP_PUBKEYHASH' : OP_PUBKEYHASH, - 'OP_PUBKEY' : OP_PUBKEY, -} - -class CScriptInvalidError(Exception): - """Base class for CScript exceptions""" - pass - -class CScriptTruncatedPushDataError(CScriptInvalidError): - """Invalid pushdata due to truncation""" - def __init__(self, msg, data): - self.data = data - super(CScriptTruncatedPushDataError, self).__init__(msg) - -# This is used, eg, for blockchain heights in coinbase scripts (bip34) -class CScriptNum(object): - def __init__(self, d=0): - self.value = d - - @staticmethod - def encode(obj): - r = bytearray(0) - if obj.value == 0: - return bytes(r) - neg = obj.value < 0 - absvalue = -obj.value 
if neg else obj.value - while (absvalue): - r.append(absvalue & 0xff) - absvalue >>= 8 - if r[-1] & 0x80: - r.append(0x80 if neg else 0) - elif neg: - r[-1] |= 0x80 - return bytes(bchr(len(r)) + r) - - -class CScript(bytes): - """Serialized script - - A bytes subclass, so you can use this directly whenever bytes are accepted. - Note that this means that indexing does *not* work - you'll get an index by - byte rather than opcode. This format was chosen for efficiency so that the - general case would not require creating a lot of little CScriptOP objects. - - iter(script) however does iterate by opcode. - """ - @classmethod - def __coerce_instance(cls, other): - # Coerce other into bytes - if isinstance(other, CScriptOp): - other = bchr(other) - elif isinstance(other, CScriptNum): - if (other.value == 0): - other = bchr(CScriptOp(OP_0)) - else: - other = CScriptNum.encode(other) - elif isinstance(other, int): - if 0 <= other <= 16: - other = bytes(bchr(CScriptOp.encode_op_n(other))) - elif other == -1: - other = bytes(bchr(OP_1NEGATE)) - else: - other = CScriptOp.encode_op_pushdata(bn2vch(other)) - elif isinstance(other, (bytes, bytearray)): - other = CScriptOp.encode_op_pushdata(other) - return other - - def __add__(self, other): - # Do the coercion outside of the try block so that errors in it are - # noticed. 
- other = self.__coerce_instance(other) - - try: - # bytes.__add__ always returns bytes instances unfortunately - return CScript(super(CScript, self).__add__(other)) - except TypeError: - raise TypeError('Can not add a %r instance to a CScript' % other.__class__) - - def join(self, iterable): - # join makes no sense for a CScript() - raise NotImplementedError - - def __new__(cls, value=b''): - if isinstance(value, bytes) or isinstance(value, bytearray): - return super(CScript, cls).__new__(cls, value) - else: - def coerce_iterable(iterable): - for instance in iterable: - yield cls.__coerce_instance(instance) - # Annoyingly on both python2 and python3 bytes.join() always - # returns a bytes instance even when subclassed. - return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value))) - - def raw_iter(self): - """Raw iteration - - Yields tuples of (opcode, data, sop_idx) so that the different possible - PUSHDATA encodings can be accurately distinguished, as well as - determining the exact opcode byte indexes. 
(sop_idx) - """ - i = 0 - while i < len(self): - sop_idx = i - opcode = bord(self[i]) - i += 1 - - if opcode > OP_PUSHDATA4: - yield (opcode, None, sop_idx) - else: - datasize = None - pushdata_type = None - if opcode < OP_PUSHDATA1: - pushdata_type = 'PUSHDATA(%d)' % opcode - datasize = opcode - - elif opcode == OP_PUSHDATA1: - pushdata_type = 'PUSHDATA1' - if i >= len(self): - raise CScriptInvalidError('PUSHDATA1: missing data length') - datasize = bord(self[i]) - i += 1 - - elif opcode == OP_PUSHDATA2: - pushdata_type = 'PUSHDATA2' - if i + 1 >= len(self): - raise CScriptInvalidError('PUSHDATA2: missing data length') - datasize = bord(self[i]) + (bord(self[i+1]) << 8) - i += 2 - - elif opcode == OP_PUSHDATA4: - pushdata_type = 'PUSHDATA4' - if i + 3 >= len(self): - raise CScriptInvalidError('PUSHDATA4: missing data length') - datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24) - i += 4 - - else: - assert False # shouldn't happen - - - data = bytes(self[i:i+datasize]) - - # Check for truncation - if len(data) < datasize: - raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data) - - i += datasize - - yield (opcode, data, sop_idx) - - def __iter__(self): - """'Cooked' iteration - - Returns either a CScriptOP instance, an integer, or bytes, as - appropriate. - - See raw_iter() if you need to distinguish the different possible - PUSHDATA encodings. 
- """ - for (opcode, data, sop_idx) in self.raw_iter(): - if data is not None: - yield data - else: - opcode = CScriptOp(opcode) - - if opcode.is_small_int(): - yield opcode.decode_op_n() - else: - yield CScriptOp(opcode) - - def __repr__(self): - # For Python3 compatibility add b before strings so testcases don't - # need to change - def _repr(o): - if isinstance(o, bytes): - return b"x('%s')" % hexlify(o).decode('ascii') - else: - return repr(o) - - ops = [] - i = iter(self) - while True: - op = None - try: - op = _repr(next(i)) - except CScriptTruncatedPushDataError as err: - op = '%s...<ERROR: %s>' % (_repr(err.data), err) - break - except CScriptInvalidError as err: - op = '<ERROR: %s>' % err - break - except StopIteration: - break - finally: - if op is not None: - ops.append(op) - - return "CScript([%s])" % ', '.join(ops) - - def GetSigOpCount(self, fAccurate): - """Get the SigOp count. - - fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details. - - Note that this is consensus-critical. 
- """ - n = 0 - lastOpcode = OP_INVALIDOPCODE - for (opcode, data, sop_idx) in self.raw_iter(): - if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY): - n += 1 - elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): - if fAccurate and (OP_1 <= lastOpcode <= OP_16): - n += opcode.decode_op_n() - else: - n += 20 - lastOpcode = opcode - return n - - -SIGHASH_ALL = 1 -SIGHASH_NONE = 2 -SIGHASH_SINGLE = 3 -SIGHASH_ANYONECANPAY = 0x80 - -def FindAndDelete(script, sig): - """Consensus critical, see FindAndDelete() in Satoshi codebase""" - r = b'' - last_sop_idx = sop_idx = 0 - skip = True - for (opcode, data, sop_idx) in script.raw_iter(): - if not skip: - r += script[last_sop_idx:sop_idx] - last_sop_idx = sop_idx - if script[sop_idx:sop_idx + len(sig)] == sig: - skip = True - else: - skip = False - if not skip: - r += script[last_sop_idx:] - return CScript(r) - - -def SignatureHash(script, txTo, inIdx, hashtype): - """Consensus-correct SignatureHash - - Returns (hash, err) to precisely match the consensus-critical behavior of - the SIGHASH_SINGLE bug. 
(inIdx is *not* checked for validity) - """ - HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - - if inIdx >= len(txTo.vin): - return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin))) - txtmp = CTransaction(txTo) - - for txin in txtmp.vin: - txin.scriptSig = b'' - txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR])) - - if (hashtype & 0x1f) == SIGHASH_NONE: - txtmp.vout = [] - - for i in range(len(txtmp.vin)): - if i != inIdx: - txtmp.vin[i].nSequence = 0 - - elif (hashtype & 0x1f) == SIGHASH_SINGLE: - outIdx = inIdx - if outIdx >= len(txtmp.vout): - return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout))) - - tmp = txtmp.vout[outIdx] - txtmp.vout = [] - for i in range(outIdx): - txtmp.vout.append(CTxOut(-1)) - txtmp.vout.append(tmp) - - for i in range(len(txtmp.vin)): - if i != inIdx: - txtmp.vin[i].nSequence = 0 - - if hashtype & SIGHASH_ANYONECANPAY: - tmp = txtmp.vin[inIdx] - txtmp.vin = [] - txtmp.vin.append(tmp) - - s = txtmp.serialize() - s += struct.pack(b"<I", hashtype) - - hash = hash256(s) - - return (hash, None) - -# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided. -# Performance optimization probably not necessary for python tests, however. -# Note that this corresponds to sigversion == 1 in EvalScript, which is used -# for version 0 witnesses. 
-def SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, amount): - - hashPrevouts = 0 - hashSequence = 0 - hashOutputs = 0 - - if not (hashtype & SIGHASH_ANYONECANPAY): - serialize_prevouts = bytes() - for i in txTo.vin: - serialize_prevouts += i.prevout.serialize() - hashPrevouts = uint256_from_str(hash256(serialize_prevouts)) - - if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE): - serialize_sequence = bytes() - for i in txTo.vin: - serialize_sequence += struct.pack("<I", i.nSequence) - hashSequence = uint256_from_str(hash256(serialize_sequence)) - - if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE): - serialize_outputs = bytes() - for o in txTo.vout: - serialize_outputs += o.serialize() - hashOutputs = uint256_from_str(hash256(serialize_outputs)) - elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)): - serialize_outputs = txTo.vout[inIdx].serialize() - hashOutputs = uint256_from_str(hash256(serialize_outputs)) - - ss = bytes() - ss += struct.pack("<i", txTo.nVersion) - ss += ser_uint256(hashPrevouts) - ss += ser_uint256(hashSequence) - ss += txTo.vin[inIdx].prevout.serialize() - ss += ser_string(script) - ss += struct.pack("<q", amount) - ss += struct.pack("<I", txTo.vin[inIdx].nSequence) - ss += ser_uint256(hashOutputs) - ss += struct.pack("<i", txTo.nLockTime) - ss += struct.pack("<I", hashtype) - - return hash256(ss) diff --git a/qa/rpc-tests/test_framework/siphash.py b/qa/rpc-tests/test_framework/siphash.py deleted file mode 100644 index f68ecad36b..0000000000 --- a/qa/rpc-tests/test_framework/siphash.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Specialized SipHash-2-4 implementations. 
- -This implements SipHash-2-4 for 256-bit integers. -""" - -def rotl64(n, b): - return n >> (64 - b) | (n & ((1 << (64 - b)) - 1)) << b - -def siphash_round(v0, v1, v2, v3): - v0 = (v0 + v1) & ((1 << 64) - 1) - v1 = rotl64(v1, 13) - v1 ^= v0 - v0 = rotl64(v0, 32) - v2 = (v2 + v3) & ((1 << 64) - 1) - v3 = rotl64(v3, 16) - v3 ^= v2 - v0 = (v0 + v3) & ((1 << 64) - 1) - v3 = rotl64(v3, 21) - v3 ^= v0 - v2 = (v2 + v1) & ((1 << 64) - 1) - v1 = rotl64(v1, 17) - v1 ^= v2 - v2 = rotl64(v2, 32) - return (v0, v1, v2, v3) - -def siphash256(k0, k1, h): - n0 = h & ((1 << 64) - 1) - n1 = (h >> 64) & ((1 << 64) - 1) - n2 = (h >> 128) & ((1 << 64) - 1) - n3 = (h >> 192) & ((1 << 64) - 1) - v0 = 0x736f6d6570736575 ^ k0 - v1 = 0x646f72616e646f6d ^ k1 - v2 = 0x6c7967656e657261 ^ k0 - v3 = 0x7465646279746573 ^ k1 ^ n0 - v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) - v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) - v0 ^= n0 - v3 ^= n1 - v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) - v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) - v0 ^= n1 - v3 ^= n2 - v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) - v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) - v0 ^= n2 - v3 ^= n3 - v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) - v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) - v0 ^= n3 - v3 ^= 0x2000000000000000 - v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) - v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) - v0 ^= 0x2000000000000000 - v2 ^= 0xFF - v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) - v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) - v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) - v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) - return v0 ^ v1 ^ v2 ^ v3 diff --git a/qa/rpc-tests/test_framework/socks5.py b/qa/rpc-tests/test_framework/socks5.py deleted file mode 100644 index dd7624d454..0000000000 --- a/qa/rpc-tests/test_framework/socks5.py +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2016 The Bitcoin Core developers -# Distributed under the MIT 
software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Dummy Socks5 server for testing.""" - -import socket, threading, queue -import traceback, sys -import logging - -logger = logging.getLogger("TestFramework.socks5") - -### Protocol constants -class Command: - CONNECT = 0x01 - -class AddressType: - IPV4 = 0x01 - DOMAINNAME = 0x03 - IPV6 = 0x04 - -### Utility functions -def recvall(s, n): - """Receive n bytes from a socket, or fail.""" - rv = bytearray() - while n > 0: - d = s.recv(n) - if not d: - raise IOError('Unexpected end of stream') - rv.extend(d) - n -= len(d) - return rv - -### Implementation classes -class Socks5Configuration(object): - """Proxy configuration.""" - def __init__(self): - self.addr = None # Bind address (must be set) - self.af = socket.AF_INET # Bind address family - self.unauth = False # Support unauthenticated - self.auth = False # Support authentication - -class Socks5Command(object): - """Information about an incoming socks5 command.""" - def __init__(self, cmd, atyp, addr, port, username, password): - self.cmd = cmd # Command (one of Command.*) - self.atyp = atyp # Address type (one of AddressType.*) - self.addr = addr # Address - self.port = port # Port to connect to - self.username = username - self.password = password - def __repr__(self): - return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password) - -class Socks5Connection(object): - def __init__(self, serv, conn, peer): - self.serv = serv - self.conn = conn - self.peer = peer - - def handle(self): - """Handle socks5 request according to RFC192.""" - try: - # Verify socks version - ver = recvall(self.conn, 1)[0] - if ver != 0x05: - raise IOError('Invalid socks version %i' % ver) - # Choose authentication method - nmethods = recvall(self.conn, 1)[0] - methods = bytearray(recvall(self.conn, nmethods)) - method = None - if 0x02 in methods and self.serv.conf.auth: - 
method = 0x02 # username/password - elif 0x00 in methods and self.serv.conf.unauth: - method = 0x00 # unauthenticated - if method is None: - raise IOError('No supported authentication method was offered') - # Send response - self.conn.sendall(bytearray([0x05, method])) - # Read authentication (optional) - username = None - password = None - if method == 0x02: - ver = recvall(self.conn, 1)[0] - if ver != 0x01: - raise IOError('Invalid auth packet version %i' % ver) - ulen = recvall(self.conn, 1)[0] - username = str(recvall(self.conn, ulen)) - plen = recvall(self.conn, 1)[0] - password = str(recvall(self.conn, plen)) - # Send authentication response - self.conn.sendall(bytearray([0x01, 0x00])) - - # Read connect request - (ver,cmd,rsv,atyp) = recvall(self.conn, 4) - if ver != 0x05: - raise IOError('Invalid socks version %i in connect request' % ver) - if cmd != Command.CONNECT: - raise IOError('Unhandled command %i in connect request' % cmd) - - if atyp == AddressType.IPV4: - addr = recvall(self.conn, 4) - elif atyp == AddressType.DOMAINNAME: - n = recvall(self.conn, 1)[0] - addr = recvall(self.conn, n) - elif atyp == AddressType.IPV6: - addr = recvall(self.conn, 16) - else: - raise IOError('Unknown address type %i' % atyp) - port_hi,port_lo = recvall(self.conn, 2) - port = (port_hi << 8) | port_lo - - # Send dummy response - self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])) - - cmdin = Socks5Command(cmd, atyp, addr, port, username, password) - self.serv.queue.put(cmdin) - logger.info('Proxy: %s', cmdin) - # Fall through to disconnect - except Exception as e: - logger.exception("socks5 request handling failed.") - self.serv.queue.put(e) - finally: - self.conn.close() - -class Socks5Server(object): - def __init__(self, conf): - self.conf = conf - self.s = socket.socket(conf.af) - self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - self.s.bind(conf.addr) - self.s.listen(5) - self.running = False - self.thread = None - 
self.queue = queue.Queue() # report connections and exceptions to client - - def run(self): - while self.running: - (sockconn, peer) = self.s.accept() - if self.running: - conn = Socks5Connection(self, sockconn, peer) - thread = threading.Thread(None, conn.handle) - thread.daemon = True - thread.start() - - def start(self): - assert(not self.running) - self.running = True - self.thread = threading.Thread(None, self.run) - self.thread.daemon = True - self.thread.start() - - def stop(self): - self.running = False - # connect to self to end run loop - s = socket.socket(self.conf.af) - s.connect(self.conf.addr) - s.close() - self.thread.join() - diff --git a/qa/rpc-tests/test_framework/test_framework.py b/qa/rpc-tests/test_framework/test_framework.py deleted file mode 100755 index d7072fa78d..0000000000 --- a/qa/rpc-tests/test_framework/test_framework.py +++ /dev/null @@ -1,244 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2016 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Base class for RPC testing.""" - -import logging -import optparse -import os -import sys -import shutil -import tempfile -import traceback - -from .util import ( - initialize_chain, - start_nodes, - connect_nodes_bi, - sync_blocks, - sync_mempools, - stop_nodes, - stop_node, - enable_coverage, - check_json_precision, - initialize_chain_clean, - PortSeed, -) -from .authproxy import JSONRPCException - - -class BitcoinTestFramework(object): - - def __init__(self): - self.num_nodes = 4 - self.setup_clean_chain = False - self.nodes = None - - def run_test(self): - raise NotImplementedError - - def add_options(self, parser): - pass - - def setup_chain(self): - self.log.info("Initializing test directory "+self.options.tmpdir) - if self.setup_clean_chain: - initialize_chain_clean(self.options.tmpdir, self.num_nodes) - else: - initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir) - - def stop_node(self, num_node): - stop_node(self.nodes[num_node], num_node) - - def setup_nodes(self): - return start_nodes(self.num_nodes, self.options.tmpdir) - - def setup_network(self, split = False): - self.nodes = self.setup_nodes() - - # Connect the nodes as a "chain". This allows us - # to split the network between nodes 1 and 2 to get - # two halves that can work on competing chains. - - # If we joined network halves, connect the nodes from the joint - # on outward. This ensures that chains are properly reorganised. - if not split: - connect_nodes_bi(self.nodes, 1, 2) - sync_blocks(self.nodes[1:3]) - sync_mempools(self.nodes[1:3]) - - connect_nodes_bi(self.nodes, 0, 1) - connect_nodes_bi(self.nodes, 2, 3) - self.is_network_split = split - self.sync_all() - - def split_network(self): - """ - Split the network of four nodes into nodes 0/1 and 2/3. 
- """ - assert not self.is_network_split - stop_nodes(self.nodes) - self.setup_network(True) - - def sync_all(self): - if self.is_network_split: - sync_blocks(self.nodes[:2]) - sync_blocks(self.nodes[2:]) - sync_mempools(self.nodes[:2]) - sync_mempools(self.nodes[2:]) - else: - sync_blocks(self.nodes) - sync_mempools(self.nodes) - - def join_network(self): - """ - Join the (previously split) network halves together. - """ - assert self.is_network_split - stop_nodes(self.nodes) - self.setup_network(False) - - def main(self): - - parser = optparse.OptionParser(usage="%prog [options]") - parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true", - help="Leave bitcoinds and test.* datadir on exit or error") - parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true", - help="Don't stop bitcoinds after the test execution") - parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"), - help="Source directory containing bitcoind/bitcoin-cli (default: %default)") - parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"), - help="Directory for caching pregenerated datadirs") - parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"), - help="Root directory for datadirs") - parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO", - help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. 
Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") - parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true", - help="Print out all RPC calls as they are made") - parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int', - help="The seed to use for assigning port numbers (default: current process id)") - parser.add_option("--coveragedir", dest="coveragedir", - help="Write tested RPC commands into this directory") - self.add_options(parser) - (self.options, self.args) = parser.parse_args() - - # backup dir variable for removal at cleanup - self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed) - - if self.options.coveragedir: - enable_coverage(self.options.coveragedir) - - PortSeed.n = self.options.port_seed - - os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH'] - - check_json_precision() - - # Set up temp directory and start logging - os.makedirs(self.options.tmpdir, exist_ok=False) - self._start_logging() - - success = False - - try: - self.setup_chain() - self.setup_network() - self.run_test() - success = True - except JSONRPCException as e: - self.log.exception("JSONRPC error") - except AssertionError as e: - self.log.exception("Assertion failed") - except KeyError as e: - self.log.exception("Key error") - except Exception as e: - self.log.exception("Unexpected exception caught during testing") - except KeyboardInterrupt as e: - self.log.warning("Exiting after keyboard interrupt") - - if not self.options.noshutdown: - self.log.info("Stopping nodes") - stop_nodes(self.nodes) - else: - self.log.info("Note: bitcoinds were not stopped and may still be running") - - if not self.options.nocleanup and not self.options.noshutdown and success: - self.log.info("Cleaning up") - shutil.rmtree(self.options.tmpdir) - if not os.listdir(self.options.root): - 
os.rmdir(self.options.root) - else: - self.log.warning("Not cleaning up dir %s" % self.options.tmpdir) - if os.getenv("PYTHON_DEBUG", ""): - # Dump the end of the debug logs, to aid in debugging rare - # travis failures. - import glob - filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log") - MAX_LINES_TO_PRINT = 1000 - for f in filenames: - print("From" , f, ":") - from collections import deque - print("".join(deque(open(f), MAX_LINES_TO_PRINT))) - if success: - self.log.info("Tests successful") - sys.exit(0) - else: - self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir) - logging.shutdown() - sys.exit(1) - - def _start_logging(self): - # Add logger and logging handlers - self.log = logging.getLogger('TestFramework') - self.log.setLevel(logging.DEBUG) - # Create file handler to log all messages - fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log') - fh.setLevel(logging.DEBUG) - # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel. - ch = logging.StreamHandler(sys.stdout) - # User can provide log level as a number or string (eg DEBUG). 
loglevel was caught as a string, so try to convert it to an int - ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper() - ch.setLevel(ll) - # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted) - formatter = logging.Formatter(fmt = '%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S') - fh.setFormatter(formatter) - ch.setFormatter(formatter) - # add the handlers to the logger - self.log.addHandler(fh) - self.log.addHandler(ch) - - if self.options.trace_rpc: - rpc_logger = logging.getLogger("BitcoinRPC") - rpc_logger.setLevel(logging.DEBUG) - rpc_handler = logging.StreamHandler(sys.stdout) - rpc_handler.setLevel(logging.DEBUG) - rpc_logger.addHandler(rpc_handler) - -# Test framework for doing p2p comparison testing, which sets up some bitcoind -# binaries: -# 1 binary: test binary -# 2 binaries: 1 test binary, 1 ref binary -# n>2 binaries: 1 test binary, n-1 ref binaries - -class ComparisonTestFramework(BitcoinTestFramework): - - def __init__(self): - super().__init__() - self.num_nodes = 2 - self.setup_clean_chain = True - - def add_options(self, parser): - parser.add_option("--testbinary", dest="testbinary", - default=os.getenv("BITCOIND", "bitcoind"), - help="bitcoind binary to test") - parser.add_option("--refbinary", dest="refbinary", - default=os.getenv("BITCOIND", "bitcoind"), - help="bitcoind binary to use for reference nodes (if any)") - - def setup_network(self): - self.nodes = start_nodes( - self.num_nodes, self.options.tmpdir, - extra_args=[['-whitelist=127.0.0.1']] * self.num_nodes, - binary=[self.options.testbinary] + - [self.options.refbinary]*(self.num_nodes-1)) diff --git a/qa/rpc-tests/test_framework/util.py b/qa/rpc-tests/test_framework/util.py deleted file mode 100644 index 23ac324510..0000000000 --- a/qa/rpc-tests/test_framework/util.py +++ /dev/null @@ -1,670 +0,0 @@ -#!/usr/bin/env python3 -# 
Copyright (c) 2014-2016 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Helpful routines for regression testing.""" - -import os -import sys - -from binascii import hexlify, unhexlify -from base64 import b64encode -from decimal import Decimal, ROUND_DOWN -import json -import http.client -import random -import shutil -import subprocess -import tempfile -import time -import re -import errno -import logging - -from . import coverage -from .authproxy import AuthServiceProxy, JSONRPCException - -COVERAGE_DIR = None - -logger = logging.getLogger("TestFramework.utils") - -# The maximum number of nodes a single test can spawn -MAX_NODES = 8 -# Don't assign rpc or p2p ports lower than this -PORT_MIN = 11000 -# The number of ports to "reserve" for p2p and rpc, each -PORT_RANGE = 5000 - -BITCOIND_PROC_WAIT_TIMEOUT = 60 - - -class PortSeed: - # Must be initialized with a unique integer for each process - n = None - -#Set Mocktime default to OFF. -#MOCKTIME is only needed for scripts that use the -#cached version of the blockchain. If the cached -#version of the blockchain is used without MOCKTIME -#then the mempools will not sync due to IBD. 
-MOCKTIME = 0 - -def enable_mocktime(): - #For backwared compatibility of the python scripts - #with previous versions of the cache, set MOCKTIME - #to Jan 1, 2014 + (201 * 10 * 60) - global MOCKTIME - MOCKTIME = 1388534400 + (201 * 10 * 60) - -def disable_mocktime(): - global MOCKTIME - MOCKTIME = 0 - -def get_mocktime(): - return MOCKTIME - -def enable_coverage(dirname): - """Maintain a log of which RPC calls are made during testing.""" - global COVERAGE_DIR - COVERAGE_DIR = dirname - - -def get_rpc_proxy(url, node_number, timeout=None): - """ - Args: - url (str): URL of the RPC server to call - node_number (int): the node number (or id) that this calls to - - Kwargs: - timeout (int): HTTP timeout in seconds - - Returns: - AuthServiceProxy. convenience object for making RPC calls. - - """ - proxy_kwargs = {} - if timeout is not None: - proxy_kwargs['timeout'] = timeout - - proxy = AuthServiceProxy(url, **proxy_kwargs) - proxy.url = url # store URL on proxy for info - - coverage_logfile = coverage.get_filename( - COVERAGE_DIR, node_number) if COVERAGE_DIR else None - - return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile) - - -def p2p_port(n): - assert(n <= MAX_NODES) - return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) - -def rpc_port(n): - return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) - -def check_json_precision(): - """Make sure json library being used does not lose precision converting BTC values""" - n = Decimal("20000000.00000003") - satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) - if satoshis != 2000000000000003: - raise RuntimeError("JSON encode/decode loses precision") - -def count_bytes(hex_string): - return len(bytearray.fromhex(hex_string)) - -def bytes_to_hex_str(byte_str): - return hexlify(byte_str).decode('ascii') - -def hex_str_to_bytes(hex_str): - return unhexlify(hex_str.encode('ascii')) - -def str_to_b64str(string): - return 
b64encode(string.encode('utf-8')).decode('ascii') - -def sync_blocks(rpc_connections, *, wait=1, timeout=60): - """ - Wait until everybody has the same tip. - - sync_blocks needs to be called with an rpc_connections set that has least - one node already synced to the latest, stable tip, otherwise there's a - chance it might return before all nodes are stably synced. - """ - # Use getblockcount() instead of waitforblockheight() to determine the - # initial max height because the two RPCs look at different internal global - # variables (chainActive vs latestBlock) and the former gets updated - # earlier. - maxheight = max(x.getblockcount() for x in rpc_connections) - start_time = cur_time = time.time() - while cur_time <= start_time + timeout: - tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections] - if all(t["height"] == maxheight for t in tips): - if all(t["hash"] == tips[0]["hash"] for t in tips): - return - raise AssertionError("Block sync failed, mismatched block hashes:{}".format( - "".join("\n {!r}".format(tip) for tip in tips))) - cur_time = time.time() - raise AssertionError("Block sync to height {} timed out:{}".format( - maxheight, "".join("\n {!r}".format(tip) for tip in tips))) - -def sync_chain(rpc_connections, *, wait=1, timeout=60): - """ - Wait until everybody has the same best block - """ - while timeout > 0: - best_hash = [x.getbestblockhash() for x in rpc_connections] - if best_hash == [best_hash[0]]*len(best_hash): - return - time.sleep(wait) - timeout -= wait - raise AssertionError("Chain sync failed: Best block hashes don't match") - -def sync_mempools(rpc_connections, *, wait=1, timeout=60): - """ - Wait until everybody has the same transactions in their memory - pools - """ - while timeout > 0: - pool = set(rpc_connections[0].getrawmempool()) - num_match = 1 - for i in range(1, len(rpc_connections)): - if set(rpc_connections[i].getrawmempool()) == pool: - num_match = num_match+1 - if num_match == 
len(rpc_connections): - return - time.sleep(wait) - timeout -= wait - raise AssertionError("Mempool sync failed") - -bitcoind_processes = {} - -def initialize_datadir(dirname, n): - datadir = os.path.join(dirname, "node"+str(n)) - if not os.path.isdir(datadir): - os.makedirs(datadir) - rpc_u, rpc_p = rpc_auth_pair(n) - with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f: - f.write("regtest=1\n") - f.write("rpcuser=" + rpc_u + "\n") - f.write("rpcpassword=" + rpc_p + "\n") - f.write("port="+str(p2p_port(n))+"\n") - f.write("rpcport="+str(rpc_port(n))+"\n") - f.write("listenonion=0\n") - return datadir - -def rpc_auth_pair(n): - return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n) - -def rpc_url(i, rpchost=None): - rpc_u, rpc_p = rpc_auth_pair(i) - host = '127.0.0.1' - port = rpc_port(i) - if rpchost: - parts = rpchost.split(':') - if len(parts) == 2: - host, port = parts - else: - host = rpchost - return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port)) - -def wait_for_bitcoind_start(process, url, i): - ''' - Wait for bitcoind to start. This means that RPC is accessible and fully initialized. - Raise an exception if bitcoind exits during initialization. - ''' - while True: - if process.poll() is not None: - raise Exception('bitcoind exited with status %i during initialization' % process.returncode) - try: - rpc = get_rpc_proxy(url, i) - blocks = rpc.getblockcount() - break # break out of loop on success - except IOError as e: - if e.errno != errno.ECONNREFUSED: # Port not yet open? - raise # unknown IO error - except JSONRPCException as e: # Initialization phase - if e.error['code'] != -28: # RPC in warmup? 
- raise # unknown JSON RPC exception - time.sleep(0.25) - -def initialize_chain(test_dir, num_nodes, cachedir): - """ - Create a cache of a 200-block-long chain (with wallet) for MAX_NODES - Afterward, create num_nodes copies from the cache - """ - - assert num_nodes <= MAX_NODES - create_cache = False - for i in range(MAX_NODES): - if not os.path.isdir(os.path.join(cachedir, 'node'+str(i))): - create_cache = True - break - - if create_cache: - logger.debug("Creating data directories from cached datadir") - - #find and delete old cache directories if any exist - for i in range(MAX_NODES): - if os.path.isdir(os.path.join(cachedir,"node"+str(i))): - shutil.rmtree(os.path.join(cachedir,"node"+str(i))) - - # Create cache directories, run bitcoinds: - for i in range(MAX_NODES): - datadir=initialize_datadir(cachedir, i) - args = [ os.getenv("BITCOIND", "bitcoind"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ] - if i > 0: - args.append("-connect=127.0.0.1:"+str(p2p_port(0))) - bitcoind_processes[i] = subprocess.Popen(args) - logger.debug("initialize_chain: bitcoind started, waiting for RPC to come up") - wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i) - logger.debug("initialize_chain: RPC successfully started") - - rpcs = [] - for i in range(MAX_NODES): - try: - rpcs.append(get_rpc_proxy(rpc_url(i), i)) - except: - sys.stderr.write("Error connecting to "+url+"\n") - sys.exit(1) - - # Create a 200-block-long chain; each of the 4 first nodes - # gets 25 mature blocks and 25 immature. - # Note: To preserve compatibility with older versions of - # initialize_chain, only 4 nodes will generate coins. 
- # - # blocks are created with timestamps 10 minutes apart - # starting from 2010 minutes in the past - enable_mocktime() - block_time = get_mocktime() - (201 * 10 * 60) - for i in range(2): - for peer in range(4): - for j in range(25): - set_node_times(rpcs, block_time) - rpcs[peer].generate(1) - block_time += 10*60 - # Must sync before next peer starts generating blocks - sync_blocks(rpcs) - - # Shut them down, and clean up cache directories: - stop_nodes(rpcs) - disable_mocktime() - for i in range(MAX_NODES): - os.remove(log_filename(cachedir, i, "debug.log")) - os.remove(log_filename(cachedir, i, "db.log")) - os.remove(log_filename(cachedir, i, "peers.dat")) - os.remove(log_filename(cachedir, i, "fee_estimates.dat")) - - for i in range(num_nodes): - from_dir = os.path.join(cachedir, "node"+str(i)) - to_dir = os.path.join(test_dir, "node"+str(i)) - shutil.copytree(from_dir, to_dir) - initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf - -def initialize_chain_clean(test_dir, num_nodes): - """ - Create an empty blockchain and num_nodes wallets. - Useful if a test case wants complete control over initialization. 
- """ - for i in range(num_nodes): - datadir=initialize_datadir(test_dir, i) - - -def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None): - """ - Start a bitcoind and return RPC connection to it - """ - datadir = os.path.join(dirname, "node"+str(i)) - if binary is None: - binary = os.getenv("BITCOIND", "bitcoind") - args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-mocktime="+str(get_mocktime()) ] - if extra_args is not None: args.extend(extra_args) - bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr) - logger.debug("initialize_chain: bitcoind started, waiting for RPC to come up") - url = rpc_url(i, rpchost) - wait_for_bitcoind_start(bitcoind_processes[i], url, i) - logger.debug("initialize_chain: RPC successfully started") - proxy = get_rpc_proxy(url, i, timeout=timewait) - - if COVERAGE_DIR: - coverage.write_all_rpc_commands(COVERAGE_DIR, proxy) - - return proxy - -def assert_start_raises_init_error(i, dirname, extra_args=None, expected_msg=None): - with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr: - try: - node = start_node(i, dirname, extra_args, stderr=log_stderr) - stop_node(node, i) - except Exception as e: - assert 'bitcoind exited' in str(e) #node must have shutdown - if expected_msg is not None: - log_stderr.seek(0) - stderr = log_stderr.read().decode('utf-8') - if expected_msg not in stderr: - raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr) - else: - if expected_msg is None: - assert_msg = "bitcoind should have exited with an error" - else: - assert_msg = "bitcoind should have exited with expected error " + expected_msg - raise AssertionError(assert_msg) - -def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None): - """ - Start multiple bitcoinds, return RPC connections to them - """ - if extra_args is None: extra_args = [ None for _ in 
range(num_nodes) ] - if binary is None: binary = [ None for _ in range(num_nodes) ] - rpcs = [] - try: - for i in range(num_nodes): - rpcs.append(start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i])) - except: # If one node failed to start, stop the others - stop_nodes(rpcs) - raise - return rpcs - -def log_filename(dirname, n_node, logname): - return os.path.join(dirname, "node"+str(n_node), "regtest", logname) - -def stop_node(node, i): - logger.debug("Stopping node %d" % i) - try: - node.stop() - except http.client.CannotSendRequest as e: - logger.exception("Unable to stop node") - return_code = bitcoind_processes[i].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT) - assert_equal(return_code, 0) - del bitcoind_processes[i] - -def stop_nodes(nodes): - for i, node in enumerate(nodes): - stop_node(node, i) - assert not bitcoind_processes.values() # All connections must be gone now - -def set_node_times(nodes, t): - for node in nodes: - node.setmocktime(t) - -def connect_nodes(from_connection, node_num): - ip_port = "127.0.0.1:"+str(p2p_port(node_num)) - from_connection.addnode(ip_port, "onetry") - # poll until version handshake complete to avoid race conditions - # with transaction relaying - while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()): - time.sleep(0.1) - -def connect_nodes_bi(nodes, a, b): - connect_nodes(nodes[a], b) - connect_nodes(nodes[b], a) - -def find_output(node, txid, amount): - """ - Return index to output of txid with value amount - Raises exception if there is none. 
- """ - txdata = node.getrawtransaction(txid, 1) - for i in range(len(txdata["vout"])): - if txdata["vout"][i]["value"] == amount: - return i - raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount))) - - -def gather_inputs(from_node, amount_needed, confirmations_required=1): - """ - Return a random set of unspent txouts that are enough to pay amount_needed - """ - assert(confirmations_required >=0) - utxo = from_node.listunspent(confirmations_required) - random.shuffle(utxo) - inputs = [] - total_in = Decimal("0.00000000") - while total_in < amount_needed and len(utxo) > 0: - t = utxo.pop() - total_in += t["amount"] - inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } ) - if total_in < amount_needed: - raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in)) - return (total_in, inputs) - -def make_change(from_node, amount_in, amount_out, fee): - """ - Create change output(s), return them - """ - outputs = {} - amount = amount_out+fee - change = amount_in - amount - if change > amount*2: - # Create an extra change output to break up big inputs - change_address = from_node.getnewaddress() - # Split change in two, being careful of rounding: - outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) - change = amount_in - amount - outputs[change_address] - if change > 0: - outputs[from_node.getnewaddress()] = change - return outputs - -def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants): - """ - Create a random transaction. 
- Returns (txid, hex-encoded-transaction-data, fee) - """ - from_node = random.choice(nodes) - to_node = random.choice(nodes) - fee = min_fee + fee_increment*random.randint(0,fee_variants) - - (total_in, inputs) = gather_inputs(from_node, amount+fee) - outputs = make_change(from_node, total_in, amount, fee) - outputs[to_node.getnewaddress()] = float(amount) - - rawtx = from_node.createrawtransaction(inputs, outputs) - signresult = from_node.signrawtransaction(rawtx) - txid = from_node.sendrawtransaction(signresult["hex"], True) - - return (txid, signresult["hex"], fee) - -def assert_fee_amount(fee, tx_size, fee_per_kB): - """Assert the fee was in range""" - target_fee = tx_size * fee_per_kB / 1000 - if fee < target_fee: - raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee))) - # allow the wallet's estimation to be at most 2 bytes off - if fee > (tx_size + 2) * fee_per_kB / 1000: - raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee))) - -def assert_equal(thing1, thing2, *args): - if thing1 != thing2 or any(thing1 != arg for arg in args): - raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args)) - -def assert_greater_than(thing1, thing2): - if thing1 <= thing2: - raise AssertionError("%s <= %s"%(str(thing1),str(thing2))) - -def assert_greater_than_or_equal(thing1, thing2): - if thing1 < thing2: - raise AssertionError("%s < %s"%(str(thing1),str(thing2))) - -def assert_raises(exc, fun, *args, **kwds): - assert_raises_message(exc, None, fun, *args, **kwds) - -def assert_raises_message(exc, message, fun, *args, **kwds): - try: - fun(*args, **kwds) - except exc as e: - if message is not None and message not in e.error['message']: - raise AssertionError("Expected substring not found:"+e.error['message']) - except Exception as e: - raise AssertionError("Unexpected exception raised: "+type(e).__name__) - else: - raise AssertionError("No exception raised") - 
def assert_raises_jsonrpc(code, message, fun, *args, **kwds):
    """Run an RPC and verify that a specific JSONRPC exception is raised.

    Invokes `fun(*args, **kwds)` and expects it to throw a JSONRPCException
    whose error code equals `code` and whose error message contains
    `message` as a substring. Pass None for either check to skip it.
    Raises AssertionError when no JSONRPCException occurs, when a different
    exception type occurs, or when the code/message do not match.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # Expected exception type seen; validate its payload.
        if code is not None and e.error["code"] != code:
            raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:"+e.error['message'])
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    else:
        raise AssertionError("No exception raised")

def assert_is_hex_string(string):
    """Fail unless *string* can be parsed as hexadecimal."""
    try:
        int(string, 16)
    except Exception as e:
        raise AssertionError(
            "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))

def assert_is_hash_string(string, length=64):
    """Fail unless *string* looks like a lowercase hex hash of *length* chars."""
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)

def assert_array_result(object_array, to_match, expected, should_not_find = False):
    """Check a list of JSON objects against key/value constraints.

    Entries of object_array whose key/value pairs all match to_match must
    also carry every key/value pair in expected. When should_not_find is
    True, assert instead that nothing matches to_match (expected must be
    empty in that case).
    """
    if should_not_find == True:
        assert_equal(expected, { })
    num_matched = 0
    for entry in object_array:
        # Does this entry satisfy every to_match constraint?
        matches = True
        for key, value in to_match.items():
            if entry[key] != value:
                matches = False
        if not matches:
            continue
        elif should_not_find == True:
            num_matched = num_matched+1
        for key, value in expected.items():
            if entry[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(entry), str(key), str(value)))
            num_matched = num_matched+1
    if num_matched == 0 and should_not_find != True:
        raise AssertionError("No objects matched %s"%(str(to_match)))
    if num_matched > 0 and should_not_find == True:
        raise AssertionError("Objects were found %s"%(str(to_match)))

def satoshi_round(amount):
    """Round *amount* down to the nearest satoshi (8 decimal places)."""
    return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)

# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    node.generate(int(0.5*count)+101)
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    for _ in range(iterations):
        spend = utxos.pop()
        send_value = spend['amount'] - fee
        # Split each spent utxo into two equal halves.
        raw_tx = node.createrawtransaction(
            [{"txid": spend["txid"], "vout": spend["vout"]}],
            {addr1: satoshi_round(send_value/2),
             addr2: satoshi_round(send_value/2)})
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        node.sendrawtransaction(signed_tx)

    # Confirm everything we just created.
    while node.getmempoolinfo()['size'] > 0:
        node.generate(1)

    utxos = node.listunspent()
    assert len(utxos) >= count
    return utxos

# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    # One large script_pubkey: OP_RETURN OP_PUSHDATA2 <512> plus 512 bytes of 0x01.
    script_pubkey = "6a4d0200" + "01" * 512
    # 0x81 declares 129 outputs; we splice in 128 big txouts ahead of the
    # change txout. Each txout: zero value (8 bytes), fd0402 = script length,
    # then the script itself.
    return "81" + ("0000000000000000" + "fd0402" + script_pubkey) * 128

def create_tx(node, coinbase, to_address, amount):
    """Spend vout 0 of *coinbase* to *to_address*; return the signed raw hex."""
    rawtx = node.createrawtransaction([{"txid": coinbase, "vout": 0}],
                                      {to_address: amount})
    signresult = node.signrawtransaction(rawtx)
    assert_equal(signresult["complete"], True)
    return signresult["hex"]

# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large.  See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        spend = utxos.pop()
        rawtx = node.createrawtransaction(
            [{"txid": spend["txid"], "vout": spend["vout"]}],
            {addr: satoshi_round(spend['amount'] - fee)})
        # Splice the large txouts in place of the single-output section.
        inflated = rawtx[0:92] + txouts + rawtx[94:]
        signresult = node.signrawtransaction(inflated, None, None, "NONE")
        txids.append(node.sendrawtransaction(signresult["hex"], True))
    return txids

def mine_large_block(node, utxos=None):
    # generate a 66k transaction,
    # and 14 of them is close to the 1MB block limit
    num = 14
    txouts = gen_return_txouts()
    utxos = utxos if utxos is not None else []
    if len(utxos) < num:
        utxos.clear()
        utxos.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
    node.generate(1)

def get_bip9_status(node, key):
    """Return the bip9_softforks entry for *key* from getblockchaininfo."""
    return node.getblockchaininfo()['bip9_softforks'][key]