Diffstat (limited to 'qa')
-rwxr-xr-x  qa/pull-tester/rpc-tests.sh         |   2
-rw-r--r--  qa/rpc-tests/README.md              |   2
-rwxr-xr-x  qa/rpc-tests/comptool.py            |  85
-rwxr-xr-x  qa/rpc-tests/maxblocksinflight.py   |   9
-rwxr-xr-x  qa/rpc-tests/mininode.py            |  67
-rwxr-xr-x  qa/rpc-tests/rawtransactions.py     |  69
-rwxr-xr-x  qa/rpc-tests/rest.py                | 186
-rwxr-xr-x  qa/rpc-tests/signrawtransactions.py | 109
-rw-r--r--  qa/rpc-tests/util.py                |   2
-rwxr-xr-x  qa/rpc-tests/wallet.py              |   2
10 files changed, 452 insertions(+), 81 deletions(-)
diff --git a/qa/pull-tester/rpc-tests.sh b/qa/pull-tester/rpc-tests.sh
index ae27a94b8d..4f82263a7a 100755
--- a/qa/pull-tester/rpc-tests.sh
+++ b/qa/pull-tester/rpc-tests.sh
@@ -22,6 +22,7 @@ testScripts=(
'txn_doublespend.py'
'txn_doublespend.py --mineblock'
'getchaintips.py'
+ 'rawtransactions.py'
'rest.py'
'mempool_spendcoinbase.py'
'mempool_coinbase_spends.py'
@@ -29,6 +30,7 @@ testScripts=(
'zapwallettxes.py'
'proxy_test.py'
'merkle_blocks.py'
+ 'signrawtransactions.py'
# 'forknotify.py'
'maxblocksinflight.py'
'invalidblockrequest.py'
diff --git a/qa/rpc-tests/README.md b/qa/rpc-tests/README.md
index 02170d13ec..d9fbb109e8 100644
--- a/qa/rpc-tests/README.md
+++ b/qa/rpc-tests/README.md
@@ -28,7 +28,7 @@ Notes
A 200-block -regtest blockchain and wallets for four nodes
is created the first time a regression test is run and
is stored in the cache/ directory. Each node has 25 mature
-blocks (25*50=1250 BTC) in their wallet.
+blocks (25*50=1250 BTC) in its wallet.
After the first run, the cache/ blockchain and wallets are
copied into a temporary directory and used as the initial
diff --git a/qa/rpc-tests/comptool.py b/qa/rpc-tests/comptool.py
index 6125bae51e..23a979250c 100755
--- a/qa/rpc-tests/comptool.py
+++ b/qa/rpc-tests/comptool.py
@@ -25,6 +25,8 @@ generator that returns TestInstance objects. See below for definition.
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
+global mininode_lock
+
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
@@ -148,10 +150,11 @@ class TestManager(object):
max_tries = 10 / sleep_time # Wait at most 10 seconds
while max_tries > 0:
done = True
- for c in self.connections:
- if c.cb.verack_received is False:
- done = False
- break
+ with mininode_lock:
+ for c in self.connections:
+ if c.cb.verack_received is False:
+ done = False
+ break
if done:
break
time.sleep(sleep_time)
@@ -161,10 +164,11 @@ class TestManager(object):
while received_pongs is not True:
time.sleep(0.05)
received_pongs = True
- for c in self.connections:
- if c.cb.received_ping_response(counter) is not True:
- received_pongs = False
- break
+ with mininode_lock:
+ for c in self.connections:
+ if c.cb.received_ping_response(counter) is not True:
+ received_pongs = False
+ break
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
@@ -173,8 +177,9 @@ class TestManager(object):
# Wait for nodes to request block (50ms sleep * 20 tries * num_blocks)
max_tries = 20*num_blocks
while max_tries > 0:
- results = [ blockhash in c.cb.block_request_map and
- c.cb.block_request_map[blockhash] for c in self.connections ]
+ with mininode_lock:
+ results = [ blockhash in c.cb.block_request_map and
+ c.cb.block_request_map[blockhash] for c in self.connections ]
if False not in results:
break
time.sleep(0.05)
@@ -199,8 +204,9 @@ class TestManager(object):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
max_tries = 20*num_events
while max_tries > 0:
- results = [ txhash in c.cb.tx_request_map and
- c.cb.tx_request_map[txhash] for c in self.connections ]
+ with mininode_lock:
+ results = [ txhash in c.cb.tx_request_map and
+ c.cb.tx_request_map[txhash] for c in self.connections ]
if False not in results:
break
time.sleep(0.05)
@@ -221,19 +227,21 @@ class TestManager(object):
self.ping_counter += 1
# Sort inv responses from each node
- [ c.cb.lastInv.sort() for c in self.connections ]
+ with mininode_lock:
+ [ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
- for c in self.connections:
- if outcome is None:
- if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
+ with mininode_lock:
+ for c in self.connections:
+ if outcome is None:
+ if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
+ return False
+ elif ((c.cb.bestblockhash == blockhash) != outcome):
+ # print c.cb.bestblockhash, blockhash, outcome
return False
- elif ((c.cb.bestblockhash == blockhash) != outcome):
- # print c.cb.bestblockhash, blockhash, outcome
- return False
- return True
+ return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
@@ -242,16 +250,17 @@ class TestManager(object):
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
- for c in self.connections:
- if outcome is None:
- # Make sure the mempools agree with each other
- if c.cb.lastInv != self.connections[0].cb.lastInv:
- # print c.rpc.getrawmempool()
+ with mininode_lock:
+ for c in self.connections:
+ if outcome is None:
+ # Make sure the mempools agree with each other
+ if c.cb.lastInv != self.connections[0].cb.lastInv:
+ # print c.rpc.getrawmempool()
+ return False
+ elif ((txhash in c.cb.lastInv) != outcome):
+ # print c.rpc.getrawmempool(), c.cb.lastInv
return False
- elif ((txhash in c.cb.lastInv) != outcome):
- # print c.rpc.getrawmempool(), c.cb.lastInv
- return False
- return True
+ return True
def run(self):
# Wait until verack is received
@@ -272,9 +281,10 @@ class TestManager(object):
block = b_or_t
block_outcome = outcome
# Add to shared block_store, set as current block
- self.block_store.add_block(block)
- for c in self.connections:
- c.cb.block_request_map[block.sha256] = False
+ with mininode_lock:
+ self.block_store.add_block(block)
+ for c in self.connections:
+ c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
@@ -288,10 +298,11 @@ class TestManager(object):
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
- # Add to shared tx store
- self.tx_store.add_transaction(tx)
- for c in self.connections:
- c.cb.tx_request_map[tx.sha256] = False
+ # Add to shared tx store and clear map entry
+ with mininode_lock:
+ self.tx_store.add_transaction(tx)
+ for c in self.connections:
+ c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
@@ -302,7 +313,7 @@ class TestManager(object):
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
- [ c.sb.send_message(msg_inv(invqueue)) for c in self.connections ]
+ [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
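All of the comptool.py hunks above apply the same idiom: reads of state that the NodeConnCB callbacks mutate from the networking thread (verack_received, block_request_map, tx_request_map, lastInv, bestblockhash) are wrapped in mininode_lock, while the sleep between polls stays outside the lock so the networking thread can keep making progress. A minimal sketch of that idiom, where flag_set is a hypothetical predicate standing in for the per-connection checks:

    # Poll-under-lock sketch; mininode_lock is the RLock added to mininode.py
    # below, flag_set is a hypothetical predicate over a callback's state.
    import time
    from mininode import mininode_lock

    def wait_until(connections, flag_set, timeout=10.0, interval=0.05):
        max_tries = int(timeout / interval)
        while max_tries > 0:
            with mininode_lock:  # hold the lock only while reading shared state
                done = all(flag_set(c.cb) for c in connections)
            if done:
                return True
            time.sleep(interval)  # sleep without holding the lock
            max_tries -= 1
        return False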
diff --git a/qa/rpc-tests/maxblocksinflight.py b/qa/rpc-tests/maxblocksinflight.py
index 94535822d8..87c80cd97e 100755
--- a/qa/rpc-tests/maxblocksinflight.py
+++ b/qa/rpc-tests/maxblocksinflight.py
@@ -61,10 +61,11 @@ class TestManager(NodeConnCB):
time.sleep(2)
total_requests = 0
- for key in self.blockReqCounts:
- total_requests += self.blockReqCounts[key]
- if self.blockReqCounts[key] > 1:
- raise AssertionError("Error, test failed: block %064x requested more than once" % key)
+ with mininode_lock:
+ for key in self.blockReqCounts:
+ total_requests += self.blockReqCounts[key]
+ if self.blockReqCounts[key] > 1:
+ raise AssertionError("Error, test failed: block %064x requested more than once" % key)
if total_requests > MAX_REQUESTS:
raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
print "Round %d: success (total requests: %d)" % (count, total_requests)
diff --git a/qa/rpc-tests/mininode.py b/qa/rpc-tests/mininode.py
index c5c1bcfbbe..b7d78e74fa 100755
--- a/qa/rpc-tests/mininode.py
+++ b/qa/rpc-tests/mininode.py
@@ -26,7 +26,7 @@ import sys
import random
import cStringIO
import hashlib
-from threading import Lock
+from threading import RLock
from threading import Thread
import logging
import copy
@@ -37,6 +37,19 @@ MY_SUBVERSION = "/python-mininode-tester:0.0.1/"
MAX_INV_SZ = 50000
+# Keep our own socket map for asyncore, so that we can track disconnects
+# ourselves (to work around an issue with closing an asyncore socket when
+# using select)
+mininode_socket_map = dict()
+
+# One lock for synchronizing all data access between the networking thread (see
+# NetworkThread below) and the thread running the test logic. For simplicity,
+# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
+# and whenever adding anything to the send buffer (in send_message()). This
+# lock should be acquired in the thread running the test logic to synchronize
+# access to any data shared with the NodeConnCB or NodeConn.
+mininode_lock = RLock()
+
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
@@ -975,10 +988,6 @@ class msg_reject(object):
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
- # Acquire on all callbacks -- overkill for now since asyncore is
- # single-threaded, but may be useful for synchronizing access to
- # member variables in derived classes.
- self.cbLock = Lock()
self.verack_received = False
# Derived classes should call this function once to set the message map
@@ -1004,7 +1013,7 @@ class NodeConnCB(object):
}
def deliver(self, conn, message):
- with self.cbLock:
+ with mininode_lock:
try:
self.cbmap[message.command](conn, message)
except:
@@ -1076,7 +1085,7 @@ class NodeConn(asyncore.dispatcher):
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest"):
- asyncore.dispatcher.__init__(self)
+ asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
@@ -1089,7 +1098,6 @@ class NodeConn(asyncore.dispatcher):
self.state = "connecting"
self.network = net
self.cb = callback
- self.sendbufLock = Lock() # for protecting the sendbuffer
self.disconnect = False
# stuff version msg into sendbuf
@@ -1140,24 +1148,18 @@ class NodeConn(asyncore.dispatcher):
return True
def writable(self):
- if self.disconnect:
- self.handle_close()
- return False
- else:
- self.sendbufLock.acquire()
+ with mininode_lock:
length = len(self.sendbuf)
- self.sendbufLock.release()
- return (length > 0)
+ return (length > 0)
def handle_write(self):
- self.sendbufLock.acquire()
- try:
- sent = self.send(self.sendbuf)
- except:
- self.handle_close()
- return
- self.sendbuf = self.sendbuf[sent:]
- self.sendbufLock.release()
+ with mininode_lock:
+ try:
+ sent = self.send(self.sendbuf)
+ except:
+ self.handle_close()
+ return
+ self.sendbuf = self.sendbuf[sent:]
def got_data(self):
while True:
@@ -1201,7 +1203,6 @@ class NodeConn(asyncore.dispatcher):
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
return
- self.sendbufLock.acquire()
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
@@ -1214,9 +1215,9 @@ class NodeConn(asyncore.dispatcher):
h = sha256(th)
tmsg += h[:4]
tmsg += data
- self.sendbuf += tmsg
- self.last_sent = time.time()
- self.sendbufLock.release()
+ with mininode_lock:
+ self.sendbuf += tmsg
+ self.last_sent = time.time()
def got_message(self, message):
if message.command == "version":
@@ -1229,12 +1230,20 @@ class NodeConn(asyncore.dispatcher):
def disconnect_node(self):
self.disconnect = True
- self.send_message(self.messagemap['ping']())
class NetworkThread(Thread):
def run(self):
- asyncore.loop(0.1, True)
+ while mininode_socket_map:
+ # We check for whether to disconnect outside of the asyncore
+ # loop to work around the behavior of asyncore when using
+ # select
+ disconnected = []
+ for fd, obj in mininode_socket_map.items():
+ if obj.disconnect:
+ disconnected.append(obj)
+ [ obj.handle_close() for obj in disconnected ]
+ asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
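For context on what send_message() assembles under the lock: a Bitcoin P2P message on the wire is the 4-byte network magic, the command name null-padded to 12 bytes, the payload length as a little-endian 32-bit integer, a 4-byte checksum (the first four bytes of the double-SHA256 of the payload, the sha256(th) step visible in the hunk above), and then the payload. A standalone sketch of that framing (the regtest magic bytes are from the protocol; the helper names are local):

    import hashlib
    import struct

    def double_sha256(s):
        return hashlib.sha256(hashlib.sha256(s).digest()).digest()

    def frame_message(command, payload, magic="\xfa\xbf\xb5\xda"):  # regtest magic
        tmsg = magic
        tmsg += command + "\x00" * (12 - len(command))  # command, null-padded to 12
        tmsg += struct.pack("<I", len(payload))         # payload length, little-endian
        tmsg += double_sha256(payload)[:4]              # checksum
        return tmsg + payload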
diff --git a/qa/rpc-tests/rawtransactions.py b/qa/rpc-tests/rawtransactions.py
new file mode 100755
index 0000000000..c02c929d8d
--- /dev/null
+++ b/qa/rpc-tests/rawtransactions.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python2
+# Copyright (c) 2014 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#
+# Test the rawtransaction RPCs
+# (createrawtransaction, signrawtransaction, sendrawtransaction)
+#
+
+from test_framework import BitcoinTestFramework
+from util import *
+from pprint import pprint
+from time import sleep
+
+# Create, sign and broadcast raw transactions:
+class RawTransactionsTest(BitcoinTestFramework):
+
+ def setup_chain(self):
+ print("Initializing test directory "+self.options.tmpdir)
+ initialize_chain_clean(self.options.tmpdir, 3)
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(3, self.options.tmpdir)
+
+ #connect to a local machine for debugging
+ #url = "http://bitcoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 18332)
+ #proxy = AuthServiceProxy(url)
+ #proxy.url = url # store URL on proxy for info
+ #self.nodes.append(proxy)
+
+ connect_nodes_bi(self.nodes,0,1)
+ connect_nodes_bi(self.nodes,1,2)
+ connect_nodes_bi(self.nodes,0,2)
+
+ self.is_network_split=False
+ self.sync_all()
+
+ def run_test(self):
+
+ #prepare some coins for multiple *rawtransaction commands
+ self.nodes[2].generate(1)
+ self.nodes[0].generate(101)
+ self.sync_all()
+ self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
+ self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
+ self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
+ self.sync_all()
+ self.nodes[0].generate(5)
+ self.sync_all()
+
+ #########################################
+ # sendrawtransaction with missing input #
+ #########################################
+ inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] # this input won't exist
+ outputs = { self.nodes[0].getnewaddress() : 4.998 }
+ rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
+ rawtx = self.nodes[2].signrawtransaction(rawtx)
+
+ errorString = ""
+ try:
+ rawtx = self.nodes[2].sendrawtransaction(rawtx['hex'])
+ except JSONRPCException, e:
+ errorString = e.error['message']
+
+ assert_equal("Missing inputs" in errorString, True);
+
+if __name__ == '__main__':
+ RawTransactionsTest().main()
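The try/except pattern above is how these python2 tests assert that an RPC call fails with a particular message. A small helper makes that intent explicit; this is a sketch, not part of the framework (JSONRPCException comes in via the framework's util import):

    def assert_rpc_error(substring, fn, *args):
        """Call fn(*args) and require it to raise a JSONRPCException
        whose message contains substring."""
        try:
            fn(*args)
        except JSONRPCException, e:
            assert substring in e.error['message'], e.error['message']
        else:
            raise AssertionError("expected RPC error containing %r" % substring)

    # usage, mirroring the test above:
    # assert_rpc_error("Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])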
diff --git a/qa/rpc-tests/rest.py b/qa/rpc-tests/rest.py
index 9b7008531c..9f0d049fe9 100755
--- a/qa/rpc-tests/rest.py
+++ b/qa/rpc-tests/rest.py
@@ -9,7 +9,10 @@
from test_framework import BitcoinTestFramework
from util import *
+from struct import pack, unpack
+import binascii
import json
+import StringIO
try:
import http.client as httplib
@@ -20,45 +23,210 @@ try:
except ImportError:
import urlparse
-def http_get_call(host, port, path, response_object = 0):
+def deser_uint256(f):
+ r = 0
+ for i in range(8):
+ t = unpack(b"<I", f.read(4))[0]
+ r += t << (i * 32)
+ return r
+
+# allows simple HTTP GET calls with an optional request body
+def http_get_call(host, port, path, requestdata = '', response_object = 0):
conn = httplib.HTTPConnection(host, port)
- conn.request('GET', path)
+ conn.request('GET', path, requestdata)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
-
class RESTTest (BitcoinTestFramework):
FORMAT_SEPARATOR = "."
+ def setup_chain(self):
+ print("Initializing test directory "+self.options.tmpdir)
+ initialize_chain_clean(self.options.tmpdir, 3)
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(3, self.options.tmpdir)
+ connect_nodes_bi(self.nodes,0,1)
+ connect_nodes_bi(self.nodes,1,2)
+ connect_nodes_bi(self.nodes,0,2)
+ self.is_network_split=False
+ self.sync_all()
+
def run_test(self):
url = urlparse.urlparse(self.nodes[0].url)
+ print "Mining blocks..."
+
+ self.nodes[0].generate(1)
+ self.sync_all()
+ self.nodes[2].generate(100)
+ self.sync_all()
+
+ assert_equal(self.nodes[0].getbalance(), 50)
+
+ txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
+ self.sync_all()
+ self.nodes[2].generate(1)
+ self.sync_all()
+ bb_hash = self.nodes[0].getbestblockhash()
+
+ assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
+
+ # load the latest 0.1 tx over the REST API
+ json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
+ json_obj = json.loads(json_string)
+ vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
+ # get n of 0.1 outpoint
+ n = 0
+ for vout in json_obj['vout']:
+ if vout['value'] == 0.1:
+ n = vout['n']
+
+
+ #######################################
+ # GETUTXOS: query an unspent outpoint #
+ #######################################
+ json_request = '{"checkmempool":true,"outpoints":[{"txid":"'+txid+'","n":'+str(n)+'}]}'
+ json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request)
+ json_obj = json.loads(json_string)
+
+ #check chainTip response
+ assert_equal(json_obj['chaintipHash'], bb_hash)
+
+ #make sure there is one utxo
+ assert_equal(len(json_obj['utxos']), 1)
+ assert_equal(json_obj['utxos'][0]['value'], 0.1)
+
+
+ #################################################
+ # GETUTXOS: now query an already spent outpoint #
+ #################################################
+ json_request = '{"checkmempool":true,"outpoints":[{"txid":"'+vintx+'","n":0}]}'
+ json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request)
+ json_obj = json.loads(json_string)
+
+ #check chainTip response
+ assert_equal(json_obj['chaintipHash'], bb_hash)
+
+ # make sure there is no utxo in the response because this outpoint has been spent
+ assert_equal(len(json_obj['utxos']), 0)
+
+ #check bitmap
+ assert_equal(json_obj['bitmap'], "0")
+
+
+ ##################################################
+ # GETUTXOS: now check both with the same request #
+ ##################################################
+ json_request = '{"checkmempool":true,"outpoints":[{"txid":"'+txid+'","n":'+str(n)+'},{"txid":"'+vintx+'","n":0}]}'
+ json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request)
+ json_obj = json.loads(json_string)
+ assert_equal(len(json_obj['utxos']), 1)
+ assert_equal(json_obj['bitmap'], "10")
+
+ #test binary response
bb_hash = self.nodes[0].getbestblockhash()
+ binaryRequest = b'\x01\x02'
+ binaryRequest += binascii.unhexlify(txid)
+ binaryRequest += pack("i", n)
+ binaryRequest += binascii.unhexlify(vintx)
+ binaryRequest += pack("i", 0)
+
+ bin_response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
+
+ output = StringIO.StringIO()
+ output.write(bin_response)
+ output.seek(0)
+ chainHeight = unpack("i", output.read(4))[0]
+ hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(65).rstrip("L") # 64 hex digits; python2 longs carry a trailing 'L'
+
+ assert_equal(bb_hash, hashFromBinResponse) # check that getutxos reported the expected chain tip
+ assert_equal(chainHeight, 102) #chain height must be 102
+
+
+ ############################
+ # GETUTXOS: mempool checks #
+ ############################
+
+ # do a tx and don't sync
+ txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
+ json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
+ json_obj = json.loads(json_string)
+ vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
+ # get n of 0.1 outpoint
+ n = 0
+ for vout in json_obj['vout']:
+ if vout['value'] == 0.1:
+ n = vout['n']
+
+ json_request = '{"checkmempool":false,"outpoints":[{"txid":"'+txid+'","n":'+str(n)+'}]}'
+ json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request)
+ json_obj = json.loads(json_string)
+ assert_equal(len(json_obj['utxos']), 0) # there should be no outpoint because the tx is only in the mempool and checkmempool is false
+
+ json_request = '{"checkmempool":true,"outpoints":[{"txid":"'+txid+'","n":'+str(n)+'}]}'
+ json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request)
+ json_obj = json.loads(json_string)
+ assert_equal(len(json_obj['utxos']), 1) # there should be one outpoint because the tx is in the mempool and checkmempool is true
+
+ #do some invalid requests
+ json_request = '{"checkmempool'
+ response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
+ assert_equal(response.status, 500) # must be a 500 because we sent an invalid json request
+
+ json_request = '{"checkmempool'
+ response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
+ assert_equal(response.status, 500) # must be a 500 because we sent an invalid bin request
+
+ #test limits
+ json_request = '{"checkmempool":true,"outpoints":['
+ for x in range(0, 200):
+ json_request += '{"txid":"'+txid+'","n":'+str(n)+'},'
+ json_request = json_request.rstrip(",")
+ json_request += "]}"
+ response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
+ assert_equal(response.status, 500) # must be a 500 because we exceed the outpoint limit
+
+ json_request = '{"checkmempool":true,"outpoints":['
+ for x in range(0, 90):
+ json_request += '{"txid":"'+txid+'","n":'+str(n)+'},'
+ json_request = json_request.rstrip(",")
+ json_request += "]}"
+ response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
+ assert_equal(response.status, 200) # must be a 200 because we are within the outpoint limit
+
+ self.nodes[0].generate(1) # generate a block to not affect upcoming tests
+ self.sync_all()
+
+ ################
+ # /rest/block/ #
+ ################
+
# check binary format
- response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
+ response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", "", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 80)
response_str = response.read()
# compare with block header
- response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
+ response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", "", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 80)
response_header_str = response_header.read()
assert_equal(response_str[0:80], response_header_str)
# check block hex format
- response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
+ response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 160)
response_hex_str = response_hex.read()
assert_equal(response_str.encode("hex")[0:160], response_hex_str[0:160])
# compare with hex block header
- response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
+ response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
response_header_hex_str = response_header_hex.read()
@@ -77,9 +245,11 @@ class RESTTest (BitcoinTestFramework):
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
- hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
+ hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(hex_string.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
+
+
# check block tx details
# let's make 3 tx and mine them on node 1
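The binary getutxos branch above reads the response in the order the test expects it on the wire: a 4-byte little-endian chain height, then the 32-byte chain tip hash, reassembled by deser_uint256 from eight little-endian 32-bit words. A sketch of that header parse in one place (the bitmap and utxo records that follow are not decoded byte-for-byte by this test, so they are omitted here):

    import StringIO
    from struct import unpack

    def parse_getutxos_header(bin_response):
        """Return (chain_height, chain_tip_hex) from a binary /rest/getutxos
        response, mirroring the reads in the test above."""
        f = StringIO.StringIO(bin_response)
        chain_height = unpack("<i", f.read(4))[0]   # little-endian int32
        r = 0
        for i in range(8):                          # eight LE 32-bit words
            r += unpack("<I", f.read(4))[0] << (i * 32)
        # 64 hex digits; python2 longs carry a trailing 'L' that is stripped
        chain_tip_hex = hex(r)[2:].zfill(65).rstrip("L")
        return chain_height, chain_tip_hex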
diff --git a/qa/rpc-tests/signrawtransactions.py b/qa/rpc-tests/signrawtransactions.py
new file mode 100755
index 0000000000..943634bd19
--- /dev/null
+++ b/qa/rpc-tests/signrawtransactions.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python2
+# Copyright (c) 2015 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+from test_framework import BitcoinTestFramework
+from util import *
+
+
+class SignRawTransactionsTest(BitcoinTestFramework):
+ """Tests transaction signing via RPC command "signrawtransaction"."""
+
+ def setup_chain(self):
+ print('Initializing test directory ' + self.options.tmpdir)
+ initialize_chain_clean(self.options.tmpdir, 1)
+
+ def setup_network(self, split=False):
+ self.nodes = start_nodes(1, self.options.tmpdir)
+ self.is_network_split = False
+
+ def successful_signing_test(self):
+ """Creates and signs a valid raw transaction with one input.
+
+ Expected results:
+
+ 1) The transaction has a complete set of signatures
+ 2) No script verification error occurred"""
+ privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
+
+ inputs = [
+ # Valid pay-to-pubkey script
+ {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
+ 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}
+ ]
+
+ outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
+
+ rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
+ rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
+
+ # 1) The transaction has a complete set of signatures
+ assert 'complete' in rawTxSigned
+ assert_equal(rawTxSigned['complete'], True)
+
+ # 2) No script verification error occurred
+ assert 'errors' not in rawTxSigned
+
+ def script_verification_error_test(self):
+ """Creates and signs a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
+
+ Expected results:
+
+ 3) The transaction has no complete set of signatures
+ 4) Two script verification errors occurred
+ 5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
+ 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
+ privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
+
+ inputs = [
+ # Valid pay-to-pubkey script
+ {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
+ # Invalid script
+ {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
+ # Missing scriptPubKey
+ {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
+ ]
+
+ scripts = [
+ # Valid pay-to-pubkey script
+ {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
+ 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
+ # Invalid script
+ {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
+ 'scriptPubKey': 'badbadbadbad'}
+ ]
+
+ outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
+
+ rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
+ rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
+
+ # 3) The transaction has no complete set of signatures
+ assert 'complete' in rawTxSigned
+ assert_equal(rawTxSigned['complete'], False)
+
+ # 4) Two script verification errors occurred
+ assert 'errors' in rawTxSigned
+ assert_equal(len(rawTxSigned['errors']), 2)
+
+ # 5) Script verification errors have certain properties
+ assert 'txid' in rawTxSigned['errors'][0]
+ assert 'vout' in rawTxSigned['errors'][0]
+ assert 'scriptSig' in rawTxSigned['errors'][0]
+ assert 'sequence' in rawTxSigned['errors'][0]
+ assert 'error' in rawTxSigned['errors'][0]
+
+ # 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
+ assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
+ assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
+ assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
+ assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
+
+ def run_test(self):
+ self.successful_signing_test()
+ self.script_verification_error_test()
+
+
+if __name__ == '__main__':
+ SignRawTransactionsTest().main()
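Taken together, the assertions in this test pin down the result shape the script relies on: signrawtransaction always returns a 'complete' boolean, omits 'errors' on success, and on failure returns an 'errors' array whose entries carry 'txid', 'vout', 'scriptSig', 'sequence' and 'error'. The same contract as a standalone checker, a hypothetical helper rather than framework code:

    ERROR_KEYS = ('txid', 'vout', 'scriptSig', 'sequence', 'error')

    def check_sign_result(result):
        """Validate the signrawtransaction result shape asserted above."""
        assert 'complete' in result
        if result['complete']:
            assert 'errors' not in result
        else:
            assert 'errors' in result
            for err in result['errors']:
                for key in ERROR_KEYS:
                    assert key in err, "missing %s in %r" % (key, err)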
diff --git a/qa/rpc-tests/util.py b/qa/rpc-tests/util.py
index 1cf96b314a..3b4a10e46b 100644
--- a/qa/rpc-tests/util.py
+++ b/qa/rpc-tests/util.py
@@ -283,7 +283,7 @@ def send_zeropri_transaction(from_node, to_node, amount, fee):
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
- then using it's output
+ then using its output
"""
# Create a send-to-self with confirmed inputs:
diff --git a/qa/rpc-tests/wallet.py b/qa/rpc-tests/wallet.py
index 08032fc538..b8965b3662 100755
--- a/qa/rpc-tests/wallet.py
+++ b/qa/rpc-tests/wallet.py
@@ -62,7 +62,7 @@ class WalletTest (BitcoinTestFramework):
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
- # Have node0 mine a block, thus they will collect their own fee.
+ # Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()