author     Wladimir J. van der Laan <laanwj@gmail.com>  2015-05-13 16:54:13 +0200
committer  Wladimir J. van der Laan <laanwj@gmail.com>  2015-05-13 17:10:02 +0200
commit     2cc1372190c01bc6aae70f94fcc3b81ae4f7aba3 (patch)
tree       d49994f9f96459811c7b1cacc0cc0b301aedc55b /qa
parent     484821870b5a92f64d3075cfd30757ea8bd29739 (diff)
parent     b649e0395464a659f4b3485ec71d28dc95ba48bd (diff)
Merge pull request #5159
b649e03 Create new BlockPolicyEstimator for fee estimates (Alex Morcos)
Diffstat (limited to 'qa')
-rwxr-xr-x  qa/rpc-tests/smartfees.py  281
-rw-r--r--  qa/rpc-tests/util.py         8
2 files changed, 228 insertions, 61 deletions
diff --git a/qa/rpc-tests/smartfees.py b/qa/rpc-tests/smartfees.py
index 4eb8bb4842..69f3c22c17 100755
--- a/qa/rpc-tests/smartfees.py
+++ b/qa/rpc-tests/smartfees.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python2
-# Copyright (c) 2014 The Bitcoin Core developers
+# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -11,82 +11,249 @@ from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
+# Construct 2 trivial P2SHs and the ScriptSigs that spend them,
+# so we can create many, many transactions without needing to
+# spend time signing.
+P2SH_1 = "2MySexEGVzZpRgNQ1JdjdP5bRETznm3roQ2" # P2SH of "OP_1 OP_DROP"
+P2SH_2 = "2NBdpwq8Aoo1EEKEXPNrKvr5xQr3M9UfcZA" # P2SH of "OP_2 OP_DROP"
+# Associated ScriptSigs that satisfy P2SH_1 and P2SH_2:
+# OP_TRUE plus a push of the 2-byte redeem script "OP_1 OP_DROP" or "OP_2 OP_DROP", prefixed by the script length
+SCRIPT_SIG = ["0451025175", "0451025275"]
+
+def satoshi_round(amount):
+ return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
+
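Aside: satoshi_round simply truncates to 8 decimal places (one satoshi) using Decimal.quantize with ROUND_DOWN. A minimal standalone sketch with illustrative values:

    from decimal import Decimal, ROUND_DOWN

    def satoshi_round(amount):
        # Truncate (never round up) to 8 decimal places = 1 satoshi
        return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)

    assert satoshi_round("0.123456789") == Decimal("0.12345678")
    assert satoshi_round(Decimal("1")) == Decimal("1.00000000")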
+def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
+ '''
+ Create and send a transaction with a random fee.
+    The transaction pays to a trivial P2SH script, and assumes that its inputs
+    are of the same form.
+ The function takes a list of confirmed outputs and unconfirmed outputs
+ and attempts to use the confirmed list first for its inputs.
+ It adds the newly created outputs to the unconfirmed list.
+ Returns (raw transaction, fee)
+ '''
+ # It's best to exponentially distribute our random fees
+ # because the buckets are exponentially spaced.
+ # Exponentially distributed from 1-128 * fee_increment
+ rand_fee = float(fee_increment)*(1.1892**random.randint(0,28))
+ # Total fee ranges from min_fee to min_fee + 127*fee_increment
+ fee = min_fee - fee_increment + satoshi_round(rand_fee)
+ inputs = []
+ total_in = Decimal("0.00000000")
+ while total_in <= (amount + fee) and len(conflist) > 0:
+ t = conflist.pop(0)
+ total_in += t["amount"]
+ inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} )
+ if total_in <= amount + fee:
+ while total_in <= (amount + fee) and len(unconflist) > 0:
+ t = unconflist.pop(0)
+ total_in += t["amount"]
+ inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} )
+ if total_in <= amount + fee:
+ raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in))
+ outputs = {}
+ outputs[P2SH_1] = total_in - amount - fee
+ outputs[P2SH_2] = amount
+ rawtx = from_node.createrawtransaction(inputs, outputs)
+    # createrawtransaction constructs a transaction that is ready to be signed.
+    # These transactions don't need to be signed, but we still have to insert
+    # the ScriptSig that will satisfy the ScriptPubKey.
+ completetx = rawtx[0:10]
+ inputnum = 0
+ for inp in inputs:
+ completetx += rawtx[10+82*inputnum:82+82*inputnum]
+ completetx += SCRIPT_SIG[inp["vout"]]
+ completetx += rawtx[84+82*inputnum:92+82*inputnum]
+ inputnum += 1
+ completetx += rawtx[10+82*inputnum:]
+ txid = from_node.sendrawtransaction(completetx, True)
+ unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee})
+ unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount})
+
+ return (completetx, fee)
+
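Aside: the fee schedule above is geometric on purpose. 1.1892 is approximately 2**0.25, so 28 random steps span a factor of roughly 2**7 = 128, and every total fee lands in [min_fee, min_fee + 127*fee_increment]. A standalone sanity check (values illustrative, not part of the test):

    import random
    from decimal import Decimal, ROUND_DOWN

    def satoshi_round(amount):
        return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)

    min_fee = fee_increment = Decimal("0.00001")
    for _ in range(1000):
        # 1.1892 ~= 2**0.25, so 28 steps cover a factor of ~128
        rand_fee = float(fee_increment) * (1.1892 ** random.randint(0, 28))
        fee = min_fee - fee_increment + satoshi_round(rand_fee)
        assert min_fee <= fee <= min_fee + 127 * fee_increment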
+def split_inputs(from_node, txins, txouts, initial_split = False):
+ '''
+    We need to generate a lot of very small inputs so that we can generate a ton of
+    transactions, and they will all have low priority.
+ This function takes an input from txins, and creates and sends a transaction
+ which splits the value into 2 outputs which are appended to txouts.
+ '''
+ prevtxout = txins.pop()
+ inputs = []
+ outputs = {}
+ inputs.append({ "txid" : prevtxout["txid"], "vout" : prevtxout["vout"] })
+ half_change = satoshi_round(prevtxout["amount"]/2)
+ rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
+ outputs[P2SH_1] = half_change
+ outputs[P2SH_2] = rem_change
+ rawtx = from_node.createrawtransaction(inputs, outputs)
+    # If this is the initial split we actually need to sign the transaction,
+    # otherwise we just need to insert the proper ScriptSig
+ if (initial_split) :
+ completetx = from_node.signrawtransaction(rawtx)["hex"]
+ else :
+ completetx = rawtx[0:82] + SCRIPT_SIG[prevtxout["vout"]] + rawtx[84:]
+ txid = from_node.sendrawtransaction(completetx, True)
+ txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change})
+ txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change})
+
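Aside: each split_inputs call consumes one txout and appends two, so the doubling loop in setup_network below multiplies the available puzzle outputs by 4 per rep. A back-of-the-envelope count:

    count = 2          # outputs after the initial coinbase split
    for rep in range(5):
        count *= 2     # split everything in txouts into txouts2
        count *= 2     # split everything in txouts2 back into txouts
    print(count)       # 2048 confirmed puzzle outputs feed the fee test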
+def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
+ '''
+ This function calls estimatefee and verifies that the estimates
+ meet certain invariants.
+ '''
+ all_estimates = [ node.estimatefee(i) for i in range(1,26) ]
+ if print_estimates:
+ print([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
+ delta = 1.0e-6 # account for rounding error
+ last_e = max(fees_seen)
+ for e in filter(lambda x: x >= 0, all_estimates):
+ # Estimates should be within the bounds of what transactions fees actually were:
+ if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen):
+ raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
+ %(float(e), min(fees_seen), max(fees_seen)))
+ # Estimates should be monotonically decreasing
+ if float(e)-delta > last_e:
+ raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
+ %(float(e),float(last_e)))
+ last_e = e
+ valid_estimate = False
+ invalid_estimates = 0
+ for e in all_estimates:
+ if e >= 0:
+ valid_estimate = True
+ else:
+ invalid_estimates += 1
+ # Once we're at a high enough confirmation count that we can give an estimate
+ # We should have estimates for all higher confirmation counts
+ if valid_estimate and e < 0:
+ raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
+ # Check on the expected number of different confirmation counts
+ # that we might not have valid estimates for
+ if invalid_estimates > max_invalid:
+ raise AssertionError("More than (%d) invalid estimates"%(max_invalid))
+ return all_estimates
+
+
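Aside: a condensed sketch of the first two invariants check_estimates enforces, with made-up numbers (estimatefee returns -1 when it has no estimate for a confirmation target):

    delta = 1.0e-6
    fees_seen = [0.00011, 0.00129]                    # hypothetical observed fee rates
    estimates = [-1, 0.0012, 0.0009, 0.0005, 0.0002]  # hypothetical estimatefee(1..5)

    last_e = max(fees_seen)
    for e in (x for x in estimates if x >= 0):
        # every valid estimate lies within the range of fees actually paid
        assert min(fees_seen) - delta <= e <= max(fees_seen) + delta
        # estimates never increase as the confirmation target grows
        assert e <= last_e + delta
        last_e = e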
class EstimateFeeTest(BitcoinTestFramework):
def setup_network(self):
+ '''
+        We'll set up the network to have 3 nodes that all mine with different parameters.
+        But first we need to use one node to create a lot of small low-priority outputs
+        which we will use to generate our transactions.
+ '''
self.nodes = []
- self.nodes.append(start_node(0, self.options.tmpdir,
- ["-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]))
- # Node1 mines small-but-not-tiny blocks, and allows free transactions.
+ # Use node0 to mine blocks for input splitting
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
+ "-relaypriority=0", "-whitelist=127.0.0.1"]))
+
+ print("This test is time consuming, please be patient")
+ print("Splitting inputs to small size so we can generate low priority tx's")
+ self.txouts = []
+ self.txouts2 = []
+ # Split a coinbase into two transaction puzzle outputs
+ split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
+
+ # Mine
+ while (len(self.nodes[0].getrawmempool()) > 0):
+ self.nodes[0].generate(1)
+
+ # Repeatedly split those 2 outputs, doubling twice for each rep
+ # Use txouts to monitor the available utxo, since these won't be tracked in wallet
+ reps = 0
+ while (reps < 5):
+ #Double txouts to txouts2
+ while (len(self.txouts)>0):
+ split_inputs(self.nodes[0], self.txouts, self.txouts2)
+ while (len(self.nodes[0].getrawmempool()) > 0):
+ self.nodes[0].generate(1)
+ #Double txouts2 to txouts
+ while (len(self.txouts2)>0):
+ split_inputs(self.nodes[0], self.txouts2, self.txouts)
+ while (len(self.nodes[0].getrawmempool()) > 0):
+ self.nodes[0].generate(1)
+ reps += 1
+ print("Finished splitting")
+
+    # Now we can connect the other nodes; we didn't want to connect them earlier
+    # so that the estimates would not be affected by the splitting transactions.
+    # Node1 mines blocks that are small but still larger than the expected transaction volume,
+    # and allows free transactions.
# NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
- # so blockmaxsize of 2,000 is really just 1,000 bytes (room enough for
- # 6 or 7 transactions)
+ # (17k is room enough for 110 or so transactions)
self.nodes.append(start_node(1, self.options.tmpdir,
- ["-blockprioritysize=1500", "-blockmaxsize=2000",
- "-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]))
+ ["-blockprioritysize=1500", "-blockmaxsize=18000",
+ "-maxorphantx=1000", "-relaypriority=0", "-debug=estimatefee"]))
connect_nodes(self.nodes[1], 0)
# Node2 is a stingy miner, that
- # produces very small blocks (room for only 3 or so transactions)
- node2args = [ "-blockprioritysize=0", "-blockmaxsize=1500",
- "-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]
+    # produces blocks that are too small (room for only 70 or so transactions)
+ node2args = ["-blockprioritysize=0", "-blockmaxsize=12000", "-maxorphantx=1000", "-relaypriority=0"]
+
self.nodes.append(start_node(2, self.options.tmpdir, node2args))
- connect_nodes(self.nodes[2], 0)
+ connect_nodes(self.nodes[0], 2)
+ connect_nodes(self.nodes[2], 1)
self.is_network_split = False
self.sync_all()
-
+
+ def transact_and_mine(self, numblocks, mining_node):
+ min_fee = Decimal("0.00001")
+        # We will now mine numblocks blocks, generating on average 100 transactions between each block.
+        # We shuffle our confirmed txout set before each set of transactions.
+        # small_txpuzzle_randfee will use transactions whose inputs are already in the chain when possible,
+        # resorting to txs that depend on the mempool when those run out.
+ for i in range(numblocks):
+ random.shuffle(self.confutxo)
+ for j in range(random.randrange(100-50,100+50)):
+ from_index = random.randint(1,2)
+ (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
+ self.memutxo, Decimal("0.005"), min_fee, min_fee)
+ tx_kbytes = (len(txhex)/2)/1000.0
+ self.fees_per_kb.append(float(fee)/tx_kbytes)
+ sync_mempools(self.nodes[0:3],.1)
+ mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
+ sync_blocks(self.nodes[0:3],.1)
+ #update which txouts are confirmed
+ newmem = []
+ for utx in self.memutxo:
+ if utx["txid"] in mined:
+ self.confutxo.append(utx)
+ else:
+ newmem.append(utx)
+ self.memutxo = newmem
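Aside: the fee rate recorded for the estimator checks is BTC per kilobyte, derived from the hex length of the transaction (2 hex characters per byte). A tiny illustration with made-up numbers:

    txhex = "00" * 250                  # pretend 250-byte transaction
    fee = 0.00005                       # hypothetical absolute fee in BTC
    tx_kbytes = (len(txhex) / 2) / 1000.0
    print(fee / tx_kbytes)              # 0.0002 BTC per kB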
def run_test(self):
- # Prime the memory pool with pairs of transactions
- # (high-priority, random fee and zero-priority, random fee)
- min_fee = Decimal("0.001")
- fees_per_kb = [];
- for i in range(12):
- (txid, txhex, fee) = random_zeropri_transaction(self.nodes, Decimal("1.1"),
- min_fee, min_fee, 20)
- tx_kbytes = (len(txhex)/2)/1000.0
- fees_per_kb.append(float(fee)/tx_kbytes)
-
- # Mine blocks with node2 until the memory pool clears:
- count_start = self.nodes[2].getblockcount()
- while len(self.nodes[2].getrawmempool()) > 0:
- self.nodes[2].generate(1)
- self.sync_all()
-
- all_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
- print("Fee estimates, super-stingy miner: "+str([str(e) for e in all_estimates]))
+ self.fees_per_kb = []
+ self.memutxo = []
+ self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
+ print("Checking estimates for 1/2/3/6/15/25 blocks")
+ print("Creating transactions and mining them with a huge block size")
+        # Create transactions and mine 30 big blocks with node 0 such that the mempool is always emptied
+ self.transact_and_mine(30, self.nodes[0])
+ check_estimates(self.nodes[1], self.fees_per_kb, 1)
- # Estimates should be within the bounds of what transactions fees actually were:
- delta = 1.0e-6 # account for rounding error
- for e in filter(lambda x: x >= 0, all_estimates):
- if float(e)+delta < min(fees_per_kb) or float(e)-delta > max(fees_per_kb):
- raise AssertionError("Estimated fee (%f) out of range (%f,%f)"%(float(e), min_fee_kb, max_fee_kb))
-
- # Generate transactions while mining 30 more blocks, this time with node1:
- for i in range(30):
- for j in range(random.randrange(6-4,6+4)):
- (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"),
- Decimal("0.0"), min_fee, 20)
- tx_kbytes = (len(txhex)/2)/1000.0
- fees_per_kb.append(float(fee)/tx_kbytes)
- self.nodes[1].generate(1)
- self.sync_all()
+ print("Creating transactions and mining them with a block size that can't keep up")
+    # Create transactions and mine 20 small blocks with node 2, but create txs faster than we can mine
+ self.transact_and_mine(20, self.nodes[2])
+ check_estimates(self.nodes[1], self.fees_per_kb, 3)
- all_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
- print("Fee estimates, more generous miner: "+str([ str(e) for e in all_estimates]))
- for e in filter(lambda x: x >= 0, all_estimates):
- if float(e)+delta < min(fees_per_kb) or float(e)-delta > max(fees_per_kb):
- raise AssertionError("Estimated fee (%f) out of range (%f,%f)"%(float(e), min_fee_kb, max_fee_kb))
+ print("Creating transactions and mining them at a block size that is just big enough")
+ # Generate transactions while mining 40 more blocks, this time with node1
+ # which mines blocks with capacity just above the rate that transactions are being created
+ self.transact_and_mine(40, self.nodes[1])
+ check_estimates(self.nodes[1], self.fees_per_kb, 2)
# Finish by mining a normal-sized block:
- while len(self.nodes[0].getrawmempool()) > 0:
- self.nodes[0].generate(1)
- self.sync_all()
-
- final_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
- print("Final fee estimates: "+str([ str(e) for e in final_estimates]))
+ while len(self.nodes[1].getrawmempool()) > 0:
+ self.nodes[1].generate(1)
+ sync_blocks(self.nodes[0:3],.1)
+ print("Final estimates after emptying mempools")
+ check_estimates(self.nodes[1], self.fees_per_kb, 2)
if __name__ == '__main__':
EstimateFeeTest().main()
diff --git a/qa/rpc-tests/util.py b/qa/rpc-tests/util.py
index 3b4a10e46b..997bbcc373 100644
--- a/qa/rpc-tests/util.py
+++ b/qa/rpc-tests/util.py
@@ -33,7 +33,7 @@ def check_json_precision():
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
-def sync_blocks(rpc_connections):
+def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
@@ -41,9 +41,9 @@ def sync_blocks(rpc_connections):
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
- time.sleep(1)
+ time.sleep(wait)
-def sync_mempools(rpc_connections):
+def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
@@ -56,7 +56,7 @@ def sync_mempools(rpc_connections):
num_match = num_match+1
if num_match == len(rpc_connections):
break
- time.sleep(1)
+ time.sleep(wait)
bitcoind_processes = {}
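Aside: the util.py change simply threads a configurable poll interval through the sync helpers; the resulting shape is roughly the sketch below. The fee test passes wait=.1 so its many per-block sync calls don't each cost a full second.

    import time

    def sync_blocks(rpc_connections, wait=1):
        # Wait until everybody has the same block count, polling every `wait` seconds
        while True:
            counts = [x.getblockcount() for x in rpc_connections]
            if counts == [counts[0]] * len(counts):
                break
            time.sleep(wait)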