Diffstat
 -rw-r--r--  doc/gitian-building.md                     |   2
 -rw-r--r--  doc/gitian-building/create_vm_memsize.png  | bin 89475 -> 22158 bytes
 -rwxr-xr-x  qa/pull-tester/rpc-tests.py                |   1
 -rwxr-xr-x  qa/rpc-tests/bip65-cltv-p2p.py             |  40
 -rwxr-xr-x  qa/rpc-tests/bip65-cltv.py                 |   3
 -rwxr-xr-x  qa/rpc-tests/bipdersig-p2p.py              |  42
 -rwxr-xr-x  qa/rpc-tests/p2p-compactblocks.py          | 608
 -rwxr-xr-x  qa/rpc-tests/test_framework/mininode.py    | 373
 -rw-r--r--  qa/rpc-tests/test_framework/siphash.py     |  64
 -rwxr-xr-x  qa/rpc-tests/wallet-dump.py                | 144
 -rw-r--r--  src/chainparams.cpp                        |  28
 -rw-r--r--  src/chainparams.h                          |   5
 -rw-r--r--  src/consensus/params.h                     |   8
 -rw-r--r--  src/init.cpp                               |  82
 -rw-r--r--  src/main.cpp                               |  46
 -rw-r--r--  src/net.cpp                                |  12
 -rw-r--r--  src/rpc/blockchain.cpp                     |  36
 -rw-r--r--  src/test/README.md                         |  21
 -rw-r--r--  src/test/hash_tests.cpp                    |   4
 -rw-r--r--  src/test/test_bitcoin.cpp                  |   5
 20 files changed, 1228 insertions, 296 deletions
diff --git a/doc/gitian-building.md b/doc/gitian-building.md
index 7796a5fc9c..938f92ff12 100644
--- a/doc/gitian-building.md
+++ b/doc/gitian-building.md
@@ -55,7 +55,7 @@ In the VirtualBox GUI click "New" and choose the following parameters in the wiz
![](gitian-building/create_vm_memsize.png)
-- Memory Size: at least 1024MB, anything less will really slow down the build.
+- Memory Size: at least 3000MB, anything less and the build might not complete.
![](gitian-building/create_vm_hard_disk.png)
diff --git a/doc/gitian-building/create_vm_memsize.png b/doc/gitian-building/create_vm_memsize.png
index 5abfee5337..6f42cda73f 100644
--- a/doc/gitian-building/create_vm_memsize.png
+++ b/doc/gitian-building/create_vm_memsize.png
Binary files differ
diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py
index e3921cfbed..f65a3eefc3 100755
--- a/qa/pull-tester/rpc-tests.py
+++ b/qa/pull-tester/rpc-tests.py
@@ -142,6 +142,7 @@ testScripts = [
'segwit.py',
'importprunedfunds.py',
'signmessages.py',
+ 'p2p-compactblocks.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
diff --git a/qa/rpc-tests/bip65-cltv-p2p.py b/qa/rpc-tests/bip65-cltv-p2p.py
index 754b6873b7..e903b2fbf0 100755
--- a/qa/rpc-tests/bip65-cltv-p2p.py
+++ b/qa/rpc-tests/bip65-cltv-p2p.py
@@ -71,9 +71,9 @@ class BIP65Test(ComparisonTestFramework):
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
- ''' 98 more version 3 blocks '''
+ ''' 398 more version 3 blocks '''
test_blocks = []
- for i in range(98):
+ for i in range(398):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
@@ -118,24 +118,6 @@ class BIP65Test(ComparisonTestFramework):
height += 1
yield TestInstance([[block, True]])
- '''
- Check that the new CLTV rules are enforced in the 751st version 4
- block.
- '''
- spendtx = self.create_transaction(self.nodes[0],
- self.coinbase_blocks[1], self.nodeaddress, 1.0)
- cltv_invalidate(spendtx)
- spendtx.rehash()
-
- block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
- block.nVersion = 4
- block.vtx.append(spendtx)
- block.hashMerkleRoot = block.calc_merkle_root()
- block.rehash()
- block.solve()
- self.last_block_time += 1
- yield TestInstance([[block, False]])
-
''' Mine 199 new version blocks on last valid tip '''
test_blocks = []
for i in range(199):
@@ -169,6 +151,24 @@ class BIP65Test(ComparisonTestFramework):
height += 1
yield TestInstance([[block, True]])
+ '''
+ Check that the new CLTV rules are enforced in the 951st version 4
+ block.
+ '''
+ spendtx = self.create_transaction(self.nodes[0],
+ self.coinbase_blocks[1], self.nodeaddress, 1.0)
+ cltv_invalidate(spendtx)
+ spendtx.rehash()
+
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 4
+ block.vtx.append(spendtx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ yield TestInstance([[block, False]])
+
''' Mine 1 old version block, should be invalid '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
diff --git a/qa/rpc-tests/bip65-cltv.py b/qa/rpc-tests/bip65-cltv.py
index abba7fc20e..baa77b92a0 100755
--- a/qa/rpc-tests/bip65-cltv.py
+++ b/qa/rpc-tests/bip65-cltv.py
@@ -30,7 +30,8 @@ class BIP65Test(BitcoinTestFramework):
cnt = self.nodes[0].getblockcount()
# Mine some old-version blocks
- self.nodes[1].generate(100)
+ self.nodes[1].generate(200)
+ cnt += 100
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 100):
raise AssertionError("Failed to mine 100 version=3 blocks")
diff --git a/qa/rpc-tests/bipdersig-p2p.py b/qa/rpc-tests/bipdersig-p2p.py
index 4e4936a4ae..3bad5af5e6 100755
--- a/qa/rpc-tests/bipdersig-p2p.py
+++ b/qa/rpc-tests/bipdersig-p2p.py
@@ -79,9 +79,9 @@ class BIP66Test(ComparisonTestFramework):
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
- ''' 98 more version 2 blocks '''
+ ''' 298 more version 2 blocks '''
test_blocks = []
- for i in range(98):
+ for i in range(298):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
@@ -124,25 +124,7 @@ class BIP66Test(ComparisonTestFramework):
self.last_block_time += 1
self.tip = block.sha256
height += 1
- yield TestInstance([[block, True]])
-
- '''
- Check that the new DERSIG rules are enforced in the 751st version 3
- block.
- '''
- spendtx = self.create_transaction(self.nodes[0],
- self.coinbase_blocks[1], self.nodeaddress, 1.0)
- unDERify(spendtx)
- spendtx.rehash()
-
- block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
- block.nVersion = 3
- block.vtx.append(spendtx)
- block.hashMerkleRoot = block.calc_merkle_root()
- block.rehash()
- block.solve()
- self.last_block_time += 1
- yield TestInstance([[block, False]])
+ yield TestInstance([[block, True]])
''' Mine 199 new version blocks on last valid tip '''
test_blocks = []
@@ -177,6 +159,24 @@ class BIP66Test(ComparisonTestFramework):
height += 1
yield TestInstance([[block, True]])
+ '''
+ Check that the new DERSIG rules are enforced in the 951st version 3
+ block.
+ '''
+ spendtx = self.create_transaction(self.nodes[0],
+ self.coinbase_blocks[1], self.nodeaddress, 1.0)
+ unDERify(spendtx)
+ spendtx.rehash()
+
+ block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
+ block.nVersion = 3
+ block.vtx.append(spendtx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ yield TestInstance([[block, False]])
+
''' Mine 1 old version block, should be invalid '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
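
The block-count changes in the three BIP65/BIP66 tests above (98 -> 398, 98 -> 298, the enforcement check moving from the "751st" to the "951st" new-version block) follow from the rest of this commit: the nMajority* supermajority thresholds are removed from consensus/params.h and chainparams.cpp further down, and DERSIG/CLTV enforcement instead switches on at fixed regtest heights (BIP66 at 1251, BIP65 at 1351), so the tests must mine enough blocks to cross those heights. A rough sketch of the resulting gating, for illustration only; the regtest heights come from the chainparams.cpp hunk below, while enforcement itself lives in src/main.cpp (listed in the diffstat, hunk not shown in this excerpt):

# Illustration only, not part of the commit.
REGTEST_BIP66_HEIGHT = 1251   # DERSIG (BIP66) enforced from this height
REGTEST_BIP65_HEIGHT = 1351   # CLTV (BIP65) enforced from this height

def script_verify_flags(height):
    flags = set()
    if height >= REGTEST_BIP66_HEIGHT:
        flags.add("SCRIPT_VERIFY_DERSIG")
    if height >= REGTEST_BIP65_HEIGHT:
        flags.add("SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY")
    return flags

assert "SCRIPT_VERIFY_DERSIG" not in script_verify_flags(1250)
assert "SCRIPT_VERIFY_DERSIG" in script_verify_flags(1251)
assert "SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY" in script_verify_flags(1351)
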
diff --git a/qa/rpc-tests/p2p-compactblocks.py b/qa/rpc-tests/p2p-compactblocks.py
new file mode 100755
index 0000000000..7fe7ecc16c
--- /dev/null
+++ b/qa/rpc-tests/p2p-compactblocks.py
@@ -0,0 +1,608 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+from test_framework.mininode import *
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import *
+from test_framework.blocktools import create_block, create_coinbase
+from test_framework.siphash import siphash256
+from test_framework.script import CScript, OP_TRUE
+
+'''
+CompactBlocksTest -- test compact blocks (BIP 152)
+'''
+
+
+# TestNode: A peer we use to send messages to bitcoind, and store responses.
+class TestNode(SingleNodeConnCB):
+ def __init__(self):
+ SingleNodeConnCB.__init__(self)
+ self.last_sendcmpct = None
+ self.last_headers = None
+ self.last_inv = None
+ self.last_cmpctblock = None
+ self.block_announced = False
+ self.last_getdata = None
+ self.last_getblocktxn = None
+ self.last_block = None
+ self.last_blocktxn = None
+
+ def on_sendcmpct(self, conn, message):
+ self.last_sendcmpct = message
+
+ def on_block(self, conn, message):
+ self.last_block = message
+
+ def on_cmpctblock(self, conn, message):
+ self.last_cmpctblock = message
+ self.block_announced = True
+
+ def on_headers(self, conn, message):
+ self.last_headers = message
+ self.block_announced = True
+
+ def on_inv(self, conn, message):
+ self.last_inv = message
+ self.block_announced = True
+
+ def on_getdata(self, conn, message):
+ self.last_getdata = message
+
+ def on_getblocktxn(self, conn, message):
+ self.last_getblocktxn = message
+
+ def on_blocktxn(self, conn, message):
+ self.last_blocktxn = message
+
+ # Requires caller to hold mininode_lock
+ def received_block_announcement(self):
+ return self.block_announced
+
+ def clear_block_announcement(self):
+ with mininode_lock:
+ self.block_announced = False
+ self.last_inv = None
+ self.last_headers = None
+ self.last_cmpctblock = None
+
+ def get_headers(self, locator, hashstop):
+ msg = msg_getheaders()
+ msg.locator.vHave = locator
+ msg.hashstop = hashstop
+ self.connection.send_message(msg)
+
+ def send_header_for_blocks(self, new_blocks):
+ headers_message = msg_headers()
+ headers_message.headers = [CBlockHeader(b) for b in new_blocks]
+ self.send_message(headers_message)
+
+
+class CompactBlocksTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+ self.utxos = []
+
+ def setup_network(self):
+ self.nodes = []
+
+ # Turn off segwit in this test, as compact blocks don't currently work
+ # with segwit. (After BIP 152 is updated to support segwit, we can
+ # test behavior with and without segwit enabled by adding a second node
+ # to the test.)
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1", "-bip9params=segwit:0:0"]])
+
+ def build_block_on_tip(self):
+ height = self.nodes[0].getblockcount()
+ tip = self.nodes[0].getbestblockhash()
+ mtp = self.nodes[0].getblockheader(tip)['mediantime']
+ block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
+ block.solve()
+ return block
+
+ # Create 10 more anyone-can-spend utxo's for testing.
+ def make_utxos(self):
+ block = self.build_block_on_tip()
+ self.test_node.send_and_ping(msg_block(block))
+ assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
+ self.nodes[0].generate(100)
+
+ total_value = block.vtx[0].vout[0].nValue
+ out_value = total_value // 10
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
+ for i in range(10):
+ tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
+ tx.rehash()
+
+ block2 = self.build_block_on_tip()
+ block2.vtx.append(tx)
+ block2.hashMerkleRoot = block2.calc_merkle_root()
+ block2.solve()
+ self.test_node.send_and_ping(msg_block(block2))
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
+ self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
+ return
+
+ # Test "sendcmpct":
+ # - No compact block announcements or getdata(MSG_CMPCT_BLOCK) unless
+ # sendcmpct is sent.
+ # - If sendcmpct is sent with version > 0, the message is ignored.
+ # - If sendcmpct is sent with boolean 0, then block announcements are not
+ # made with compact blocks.
+ # - If sendcmpct is then sent with boolean 1, then new block announcements
+ # are made with compact blocks.
+ def test_sendcmpct(self):
+ print("Testing SENDCMPCT p2p message... ")
+
+ # Make sure we get a version 0 SENDCMPCT message from our peer
+ def received_sendcmpct():
+ return (self.test_node.last_sendcmpct is not None)
+ got_message = wait_until(received_sendcmpct, timeout=30)
+ assert(got_message)
+ assert_equal(self.test_node.last_sendcmpct.version, 1)
+
+ tip = int(self.nodes[0].getbestblockhash(), 16)
+
+ def check_announcement_of_new_block(node, peer, predicate):
+ self.test_node.clear_block_announcement()
+ node.generate(1)
+ got_message = wait_until(peer.received_block_announcement, timeout=30)
+ assert(got_message)
+ with mininode_lock:
+ assert(predicate)
+
+ # We shouldn't get any block announcements via cmpctblock yet.
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda: self.test_node.last_cmpctblock is None)
+
+ # Try one more time, this time after requesting headers.
+ self.test_node.clear_block_announcement()
+ self.test_node.get_headers(locator=[tip], hashstop=0)
+ wait_until(self.test_node.received_block_announcement, timeout=30)
+ self.test_node.clear_block_announcement()
+
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda: self.test_node.last_cmpctblock is None and self.test_node.last_inv is not None)
+
+ # Now try a SENDCMPCT message with too-high version
+ sendcmpct = msg_sendcmpct()
+ sendcmpct.version = 2
+ self.test_node.send_message(sendcmpct)
+
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda: self.test_node.last_cmpctblock is None)
+
+ # Now try a SENDCMPCT message with valid version, but announce=False
+ self.test_node.send_message(msg_sendcmpct())
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda: self.test_node.last_cmpctblock is None)
+
+ # Finally, try a SENDCMPCT message with announce=True
+ sendcmpct.version = 1
+ sendcmpct.announce = True
+ self.test_node.send_message(sendcmpct)
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda: self.test_node.last_cmpctblock is not None)
+
+ # Try one more time
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda: self.test_node.last_cmpctblock is not None)
+
+ # Try one more time, after turning on sendheaders
+ self.test_node.send_message(msg_sendheaders())
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda: self.test_node.last_cmpctblock is not None)
+
+ # Now turn off announcements
+ sendcmpct.announce = False
+ check_announcement_of_new_block(self.nodes[0], self.test_node, lambda: self.test_node.last_cmpctblock is None and self.test_node.last_headers is not None)
+
+ # This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
+ def test_invalid_cmpctblock_message(self):
+ print("Testing invalid index in cmpctblock message...")
+ self.nodes[0].generate(101)
+ block = self.build_block_on_tip()
+
+ cmpct_block = P2PHeaderAndShortIDs()
+ cmpct_block.header = CBlockHeader(block)
+ cmpct_block.prefilled_txn_length = 1
+ # This index will be too high
+ prefilled_txn = PrefilledTransaction(1, block.vtx[0])
+ cmpct_block.prefilled_txn = [prefilled_txn]
+ self.test_node.send_and_ping(msg_cmpctblock(cmpct_block))
+ assert(int(self.nodes[0].getbestblockhash(), 16) == block.hashPrevBlock)
+
+ # Compare the generated shortids to what we expect based on BIP 152, given
+ # bitcoind's choice of nonce.
+ def test_compactblock_construction(self):
+ print("Testing compactblock headers and shortIDs are correct...")
+
+ # Generate a bunch of transactions.
+ self.nodes[0].generate(101)
+ num_transactions = 25
+ address = self.nodes[0].getnewaddress()
+ for i in range(num_transactions):
+ self.nodes[0].sendtoaddress(address, 0.1)
+
+ # Now mine a block, and look at the resulting compact block.
+ self.test_node.clear_block_announcement()
+ block_hash = int(self.nodes[0].generate(1)[0], 16)
+
+ # Store the raw block in our internal format.
+ block = FromHex(CBlock(), self.nodes[0].getblock("%02x" % block_hash, False))
+ [tx.calc_sha256() for tx in block.vtx]
+ block.rehash()
+
+ # Don't care which type of announcement came back for this test; just
+ # request the compact block if we didn't get one yet.
+ wait_until(self.test_node.received_block_announcement, timeout=30)
+
+ with mininode_lock:
+ if self.test_node.last_cmpctblock is None:
+ self.test_node.clear_block_announcement()
+ inv = CInv(4, block_hash) # 4 == "CompactBlock"
+ self.test_node.send_message(msg_getdata([inv]))
+
+ wait_until(self.test_node.received_block_announcement, timeout=30)
+
+ # Now we should have the compactblock
+ header_and_shortids = None
+ with mininode_lock:
+ assert(self.test_node.last_cmpctblock is not None)
+ # Convert the on-the-wire representation to absolute indexes
+ header_and_shortids = HeaderAndShortIDs(self.test_node.last_cmpctblock.header_and_shortids)
+
+ # Check that we got the right block!
+ header_and_shortids.header.calc_sha256()
+ assert_equal(header_and_shortids.header.sha256, block_hash)
+
+ # Make sure the prefilled_txn appears to have included the coinbase
+ assert(len(header_and_shortids.prefilled_txn) >= 1)
+ assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
+
+ # Check that all prefilled_txn entries match what's in the block.
+ for entry in header_and_shortids.prefilled_txn:
+ entry.tx.calc_sha256()
+ assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
+
+ # Check that the cmpctblock message announced all the transactions.
+ assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
+
+ # And now check that all the shortids are as expected as well.
+ # Determine the siphash keys to use.
+ [k0, k1] = header_and_shortids.get_siphash_keys()
+
+ index = 0
+ while index < len(block.vtx):
+ if (len(header_and_shortids.prefilled_txn) > 0 and
+ header_and_shortids.prefilled_txn[0].index == index):
+ # Already checked prefilled transactions above
+ header_and_shortids.prefilled_txn.pop(0)
+ else:
+ shortid = calculate_shortid(k0, k1, block.vtx[index].sha256)
+ assert_equal(shortid, header_and_shortids.shortids[0])
+ header_and_shortids.shortids.pop(0)
+ index += 1
+
+ # Test that bitcoind requests compact blocks when we announce new blocks
+ # via header or inv, and that responding to getblocktxn causes the block
+ # to be successfully reconstructed.
+ def test_compactblock_requests(self):
+ print("Testing compactblock requests... ")
+
+ # Try announcing a block with an inv or header, expect a compactblock
+ # request
+ for announce in ["inv", "header"]:
+ block = self.build_block_on_tip()
+ with mininode_lock:
+ self.test_node.last_getdata = None
+
+ if announce == "inv":
+ self.test_node.send_message(msg_inv([CInv(2, block.sha256)]))
+ else:
+ self.test_node.send_header_for_blocks([block])
+ success = wait_until(lambda: self.test_node.last_getdata is not None, timeout=30)
+ assert(success)
+ assert_equal(len(self.test_node.last_getdata.inv), 1)
+ assert_equal(self.test_node.last_getdata.inv[0].type, 4)
+ assert_equal(self.test_node.last_getdata.inv[0].hash, block.sha256)
+
+ # Send back a compactblock message that omits the coinbase
+ comp_block = HeaderAndShortIDs()
+ comp_block.header = CBlockHeader(block)
+ comp_block.nonce = 0
+ comp_block.shortids = [1] # this is useless, and wrong
+ self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
+ # Expect a getblocktxn message.
+ with mininode_lock:
+ assert(self.test_node.last_getblocktxn is not None)
+ absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute()
+ assert_equal(absolute_indexes, [0]) # should be a coinbase request
+
+ # Send the coinbase, and verify that the tip advances.
+ msg = msg_blocktxn()
+ msg.block_transactions.blockhash = block.sha256
+ msg.block_transactions.transactions = [block.vtx[0]]
+ self.test_node.send_and_ping(msg)
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
+
+ # Create a chain of transactions from given utxo, and add to a new block.
+ def build_block_with_transactions(self, utxo, num_transactions):
+ block = self.build_block_on_tip()
+
+ for i in range(num_transactions):
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
+ tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
+ tx.rehash()
+ utxo = [tx.sha256, 0, tx.vout[0].nValue]
+ block.vtx.append(tx)
+
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.solve()
+ return block
+
+ # Test that we only receive getblocktxn requests for transactions that the
+ # node needs, and that responding to them causes the block to be
+ # reconstructed.
+ def test_getblocktxn_requests(self):
+ print("Testing getblocktxn requests...")
+
+ # First try announcing compactblocks that won't reconstruct, and verify
+ # that we receive getblocktxn messages back.
+ utxo = self.utxos.pop(0)
+
+ block = self.build_block_with_transactions(utxo, 5)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+
+ comp_block = HeaderAndShortIDs()
+ comp_block.initialize_from_block(block)
+
+ self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+ with mininode_lock:
+ assert(self.test_node.last_getblocktxn is not None)
+ absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute()
+ assert_equal(absolute_indexes, [1, 2, 3, 4, 5])
+ msg = msg_blocktxn()
+ msg.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
+ self.test_node.send_and_ping(msg)
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
+
+ utxo = self.utxos.pop(0)
+ block = self.build_block_with_transactions(utxo, 5)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+
+ # Now try interspersing the prefilled transactions
+ comp_block.initialize_from_block(block, prefill_list=[0, 1, 5])
+ self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+ with mininode_lock:
+ assert(self.test_node.last_getblocktxn is not None)
+ absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute()
+ assert_equal(absolute_indexes, [2, 3, 4])
+ msg.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
+ self.test_node.send_and_ping(msg)
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
+
+ # Now try giving one transaction ahead of time.
+ utxo = self.utxos.pop(0)
+ block = self.build_block_with_transactions(utxo, 5)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+ self.test_node.send_and_ping(msg_tx(block.vtx[1]))
+ assert(block.vtx[1].hash in self.nodes[0].getrawmempool())
+
+ # Prefill 4 out of the 6 transactions, and verify that only the one
+ # that was not in the mempool is requested.
+ comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4])
+ self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+ with mininode_lock:
+ assert(self.test_node.last_getblocktxn is not None)
+ absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute()
+ assert_equal(absolute_indexes, [5])
+
+ msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
+ self.test_node.send_and_ping(msg)
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
+
+ # Now provide all transactions to the node before the block is
+ # announced and verify reconstruction happens immediately.
+ utxo = self.utxos.pop(0)
+ block = self.build_block_with_transactions(utxo, 10)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+ for tx in block.vtx[1:]:
+ self.test_node.send_message(msg_tx(tx))
+ self.test_node.sync_with_ping()
+ # Make sure all transactions were accepted.
+ mempool = self.nodes[0].getrawmempool()
+ for tx in block.vtx[1:]:
+ assert(tx.hash in mempool)
+
+ # Clear out last request.
+ with mininode_lock:
+ self.test_node.last_getblocktxn = None
+
+ # Send compact block
+ comp_block.initialize_from_block(block, prefill_list=[0])
+ self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+ with mininode_lock:
+ # Shouldn't have gotten a request for any transaction
+ assert(self.test_node.last_getblocktxn is None)
+ # Tip should have updated
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
+
+ # Incorrectly responding to a getblocktxn shouldn't cause the block to be
+ # permanently failed.
+ def test_incorrect_blocktxn_response(self):
+ print("Testing handling of incorrect blocktxn responses...")
+
+ if (len(self.utxos) == 0):
+ self.make_utxos()
+ utxo = self.utxos.pop(0)
+
+ block = self.build_block_with_transactions(utxo, 10)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+ # Relay the first 5 transactions from the block in advance
+ for tx in block.vtx[1:6]:
+ self.test_node.send_message(msg_tx(tx))
+ self.test_node.sync_with_ping()
+ # Make sure all transactions were accepted.
+ mempool = self.nodes[0].getrawmempool()
+ for tx in block.vtx[1:6]:
+ assert(tx.hash in mempool)
+
+ # Send compact block
+ comp_block = HeaderAndShortIDs()
+ comp_block.initialize_from_block(block, prefill_list=[0])
+ self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+ absolute_indexes = []
+ with mininode_lock:
+ assert(self.test_node.last_getblocktxn is not None)
+ absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute()
+ assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
+
+ # Now give an incorrect response.
+ # Note that it's possible for bitcoind to be smart enough to know we're
+ # lying, since it could check to see if the shortid matches what we're
+ # sending, and eg disconnect us for misbehavior. If that behavior
+ # change were made, we could just modify this test by having a
+ # different peer provide the block further down, so that we're still
+ # verifying that the block isn't marked bad permanently. This is good
+ # enough for now.
+ msg = msg_blocktxn()
+ msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
+ self.test_node.send_and_ping(msg)
+
+ # Tip should not have updated
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
+
+ # We should receive a getdata request
+ success = wait_until(lambda: self.test_node.last_getdata is not None, timeout=10)
+ assert(success)
+ assert_equal(len(self.test_node.last_getdata.inv), 1)
+ assert_equal(self.test_node.last_getdata.inv[0].type, 2)
+ assert_equal(self.test_node.last_getdata.inv[0].hash, block.sha256)
+
+ # Deliver the block
+ self.test_node.send_and_ping(msg_block(block))
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
+
+ def test_getblocktxn_handler(self):
+ print("Testing getblocktxn handler...")
+
+ # bitcoind won't respond for blocks whose height is more than 15 blocks
+ # deep.
+ MAX_GETBLOCKTXN_DEPTH = 15
+ chain_height = self.nodes[0].getblockcount()
+ current_height = chain_height
+ while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
+ block_hash = self.nodes[0].getblockhash(current_height)
+ block = FromHex(CBlock(), self.nodes[0].getblock(block_hash, False))
+
+ msg = msg_getblocktxn()
+ msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
+ num_to_request = random.randint(1, len(block.vtx))
+ msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
+ self.test_node.send_message(msg)
+ success = wait_until(lambda: self.test_node.last_blocktxn is not None, timeout=10)
+ assert(success)
+
+ [tx.calc_sha256() for tx in block.vtx]
+ with mininode_lock:
+ assert_equal(self.test_node.last_blocktxn.block_transactions.blockhash, int(block_hash, 16))
+ all_indices = msg.block_txn_request.to_absolute()
+ for index in all_indices:
+ tx = self.test_node.last_blocktxn.block_transactions.transactions.pop(0)
+ tx.calc_sha256()
+ assert_equal(tx.sha256, block.vtx[index].sha256)
+ self.test_node.last_blocktxn = None
+ current_height -= 1
+
+ # Next request should be ignored, as we're past the allowed depth.
+ block_hash = self.nodes[0].getblockhash(current_height)
+ msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
+ self.test_node.send_and_ping(msg)
+ with mininode_lock:
+ assert_equal(self.test_node.last_blocktxn, None)
+
+ def test_compactblocks_not_at_tip(self):
+ print("Testing compactblock requests/announcements not at chain tip...")
+
+ # Test that requesting old compactblocks doesn't work.
+ MAX_CMPCTBLOCK_DEPTH = 11
+ new_blocks = []
+ for i in range(MAX_CMPCTBLOCK_DEPTH):
+ self.test_node.clear_block_announcement()
+ new_blocks.append(self.nodes[0].generate(1)[0])
+ wait_until(self.test_node.received_block_announcement, timeout=30)
+
+ self.test_node.clear_block_announcement()
+ self.test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
+ success = wait_until(lambda: self.test_node.last_cmpctblock is not None, timeout=30)
+ assert(success)
+
+ self.test_node.clear_block_announcement()
+ self.nodes[0].generate(1)
+ wait_until(self.test_node.received_block_announcement, timeout=30)
+ self.test_node.clear_block_announcement()
+ self.test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
+ success = wait_until(lambda: self.test_node.last_block is not None, timeout=30)
+ assert(success)
+ with mininode_lock:
+ self.test_node.last_block.block.calc_sha256()
+ assert_equal(self.test_node.last_block.block.sha256, int(new_blocks[0], 16))
+
+ # Generate an old compactblock, and verify that it's not accepted.
+ cur_height = self.nodes[0].getblockcount()
+ hashPrevBlock = int(self.nodes[0].getblockhash(cur_height-5), 16)
+ block = self.build_block_on_tip()
+ block.hashPrevBlock = hashPrevBlock
+ block.solve()
+
+ comp_block = HeaderAndShortIDs()
+ comp_block.initialize_from_block(block)
+ self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+
+ tips = self.nodes[0].getchaintips()
+ found = False
+ for x in tips:
+ if x["hash"] == block.hash:
+ assert_equal(x["status"], "headers-only")
+ found = True
+ break
+ assert(found)
+
+ # Requesting this block via getblocktxn should silently fail
+ # (to avoid fingerprinting attacks).
+ msg = msg_getblocktxn()
+ msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
+ with mininode_lock:
+ self.test_node.last_blocktxn = None
+ self.test_node.send_and_ping(msg)
+ with mininode_lock:
+ assert(self.test_node.last_blocktxn is None)
+
+ def run_test(self):
+ # Setup the p2p connections and start up the network thread.
+ self.test_node = TestNode()
+
+ connections = []
+ connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
+ self.test_node.add_connection(connections[0])
+
+ NetworkThread().start() # Start up network handling in another thread
+
+ # Test logic begins here
+ self.test_node.wait_for_verack()
+
+ # We will need UTXOs to construct transactions in later tests.
+ self.make_utxos()
+
+ self.test_sendcmpct()
+ self.test_compactblock_construction()
+ self.test_compactblock_requests()
+ self.test_getblocktxn_requests()
+ self.test_getblocktxn_handler()
+ self.test_compactblocks_not_at_tip()
+ self.test_incorrect_blocktxn_response()
+ self.test_invalid_cmpctblock_message()
+
+
+if __name__ == '__main__':
+ CompactBlocksTest().main()
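
The getblocktxn exchanges exercised above (the test repeatedly compares block_txn_request.to_absolute() against lists such as [1, 2, 3, 4, 5] or [2, 3, 4]) rely on BIP 152's differential index encoding, implemented by BlockTransactionsRequest in the mininode.py changes below. A minimal standalone sketch of that conversion, duplicated here only for illustration:

# Each wire-format index is the gap to the previous absolute index, minus one.
def to_differential(absolute_indexes):
    out, last = [], -1
    for x in absolute_indexes:
        out.append(x - last - 1)
        last = x
    return out

def to_absolute(differential_indexes):
    out, last = [], -1
    for x in differential_indexes:
        out.append(x + last + 1)
        last = out[-1]
    return out

# Requesting every non-coinbase transaction of a 6-transaction block,
# as test_getblocktxn_requests does above:
assert to_differential([1, 2, 3, 4, 5]) == [1, 0, 0, 0, 0]
assert to_absolute([1, 0, 0, 0, 0]) == [1, 2, 3, 4, 5]
assert to_differential([2, 3, 4]) == [2, 0, 0]
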
diff --git a/qa/rpc-tests/test_framework/mininode.py b/qa/rpc-tests/test_framework/mininode.py
index cdd5292cd6..caffab3535 100755
--- a/qa/rpc-tests/test_framework/mininode.py
+++ b/qa/rpc-tests/test_framework/mininode.py
@@ -36,9 +36,10 @@ from threading import RLock
from threading import Thread
import logging
import copy
+from test_framework.siphash import siphash256
BIP0031_VERSION = 60000
-MY_VERSION = 60001 # past bip-31 for ping/pong
+MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MAX_INV_SZ = 50000
@@ -52,7 +53,7 @@ NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
# Keep our own socket map for asyncore, so that we can track disconnects
-# ourselves (to workaround an issue with closing an asyncore socket when
+# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
@@ -74,8 +75,19 @@ def ripemd160(s):
def hash256(s):
return sha256(sha256(s))
+def ser_compact_size(l):
+ r = b""
+ if l < 253:
+ r = struct.pack("B", l)
+ elif l < 0x10000:
+ r = struct.pack("<BH", 253, l)
+ elif l < 0x100000000:
+ r = struct.pack("<BI", 254, l)
+ else:
+ r = struct.pack("<BQ", 255, l)
+ return r
-def deser_string(f):
+def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
@@ -83,16 +95,14 @@ def deser_string(f):
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
+ return nit
+
+def deser_string(f):
+ nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
- if len(s) < 253:
- return struct.pack("B", len(s)) + s
- elif len(s) < 0x10000:
- return struct.pack("<BH", 253, len(s)) + s
- elif len(s) < 0x100000000:
- return struct.pack("<BI", 254, len(s)) + s
- return struct.pack("<BQ", 255, len(s)) + s
+ return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
@@ -125,13 +135,7 @@ def uint256_from_compact(c):
def deser_vector(f, c):
- nit = struct.unpack("<B", f.read(1))[0]
- if nit == 253:
- nit = struct.unpack("<H", f.read(2))[0]
- elif nit == 254:
- nit = struct.unpack("<I", f.read(4))[0]
- elif nit == 255:
- nit = struct.unpack("<Q", f.read(8))[0]
+ nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
@@ -144,15 +148,7 @@ def deser_vector(f, c):
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
- r = b""
- if len(l) < 253:
- r = struct.pack("B", len(l))
- elif len(l) < 0x10000:
- r = struct.pack("<BH", 253, len(l))
- elif len(l) < 0x100000000:
- r = struct.pack("<BI", 254, len(l))
- else:
- r = struct.pack("<BQ", 255, len(l))
+ r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
@@ -162,13 +158,7 @@ def ser_vector(l, ser_function_name=None):
def deser_uint256_vector(f):
- nit = struct.unpack("<B", f.read(1))[0]
- if nit == 253:
- nit = struct.unpack("<H", f.read(2))[0]
- elif nit == 254:
- nit = struct.unpack("<I", f.read(4))[0]
- elif nit == 255:
- nit = struct.unpack("<Q", f.read(8))[0]
+ nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
@@ -177,28 +167,14 @@ def deser_uint256_vector(f):
def ser_uint256_vector(l):
- r = b""
- if len(l) < 253:
- r = struct.pack("B", len(l))
- elif len(l) < 0x10000:
- r = struct.pack("<BH", 253, len(l))
- elif len(l) < 0x100000000:
- r = struct.pack("<BI", 254, len(l))
- else:
- r = struct.pack("<BQ", 255, len(l))
+ r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
- nit = struct.unpack("<B", f.read(1))[0]
- if nit == 253:
- nit = struct.unpack("<H", f.read(2))[0]
- elif nit == 254:
- nit = struct.unpack("<I", f.read(4))[0]
- elif nit == 255:
- nit = struct.unpack("<Q", f.read(8))[0]
+ nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
@@ -207,28 +183,14 @@ def deser_string_vector(f):
def ser_string_vector(l):
- r = b""
- if len(l) < 253:
- r = struct.pack("B", len(l))
- elif len(l) < 0x10000:
- r = struct.pack("<BH", 253, len(l))
- elif len(l) < 0x100000000:
- r = struct.pack("<BI", 254, len(l))
- else:
- r = struct.pack("<BQ", 255, len(l))
+ r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
- nit = struct.unpack("<B", f.read(1))[0]
- if nit == 253:
- nit = struct.unpack("<H", f.read(2))[0]
- elif nit == 254:
- nit = struct.unpack("<I", f.read(4))[0]
- elif nit == 255:
- nit = struct.unpack("<Q", f.read(8))[0]
+ nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
@@ -237,15 +199,7 @@ def deser_int_vector(f):
def ser_int_vector(l):
- r = b""
- if len(l) < 253:
- r = struct.pack("B", len(l))
- elif len(l) < 0x10000:
- r = struct.pack("<BH", 253, len(l))
- elif len(l) < 0x100000000:
- r = struct.pack("<BI", 254, len(l))
- else:
- r = struct.pack("<BQ", 255, len(l))
+ r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
@@ -294,7 +248,8 @@ class CInv(object):
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
- 2|MSG_WITNESS_FLAG : "WitnessBlock"
+ 2|MSG_WITNESS_FLAG : "WitnessBlock",
+ 4: "CompactBlock"
}
def __init__(self, t=0, h=0):
@@ -781,6 +736,187 @@ class CAlert(object):
% (len(self.vchMsg), len(self.vchSig))
+class PrefilledTransaction(object):
+ def __init__(self, index=0, tx = None):
+ self.index = index
+ self.tx = tx
+
+ def deserialize(self, f):
+ self.index = deser_compact_size(f)
+ self.tx = CTransaction()
+ self.tx.deserialize(f)
+
+ def serialize(self, with_witness=False):
+ r = b""
+ r += ser_compact_size(self.index)
+ if with_witness:
+ r += self.tx.serialize_with_witness()
+ else:
+ r += self.tx.serialize_without_witness()
+ return r
+
+ def __repr__(self):
+ return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
+
+# This is what we send on the wire, in a cmpctblock message.
+class P2PHeaderAndShortIDs(object):
+ def __init__(self):
+ self.header = CBlockHeader()
+ self.nonce = 0
+ self.shortids_length = 0
+ self.shortids = []
+ self.prefilled_txn_length = 0
+ self.prefilled_txn = []
+
+ def deserialize(self, f):
+ self.header.deserialize(f)
+ self.nonce = struct.unpack("<Q", f.read(8))[0]
+ self.shortids_length = deser_compact_size(f)
+ for i in range(self.shortids_length):
+ # shortids are defined to be 6 bytes in the spec, so append
+ # two zero bytes and read it in as an 8-byte number
+ self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
+ self.prefilled_txn = deser_vector(f, PrefilledTransaction)
+ self.prefilled_txn_length = len(self.prefilled_txn)
+
+ def serialize(self, with_witness=False):
+ r = b""
+ r += self.header.serialize()
+ r += struct.pack("<Q", self.nonce)
+ r += ser_compact_size(self.shortids_length)
+ for x in self.shortids:
+ # We only want the first 6 bytes
+ r += struct.pack("<Q", x)[0:6]
+ r += ser_vector(self.prefilled_txn)
+ return r
+
+ def __repr__(self):
+ return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
+
+
+# Calculate the BIP 152-compact blocks shortid for a given transaction hash
+def calculate_shortid(k0, k1, tx_hash):
+ expected_shortid = siphash256(k0, k1, tx_hash)
+ expected_shortid &= 0x0000ffffffffffff
+ return expected_shortid
+
+# This version gets rid of the array lengths, and reinterprets the differential
+# encoding into indices that can be used for lookup.
+class HeaderAndShortIDs(object):
+ def __init__(self, p2pheaders_and_shortids = None):
+ self.header = CBlockHeader()
+ self.nonce = 0
+ self.shortids = []
+ self.prefilled_txn = []
+
+ if p2pheaders_and_shortids != None:
+ self.header = p2pheaders_and_shortids.header
+ self.nonce = p2pheaders_and_shortids.nonce
+ self.shortids = p2pheaders_and_shortids.shortids
+ last_index = -1
+ for x in p2pheaders_and_shortids.prefilled_txn:
+ self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
+ last_index = self.prefilled_txn[-1].index
+
+ def to_p2p(self):
+ ret = P2PHeaderAndShortIDs()
+ ret.header = self.header
+ ret.nonce = self.nonce
+ ret.shortids_length = len(self.shortids)
+ ret.shortids = self.shortids
+ ret.prefilled_txn_length = len(self.prefilled_txn)
+ ret.prefilled_txn = []
+ last_index = -1
+ for x in self.prefilled_txn:
+ ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
+ last_index = x.index
+ return ret
+
+ def get_siphash_keys(self):
+ header_nonce = self.header.serialize()
+ header_nonce += struct.pack("<Q", self.nonce)
+ hash_header_nonce_as_str = sha256(header_nonce)
+ key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
+ key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
+ return [ key0, key1 ]
+
+ def initialize_from_block(self, block, nonce=0, prefill_list = [0]):
+ self.header = CBlockHeader(block)
+ self.nonce = nonce
+ self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
+ self.shortids = []
+ [k0, k1] = self.get_siphash_keys()
+ for i in range(len(block.vtx)):
+ if i not in prefill_list:
+ self.shortids.append(calculate_shortid(k0, k1, block.vtx[i].sha256))
+
+ def __repr__(self):
+ return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
+
+
+class BlockTransactionsRequest(object):
+
+ def __init__(self, blockhash=0, indexes = None):
+ self.blockhash = blockhash
+ self.indexes = indexes if indexes != None else []
+
+ def deserialize(self, f):
+ self.blockhash = deser_uint256(f)
+ indexes_length = deser_compact_size(f)
+ for i in range(indexes_length):
+ self.indexes.append(deser_compact_size(f))
+
+ def serialize(self):
+ r = b""
+ r += ser_uint256(self.blockhash)
+ r += ser_compact_size(len(self.indexes))
+ for x in self.indexes:
+ r += ser_compact_size(x)
+ return r
+
+ # helper to set the differentially encoded indexes from absolute ones
+ def from_absolute(self, absolute_indexes):
+ self.indexes = []
+ last_index = -1
+ for x in absolute_indexes:
+ self.indexes.append(x-last_index-1)
+ last_index = x
+
+ def to_absolute(self):
+ absolute_indexes = []
+ last_index = -1
+ for x in self.indexes:
+ absolute_indexes.append(x+last_index+1)
+ last_index = absolute_indexes[-1]
+ return absolute_indexes
+
+ def __repr__(self):
+ return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
+
+
+class BlockTransactions(object):
+
+ def __init__(self, blockhash=0, transactions = None):
+ self.blockhash = blockhash
+ self.transactions = transactions if transactions != None else []
+
+ def deserialize(self, f):
+ self.blockhash = deser_uint256(f)
+ self.transactions = deser_vector(f, CTransaction)
+
+ def serialize(self, with_witness=False):
+ r = b""
+ r += ser_uint256(self.blockhash)
+ if with_witness:
+ r += ser_vector(self.transactions, "serialize_with_witness")
+ else:
+ r += ser_vector(self.transactions)
+ return r
+
+ def __repr__(self):
+ return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
+
+
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
@@ -1215,6 +1351,79 @@ class msg_feefilter(object):
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
+class msg_sendcmpct(object):
+ command = b"sendcmpct"
+
+ def __init__(self):
+ self.announce = False
+ self.version = 1
+
+ def deserialize(self, f):
+ self.announce = struct.unpack("<?", f.read(1))[0]
+ self.version = struct.unpack("<Q", f.read(8))[0]
+
+ def serialize(self):
+ r = b""
+ r += struct.pack("<?", self.announce)
+ r += struct.pack("<Q", self.version)
+ return r
+
+ def __repr__(self):
+ return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
+
+class msg_cmpctblock(object):
+ command = b"cmpctblock"
+
+ def __init__(self, header_and_shortids = None):
+ self.header_and_shortids = header_and_shortids
+
+ def deserialize(self, f):
+ self.header_and_shortids = P2PHeaderAndShortIDs()
+ self.header_and_shortids.deserialize(f)
+
+ def serialize(self):
+ r = b""
+ r += self.header_and_shortids.serialize()
+ return r
+
+ def __repr__(self):
+ return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
+
+class msg_getblocktxn(object):
+ command = b"getblocktxn"
+
+ def __init__(self):
+ self.block_txn_request = None
+
+ def deserialize(self, f):
+ self.block_txn_request = BlockTransactionsRequest()
+ self.block_txn_request.deserialize(f)
+
+ def serialize(self):
+ r = b""
+ r += self.block_txn_request.serialize()
+ return r
+
+ def __repr__(self):
+ return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
+
+class msg_blocktxn(object):
+ command = b"blocktxn"
+
+ def __init__(self):
+ self.block_transactions = BlockTransactions()
+
+ def deserialize(self, f):
+ self.block_transactions.deserialize(f)
+
+ def serialize(self):
+ r = b""
+ r += self.block_transactions.serialize()
+ return r
+
+ def __repr__(self):
+ return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
+
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
@@ -1295,6 +1504,10 @@ class NodeConnCB(object):
def on_pong(self, conn, message): pass
def on_feefilter(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
+ def on_sendcmpct(self, conn, message): pass
+ def on_cmpctblock(self, conn, message): pass
+ def on_getblocktxn(self, conn, message): pass
+ def on_blocktxn(self, conn, message): pass
# More useful callbacks and functions for NodeConnCB's which have a single NodeConn
class SingleNodeConnCB(NodeConnCB):
@@ -1311,6 +1524,10 @@ class SingleNodeConnCB(NodeConnCB):
def send_message(self, message):
self.connection.send_message(message)
+ def send_and_ping(self, message):
+ self.send_message(message)
+ self.sync_with_ping()
+
def on_pong(self, conn, message):
self.last_pong = message
@@ -1344,7 +1561,11 @@ class NodeConn(asyncore.dispatcher):
b"reject": msg_reject,
b"mempool": msg_mempool,
b"feefilter": msg_feefilter,
- b"sendheaders": msg_sendheaders
+ b"sendheaders": msg_sendheaders,
+ b"sendcmpct": msg_sendcmpct,
+ b"cmpctblock": msg_cmpctblock,
+ b"getblocktxn": msg_getblocktxn,
+ b"blocktxn": msg_blocktxn
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
diff --git a/qa/rpc-tests/test_framework/siphash.py b/qa/rpc-tests/test_framework/siphash.py
new file mode 100644
index 0000000000..9c0574bd93
--- /dev/null
+++ b/qa/rpc-tests/test_framework/siphash.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+# Copyright (c) 2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#
+# siphash.py - Specialized SipHash-2-4 implementations
+#
+# This implements SipHash-2-4 for 256-bit integers.
+
+def rotl64(n, b):
+ return n >> (64 - b) | (n & ((1 << (64 - b)) - 1)) << b
+
+def siphash_round(v0, v1, v2, v3):
+ v0 = (v0 + v1) & ((1 << 64) - 1)
+ v1 = rotl64(v1, 13)
+ v1 ^= v0
+ v0 = rotl64(v0, 32)
+ v2 = (v2 + v3) & ((1 << 64) - 1)
+ v3 = rotl64(v3, 16)
+ v3 ^= v2
+ v0 = (v0 + v3) & ((1 << 64) - 1)
+ v3 = rotl64(v3, 21)
+ v3 ^= v0
+ v2 = (v2 + v1) & ((1 << 64) - 1)
+ v1 = rotl64(v1, 17)
+ v1 ^= v2
+ v2 = rotl64(v2, 32)
+ return (v0, v1, v2, v3)
+
+def siphash256(k0, k1, h):
+ n0 = h & ((1 << 64) - 1)
+ n1 = (h >> 64) & ((1 << 64) - 1)
+ n2 = (h >> 128) & ((1 << 64) - 1)
+ n3 = (h >> 192) & ((1 << 64) - 1)
+ v0 = 0x736f6d6570736575 ^ k0
+ v1 = 0x646f72616e646f6d ^ k1
+ v2 = 0x6c7967656e657261 ^ k0
+ v3 = 0x7465646279746573 ^ k1 ^ n0
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= n0
+ v3 ^= n1
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= n1
+ v3 ^= n2
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= n2
+ v3 ^= n3
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= n3
+ v3 ^= 0x2000000000000000
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0 ^= 0x2000000000000000
+ v2 ^= 0xFF
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
+ return v0 ^ v1 ^ v2 ^ v3
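
siphash256() above is consumed by the compact block helpers in mininode.py: the two SipHash keys are the first two little-endian 64-bit words of SHA256(serialized header || nonce), and only the low 6 bytes of the SipHash output are kept as the shortid (see get_siphash_keys() and calculate_shortid() earlier in this diff). A compact sketch of that pipeline, assuming header_bytes is the 80-byte serialized block header and txid_int is the transaction hash as an integer:

import hashlib
import struct

def shortid(header_bytes, nonce, txid_int):
    h = hashlib.sha256(header_bytes + struct.pack("<Q", nonce)).digest()
    k0 = struct.unpack("<Q", h[0:8])[0]
    k1 = struct.unpack("<Q", h[8:16])[0]
    return siphash256(k0, k1, txid_int) & 0x0000ffffffffffff  # keep low 48 bits
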
diff --git a/qa/rpc-tests/wallet-dump.py b/qa/rpc-tests/wallet-dump.py
index dd675f57fc..6028d2c20b 100755
--- a/qa/rpc-tests/wallet-dump.py
+++ b/qa/rpc-tests/wallet-dump.py
@@ -4,9 +4,52 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import *
-import os
-import shutil
+from test_framework.util import (start_nodes, start_node, assert_equal, bitcoind_processes)
+
+
+def read_dump(file_name, addrs, hd_master_addr_old):
+ """
+ Read the given dump, count the addrs that match, count change and reserve.
+ Also check that the old hd_master is inactive
+ """
+ with open(file_name) as inputfile:
+ found_addr = 0
+ found_addr_chg = 0
+ found_addr_rsv = 0
+ hd_master_addr_ret = None
+ for line in inputfile:
+ # only read non comment lines
+ if line[0] != "#" and len(line) > 10:
+ # split out some data
+ key_label, comment = line.split("#")
+ # key = key_label.split(" ")[0]
+ keytype = key_label.split(" ")[2]
+ if len(comment) > 1:
+ addr_keypath = comment.split(" addr=")[1]
+ addr = addr_keypath.split(" ")[0]
+ keypath = None
+ if keytype == "inactivehdmaster=1":
+ # ensure the old master is still available
+ assert(hd_master_addr_old == addr)
+ elif keytype == "hdmaster=1":
+ # ensure we have generated a new hd master key
+ assert(hd_master_addr_old != addr)
+ hd_master_addr_ret = addr
+ else:
+ keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
+
+ # count key types
+ for addrObj in addrs:
+ if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=":
+ found_addr += 1
+ break
+ elif keytype == "change=1":
+ found_addr_chg += 1
+ break
+ elif keytype == "reserve=1":
+ found_addr_rsv += 1
+ break
+ return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(BitcoinTestFramework):
@@ -15,106 +58,47 @@ class WalletDumpTest(BitcoinTestFramework):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
+ self.extra_args = [["-keypool=90"]]
def setup_network(self, split=False):
- extra_args = [["-keypool=100"]]
- self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
def run_test (self):
tmpdir = self.options.tmpdir
- #generate 20 addresses to compare against the dump
+ # generate 20 addresses to compare against the dump
test_addr_count = 20
addrs = []
for i in range(0,test_addr_count):
addr = self.nodes[0].getnewaddress()
vaddr= self.nodes[0].validateaddress(addr) #required to get hd keypath
addrs.append(vaddr)
+ # Should be a no-op:
+ self.nodes[0].keypoolrefill()
# dump unencrypted wallet
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
- #open file
- inputfile = open(tmpdir + "/node0/wallet.unencrypted.dump")
- found_addr = 0
- found_addr_chg = 0
- found_addr_rsv = 0
- hdmasteraddr = ""
- for line in inputfile:
- #only read non comment lines
- if line[0] != "#" and len(line) > 10:
- #split out some data
- keyLabel, comment = line.split("#")
- key = keyLabel.split(" ")[0]
- keytype = keyLabel.split(" ")[2]
- if len(comment) > 1:
- addrKeypath = comment.split(" addr=")[1]
- addr = addrKeypath.split(" ")[0]
- keypath = ""
- if keytype != "hdmaster=1":
- keypath = addrKeypath.rstrip().split("hdkeypath=")[1]
- else:
- #keep hd master for later comp.
- hdmasteraddr = addr
-
- #count key types
- for addrObj in addrs:
- if (addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label="):
- found_addr+=1
- break
- elif (keytype == "change=1"):
- found_addr_chg+=1
- break
- elif (keytype == "reserve=1"):
- found_addr_rsv+=1
- break
- assert(found_addr == test_addr_count) #all keys must be in the dump
- assert(found_addr_chg == 50) #50 blocks where mined
- assert(found_addr_rsv == 100) #100 reserve keys (keypool)
+ found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
+ read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
+ assert_equal(found_addr, test_addr_count) # all keys must be in the dump
+ assert_equal(found_addr_chg, 50) # 50 blocks where mined
+ assert_equal(found_addr_rsv, 90 + 1) # keypool size (TODO: fix off-by-one)
#encrypt wallet, restart, unlock and dump
self.nodes[0].encryptwallet('test')
bitcoind_processes[0].wait()
- self.nodes[0] = start_node(0, self.options.tmpdir)
+ self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
self.nodes[0].walletpassphrase('test', 10)
+ # Should be a no-op:
+ self.nodes[0].keypoolrefill()
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
- #open dump done with an encrypted wallet
- inputfile = open(tmpdir + "/node0/wallet.encrypted.dump")
- found_addr = 0
- found_addr_chg = 0
- found_addr_rsv = 0
- for line in inputfile:
- if line[0] != "#" and len(line) > 10:
- keyLabel, comment = line.split("#")
- key = keyLabel.split(" ")[0]
- keytype = keyLabel.split(" ")[2]
- if len(comment) > 1:
- addrKeypath = comment.split(" addr=")[1]
- addr = addrKeypath.split(" ")[0]
- keypath = ""
- if keytype != "hdmaster=1":
- keypath = addrKeypath.rstrip().split("hdkeypath=")[1]
- else:
- #ensure we have generated a new hd master key
- assert(hdmasteraddr != addr)
- if keytype == "inactivehdmaster=1":
- #ensure the old master is still available
- assert(hdmasteraddr == addr)
- for addrObj in addrs:
- if (addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label="):
- found_addr+=1
- break
- elif (keytype == "change=1"):
- found_addr_chg+=1
- break
- elif (keytype == "reserve=1"):
- found_addr_rsv+=1
- break
-
- assert(found_addr == test_addr_count)
- assert(found_addr_chg == 150) #old reserve keys are marked as change now
- assert(found_addr_rsv == 100) #keypool size
+ found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
+ read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
+ assert_equal(found_addr, test_addr_count)
+ assert_equal(found_addr_chg, 90 + 1 + 50) # old reserve keys are marked as change now
+ assert_equal(found_addr_rsv, 90 + 1) # keypool size (TODO: fix off-by-one)
if __name__ == '__main__':
WalletDumpTest().main ()
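
read_dump() above infers its fields purely from the dump-file layout: everything before "#" is "<key> <time> <keytype>", and the trailing comment carries " addr=<address> hdkeypath=<path>". A synthetic example of that parsing (hypothetical key and address, shown only to make the string splits above concrete):

sample = ("cTdummyPrivateKeyNotReal 2016-07-01T00:00:00Z label= "
          "# addr=mkDummyAddressNotReal hdkeypath=m/0'/0'/0'\n")
key_label, comment = sample.split("#")
assert key_label.split(" ")[2] == "label="
addr_keypath = comment.split(" addr=")[1]
assert addr_keypath.split(" ")[0] == "mkDummyAddressNotReal"
assert addr_keypath.rstrip().split("hdkeypath=")[1] == "m/0'/0'/0'"
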
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 86bef1e105..ea6e3aada2 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -71,11 +71,10 @@ public:
CMainParams() {
strNetworkID = "main";
consensus.nSubsidyHalvingInterval = 210000;
- consensus.nMajorityEnforceBlockUpgrade = 750;
- consensus.nMajorityRejectBlockOutdated = 950;
- consensus.nMajorityWindow = 1000;
consensus.BIP34Height = 227931;
consensus.BIP34Hash = uint256S("0x000000000000024b89b42a942fe0d9fea3bb44ab7bd1b19115dd6a759c0808b8");
+ consensus.BIP65Height = 388381; // 000000000000000004c2b624ed5d7756c508d90fd0da2c7c679febfa6c4735f0
+ consensus.BIP66Height = 363725; // 00000000000000000379eaa19dce8c9b722d46ae6a57c2f1a988119488b50931
consensus.powLimit = uint256S("00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
consensus.nPowTargetTimespan = 14 * 24 * 60 * 60; // two weeks
consensus.nPowTargetSpacing = 10 * 60;
@@ -167,11 +166,10 @@ public:
CTestNetParams() {
strNetworkID = "test";
consensus.nSubsidyHalvingInterval = 210000;
- consensus.nMajorityEnforceBlockUpgrade = 51;
- consensus.nMajorityRejectBlockOutdated = 75;
- consensus.nMajorityWindow = 100;
consensus.BIP34Height = 21111;
consensus.BIP34Hash = uint256S("0x0000000023b3a96d3484e5abb3755c413e7d41500f8e2a5c3f0dd01299cd8ef8");
+ consensus.BIP65Height = 581885; // 00000000007f6655f22f98e72ed80d8b06dc761d5da09df0fa1dc4be4f861eb6
+ consensus.BIP66Height = 330776; // 000000002104c8c45e99a8853285a3b592602a3ccde2b832481da85e9e4ba182
consensus.powLimit = uint256S("00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
consensus.nPowTargetTimespan = 14 * 24 * 60 * 60; // two weeks
consensus.nPowTargetSpacing = 10 * 60;
@@ -247,11 +245,10 @@ public:
CRegTestParams() {
strNetworkID = "regtest";
consensus.nSubsidyHalvingInterval = 150;
- consensus.nMajorityEnforceBlockUpgrade = 750;
- consensus.nMajorityRejectBlockOutdated = 950;
- consensus.nMajorityWindow = 1000;
- consensus.BIP34Height = -1; // BIP34 has not necessarily activated on regtest
+ consensus.BIP34Height = 100000000; // BIP34 has not activated on regtest (far in the future so block v1 are not rejected in tests)
consensus.BIP34Hash = uint256();
+ consensus.BIP65Height = 1351; // BIP65 activated on regtest (Used in rpc activation tests)
+ consensus.BIP66Height = 1251; // BIP66 activated on regtest (Used in rpc activation tests)
consensus.powLimit = uint256S("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
consensus.nPowTargetTimespan = 14 * 24 * 60 * 60; // two weeks
consensus.nPowTargetSpacing = 10 * 60;
@@ -303,6 +300,12 @@ public:
base58Prefixes[EXT_PUBLIC_KEY] = boost::assign::list_of(0x04)(0x35)(0x87)(0xCF).convert_to_container<std::vector<unsigned char> >();
base58Prefixes[EXT_SECRET_KEY] = boost::assign::list_of(0x04)(0x35)(0x83)(0x94).convert_to_container<std::vector<unsigned char> >();
}
+
+ void UpdateBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout)
+ {
+ consensus.vDeployments[d].nStartTime = nStartTime;
+ consensus.vDeployments[d].nTimeout = nTimeout;
+ }
};
static CRegTestParams regTestParams;
@@ -330,4 +333,9 @@ void SelectParams(const std::string& network)
SelectBaseParams(network);
pCurrentParams = &Params(network);
}
+
+void UpdateRegtestBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout)
+{
+ regTestParams.UpdateBIP9Parameters(d, nStartTime, nTimeout);
+}
diff --git a/src/chainparams.h b/src/chainparams.h
index 638893e9ad..0c3820b7c6 100644
--- a/src/chainparams.h
+++ b/src/chainparams.h
@@ -112,4 +112,9 @@ CChainParams& Params(const std::string& chain);
*/
void SelectParams(const std::string& chain);
+/**
+ * Allows modifying the BIP9 regtest parameters.
+ */
+void UpdateRegtestBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout);
+
#endif // BITCOIN_CHAINPARAMS_H
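The UpdateRegtestBIP9Parameters() hook declared above (and implemented in chainparams.cpp) is the seam that lets tests move a BIP9 deployment's signalling window. As a rough sketch only, not part of this commit, and assuming the Consensus::DEPLOYMENT_CSV enumerator and the CBaseChainParams::REGTEST constant already present in this tree, a caller could open the CSV window immediately and push its timeout far into the future:

    // Illustrative sketch; matches the signature added in chainparams.h above.
    SelectParams(CBaseChainParams::REGTEST);
    // Start signalling at timestamp 0 and (practically) never time out.
    UpdateRegtestBIP9Parameters(Consensus::DEPLOYMENT_CSV, 0, 999999999999LL);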
diff --git a/src/consensus/params.h b/src/consensus/params.h
index 822ec87d69..5b2f49184f 100644
--- a/src/consensus/params.h
+++ b/src/consensus/params.h
@@ -39,13 +39,13 @@ struct BIP9Deployment {
struct Params {
uint256 hashGenesisBlock;
int nSubsidyHalvingInterval;
- /** Used to check majorities for block version upgrade */
- int nMajorityEnforceBlockUpgrade;
- int nMajorityRejectBlockOutdated;
- int nMajorityWindow;
/** Block height and hash at which BIP34 becomes active */
int BIP34Height;
uint256 BIP34Hash;
+ /** Block height at which BIP65 becomes active */
+ int BIP65Height;
+ /** Block height at which BIP66 becomes active */
+ int BIP66Height;
/**
* Minimum blocks including miner confirmation of the total of 2016 blocks in a retargetting period,
* (nPowTargetTimespan / nPowTargetSpacing) which is also used for BIP9 deployments.
diff --git a/src/init.cpp b/src/init.cpp
index 8d4a2cafbf..4d9de6cfc8 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -410,6 +410,7 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT));
strUsage += HelpMessageOpt("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT));
strUsage += HelpMessageOpt("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT));
+ strUsage += HelpMessageOpt("-bip9params=deployment:start:end", "Use given start/end times for specified BIP9 deployment (regtest-only)");
}
string debugCategories = "addrman, alert, bench, coindb, db, http, libevent, lock, mempool, mempoolrej, net, proxy, prune, rand, reindex, rpc, selectcoins, tor, zmq"; // Don't translate these and qt below
if (mode == HMM_BITCOIN_QT)
@@ -510,6 +511,21 @@ static void BlockNotifyCallback(bool initialSync, const CBlockIndex *pBlockIndex
boost::thread t(runCommand, strCmd); // thread runs free
}
+static bool fHaveGenesis = false;
+static boost::mutex cs_GenesisWait;
+static CConditionVariable condvar_GenesisWait;
+
+static void BlockNotifyGenesisWait(bool, const CBlockIndex *pBlockIndex)
+{
+ if (pBlockIndex != NULL) {
+ {
+ boost::unique_lock<boost::mutex> lock_GenesisWait(cs_GenesisWait);
+ fHaveGenesis = true;
+ }
+ condvar_GenesisWait.notify_all();
+ }
+}
+
struct CImportingNow
{
CImportingNow() {
@@ -975,6 +991,41 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
fEnableReplacement = (std::find(vstrReplacementModes.begin(), vstrReplacementModes.end(), "fee") != vstrReplacementModes.end());
}
+ if (!mapMultiArgs["-bip9params"].empty()) {
+ // Allow overriding BIP9 parameters for testing
+ if (!Params().MineBlocksOnDemand()) {
+ return InitError("BIP9 parameters may only be overridden on regtest.");
+ }
+ const vector<string>& deployments = mapMultiArgs["-bip9params"];
+ for (auto i : deployments) {
+ std::vector<std::string> vDeploymentParams;
+ boost::split(vDeploymentParams, i, boost::is_any_of(":"));
+ if (vDeploymentParams.size() != 3) {
+ return InitError("BIP9 parameters malformed, expecting deployment:start:end");
+ }
+ int64_t nStartTime, nTimeout;
+ if (!ParseInt64(vDeploymentParams[1], &nStartTime)) {
+ return InitError(strprintf("Invalid nStartTime (%s)", vDeploymentParams[1]));
+ }
+ if (!ParseInt64(vDeploymentParams[2], &nTimeout)) {
+ return InitError(strprintf("Invalid nTimeout (%s)", vDeploymentParams[2]));
+ }
+ bool found = false;
+ for (int j=0; j<(int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j)
+ {
+ if (vDeploymentParams[0].compare(VersionBitsDeploymentInfo[j].name) == 0) {
+ UpdateRegtestBIP9Parameters(Consensus::DeploymentPos(j), nStartTime, nTimeout);
+ found = true;
+ LogPrintf("Setting BIP9 activation parameters for %s to start=%ld, timeout=%ld\n", vDeploymentParams[0], nStartTime, nTimeout);
+ break;
+ }
+ }
+ if (!found) {
+ return InitError(strprintf("Invalid deployment (%s)", vDeploymentParams[0]));
+ }
+ }
+ }
+
// ********************************************************* Step 4: application initialization: dir lock, daemonize, pidfile, debug log
// Initialize elliptic curve code
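The hunk above feeds each deployment:start:end triple through ParseInt64 and into UpdateRegtestBIP9Parameters(). The deployment name has to match an entry in VersionBitsDeploymentInfo; purely as an assumed example ("csv" is used here illustratively), a regtest node could then be started as:

    bitcoind -regtest -bip9params=csv:0:999999999999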
@@ -1286,7 +1337,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
break;
}
- if (!fReindex) {
+ if (!fReindex && chainActive.Tip() != NULL) {
uiInterface.InitMessage(_("Rewinding blocks..."));
if (!RewindBlockIndex(chainparams)) {
strLoadError = _("Unable to rewind the database to a pre-fork state. You will need to redownload the blockchain");
@@ -1403,6 +1454,17 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
// ********************************************************* Step 10: import blocks
+ if (!CheckDiskSpace())
+ return false;
+
+ // Either install a handler to notify us when genesis activates, or set fHaveGenesis directly.
+ // No locking, as this happens before any background thread is started.
+ if (chainActive.Tip() == NULL) {
+ uiInterface.NotifyBlockTip.connect(BlockNotifyGenesisWait);
+ } else {
+ fHaveGenesis = true;
+ }
+
if (mapArgs.count("-blocknotify"))
uiInterface.NotifyBlockTip.connect(BlockNotifyCallback);
@@ -1412,26 +1474,20 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
BOOST_FOREACH(const std::string& strFile, mapMultiArgs["-loadblock"])
vImportFiles.push_back(strFile);
}
+
threadGroup.create_thread(boost::bind(&ThreadImport, vImportFiles));
// Wait for genesis block to be processed
- bool fHaveGenesis = false;
- while (!fHaveGenesis && !fRequestShutdown) {
- {
- LOCK(cs_main);
- fHaveGenesis = (chainActive.Tip() != NULL);
- }
-
- if (!fHaveGenesis) {
- MilliSleep(10);
+ {
+ boost::unique_lock<boost::mutex> lock(cs_GenesisWait);
+ while (!fHaveGenesis) {
+ condvar_GenesisWait.wait(lock);
}
+ uiInterface.NotifyBlockTip.disconnect(BlockNotifyGenesisWait);
}
// ********************************************************* Step 11: start node
- if (!CheckDiskSpace())
- return false;
-
if (!strErrors.str().empty())
return InitError(strErrors.str());
diff --git a/src/main.cpp b/src/main.cpp
index 46118fff81..3a07190a62 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -106,11 +106,6 @@ map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
map<COutPoint, set<map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-/**
- * Returns true if there are nRequired or more blocks of minVersion or above
- * in the last Consensus::Params::nMajorityWindow blocks, starting at pstart and going backwards.
- */
-static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams);
static void CheckBlockIndex(const Consensus::Params& consensusParams);
/** Constant stuff for coinbase transactions we create: */
@@ -2372,15 +2367,13 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;
- // Start enforcing the DERSIG (BIP66) rules, for block.nVersion=3 blocks,
- // when 75% of the network has upgraded:
- if (block.nVersion >= 3 && IsSuperMajority(3, pindex->pprev, chainparams.GetConsensus().nMajorityEnforceBlockUpgrade, chainparams.GetConsensus())) {
+ // Start enforcing the DERSIG (BIP66) rule
+ if (pindex->nHeight >= chainparams.GetConsensus().BIP66Height) {
flags |= SCRIPT_VERIFY_DERSIG;
}
- // Start enforcing CHECKLOCKTIMEVERIFY, (BIP65) for block.nVersion=4
- // blocks, when 75% of the network has upgraded:
- if (block.nVersion >= 4 && IsSuperMajority(4, pindex->pprev, chainparams.GetConsensus().nMajorityEnforceBlockUpgrade, chainparams.GetConsensus())) {
+ // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
+ if (pindex->nHeight >= chainparams.GetConsensus().BIP65Height) {
flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
}
@@ -3504,6 +3497,7 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev, int64_t nAdjustedTime)
{
+ const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;
// Check proof of work
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work");
@@ -3517,10 +3511,12 @@ bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& sta
return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");
// Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
- for (int32_t version = 2; version < 5; ++version) // check for version 2, 3 and 4 upgrades
- if (block.nVersion < version && IsSuperMajority(version, pindexPrev, consensusParams.nMajorityRejectBlockOutdated, consensusParams))
- return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", version - 1),
- strprintf("rejected nVersion=0x%08x block", version - 1));
+ // check for version 2, 3 and 4 upgrades
+ if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
+ (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
+ (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
+ return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion),
+ strprintf("rejected nVersion=0x%08x block", block.nVersion));
return true;
}
@@ -3546,9 +3542,8 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Co
}
}
- // Enforce block.nVersion=2 rule that the coinbase starts with serialized block height
- // if 750 of the last 1,000 blocks are version 2 or greater (51/100 if testnet):
- if (block.nVersion >= 2 && IsSuperMajority(2, pindexPrev, consensusParams.nMajorityEnforceBlockUpgrade, consensusParams))
+ // Enforce rule that the coinbase starts with serialized block height
+ if (nHeight >= consensusParams.BIP34Height)
{
CScript expect = CScript() << nHeight;
if (block.vtx[0].vin[0].scriptSig.size() < expect.size() ||
@@ -3722,19 +3717,6 @@ static bool AcceptBlock(const CBlock& block, CValidationState& state, const CCha
return true;
}
-static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams)
-{
- unsigned int nFound = 0;
- for (int i = 0; i < consensusParams.nMajorityWindow && nFound < nRequired && pstart != NULL; i++)
- {
- if (pstart->nVersion >= minVersion)
- ++nFound;
- pstart = pstart->pprev;
- }
- return (nFound >= nRequired);
-}
-
-
bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp)
{
{
@@ -4331,8 +4313,6 @@ bool InitBlockIndex(const CChainParams& chainparams)
CBlockIndex *pindex = AddToBlockIndex(block);
if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
return error("LoadBlockIndex(): genesis block not accepted");
- if (!ActivateBestChain(state, chainparams, &block))
- return error("LoadBlockIndex(): genesis block cannot be activated");
// Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
return FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
} catch (const std::runtime_error& e) {
diff --git a/src/net.cpp b/src/net.cpp
index 4cbc43e4d8..4bbe5059df 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -982,11 +982,11 @@ static bool AttemptToEvictConnection() {
uint64_t naMostConnections;
unsigned int nMostConnections = 0;
int64_t nMostConnectionsTime = 0;
- std::map<uint64_t, std::vector<NodeEvictionCandidate> > mapAddrCounts;
+ std::map<uint64_t, std::vector<NodeEvictionCandidate> > mapNetGroupNodes;
BOOST_FOREACH(const NodeEvictionCandidate &node, vEvictionCandidates) {
- mapAddrCounts[node.nKeyedNetGroup].push_back(node);
- int64_t grouptime = mapAddrCounts[node.nKeyedNetGroup][0].nTimeConnected;
- size_t groupsize = mapAddrCounts[node.nKeyedNetGroup].size();
+ mapNetGroupNodes[node.nKeyedNetGroup].push_back(node);
+ int64_t grouptime = mapNetGroupNodes[node.nKeyedNetGroup][0].nTimeConnected;
+ size_t groupsize = mapNetGroupNodes[node.nKeyedNetGroup].size();
if (groupsize > nMostConnections || (groupsize == nMostConnections && grouptime > nMostConnectionsTime)) {
nMostConnections = groupsize;
@@ -996,7 +996,7 @@ static bool AttemptToEvictConnection() {
}
// Reduce to the network group with the most connections
- vEvictionCandidates = std::move(mapAddrCounts[naMostConnections]);
+ vEvictionCandidates = std::move(mapNetGroupNodes[naMostConnections]);
// Disconnect from the network group with the most connections
NodeId evicted = vEvictionCandidates.front().id;
@@ -2050,6 +2050,8 @@ void StartNode(boost::thread_group& threadGroup, CScheduler& scheduler)
DumpBanlist();
}
+ uiInterface.InitMessage(_("Starting network threads..."));
+
fAddressesInitialized = true;
if (semOutbound == NULL) {
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 9dc896b7af..e3c32d905a 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -817,22 +817,23 @@ UniValue verifychain(const UniValue& params, bool fHelp)
}
/** Implementation of IsSuperMajority with better feedback */
-static UniValue SoftForkMajorityDesc(int minVersion, CBlockIndex* pindex, int nRequired, const Consensus::Params& consensusParams)
+static UniValue SoftForkMajorityDesc(int version, CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
- int nFound = 0;
- CBlockIndex* pstart = pindex;
- for (int i = 0; i < consensusParams.nMajorityWindow && pstart != NULL; i++)
+ UniValue rv(UniValue::VOBJ);
+ bool activated = false;
+ switch(version)
{
- if (pstart->nVersion >= minVersion)
- ++nFound;
- pstart = pstart->pprev;
+ case 2:
+ activated = pindex->nHeight >= consensusParams.BIP34Height;
+ break;
+ case 3:
+ activated = pindex->nHeight >= consensusParams.BIP66Height;
+ break;
+ case 4:
+ activated = pindex->nHeight >= consensusParams.BIP65Height;
+ break;
}
-
- UniValue rv(UniValue::VOBJ);
- rv.push_back(Pair("status", nFound >= nRequired));
- rv.push_back(Pair("found", nFound));
- rv.push_back(Pair("required", nRequired));
- rv.push_back(Pair("window", consensusParams.nMajorityWindow));
+ rv.push_back(Pair("status", activated));
return rv;
}
@@ -841,8 +842,7 @@ static UniValue SoftForkDesc(const std::string &name, int version, CBlockIndex*
UniValue rv(UniValue::VOBJ);
rv.push_back(Pair("id", name));
rv.push_back(Pair("version", version));
- rv.push_back(Pair("enforce", SoftForkMajorityDesc(version, pindex, consensusParams.nMajorityEnforceBlockUpgrade, consensusParams)));
- rv.push_back(Pair("reject", SoftForkMajorityDesc(version, pindex, consensusParams.nMajorityRejectBlockOutdated, consensusParams)));
+ rv.push_back(Pair("reject", SoftForkMajorityDesc(version, pindex, consensusParams)));
return rv;
}
@@ -897,13 +897,9 @@ UniValue getblockchaininfo(const UniValue& params, bool fHelp)
" {\n"
" \"id\": \"xxxx\", (string) name of softfork\n"
" \"version\": xx, (numeric) block version\n"
- " \"enforce\": { (object) progress toward enforcing the softfork rules for new-version blocks\n"
+ " \"reject\": { (object) progress toward rejecting pre-softfork blocks\n"
" \"status\": xx, (boolean) true if threshold reached\n"
- " \"found\": xx, (numeric) number of blocks with the new version found\n"
- " \"required\": xx, (numeric) number of blocks required to trigger\n"
- " \"window\": xx, (numeric) maximum size of examined window of recent blocks\n"
" },\n"
- " \"reject\": { ... } (object) progress toward rejecting pre-softfork blocks (same fields as \"enforce\")\n"
" }, ...\n"
" ],\n"
" \"bip9_softforks\": { (object) status of BIP9 softforks in progress\n"
diff --git a/src/test/README.md b/src/test/README.md
index b2d6be14f1..61462642bf 100644
--- a/src/test/README.md
+++ b/src/test/README.md
@@ -5,18 +5,15 @@ sense to simply use this framework rather than require developers to
configure some other framework (we want as few impediments to creating
unit tests as possible).
-The build system is setup to compile an executable called "test_bitcoin"
+The build system is set up to compile an executable called `test_bitcoin`
that runs all of the unit tests. The main source file is called
-test_bitcoin.cpp, which simply includes other files that contain the
-actual unit tests (outside of a couple required preprocessor
-directives). The pattern is to create one test file for each class or
-source file for which you want to create unit tests. The file naming
-convention is "<source_filename>_tests.cpp" and such files should wrap
-their tests in a test suite called "<source_filename>_tests". For an
-examples of this pattern, examine uint160_tests.cpp and
-uint256_tests.cpp.
-
-Add the source files to /src/Makefile.test.include to add them to the build.
+test_bitcoin.cpp. To add a new unit test file to our test suite you need
+to add the file to `src/Makefile.test.include`. The pattern is to create
+one test file for each class or source file for which you want to create
+unit tests. The file naming convention is `<source_filename>_tests.cpp`
+and such files should wrap their tests in a test suite
+called `<source_filename>_tests`. For an example of this pattern,
+examine `uint256_tests.cpp`.
For further reading, I found the following website to be helpful in
explaining how the boost unit test framework works:
@@ -31,5 +28,5 @@ example, to run just the getarg_tests verbosely:
test_bitcoin --run_test=getarg_tests/doubledash
-Run test_bitcoin --help for the full list.
+Run `test_bitcoin --help` for the full list.
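Following the convention the rewritten README describes, a new test file is a single translation unit, named after the source file it covers and listed in `src/Makefile.test.include`. A minimal sketch, illustrative only and assuming the existing BasicTestingSetup fixture from test/test_bitcoin.h, for a hypothetical foo.cpp would be foo_tests.cpp:

    // foo_tests.cpp -- hypothetical example of the <source_filename>_tests.cpp pattern
    #include "test/test_bitcoin.h"

    #include <boost/test/unit_test.hpp>

    // The suite name mirrors the file name, as the README describes.
    BOOST_FIXTURE_TEST_SUITE(foo_tests, BasicTestingSetup)

    BOOST_AUTO_TEST_CASE(foo_sanity)
    {
        BOOST_CHECK_EQUAL(2 + 2, 4);
    }

    BOOST_AUTO_TEST_SUITE_END()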
diff --git a/src/test/hash_tests.cpp b/src/test/hash_tests.cpp
index 82d61209b5..fa9624f13d 100644
--- a/src/test/hash_tests.cpp
+++ b/src/test/hash_tests.cpp
@@ -122,6 +122,10 @@ BOOST_AUTO_TEST_CASE(siphash)
hasher3.Write(uint64_t(x)|(uint64_t(x+1)<<8)|(uint64_t(x+2)<<16)|(uint64_t(x+3)<<24)|
(uint64_t(x+4)<<32)|(uint64_t(x+5)<<40)|(uint64_t(x+6)<<48)|(uint64_t(x+7)<<56));
}
+
+ CHashWriter ss(SER_DISK, CLIENT_VERSION);
+ ss << CTransaction();
+ BOOST_CHECK_EQUAL(SipHashUint256(1, 2, ss.GetHash()), 0x79751e980c2a0a35ULL);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp
index 856f9b8423..056f2982cf 100644
--- a/src/test/test_bitcoin.cpp
+++ b/src/test/test_bitcoin.cpp
@@ -60,6 +60,11 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha
pcoinsdbview = new CCoinsViewDB(1 << 23, true);
pcoinsTip = new CCoinsViewCache(pcoinsdbview);
InitBlockIndex(chainparams);
+ {
+ CValidationState state;
+ bool ok = ActivateBestChain(state, chainparams);
+ BOOST_CHECK(ok);
+ }
nScriptCheckThreads = 3;
for (int i=0; i < nScriptCheckThreads-1; i++)
threadGroup.create_thread(&ThreadScriptCheck);