-rw-r--r--  doc/bips.md                              |   1
-rw-r--r--  doc/release-notes.md                     |   9
-rwxr-xr-x  qa/pull-tester/rpc-tests.py              |   1
-rwxr-xr-x  qa/rpc-tests/sendheaders.py              | 519
-rwxr-xr-x  qa/rpc-tests/test_framework/mininode.py  |  32
-rw-r--r--  src/Makefile.am                          |   2
-rw-r--r--  src/Makefile.test.include                |   1
-rw-r--r--  src/chain.cpp                            |   3
-rw-r--r--  src/chainparams.cpp                      |   3
-rw-r--r--  src/consensus/merkle.cpp                 | 172
-rw-r--r--  src/consensus/merkle.h                   |  32
-rw-r--r--  src/main.cpp                             | 251
-rw-r--r--  src/main.h                               |   3
-rw-r--r--  src/miner.cpp                            |   3
-rw-r--r--  src/net.h                                |   9
-rw-r--r--  src/primitives/block.cpp                 |  63
-rw-r--r--  src/primitives/block.h                   |   6
-rw-r--r--  src/qt/optionsdialog.cpp                 |  75
-rw-r--r--  src/qt/optionsdialog.h                   |  23
-rw-r--r--  src/qt/qvalidatedlineedit.cpp            |  16
-rw-r--r--  src/qt/qvalidatedlineedit.h              |   4
-rw-r--r--  src/test/main_tests.cpp                  |   1
-rw-r--r--  src/test/merkle_tests.cpp                | 136
-rw-r--r--  src/test/miner_tests.cpp                 |   3
-rw-r--r--  src/test/pmt_tests.cpp                   |   3
-rw-r--r--  src/version.h                            |   5
26 files changed, 1223 insertions, 153 deletions
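
This change implements BIP 130: once a peer has negotiated protocol version >= 70012 and sent "sendheaders", new blocks are announced to it with a headers message rather than an inv whenever possible. Before the diff itself, here is a rough, illustrative model of the per-peer decision the patched SendMessages() makes; the function and callback names are hypothetical, and only MAX_BLOCKS_TO_ANNOUNCE (8) is a constant this patch actually adds to src/main.h.

MAX_BLOCKS_TO_ANNOUNCE = 8

def choose_block_announcement(peer_prefers_headers, hashes_to_announce,
                              peer_has_header, on_active_chain, parent_of):
    """Decide how to announce newly connected blocks to one peer.

    hashes_to_announce plays the role of vBlockHashesToAnnounce: hashes of
    blocks connected since the last announcement, oldest first, ending at
    the current tip. Returns ('headers', hashes) or ('inv', [tip_hash]).
    """
    tip_only = hashes_to_announce[-1:]
    if not peer_prefers_headers or len(hashes_to_announce) > MAX_BLOCKS_TO_ANNOUNCE:
        return 'inv', tip_only              # peer wants invs, or the reorg is too large
    headers = []
    found_start = False
    for h in hashes_to_announce:
        if not on_active_chain(h):
            return 'inv', tip_only          # we reorged away from this block
        if found_start:
            headers.append(h)               # send everything past the first new header
        elif peer_has_header(h):
            continue                        # peer already has it; keep scanning
        elif parent_of(h) is None or peer_has_header(parent_of(h)):
            found_start = True              # first header the peer lacks, and it connects
            headers.append(h)
        else:
            return 'inv', tip_only          # header would not connect for this peer
    return 'headers', headers

The inv fallback is deliberately conservative: announcing only the tip lets the peer's normal getheaders-based synchronization recover whatever it is missing, which is exactly the behavior Part 3 of the new sendheaders.py test exercises.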
diff --git a/doc/bips.md b/doc/bips.md index c780e2dde0..962b216123 100644 --- a/doc/bips.md +++ b/doc/bips.md @@ -18,3 +18,4 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.12.0**): * [`BIP 66`](https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki): The strict DER rules and associated version 3 blocks have been implemented since **v0.10.0** ([PR #5713](https://github.com/bitcoin/bitcoin/pull/5713)). * [`BIP 70`](https://github.com/bitcoin/bips/blob/master/bip-0070.mediawiki) [`71`](https://github.com/bitcoin/bips/blob/master/bip-0071.mediawiki) [`72`](https://github.com/bitcoin/bips/blob/master/bip-0072.mediawiki): Payment Protocol support has been available in Bitcoin Core GUI since **v0.9.0** ([PR #5216](https://github.com/bitcoin/bitcoin/pull/5216)). * [`BIP 111`](https://github.com/bitcoin/bips/blob/master/bip-0111.mediawiki): `NODE_BLOOM` service bit added, but only enforced for peer versions `>=70011` as of **v0.12.0** ([PR #6579](https://github.com/bitcoin/bitcoin/pull/6579)). +* [`BIP 130`](https://github.com/bitcoin/bips/blob/master/bip-0130.mediawiki): direct headers announcement is negotiated with peer versions `>=70012` as of **v0.12.0** ([PR 6494](https://github.com/bitcoin/bitcoin/pull/6494)). diff --git a/doc/release-notes.md b/doc/release-notes.md index 009baaed51..f7958381b6 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -206,6 +206,15 @@ Libsecp256k1 has undergone very extensive testing and validation. A side effect of this change is that libconsensus no longer depends on OpenSSL. +Direct headers announcement (BIP 130) +------------------------------------- + +Between compatible peers, BIP 130 direct headers announcement is used. This +means that blocks are advertized by announcing their headers directly, instead +of just announcing the hash. In a reorganization, all new headers are sent, +instead of just the new tip. This can often prevent an extra roundtrip before +the actual block is downloaded. + 0.12.0 Change log ================= diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py index 3d156a2e7b..5004b09c18 100755 --- a/qa/pull-tester/rpc-tests.py +++ b/qa/pull-tester/rpc-tests.py @@ -91,6 +91,7 @@ testScripts = [ 'p2p-fullblocktest.py', 'blockchain.py', 'disablewallet.py', + 'sendheaders.py', ] testScriptsExt = [ 'bip65-cltv.py', diff --git a/qa/rpc-tests/sendheaders.py b/qa/rpc-tests/sendheaders.py new file mode 100755 index 0000000000..d7f4292090 --- /dev/null +++ b/qa/rpc-tests/sendheaders.py @@ -0,0 +1,519 @@ +#!/usr/bin/env python2 +# +# Distributed under the MIT/X11 software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +# + +from test_framework.mininode import * +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import * +import time +from test_framework.blocktools import create_block, create_coinbase + +''' +SendHeadersTest -- test behavior of headers messages to announce blocks. + +Setup: + +- Two nodes, two p2p connections to node0. One p2p connection should only ever + receive inv's (omitted from testing description below, this is our control). + Second node is used for creating reorgs. + +Part 1: No headers announcements before "sendheaders" +a. node mines a block [expect: inv] + send getdata for the block [expect: block] +b. node mines another block [expect: inv] + send getheaders and getdata [expect: headers, then block] +c. 
node mines another block [expect: inv] + peer mines a block, announces with header [expect: getdata] +d. node mines another block [expect: inv] + +Part 2: After "sendheaders", headers announcements should generally work. +a. peer sends sendheaders [expect: no response] + peer sends getheaders with current tip [expect: no response] +b. node mines a block [expect: tip header] +c. for N in 1, ..., 10: + * for announce-type in {inv, header} + - peer mines N blocks, announces with announce-type + [ expect: getheaders/getdata or getdata, deliver block(s) ] + - node mines a block [ expect: 1 header ] + +Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer. +- For response-type in {inv, getheaders} + * node mines a 7 block reorg [ expect: headers announcement of 8 blocks ] + * node mines an 8-block reorg [ expect: inv at tip ] + * peer responds with getblocks/getdata [expect: inv, blocks ] + * node mines another block [ expect: inv at tip, peer sends getdata, expect: block ] + * node mines another block at tip [ expect: inv ] + * peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers] + * peer requests block [ expect: block ] + * node mines another block at tip [ expect: inv, peer sends getdata, expect: block ] + * peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block] + * node mines 1 block [expect: 1 header, peer responds with getdata] + +Part 4: Test direct fetch behavior +a. Announce 2 old block headers. + Expect: no getdata requests. +b. Announce 3 new blocks via 1 headers message. + Expect: one getdata request for all 3 blocks. + (Send blocks.) +c. Announce 1 header that forks off the last two blocks. + Expect: no response. +d. Announce 1 more header that builds on that fork. + Expect: one getdata request for two blocks. +e. Announce 16 more headers that build on that fork. + Expect: getdata request for 14 more blocks. +f. Announce 1 more header that builds on that fork. + Expect: no response. 
+''' + +class BaseNode(NodeConnCB): + def __init__(self): + NodeConnCB.__init__(self) + self.create_callback_map() + self.connection = None + self.last_inv = None + self.last_headers = None + self.last_block = None + self.ping_counter = 1 + self.last_pong = msg_pong(0) + self.last_getdata = None + self.sleep_time = 0.05 + self.block_announced = False + + def clear_last_announcement(self): + with mininode_lock: + self.block_announced = False + self.last_inv = None + self.last_headers = None + + def add_connection(self, conn): + self.connection = conn + + # Request data for a list of block hashes + def get_data(self, block_hashes): + msg = msg_getdata() + for x in block_hashes: + msg.inv.append(CInv(2, x)) + self.connection.send_message(msg) + + def get_headers(self, locator, hashstop): + msg = msg_getheaders() + msg.locator.vHave = locator + msg.hashstop = hashstop + self.connection.send_message(msg) + + def send_block_inv(self, blockhash): + msg = msg_inv() + msg.inv = [CInv(2, blockhash)] + self.connection.send_message(msg) + + # Wrapper for the NodeConn's send_message function + def send_message(self, message): + self.connection.send_message(message) + + def on_inv(self, conn, message): + self.last_inv = message + self.block_announced = True + + def on_headers(self, conn, message): + self.last_headers = message + self.block_announced = True + + def on_block(self, conn, message): + self.last_block = message.block + self.last_block.calc_sha256() + + def on_getdata(self, conn, message): + self.last_getdata = message + + def on_pong(self, conn, message): + self.last_pong = message + + # Test whether the last announcement we received had the + # right header or the right inv + # inv and headers should be lists of block hashes + def check_last_announcement(self, headers=None, inv=None): + expect_headers = headers if headers != None else [] + expect_inv = inv if inv != None else [] + test_function = lambda: self.block_announced + self.sync(test_function) + with mininode_lock: + self.block_announced = False + + success = True + compare_inv = [] + if self.last_inv != None: + compare_inv = [x.hash for x in self.last_inv.inv] + if compare_inv != expect_inv: + success = False + + hash_headers = [] + if self.last_headers != None: + # treat headers as a list of block hashes + hash_headers = [ x.sha256 for x in self.last_headers.headers ] + if hash_headers != expect_headers: + success = False + + self.last_inv = None + self.last_headers = None + return success + + # Syncing helpers + def sync(self, test_function, timeout=60): + while timeout > 0: + with mininode_lock: + if test_function(): + return + time.sleep(self.sleep_time) + timeout -= self.sleep_time + raise AssertionError("Sync failed to complete") + + def sync_with_ping(self, timeout=60): + self.send_message(msg_ping(nonce=self.ping_counter)) + test_function = lambda: self.last_pong.nonce == self.ping_counter + self.sync(test_function, timeout) + self.ping_counter += 1 + return + + def wait_for_block(self, blockhash, timeout=60): + test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash + self.sync(test_function, timeout) + return + + def wait_for_getdata(self, hash_list, timeout=60): + if hash_list == []: + return + + test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list + self.sync(test_function, timeout) + return + + def send_header_for_blocks(self, new_blocks): + headers_message = msg_headers() + headers_message.headers = [ CBlockHeader(b) for b in 
new_blocks ] + self.send_message(headers_message) + + def send_getblocks(self, locator): + getblocks_message = msg_getblocks() + getblocks_message.locator.vHave = locator + self.send_message(getblocks_message) + +# InvNode: This peer should only ever receive inv's, because it doesn't ever send a +# "sendheaders" message. +class InvNode(BaseNode): + def __init__(self): + BaseNode.__init__(self) + +# TestNode: This peer is the one we use for most of the testing. +class TestNode(BaseNode): + def __init__(self): + BaseNode.__init__(self) + +class SendHeadersTest(BitcoinTestFramework): + def setup_chain(self): + initialize_chain_clean(self.options.tmpdir, 2) + + def setup_network(self): + self.nodes = [] + self.nodes = start_nodes(2, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2) + connect_nodes(self.nodes[0], 1) + + # mine count blocks and return the new tip + def mine_blocks(self, count): + self.nodes[0].generate(count) + return int(self.nodes[0].getbestblockhash(), 16) + + # mine a reorg that invalidates length blocks (replacing them with + # length+1 blocks). + # peers is the p2p nodes we're using; we clear their state after the + # to-be-reorged-out blocks are mined, so that we don't break later tests. + # return the list of block hashes newly mined + def mine_reorg(self, length, peers): + self.nodes[0].generate(length) # make sure all invalidated blocks are node0's + sync_blocks(self.nodes, wait=0.1) + [x.clear_last_announcement() for x in peers] + + tip_height = self.nodes[1].getblockcount() + hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1)) + self.nodes[1].invalidateblock(hash_to_invalidate) + all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain + sync_blocks(self.nodes, wait=0.1) + return [int(x, 16) for x in all_hashes] + + def run_test(self): + # Setup the p2p connections and start up the network thread. + inv_node = InvNode() + test_node = TestNode() + + connections = [] + connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node)) + # Set nServices to 0 for test_node, so no block download will occur outside of + # direct fetching + connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0)) + inv_node.add_connection(connections[0]) + test_node.add_connection(connections[1]) + + NetworkThread().start() # Start up network handling in another thread + + # Test logic begins here + inv_node.wait_for_verack() + test_node.wait_for_verack() + + tip = int(self.nodes[0].getbestblockhash(), 16) + + # PART 1 + # 1. Mine a block; expect inv announcements each time + print "Part 1: headers don't start before sendheaders message..." + for i in xrange(4): + old_tip = tip + tip = self.mine_blocks(1) + assert_equal(inv_node.check_last_announcement(inv=[tip]), True) + assert_equal(test_node.check_last_announcement(inv=[tip]), True) + # Try a few different responses; none should affect next announcement + if i == 0: + # first request the block + test_node.get_data([tip]) + test_node.wait_for_block(tip, timeout=5) + elif i == 1: + # next try requesting header and block + test_node.get_headers(locator=[old_tip], hashstop=tip) + test_node.get_data([tip]) + test_node.wait_for_block(tip) + test_node.clear_last_announcement() # since we requested headers... 
+ elif i == 2: + # this time announce own block via headers + height = self.nodes[0].getblockcount() + last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + block_time = last_time + 1 + new_block = create_block(tip, create_coinbase(height+1), block_time) + new_block.solve() + test_node.send_header_for_blocks([new_block]) + test_node.wait_for_getdata([new_block.sha256], timeout=5) + test_node.send_message(msg_block(new_block)) + test_node.sync_with_ping() # make sure this block is processed + inv_node.clear_last_announcement() + test_node.clear_last_announcement() + + print "Part 1: success!" + print "Part 2: announce blocks with headers after sendheaders message..." + # PART 2 + # 2. Send a sendheaders message and test that headers announcements + # commence and keep working. + test_node.send_message(msg_sendheaders()) + prev_tip = int(self.nodes[0].getbestblockhash(), 16) + test_node.get_headers(locator=[prev_tip], hashstop=0L) + test_node.sync_with_ping() + test_node.clear_last_announcement() # Clear out empty headers response + + # Now that we've synced headers, headers announcements should work + tip = self.mine_blocks(1) + assert_equal(inv_node.check_last_announcement(inv=[tip]), True) + assert_equal(test_node.check_last_announcement(headers=[tip]), True) + + height = self.nodes[0].getblockcount()+1 + block_time += 10 # Advance far enough ahead + for i in xrange(10): + # Mine i blocks, and alternate announcing either via + # inv (of tip) or via headers. After each, new blocks + # mined by the node should successfully be announced + # with block header, even though the blocks are never requested + for j in xrange(2): + blocks = [] + for b in xrange(i+1): + blocks.append(create_block(tip, create_coinbase(height), block_time)) + blocks[-1].solve() + tip = blocks[-1].sha256 + block_time += 1 + height += 1 + if j == 0: + # Announce via inv + test_node.send_block_inv(tip) + test_node.wait_for_getdata([tip], timeout=5) + # Test that duplicate inv's won't result in duplicate + # getdata requests, or duplicate headers announcements + inv_node.send_block_inv(tip) + # Should have received a getheaders as well! + test_node.send_header_for_blocks(blocks) + test_node.wait_for_getdata([x.sha256 for x in blocks[0:-1]], timeout=5) + [ inv_node.send_block_inv(x.sha256) for x in blocks[0:-1] ] + inv_node.sync_with_ping() + else: + # Announce via headers + test_node.send_header_for_blocks(blocks) + test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5) + # Test that duplicate headers won't result in duplicate + # getdata requests (the check is further down) + inv_node.send_header_for_blocks(blocks) + inv_node.sync_with_ping() + [ test_node.send_message(msg_block(x)) for x in blocks ] + test_node.sync_with_ping() + inv_node.sync_with_ping() + # This block should not be announced to the inv node (since it also + # broadcast it) + assert_equal(inv_node.last_inv, None) + assert_equal(inv_node.last_headers, None) + inv_node.clear_last_announcement() + test_node.clear_last_announcement() + tip = self.mine_blocks(1) + assert_equal(inv_node.check_last_announcement(inv=[tip]), True) + assert_equal(test_node.check_last_announcement(headers=[tip]), True) + height += 1 + block_time += 1 + + print "Part 2: success!" + + print "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer..." + + # PART 3. Headers announcements can stop after large reorg, and resume after + # getheaders or inv from peer. 
+ for j in xrange(2): + # First try mining a reorg that can propagate with header announcement + new_block_hashes = self.mine_reorg(length=7, peers=[test_node, inv_node]) + tip = new_block_hashes[-1] + assert_equal(inv_node.check_last_announcement(inv=[tip]), True) + assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True) + + block_time += 8 + + # Mine a too-large reorg, which should be announced with a single inv + new_block_hashes = self.mine_reorg(length=8, peers=[test_node, inv_node]) + tip = new_block_hashes[-1] + assert_equal(inv_node.check_last_announcement(inv=[tip]), True) + assert_equal(test_node.check_last_announcement(inv=[tip]), True) + + block_time += 9 + + fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"] + fork_point = int(fork_point, 16) + + # Use getblocks/getdata + test_node.send_getblocks(locator = [fork_point]) + assert_equal(test_node.check_last_announcement(inv=new_block_hashes[0:-1]), True) + test_node.get_data(new_block_hashes) + test_node.wait_for_block(new_block_hashes[-1]) + + for i in xrange(3): + # Mine another block, still should get only an inv + tip = self.mine_blocks(1) + assert_equal(inv_node.check_last_announcement(inv=[tip]), True) + assert_equal(test_node.check_last_announcement(inv=[tip]), True) + if i == 0: + # Just get the data -- shouldn't cause headers announcements to resume + test_node.get_data([tip]) + test_node.wait_for_block(tip) + elif i == 1: + # Send a getheaders message that shouldn't trigger headers announcements + # to resume (best header sent will be too old) + test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1]) + test_node.get_data([tip]) + test_node.wait_for_block(tip) + test_node.clear_last_announcement() + elif i == 2: + test_node.get_data([tip]) + test_node.wait_for_block(tip) + # This time, try sending either a getheaders to trigger resumption + # of headers announcements, or mine a new block and inv it, also + # triggering resumption of headers announcements. + if j == 0: + test_node.get_headers(locator=[tip], hashstop=0L) + test_node.sync_with_ping() + else: + test_node.send_block_inv(tip) + test_node.sync_with_ping() + # New blocks should now be announced with header + tip = self.mine_blocks(1) + assert_equal(inv_node.check_last_announcement(inv=[tip]), True) + assert_equal(test_node.check_last_announcement(headers=[tip]), True) + + print "Part 3: success!" + + print "Part 4: Testing direct fetch behavior..." + tip = self.mine_blocks(1) + height = self.nodes[0].getblockcount() + 1 + last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + block_time = last_time + 1 + + # Create 2 blocks. Send the blocks, then send the headers. 
+ blocks = [] + for b in xrange(2): + blocks.append(create_block(tip, create_coinbase(height), block_time)) + blocks[-1].solve() + tip = blocks[-1].sha256 + block_time += 1 + height += 1 + inv_node.send_message(msg_block(blocks[-1])) + + inv_node.sync_with_ping() # Make sure blocks are processed + test_node.last_getdata = None + test_node.send_header_for_blocks(blocks); + test_node.sync_with_ping() + # should not have received any getdata messages + with mininode_lock: + assert_equal(test_node.last_getdata, None) + + # This time, direct fetch should work + blocks = [] + for b in xrange(3): + blocks.append(create_block(tip, create_coinbase(height), block_time)) + blocks[-1].solve() + tip = blocks[-1].sha256 + block_time += 1 + height += 1 + + test_node.send_header_for_blocks(blocks) + test_node.sync_with_ping() + test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=test_node.sleep_time) + + [ test_node.send_message(msg_block(x)) for x in blocks ] + + test_node.sync_with_ping() + + # Now announce a header that forks the last two blocks + tip = blocks[0].sha256 + height -= 1 + blocks = [] + + # Create extra blocks for later + for b in xrange(20): + blocks.append(create_block(tip, create_coinbase(height), block_time)) + blocks[-1].solve() + tip = blocks[-1].sha256 + block_time += 1 + height += 1 + + # Announcing one block on fork should not trigger direct fetch + # (less work than tip) + test_node.last_getdata = None + test_node.send_header_for_blocks(blocks[0:1]) + test_node.sync_with_ping() + with mininode_lock: + assert_equal(test_node.last_getdata, None) + + # Announcing one more block on fork should trigger direct fetch for + # both blocks (same work as tip) + test_node.send_header_for_blocks(blocks[1:2]) + test_node.sync_with_ping() + test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=test_node.sleep_time) + + # Announcing 16 more headers should trigger direct fetch for 14 more + # blocks + test_node.send_header_for_blocks(blocks[2:18]) + test_node.sync_with_ping() + test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=test_node.sleep_time) + + # Announcing 1 more header should not trigger any response + test_node.last_getdata = None + test_node.send_header_for_blocks(blocks[18:19]) + test_node.sync_with_ping() + with mininode_lock: + assert_equal(test_node.last_getdata, None) + + print "Part 4: success!" 
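
The expected getdata sizes in Part 4 fall out of the per-peer in-flight limit: the test's 2 + 14 = 16 arithmetic implies MAX_BLOCKS_IN_TRANSIT_PER_PEER is 16 in this codebase. A rough standalone model of that bookkeeping follows; it is illustrative only, and the function and parameter names are not from the patch.

MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16

def direct_fetch_requests(announced, already_have, in_flight,
                          has_enough_work=True, tip_is_recent=True):
    """Blocks a node would request directly after a headers announcement.

    announced:        header hashes from the message, oldest first
    already_have:     set of hashes whose block data we already have
    in_flight:        set of hashes already requested from peers
    has_enough_work:  announced tip has at least as much work as our tip
    tip_is_recent:    our own tip is recent enough (CanDirectFetch)
    """
    if not (has_enough_work and tip_is_recent):
        return []                          # fall back to the normal sync path
    slots = MAX_BLOCKS_IN_TRANSIT_PER_PEER - len(in_flight)
    requests = []
    for h in announced:
        if h in already_have or h in in_flight:
            continue                       # nothing to fetch for this header
        if len(requests) >= slots:
            break                          # per-peer in-flight cap reached
        requests.append(h)
    return requests

# Part 4, step e: two fork blocks are already in flight, so 16 new headers
# produce a getdata for only the first 14 of them.
assert len(direct_fetch_requests(list(range(16)), set(), {"a", "b"})) == 14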
+ + # Finally, check that the inv node never received a getdata request, + # throughout the test + assert_equal(inv_node.last_getdata, None) + +if __name__ == '__main__': + SendHeadersTest().main() diff --git a/qa/rpc-tests/test_framework/mininode.py b/qa/rpc-tests/test_framework/mininode.py index b7d78e74fa..64985d58e2 100755 --- a/qa/rpc-tests/test_framework/mininode.py +++ b/qa/rpc-tests/test_framework/mininode.py @@ -751,8 +751,8 @@ class msg_inv(object): class msg_getdata(object): command = "getdata" - def __init__(self): - self.inv = [] + def __init__(self, inv=None): + self.inv = inv if inv != None else [] def deserialize(self, f): self.inv = deser_vector(f, CInv) @@ -905,6 +905,20 @@ class msg_mempool(object): def __repr__(self): return "msg_mempool()" +class msg_sendheaders(object): + command = "sendheaders" + + def __init__(self): + pass + + def deserialize(self, f): + pass + + def serialize(self): + return "" + + def __repr__(self): + return "msg_sendheaders()" # getheaders message has # number of entries @@ -990,6 +1004,17 @@ class NodeConnCB(object): def __init__(self): self.verack_received = False + # Spin until verack message is received from the node. + # Tests may want to use this as a signal that the test can begin. + # This can be called from the testing thread, so it needs to acquire the + # global lock. + def wait_for_verack(self): + while True: + with mininode_lock: + if self.verack_received: + return + time.sleep(0.05) + # Derived classes should call this function once to set the message map # which associates the derived classes' functions to incoming messages def create_callback_map(self): @@ -1084,7 +1109,7 @@ class NodeConn(asyncore.dispatcher): "regtest": "\xfa\xbf\xb5\xda" # regtest } - def __init__(self, dstaddr, dstport, rpc, callback, net="regtest"): + def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=1): asyncore.dispatcher.__init__(self, map=mininode_socket_map) self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport)) self.dstaddr = dstaddr @@ -1102,6 +1127,7 @@ class NodeConn(asyncore.dispatcher): # stuff version msg into sendbuf vt = msg_version() + vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport vt.addrFrom.ip = "0.0.0.0" diff --git a/src/Makefile.am b/src/Makefile.am index f1e98dabde..40f2e19af0 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -100,6 +100,7 @@ BITCOIN_CORE_H = \ compat/sanity.h \ compressor.h \ consensus/consensus.h \ + consensus/merkle.h \ consensus/params.h \ consensus/validation.h \ core_io.h \ @@ -268,6 +269,7 @@ libbitcoin_common_a_SOURCES = \ chainparams.cpp \ coins.cpp \ compressor.cpp \ + consensus/merkle.cpp \ core_read.cpp \ core_write.cpp \ hash.cpp \ diff --git a/src/Makefile.test.include b/src/Makefile.test.include index fafc1a2944..c377183ad5 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -57,6 +57,7 @@ BITCOIN_TESTS =\ test/dbwrapper_tests.cpp \ test/main_tests.cpp \ test/mempool_tests.cpp \ + test/merkle_tests.cpp \ test/miner_tests.cpp \ test/mruset_tests.cpp \ test/multisig_tests.cpp \ diff --git a/src/chain.cpp b/src/chain.cpp index 5b8ce076c4..3450ed6c3f 100644 --- a/src/chain.cpp +++ b/src/chain.cpp @@ -51,6 +51,9 @@ CBlockLocator CChain::GetLocator(const CBlockIndex *pindex) const { } const CBlockIndex *CChain::FindFork(const CBlockIndex *pindex) const { + if (pindex == NULL) { + return NULL; + } if (pindex->nHeight > Height()) pindex = pindex->GetAncestor(Height()); while (pindex && !Contains(pindex)) diff 
--git a/src/chainparams.cpp b/src/chainparams.cpp index 5d6d1ef9d8..a46866a2be 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -4,6 +4,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "chainparams.h" +#include "consensus/merkle.h" #include "tinyformat.h" #include "util.h" @@ -32,7 +33,7 @@ static CBlock CreateGenesisBlock(const char* pszTimestamp, const CScript& genesi genesis.nVersion = nVersion; genesis.vtx.push_back(txNew); genesis.hashPrevBlock.SetNull(); - genesis.hashMerkleRoot = genesis.ComputeMerkleRoot(); + genesis.hashMerkleRoot = BlockMerkleRoot(genesis); return genesis; } diff --git a/src/consensus/merkle.cpp b/src/consensus/merkle.cpp new file mode 100644 index 0000000000..9a8afa8a33 --- /dev/null +++ b/src/consensus/merkle.cpp @@ -0,0 +1,172 @@ +#include "merkle.h" +#include "hash.h" +#include "utilstrencodings.h" + +/* WARNING! If you're reading this because you're learning about crypto + and/or designing a new system that will use merkle trees, keep in mind + that the following merkle tree algorithm has a serious flaw related to + duplicate txids, resulting in a vulnerability (CVE-2012-2459). + + The reason is that if the number of hashes in the list at a given time + is odd, the last one is duplicated before computing the next level (which + is unusual in Merkle trees). This results in certain sequences of + transactions leading to the same merkle root. For example, these two + trees: + + A A + / \ / \ + B C B C + / \ | / \ / \ + D E F D E F F + / \ / \ / \ / \ / \ / \ / \ + 1 2 3 4 5 6 1 2 3 4 5 6 5 6 + + for transaction lists [1,2,3,4,5,6] and [1,2,3,4,5,6,5,6] (where 5 and + 6 are repeated) result in the same root hash A (because the hash of both + of (F) and (F,F) is C). + + The vulnerability results from being able to send a block with such a + transaction list, with the same merkle root, and the same block hash as + the original without duplication, resulting in failed validation. If the + receiving node proceeds to mark that block as permanently invalid + however, it will fail to accept further unmodified (and thus potentially + valid) versions of the same block. We defend against this by detecting + the case where we would hash two identical hashes at the end of the list + together, and treating that identically to the block having an invalid + merkle root. Assuming no double-SHA256 collisions, this will detect all + known ways of changing the transactions without affecting the merkle + root. +*/ + +/* This implements a constant-space merkle root/path calculator, limited to 2^32 leaves. */ +static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot, bool* pmutated, uint32_t branchpos, std::vector<uint256>* pbranch) { + if (pbranch) pbranch->clear(); + if (leaves.size() == 0) { + if (pmutated) *pmutated = false; + if (proot) *proot = uint256(); + return; + } + bool mutated = false; + // count is the number of leaves processed so far. + uint32_t count = 0; + // inner is an array of eagerly computed subtree hashes, indexed by tree + // level (0 being the leaves). + // For example, when count is 25 (11001 in binary), inner[4] is the hash of + // the first 16 leaves, inner[3] of the next 8 leaves, and inner[0] equal to + // the last leaf. The other inner entries are undefined. + uint256 inner[32]; + // Which position in inner is a hash that depends on the matching leaf. + int matchlevel = -1; + // First process all leaves into 'inner' values. 
+ while (count < leaves.size()) { + uint256 h = leaves[count]; + bool matchh = count == branchpos; + count++; + int level; + // For each of the lower bits in count that are 0, do 1 step. Each + // corresponds to an inner value that existed before processing the + // current leaf, and each needs a hash to combine it. + for (level = 0; !(count & (((uint32_t)1) << level)); level++) { + if (pbranch) { + if (matchh) { + pbranch->push_back(inner[level]); + } else if (matchlevel == level) { + pbranch->push_back(h); + matchh = true; + } + } + mutated |= (inner[level] == h); + CHash256().Write(inner[level].begin(), 32).Write(h.begin(), 32).Finalize(h.begin()); + } + // Store the resulting hash at inner position level. + inner[level] = h; + if (matchh) { + matchlevel = level; + } + } + // Do a final 'sweep' over the rightmost branch of the tree to process + // odd levels, and reduce everything to a single top value. + // Level is the level (counted from the bottom) up to which we've sweeped. + int level = 0; + // As long as bit number level in count is zero, skip it. It means there + // is nothing left at this level. + while (!(count & (((uint32_t)1) << level))) { + level++; + } + uint256 h = inner[level]; + bool matchh = matchlevel == level; + while (count != (((uint32_t)1) << level)) { + // If we reach this point, h is an inner value that is not the top. + // We combine it with itself (Bitcoin's special rule for odd levels in + // the tree) to produce a higher level one. + if (pbranch && matchh) { + pbranch->push_back(h); + } + CHash256().Write(h.begin(), 32).Write(h.begin(), 32).Finalize(h.begin()); + // Increment count to the value it would have if two entries at this + // level had existed. + count += (((uint32_t)1) << level); + level++; + // And propagate the result upwards accordingly. + while (!(count & (((uint32_t)1) << level))) { + if (pbranch) { + if (matchh) { + pbranch->push_back(inner[level]); + } else if (matchlevel == level) { + pbranch->push_back(h); + matchh = true; + } + } + CHash256().Write(inner[level].begin(), 32).Write(h.begin(), 32).Finalize(h.begin()); + level++; + } + } + // Return result. 
+ if (pmutated) *pmutated = mutated; + if (proot) *proot = h; +} + +uint256 ComputeMerkleRoot(const std::vector<uint256>& leaves, bool* mutated) { + uint256 hash; + MerkleComputation(leaves, &hash, mutated, -1, NULL); + return hash; +} + +std::vector<uint256> ComputeMerkleBranch(const std::vector<uint256>& leaves, uint32_t position) { + std::vector<uint256> ret; + MerkleComputation(leaves, NULL, NULL, position, &ret); + return ret; +} + +uint256 ComputeMerkleRootFromBranch(const uint256& leaf, const std::vector<uint256>& vMerkleBranch, uint32_t nIndex) { + uint256 hash = leaf; + for (std::vector<uint256>::const_iterator it = vMerkleBranch.begin(); it != vMerkleBranch.end(); ++it) { + if (nIndex & 1) { + hash = Hash(BEGIN(*it), END(*it), BEGIN(hash), END(hash)); + } else { + hash = Hash(BEGIN(hash), END(hash), BEGIN(*it), END(*it)); + } + nIndex >>= 1; + } + return hash; +} + +uint256 BlockMerkleRoot(const CBlock& block, bool* mutated) +{ + std::vector<uint256> leaves; + leaves.resize(block.vtx.size()); + for (size_t s = 0; s < block.vtx.size(); s++) { + leaves[s] = block.vtx[s].GetHash(); + } + return ComputeMerkleRoot(leaves, mutated); +} + +std::vector<uint256> BlockMerkleBranch(const CBlock& block, uint32_t position) +{ + std::vector<uint256> leaves; + leaves.resize(block.vtx.size()); + for (size_t s = 0; s < block.vtx.size(); s++) { + leaves[s] = block.vtx[s].GetHash(); + } + return ComputeMerkleBranch(leaves, position); +} diff --git a/src/consensus/merkle.h b/src/consensus/merkle.h new file mode 100644 index 0000000000..6ef59745ac --- /dev/null +++ b/src/consensus/merkle.h @@ -0,0 +1,32 @@ +// Copyright (c) 2015 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_MERKLE +#define BITCOIN_MERKLE + +#include <stdint.h> +#include <vector> + +#include "primitives/transaction.h" +#include "primitives/block.h" +#include "uint256.h" + +uint256 ComputeMerkleRoot(const std::vector<uint256>& leaves, bool* mutated = NULL); +std::vector<uint256> ComputeMerkleBranch(const std::vector<uint256>& leaves, uint32_t position); +uint256 ComputeMerkleRootFromBranch(const uint256& leaf, const std::vector<uint256>& branch, uint32_t position); + +/* + * Compute the Merkle root of the transactions in a block. + * *mutated is set to true if a duplicated subtree was found. + */ +uint256 BlockMerkleRoot(const CBlock& block, bool* mutated = NULL); + +/* + * Compute the Merkle branch for the tree of transactions in a block, for a + * given position. + * This can be verified using ComputeMerkleRootFromBranch. + */ +std::vector<uint256> BlockMerkleBranch(const CBlock& block, uint32_t position); + +#endif diff --git a/src/main.cpp b/src/main.cpp index 6b6840ce8c..31913956b7 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -12,6 +12,7 @@ #include "checkpoints.h" #include "checkqueue.h" #include "consensus/consensus.h" +#include "consensus/merkle.h" #include "consensus/validation.h" #include "hash.h" #include "init.h" @@ -246,6 +247,8 @@ struct CNodeState { uint256 hashLastUnknownBlock; //! The last full block we both have. CBlockIndex *pindexLastCommonBlock; + //! The best header we have sent our peer. + CBlockIndex *pindexBestHeaderSent; //! Whether we've started headers synchronization with this peer. bool fSyncStarted; //! Since when we're stalling block download progress (in microseconds), or 0. @@ -255,6 +258,8 @@ struct CNodeState { int nBlocksInFlightValidHeaders; //! 
Whether we consider this a preferred download peer. bool fPreferredDownload; + //! Whether this peer wants invs or headers (when possible) for block announcements. + bool fPreferHeaders; CNodeState() { fCurrentlyConnected = false; @@ -263,11 +268,13 @@ struct CNodeState { pindexBestKnownBlock = NULL; hashLastUnknownBlock.SetNull(); pindexLastCommonBlock = NULL; + pindexBestHeaderSent = NULL; fSyncStarted = false; nStallingSince = 0; nBlocksInFlight = 0; nBlocksInFlightValidHeaders = 0; fPreferredDownload = false; + fPreferHeaders = false; } }; @@ -397,6 +404,22 @@ void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) { } } +// Requires cs_main +bool CanDirectFetch(const Consensus::Params &consensusParams) +{ + return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20; +} + +// Requires cs_main +bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex) +{ + if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) + return true; + if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) + return true; + return false; +} + /** Find the last common ancestor two blocks have. * Both pa and pb must be non-NULL. */ CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) { @@ -2556,16 +2579,17 @@ static bool ActivateBestChainStep(CValidationState& state, const CChainParams& c * or an activated best chain. pblock is either NULL or a pointer to a block * that is already loaded (to avoid loading it again from disk). */ -bool ActivateBestChain(CValidationState& state, const CChainParams& chainparams, const CBlock* pblock) -{ - CBlockIndex *pindexNewTip = NULL; +bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, const CBlock *pblock) { CBlockIndex *pindexMostWork = NULL; do { boost::this_thread::interruption_point(); + CBlockIndex *pindexNewTip = NULL; + const CBlockIndex *pindexFork; bool fInitialDownload; { LOCK(cs_main); + CBlockIndex *pindexOldTip = chainActive.Tip(); pindexMostWork = FindMostWorkChain(); // Whether we have anything to do at all. @@ -2576,26 +2600,44 @@ bool ActivateBestChain(CValidationState& state, const CChainParams& chainparams, return false; pindexNewTip = chainActive.Tip(); + pindexFork = chainActive.FindFork(pindexOldTip); fInitialDownload = IsInitialBlockDownload(); } // When we reach this point, we switched to a new tip (stored in pindexNewTip). // Notifications/callbacks that can run without cs_main if (!fInitialDownload) { - uint256 hashNewTip = pindexNewTip->GetBlockHash(); + // Find the hashes of all blocks that weren't previously in the best chain. + std::vector<uint256> vHashes; + CBlockIndex *pindexToAnnounce = pindexNewTip; + while (pindexToAnnounce != pindexFork) { + vHashes.push_back(pindexToAnnounce->GetBlockHash()); + pindexToAnnounce = pindexToAnnounce->pprev; + if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) { + // Limit announcements in case of a huge reorganization. + // Rely on the peer's synchronization mechanism in that case. + break; + } + } // Relay inventory, but don't relay old inventory during initial block download. int nBlockEstimate = 0; if (fCheckpointsEnabled) nBlockEstimate = Checkpoints::GetTotalBlocksEstimate(chainparams.Checkpoints()); { LOCK(cs_vNodes); - BOOST_FOREACH(CNode* pnode, vNodes) - if (chainActive.Height() > (pnode->nStartingHeight != -1 ? 
pnode->nStartingHeight - 2000 : nBlockEstimate)) - pnode->PushInventory(CInv(MSG_BLOCK, hashNewTip)); + BOOST_FOREACH(CNode* pnode, vNodes) { + if (chainActive.Height() > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate)) { + BOOST_REVERSE_FOREACH(const uint256& hash, vHashes) { + pnode->PushBlockHash(hash); + } + } + } } // Notify external listeners about the new tip. - GetMainSignals().UpdatedBlockTip(pindexNewTip); - uiInterface.NotifyBlockTip(hashNewTip); + if (!vHashes.empty()) { + GetMainSignals().UpdatedBlockTip(pindexNewTip); + uiInterface.NotifyBlockTip(vHashes.front()); + } } } while(pindexMostWork != chainActive.Tip()); CheckBlockIndex(chainparams.GetConsensus()); @@ -2876,7 +2918,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo // Check the merkle root. if (fCheckMerkleRoot) { bool mutated; - uint256 hashMerkleRoot2 = block.ComputeMerkleRoot(&mutated); + uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated); if (block.hashMerkleRoot != hashMerkleRoot2) return state.DoS(100, error("CheckBlock(): hashMerkleRoot mismatch"), REJECT_INVALID, "bad-txnmrklroot", true); @@ -4332,6 +4374,14 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, LOCK(cs_main); State(pfrom->GetId())->fCurrentlyConnected = true; } + + if (pfrom->nVersion >= SENDHEADERS_VERSION) { + // Tell our peer we prefer to receive headers rather than inv's + // We send this to non-NODE NETWORK peers as well, because even + // non-NODE NETWORK peers can announce blocks (such as pruning + // nodes) + pfrom->PushMessage("sendheaders"); + } } @@ -4401,6 +4451,12 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, pfrom->fDisconnect = true; } + else if (strCommand == "sendheaders") + { + LOCK(cs_main); + State(pfrom->GetId())->fPreferHeaders = true; + } + else if (strCommand == "inv") { @@ -4445,7 +4501,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, // not a direct successor. pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexBestHeader), inv.hash); CNodeState *nodestate = State(pfrom->GetId()); - if (chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - chainparams.GetConsensus().nPowTargetSpacing * 20 && + if (CanDirectFetch(chainparams.GetConsensus()) && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { vToFetch.push_back(inv); // Mark block as in flight already, even though the actual "getdata" message only goes out @@ -4553,6 +4609,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id); return true; } + + CNodeState *nodestate = State(pfrom->GetId()); CBlockIndex* pindex = NULL; if (locator.IsNull()) { @@ -4580,6 +4638,11 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) break; } + // pindex can be NULL either if we sent chainActive.Tip() OR + // if our peer has chainActive.Tip() (and thus we are sending an empty + // headers message). In both cases it's safe to update + // pindexBestHeaderSent to be our tip. + nodestate->pindexBestHeaderSent = pindex ? 
pindex : chainActive.Tip(); pfrom->PushMessage("headers", vHeaders); } @@ -4609,11 +4672,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, mapAlreadyAskedFor.erase(inv); - // Check for recently rejected (and do other quick existence checks) - if (AlreadyHave(inv)) - return true; - - if (AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs)) + if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs)) { mempool.check(pcoinsTip); RelayTransaction(tx); @@ -4693,13 +4752,20 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, if (pfrom->fWhitelisted && GetBoolArg("-whitelistalwaysrelay", DEFAULT_WHITELISTALWAYSRELAY)) { // Always relay transactions received from whitelisted peers, even - // if they were rejected from the mempool, allowing the node to - // function as a gateway for nodes hidden behind it. + // if they were already in the mempool or rejected from it due + // to policy, allowing the node to function as a gateway for + // nodes hidden behind it. // - // FIXME: This includes invalid transactions, which means a - // whitelisted peer could get us banned! We may want to change - // that. - RelayTransaction(tx); + // Never relay transactions that we would assign a non-zero DoS + // score for, as we expect peers to do the same with us in that + // case. + int nDoS = 0; + if (!state.IsInvalid(nDoS) || nDoS == 0) { + LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id); + RelayTransaction(tx); + } else { + LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state)); + } } } int nDoS = 0; @@ -4768,6 +4834,53 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexLast), uint256()); } + bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus()); + CNodeState *nodestate = State(pfrom->GetId()); + // If this set of headers is valid and ends in a block with at least as + // much work as our tip, download as much as possible. + if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) { + vector<CBlockIndex *> vToFetch; + CBlockIndex *pindexWalk = pindexLast; + // Calculate all the blocks we'd need to switch to pindexLast, up to a limit. + while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && + !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) { + // We don't have this block, and it's not yet in flight. + vToFetch.push_back(pindexWalk); + } + pindexWalk = pindexWalk->pprev; + } + // If pindexWalk still isn't on our main chain, we're looking at a + // very large reorg at a time we think we're close to caught up to + // the main chain -- this shouldn't really happen. Bail out on the + // direct fetch and rely on parallel download instead. + if (!chainActive.Contains(pindexWalk)) { + LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n", + pindexLast->GetBlockHash().ToString(), + pindexLast->nHeight); + } else { + vector<CInv> vGetData; + // Download as much as possible, from earliest to latest. 
+ BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vToFetch) { + if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + // Can't download any more from this peer + break; + } + vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash())); + MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex); + LogPrint("net", "Requesting block %s from peer=%d\n", + pindex->GetBlockHash().ToString(), pfrom->id); + } + if (vGetData.size() > 1) { + LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n", + pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); + } + if (vGetData.size() > 0) { + pfrom->PushMessage("getdata", vGetData); + } + } + } + CheckBlockIndex(chainparams.GetConsensus()); } @@ -5294,6 +5407,100 @@ bool SendMessages(CNode* pto, bool fSendTrickle) } // + // Try sending block announcements via headers + // + { + // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our + // list of block hashes we're relaying, and our peer wants + // headers announcements, then find the first header + // not yet known to our peer but would connect, and send. + // If no header would connect, or if we have too many + // blocks, or if the peer doesn't want headers, just + // add all to the inv queue. + LOCK(pto->cs_inventory); + vector<CBlock> vHeaders; + bool fRevertToInv = (!state.fPreferHeaders || pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE); + CBlockIndex *pBestIndex = NULL; // last header queued for delivery + ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date + + if (!fRevertToInv) { + bool fFoundStartingHeader = false; + // Try to find first header that our peer doesn't have, and + // then send all headers past that one. If we come across any + // headers that aren't on chainActive, give up. + BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) { + BlockMap::iterator mi = mapBlockIndex.find(hash); + assert(mi != mapBlockIndex.end()); + CBlockIndex *pindex = mi->second; + if (chainActive[pindex->nHeight] != pindex) { + // Bail out if we reorged away from this block + fRevertToInv = true; + break; + } + assert(pBestIndex == NULL || pindex->pprev == pBestIndex); + pBestIndex = pindex; + if (fFoundStartingHeader) { + // add this to the headers message + vHeaders.push_back(pindex->GetBlockHeader()); + } else if (PeerHasHeader(&state, pindex)) { + continue; // keep looking for the first new block + } else if (pindex->pprev == NULL || PeerHasHeader(&state, pindex->pprev)) { + // Peer doesn't have this header but they do have the prior one. + // Start sending headers. + fFoundStartingHeader = true; + vHeaders.push_back(pindex->GetBlockHeader()); + } else { + // Peer doesn't have this header or the prior one -- nothing will + // connect, so bail out. + fRevertToInv = true; + break; + } + } + } + if (fRevertToInv) { + // If falling back to using an inv, just try to inv the tip. + // The last entry in vBlockHashesToAnnounce was our tip at some point + // in the past. + if (!pto->vBlockHashesToAnnounce.empty()) { + const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back(); + BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce); + assert(mi != mapBlockIndex.end()); + CBlockIndex *pindex = mi->second; + + // Warn if we're announcing a block that is not on the main chain. + // This should be very rare and could be optimized out. + // Just log for now. 
+ if (chainActive[pindex->nHeight] != pindex) { + LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n", + hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString()); + } + + // If the peer announced this block to us, don't inv it back. + // (Since block announcements may not be via inv's, we can't solely rely on + // setInventoryKnown to track this.) + if (!PeerHasHeader(&state, pindex)) { + pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce)); + LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__, + pto->id, hashToAnnounce.ToString()); + } + } + } else if (!vHeaders.empty()) { + if (vHeaders.size() > 1) { + LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__, + vHeaders.size(), + vHeaders.front().GetHash().ToString(), + vHeaders.back().GetHash().ToString(), pto->id); + } else { + LogPrint("net", "%s: sending header %s to peer=%d\n", __func__, + vHeaders.front().GetHash().ToString(), pto->id); + } + pto->PushMessage("headers", vHeaders); + state.pindexBestHeaderSent = pBestIndex; + } + pto->vBlockHashesToAnnounce.clear(); + } + + // // Message: inventory // vector<CInv> vInv; diff --git a/src/main.h b/src/main.h index dfa2fcb537..3dec613fc7 100644 --- a/src/main.h +++ b/src/main.h @@ -98,6 +98,9 @@ static const unsigned int DEFAULT_BANSCORE_THRESHOLD = 100; static const bool DEFAULT_TESTSAFEMODE = false; +/** Maximum number of headers to announce when relaying blocks with headers message.*/ +static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8; + struct BlockHasher { size_t operator()(const uint256& hash) const { return hash.GetCheapHash(); } diff --git a/src/miner.cpp b/src/miner.cpp index 5b711210db..27a1fbcf80 100644 --- a/src/miner.cpp +++ b/src/miner.cpp @@ -10,6 +10,7 @@ #include "chainparams.h" #include "coins.h" #include "consensus/consensus.h" +#include "consensus/merkle.h" #include "consensus/validation.h" #include "hash.h" #include "main.h" @@ -373,7 +374,7 @@ void IncrementExtraNonce(CBlock* pblock, const CBlockIndex* pindexPrev, unsigned assert(txCoinbase.vin[0].scriptSig.size() <= 100); pblock->vtx[0] = txCoinbase; - pblock->hashMerkleRoot = pblock->ComputeMerkleRoot(); + pblock->hashMerkleRoot = BlockMerkleRoot(*pblock); } ////////////////////////////////////////////////////////////////////////////// @@ -390,6 +390,9 @@ public: std::vector<CInv> vInventoryToSend; CCriticalSection cs_inventory; std::multimap<int64_t, CInv> mapAskFor; + // Used for headers announcements - unfiltered blocks to relay + // Also protected by cs_inventory + std::vector<uint256> vBlockHashesToAnnounce; // Ping time measurement: // The pong reply we're expecting, or 0 if no pong expected. @@ -504,6 +507,12 @@ public: } } + void PushBlockHash(const uint256 &hash) + { + LOCK(cs_inventory); + vBlockHashesToAnnounce.push_back(hash); + } + void AskFor(const CInv& inv); // TODO: Document the postcondition of this function. Is cs_vSend locked? diff --git a/src/primitives/block.cpp b/src/primitives/block.cpp index 7a58074d24..7280c18f77 100644 --- a/src/primitives/block.cpp +++ b/src/primitives/block.cpp @@ -15,69 +15,6 @@ uint256 CBlockHeader::GetHash() const return SerializeHash(*this); } -uint256 CBlock::ComputeMerkleRoot(bool* fMutated) const -{ - /* WARNING! If you're reading this because you're learning about crypto - and/or designing a new system that will use merkle trees, keep in mind - that the following merkle tree algorithm has a serious flaw related to - duplicate txids, resulting in a vulnerability (CVE-2012-2459). 
- - The reason is that if the number of hashes in the list at a given time - is odd, the last one is duplicated before computing the next level (which - is unusual in Merkle trees). This results in certain sequences of - transactions leading to the same merkle root. For example, these two - trees: - - A A - / \ / \ - B C B C - / \ | / \ / \ - D E F D E F F - / \ / \ / \ / \ / \ / \ / \ - 1 2 3 4 5 6 1 2 3 4 5 6 5 6 - - for transaction lists [1,2,3,4,5,6] and [1,2,3,4,5,6,5,6] (where 5 and - 6 are repeated) result in the same root hash A (because the hash of both - of (F) and (F,F) is C). - - The vulnerability results from being able to send a block with such a - transaction list, with the same merkle root, and the same block hash as - the original without duplication, resulting in failed validation. If the - receiving node proceeds to mark that block as permanently invalid - however, it will fail to accept further unmodified (and thus potentially - valid) versions of the same block. We defend against this by detecting - the case where we would hash two identical hashes at the end of the list - together, and treating that identically to the block having an invalid - merkle root. Assuming no double-SHA256 collisions, this will detect all - known ways of changing the transactions without affecting the merkle - root. - */ - std::vector<uint256> vMerkleTree; - vMerkleTree.reserve(vtx.size() * 2 + 16); // Safe upper bound for the number of total nodes. - for (std::vector<CTransaction>::const_iterator it(vtx.begin()); it != vtx.end(); ++it) - vMerkleTree.push_back(it->GetHash()); - int j = 0; - bool mutated = false; - for (int nSize = vtx.size(); nSize > 1; nSize = (nSize + 1) / 2) - { - for (int i = 0; i < nSize; i += 2) - { - int i2 = std::min(i+1, nSize-1); - if (i2 == i + 1 && i2 + 1 == nSize && vMerkleTree[j+i] == vMerkleTree[j+i2]) { - // Two identical hashes at the end of the list at a particular level. - mutated = true; - } - vMerkleTree.push_back(Hash(BEGIN(vMerkleTree[j+i]), END(vMerkleTree[j+i]), - BEGIN(vMerkleTree[j+i2]), END(vMerkleTree[j+i2]))); - } - j += nSize; - } - if (fMutated) { - *fMutated = mutated; - } - return (vMerkleTree.empty() ? uint256() : vMerkleTree.back()); -} - std::string CBlock::ToString() const { std::stringstream s; diff --git a/src/primitives/block.h b/src/primitives/block.h index 54731ff557..5c017d436f 100644 --- a/src/primitives/block.h +++ b/src/primitives/block.h @@ -118,12 +118,6 @@ public: return block; } - // Build the merkle tree for this block and return the merkle root. - // If non-NULL, *mutated is set to whether mutation was detected in the merkle - // tree (a duplication of transactions in the block leading to an identical - // merkle root). 
- uint256 ComputeMerkleRoot(bool* mutated = NULL) const; - std::string ToString() const; }; diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp index d0191fa6d8..647c860bdc 100644 --- a/src/qt/optionsdialog.cpp +++ b/src/qt/optionsdialog.cpp @@ -34,8 +34,7 @@ OptionsDialog::OptionsDialog(QWidget *parent, bool enableWallet) : QDialog(parent), ui(new Ui::OptionsDialog), model(0), - mapper(0), - fProxyIpsValid(true) + mapper(0) { ui->setupUi(this); @@ -60,12 +59,11 @@ OptionsDialog::OptionsDialog(QWidget *parent, bool enableWallet) : connect(ui->connectSocks, SIGNAL(toggled(bool)), ui->proxyIp, SLOT(setEnabled(bool))); connect(ui->connectSocks, SIGNAL(toggled(bool)), ui->proxyPort, SLOT(setEnabled(bool))); + connect(ui->connectSocks, SIGNAL(toggled(bool)), this, SLOT(updateProxyValidationState())); connect(ui->connectSocksTor, SIGNAL(toggled(bool)), ui->proxyIpTor, SLOT(setEnabled(bool))); connect(ui->connectSocksTor, SIGNAL(toggled(bool)), ui->proxyPortTor, SLOT(setEnabled(bool))); - - ui->proxyIp->installEventFilter(this); - ui->proxyIpTor->installEventFilter(this); + connect(ui->connectSocksTor, SIGNAL(toggled(bool)), this, SLOT(updateProxyValidationState())); /* Window elements init */ #ifdef Q_OS_MAC @@ -119,7 +117,12 @@ OptionsDialog::OptionsDialog(QWidget *parent, bool enableWallet) : mapper->setOrientation(Qt::Vertical); /* setup/change UI elements when proxy IPs are invalid/valid */ - connect(this, SIGNAL(proxyIpChecks(QValidatedLineEdit *, int)), this, SLOT(doProxyIpChecks(QValidatedLineEdit *, int))); + ui->proxyIp->setCheckValidator(new ProxyAddressValidator(parent)); + ui->proxyIpTor->setCheckValidator(new ProxyAddressValidator(parent)); + connect(ui->proxyIp, SIGNAL(validationDidChange(QValidatedLineEdit *)), this, SLOT(updateProxyValidationState())); + connect(ui->proxyIpTor, SIGNAL(validationDidChange(QValidatedLineEdit *)), this, SLOT(updateProxyValidationState())); + connect(ui->proxyPort, SIGNAL(textChanged(const QString&)), this, SLOT(updateProxyValidationState())); + connect(ui->proxyPortTor, SIGNAL(textChanged(const QString&)), this, SLOT(updateProxyValidationState())); } OptionsDialog::~OptionsDialog() @@ -200,18 +203,6 @@ void OptionsDialog::setMapper() mapper->addMapping(ui->thirdPartyTxUrls, OptionsModel::ThirdPartyTxUrls); } -void OptionsDialog::enableOkButton() -{ - /* prevent enabling of the OK button when data modified, if there is an invalid proxy address present */ - if(fProxyIpsValid) - setOkButtonState(true); -} - -void OptionsDialog::disableOkButton() -{ - setOkButtonState(false); -} - void OptionsDialog::setOkButtonState(bool fState) { ui->okButton->setEnabled(fState); @@ -269,24 +260,20 @@ void OptionsDialog::clearStatusLabel() ui->statusLabel->clear(); } -void OptionsDialog::doProxyIpChecks(QValidatedLineEdit *pUiProxyIp, int nProxyPort) +void OptionsDialog::updateProxyValidationState() { - Q_UNUSED(nProxyPort); - - CService addrProxy; - - /* Check for a valid IPv4 / IPv6 address */ - if (!(fProxyIpsValid = LookupNumeric(pUiProxyIp->text().toStdString().c_str(), addrProxy))) + QValidatedLineEdit *pUiProxyIp = ui->proxyIp; + QValidatedLineEdit *otherProxyWidget = (pUiProxyIp == ui->proxyIpTor) ? 
ui->proxyIp : ui->proxyIpTor; + if (pUiProxyIp->isValid() && (!ui->proxyPort->isEnabled() || ui->proxyPort->text().toInt() > 0) && (!ui->proxyPortTor->isEnabled() || ui->proxyPortTor->text().toInt() > 0)) { - disableOkButton(); - pUiProxyIp->setValid(false); - ui->statusLabel->setStyleSheet("QLabel { color: red; }"); - ui->statusLabel->setText(tr("The supplied proxy address is invalid.")); + setOkButtonState(otherProxyWidget->isValid()); //only enable ok button if both proxys are valid + ui->statusLabel->clear(); } else { - enableOkButton(); - ui->statusLabel->clear(); + setOkButtonState(false); + ui->statusLabel->setStyleSheet("QLabel { color: red; }"); + ui->statusLabel->setText(tr("The supplied proxy address is invalid.")); } } @@ -312,18 +299,18 @@ void OptionsDialog::updateDefaultProxyNets() (strProxy == strDefaultProxyGUI.toStdString()) ? ui->proxyReachTor->setChecked(true) : ui->proxyReachTor->setChecked(false); } -bool OptionsDialog::eventFilter(QObject *object, QEvent *event) +ProxyAddressValidator::ProxyAddressValidator(QObject *parent) : +QValidator(parent) { - if(event->type() == QEvent::FocusOut) - { - if(object == ui->proxyIp) - { - Q_EMIT proxyIpChecks(ui->proxyIp, ui->proxyPort->text().toInt()); - } - else if(object == ui->proxyIpTor) - { - Q_EMIT proxyIpChecks(ui->proxyIpTor, ui->proxyPortTor->text().toInt()); - } - } - return QDialog::eventFilter(object, event); +} + +QValidator::State ProxyAddressValidator::validate(QString &input, int &pos) const +{ + Q_UNUSED(pos); + // Validate the proxy + proxyType addrProxy = proxyType(CService(input.toStdString(), 9050), true); + if (addrProxy.IsValid()) + return QValidator::Acceptable; + + return QValidator::Invalid; } diff --git a/src/qt/optionsdialog.h b/src/qt/optionsdialog.h index 348489c599..489e35da49 100644 --- a/src/qt/optionsdialog.h +++ b/src/qt/optionsdialog.h @@ -6,6 +6,7 @@ #define BITCOIN_QT_OPTIONSDIALOG_H #include <QDialog> +#include <QValidator> class OptionsModel; class QValidatedLineEdit; @@ -18,6 +19,18 @@ namespace Ui { class OptionsDialog; } +/** Proxy address widget validator, checks for a valid proxy address. + */ +class ProxyAddressValidator : public QValidator +{ + Q_OBJECT + +public: + explicit ProxyAddressValidator(QObject *parent); + + State validate(QString &input, int &pos) const; +}; + /** Preferences dialog. 
*/ class OptionsDialog : public QDialog { @@ -30,14 +43,7 @@ public: void setModel(OptionsModel *model); void setMapper(); -protected: - bool eventFilter(QObject *object, QEvent *event); - private Q_SLOTS: - /* enable OK button */ - void enableOkButton(); - /* disable OK button */ - void disableOkButton(); /* set OK button state (enabled / disabled) */ void setOkButtonState(bool fState); void on_resetButton_clicked(); @@ -46,7 +52,7 @@ private Q_SLOTS: void showRestartWarning(bool fPersistent = false); void clearStatusLabel(); - void doProxyIpChecks(QValidatedLineEdit *pUiProxyIp, int nProxyPort); + void updateProxyValidationState(); /* query the networks, for which the default proxy is used */ void updateDefaultProxyNets(); @@ -57,7 +63,6 @@ private: Ui::OptionsDialog *ui; OptionsModel *model; QDataWidgetMapper *mapper; - bool fProxyIpsValid; }; #endif // BITCOIN_QT_OPTIONSDIALOG_H diff --git a/src/qt/qvalidatedlineedit.cpp b/src/qt/qvalidatedlineedit.cpp index 346369392c..5658a0bdcf 100644 --- a/src/qt/qvalidatedlineedit.cpp +++ b/src/qt/qvalidatedlineedit.cpp @@ -99,9 +99,25 @@ void QValidatedLineEdit::checkValidity() } else setValid(false); + + Q_EMIT validationDidChange(this); } void QValidatedLineEdit::setCheckValidator(const QValidator *v) { checkValidator = v; } + +bool QValidatedLineEdit::isValid() +{ + // use checkValidator in case the QValidatedLineEdit is disabled + if (checkValidator) + { + QString address = text(); + int pos = 0; + if (checkValidator->validate(address, pos) == QValidator::Acceptable) + return true; + } + + return valid; +} diff --git a/src/qt/qvalidatedlineedit.h b/src/qt/qvalidatedlineedit.h index 8665acda5e..8cb6a425fa 100644 --- a/src/qt/qvalidatedlineedit.h +++ b/src/qt/qvalidatedlineedit.h @@ -18,6 +18,7 @@ public: explicit QValidatedLineEdit(QWidget *parent); void clear(); void setCheckValidator(const QValidator *v); + bool isValid(); protected: void focusInEvent(QFocusEvent *evt); @@ -31,6 +32,9 @@ public Q_SLOTS: void setValid(bool valid); void setEnabled(bool enabled); +Q_SIGNALS: + void validationDidChange(QValidatedLineEdit *validatedLineEdit); + private Q_SLOTS: void markValid(); void checkValidity(); diff --git a/src/test/main_tests.cpp b/src/test/main_tests.cpp index 21ae46d6e9..2b92d239e9 100644 --- a/src/test/main_tests.cpp +++ b/src/test/main_tests.cpp @@ -72,5 +72,4 @@ BOOST_AUTO_TEST_CASE(test_combiner_all) Test.disconnect(&ReturnTrue); BOOST_CHECK(Test()); } - BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/merkle_tests.cpp b/src/test/merkle_tests.cpp new file mode 100644 index 0000000000..1e31f2e679 --- /dev/null +++ b/src/test/merkle_tests.cpp @@ -0,0 +1,136 @@ +// Copyright (c) 2015 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include "consensus/merkle.h" +#include "test/test_bitcoin.h" +#include "random.h" + +#include <boost/test/unit_test.hpp> + +BOOST_FIXTURE_TEST_SUITE(merkle_tests, TestingSetup) + +// Older version of the merkle root computation code, for comparison. +static uint256 BlockBuildMerkleTree(const CBlock& block, bool* fMutated, std::vector<uint256>& vMerkleTree) +{ + vMerkleTree.clear(); + vMerkleTree.reserve(block.vtx.size() * 2 + 16); // Safe upper bound for the number of total nodes. 
+ for (std::vector<CTransaction>::const_iterator it(block.vtx.begin()); it != block.vtx.end(); ++it) + vMerkleTree.push_back(it->GetHash()); + int j = 0; + bool mutated = false; + for (int nSize = block.vtx.size(); nSize > 1; nSize = (nSize + 1) / 2) + { + for (int i = 0; i < nSize; i += 2) + { + int i2 = std::min(i+1, nSize-1); + if (i2 == i + 1 && i2 + 1 == nSize && vMerkleTree[j+i] == vMerkleTree[j+i2]) { + // Two identical hashes at the end of the list at a particular level. + mutated = true; + } + vMerkleTree.push_back(Hash(vMerkleTree[j+i].begin(), vMerkleTree[j+i].end(), + vMerkleTree[j+i2].begin(), vMerkleTree[j+i2].end())); + } + j += nSize; + } + if (fMutated) { + *fMutated = mutated; + } + return (vMerkleTree.empty() ? uint256() : vMerkleTree.back()); +} + +// Older version of the merkle branch computation code, for comparison. +static std::vector<uint256> BlockGetMerkleBranch(const CBlock& block, const std::vector<uint256>& vMerkleTree, int nIndex) +{ + std::vector<uint256> vMerkleBranch; + int j = 0; + for (int nSize = block.vtx.size(); nSize > 1; nSize = (nSize + 1) / 2) + { + int i = std::min(nIndex^1, nSize-1); + vMerkleBranch.push_back(vMerkleTree[j+i]); + nIndex >>= 1; + j += nSize; + } + return vMerkleBranch; +} + +static inline int ctz(uint32_t i) { + if (i == 0) return 0; + int j = 0; + while (!(i & 1)) { + j++; + i >>= 1; + } + return j; +} + +BOOST_AUTO_TEST_CASE(merkle_test) +{ + for (int i = 0; i < 32; i++) { + // Try 32 block sizes: all sizes from 0 to 16 inclusive, and then 15 random sizes. + int ntx = (i <= 16) ? i : 17 + (insecure_rand() % 4000); + // Try up to 3 mutations. + for (int mutate = 0; mutate <= 3; mutate++) { + int duplicate1 = mutate >= 1 ? 1 << ctz(ntx) : 0; // The last how many transactions to duplicate first. + if (duplicate1 >= ntx) break; // Duplication of the entire tree results in a different root (it adds a level). + int ntx1 = ntx + duplicate1; // The resulting number of transactions after the first duplication. + int duplicate2 = mutate >= 2 ? 1 << ctz(ntx1) : 0; // Likewise for the second mutation. + if (duplicate2 >= ntx1) break; + int ntx2 = ntx1 + duplicate2; + int duplicate3 = mutate >= 3 ? 1 << ctz(ntx2) : 0; // And for the the third mutation. + if (duplicate3 >= ntx2) break; + int ntx3 = ntx2 + duplicate3; + // Build a block with ntx different transactions. + CBlock block; + block.vtx.resize(ntx); + for (int j = 0; j < ntx; j++) { + CMutableTransaction mtx; + mtx.nLockTime = j; + block.vtx[j] = mtx; + } + // Compute the root of the block before mutating it. + bool unmutatedMutated = false; + uint256 unmutatedRoot = BlockMerkleRoot(block, &unmutatedMutated); + BOOST_CHECK(unmutatedMutated == false); + // Optionally mutate by duplicating the last transactions, resulting in the same merkle root. + block.vtx.resize(ntx3); + for (int j = 0; j < duplicate1; j++) { + block.vtx[ntx + j] = block.vtx[ntx + j - duplicate1]; + } + for (int j = 0; j < duplicate2; j++) { + block.vtx[ntx1 + j] = block.vtx[ntx1 + j - duplicate2]; + } + for (int j = 0; j < duplicate3; j++) { + block.vtx[ntx2 + j] = block.vtx[ntx2 + j - duplicate3]; + } + // Compute the merkle root and merkle tree using the old mechanism. + bool oldMutated = false; + std::vector<uint256> merkleTree; + uint256 oldRoot = BlockBuildMerkleTree(block, &oldMutated, merkleTree); + // Compute the merkle root using the new mechanism. 
+ bool newMutated = false; + uint256 newRoot = BlockMerkleRoot(block, &newMutated); + BOOST_CHECK(oldRoot == newRoot); + BOOST_CHECK(newRoot == unmutatedRoot); + BOOST_CHECK((newRoot == uint256()) == (ntx == 0)); + BOOST_CHECK(oldMutated == newMutated); + BOOST_CHECK(newMutated == !!mutate); + // If no mutation was done (once for every ntx value), try up to 16 branches. + if (mutate == 0) { + for (int loop = 0; loop < std::min(ntx, 16); loop++) { + // If ntx <= 16, try all branches. Otherise, try 16 random ones. + int mtx = loop; + if (ntx > 16) { + mtx = insecure_rand() % ntx; + } + std::vector<uint256> newBranch = BlockMerkleBranch(block, mtx); + std::vector<uint256> oldBranch = BlockGetMerkleBranch(block, merkleTree, mtx); + BOOST_CHECK(oldBranch == newBranch); + BOOST_CHECK(ComputeMerkleRootFromBranch(block.vtx[mtx].GetHash(), newBranch, mtx) == oldRoot); + } + } + } + } +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp index dc20e34634..1d7c9f65c0 100644 --- a/src/test/miner_tests.cpp +++ b/src/test/miner_tests.cpp @@ -5,6 +5,7 @@ #include "chainparams.h" #include "coins.h" #include "consensus/consensus.h" +#include "consensus/merkle.h" #include "consensus/validation.h" #include "main.h" #include "miner.h" @@ -93,7 +94,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) pblock->vtx[0] = CTransaction(txCoinbase); if (txFirst.size() < 2) txFirst.push_back(new CTransaction(pblock->vtx[0])); - pblock->hashMerkleRoot = pblock->ComputeMerkleRoot(); + pblock->hashMerkleRoot = BlockMerkleRoot(*pblock); pblock->nNonce = blockinfo[i].nonce; CValidationState state; BOOST_CHECK(ProcessNewBlock(state, chainparams, NULL, pblock, true, NULL)); diff --git a/src/test/pmt_tests.cpp b/src/test/pmt_tests.cpp index d9f3c3e467..0d7fb2bc35 100644 --- a/src/test/pmt_tests.cpp +++ b/src/test/pmt_tests.cpp @@ -2,6 +2,7 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. +#include "consensus/merkle.h" #include "merkleblock.h" #include "serialize.h" #include "streams.h" @@ -48,7 +49,7 @@ BOOST_AUTO_TEST_CASE(pmt_test1) } // calculate actual merkle root and height - uint256 merkleRoot1 = block.ComputeMerkleRoot(); + uint256 merkleRoot1 = BlockMerkleRoot(block); std::vector<uint256> vTxid(nTx, uint256()); for (unsigned int j=0; j<nTx; j++) vTxid[j] = block.vtx[j].GetHash(); diff --git a/src/version.h b/src/version.h index 6cdddf9255..f7cf18d0b6 100644 --- a/src/version.h +++ b/src/version.h @@ -9,7 +9,7 @@ * network protocol versioning */ -static const int PROTOCOL_VERSION = 70011; +static const int PROTOCOL_VERSION = 70012; //! initial proto version, to be increased after version/verack negotiation static const int INIT_PROTO_VERSION = 209; @@ -37,4 +37,7 @@ static const int MEMPOOL_GD_VERSION = 60002; //! "filter*" commands are disabled without NODE_BLOOM after and including this version static const int NO_BLOOM_VERSION = 70011; +//! "sendheaders" command and announcing blocks with headers starts with this version +static const int SENDHEADERS_VERSION = 70012; + #endif // BITCOIN_VERSION_H |
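Editor's note (not part of the commit above): the comment removed from src/primitives/block.cpp and the new src/test/merkle_tests.cpp both rest on the same structural fact, so a self-contained sketch may help. The program below is an illustrative toy, not Bitcoin code: Combine() uses std::hash as a stand-in for double-SHA256, and ToyMerkleRoot() re-implements only the "duplicate the last hash of an odd-sized level" rule plus the identical-trailing-hashes check described in the removed comment. All names in it are invented for this sketch.

// Toy demonstration of CVE-2012-2459-style merkle mutation and its detection.
// std::hash-based combining stands in for Bitcoin's double-SHA256; the
// structural argument is independent of the hash function used.
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Combine two child "hashes" into a parent node (illustrative combiner).
static uint64_t Combine(uint64_t a, uint64_t b)
{
    return std::hash<std::string>()(std::to_string(a) + "," + std::to_string(b));
}

// Compute the root the way the old CBlock code did: when a level has an odd
// number of entries, the last one is paired with itself. If two identical
// hashes would be combined at the very end of a level, flag the tree as mutated.
static uint64_t ToyMerkleRoot(std::vector<uint64_t> level, bool* mutated)
{
    *mutated = false;
    if (level.empty()) return 0;
    while (level.size() > 1) {
        std::vector<uint64_t> next;
        for (size_t i = 0; i < level.size(); i += 2) {
            size_t i2 = std::min(i + 1, level.size() - 1);
            if (i2 == i + 1 && i2 + 1 == level.size() && level[i] == level[i2])
                *mutated = true; // same defence as the removed code and the new merkle.cpp
            next.push_back(Combine(level[i], level[i2]));
        }
        level = next;
    }
    return level[0];
}

int main()
{
    bool mutatedA = false, mutatedB = false;
    std::vector<uint64_t> txids  = {1, 2, 3, 4, 5, 6};
    std::vector<uint64_t> forged = {1, 2, 3, 4, 5, 6, 5, 6}; // last two leaves duplicated

    uint64_t rootA = ToyMerkleRoot(txids, &mutatedA);
    uint64_t rootB = ToyMerkleRoot(forged, &mutatedB);

    std::cout << "same root: " << (rootA == rootB)   // 1: both lists reach root A
              << ", original flagged: " << mutatedA  // 0: no mutation in the honest list
              << ", forged flagged: " << mutatedB    // 1: (F,F) detected at the end of a level
              << std::endl;
    return 0;
}

Built with any C++11 compiler this prints "same root: 1, original flagged: 0, forged flagged: 1": the forged list [1,2,3,4,5,6,5,6] reaches the same root A as [1,2,3,4,5,6], and the check fires exactly where the duplicated pair meets, which is the behaviour merkle_tests.cpp asserts with BOOST_CHECK(newMutated == !!mutate).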
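Editor's note on SENDHEADERS_VERSION (src/version.h above), also not part of the commit: the constant is consumed by the src/main.cpp changes listed in the file summary but not shown in this excerpt, so the following is only a hedged sketch of the usual pattern for a protocol-version gate of this kind. CNodeLike, OnVerack and the simplified PushMessage are invented stand-ins, not the real networking API.

// Sketch: offer BIP 130 headers announcements only to new enough peers.
#include <iostream>
#include <string>

static const int SENDHEADERS_VERSION = 70012; // as added to src/version.h

struct CNodeLike {
    int nVersion; // protocol version negotiated during version/verack
    void PushMessage(const std::string& command)
    {
        std::cout << "send to peer (version " << nVersion << "): " << command << std::endl;
    }
};

// Once the handshake has completed, ask peers that understand BIP 130 to
// announce new blocks with "headers" messages instead of "inv".
static void OnVerack(CNodeLike& peer)
{
    if (peer.nVersion >= SENDHEADERS_VERSION)
        peer.PushMessage("sendheaders");
}

int main()
{
    CNodeLike oldPeer{70011}; // pre-BIP130 peer: keeps receiving inv announcements
    CNodeLike newPeer{70012}; // BIP 130 capable peer
    OnVerack(oldPeer);
    OnVerack(newPeer);        // only this peer is sent "sendheaders"
    return 0;
}

Only a peer that negotiated protocol version 70012 or higher is sent "sendheaders", so older peers continue to receive inv-based block announcements and the new behaviour stays strictly opt-in and backwards compatible.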