-rw-r--r--  doc/build-unix.md                        |  32
-rwxr-xr-x  qa/rpc-tests/p2p-compactblocks.py        | 607
-rwxr-xr-x  qa/rpc-tests/p2p-segwit.py               |  10
-rwxr-xr-x  qa/rpc-tests/sendheaders.py              |  68
-rwxr-xr-x  qa/rpc-tests/test_framework/mininode.py  |  41
-rw-r--r--  src/blockencodings.cpp                   |   4
-rw-r--r--  src/blockencodings.h                     |   2
-rw-r--r--  src/main.cpp                             |  80
-rw-r--r--  src/qt/clientmodel.cpp                   |   2
-rw-r--r--  src/qt/modaloverlay.cpp                  |  30
-rw-r--r--  src/qt/modaloverlay.h                    |   3
-rw-r--r--  src/test/blockencodings_tests.cpp        |   6
-rw-r--r--  src/txmempool.cpp                        |   2
-rw-r--r--  src/txmempool.h                          |   2
14 files changed, 590 insertions, 299 deletions
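
The diffs below extend BIP 152 compact block support to version 2, where short transaction IDs are derived from wtxids rather than txids. For reference, this is a minimal sketch of the short-ID derivation that the test-framework changes below exercise; `bip152_shortid` is a hypothetical helper name, and `siphash256` is assumed to behave like the function in qa/rpc-tests/test_framework/siphash.py (SipHash-2-4 over a 256-bit hash given as an integer):

    import struct
    from hashlib import sha256

    # Assumed helper: SipHash-2-4 keyed by (k0, k1) over a 256-bit integer,
    # as provided by qa/rpc-tests/test_framework/siphash.py.
    from test_framework.siphash import siphash256

    def bip152_shortid(header_bytes, nonce, tx_hash_int):
        # tx_hash_int is the txid for version 1 compact blocks and the wtxid
        # for version 2, which is the substance of the use_witness changes below.
        # The SipHash keys are the first two little-endian 64-bit words of
        # single-SHA256(serialized block header || LE64 nonce).
        key_material = sha256(header_bytes + struct.pack("<Q", nonce)).digest()
        k0 = struct.unpack("<Q", key_material[0:8])[0]
        k1 = struct.unpack("<Q", key_material[8:16])[0]
        # A short ID keeps only the low 6 bytes of the SipHash output.
        return siphash256(k0, k1, tx_hash_int) & 0x0000ffffffffffff

This mirrors the `get_siphash_keys`/`calculate_shortid` logic in mininode.py; the only behavioural difference between the two compact block versions is which hash is fed in.
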
diff --git a/doc/build-unix.md b/doc/build-unix.md index 62e3e793e9..5202072f8b 100644 --- a/doc/build-unix.md +++ b/doc/build-unix.md @@ -308,3 +308,35 @@ To build executables for ARM: For further documentation on the depends system see [README.md](../depends/README.md) in the depends directory. + +Building on FreeBSD +-------------------- + +(Updated as of FreeBSD 10.3) + +Clang is installed by default as `cc` compiler, this makes it easier to get +started than on [OpenBSD](build-openbsd.md). Installing dependencies: + + pkg install autoconf automake libtool pkgconf + pkg install boost-libs openssl libevent2 + +(`libressl` instead of `openssl` will also work) + +For the wallet (optional): + + pkg install db5 + +This will give a warning "configure: WARNING: Found Berkeley DB other +than 4.8; wallets opened by this build will not be portable!", but as FreeBSD never +had a binary release, this may not matter. If backwards compatibility +with 4.8-built Bitcoin Core is needed follow the steps under "Berkeley DB" above. + +Then build using: + + ./autogen.sh + ./configure --with-incompatible-bdb CPPFLAGS=-I/usr/local/include/db5 LDFLAGS=-L/usr/local/lib/db5 + make + +*Note on debugging*: The version of `gdb` installed by default is [ancient and considered harmful](https://wiki.freebsd.org/GdbRetirement). +It is not suitable for debugging a multi-threaded C++ program, not even for getting backtraces. Please install the package `gdb` and +use the versioned gdb command e.g. `gdb7111`. diff --git a/qa/rpc-tests/p2p-compactblocks.py b/qa/rpc-tests/p2p-compactblocks.py index ac4655a841..cdcfb36e7b 100755 --- a/qa/rpc-tests/p2p-compactblocks.py +++ b/qa/rpc-tests/p2p-compactblocks.py @@ -12,14 +12,16 @@ from test_framework.script import CScript, OP_TRUE ''' CompactBlocksTest -- test compact blocks (BIP 152) -''' +Version 1 compact blocks are pre-segwit (txids) +Version 2 compact blocks are post-segwit (wtxids) +''' # TestNode: A peer we use to send messages to bitcoind, and store responses. class TestNode(SingleNodeConnCB): def __init__(self): SingleNodeConnCB.__init__(self) - self.last_sendcmpct = None + self.last_sendcmpct = [] self.last_headers = None self.last_inv = None self.last_cmpctblock = None @@ -28,9 +30,13 @@ class TestNode(SingleNodeConnCB): self.last_getblocktxn = None self.last_block = None self.last_blocktxn = None + # Store the hashes of blocks we've seen announced. + # This is for synchronizing the p2p message traffic, + # so we can eg wait until a particular block is announced. 
+ self.set_announced_blockhashes = set() def on_sendcmpct(self, conn, message): - self.last_sendcmpct = message + self.last_sendcmpct.append(message) def on_block(self, conn, message): self.last_block = message @@ -38,14 +44,22 @@ class TestNode(SingleNodeConnCB): def on_cmpctblock(self, conn, message): self.last_cmpctblock = message self.block_announced = True + self.last_cmpctblock.header_and_shortids.header.calc_sha256() + self.set_announced_blockhashes.add(self.last_cmpctblock.header_and_shortids.header.sha256) def on_headers(self, conn, message): self.last_headers = message self.block_announced = True + for x in self.last_headers.headers: + x.calc_sha256() + self.set_announced_blockhashes.add(x.sha256) def on_inv(self, conn, message): self.last_inv = message - self.block_announced = True + for x in self.last_inv.inv: + if x.type == 2: + self.block_announced = True + self.set_announced_blockhashes.add(x.hash) def on_getdata(self, conn, message): self.last_getdata = message @@ -85,34 +99,42 @@ class TestNode(SingleNodeConnCB): assert(self.received_block_announcement()) self.clear_block_announcement() + # Block until a block announcement for a particular block hash is + # received. + def wait_for_block_announcement(self, block_hash, timeout=30): + def received_hash(): + return (block_hash in self.set_announced_blockhashes) + return wait_until(received_hash, timeout=timeout) class CompactBlocksTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True - self.num_nodes = 1 + # Node0 = pre-segwit, node1 = segwit-aware + self.num_nodes = 2 self.utxos = [] def setup_network(self): self.nodes = [] - # Turn off segwit in this test, as compact blocks don't currently work - # with segwit. (After BIP 152 is updated to support segwit, we can - # test behavior with and without segwit enabled by adding a second node - # to the test.) - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1", "-bip9params=segwit:0:0"]]) + # Start up node0 to be a version 1, pre-segwit node. + self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, + [["-debug", "-logtimemicros=1", "-bip9params=segwit:0:0"], + ["-debug", "-logtimemicros", "-txindex"]]) + connect_nodes(self.nodes[0], 1) - def build_block_on_tip(self): - height = self.nodes[0].getblockcount() - tip = self.nodes[0].getbestblockhash() - mtp = self.nodes[0].getblockheader(tip)['mediantime'] + def build_block_on_tip(self, node): + height = node.getblockcount() + tip = node.getbestblockhash() + mtp = node.getblockheader(tip)['mediantime'] block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1) block.solve() return block # Create 10 more anyone-can-spend utxo's for testing. def make_utxos(self): - block = self.build_block_on_tip() + # Doesn't matter which node we use, just use node0. 
+ block = self.build_block_on_tip(self.nodes[0]) self.test_node.send_and_ping(msg_block(block)) assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256) self.nodes[0].generate(100) @@ -125,7 +147,7 @@ class CompactBlocksTest(BitcoinTestFramework): tx.vout.append(CTxOut(out_value, CScript([OP_TRUE]))) tx.rehash() - block2 = self.build_block_on_tip() + block2 = self.build_block_on_tip(self.nodes[0]) block2.vtx.append(tx) block2.hashMerkleRoot = block2.calc_merkle_root() block2.solve() @@ -134,26 +156,30 @@ class CompactBlocksTest(BitcoinTestFramework): self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)]) return - # Test "sendcmpct": - # - No compact block announcements or getdata(MSG_CMPCT_BLOCK) unless - # sendcmpct is sent. - # - If sendcmpct is sent with version > 1, the message is ignored. + # Test "sendcmpct" (between peers preferring the same version): + # - No compact block announcements unless sendcmpct is sent. + # - If sendcmpct is sent with version > preferred_version, the message is ignored. # - If sendcmpct is sent with boolean 0, then block announcements are not # made with compact blocks. # - If sendcmpct is then sent with boolean 1, then new block announcements # are made with compact blocks. - def test_sendcmpct(self): - print("Testing SENDCMPCT p2p message... ") - - # Make sure we get a version 0 SENDCMPCT message from our peer + # If old_node is passed in, request compact blocks with version=preferred-1 + # and verify that it receives block announcements via compact block. + def test_sendcmpct(self, node, test_node, preferred_version, old_node=None): + # Make sure we get a SENDCMPCT message from our peer def received_sendcmpct(): - return (self.test_node.last_sendcmpct is not None) + return (len(test_node.last_sendcmpct) > 0) got_message = wait_until(received_sendcmpct, timeout=30) assert(received_sendcmpct()) assert(got_message) - assert_equal(self.test_node.last_sendcmpct.version, 1) + with mininode_lock: + # Check that the first version received is the preferred one + assert_equal(test_node.last_sendcmpct[0].version, preferred_version) + # And that we receive versions down to 1. + assert_equal(test_node.last_sendcmpct[-1].version, 1) + test_node.last_sendcmpct = [] - tip = int(self.nodes[0].getbestblockhash(), 16) + tip = int(node.getbestblockhash(), 16) def check_announcement_of_new_block(node, peer, predicate): peer.clear_block_announcement() @@ -165,56 +191,75 @@ class CompactBlocksTest(BitcoinTestFramework): assert(predicate(peer)) # We shouldn't get any block announcements via cmpctblock yet. - check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is None) + check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None) # Try one more time, this time after requesting headers. - self.test_node.request_headers_and_sync(locator=[tip]) - check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is None and p.last_inv is not None) + test_node.request_headers_and_sync(locator=[tip]) + check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_inv is not None) # Test a few ways of using sendcmpct that should NOT # result in compact block announcements. # Before each test, sync the headers chain. 
- self.test_node.request_headers_and_sync(locator=[tip]) + test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with too-high version sendcmpct = msg_sendcmpct() - sendcmpct.version = 2 - self.test_node.send_and_ping(sendcmpct) - check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is None) + sendcmpct.version = preferred_version+1 + sendcmpct.announce = True + test_node.send_and_ping(sendcmpct) + check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None) # Headers sync before next test. - self.test_node.request_headers_and_sync(locator=[tip]) + test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with valid version, but announce=False - self.test_node.send_and_ping(msg_sendcmpct()) - check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is None) + sendcmpct.version = preferred_version + sendcmpct.announce = False + test_node.send_and_ping(sendcmpct) + check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None) # Headers sync before next test. - self.test_node.request_headers_and_sync(locator=[tip]) + test_node.request_headers_and_sync(locator=[tip]) # Finally, try a SENDCMPCT message with announce=True - sendcmpct.version = 1 + sendcmpct.version = preferred_version sendcmpct.announce = True - self.test_node.send_and_ping(sendcmpct) - check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is not None) + test_node.send_and_ping(sendcmpct) + check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None) # Try one more time (no headers sync should be needed!) - check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is not None) + check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None) # Try one more time, after turning on sendheaders - self.test_node.send_and_ping(msg_sendheaders()) - check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is not None) + test_node.send_and_ping(msg_sendheaders()) + check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None) + + # Try one more time, after sending a version-1, announce=false message. + sendcmpct.version = preferred_version-1 + sendcmpct.announce = False + test_node.send_and_ping(sendcmpct) + check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None) # Now turn off announcements + sendcmpct.version = preferred_version sendcmpct.announce = False - self.test_node.send_and_ping(sendcmpct) - check_announcement_of_new_block(self.nodes[0], self.test_node, lambda p: p.last_cmpctblock is None and p.last_headers is not None) + test_node.send_and_ping(sendcmpct) + check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_headers is not None) + + if old_node is not None: + # Verify that a peer using an older protocol version can receive + # announcements from this node. + sendcmpct.version = preferred_version-1 + sendcmpct.announce = True + old_node.send_and_ping(sendcmpct) + # Header sync + old_node.request_headers_and_sync(locator=[tip]) + check_announcement_of_new_block(node, old_node, lambda p: p.last_cmpctblock is not None) # This test actually causes bitcoind to (reasonably!) disconnect us, so do this last. 
def test_invalid_cmpctblock_message(self): - print("Testing invalid index in cmpctblock message...") self.nodes[0].generate(101) - block = self.build_block_on_tip() + block = self.build_block_on_tip(self.nodes[0]) cmpct_block = P2PHeaderAndShortIDs() cmpct_block.header = CBlockHeader(block) @@ -227,45 +272,63 @@ class CompactBlocksTest(BitcoinTestFramework): # Compare the generated shortids to what we expect based on BIP 152, given # bitcoind's choice of nonce. - def test_compactblock_construction(self): - print("Testing compactblock headers and shortIDs are correct...") - + def test_compactblock_construction(self, node, test_node, version, use_witness_address): # Generate a bunch of transactions. - self.nodes[0].generate(101) + node.generate(101) num_transactions = 25 - address = self.nodes[0].getnewaddress() + address = node.getnewaddress() + if use_witness_address: + # Want at least one segwit spend, so move all funds to + # a witness address. + address = node.addwitnessaddress(address) + value_to_send = node.getbalance() + node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1))) + node.generate(1) + + segwit_tx_generated = False for i in range(num_transactions): - self.nodes[0].sendtoaddress(address, 0.1) + txid = node.sendtoaddress(address, 0.1) + hex_tx = node.gettransaction(txid)["hex"] + tx = FromHex(CTransaction(), hex_tx) + if not tx.wit.is_null(): + segwit_tx_generated = True - self.test_node.sync_with_ping() + if use_witness_address: + assert(segwit_tx_generated) # check that our test is not broken + + # Wait until we've seen the block announcement for the resulting tip + tip = int(self.nodes[0].getbestblockhash(), 16) + assert(self.test_node.wait_for_block_announcement(tip)) # Now mine a block, and look at the resulting compact block. - self.test_node.clear_block_announcement() - block_hash = int(self.nodes[0].generate(1)[0], 16) + test_node.clear_block_announcement() + block_hash = int(node.generate(1)[0], 16) # Store the raw block in our internal format. - block = FromHex(CBlock(), self.nodes[0].getblock("%02x" % block_hash, False)) + block = FromHex(CBlock(), node.getblock("%02x" % block_hash, False)) [tx.calc_sha256() for tx in block.vtx] block.rehash() # Don't care which type of announcement came back for this test; just # request the compact block if we didn't get one yet. - wait_until(self.test_node.received_block_announcement, timeout=30) + wait_until(test_node.received_block_announcement, timeout=30) + assert(test_node.received_block_announcement()) with mininode_lock: - if self.test_node.last_cmpctblock is None: - self.test_node.clear_block_announcement() + if test_node.last_cmpctblock is None: + test_node.clear_block_announcement() inv = CInv(4, block_hash) # 4 == "CompactBlock" - self.test_node.send_message(msg_getdata([inv])) + test_node.send_message(msg_getdata([inv])) - wait_until(self.test_node.received_block_announcement, timeout=30) + wait_until(test_node.received_block_announcement, timeout=30) + assert(test_node.received_block_announcement()) # Now we should have the compactblock header_and_shortids = None with mininode_lock: - assert(self.test_node.last_cmpctblock is not None) + assert(test_node.last_cmpctblock is not None) # Convert the on-the-wire representation to absolute indexes - header_and_shortids = HeaderAndShortIDs(self.test_node.last_cmpctblock.header_and_shortids) + header_and_shortids = HeaderAndShortIDs(test_node.last_cmpctblock.header_and_shortids) # Check that we got the right block! 
header_and_shortids.header.calc_sha256() @@ -278,8 +341,17 @@ class CompactBlocksTest(BitcoinTestFramework): # Check that all prefilled_txn entries match what's in the block. for entry in header_and_shortids.prefilled_txn: entry.tx.calc_sha256() + # This checks the non-witness parts of the tx agree assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256) + # And this checks the witness + wtxid = entry.tx.calc_sha256(True) + if version == 2: + assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True)) + else: + # Shouldn't have received a witness + assert(entry.tx.wit.is_null()) + # Check that the cmpctblock message announced all the transactions. assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx)) @@ -294,7 +366,10 @@ class CompactBlocksTest(BitcoinTestFramework): # Already checked prefilled transactions above header_and_shortids.prefilled_txn.pop(0) else: - shortid = calculate_shortid(k0, k1, block.vtx[index].sha256) + tx_hash = block.vtx[index].sha256 + if version == 2: + tx_hash = block.vtx[index].calc_sha256(True) + shortid = calculate_shortid(k0, k1, tx_hash) assert_equal(shortid, header_and_shortids.shortids[0]) header_and_shortids.shortids.pop(0) index += 1 @@ -302,49 +377,50 @@ class CompactBlocksTest(BitcoinTestFramework): # Test that bitcoind requests compact blocks when we announce new blocks # via header or inv, and that responding to getblocktxn causes the block # to be successfully reconstructed. - def test_compactblock_requests(self): - print("Testing compactblock requests... ") - + # Post-segwit: upgraded nodes would only make this request of cb-version-2, + # NODE_WITNESS peers. Unupgraded nodes would still make this request of + # any cb-version-1-supporting peer. + def test_compactblock_requests(self, node, test_node): # Try announcing a block with an inv or header, expect a compactblock # request for announce in ["inv", "header"]: - block = self.build_block_on_tip() + block = self.build_block_on_tip(node) with mininode_lock: - self.test_node.last_getdata = None + test_node.last_getdata = None if announce == "inv": - self.test_node.send_message(msg_inv([CInv(2, block.sha256)])) + test_node.send_message(msg_inv([CInv(2, block.sha256)])) else: - self.test_node.send_header_for_blocks([block]) - success = wait_until(lambda: self.test_node.last_getdata is not None, timeout=30) + test_node.send_header_for_blocks([block]) + success = wait_until(lambda: test_node.last_getdata is not None, timeout=30) assert(success) - assert_equal(len(self.test_node.last_getdata.inv), 1) - assert_equal(self.test_node.last_getdata.inv[0].type, 4) - assert_equal(self.test_node.last_getdata.inv[0].hash, block.sha256) + assert_equal(len(test_node.last_getdata.inv), 1) + assert_equal(test_node.last_getdata.inv[0].type, 4) + assert_equal(test_node.last_getdata.inv[0].hash, block.sha256) # Send back a compactblock message that omits the coinbase comp_block = HeaderAndShortIDs() comp_block.header = CBlockHeader(block) comp_block.nonce = 0 comp_block.shortids = [1] # this is useless, and wrong - self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock) + test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) + assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # Expect a getblocktxn message. 
with mininode_lock: - assert(self.test_node.last_getblocktxn is not None) - absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute() + assert(test_node.last_getblocktxn is not None) + absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute() assert_equal(absolute_indexes, [0]) # should be a coinbase request # Send the coinbase, and verify that the tip advances. msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = [block.vtx[0]] - self.test_node.send_and_ping(msg) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) + test_node.send_and_ping(msg) + assert_equal(int(node.getbestblockhash(), 16), block.sha256) # Create a chain of transactions from given utxo, and add to a new block. - def build_block_with_transactions(self, utxo, num_transactions): - block = self.build_block_on_tip() + def build_block_with_transactions(self, node, utxo, num_transactions): + block = self.build_block_on_tip(node) for i in range(num_transactions): tx = CTransaction() @@ -361,118 +437,113 @@ class CompactBlocksTest(BitcoinTestFramework): # Test that we only receive getblocktxn requests for transactions that the # node needs, and that responding to them causes the block to be # reconstructed. - def test_getblocktxn_requests(self): - print("Testing getblocktxn requests...") + def test_getblocktxn_requests(self, node, test_node, version): + with_witness = (version==2) + + def test_getblocktxn_response(compact_block, peer, expected_result): + msg = msg_cmpctblock(compact_block.to_p2p()) + peer.send_and_ping(msg) + with mininode_lock: + assert(peer.last_getblocktxn is not None) + absolute_indexes = peer.last_getblocktxn.block_txn_request.to_absolute() + assert_equal(absolute_indexes, expected_result) + + def test_tip_after_message(node, peer, msg, tip): + peer.send_and_ping(msg) + assert_equal(int(node.getbestblockhash(), 16), tip) # First try announcing compactblocks that won't reconstruct, and verify # that we receive getblocktxn messages back. 
utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(utxo, 5) + block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - comp_block = HeaderAndShortIDs() - comp_block.initialize_from_block(block) + comp_block.initialize_from_block(block, use_witness=with_witness) - self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) - with mininode_lock: - assert(self.test_node.last_getblocktxn is not None) - absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute() - assert_equal(absolute_indexes, [1, 2, 3, 4, 5]) - msg = msg_blocktxn() - msg.block_transactions = BlockTransactions(block.sha256, block.vtx[1:]) - self.test_node.send_and_ping(msg) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) + test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5]) + + msg_bt = msg_blocktxn() + if with_witness: + msg_bt = msg_witness_blocktxn() # serialize with witnesses + msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:]) + test_tip_after_message(node, test_node, msg_bt, block.sha256) utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(utxo, 5) + block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Now try interspersing the prefilled transactions - comp_block.initialize_from_block(block, prefill_list=[0, 1, 5]) - self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) - with mininode_lock: - assert(self.test_node.last_getblocktxn is not None) - absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute() - assert_equal(absolute_indexes, [2, 3, 4]) - msg.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5]) - self.test_node.send_and_ping(msg) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) + comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness) + test_getblocktxn_response(comp_block, test_node, [2, 3, 4]) + msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5]) + test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now try giving one transaction ahead of time. utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(utxo, 5) + block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - self.test_node.send_and_ping(msg_tx(block.vtx[1])) - assert(block.vtx[1].hash in self.nodes[0].getrawmempool()) + test_node.send_and_ping(msg_tx(block.vtx[1])) + assert(block.vtx[1].hash in node.getrawmempool()) # Prefill 4 out of the 6 transactions, and verify that only the one # that was not in the mempool is requested. 
- comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4]) - self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) - with mininode_lock: - assert(self.test_node.last_getblocktxn is not None) - absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute() - assert_equal(absolute_indexes, [5]) + comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness) + test_getblocktxn_response(comp_block, test_node, [5]) - msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]]) - self.test_node.send_and_ping(msg) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) + msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]]) + test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now provide all transactions to the node before the block is # announced and verify reconstruction happens immediately. utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(utxo, 10) + block = self.build_block_with_transactions(node, utxo, 10) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) for tx in block.vtx[1:]: - self.test_node.send_message(msg_tx(tx)) - self.test_node.sync_with_ping() + test_node.send_message(msg_tx(tx)) + test_node.sync_with_ping() # Make sure all transactions were accepted. - mempool = self.nodes[0].getrawmempool() + mempool = node.getrawmempool() for tx in block.vtx[1:]: assert(tx.hash in mempool) # Clear out last request. with mininode_lock: - self.test_node.last_getblocktxn = None + test_node.last_getblocktxn = None # Send compact block - comp_block.initialize_from_block(block, prefill_list=[0]) - self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) + comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness) + test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256) with mininode_lock: # Shouldn't have gotten a request for any transaction - assert(self.test_node.last_getblocktxn is None) - # Tip should have updated - assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) + assert(test_node.last_getblocktxn is None) # Incorrectly responding to a getblocktxn shouldn't cause the block to be # permanently failed. - def test_incorrect_blocktxn_response(self): - print("Testing handling of incorrect blocktxn responses...") - + def test_incorrect_blocktxn_response(self, node, test_node, version): if (len(self.utxos) == 0): self.make_utxos() utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(utxo, 10) + block = self.build_block_with_transactions(node, utxo, 10) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Relay the first 5 transactions from the block in advance for tx in block.vtx[1:6]: - self.test_node.send_message(msg_tx(tx)) - self.test_node.sync_with_ping() + test_node.send_message(msg_tx(tx)) + test_node.sync_with_ping() # Make sure all transactions were accepted. 
- mempool = self.nodes[0].getrawmempool() + mempool = node.getrawmempool() for tx in block.vtx[1:6]: assert(tx.hash in mempool) # Send compact block comp_block = HeaderAndShortIDs() - comp_block.initialize_from_block(block, prefill_list=[0]) - self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) + comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2)) + test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) absolute_indexes = [] with mininode_lock: - assert(self.test_node.last_getblocktxn is not None) - absolute_indexes = self.test_node.last_getblocktxn.block_txn_request.to_absolute() + assert(test_node.last_getblocktxn is not None) + absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute() assert_equal(absolute_indexes, [6, 7, 8, 9, 10]) # Now give an incorrect response. @@ -484,100 +555,107 @@ class CompactBlocksTest(BitcoinTestFramework): # verifying that the block isn't marked bad permanently. This is good # enough for now. msg = msg_blocktxn() + if version==2: + msg = msg_witness_blocktxn() msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:]) - self.test_node.send_and_ping(msg) + test_node.send_and_ping(msg) # Tip should not have updated - assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock) + assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # We should receive a getdata request - success = wait_until(lambda: self.test_node.last_getdata is not None, timeout=10) + success = wait_until(lambda: test_node.last_getdata is not None, timeout=10) assert(success) - assert_equal(len(self.test_node.last_getdata.inv), 1) - assert_equal(self.test_node.last_getdata.inv[0].type, 2) - assert_equal(self.test_node.last_getdata.inv[0].hash, block.sha256) + assert_equal(len(test_node.last_getdata.inv), 1) + assert(test_node.last_getdata.inv[0].type == 2 or test_node.last_getdata.inv[0].type == 2|MSG_WITNESS_FLAG) + assert_equal(test_node.last_getdata.inv[0].hash, block.sha256) # Deliver the block - self.test_node.send_and_ping(msg_block(block)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) - - def test_getblocktxn_handler(self): - print("Testing getblocktxn handler...") + if version==2: + test_node.send_and_ping(msg_witness_block(block)) + else: + test_node.send_and_ping(msg_block(block)) + assert_equal(int(node.getbestblockhash(), 16), block.sha256) + def test_getblocktxn_handler(self, node, test_node, version): # bitcoind won't respond for blocks whose height is more than 15 blocks # deep. 
MAX_GETBLOCKTXN_DEPTH = 15 - chain_height = self.nodes[0].getblockcount() + chain_height = node.getblockcount() current_height = chain_height while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH): - block_hash = self.nodes[0].getblockhash(current_height) - block = FromHex(CBlock(), self.nodes[0].getblock(block_hash, False)) + block_hash = node.getblockhash(current_height) + block = FromHex(CBlock(), node.getblock(block_hash, False)) msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), []) num_to_request = random.randint(1, len(block.vtx)) msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request))) - self.test_node.send_message(msg) - success = wait_until(lambda: self.test_node.last_blocktxn is not None, timeout=10) + test_node.send_message(msg) + success = wait_until(lambda: test_node.last_blocktxn is not None, timeout=10) assert(success) [tx.calc_sha256() for tx in block.vtx] with mininode_lock: - assert_equal(self.test_node.last_blocktxn.block_transactions.blockhash, int(block_hash, 16)) + assert_equal(test_node.last_blocktxn.block_transactions.blockhash, int(block_hash, 16)) all_indices = msg.block_txn_request.to_absolute() for index in all_indices: - tx = self.test_node.last_blocktxn.block_transactions.transactions.pop(0) + tx = test_node.last_blocktxn.block_transactions.transactions.pop(0) tx.calc_sha256() assert_equal(tx.sha256, block.vtx[index].sha256) - self.test_node.last_blocktxn = None + if version == 1: + # Witnesses should have been stripped + assert(tx.wit.is_null()) + else: + # Check that the witness matches + assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True)) + test_node.last_blocktxn = None current_height -= 1 # Next request should be ignored, as we're past the allowed depth. - block_hash = self.nodes[0].getblockhash(current_height) + block_hash = node.getblockhash(current_height) msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0]) - self.test_node.send_and_ping(msg) + test_node.send_and_ping(msg) with mininode_lock: - assert_equal(self.test_node.last_blocktxn, None) - - def test_compactblocks_not_at_tip(self): - print("Testing compactblock requests/announcements not at chain tip...") + assert_equal(test_node.last_blocktxn, None) + def test_compactblocks_not_at_tip(self, node, test_node): # Test that requesting old compactblocks doesn't work. 
MAX_CMPCTBLOCK_DEPTH = 11 new_blocks = [] for i in range(MAX_CMPCTBLOCK_DEPTH): - self.test_node.clear_block_announcement() - new_blocks.append(self.nodes[0].generate(1)[0]) - wait_until(self.test_node.received_block_announcement, timeout=30) + test_node.clear_block_announcement() + new_blocks.append(node.generate(1)[0]) + wait_until(test_node.received_block_announcement, timeout=30) - self.test_node.clear_block_announcement() - self.test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) - success = wait_until(lambda: self.test_node.last_cmpctblock is not None, timeout=30) + test_node.clear_block_announcement() + test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) + success = wait_until(lambda: test_node.last_cmpctblock is not None, timeout=30) assert(success) - self.test_node.clear_block_announcement() - self.nodes[0].generate(1) - wait_until(self.test_node.received_block_announcement, timeout=30) - self.test_node.clear_block_announcement() - self.test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) - success = wait_until(lambda: self.test_node.last_block is not None, timeout=30) + test_node.clear_block_announcement() + node.generate(1) + wait_until(test_node.received_block_announcement, timeout=30) + test_node.clear_block_announcement() + test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) + success = wait_until(lambda: test_node.last_block is not None, timeout=30) assert(success) with mininode_lock: - self.test_node.last_block.block.calc_sha256() - assert_equal(self.test_node.last_block.block.sha256, int(new_blocks[0], 16)) + test_node.last_block.block.calc_sha256() + assert_equal(test_node.last_block.block.sha256, int(new_blocks[0], 16)) # Generate an old compactblock, and verify that it's not accepted. - cur_height = self.nodes[0].getblockcount() - hashPrevBlock = int(self.nodes[0].getblockhash(cur_height-5), 16) - block = self.build_block_on_tip() + cur_height = node.getblockcount() + hashPrevBlock = int(node.getblockhash(cur_height-5), 16) + block = self.build_block_on_tip(node) block.hashPrevBlock = hashPrevBlock block.solve() comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) - self.test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) + test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) - tips = self.nodes[0].getchaintips() + tips = node.getchaintips() found = False for x in tips: if x["hash"] == block.hash: @@ -591,18 +669,61 @@ class CompactBlocksTest(BitcoinTestFramework): msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0]) with mininode_lock: - self.test_node.last_blocktxn = None - self.test_node.send_and_ping(msg) + test_node.last_blocktxn = None + test_node.send_and_ping(msg) with mininode_lock: - assert(self.test_node.last_blocktxn is None) + assert(test_node.last_blocktxn is None) + + def activate_segwit(self, node): + node.generate(144*3) + assert_equal(get_bip9_status(node, "segwit")["status"], 'active') + + def test_end_to_end_block_relay(self, node, listeners): + utxo = self.utxos.pop(0) + + block = self.build_block_with_transactions(node, utxo, 10) + + [l.clear_block_announcement() for l in listeners] + + # ToHex() won't serialize with witness, but this block has no witnesses + # anyway. TODO: repeat this test with witness tx's to a segwit node. 
+ node.submitblock(ToHex(block)) + + for l in listeners: + wait_until(lambda: l.received_block_announcement(), timeout=30) + with mininode_lock: + for l in listeners: + assert(l.last_cmpctblock is not None) + l.last_cmpctblock.header_and_shortids.header.calc_sha256() + assert_equal(l.last_cmpctblock.header_and_shortids.header.sha256, block.sha256) + + # Helper for enabling cb announcements + # Send the sendcmpct request and sync headers + def request_cb_announcements(self, peer, node, version): + tip = node.getbestblockhash() + peer.get_headers(locator=[int(tip, 16)], hashstop=0) + + msg = msg_sendcmpct() + msg.version = version + msg.announce = True + peer.send_and_ping(msg) + def run_test(self): # Setup the p2p connections and start up the network thread. self.test_node = TestNode() + self.segwit_node = TestNode() + self.old_node = TestNode() # version 1 peer <--> segwit node connections = [] connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node)) + connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], + self.segwit_node, services=NODE_NETWORK|NODE_WITNESS)) + connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], + self.old_node, services=NODE_NETWORK)) self.test_node.add_connection(connections[0]) + self.segwit_node.add_connection(connections[1]) + self.old_node.add_connection(connections[2]) NetworkThread().start() # Start up network handling in another thread @@ -612,13 +733,107 @@ class CompactBlocksTest(BitcoinTestFramework): # We will need UTXOs to construct transactions in later tests. self.make_utxos() - self.test_sendcmpct() - self.test_compactblock_construction() - self.test_compactblock_requests() - self.test_getblocktxn_requests() - self.test_getblocktxn_handler() - self.test_compactblocks_not_at_tip() - self.test_incorrect_blocktxn_response() + print("Running tests, pre-segwit activation:") + + print("\tTesting SENDCMPCT p2p message... ") + self.test_sendcmpct(self.nodes[0], self.test_node, 1) + sync_blocks(self.nodes) + self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node) + sync_blocks(self.nodes) + + print("\tTesting compactblock construction...") + self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False) + sync_blocks(self.nodes) + self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False) + sync_blocks(self.nodes) + + print("\tTesting compactblock requests... 
") + self.test_compactblock_requests(self.nodes[0], self.test_node) + sync_blocks(self.nodes) + self.test_compactblock_requests(self.nodes[1], self.segwit_node) + sync_blocks(self.nodes) + + print("\tTesting getblocktxn requests...") + self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) + sync_blocks(self.nodes) + self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2) + sync_blocks(self.nodes) + + print("\tTesting getblocktxn handler...") + self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1) + sync_blocks(self.nodes) + self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2) + self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) + sync_blocks(self.nodes) + + print("\tTesting compactblock requests/announcements not at chain tip...") + self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node) + sync_blocks(self.nodes) + self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node) + self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node) + sync_blocks(self.nodes) + + print("\tTesting handling of incorrect blocktxn responses...") + self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1) + sync_blocks(self.nodes) + self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2) + sync_blocks(self.nodes) + + # End-to-end block relay tests + print("\tTesting end-to-end block relay...") + self.request_cb_announcements(self.test_node, self.nodes[0], 1) + self.request_cb_announcements(self.old_node, self.nodes[1], 1) + self.request_cb_announcements(self.segwit_node, self.nodes[1], 2) + self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node]) + self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node]) + + # Advance to segwit activation + print ("\nAdvancing to segwit activation\n") + self.activate_segwit(self.nodes[1]) + print ("Running tests, post-segwit activation...") + + print("\tTesting compactblock construction...") + self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True) + self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True) + sync_blocks(self.nodes) + + print("\tTesting compactblock requests (unupgraded node)... ") + self.test_compactblock_requests(self.nodes[0], self.test_node) + + print("\tTesting getblocktxn requests (unupgraded node)...") + self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) + + # Need to manually sync node0 and node1, because post-segwit activation, + # node1 will not download blocks from node0. + print("\tSyncing nodes...") + assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash()) + while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()): + block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1) + self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False)) + assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash()) + + print("\tTesting compactblock requests (segwit node)... 
") + self.test_compactblock_requests(self.nodes[1], self.segwit_node) + + print("\tTesting getblocktxn requests (segwit node)...") + self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2) + sync_blocks(self.nodes) + + print("\tTesting getblocktxn handler (segwit node should return witnesses)...") + self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2) + self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) + + # Test that if we submitblock to node1, we'll get a compact block + # announcement to all peers. + # (Post-segwit activation, blocks won't propagate from node0 to node1 + # automatically, so don't bother testing a block announced to node0.) + print("\tTesting end-to-end block relay...") + self.request_cb_announcements(self.test_node, self.nodes[0], 1) + self.request_cb_announcements(self.old_node, self.nodes[1], 1) + self.request_cb_announcements(self.segwit_node, self.nodes[1], 2) + self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node]) + + print("\tTesting invalid index in cmpctblock message...") self.test_invalid_cmpctblock_message() diff --git a/qa/rpc-tests/p2p-segwit.py b/qa/rpc-tests/p2p-segwit.py index 5c1eb21b1f..c2ea20bb84 100755 --- a/qa/rpc-tests/p2p-segwit.py +++ b/qa/rpc-tests/p2p-segwit.py @@ -912,14 +912,6 @@ class SegWitTest(BitcoinTestFramework): # But eliminating the witness should fix it self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True) - # Verify that inv's to test_node come with getdata's for non-witness tx's - # Just tweak the transaction, announce it, and verify we get a getdata - # for a normal tx - tx.vout[0].scriptPubKey = CScript([OP_TRUE, OP_TRUE]) - tx.rehash() - self.test_node.announce_tx_and_wait_for_getdata(tx) - assert(self.test_node.last_getdata.inv[0].type == 1) - # Cleanup: mine the first transaction and update utxo self.nodes[0].generate(1) assert_equal(len(self.nodes[0].getrawmempool()), 0) @@ -1025,7 +1017,7 @@ class SegWitTest(BitcoinTestFramework): def test_block_relay(self, segwit_activated): print("\tTesting block relay") - blocktype = 2|MSG_WITNESS_FLAG if segwit_activated else 2 + blocktype = 2|MSG_WITNESS_FLAG # test_node has set NODE_WITNESS, so all getdata requests should be for # witness blocks. diff --git a/qa/rpc-tests/sendheaders.py b/qa/rpc-tests/sendheaders.py index c3f3180b6b..81b2442e6a 100755 --- a/qa/rpc-tests/sendheaders.py +++ b/qa/rpc-tests/sendheaders.py @@ -80,20 +80,19 @@ e. Announce one more that doesn't connect. Expect: disconnect. 
''' -class BaseNode(NodeConnCB): +direct_fetch_response_time = 0.05 + +class BaseNode(SingleNodeConnCB): def __init__(self): - NodeConnCB.__init__(self) - self.connection = None + SingleNodeConnCB.__init__(self) self.last_inv = None self.last_headers = None self.last_block = None - self.ping_counter = 1 - self.last_pong = msg_pong(0) self.last_getdata = None - self.sleep_time = 0.05 self.block_announced = False self.last_getheaders = None self.disconnected = False + self.last_blockhash_announced = None def clear_last_announcement(self): with mininode_lock: @@ -101,9 +100,6 @@ class BaseNode(NodeConnCB): self.last_inv = None self.last_headers = None - def add_connection(self, conn): - self.connection = conn - # Request data for a list of block hashes def get_data(self, block_hashes): msg = msg_getdata() @@ -122,17 +118,17 @@ class BaseNode(NodeConnCB): msg.inv = [CInv(2, blockhash)] self.connection.send_message(msg) - # Wrapper for the NodeConn's send_message function - def send_message(self, message): - self.connection.send_message(message) - def on_inv(self, conn, message): self.last_inv = message self.block_announced = True + self.last_blockhash_announced = message.inv[-1].hash def on_headers(self, conn, message): self.last_headers = message - self.block_announced = True + if len(message.headers): + self.block_announced = True + message.headers[-1].calc_sha256() + self.last_blockhash_announced = message.headers[-1].sha256 def on_block(self, conn, message): self.last_block = message.block @@ -141,9 +137,6 @@ class BaseNode(NodeConnCB): def on_getdata(self, conn, message): self.last_getdata = message - def on_pong(self, conn, message): - self.last_pong = message - def on_getheaders(self, conn, message): self.last_getheaders = message @@ -157,7 +150,7 @@ class BaseNode(NodeConnCB): expect_headers = headers if headers != None else [] expect_inv = inv if inv != None else [] test_function = lambda: self.block_announced - self.sync(test_function) + assert(wait_until(test_function, timeout=60)) with mininode_lock: self.block_announced = False @@ -180,30 +173,14 @@ class BaseNode(NodeConnCB): return success # Syncing helpers - def sync(self, test_function, timeout=60): - while timeout > 0: - with mininode_lock: - if test_function(): - return - time.sleep(self.sleep_time) - timeout -= self.sleep_time - raise AssertionError("Sync failed to complete") - - def sync_with_ping(self, timeout=60): - self.send_message(msg_ping(nonce=self.ping_counter)) - test_function = lambda: self.last_pong.nonce == self.ping_counter - self.sync(test_function, timeout) - self.ping_counter += 1 - return - def wait_for_block(self, blockhash, timeout=60): test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash - self.sync(test_function, timeout) + assert(wait_until(test_function, timeout=timeout)) return def wait_for_getheaders(self, timeout=60): test_function = lambda: self.last_getheaders != None - self.sync(test_function, timeout) + assert(wait_until(test_function, timeout=timeout)) return def wait_for_getdata(self, hash_list, timeout=60): @@ -211,12 +188,17 @@ class BaseNode(NodeConnCB): return test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list - self.sync(test_function, timeout) + assert(wait_until(test_function, timeout=timeout)) return def wait_for_disconnect(self, timeout=60): test_function = lambda: self.disconnected - self.sync(test_function, timeout) + assert(wait_until(test_function, timeout=timeout)) + return + + def 
wait_for_block_announcement(self, block_hash, timeout=60): + test_function = lambda: self.last_blockhash_announced == block_hash + assert(wait_until(test_function, timeout=timeout)) return def send_header_for_blocks(self, new_blocks): @@ -266,7 +248,9 @@ class SendHeadersTest(BitcoinTestFramework): def mine_reorg(self, length): self.nodes[0].generate(length) # make sure all invalidated blocks are node0's sync_blocks(self.nodes, wait=0.1) - [x.clear_last_announcement() for x in self.p2p_connections] + for x in self.p2p_connections: + x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16)) + x.clear_last_announcement() tip_height = self.nodes[1].getblockcount() hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1)) @@ -495,7 +479,7 @@ class SendHeadersTest(BitcoinTestFramework): test_node.send_header_for_blocks(blocks) test_node.sync_with_ping() - test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=test_node.sleep_time) + test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time) [ test_node.send_message(msg_block(x)) for x in blocks ] @@ -526,13 +510,13 @@ class SendHeadersTest(BitcoinTestFramework): # both blocks (same work as tip) test_node.send_header_for_blocks(blocks[1:2]) test_node.sync_with_ping() - test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=test_node.sleep_time) + test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time) # Announcing 16 more headers should trigger direct fetch for 14 more # blocks test_node.send_header_for_blocks(blocks[2:18]) test_node.sync_with_ping() - test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=test_node.sleep_time) + test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time) # Announcing 1 more header should not trigger any response test_node.last_getdata = None diff --git a/qa/rpc-tests/test_framework/mininode.py b/qa/rpc-tests/test_framework/mininode.py index 88a3b7e0f7..4d238c08d9 100755 --- a/qa/rpc-tests/test_framework/mininode.py +++ b/qa/rpc-tests/test_framework/mininode.py @@ -452,7 +452,7 @@ class CTransaction(object): else: self.vout = deser_vector(f, CTxOut) if flags != 0: - self.wit.vtxinwit = [CTxInWitness()]*len(self.vin) + self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))] self.wit.deserialize(f) self.nLockTime = struct.unpack("<I", f.read(4))[0] self.sha256 = None @@ -518,8 +518,8 @@ class CTransaction(object): return True def __repr__(self): - return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \ - % (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime) + return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \ + % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime) class CBlockHeader(object): @@ -755,6 +755,9 @@ class PrefilledTransaction(object): r += self.tx.serialize_without_witness() return r + def serialize_with_witness(self): + return self.serialize(with_witness=True) + def __repr__(self): return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx)) @@ -779,6 +782,7 @@ class P2PHeaderAndShortIDs(object): self.prefilled_txn = deser_vector(f, PrefilledTransaction) self.prefilled_txn_length = len(self.prefilled_txn) + # When using version 2 compact blocks, we must serialize with_witness. 
def serialize(self, with_witness=False): r = b"" r += self.header.serialize() @@ -787,12 +791,20 @@ class P2PHeaderAndShortIDs(object): for x in self.shortids: # We only want the first 6 bytes r += struct.pack("<Q", x)[0:6] - r += ser_vector(self.prefilled_txn) + if with_witness: + r += ser_vector(self.prefilled_txn, "serialize_with_witness") + else: + r += ser_vector(self.prefilled_txn) return r def __repr__(self): return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn)) +# P2P version of the above that will use witness serialization (for compact +# block version 2) +class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs): + def serialize(self): + return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True) # Calculate the BIP 152-compact blocks shortid for a given transaction hash def calculate_shortid(k0, k1, tx_hash): @@ -808,6 +820,7 @@ class HeaderAndShortIDs(object): self.nonce = 0 self.shortids = [] self.prefilled_txn = [] + self.use_witness = False if p2pheaders_and_shortids != None: self.header = p2pheaders_and_shortids.header @@ -819,7 +832,10 @@ class HeaderAndShortIDs(object): last_index = self.prefilled_txn[-1].index def to_p2p(self): - ret = P2PHeaderAndShortIDs() + if self.use_witness: + ret = P2PHeaderAndShortWitnessIDs() + else: + ret = P2PHeaderAndShortIDs() ret.header = self.header ret.nonce = self.nonce ret.shortids_length = len(self.shortids) @@ -840,15 +856,20 @@ class HeaderAndShortIDs(object): key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0] return [ key0, key1 ] - def initialize_from_block(self, block, nonce=0, prefill_list = [0]): + # Version 2 compact blocks use wtxid in shortids (rather than txid) + def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False): self.header = CBlockHeader(block) self.nonce = nonce self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ] self.shortids = [] + self.use_witness = use_witness [k0, k1] = self.get_siphash_keys() for i in range(len(block.vtx)): if i not in prefill_list: - self.shortids.append(calculate_shortid(k0, k1, block.vtx[i].sha256)) + tx_hash = block.vtx[i].sha256 + if use_witness: + tx_hash = block.vtx[i].calc_sha256(with_witness=True) + self.shortids.append(calculate_shortid(k0, k1, tx_hash)) def __repr__(self): return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn)) @@ -1424,6 +1445,12 @@ class msg_blocktxn(object): def __repr__(self): return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions)) +class msg_witness_blocktxn(msg_blocktxn): + def serialize(self): + r = b"" + r += self.block_transactions.serialize(with_witness=True) + return r + # This is what a callback should look like for NodeConn # Reimplement the on_* functions to provide handling for events class NodeConnCB(object): diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp index df237f8f26..93d3fa372b 100644 --- a/src/blockencodings.cpp +++ b/src/blockencodings.cpp @@ -17,7 +17,7 @@ #define MIN_TRANSACTION_BASE_SIZE (::GetSerializeSize(CTransaction(), SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS)) -CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block) : 
+CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, bool fUseWTXID) : nonce(GetRand(std::numeric_limits<uint64_t>::max())), shorttxids(block.vtx.size() - 1), prefilledtxn(1), header(block) { FillShortTxIDSelector(); @@ -25,7 +25,7 @@ CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block) : prefilledtxn[0] = {0, block.vtx[0]}; for (size_t i = 1; i < block.vtx.size(); i++) { const CTransaction& tx = block.vtx[i]; - shorttxids[i - 1] = GetShortID(tx.GetHash()); + shorttxids[i - 1] = GetShortID(fUseWTXID ? tx.GetWitnessHash() : tx.GetHash()); } } diff --git a/src/blockencodings.h b/src/blockencodings.h index 349fcbd50f..99b1cb140d 100644 --- a/src/blockencodings.h +++ b/src/blockencodings.h @@ -146,7 +146,7 @@ public: // Dummy for deserialization CBlockHeaderAndShortTxIDs() {} - CBlockHeaderAndShortTxIDs(const CBlock& block); + CBlockHeaderAndShortTxIDs(const CBlock& block, bool fUseWTXID); uint64_t GetShortID(const uint256& txhash) const; diff --git a/src/main.cpp b/src/main.cpp index 437e972382..c92a38be98 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -289,10 +289,21 @@ struct CNodeState { bool fPreferHeaders; //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements. bool fPreferHeaderAndIDs; - //! Whether this peer will send us cmpctblocks if we request them + /** + * Whether this peer will send us cmpctblocks if we request them. + * This is not used to gate request logic, as we really only care about fSupportsDesiredCmpctVersion, + * but is used as a flag to "lock in" the version of compact blocks (fWantsCmpctWitness) we send. + */ bool fProvidesHeaderAndIDs; //! Whether this peer can give us witnesses bool fHaveWitness; + //! Whether this peer wants witnesses in cmpctblocks/blocktxns + bool fWantsCmpctWitness; + /** + * If we've announced NODE_WITNESS to this peer: whether the peer sends witnesses in cmpctblocks/blocktxns, + * otherwise: whether this peer sends non-witnesses in cmpctblocks/blocktxns. + */ + bool fSupportsDesiredCmpctVersion; CNodeState() { fCurrentlyConnected = false; @@ -313,6 +324,8 @@ struct CNodeState { fPreferHeaderAndIDs = false; fProvidesHeaderAndIDs = false; fHaveWitness = false; + fWantsCmpctWitness = false; + fSupportsDesiredCmpctVersion = false; } }; @@ -467,8 +480,8 @@ void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) { } void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pfrom, CConnman& connman) { - if (pfrom->GetLocalServices() & NODE_WITNESS) { - // Don't ever request compact blocks when segwit is enabled. + if (!nodestate->fSupportsDesiredCmpctVersion) { + // Never ask from peers who can't provide witnesses. return; } if (nodestate->fProvidesHeaderAndIDs) { @@ -476,7 +489,7 @@ void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pf if (nodeid == pfrom->GetId()) return; bool fAnnounceUsingCMPCTBLOCK = false; - uint64_t nCMPCTBLOCKVersion = 1; + uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1; if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { // As per BIP152, we only get 3 of our peers to announce // blocks using compact encodings. @@ -4856,11 +4869,12 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam // they wont have a useful mempool to match against a compact block, // and we don't feel like constructing the object for them, so // instead we respond with the full, non-compact block. 
+                    bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
                     if (mi->second->nHeight >= chainActive.Height() - 10) {
-                        CBlockHeaderAndShortTxIDs cmpctblock(block);
-                        pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
+                        CBlockHeaderAndShortTxIDs cmpctblock(block, fPeerWantsWitness);
+                        pfrom->PushMessageWithFlag(fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
                     } else
-                        pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
+                        pfrom->PushMessageWithFlag(fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
                 }

                 // Trigger the peer node to send a getblocks request for the next batch of inventory
@@ -4922,7 +4936,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam

 uint32_t GetFetchFlags(CNode* pfrom, CBlockIndex* pprev, const Consensus::Params& chainparams) {
     uint32_t nFetchFlags = 0;
-    if (IsWitnessEnabled(pprev, chainparams) && State(pfrom->GetId())->fHaveWitness) {
+    if ((pfrom->GetLocalServices() & NODE_WITNESS) && State(pfrom->GetId())->fHaveWitness) {
         nFetchFlags |= MSG_WITNESS_FLAG;
     }
     return nFetchFlags;
@@ -5128,13 +5142,16 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
             pfrom->PushMessage(NetMsgType::SENDHEADERS);
         }
         if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
-            // Tell our peer we are willing to provide version-1 cmpctblocks
+            // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
             // However, we do not request new block announcements using
             // cmpctblock messages.
             // We send this to non-NODE NETWORK peers as well, because
             // they may wish to request compact blocks from us
             bool fAnnounceUsingCMPCTBLOCK = false;
-            uint64_t nCMPCTBLOCKVersion = 1;
+            uint64_t nCMPCTBLOCKVersion = 2;
+            if (pfrom->GetLocalServices() & NODE_WITNESS)
+                pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
+            nCMPCTBLOCKVersion = 1;
             pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
         }
     }
@@ -5195,12 +5212,23 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
     else if (strCommand == NetMsgType::SENDCMPCT)
     {
         bool fAnnounceUsingCMPCTBLOCK = false;
-        uint64_t nCMPCTBLOCKVersion = 1;
+        uint64_t nCMPCTBLOCKVersion = 0;
         vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
-        if (nCMPCTBLOCKVersion == 1) {
+        if (nCMPCTBLOCKVersion == 1 || ((pfrom->GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
             LOCK(cs_main);
-            State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
-            State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
+            // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
+            if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) {
+                State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
+                State(pfrom->GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
+            }
+            if (State(pfrom->GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
+                State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
+            if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) {
+                if (pfrom->GetLocalServices() & NODE_WITNESS)
+                    State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
+                else
+                    State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
+            }
         }
     }
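A minimal sketch of this negotiation from a test peer's side, assuming the framework's msg_sendcmpct message with its announce/version fields and the send_and_ping helper (assumptions here, not shown in this hunk). Because the node locks in the compact block version from the first sendcmpct it accepts, a peer that wants witnesses should send version 2 before version 1:

    from test_framework.mininode import msg_sendcmpct

    def request_cmpctblock_announcements(peer, segwit=True):
        # Send version 2 first so the node "locks in" witness compact blocks
        # (fWantsCmpctWitness) for this peer; a pre-segwit peer would only
        # send version 1.
        for version in ([2, 1] if segwit else [1]):
            msg = msg_sendcmpct()
            msg.version = version
            msg.announce = True
            peer.send_and_ping(msg)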
@@ -5258,7 +5286,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                     nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER &&
                     (!IsWitnessEnabled(chainActive.Tip(), chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
                     inv.type |= nFetchFlags;
-                    if (nodestate->fProvidesHeaderAndIDs && !(pfrom->GetLocalServices() & NODE_WITNESS))
+                    if (nodestate->fSupportsDesiredCmpctVersion)
                         vToFetch.push_back(CInv(MSG_CMPCT_BLOCK, inv.hash));
                     else
                         vToFetch.push_back(inv);
@@ -5386,7 +5414,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
             }
             resp.txn[i] = block.vtx[req.indexes[i]];
         }
-        pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCKTXN, resp);
+        pfrom->PushMessageWithFlag(State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCKTXN, resp);
     }
@@ -5650,7 +5678,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                 // We requested this block for some reason, but our mempool will probably be useless
                 // so we just grab the block via normal getdata
                 std::vector<CInv> vInv(1);
-                vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
+                vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
                 pfrom->PushMessage(NetMsgType::GETDATA, vInv);
             }
             return true;
@@ -5662,6 +5690,12 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,

         CNodeState *nodestate = State(pfrom->GetId());

+        if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
+            // Don't bother trying to process compact blocks from v1 peers
+            // after segwit activates.
+            return true;
+        }
+
         // We want to be a bit conservative just to be extra careful about DoS
         // possibilities in compact block processing...
         if (pindex->nHeight <= chainActive.Height() + 2) {
@@ -5688,7 +5722,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
             } else if (status == READ_STATUS_FAILED) {
                 // Duplicate txindexes, the block is now in-flight, so just request it
                 std::vector<CInv> vInv(1);
-                vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
+                vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
                 pfrom->PushMessage(NetMsgType::GETDATA, vInv);
                 return true;
             }
@@ -5715,7 +5749,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
             // We requested this block, but its far into the future, so our
             // mempool will probably be useless - request the block normally
             std::vector<CInv> vInv(1);
-            vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
+            vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
             pfrom->PushMessage(NetMsgType::GETDATA, vInv);
             return true;
         } else {
@@ -5757,7 +5791,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
         } else if (status == READ_STATUS_FAILED) {
             // Might have collided, fall back to getdata now :(
             std::vector<CInv> invs;
-            invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
+            invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()), resp.blockhash));
             pfrom->PushMessage(NetMsgType::GETDATA, invs);
         } else {
             CValidationState state;
@@ -5906,7 +5940,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                         pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
             }
             if (vGetData.size() > 0) {
-                if (nodestate->fProvidesHeaderAndIDs && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN) && !(pfrom->GetLocalServices() & NODE_WITNESS)) {
+                if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
                     // We seem to be rather well-synced, so it appears pfrom was the first to provide us
                     // with this block! Let's get them to announce using compact blocks in the future.
                     MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom, connman);
@@ -6536,8 +6570,8 @@ bool SendMessages(CNode* pto, CConnman& connman)
                 //TODO: Shouldn't need to reload block from disk, but requires refactor
                 CBlock block;
                 assert(ReadBlockFromDisk(block, pBestIndex, consensusParams));
-                CBlockHeaderAndShortTxIDs cmpctblock(block);
-                pto->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
+                CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
+                pto->PushMessageWithFlag(state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
                 state.pindexBestHeaderSent = pBestIndex;
             } else if (state.fPreferHeaders) {
                 if (vHeaders.size() > 1) {
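On the receiving side, a hedged sketch of how a test could verify that an announcement produced by the code above really computed its shortids from wtxids; it assumes the test node has kept the announced msg_cmpctblock and knows the block's non-prefilled transactions (both assumptions, not part of the diff), and it mirrors initialize_from_block(use_witness=True):

    from test_framework.mininode import HeaderAndShortIDs, calculate_shortid

    def check_wtxid_shortids(announced_cmpctblock, non_prefilled_txs):
        comp = HeaderAndShortIDs(announced_cmpctblock.header_and_shortids)
        [k0, k1] = comp.get_siphash_keys()
        expected = set()
        for tx in non_prefilled_txs:
            # calc_sha256(with_witness=True) returns the wtxid, which is what
            # version 2 compact blocks hash into shortids.
            expected.add(calculate_shortid(k0, k1, tx.calc_sha256(with_witness=True)))
        for shortid in comp.shortids:
            assert shortid in expected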
diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp
index 6d19b3d122..87704c641d 100644
--- a/src/qt/clientmodel.cpp
+++ b/src/qt/clientmodel.cpp
@@ -256,7 +256,7 @@ static void BlockTipChanged(ClientModel *clientmodel, bool initialSync, const CB
     int64_t& nLastUpdateNotification = fHeader ? nLastHeaderTipUpdateNotification : nLastBlockTipUpdateNotification;

     // if we are in-sync, update the UI regardless of last update time
-    if (fHeader || !initialSync || now - nLastUpdateNotification > MODEL_UPDATE_DELAY) {
+    if (!initialSync || now - nLastUpdateNotification > MODEL_UPDATE_DELAY) {
         //pass a async signal to the UI thread
         QMetaObject::invokeMethod(clientmodel, "numBlocksChanged", Qt::QueuedConnection,
                                   Q_ARG(int, pIndex->nHeight),
diff --git a/src/qt/modaloverlay.cpp b/src/qt/modaloverlay.cpp
index 2de2dde16a..5caade7d38 100644
--- a/src/qt/modaloverlay.cpp
+++ b/src/qt/modaloverlay.cpp
@@ -13,7 +13,8 @@ ModalOverlay::ModalOverlay(QWidget *parent) :
 QWidget(parent),
 ui(new Ui::ModalOverlay),
-bestBlockHeight(0),
+bestHeaderHeight(0),
+bestHeaderDate(QDateTime()),
 layerIsVisible(false),
 userClosed(false)
 {
@@ -65,14 +66,9 @@ bool ModalOverlay::event(QEvent* ev) {

 void ModalOverlay::setKnownBestHeight(int count, const QDateTime& blockDate)
 {
-
-    /* only update the blockheight if the headerschain-tip is not older then 30 days */
-    int64_t now = QDateTime::currentDateTime().toTime_t();
-    int64_t btime = blockDate.toTime_t();
-    if (btime+3600*24*30 > now)
-    {
-        if (count > bestBlockHeight)
-            bestBlockHeight = count;
+    if (count > bestHeaderHeight) {
+        bestHeaderHeight = count;
+        bestHeaderDate = blockDate;
     }
 }
@@ -125,11 +121,21 @@ void ModalOverlay::tipUpdate(int count, const QDateTime& blockDate, double nVeri
     ui->percentageProgress->setText(QString::number(nVerificationProgress*100, 'f', 2)+"%");
     ui->progressBar->setValue(nVerificationProgress*100);

+    if (!bestHeaderDate.isValid())
+        // not syncing
+        return;
+
+    // estimate the number of headers left based on nPowTargetSpacing
+    // and check if the gui is not aware of the best header (happens rarely)
+    int estimateNumHeadersLeft = bestHeaderDate.secsTo(currentDate) / 600;
+    bool hasBestHeader = bestHeaderHeight >= count;
+
     // show remaining number of blocks
-    if (bestBlockHeight > 0)
-        ui->numberOfBlocksLeft->setText(QString::number(bestBlockHeight-count));
-    else
+    if (estimateNumHeadersLeft < 24 && hasBestHeader) {
+        ui->numberOfBlocksLeft->setText(QString::number(bestHeaderHeight - count));
+    } else {
         ui->expectedTimeLeft->setText(tr("Unknown. Syncing Headers..."));
+    }
 }

 void ModalOverlay::showHide(bool hide, bool userRequested)
diff --git a/src/qt/modaloverlay.h b/src/qt/modaloverlay.h
index 670c9e58ab..66c0aa78cf 100644
--- a/src/qt/modaloverlay.h
+++ b/src/qt/modaloverlay.h
@@ -35,7 +35,8 @@ protected:

 private:
     Ui::ModalOverlay *ui;
-    int bestBlockHeight; //best known height (based on the headers)
+    int bestHeaderHeight; //best known height (based on the headers)
+    QDateTime bestHeaderDate;
     QVector<QPair<qint64, double> > blockProcessTime;
     bool layerIsVisible;
     bool userClosed;
diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp
index d2392cfb22..7530b013bd 100644
--- a/src/test/blockencodings_tests.cpp
+++ b/src/test/blockencodings_tests.cpp
@@ -64,7 +64,7 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest)

     // Do a simple ShortTxIDs RT
     {
-        CBlockHeaderAndShortTxIDs shortIDs(block);
+        CBlockHeaderAndShortTxIDs shortIDs(block, true);
         CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
         stream << shortIDs;
@@ -116,7 +116,7 @@ public:
         stream >> *this;
     }
     TestHeaderAndShortIDs(const CBlock& block) :
-        TestHeaderAndShortIDs(CBlockHeaderAndShortTxIDs(block)) {}
+        TestHeaderAndShortIDs(CBlockHeaderAndShortTxIDs(block, true)) {}

     uint64_t GetShortID(const uint256& txhash) const {
         CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
@@ -267,7 +267,7 @@ BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest)

     // Test simple header round-trip with only coinbase
     {
-        CBlockHeaderAndShortTxIDs shortIDs(block);
+        CBlockHeaderAndShortTxIDs shortIDs(block, false);
         CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
         stream << shortIDs;
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 3586123d3e..15fa6fbca3 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -444,7 +444,7 @@ bool CTxMemPool::addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry,
     totalTxSize += entry.GetTxSize();
     minerPolicyEstimator->processTransaction(entry, fCurrentEstimate);

-    vTxHashes.emplace_back(hash, newit);
+    vTxHashes.emplace_back(tx.GetWitnessHash(), newit);
     newit->vTxHashesIdx = vTxHashes.size() - 1;

     return true;
diff --git a/src/txmempool.h b/src/txmempool.h
index 6f67dd91d6..941644b2b2 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -465,7 +465,7 @@ public:
     indexed_transaction_set mapTx;

     typedef indexed_transaction_set::nth_index<0>::type::iterator txiter;
-    std::vector<std::pair<uint256, txiter> > vTxHashes; //!< All tx hashes/entries in mapTx, in random order
+    std::vector<std::pair<uint256, txiter> > vTxHashes; //!< All tx witness hashes/entries in mapTx, in random order

     struct CompareIteratorByHash {
         bool operator()(const txiter &a, const txiter &b) const {