-rw-r--r--  .travis.yml | 1
-rw-r--r--  .tx/config | 2
-rw-r--r--  build-aux/m4/bitcoin_qt.m4 | 1
-rw-r--r--  configure.ac | 2
-rw-r--r--  doc/README_osx.txt | 4
-rw-r--r--  doc/REST-interface.md | 12
-rw-r--r--  doc/bips.md | 2
-rw-r--r--  doc/build-osx.md | 2
-rw-r--r--  doc/build-unix.md | 6
-rw-r--r--  doc/developer-notes.md | 6
-rw-r--r--  doc/dnsseed-policy.md | 13
-rw-r--r--  doc/gitian-building.md | 10
-rw-r--r--  doc/init.md | 2
-rw-r--r--  doc/release-process.md | 2
-rw-r--r--  doc/translation_process.md | 4
-rwxr-xr-x  qa/pull-tester/rpc-tests.sh | 2
-rw-r--r--  qa/rpc-tests/README.md | 2
-rw-r--r--  qa/rpc-tests/bignum.py | 102
-rwxr-xr-x  qa/rpc-tests/bipdersig-p2p.py | 183
-rw-r--r--  qa/rpc-tests/blockstore.py | 127
-rw-r--r--  qa/rpc-tests/blocktools.py | 65
-rwxr-xr-x  qa/rpc-tests/comptool.py | 341
-rwxr-xr-x  qa/rpc-tests/invalidblockrequest.py | 115
-rwxr-xr-x  qa/rpc-tests/maxblocksinflight.py | 101
-rwxr-xr-x  qa/rpc-tests/mininode.py | 1256
-rw-r--r--  qa/rpc-tests/script.py | 896
-rwxr-xr-x  qa/rpc-tests/script_test.py | 253
-rwxr-xr-x  qa/rpc-tests/test_framework.py | 44
-rw-r--r--  qa/rpc-tests/util.py | 21
-rwxr-xr-x  qa/rpc-tests/wallet.py | 2
-rw-r--r--  src/addrman.h | 8
-rw-r--r--  src/base58.h | 6
-rw-r--r--  src/bitcoin-cli.cpp | 2
-rw-r--r--  src/bitcoin-tx.cpp | 2
-rw-r--r--  src/bitcoind.cpp | 1
-rw-r--r--  src/bloom.cpp | 83
-rw-r--r--  src/bloom.h | 36
-rw-r--r--  src/chain.h | 2
-rw-r--r--  src/chainparams.cpp | 38
-rw-r--r--  src/chainparams.h | 16
-rw-r--r--  src/chainparamsbase.h | 4
-rw-r--r--  src/checkpoints.cpp | 7
-rw-r--r--  src/checkpoints.h | 2
-rw-r--r--  src/checkqueue.h | 6
-rw-r--r--  src/init.cpp | 6
-rw-r--r--  src/main.cpp | 64
-rw-r--r--  src/main.h | 2
-rw-r--r--  src/merkleblock.cpp | 4
-rw-r--r--  src/merkleblock.h | 2
-rw-r--r--  src/mruset.h | 36
-rw-r--r--  src/net.cpp | 29
-rw-r--r--  src/net.h | 10
-rw-r--r--  src/pow.cpp | 17
-rw-r--r--  src/pow.h | 3
-rw-r--r--  src/qt/askpassphrasedialog.cpp | 2
-rw-r--r--  src/qt/bitcoin.cpp | 2
-rw-r--r--  src/qt/bitcoingui.cpp | 2
-rw-r--r--  src/qt/bitcoinstrings.cpp | 6
-rw-r--r--  src/qt/coincontroldialog.cpp | 20
-rw-r--r--  src/qt/locale/bitcoin_en.ts | 132
-rw-r--r--  src/qt/optionsdialog.cpp | 2
-rw-r--r--  src/qt/sendcoinsdialog.cpp | 4
-rw-r--r--  src/qt/sendcoinsentry.cpp | 2
-rw-r--r--  src/qt/transactiondesc.cpp | 1
-rw-r--r--  src/rpcclient.cpp | 1
-rw-r--r--  src/rpcserver.cpp | 4
-rw-r--r--  src/test/bloom_tests.cpp | 78
-rw-r--r--  src/test/data/script_invalid.json | 20
-rw-r--r--  src/test/mruset_tests.cpp | 126
-rw-r--r--  src/test/pow_tests.cpp | 24
-rw-r--r--  src/test/test_bitcoin.cpp | 2
-rw-r--r--  src/ui_interface.h | 13
-rw-r--r--  src/util.cpp | 5
-rw-r--r--  src/util.h | 20
-rw-r--r--  src/utiltime.cpp | 4
-rw-r--r--  src/wallet/rpcwallet.cpp | 2
-rw-r--r--  src/wallet/wallet.cpp | 29
77 files changed, 4092 insertions, 374 deletions
diff --git a/.travis.yml b/.travis.yml
index 44ea7b62d7..e6578ee078 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,6 +16,7 @@ env:
- CCACHE_COMPRESS=1
- BASE_OUTDIR=$TRAVIS_BUILD_DIR/out
- SDK_URL=https://bitcoincore.org/depends-sources/sdks
+ - PYTHON_DEBUG=1
cache:
apt: true
directories:
diff --git a/.tx/config b/.tx/config
index 472d27b46f..6c534f06e4 100644
--- a/.tx/config
+++ b/.tx/config
@@ -1,7 +1,7 @@
[main]
host = https://www.transifex.com
-[bitcoin.qt-translation-010x]
+[bitcoin.qt-translation-011x]
file_filter = src/qt/locale/bitcoin_<lang>.ts
source_file = src/qt/locale/bitcoin_en.ts
source_lang = en
diff --git a/build-aux/m4/bitcoin_qt.m4 b/build-aux/m4/bitcoin_qt.m4
index 2a72262653..570ccb8b6f 100644
--- a/build-aux/m4/bitcoin_qt.m4
+++ b/build-aux/m4/bitcoin_qt.m4
@@ -6,6 +6,7 @@ AC_DEFUN([BITCOIN_QT_FAIL],[
AC_MSG_WARN([$1; bitcoin-qt frontend will not be built])
fi
bitcoin_enable_qt=no
+ bitcoin_enable_qt_test=no
else
AC_MSG_ERROR([$1])
fi
diff --git a/configure.ac b/configure.ac
index 2c918218bb..7c0a9f4a94 100644
--- a/configure.ac
+++ b/configure.ac
@@ -6,7 +6,7 @@ define(_CLIENT_VERSION_REVISION, 99)
define(_CLIENT_VERSION_BUILD, 0)
define(_CLIENT_VERSION_IS_RELEASE, false)
define(_COPYRIGHT_YEAR, 2015)
-AC_INIT([Bitcoin Core],[_CLIENT_VERSION_MAJOR._CLIENT_VERSION_MINOR._CLIENT_VERSION_REVISION],[info@bitcoin.org],[bitcoin])
+AC_INIT([Bitcoin Core],[_CLIENT_VERSION_MAJOR._CLIENT_VERSION_MINOR._CLIENT_VERSION_REVISION],[https://github.com/bitcoin/bitcoin/issues],[bitcoin])
AC_CONFIG_SRCDIR([src/main.cpp])
AC_CONFIG_HEADERS([src/config/bitcoin-config.h])
AC_CONFIG_AUX_DIR([build-aux])
diff --git a/doc/README_osx.txt b/doc/README_osx.txt
index 6c0c21c190..e8af46e0e4 100644
--- a/doc/README_osx.txt
+++ b/doc/README_osx.txt
@@ -1,6 +1,6 @@
Deterministic OSX Dmg Notes.
-Working OSX DMG's are created in Linux by combining a recent clang,
+Working OSX DMGs are created in Linux by combining a recent clang,
the Apple's binutils (ld, ar, etc), and DMG authoring tools.
Apple uses clang extensively for development and has upstreamed the necessary
@@ -58,7 +58,7 @@ libdmg-hfsplus project is used to compress it. There are several bugs in this
tool and its maintainer has seemingly abandoned the project. It has been forked
and is available (with fixes) here: https://github.com/theuni/libdmg-hfsplus .
-The 'dmg' tool has the ability to create DMG's from scratch as well, but this
+The 'dmg' tool has the ability to create DMGs from scratch as well, but this
functionality is broken. Only the compression feature is currently used.
Ideally, the creation could be fixed and genisoimage would no longer be necessary.
diff --git a/doc/REST-interface.md b/doc/REST-interface.md
index f14aed7287..68bf7187fa 100644
--- a/doc/REST-interface.md
+++ b/doc/REST-interface.md
@@ -5,6 +5,8 @@ The REST API can be enabled with the `-rest` option.
Supported API
-------------
+
+####Transactions
`GET /rest/tx/TX-HASH.{bin|hex|json}`
Given a transaction hash,
@@ -12,6 +14,7 @@ Returns a transaction, in binary, hex-encoded binary or JSON formats.
For full TX query capability, one must enable the transaction index via "txindex=1" command line / configuration option.
+####Blocks
`GET /rest/block/BLOCK-HASH.{bin|hex|json}`
`GET /rest/block/notxdetails/BLOCK-HASH.{bin|hex|json}`
@@ -22,6 +25,15 @@ The HTTP request and response are both handled entirely in-memory, thus making m
With the /notxdetails/ option JSON response will only contain the transaction hash instead of the complete transaction details. The option only affects the JSON response.
+####Blockheaders
+`GET /rest/headers/<COUNT>/<BLOCK-HASH>.<bin|hex>`
+
+Given a block hash,
+Returns <COUNT> amount of blockheaders in upward direction.
+
+JSON is not supported.
+
+####Chaininfos
`GET /rest/chaininfo.json`
Returns various state info regarding block chain processing.
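For anyone trying the endpoints documented above, here is a minimal sketch in Python (assumptions: a local node started with `-rest`, and the REST server reachable on the default mainnet RPC port 8332):

```python
import json
import urllib2

BASE = "http://127.0.0.1:8332"

# Chain state; same content as the getblockchaininfo RPC.
chaininfo = json.loads(urllib2.urlopen(BASE + "/rest/chaininfo.json").read())
tip = chaininfo["bestblockhash"]

# Up to 5 block headers starting at the tip, hex-encoded
# (the headers endpoint does not serve JSON).
headers_hex = urllib2.urlopen(BASE + "/rest/headers/5/%s.hex" % tip).read()
```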
diff --git a/doc/bips.md b/doc/bips.md
index 579eadfff3..90e98ed419 100644
--- a/doc/bips.md
+++ b/doc/bips.md
@@ -11,7 +11,7 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.10.0**):
* [`BIP 31`](https://github.com/bitcoin/bips/blob/master/bip-0031.mediawiki): The 'pong' protocol message (and the protocol version bump to 60001) has been implemented since **v0.6.1** ([PR #1081](https://github.com/bitcoin/bitcoin/pull/1081)).
* [`BIP 34`](https://github.com/bitcoin/bips/blob/master/bip-0034.mediawiki): The rule that requires blocks to contain their height (number) in the coinbase input, and the introduction of version 2 blocks has been implemented since **v0.7.0**. The rule took effect for version 2 blocks as of *block 224413* (March 5th 2013), and version 1 blocks are no longer allowed since *block 227931* (March 25th 2013) ([PR #1526](https://github.com/bitcoin/bitcoin/pull/1526)).
* [`BIP 35`](https://github.com/bitcoin/bips/blob/master/bip-0035.mediawiki): The 'mempool' protocol message (and the protocol version bump to 60002) has been implemented since **v0.7.0** ([PR #1641](https://github.com/bitcoin/bitcoin/pull/1641)).
-* [`BIP 37`](https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki): The bloom filtering for transaction relaying, partial merkle trees for blocks , and the protocol version bump to 70001 (enabling low-bandwidth SPV clients) has been implemented since **v0.8.0** ([PR #1795](https://github.com/bitcoin/bitcoin/pull/1795)).
+* [`BIP 37`](https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki): The bloom filtering for transaction relaying, partial merkle trees for blocks, and the protocol version bump to 70001 (enabling low-bandwidth SPV clients) has been implemented since **v0.8.0** ([PR #1795](https://github.com/bitcoin/bitcoin/pull/1795)).
* [`BIP 42`](https://github.com/bitcoin/bips/blob/master/bip-0042.mediawiki): The bug that would have caused the subsidy schedule to resume after block 13440000 was fixed in **v0.9.2** ([PR #3842](https://github.com/bitcoin/bitcoin/pull/3842)).
* [`BIP 61`](https://github.com/bitcoin/bips/blob/master/bip-0061.mediawiki): The 'reject' protocol message (and the protocol version bump to 70002) was added in **v0.9.0** ([PR #3185](https://github.com/bitcoin/bitcoin/pull/3185)).
* [`BIP 66`](https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki): The strict DER rules and associated version 3 blocks have been implemented since **v0.10.0** ([PR #5713](https://github.com/bitcoin/bitcoin/pull/5713)).
diff --git a/doc/build-osx.md b/doc/build-osx.md
index d6e93cb23d..913e72519f 100644
--- a/doc/build-osx.md
+++ b/doc/build-osx.md
@@ -1,6 +1,6 @@
Mac OS X Build Instructions and Notes
====================================
-This guide will show you how to build bitcoind(headless client) for OSX.
+This guide will show you how to build bitcoind (headless client) for OSX.
Notes
-----
diff --git a/doc/build-unix.md b/doc/build-unix.md
index 0984f689a8..f70bf7f1fe 100644
--- a/doc/build-unix.md
+++ b/doc/build-unix.md
@@ -195,12 +195,12 @@ Hardening enables the following features:
* Position Independent Executable
Build position independent code to take advantage of Address Space Layout Randomization
- offered by some kernels. An attacker who is able to cause execution of code at an arbitrary
- memory location is thwarted if he or she doesn't know where anything useful is located.
+ offered by some kernels. Attackers who can cause execution of code at an arbitrary memory
+ location are thwarted if they don't know where anything useful is located.
The stack and heap are randomly located by default but this allows the code section to be
randomly located as well.
- On an Amd64 processor where a library was not compiled with -fPIC, this will cause an error
+ On an AMD64 processor where a library was not compiled with -fPIC, this will cause an error
such as: "relocation R_X86_64_32 against `......' can not be used when making a shared object;"
To test that you have built PIE executable, install scanelf, part of paxutils, and use:
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index eaeb90da1d..8f7db31d59 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -53,7 +53,7 @@ bool function(int arg1, const char *arg2)
```
A complete list of `@xxx` commands can be found at http://www.stack.nl/~dimitri/doxygen/manual/commands.html.
As Doxygen recognizes the comments by the delimiters (`/**` and `*/` in this case), you don't
-*need* to provide any commands for a comment to be valid, just a description text is fine.
+*need* to provide any commands for a comment to be valid; just a description text is fine.
To describe a class use the same construct above the class definition:
```c++
@@ -175,7 +175,7 @@ Threads
Pull Request Terminology
------------------------
-Concept ACK - Agree with the idea and overall direction, but haven't reviewed the code changes or tested them.
+Concept ACK - Agree with the idea and overall direction, but have neither reviewed nor tested the code changes.
utACK (untested ACK) - Reviewed and agree with the code changes but haven't actually tested them.
@@ -183,4 +183,4 @@ Tested ACK - Reviewed the code changes and have verified the functionality or bu
ACK - A loose ACK can be confusing. It's best to avoid them unless it's a documentation/comment only change in which case there is nothing to test/verify; therefore the tested/untested distinction is not there.
-NACK - Disagree with the code changes/concept. Should be accompanied by an explanation.
+NACK - Disagree with the code changes/concept. Should be accompanied by an explanation.
diff --git a/doc/dnsseed-policy.md b/doc/dnsseed-policy.md
index f15ff44e5d..506e171153 100644
--- a/doc/dnsseed-policy.md
+++ b/doc/dnsseed-policy.md
@@ -10,15 +10,14 @@ Other implementations of Bitcoin software may also use the same
seeds and may be more exposed. In light of this exposure, this
document establishes some basic expectations for operating dnsseeds.
-0. A DNS seed operating organization or person is expected
-to follow good host security practices and maintain control of
-their serving infrastructure and not sell or transfer control of their
-DNS seed. Any hosting services contracted by the operator are
-equally expected to uphold these expectations.
+0. A DNS seed operating organization or person is expected to follow good
+host security practices, maintain control of applicable infrastructure,
+and not sell or transfer control of the DNS seed. Any hosting services
+contracted by the operator are equally expected to uphold these expectations.
1. The DNS seed results must consist exclusively of fairly selected and
functioning Bitcoin nodes from the public network to the best of the
-operators understanding and capability.
+operator's understanding and capability.
2. For the avoidance of doubt, the results may be randomized but must not
single-out any group of hosts to receive different results unless due to an
@@ -28,7 +27,7 @@ urgent technical necessity and disclosed.
4. Any logging of DNS queries should be only that which is necessary
for the operation of the service or urgent health of the Bitcoin
-network and must not be retained longer than necessary or disclosed
+network and must not be retained longer than necessary nor disclosed
to any third party.
5. Information gathered as a result of the operators node-spidering
diff --git a/doc/gitian-building.md b/doc/gitian-building.md
index d285fffdbb..1fa5b5f989 100644
--- a/doc/gitian-building.md
+++ b/doc/gitian-building.md
@@ -87,7 +87,7 @@ After creating the VM, we need to configure it.
![](gitian-building/network_settings.png)
-- Click `Advanced`, then `Port Forwarding`. We want to set up a port through where we can reach the VM to get files in and out.
+- Click `Advanced`, then `Port Forwarding`. We want to set up a port through which we can reach the VM to get files in and out.
- Create a new rule by clicking the plus icon.
![](gitian-building/port_forwarding_rules.png)
@@ -111,7 +111,7 @@ Installing Debian
This section will explain how to install Debian on the newly created VM.
-- Choose the non-graphical installer. We do not need the graphical environment, it will only increase installation time and disk usage.
+- Choose the non-graphical installer. We do not need the graphical environment; it will only increase installation time and disk usage.
![](gitian-building/debian_install_1_boot_menu.png)
@@ -144,7 +144,7 @@ and proceed, just press `Enter`. To select a different button, press `Tab`.
![](gitian-building/debian_install_9_user_password.png)
-- The installer will set up the clock using a time server, this process should be automatic
+- The installer will set up the clock using a time server; this process should be automatic
- Set up the clock: choose a time zone (depends on the locale settings that you picked earlier; specifics don't matter)
![](gitian-building/debian_install_10_configure_clock.png)
@@ -371,7 +371,7 @@ COMMIT=2014_03_windows_unicode_path
Signing externally
-------------------
-If you want to do the PGP signing on another device that's also possible; just define `SIGNER` as mentioned
+If you want to do the PGP signing on another device, that's also possible; just define `SIGNER` as mentioned
and follow the steps in the build process as normal.
gpg: skipped "laanwj": secret key not available
@@ -393,4 +393,4 @@ Uploading signatures
After building and signing you can push your signatures (both the `.assert` and `.assert.sig` files) to the
[bitcoin/gitian.sigs](https://github.com/bitcoin/gitian.sigs/) repository, or if that's not possible create a pull
-request. You can also mail the files to me (laanwj@gmail.com) and I'll commit them.
+request. You can also mail the files to Wladimir (laanwj@gmail.com) and he will commit them.
diff --git a/doc/init.md b/doc/init.md
index 871bdc8123..1f206a6c02 100644
--- a/doc/init.md
+++ b/doc/init.md
@@ -63,7 +63,7 @@ can then be controlled by group membership.
4a) systemd
-Installing this .service file consists on just copying it to
+Installing this .service file consists of just copying it to
/usr/lib/systemd/system directory, followed by the command
"systemctl daemon-reload" in order to update running systemd configuration.
diff --git a/doc/release-process.md b/doc/release-process.md
index 5dad9bf5de..cdcee0ec36 100644
--- a/doc/release-process.md
+++ b/doc/release-process.md
@@ -164,4 +164,4 @@ Note: check that SHA256SUMS itself doesn't end up in SHA256SUMS, which is a spur
- Add release notes for the new version to the directory `doc/release-notes` in git master
-- Celebrate
+- Celebrate
diff --git a/doc/translation_process.md b/doc/translation_process.md
index eed467706e..3653e53021 100644
--- a/doc/translation_process.md
+++ b/doc/translation_process.md
@@ -32,7 +32,7 @@ QToolBar *toolbar = addToolBar(tr("Tabs toolbar"));
### Creating a pull-request
For general PRs, you shouldn’t include any updates to the translation source files. They will be updated periodically, primarily around pre-releases, allowing time for any new phrases to be translated before public releases. This is also important in avoiding translation related merge conflicts.
-When an updated source file is merged into the Github repo, Transifex will automatically detect it (although it can take several hours). Once processed, the new strings will show up as "Remaining" in the Transifex web interface and are ready for translators.
+When an updated source file is merged into the Github repo, Transifex will automatically detect it (although it can take several hours). Once processed, the new strings will show up as "Remaining" in the Transifex web interface and are ready for translators.
To create the pull-request, use the following commands:
```
@@ -108,4 +108,4 @@ To create a new language template, you will need to edit the languages manifest
### Questions and general assistance
The Bitcoin-Core translation maintainers include *tcatm, seone, Diapolo, wumpus and luke-jr*.You can find them, and others, in the Freenode IRC chatroom - `irc.freenode.net #bitcoin-dev`.
-If you are a translator, you should also subscribe to the mailing list, https://groups.google.com/forum/#!forum/bitcoin-translators. Announcements will be posted during application pre-releases to notify translators to check for updates. \ No newline at end of file
+If you are a translator, you should also subscribe to the mailing list, https://groups.google.com/forum/#!forum/bitcoin-translators. Announcements will be posted during application pre-releases to notify translators to check for updates.
diff --git a/qa/pull-tester/rpc-tests.sh b/qa/pull-tester/rpc-tests.sh
index caa679e70b..a2d4305f98 100755
--- a/qa/pull-tester/rpc-tests.sh
+++ b/qa/pull-tester/rpc-tests.sh
@@ -31,6 +31,8 @@ testScripts=(
'merkle_blocks.py'
'rawtransactions.py'
# 'forknotify.py'
+ 'maxblocksinflight.py'
+ 'invalidblockrequest.py'
);
if [ "x${ENABLE_BITCOIND}${ENABLE_UTILS}${ENABLE_WALLET}" = "x111" ]; then
for (( i = 0; i < ${#testScripts[@]}; i++ ))
diff --git a/qa/rpc-tests/README.md b/qa/rpc-tests/README.md
index 02170d13ec..d9fbb109e8 100644
--- a/qa/rpc-tests/README.md
+++ b/qa/rpc-tests/README.md
@@ -28,7 +28,7 @@ Notes
A 200-block -regtest blockchain and wallets for four nodes
is created the first time a regression test is run and
is stored in the cache/ directory. Each node has 25 mature
-blocks (25*50=1250 BTC) in their wallet.
+blocks (25*50=1250 BTC) in its wallet.
After the first run, the cache/ blockchain and wallets are
copied into a temporary directory and used as the initial
diff --git a/qa/rpc-tests/bignum.py b/qa/rpc-tests/bignum.py
new file mode 100644
index 0000000000..b0c58ccd47
--- /dev/null
+++ b/qa/rpc-tests/bignum.py
@@ -0,0 +1,102 @@
+#
+#
+# bignum.py
+#
+# This file is copied from python-bitcoinlib.
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+"""Bignum routines"""
+
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import struct
+
+
+# generic big endian MPI format
+
+def bn_bytes(v, have_ext=False):
+ ext = 0
+ if have_ext:
+ ext = 1
+ return ((v.bit_length()+7)//8) + ext
+
+def bn2bin(v):
+ s = bytearray()
+ i = bn_bytes(v)
+ while i > 0:
+ s.append((v >> ((i-1) * 8)) & 0xff)
+ i -= 1
+ return s
+
+def bin2bn(s):
+ l = 0
+ for ch in s:
+ l = (l << 8) | ch
+ return l
+
+def bn2mpi(v):
+ have_ext = False
+ if v.bit_length() > 0:
+ have_ext = (v.bit_length() & 0x07) == 0
+
+ neg = False
+ if v < 0:
+ neg = True
+ v = -v
+
+ s = struct.pack(b">I", bn_bytes(v, have_ext))
+ ext = bytearray()
+ if have_ext:
+ ext.append(0)
+ v_bin = bn2bin(v)
+ if neg:
+ if have_ext:
+ ext[0] |= 0x80
+ else:
+ v_bin[0] |= 0x80
+ return s + ext + v_bin
+
+def mpi2bn(s):
+ if len(s) < 4:
+ return None
+ s_size = bytes(s[:4])
+ v_len = struct.unpack(b">I", s_size)[0]
+ if len(s) != (v_len + 4):
+ return None
+ if v_len == 0:
+ return 0
+
+ v_str = bytearray(s[4:])
+ neg = False
+ i = v_str[0]
+ if i & 0x80:
+ neg = True
+ i &= ~0x80
+ v_str[0] = i
+
+ v = bin2bn(v_str)
+
+ if neg:
+ return -v
+ return v
+
+# bitcoin-specific little endian format, with implicit size
+def mpi2vch(s):
+ r = s[4:] # strip size
+ r = r[::-1] # reverse string, converting BE->LE
+ return r
+
+def bn2vch(v):
+ return bytes(mpi2vch(bn2mpi(v)))
+
+def vch2mpi(s):
+ r = struct.pack(b">I", len(s)) # size
+ r += s[::-1] # reverse string, converting LE->BE
+ return r
+
+def vch2bn(s):
+ return mpi2bn(vch2mpi(s))
+
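The helpers above culminate in `bn2vch`/`vch2bn`, Bitcoin's little-endian script-number encoding with the sign carried in the top byte's high bit. A quick round-trip sketch (assuming this module is importable as `bignum`):

```python
from bignum import bn2vch, vch2bn

assert vch2bn(bn2vch(1000)) == 1000
assert vch2bn(bn2vch(-1000)) == -1000
# 255 needs a trailing 0x00 so its high bit is not misread as a sign bit.
assert bn2vch(255) == b'\xff\x00'
```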
diff --git a/qa/rpc-tests/bipdersig-p2p.py b/qa/rpc-tests/bipdersig-p2p.py
new file mode 100755
index 0000000000..ff0c878898
--- /dev/null
+++ b/qa/rpc-tests/bipdersig-p2p.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python2
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+from test_framework import ComparisonTestFramework
+from util import *
+from mininode import CTransaction, NetworkThread
+from blocktools import create_coinbase, create_block
+from binascii import hexlify, unhexlify
+import cStringIO
+from comptool import TestInstance, TestManager
+from script import CScript
+import time
+
+# A canonical signature consists of:
+# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
+def unDERify(tx):
+ '''
+ Make the signature in vin 0 of a tx non-DER-compliant,
+ by adding padding after the S-value.
+ '''
+ scriptSig = CScript(tx.vin[0].scriptSig)
+ newscript = []
+ for i in scriptSig:
+ if (len(newscript) == 0):
+ newscript.append(i[0:-1] + '\0' + i[-1])
+ else:
+ newscript.append(i)
+ tx.vin[0].scriptSig = CScript(newscript)
+
+'''
+This test is meant to exercise BIP66 (DER SIG).
+Connect to a single node.
+Mine 2 (version 2) blocks (save the coinbases for later).
+Generate 98 more version 2 blocks, verify the node accepts.
+Mine 749 version 3 blocks, verify the node accepts.
+Check that the new DERSIG rules are not enforced on the 750th version 3 block.
+Check that the new DERSIG rules are enforced on the 751st version 3 block.
+Mine 199 new version blocks.
+Mine 1 old-version block.
+Mine 1 new version block.
+Mine 1 old version block, see that the node rejects.
+'''
+
+class BIP66Test(ComparisonTestFramework):
+
+ def __init__(self):
+ self.num_nodes = 1
+
+ def setup_network(self):
+ # Must set the blockversion for this test
+ self.nodes = start_nodes(1, self.options.tmpdir,
+ extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=2']],
+ binary=[self.options.testbinary])
+
+ def run_test(self):
+ test = TestManager(self, self.options.tmpdir)
+ test.add_all_connections(self.nodes)
+ NetworkThread().start() # Start up network handling in another thread
+ test.run()
+
+ def create_transaction(self, node, coinbase, to_address, amount):
+ from_txid = node.getblock(coinbase)['tx'][0]
+ inputs = [{ "txid" : from_txid, "vout" : 0}]
+ outputs = { to_address : amount }
+ rawtx = node.createrawtransaction(inputs, outputs)
+ signresult = node.signrawtransaction(rawtx)
+ tx = CTransaction()
+ f = cStringIO.StringIO(unhexlify(signresult['hex']))
+ tx.deserialize(f)
+ return tx
+
+ def get_tests(self):
+
+ self.coinbase_blocks = self.nodes[0].generate(2)
+ self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
+ self.nodeaddress = self.nodes[0].getnewaddress()
+ self.last_block_time = time.time()
+
+ ''' 98 more version 2 blocks '''
+ test_blocks = []
+ for i in xrange(98):
+ block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
+ block.nVersion = 2
+ block.rehash()
+ block.solve()
+ test_blocks.append([block, True])
+ self.last_block_time += 1
+ self.tip = block.sha256
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ ''' Mine 749 version 3 blocks '''
+ test_blocks = []
+ for i in xrange(749):
+ block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
+ block.nVersion = 3
+ block.rehash()
+ block.solve()
+ test_blocks.append([block, True])
+ self.last_block_time += 1
+ self.tip = block.sha256
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ '''
+ Check that the new DERSIG rules are not enforced in the 750th
+ version 3 block.
+ '''
+ spendtx = self.create_transaction(self.nodes[0],
+ self.coinbase_blocks[0], self.nodeaddress, 1.0)
+ unDERify(spendtx)
+ spendtx.rehash()
+
+ block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
+ block.nVersion = 3
+ block.vtx.append(spendtx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+
+ self.last_block_time += 1
+ self.tip = block.sha256
+ yield TestInstance([[block, True]])
+
+ '''
+ Check that the new DERSIG rules are enforced in the 751st version 3
+ block.
+ '''
+ spendtx = self.create_transaction(self.nodes[0],
+ self.coinbase_blocks[1], self.nodeaddress, 1.0)
+ unDERify(spendtx)
+ spendtx.rehash()
+
+ block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
+ block.nVersion = 3
+ block.vtx.append(spendtx)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ yield TestInstance([[block, False]])
+
+ ''' Mine 199 new version blocks on last valid tip '''
+ test_blocks = []
+ for i in xrange(199):
+ block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
+ block.nVersion = 3
+ block.rehash()
+ block.solve()
+ test_blocks.append([block, True])
+ self.last_block_time += 1
+ self.tip = block.sha256
+ yield TestInstance(test_blocks, sync_every_block=False)
+
+ ''' Mine 1 old version block '''
+ block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
+ block.nVersion = 2
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ self.tip = block.sha256
+ yield TestInstance([[block, True]])
+
+ ''' Mine 1 new version block '''
+ block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
+ block.nVersion = 3
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ self.tip = block.sha256
+ yield TestInstance([[block, True]])
+
+ ''' Mine 1 old version block, should be invalid '''
+ block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
+ block.nVersion = 2
+ block.rehash()
+ block.solve()
+ self.last_block_time += 1
+ yield TestInstance([[block, False]])
+
+if __name__ == '__main__':
+ BIP66Test().main()
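To make the `unDERify` trick concrete: the scriptSig push being mangled is a DER signature followed by a one-byte hashtype, and inserting padding before the hashtype leaves the signature acceptable to pre-BIP66 (OpenSSL-lenient) parsing while failing BIP66's strict length checks. A byte-level toy illustration (the signature bytes here are made up, not a real signature):

```python
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
sig_plus_hashtype = b'\x30\x06\x02\x01\x01\x02\x01\x01' + b'\x01'  # SIGHASH_ALL

# The same padding step unDERify() applies to the first push of vin[0].scriptSig:
padded = sig_plus_hashtype[:-1] + b'\x00' + sig_plus_hashtype[-1:]
assert len(padded) == len(sig_plus_hashtype) + 1
```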
diff --git a/qa/rpc-tests/blockstore.py b/qa/rpc-tests/blockstore.py
new file mode 100644
index 0000000000..c57b6df81b
--- /dev/null
+++ b/qa/rpc-tests/blockstore.py
@@ -0,0 +1,127 @@
+# BlockStore: a helper class that keeps a map of blocks and implements
+# helper functions for responding to getheaders and getdata,
+# and for constructing a getheaders message
+#
+
+from mininode import *
+import dbm
+
+class BlockStore(object):
+ def __init__(self, datadir):
+ self.blockDB = dbm.open(datadir + "/blocks", 'c')
+ self.currentBlock = 0L
+
+ def close(self):
+ self.blockDB.close()
+
+ def get(self, blockhash):
+ serialized_block = None
+ try:
+ serialized_block = self.blockDB[repr(blockhash)]
+ except KeyError:
+ return None
+ f = cStringIO.StringIO(serialized_block)
+ ret = CBlock()
+ ret.deserialize(f)
+ ret.calc_sha256()
+ return ret
+
+ # Note: this pulls full blocks out of the database just to retrieve
+ # the headers -- perhaps we could keep a separate data structure
+ # to avoid this overhead.
+ def headers_for(self, locator, hash_stop, current_tip=None):
+ if current_tip is None:
+ current_tip = self.currentBlock
+ current_block = self.get(current_tip)
+ if current_block is None:
+ return None
+
+ response = msg_headers()
+ headersList = [ CBlockHeader(current_block) ]
+ maxheaders = 2000
+ while (headersList[0].sha256 not in locator.vHave):
+ prevBlockHash = headersList[0].hashPrevBlock
+ prevBlock = self.get(prevBlockHash)
+ if prevBlock is not None:
+ headersList.insert(0, CBlockHeader(prevBlock))
+ else:
+ break
+ headersList = headersList[:maxheaders] # truncate if we have too many
+ hashList = [x.sha256 for x in headersList]
+ index = len(headersList)
+ if (hash_stop in hashList):
+ index = hashList.index(hash_stop)+1
+ response.headers = headersList[:index]
+ return response
+
+ def add_block(self, block):
+ block.calc_sha256()
+ try:
+ self.blockDB[repr(block.sha256)] = bytes(block.serialize())
+ except TypeError as e:
+ print "Unexpected error: ", sys.exc_info()[0], e.args
+ self.currentBlock = block.sha256
+
+ def get_blocks(self, inv):
+ responses = []
+ for i in inv:
+ if (i.type == 2): # MSG_BLOCK
+ block = self.get(i.hash)
+ if block is not None:
+ responses.append(msg_block(block))
+ return responses
+
+ def get_locator(self, current_tip=None):
+ if current_tip is None:
+ current_tip = self.currentBlock
+ r = []
+ counter = 0
+ step = 1
+ lastBlock = self.get(current_tip)
+ while lastBlock is not None:
+ r.append(lastBlock.hashPrevBlock)
+ for i in range(step):
+ lastBlock = self.get(lastBlock.hashPrevBlock)
+ if lastBlock is None:
+ break
+ counter += 1
+ if counter > 10:
+ step *= 2
+ locator = CBlockLocator()
+ locator.vHave = r
+ return locator
+
+class TxStore(object):
+ def __init__(self, datadir):
+ self.txDB = dbm.open(datadir + "/transactions", 'c')
+
+ def close(self):
+ self.txDB.close()
+
+ def get(self, txhash):
+ serialized_tx = None
+ try:
+ serialized_tx = self.txDB[repr(txhash)]
+ except KeyError:
+ return None
+ f = cStringIO.StringIO(serialized_tx)
+ ret = CTransaction()
+ ret.deserialize(f)
+ ret.calc_sha256()
+ return ret
+
+ def add_transaction(self, tx):
+ tx.calc_sha256()
+ try:
+ self.txDB[repr(tx.sha256)] = bytes(tx.serialize())
+ except TypeError as e:
+ print "Unexpected error: ", sys.exc_info()[0], e.args
+
+ def get_transactions(self, inv):
+ responses = []
+ for i in inv:
+ if (i.type == 1): # MSG_TX
+ tx = self.get(i.hash)
+ if tx is not None:
+ responses.append(msg_tx(tx))
+ return responses
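These two stores are what let the comparison tool answer `getheaders` and `getdata` on behalf of the test. A rough usage sketch (assumptions: a writable scratch directory, and the blocktools helpers from the next file for building a block):

```python
import os

from blockstore import BlockStore
from blocktools import create_block, create_coinbase

datadir = "/tmp/comptool-example"               # hypothetical scratch directory
if not os.path.isdir(datadir):
    os.makedirs(datadir)

store = BlockStore(datadir)
block = create_block(0, create_coinbase(), 1)   # toy block, not part of a real chain
block.solve()
store.add_block(block)                          # becomes the store's current tip

locator = store.get_locator()                   # locator for the current tip
headers = store.headers_for(locator, 0)         # msg_headers answering a getheaders
store.close()
```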
diff --git a/qa/rpc-tests/blocktools.py b/qa/rpc-tests/blocktools.py
new file mode 100644
index 0000000000..f397fe7cd6
--- /dev/null
+++ b/qa/rpc-tests/blocktools.py
@@ -0,0 +1,65 @@
+# blocktools.py - utilities for manipulating blocks and transactions
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+from mininode import *
+from script import CScript, CScriptOp
+
+# Create a block (with regtest difficulty)
+def create_block(hashprev, coinbase, nTime=None):
+ block = CBlock()
+ if nTime is None:
+ import time
+ block.nTime = int(time.time()+600)
+ else:
+ block.nTime = nTime
+ block.hashPrevBlock = hashprev
+ block.nBits = 0x207fffff # Will break after a difficulty adjustment...
+ block.vtx.append(coinbase)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.calc_sha256()
+ return block
+
+def serialize_script_num(value):
+ r = bytearray(0)
+ if value == 0:
+ return r
+ neg = value < 0
+ absvalue = -value if neg else value
+ while (absvalue):
+ r.append(chr(absvalue & 0xff))
+ absvalue >>= 8
+ if r[-1] & 0x80:
+ r.append(0x80 if neg else 0)
+ elif neg:
+ r[-1] |= 0x80
+ return r
+
+counter=1
+# Create an anyone-can-spend coinbase transaction, assuming no miner fees
+def create_coinbase(heightAdjust = 0):
+ global counter
+ coinbase = CTransaction()
+ coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
+ ser_string(serialize_script_num(counter+heightAdjust)), 0xffffffff))
+ counter += 1
+ coinbaseoutput = CTxOut()
+ coinbaseoutput.nValue = 50*100000000
+ halvings = int((counter+heightAdjust)/150) # regtest
+ coinbaseoutput.nValue >>= halvings
+ coinbaseoutput.scriptPubKey = ""
+ coinbase.vout = [ coinbaseoutput ]
+ coinbase.calc_sha256()
+ return coinbase
+
+# Create a transaction with an anyone-can-spend output, that spends the
+# nth output of prevtx.
+def create_transaction(prevtx, n, sig, value):
+ tx = CTransaction()
+ assert(n < len(prevtx.vout))
+ tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
+ tx.vout.append(CTxOut(value, ""))
+ tx.calc_sha256()
+ return tx
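Taken together, this is roughly how the P2P tests in this change build a new tip and a spend of a coinbase (a condensed sketch; in the real tests the previous tip and block time come from the node under test):

```python
import time

from blocktools import create_block, create_coinbase, create_transaction

tip = 0                                  # placeholder previous-block hash
last_time = int(time.time())

coinbase = create_coinbase()
block = create_block(tip, coinbase, last_time + 1)   # hashprev, coinbase, nTime
block.solve()                                        # grind nNonce to meet the regtest target
tip = block.sha256

# Spend output 0 of that coinbase with an anyone-can-spend scriptSig.
spend = create_transaction(coinbase, 0, chr(81), 50 * 100000000)  # chr(81) is OP_TRUE
```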
diff --git a/qa/rpc-tests/comptool.py b/qa/rpc-tests/comptool.py
new file mode 100755
index 0000000000..23a979250c
--- /dev/null
+++ b/qa/rpc-tests/comptool.py
@@ -0,0 +1,341 @@
+#!/usr/bin/env python2
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+from mininode import *
+from blockstore import BlockStore, TxStore
+from util import p2p_port
+
+'''
+This is a tool for comparing two or more bitcoinds to each other
+using a script provided.
+
+To use, create a class that implements get_tests(), and pass it in
+as the test generator to TestManager. get_tests() should be a python
+generator that returns TestInstance objects. See below for definition.
+'''
+
+# TestNode behaves as follows:
+# Configure with a BlockStore and TxStore
+# on_inv: log the message but don't request
+# on_headers: log the chain tip
+# on_pong: update ping response map (for synchronization)
+# on_getheaders: provide headers via BlockStore
+# on_getdata: provide blocks via BlockStore
+
+global mininode_lock
+
+class TestNode(NodeConnCB):
+
+ def __init__(self, block_store, tx_store):
+ NodeConnCB.__init__(self)
+ self.create_callback_map()
+ self.conn = None
+ self.bestblockhash = None
+ self.block_store = block_store
+ self.block_request_map = {}
+ self.tx_store = tx_store
+ self.tx_request_map = {}
+
+ # When the pingmap is non-empty we're waiting for
+ # a response
+ self.pingMap = {}
+ self.lastInv = []
+
+ def add_connection(self, conn):
+ self.conn = conn
+
+ def on_headers(self, conn, message):
+ if len(message.headers) > 0:
+ best_header = message.headers[-1]
+ best_header.calc_sha256()
+ self.bestblockhash = best_header.sha256
+
+ def on_getheaders(self, conn, message):
+ response = self.block_store.headers_for(message.locator, message.hashstop)
+ if response is not None:
+ conn.send_message(response)
+
+ def on_getdata(self, conn, message):
+ [conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
+ [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
+
+ for i in message.inv:
+ if i.type == 1:
+ self.tx_request_map[i.hash] = True
+ elif i.type == 2:
+ self.block_request_map[i.hash] = True
+
+ def on_inv(self, conn, message):
+ self.lastInv = [x.hash for x in message.inv]
+
+ def on_pong(self, conn, message):
+ try:
+ del self.pingMap[message.nonce]
+ except KeyError:
+ raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
+
+ def send_inv(self, obj):
+ mtype = 2 if isinstance(obj, CBlock) else 1
+ self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
+
+ def send_getheaders(self):
+ # We ask for headers from their last tip.
+ m = msg_getheaders()
+ m.locator = self.block_store.get_locator(self.bestblockhash)
+ self.conn.send_message(m)
+
+ # This assumes BIP31
+ def send_ping(self, nonce):
+ self.pingMap[nonce] = True
+ self.conn.send_message(msg_ping(nonce))
+
+ def received_ping_response(self, nonce):
+ return nonce not in self.pingMap
+
+ def send_mempool(self):
+ self.lastInv = []
+ self.conn.send_message(msg_mempool())
+
+# TestInstance:
+#
+# Instances of these are generated by the test generator, and fed into the
+# comptool.
+#
+# "blocks_and_transactions" should be an array of [obj, True/False/None]:
+# - obj is either a CBlock or a CTransaction, and
+# - the second value indicates whether the object should be accepted
+# into the blockchain or mempool (for tests where we expect a certain
+# answer), or "None" if we don't expect a certain answer and are just
+# comparing the behavior of the nodes being tested.
+# sync_every_block: if True, then each block will be inv'ed, synced, and
+# nodes will be tested based on the outcome for the block. If False,
+# then inv's accumulate until all blocks are processed (or max inv size
+# is reached) and then sent out in one inv message. Then the final block
+# will be synced across all connections, and the outcome of the final
+# block will be tested.
+# sync_every_tx: analagous to behavior for sync_every_block, except if outcome
+# on the final tx is None, then contents of entire mempool are compared
+# across all connections. (If outcome of final tx is specified as true
+# or false, then only the last tx is tested against outcome.)
+
+class TestInstance(object):
+ def __init__(self, objects=[], sync_every_block=True, sync_every_tx=False):
+ self.blocks_and_transactions = objects
+ self.sync_every_block = sync_every_block
+ self.sync_every_tx = sync_every_tx
+
+class TestManager(object):
+
+ def __init__(self, testgen, datadir):
+ self.test_generator = testgen
+ self.connections = []
+ self.block_store = BlockStore(datadir)
+ self.tx_store = TxStore(datadir)
+ self.ping_counter = 1
+
+ def add_all_connections(self, nodes):
+ for i in range(len(nodes)):
+ # Create a p2p connection to each node
+ self.connections.append(NodeConn('127.0.0.1', p2p_port(i),
+ nodes[i], TestNode(self.block_store, self.tx_store)))
+ # Make sure the TestNode (callback class) has a reference to its
+ # associated NodeConn
+ self.connections[-1].cb.add_connection(self.connections[-1])
+
+ def wait_for_verack(self):
+ sleep_time = 0.05
+ max_tries = 10 / sleep_time # Wait at most 10 seconds
+ while max_tries > 0:
+ done = True
+ with mininode_lock:
+ for c in self.connections:
+ if c.cb.verack_received is False:
+ done = False
+ break
+ if done:
+ break
+ time.sleep(sleep_time)
+
+ def wait_for_pings(self, counter):
+ received_pongs = False
+ while received_pongs is not True:
+ time.sleep(0.05)
+ received_pongs = True
+ with mininode_lock:
+ for c in self.connections:
+ if c.cb.received_ping_response(counter) is not True:
+ received_pongs = False
+ break
+
+ # sync_blocks: Wait for all connections to request the blockhash given
+ # then send get_headers to find out the tip of each node, and synchronize
+ # the response by using a ping (and waiting for pong with same nonce).
+ def sync_blocks(self, blockhash, num_blocks):
+ # Wait for nodes to request block (50ms sleep * 20 tries * num_blocks)
+ max_tries = 20*num_blocks
+ while max_tries > 0:
+ with mininode_lock:
+ results = [ blockhash in c.cb.block_request_map and
+ c.cb.block_request_map[blockhash] for c in self.connections ]
+ if False not in results:
+ break
+ time.sleep(0.05)
+ max_tries -= 1
+
+ # --> error if not requested
+ if max_tries == 0:
+ # print [ c.cb.block_request_map for c in self.connections ]
+ raise AssertionError("Not all nodes requested block")
+ # --> Answer request (we did this inline!)
+
+ # Send getheaders message
+ [ c.cb.send_getheaders() for c in self.connections ]
+
+ # Send ping and wait for response -- synchronization hack
+ [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
+ self.wait_for_pings(self.ping_counter)
+ self.ping_counter += 1
+
+ # Analogous to sync_block (see above)
+ def sync_transaction(self, txhash, num_events):
+ # Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
+ max_tries = 20*num_events
+ while max_tries > 0:
+ with mininode_lock:
+ results = [ txhash in c.cb.tx_request_map and
+ c.cb.tx_request_map[txhash] for c in self.connections ]
+ if False not in results:
+ break
+ time.sleep(0.05)
+ max_tries -= 1
+
+ # --> error if not requested
+ if max_tries == 0:
+ # print [ c.cb.tx_request_map for c in self.connections ]
+ raise AssertionError("Not all nodes requested transaction")
+ # --> Answer request (we did this inline!)
+
+ # Get the mempool
+ [ c.cb.send_mempool() for c in self.connections ]
+
+ # Send ping and wait for response -- synchronization hack
+ [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
+ self.wait_for_pings(self.ping_counter)
+ self.ping_counter += 1
+
+ # Sort inv responses from each node
+ with mininode_lock:
+ [ c.cb.lastInv.sort() for c in self.connections ]
+
+ # Verify that the tip of each connection all agree with each other, and
+ # with the expected outcome (if given)
+ def check_results(self, blockhash, outcome):
+ with mininode_lock:
+ for c in self.connections:
+ if outcome is None:
+ if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
+ return False
+ elif ((c.cb.bestblockhash == blockhash) != outcome):
+ # print c.cb.bestblockhash, blockhash, outcome
+ return False
+ return True
+
+ # Either check that the mempools all agree with each other, or that
+ # txhash's presence in the mempool matches the outcome specified.
+ # This is somewhat of a strange comparison, in that we're either comparing
+ # a particular tx to an outcome, or the entire mempools altogether;
+ # perhaps it would be useful to add the ability to check explicitly that
+ # a particular tx's existence in the mempool is the same across all nodes.
+ def check_mempool(self, txhash, outcome):
+ with mininode_lock:
+ for c in self.connections:
+ if outcome is None:
+ # Make sure the mempools agree with each other
+ if c.cb.lastInv != self.connections[0].cb.lastInv:
+ # print c.rpc.getrawmempool()
+ return False
+ elif ((txhash in c.cb.lastInv) != outcome):
+ # print c.rpc.getrawmempool(), c.cb.lastInv
+ return False
+ return True
+
+ def run(self):
+ # Wait until verack is received
+ self.wait_for_verack()
+
+ test_number = 1
+ for test_instance in self.test_generator.get_tests():
+ # We use these variables to keep track of the last block
+ # and last transaction in the tests, which are used
+ # if we're not syncing on every block or every tx.
+ [ block, block_outcome ] = [ None, None ]
+ [ tx, tx_outcome ] = [ None, None ]
+ invqueue = []
+
+ for b_or_t, outcome in test_instance.blocks_and_transactions:
+ # Determine if we're dealing with a block or tx
+ if isinstance(b_or_t, CBlock): # Block test runner
+ block = b_or_t
+ block_outcome = outcome
+ # Add to shared block_store, set as current block
+ with mininode_lock:
+ self.block_store.add_block(block)
+ for c in self.connections:
+ c.cb.block_request_map[block.sha256] = False
+ # Either send inv's to each node and sync, or add
+ # to invqueue for later inv'ing.
+ if (test_instance.sync_every_block):
+ [ c.cb.send_inv(block) for c in self.connections ]
+ self.sync_blocks(block.sha256, 1)
+ if (not self.check_results(block.sha256, outcome)):
+ raise AssertionError("Test failed at test %d" % test_number)
+ else:
+ invqueue.append(CInv(2, block.sha256))
+ else: # Tx test runner
+ assert(isinstance(b_or_t, CTransaction))
+ tx = b_or_t
+ tx_outcome = outcome
+ # Add to shared tx store and clear map entry
+ with mininode_lock:
+ self.tx_store.add_transaction(tx)
+ for c in self.connections:
+ c.cb.tx_request_map[tx.sha256] = False
+ # Again, either inv to all nodes or save for later
+ if (test_instance.sync_every_tx):
+ [ c.cb.send_inv(tx) for c in self.connections ]
+ self.sync_transaction(tx.sha256, 1)
+ if (not self.check_mempool(tx.sha256, outcome)):
+ raise AssertionError("Test failed at test %d" % test_number)
+ else:
+ invqueue.append(CInv(1, tx.sha256))
+ # Ensure we're not overflowing the inv queue
+ if len(invqueue) == MAX_INV_SZ:
+ [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ invqueue = []
+
+ # Do final sync if we weren't syncing on every block or every tx.
+ if (not test_instance.sync_every_block and block is not None):
+ if len(invqueue) > 0:
+ [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ invqueue = []
+ self.sync_blocks(block.sha256,
+ len(test_instance.blocks_and_transactions))
+ if (not self.check_results(block.sha256, block_outcome)):
+ raise AssertionError("Block test failed at test %d" % test_number)
+ if (not test_instance.sync_every_tx and tx is not None):
+ if len(invqueue) > 0:
+ [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ invqueue = []
+ self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
+ if (not self.check_mempool(tx.sha256, tx_outcome)):
+ raise AssertionError("Mempool test failed at test %d" % test_number)
+
+ print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ]
+ test_number += 1
+
+ self.block_store.close()
+ self.tx_store.close()
+ [ c.disconnect_node() for c in self.connections ]
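The comment block above spells out the `TestInstance` contract; in practice a test plugs into the tool roughly like this (a condensed sketch in the style of the tests added in this change):

```python
import time

from test_framework import ComparisonTestFramework
from comptool import TestManager, TestInstance
from mininode import NetworkThread
from blocktools import create_block, create_coinbase

class ExampleComparisonTest(ComparisonTestFramework):
    def __init__(self):
        self.num_nodes = 1

    def run_test(self):
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        NetworkThread().start()
        test.run()

    def get_tests(self):
        tip = int("0x" + self.nodes[0].getbestblockhash() + "L", 0)
        block = create_block(tip, create_coinbase(), int(time.time()) + 1)
        block.solve()
        # Outcome True: every node must accept the block and adopt it as tip.
        # False would require rejection; None only checks that the nodes agree.
        yield TestInstance([[block, True]])
```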
diff --git a/qa/rpc-tests/invalidblockrequest.py b/qa/rpc-tests/invalidblockrequest.py
new file mode 100755
index 0000000000..8b685ed9b2
--- /dev/null
+++ b/qa/rpc-tests/invalidblockrequest.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python2
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+from test_framework import ComparisonTestFramework
+from util import *
+from comptool import TestManager, TestInstance
+from mininode import *
+from blocktools import *
+import logging
+import copy
+import time
+
+
+'''
+In this test we connect to one node over p2p, and test block requests:
+1) Valid blocks should be requested and become chain tip.
+2) Invalid block with duplicated transaction should be re-requested.
+3) Invalid block with bad coinbase value should be rejected and not
+re-requested.
+'''
+
+# Use the ComparisonTestFramework with 1 node: only use --testbinary.
+class InvalidBlockRequestTest(ComparisonTestFramework):
+
+ ''' Can either run this test as 1 node with expected answers, or two and compare them.
+ Change the "outcome" variable from each TestInstance object to only do the comparison. '''
+ def __init__(self):
+ self.num_nodes = 1
+
+ def run_test(self):
+ test = TestManager(self, self.options.tmpdir)
+ test.add_all_connections(self.nodes)
+ self.tip = None
+ self.block_time = None
+ NetworkThread().start() # Start up network handling in another thread
+ test.run()
+
+ def get_tests(self):
+ if self.tip is None:
+ self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
+ self.block_time = int(time.time())+1
+
+ '''
+ Create a new block with an anyone-can-spend coinbase
+ '''
+ block = create_block(self.tip, create_coinbase(), self.block_time)
+ self.block_time += 1
+ block.solve()
+ # Save the coinbase for later
+ self.block1 = block
+ self.tip = block.sha256
+ yield TestInstance([[block, True]])
+
+ '''
+ Now we need that block to mature so we can spend the coinbase.
+ '''
+ test = TestInstance(sync_every_block=False)
+ for i in xrange(100):
+ block = create_block(self.tip, create_coinbase(), self.block_time)
+ block.solve()
+ self.tip = block.sha256
+ self.block_time += 1
+ test.blocks_and_transactions.append([block, True])
+ yield test
+
+ '''
+ Now we use merkle-root malleability to generate an invalid block with
+ same blockheader.
+ Manufacture a block with 3 transactions (coinbase, spend of prior
+ coinbase, spend of that spend). Duplicate the 3rd transaction to
+ leave merkle root and blockheader unchanged but invalidate the block.
+ '''
+ block2 = create_block(self.tip, create_coinbase(), self.block_time)
+ self.block_time += 1
+
+ # chr(81) is OP_TRUE
+ tx1 = create_transaction(self.block1.vtx[0], 0, chr(81), 50*100000000)
+ tx2 = create_transaction(tx1, 0, chr(81), 50*100000000)
+
+ block2.vtx.extend([tx1, tx2])
+ block2.hashMerkleRoot = block2.calc_merkle_root()
+ block2.rehash()
+ block2.solve()
+ orig_hash = block2.sha256
+ block2_orig = copy.deepcopy(block2)
+
+ # Mutate block 2
+ block2.vtx.append(tx2)
+ assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
+ assert_equal(orig_hash, block2.rehash())
+ assert(block2_orig.vtx != block2.vtx)
+
+ self.tip = block2.sha256
+ yield TestInstance([[block2, False], [block2_orig, True]])
+
+ '''
+ Make sure that a totally screwed up block is not valid.
+ '''
+ block3 = create_block(self.tip, create_coinbase(), self.block_time)
+ self.block_time += 1
+ block3.vtx[0].vout[0].nValue = 100*100000000 # Too high!
+ block3.vtx[0].sha256=None
+ block3.vtx[0].calc_sha256()
+ block3.hashMerkleRoot = block3.calc_merkle_root()
+ block3.rehash()
+ block3.solve()
+
+ yield TestInstance([[block3, False]])
+
+
+if __name__ == '__main__':
+ InvalidBlockRequestTest().main()
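The duplicate-transaction case above works because Bitcoin's merkle computation duplicates the last hash of any odd-length level, so `[a, b, c]` and `[a, b, c, c]` produce the same root. A standalone toy check of that property, using the same double-SHA256 pairing rule as `calc_merkle_root`:

```python
from hashlib import sha256

def dsha(data):
    return sha256(sha256(data).digest()).digest()

def merkle_root(leaf_hashes):
    layer = list(leaf_hashes)
    while len(layer) > 1:
        if len(layer) % 2:
            layer.append(layer[-1])      # duplicate the odd hash out
        layer = [dsha(layer[i] + layer[i + 1]) for i in range(0, len(layer), 2)]
    return layer[0]

txids = [dsha(b'a'), dsha(b'b'), dsha(b'c')]
assert merkle_root(txids) == merkle_root(txids + [txids[-1]])
```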
diff --git a/qa/rpc-tests/maxblocksinflight.py b/qa/rpc-tests/maxblocksinflight.py
new file mode 100755
index 0000000000..87c80cd97e
--- /dev/null
+++ b/qa/rpc-tests/maxblocksinflight.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python2
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+from mininode import *
+from test_framework import BitcoinTestFramework
+from util import *
+import logging
+
+'''
+In this test we connect to one node over p2p, send it numerous inv's, and
+compare the resulting number of getdata requests to a max allowed value. We
+test for exceeding 128 blocks in flight, which was the limit an 0.9 client will
+reach. [0.10 clients shouldn't request more than 16 from a single peer.]
+'''
+MAX_REQUESTS = 128
+
+class TestManager(NodeConnCB):
+ # set up NodeConnCB callbacks, overriding base class
+ def on_getdata(self, conn, message):
+ self.log.debug("got getdata %s" % repr(message))
+ # Log the requests
+ for inv in message.inv:
+ if inv.hash not in self.blockReqCounts:
+ self.blockReqCounts[inv.hash] = 0
+ self.blockReqCounts[inv.hash] += 1
+
+ def on_close(self, conn):
+ if not self.disconnectOkay:
+ raise EarlyDisconnectError(0)
+
+ def __init__(self):
+ NodeConnCB.__init__(self)
+ self.log = logging.getLogger("BlockRelayTest")
+ self.create_callback_map()
+
+ def add_new_connection(self, connection):
+ self.connection = connection
+ self.blockReqCounts = {}
+ self.disconnectOkay = False
+
+ def run(self):
+ try:
+ fail = False
+ self.connection.rpc.generate(1) # Leave IBD
+
+ numBlocksToGenerate = [ 8, 16, 128, 1024 ]
+ for count in range(len(numBlocksToGenerate)):
+ current_invs = []
+ for i in range(numBlocksToGenerate[count]):
+ current_invs.append(CInv(2, random.randrange(0, 1<<256)))
+ if len(current_invs) >= 50000:
+ self.connection.send_message(msg_inv(current_invs))
+ current_invs = []
+ if len(current_invs) > 0:
+ self.connection.send_message(msg_inv(current_invs))
+
+ # Wait and see how many blocks were requested
+ time.sleep(2)
+
+ total_requests = 0
+ with mininode_lock:
+ for key in self.blockReqCounts:
+ total_requests += self.blockReqCounts[key]
+ if self.blockReqCounts[key] > 1:
+ raise AssertionError("Error, test failed: block %064x requested more than once" % key)
+ if total_requests > MAX_REQUESTS:
+ raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
+ print "Round %d: success (total requests: %d)" % (count, total_requests)
+ except AssertionError as e:
+ print "TEST FAILED: ", e.args
+
+ self.disconnectOkay = True
+ self.connection.disconnect_node()
+
+
+class MaxBlocksInFlightTest(BitcoinTestFramework):
+ def add_options(self, parser):
+ parser.add_option("--testbinary", dest="testbinary",
+ default=os.getenv("BITCOIND", "bitcoind"),
+ help="Binary to test max block requests behavior")
+
+ def setup_chain(self):
+ print "Initializing test directory "+self.options.tmpdir
+ initialize_chain_clean(self.options.tmpdir, 1)
+
+ def setup_network(self):
+ self.nodes = start_nodes(1, self.options.tmpdir,
+ extra_args=[['-debug', '-whitelist=127.0.0.1']],
+ binary=[self.options.testbinary])
+
+ def run_test(self):
+ test = TestManager()
+ test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
+ NetworkThread().start() # Start up network handling in another thread
+ test.run()
+
+if __name__ == '__main__':
+ MaxBlocksInFlightTest().main()
diff --git a/qa/rpc-tests/mininode.py b/qa/rpc-tests/mininode.py
new file mode 100755
index 0000000000..b7d78e74fa
--- /dev/null
+++ b/qa/rpc-tests/mininode.py
@@ -0,0 +1,1256 @@
+# mininode.py - Bitcoin P2P network half-a-node
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+# This python code was modified from ArtForz' public domain half-a-node, as
+# found in the mini-node branch of http://github.com/jgarzik/pynode.
+#
+# NodeConn: an object which manages p2p connectivity to a bitcoin node
+# NodeConnCB: a base class that describes the interface for receiving
+# callbacks with network messages from a NodeConn
+# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
+# data structures that should map to corresponding structures in
+# bitcoin/primitives
+# msg_block, msg_tx, msg_headers, etc.:
+# data structures that represent network messages
+# ser_*, deser_*: functions that handle serialization/deserialization
+
+
+import struct
+import socket
+import asyncore
+import binascii
+import time
+import sys
+import random
+import cStringIO
+import hashlib
+from threading import RLock
+from threading import Thread
+import logging
+import copy
+
+BIP0031_VERSION = 60000
+MY_VERSION = 60001 # past bip-31 for ping/pong
+MY_SUBVERSION = "/python-mininode-tester:0.0.1/"
+
+MAX_INV_SZ = 50000
+
+# Keep our own socket map for asyncore, so that we can track disconnects
+# ourselves (to workaround an issue with closing an asyncore socket when
+# using select)
+mininode_socket_map = dict()
+
+# One lock for synchronizing all data access between the networking thread (see
+# NetworkThread below) and the thread running the test logic. For simplicity,
+# NodeConn acquires this lock whenever delivering a message to to a NodeConnCB,
+# and whenever adding anything to the send buffer (in send_message()). This
+# lock should be acquired in the thread running the test logic to synchronize
+# access to any data shared with the NodeConnCB or NodeConn.
+mininode_lock = RLock()
+
+# Serialization/deserialization tools
+def sha256(s):
+ return hashlib.new('sha256', s).digest()
+
+
+def hash256(s):
+ return sha256(sha256(s))
+
+
+def deser_string(f):
+ nit = struct.unpack("<B", f.read(1))[0]
+ if nit == 253:
+ nit = struct.unpack("<H", f.read(2))[0]
+ elif nit == 254:
+ nit = struct.unpack("<I", f.read(4))[0]
+ elif nit == 255:
+ nit = struct.unpack("<Q", f.read(8))[0]
+ return f.read(nit)
+
+
+def ser_string(s):
+ if len(s) < 253:
+ return chr(len(s)) + s
+ elif len(s) < 0x10000:
+ return chr(253) + struct.pack("<H", len(s)) + s
+ elif len(s) < 0x100000000L:
+ return chr(254) + struct.pack("<I", len(s)) + s
+ return chr(255) + struct.pack("<Q", len(s)) + s
+
+
+def deser_uint256(f):
+ r = 0L
+ for i in xrange(8):
+ t = struct.unpack("<I", f.read(4))[0]
+ r += t << (i * 32)
+ return r
+
+
+def ser_uint256(u):
+ rs = ""
+ for i in xrange(8):
+ rs += struct.pack("<I", u & 0xFFFFFFFFL)
+ u >>= 32
+ return rs
+
+
+def uint256_from_str(s):
+ r = 0L
+ t = struct.unpack("<IIIIIIII", s[:32])
+ for i in xrange(8):
+ r += t[i] << (i * 32)
+ return r
+
+
+def uint256_from_compact(c):
+ nbytes = (c >> 24) & 0xFF
+ v = (c & 0xFFFFFFL) << (8 * (nbytes - 3))
+ return v
+
+
+def deser_vector(f, c):
+ nit = struct.unpack("<B", f.read(1))[0]
+ if nit == 253:
+ nit = struct.unpack("<H", f.read(2))[0]
+ elif nit == 254:
+ nit = struct.unpack("<I", f.read(4))[0]
+ elif nit == 255:
+ nit = struct.unpack("<Q", f.read(8))[0]
+ r = []
+ for i in xrange(nit):
+ t = c()
+ t.deserialize(f)
+ r.append(t)
+ return r
+
+
+def ser_vector(l):
+ r = ""
+ if len(l) < 253:
+ r = chr(len(l))
+ elif len(l) < 0x10000:
+ r = chr(253) + struct.pack("<H", len(l))
+ elif len(l) < 0x100000000L:
+ r = chr(254) + struct.pack("<I", len(l))
+ else:
+ r = chr(255) + struct.pack("<Q", len(l))
+ for i in l:
+ r += i.serialize()
+ return r
+
+
+def deser_uint256_vector(f):
+ nit = struct.unpack("<B", f.read(1))[0]
+ if nit == 253:
+ nit = struct.unpack("<H", f.read(2))[0]
+ elif nit == 254:
+ nit = struct.unpack("<I", f.read(4))[0]
+ elif nit == 255:
+ nit = struct.unpack("<Q", f.read(8))[0]
+ r = []
+ for i in xrange(nit):
+ t = deser_uint256(f)
+ r.append(t)
+ return r
+
+
+def ser_uint256_vector(l):
+ r = ""
+ if len(l) < 253:
+ r = chr(len(l))
+ elif len(l) < 0x10000:
+ r = chr(253) + struct.pack("<H", len(l))
+ elif len(l) < 0x100000000L:
+ r = chr(254) + struct.pack("<I", len(l))
+ else:
+ r = chr(255) + struct.pack("<Q", len(l))
+ for i in l:
+ r += ser_uint256(i)
+ return r
+
+
+def deser_string_vector(f):
+ nit = struct.unpack("<B", f.read(1))[0]
+ if nit == 253:
+ nit = struct.unpack("<H", f.read(2))[0]
+ elif nit == 254:
+ nit = struct.unpack("<I", f.read(4))[0]
+ elif nit == 255:
+ nit = struct.unpack("<Q", f.read(8))[0]
+ r = []
+ for i in xrange(nit):
+ t = deser_string(f)
+ r.append(t)
+ return r
+
+
+def ser_string_vector(l):
+ r = ""
+ if len(l) < 253:
+ r = chr(len(l))
+ elif len(l) < 0x10000:
+ r = chr(253) + struct.pack("<H", len(l))
+ elif len(l) < 0x100000000L:
+ r = chr(254) + struct.pack("<I", len(l))
+ else:
+ r = chr(255) + struct.pack("<Q", len(l))
+ for sv in l:
+ r += ser_string(sv)
+ return r
+
+
+def deser_int_vector(f):
+ nit = struct.unpack("<B", f.read(1))[0]
+ if nit == 253:
+ nit = struct.unpack("<H", f.read(2))[0]
+ elif nit == 254:
+ nit = struct.unpack("<I", f.read(4))[0]
+ elif nit == 255:
+ nit = struct.unpack("<Q", f.read(8))[0]
+ r = []
+ for i in xrange(nit):
+ t = struct.unpack("<i", f.read(4))[0]
+ r.append(t)
+ return r
+
+
+def ser_int_vector(l):
+ r = ""
+ if len(l) < 253:
+ r = chr(len(l))
+ elif len(l) < 0x10000:
+ r = chr(253) + struct.pack("<H", len(l))
+ elif len(l) < 0x100000000L:
+ r = chr(254) + struct.pack("<I", len(l))
+ else:
+ r = chr(255) + struct.pack("<Q", len(l))
+ for i in l:
+ r += struct.pack("<i", i)
+ return r
+
+
+# Objects that map to bitcoind objects, which can be serialized/deserialized
+
+class CAddress(object):
+ def __init__(self):
+ self.nServices = 1
+ self.pchReserved = "\x00" * 10 + "\xff" * 2
+ self.ip = "0.0.0.0"
+ self.port = 0
+
+ def deserialize(self, f):
+ self.nServices = struct.unpack("<Q", f.read(8))[0]
+ self.pchReserved = f.read(12)
+ self.ip = socket.inet_ntoa(f.read(4))
+ self.port = struct.unpack(">H", f.read(2))[0]
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<Q", self.nServices)
+ r += self.pchReserved
+ r += socket.inet_aton(self.ip)
+ r += struct.pack(">H", self.port)
+ return r
+
+ def __repr__(self):
+ return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
+ self.ip, self.port)
+
+
+class CInv(object):
+ typemap = {
+ 0: "Error",
+ 1: "TX",
+ 2: "Block"}
+
+ def __init__(self, t=0, h=0L):
+ self.type = t
+ self.hash = h
+
+ def deserialize(self, f):
+ self.type = struct.unpack("<i", f.read(4))[0]
+ self.hash = deser_uint256(f)
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<i", self.type)
+ r += ser_uint256(self.hash)
+ return r
+
+ def __repr__(self):
+ return "CInv(type=%s hash=%064x)" \
+ % (self.typemap[self.type], self.hash)
+
+
+class CBlockLocator(object):
+ def __init__(self):
+ self.nVersion = MY_VERSION
+ self.vHave = []
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ self.vHave = deser_uint256_vector(f)
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<i", self.nVersion)
+ r += ser_uint256_vector(self.vHave)
+ return r
+
+ def __repr__(self):
+ return "CBlockLocator(nVersion=%i vHave=%s)" \
+ % (self.nVersion, repr(self.vHave))
+
+
+class COutPoint(object):
+ def __init__(self, hash=0, n=0):
+ self.hash = hash
+ self.n = n
+
+ def deserialize(self, f):
+ self.hash = deser_uint256(f)
+ self.n = struct.unpack("<I", f.read(4))[0]
+
+ def serialize(self):
+ r = ""
+ r += ser_uint256(self.hash)
+ r += struct.pack("<I", self.n)
+ return r
+
+ def __repr__(self):
+ return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
+
+
+class CTxIn(object):
+ def __init__(self, outpoint=None, scriptSig="", nSequence=0):
+ if outpoint is None:
+ self.prevout = COutPoint()
+ else:
+ self.prevout = outpoint
+ self.scriptSig = scriptSig
+ self.nSequence = nSequence
+
+ def deserialize(self, f):
+ self.prevout = COutPoint()
+ self.prevout.deserialize(f)
+ self.scriptSig = deser_string(f)
+ self.nSequence = struct.unpack("<I", f.read(4))[0]
+
+ def serialize(self):
+ r = ""
+ r += self.prevout.serialize()
+ r += ser_string(self.scriptSig)
+ r += struct.pack("<I", self.nSequence)
+ return r
+
+ def __repr__(self):
+ return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
+ % (repr(self.prevout), binascii.hexlify(self.scriptSig),
+ self.nSequence)
+
+
+class CTxOut(object):
+ def __init__(self, nValue=0, scriptPubKey=""):
+ self.nValue = nValue
+ self.scriptPubKey = scriptPubKey
+
+ def deserialize(self, f):
+ self.nValue = struct.unpack("<q", f.read(8))[0]
+ self.scriptPubKey = deser_string(f)
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<q", self.nValue)
+ r += ser_string(self.scriptPubKey)
+ return r
+
+ def __repr__(self):
+ return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
+ % (self.nValue // 100000000, self.nValue % 100000000,
+ binascii.hexlify(self.scriptPubKey))
+
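+# nValue above is denominated in satoshis (1 BTC = 100,000,000 satoshis); for
+# example (illustrative), CTxOut(150000000, script) prints via __repr__ as
+# "CTxOut(nValue=1.50000000 ...)".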
+
+class CTransaction(object):
+ def __init__(self, tx=None):
+ if tx is None:
+ self.nVersion = 1
+ self.vin = []
+ self.vout = []
+ self.nLockTime = 0
+ self.sha256 = None
+ self.hash = None
+ else:
+ self.nVersion = tx.nVersion
+ self.vin = copy.deepcopy(tx.vin)
+ self.vout = copy.deepcopy(tx.vout)
+ self.nLockTime = tx.nLockTime
+ self.sha256 = None
+ self.hash = None
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ self.vin = deser_vector(f, CTxIn)
+ self.vout = deser_vector(f, CTxOut)
+ self.nLockTime = struct.unpack("<I", f.read(4))[0]
+ self.sha256 = None
+ self.hash = None
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<i", self.nVersion)
+ r += ser_vector(self.vin)
+ r += ser_vector(self.vout)
+ r += struct.pack("<I", self.nLockTime)
+ return r
+
+ def rehash(self):
+ self.sha256 = None
+ self.calc_sha256()
+
+ def calc_sha256(self):
+ if self.sha256 is None:
+ self.sha256 = uint256_from_str(hash256(self.serialize()))
+ self.hash = hash256(self.serialize())[::-1].encode('hex_codec')
+
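+    # Note: after calc_sha256(), self.sha256 holds the raw double-SHA256 as an
+    # integer, while self.hash is the byte-reversed hex form (the txid shown by
+    # RPC calls such as getrawtransaction).
+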
+ def is_valid(self):
+ self.calc_sha256()
+ for tout in self.vout:
+ if tout.nValue < 0 or tout.nValue > 21000000L * 100000000L:
+ return False
+ return True
+
+ def __repr__(self):
+ return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
+ % (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
+
+
+class CBlockHeader(object):
+ def __init__(self, header=None):
+ if header is None:
+ self.set_null()
+ else:
+ self.nVersion = header.nVersion
+ self.hashPrevBlock = header.hashPrevBlock
+ self.hashMerkleRoot = header.hashMerkleRoot
+ self.nTime = header.nTime
+ self.nBits = header.nBits
+ self.nNonce = header.nNonce
+ self.sha256 = header.sha256
+ self.hash = header.hash
+ self.calc_sha256()
+
+ def set_null(self):
+ self.nVersion = 1
+ self.hashPrevBlock = 0
+ self.hashMerkleRoot = 0
+ self.nTime = 0
+ self.nBits = 0
+ self.nNonce = 0
+ self.sha256 = None
+ self.hash = None
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ self.hashPrevBlock = deser_uint256(f)
+ self.hashMerkleRoot = deser_uint256(f)
+ self.nTime = struct.unpack("<I", f.read(4))[0]
+ self.nBits = struct.unpack("<I", f.read(4))[0]
+ self.nNonce = struct.unpack("<I", f.read(4))[0]
+ self.sha256 = None
+ self.hash = None
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<i", self.nVersion)
+ r += ser_uint256(self.hashPrevBlock)
+ r += ser_uint256(self.hashMerkleRoot)
+ r += struct.pack("<I", self.nTime)
+ r += struct.pack("<I", self.nBits)
+ r += struct.pack("<I", self.nNonce)
+ return r
+
+ def calc_sha256(self):
+ if self.sha256 is None:
+ r = ""
+ r += struct.pack("<i", self.nVersion)
+ r += ser_uint256(self.hashPrevBlock)
+ r += ser_uint256(self.hashMerkleRoot)
+ r += struct.pack("<I", self.nTime)
+ r += struct.pack("<I", self.nBits)
+ r += struct.pack("<I", self.nNonce)
+ self.sha256 = uint256_from_str(hash256(r))
+ self.hash = hash256(r)[::-1].encode('hex_codec')
+
+ def rehash(self):
+ self.sha256 = None
+ self.calc_sha256()
+ return self.sha256
+
+ def __repr__(self):
+ return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
+ % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
+ time.ctime(self.nTime), self.nBits, self.nNonce)
+
+
+class CBlock(CBlockHeader):
+ def __init__(self, header=None):
+ super(CBlock, self).__init__(header)
+ self.vtx = []
+
+ def deserialize(self, f):
+ super(CBlock, self).deserialize(f)
+ self.vtx = deser_vector(f, CTransaction)
+
+ def serialize(self):
+ r = ""
+ r += super(CBlock, self).serialize()
+ r += ser_vector(self.vtx)
+ return r
+
+ def calc_merkle_root(self):
+ hashes = []
+ for tx in self.vtx:
+ tx.calc_sha256()
+ hashes.append(ser_uint256(tx.sha256))
+ while len(hashes) > 1:
+ newhashes = []
+ for i in xrange(0, len(hashes), 2):
+ i2 = min(i+1, len(hashes)-1)
+ newhashes.append(hash256(hashes[i] + hashes[i2]))
+ hashes = newhashes
+ return uint256_from_str(hashes[0])
+
+ def is_valid(self):
+ self.calc_sha256()
+ target = uint256_from_compact(self.nBits)
+ if self.sha256 > target:
+ return False
+ for tx in self.vtx:
+ if not tx.is_valid():
+ return False
+ if self.calc_merkle_root() != self.hashMerkleRoot:
+ return False
+ return True
+
+ def solve(self):
+ self.calc_sha256()
+ target = uint256_from_compact(self.nBits)
+ while self.sha256 > target:
+ self.nNonce += 1
+ self.rehash()
+
+ def __repr__(self):
+ return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
+ % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
+ time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
+
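+# A rough construction sketch (illustrative; create_block/create_coinbase are
+# helpers from blocktools.py used by the tests in this directory):
+#     block = create_block(tip_hash, create_coinbase(), block_time)
+#     block.vtx.append(some_tx)
+#     block.hashMerkleRoot = block.calc_merkle_root()
+#     block.solve()    # grinds nNonce until the header hash meets nBits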
+
+class CUnsignedAlert(object):
+ def __init__(self):
+ self.nVersion = 1
+ self.nRelayUntil = 0
+ self.nExpiration = 0
+ self.nID = 0
+ self.nCancel = 0
+ self.setCancel = []
+ self.nMinVer = 0
+ self.nMaxVer = 0
+ self.setSubVer = []
+ self.nPriority = 0
+ self.strComment = ""
+ self.strStatusBar = ""
+ self.strReserved = ""
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
+ self.nExpiration = struct.unpack("<q", f.read(8))[0]
+ self.nID = struct.unpack("<i", f.read(4))[0]
+ self.nCancel = struct.unpack("<i", f.read(4))[0]
+ self.setCancel = deser_int_vector(f)
+ self.nMinVer = struct.unpack("<i", f.read(4))[0]
+ self.nMaxVer = struct.unpack("<i", f.read(4))[0]
+ self.setSubVer = deser_string_vector(f)
+ self.nPriority = struct.unpack("<i", f.read(4))[0]
+ self.strComment = deser_string(f)
+ self.strStatusBar = deser_string(f)
+ self.strReserved = deser_string(f)
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<i", self.nVersion)
+ r += struct.pack("<q", self.nRelayUntil)
+ r += struct.pack("<q", self.nExpiration)
+ r += struct.pack("<i", self.nID)
+ r += struct.pack("<i", self.nCancel)
+ r += ser_int_vector(self.setCancel)
+ r += struct.pack("<i", self.nMinVer)
+ r += struct.pack("<i", self.nMaxVer)
+ r += ser_string_vector(self.setSubVer)
+ r += struct.pack("<i", self.nPriority)
+ r += ser_string(self.strComment)
+ r += ser_string(self.strStatusBar)
+ r += ser_string(self.strReserved)
+ return r
+
+ def __repr__(self):
+ return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
+ % (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
+ self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
+ self.strComment, self.strStatusBar, self.strReserved)
+
+
+class CAlert(object):
+ def __init__(self):
+ self.vchMsg = ""
+ self.vchSig = ""
+
+ def deserialize(self, f):
+ self.vchMsg = deser_string(f)
+ self.vchSig = deser_string(f)
+
+ def serialize(self):
+ r = ""
+ r += ser_string(self.vchMsg)
+ r += ser_string(self.vchSig)
+ return r
+
+ def __repr__(self):
+ return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
+ % (len(self.vchMsg), len(self.vchSig))
+
+
+# Objects that correspond to messages on the wire
+class msg_version(object):
+ command = "version"
+
+ def __init__(self):
+ self.nVersion = MY_VERSION
+ self.nServices = 1
+ self.nTime = time.time()
+ self.addrTo = CAddress()
+ self.addrFrom = CAddress()
+ self.nNonce = random.getrandbits(64)
+ self.strSubVer = MY_SUBVERSION
+ self.nStartingHeight = -1
+
+ def deserialize(self, f):
+ self.nVersion = struct.unpack("<i", f.read(4))[0]
+ if self.nVersion == 10300:
+ self.nVersion = 300
+ self.nServices = struct.unpack("<Q", f.read(8))[0]
+ self.nTime = struct.unpack("<q", f.read(8))[0]
+ self.addrTo = CAddress()
+ self.addrTo.deserialize(f)
+ if self.nVersion >= 106:
+ self.addrFrom = CAddress()
+ self.addrFrom.deserialize(f)
+ self.nNonce = struct.unpack("<Q", f.read(8))[0]
+ self.strSubVer = deser_string(f)
+ if self.nVersion >= 209:
+ self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
+ else:
+ self.nStartingHeight = None
+ else:
+ self.addrFrom = None
+ self.nNonce = None
+ self.strSubVer = None
+ self.nStartingHeight = None
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<i", self.nVersion)
+ r += struct.pack("<Q", self.nServices)
+ r += struct.pack("<q", self.nTime)
+ r += self.addrTo.serialize()
+ r += self.addrFrom.serialize()
+ r += struct.pack("<Q", self.nNonce)
+ r += ser_string(self.strSubVer)
+ r += struct.pack("<i", self.nStartingHeight)
+ return r
+
+ def __repr__(self):
+ return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
+ % (self.nVersion, self.nServices, time.ctime(self.nTime),
+ repr(self.addrTo), repr(self.addrFrom), self.nNonce,
+ self.strSubVer, self.nStartingHeight)
+
+
+class msg_verack(object):
+ command = "verack"
+
+ def __init__(self):
+ pass
+
+ def deserialize(self, f):
+ pass
+
+ def serialize(self):
+ return ""
+
+ def __repr__(self):
+ return "msg_verack()"
+
+
+class msg_addr(object):
+ command = "addr"
+
+ def __init__(self):
+ self.addrs = []
+
+ def deserialize(self, f):
+ self.addrs = deser_vector(f, CAddress)
+
+ def serialize(self):
+ return ser_vector(self.addrs)
+
+ def __repr__(self):
+ return "msg_addr(addrs=%s)" % (repr(self.addrs))
+
+
+class msg_alert(object):
+ command = "alert"
+
+ def __init__(self):
+ self.alert = CAlert()
+
+ def deserialize(self, f):
+ self.alert = CAlert()
+ self.alert.deserialize(f)
+
+ def serialize(self):
+ r = ""
+ r += self.alert.serialize()
+ return r
+
+ def __repr__(self):
+ return "msg_alert(alert=%s)" % (repr(self.alert), )
+
+
+class msg_inv(object):
+ command = "inv"
+
+ def __init__(self, inv=None):
+ if inv is None:
+ self.inv = []
+ else:
+ self.inv = inv
+
+ def deserialize(self, f):
+ self.inv = deser_vector(f, CInv)
+
+ def serialize(self):
+ return ser_vector(self.inv)
+
+ def __repr__(self):
+ return "msg_inv(inv=%s)" % (repr(self.inv))
+
+
+class msg_getdata(object):
+ command = "getdata"
+
+ def __init__(self):
+ self.inv = []
+
+ def deserialize(self, f):
+ self.inv = deser_vector(f, CInv)
+
+ def serialize(self):
+ return ser_vector(self.inv)
+
+ def __repr__(self):
+ return "msg_getdata(inv=%s)" % (repr(self.inv))
+
+
+class msg_getblocks(object):
+ command = "getblocks"
+
+ def __init__(self):
+ self.locator = CBlockLocator()
+ self.hashstop = 0L
+
+ def deserialize(self, f):
+ self.locator = CBlockLocator()
+ self.locator.deserialize(f)
+ self.hashstop = deser_uint256(f)
+
+ def serialize(self):
+ r = ""
+ r += self.locator.serialize()
+ r += ser_uint256(self.hashstop)
+ return r
+
+ def __repr__(self):
+ return "msg_getblocks(locator=%s hashstop=%064x)" \
+ % (repr(self.locator), self.hashstop)
+
+
+class msg_tx(object):
+ command = "tx"
+
+ def __init__(self, tx=CTransaction()):
+ self.tx = tx
+
+ def deserialize(self, f):
+ self.tx.deserialize(f)
+
+ def serialize(self):
+ return self.tx.serialize()
+
+ def __repr__(self):
+ return "msg_tx(tx=%s)" % (repr(self.tx))
+
+
+class msg_block(object):
+ command = "block"
+
+ def __init__(self, block=None):
+ if block is None:
+ self.block = CBlock()
+ else:
+ self.block = block
+
+ def deserialize(self, f):
+ self.block.deserialize(f)
+
+ def serialize(self):
+ return self.block.serialize()
+
+ def __repr__(self):
+ return "msg_block(block=%s)" % (repr(self.block))
+
+
+class msg_getaddr(object):
+ command = "getaddr"
+
+ def __init__(self):
+ pass
+
+ def deserialize(self, f):
+ pass
+
+ def serialize(self):
+ return ""
+
+ def __repr__(self):
+ return "msg_getaddr()"
+
+
+class msg_ping_prebip31(object):
+ command = "ping"
+
+ def __init__(self):
+ pass
+
+ def deserialize(self, f):
+ pass
+
+ def serialize(self):
+ return ""
+
+ def __repr__(self):
+ return "msg_ping() (pre-bip31)"
+
+
+class msg_ping(object):
+ command = "ping"
+
+ def __init__(self, nonce=0L):
+ self.nonce = nonce
+
+ def deserialize(self, f):
+ self.nonce = struct.unpack("<Q", f.read(8))[0]
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<Q", self.nonce)
+ return r
+
+ def __repr__(self):
+ return "msg_ping(nonce=%08x)" % self.nonce
+
+
+class msg_pong(object):
+ command = "pong"
+
+ def __init__(self, nonce=0L):
+ self.nonce = nonce
+
+ def deserialize(self, f):
+ self.nonce = struct.unpack("<Q", f.read(8))[0]
+
+ def serialize(self):
+ r = ""
+ r += struct.pack("<Q", self.nonce)
+ return r
+
+ def __repr__(self):
+ return "msg_pong(nonce=%08x)" % self.nonce
+
+
+class msg_mempool(object):
+ command = "mempool"
+
+ def __init__(self):
+ pass
+
+ def deserialize(self, f):
+ pass
+
+ def serialize(self):
+ return ""
+
+ def __repr__(self):
+ return "msg_mempool()"
+
+
+# getheaders message has
+# number of entries
+# vector of hashes
+# hash_stop (hash of last desired block header, 0 to get as many as possible)
+class msg_getheaders(object):
+ command = "getheaders"
+
+ def __init__(self):
+ self.locator = CBlockLocator()
+ self.hashstop = 0L
+
+ def deserialize(self, f):
+ self.locator = CBlockLocator()
+ self.locator.deserialize(f)
+ self.hashstop = deser_uint256(f)
+
+ def serialize(self):
+ r = ""
+ r += self.locator.serialize()
+ r += ser_uint256(self.hashstop)
+ return r
+
+ def __repr__(self):
+ return "msg_getheaders(locator=%s, stop=%064x)" \
+ % (repr(self.locator), self.hashstop)
+
+
+# headers message has
+# <count> <vector of block headers>
+class msg_headers(object):
+ command = "headers"
+
+ def __init__(self):
+ self.headers = []
+
+ def deserialize(self, f):
+ # comment in bitcoind indicates these should be deserialized as blocks
+ blocks = deser_vector(f, CBlock)
+ for x in blocks:
+ self.headers.append(CBlockHeader(x))
+
+ def serialize(self):
+ blocks = [CBlock(x) for x in self.headers]
+ return ser_vector(blocks)
+
+ def __repr__(self):
+ return "msg_headers(headers=%s)" % repr(self.headers)
+
+
+class msg_reject(object):
+ command = "reject"
+
+ def __init__(self):
+ self.message = ""
+ self.code = ""
+ self.reason = ""
+ self.data = 0L
+
+ def deserialize(self, f):
+ self.message = deser_string(f)
+ self.code = struct.unpack("<B", f.read(1))[0]
+ self.reason = deser_string(f)
+ if (self.message == "block" or self.message == "tx"):
+ self.data = deser_uint256(f)
+
+ def serialize(self):
+ r = ser_string(self.message)
+ r += struct.pack("<B", self.code)
+ r += ser_string(self.reason)
+ if (self.message == "block" or self.message == "tx"):
+ r += ser_uint256(self.data)
+ return r
+
+ def __repr__(self):
+ return "msg_reject: %s %d %s [%064x]" \
+ % (self.message, self.code, self.reason, self.data)
+
+
+# This is what a callback should look like for NodeConn
+# Reimplement the on_* functions to provide handling for events
+class NodeConnCB(object):
+ def __init__(self):
+ self.verack_received = False
+
+    # Derived classes should call this function once to set the message map,
+    # which maps incoming message commands to the derived class's handler functions
+ def create_callback_map(self):
+ self.cbmap = {
+ "version": self.on_version,
+ "verack": self.on_verack,
+ "addr": self.on_addr,
+ "alert": self.on_alert,
+ "inv": self.on_inv,
+ "getdata": self.on_getdata,
+ "getblocks": self.on_getblocks,
+ "tx": self.on_tx,
+ "block": self.on_block,
+ "getaddr": self.on_getaddr,
+ "ping": self.on_ping,
+ "pong": self.on_pong,
+ "headers": self.on_headers,
+ "getheaders": self.on_getheaders,
+ "reject": self.on_reject,
+ "mempool": self.on_mempool
+ }
+
+ def deliver(self, conn, message):
+ with mininode_lock:
+ try:
+ self.cbmap[message.command](conn, message)
+ except:
+ print "ERROR delivering %s (%s)" % (repr(message),
+ sys.exc_info()[0])
+
+ def on_version(self, conn, message):
+ if message.nVersion >= 209:
+ conn.send_message(msg_verack())
+ conn.ver_send = min(MY_VERSION, message.nVersion)
+ if message.nVersion < 209:
+ conn.ver_recv = conn.ver_send
+
+ def on_verack(self, conn, message):
+ conn.ver_recv = conn.ver_send
+ self.verack_received = True
+
+ def on_inv(self, conn, message):
+ want = msg_getdata()
+ for i in message.inv:
+ if i.type != 0:
+ want.inv.append(i)
+ if len(want.inv):
+ conn.send_message(want)
+
+ def on_addr(self, conn, message): pass
+ def on_alert(self, conn, message): pass
+ def on_getdata(self, conn, message): pass
+ def on_getblocks(self, conn, message): pass
+ def on_tx(self, conn, message): pass
+ def on_block(self, conn, message): pass
+ def on_getaddr(self, conn, message): pass
+ def on_headers(self, conn, message): pass
+ def on_getheaders(self, conn, message): pass
+ def on_ping(self, conn, message):
+ if conn.ver_send > BIP0031_VERSION:
+ conn.send_message(msg_pong(message.nonce))
+ def on_reject(self, conn, message): pass
+ def on_close(self, conn): pass
+    def on_mempool(self, conn, message): pass
+ def on_pong(self, conn, message): pass
+
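+# A minimal subclass sketch (illustrative; "TestNode" is hypothetical): tests
+# typically call create_callback_map() in __init__ and override only the
+# handlers they need, e.g.
+#     class TestNode(NodeConnCB):
+#         def __init__(self):
+#             NodeConnCB.__init__(self)
+#             self.create_callback_map()
+#         def on_block(self, conn, message):
+#             message.block.calc_sha256()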
+
+# The actual NodeConn class
+# This class provides an interface for a p2p connection to a specified node
+class NodeConn(asyncore.dispatcher):
+ messagemap = {
+ "version": msg_version,
+ "verack": msg_verack,
+ "addr": msg_addr,
+ "alert": msg_alert,
+ "inv": msg_inv,
+ "getdata": msg_getdata,
+ "getblocks": msg_getblocks,
+ "tx": msg_tx,
+ "block": msg_block,
+ "getaddr": msg_getaddr,
+ "ping": msg_ping,
+ "pong": msg_pong,
+ "headers": msg_headers,
+ "getheaders": msg_getheaders,
+ "reject": msg_reject,
+ "mempool": msg_mempool
+ }
+ MAGIC_BYTES = {
+ "mainnet": "\xf9\xbe\xb4\xd9", # mainnet
+ "testnet3": "\x0b\x11\x09\x07", # testnet3
+ "regtest": "\xfa\xbf\xb5\xda" # regtest
+ }
+
+ def __init__(self, dstaddr, dstport, rpc, callback, net="regtest"):
+ asyncore.dispatcher.__init__(self, map=mininode_socket_map)
+ self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
+ self.dstaddr = dstaddr
+ self.dstport = dstport
+ self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.sendbuf = ""
+ self.recvbuf = ""
+ self.ver_send = 209
+ self.ver_recv = 209
+ self.last_sent = 0
+ self.state = "connecting"
+ self.network = net
+ self.cb = callback
+ self.disconnect = False
+
+ # stuff version msg into sendbuf
+ vt = msg_version()
+ vt.addrTo.ip = self.dstaddr
+ vt.addrTo.port = self.dstport
+ vt.addrFrom.ip = "0.0.0.0"
+ vt.addrFrom.port = 0
+ self.send_message(vt, True)
+ print 'MiniNode: Connecting to Bitcoin Node IP # ' + dstaddr + ':' \
+ + str(dstport)
+
+ try:
+ self.connect((dstaddr, dstport))
+ except:
+ self.handle_close()
+ self.rpc = rpc
+
+ def show_debug_msg(self, msg):
+ self.log.debug(msg)
+
+ def handle_connect(self):
+ self.show_debug_msg("MiniNode: Connected & Listening: \n")
+ self.state = "connected"
+
+ def handle_close(self):
+ self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
+ % (self.dstaddr, self.dstport))
+ self.state = "closed"
+ self.recvbuf = ""
+ self.sendbuf = ""
+ try:
+ self.close()
+ except:
+ pass
+ self.cb.on_close(self)
+
+ def handle_read(self):
+ try:
+ t = self.recv(8192)
+ if len(t) > 0:
+ self.recvbuf += t
+ self.got_data()
+ except:
+ pass
+
+ def readable(self):
+ return True
+
+ def writable(self):
+ with mininode_lock:
+ length = len(self.sendbuf)
+ return (length > 0)
+
+ def handle_write(self):
+ with mininode_lock:
+ try:
+ sent = self.send(self.sendbuf)
+ except:
+ self.handle_close()
+ return
+ self.sendbuf = self.sendbuf[sent:]
+
+ def got_data(self):
+ while True:
+ if len(self.recvbuf) < 4:
+ return
+ if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
+ raise ValueError("got garbage %s" % repr(self.recvbuf))
+ if self.ver_recv < 209:
+ if len(self.recvbuf) < 4 + 12 + 4:
+ return
+ command = self.recvbuf[4:4+12].split("\x00", 1)[0]
+ msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
+ checksum = None
+ if len(self.recvbuf) < 4 + 12 + 4 + msglen:
+ return
+ msg = self.recvbuf[4+12+4:4+12+4+msglen]
+ self.recvbuf = self.recvbuf[4+12+4+msglen:]
+ else:
+ if len(self.recvbuf) < 4 + 12 + 4 + 4:
+ return
+ command = self.recvbuf[4:4+12].split("\x00", 1)[0]
+ msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
+ checksum = self.recvbuf[4+12+4:4+12+4+4]
+ if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
+ return
+ msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
+ th = sha256(msg)
+ h = sha256(th)
+ if checksum != h[:4]:
+ raise ValueError("got bad checksum " + repr(self.recvbuf))
+ self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
+ if command in self.messagemap:
+ f = cStringIO.StringIO(msg)
+ t = self.messagemap[command]()
+ t.deserialize(f)
+ self.got_message(t)
+ else:
+ self.show_debug_msg("Unknown command: '" + command + "' " +
+ repr(msg))
+
+ def send_message(self, message, pushbuf=False):
+ if self.state != "connected" and not pushbuf:
+ return
+ self.show_debug_msg("Send %s" % repr(message))
+ command = message.command
+ data = message.serialize()
+ tmsg = self.MAGIC_BYTES[self.network]
+ tmsg += command
+ tmsg += "\x00" * (12 - len(command))
+ tmsg += struct.pack("<I", len(data))
+ if self.ver_send >= 209:
+ th = sha256(data)
+ h = sha256(th)
+ tmsg += h[:4]
+ tmsg += data
+ with mininode_lock:
+ self.sendbuf += tmsg
+ self.last_sent = time.time()
+
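+    # Wire format assembled in send_message() above and parsed in got_data():
+    # 4-byte network magic, 12-byte zero-padded command, 4-byte little-endian
+    # payload length, a 4-byte double-SHA256 checksum (protocol >= 209), then
+    # the payload itself.
+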
+ def got_message(self, message):
+ if message.command == "version":
+ if message.nVersion <= BIP0031_VERSION:
+ self.messagemap['ping'] = msg_ping_prebip31
+ if self.last_sent + 30 * 60 < time.time():
+ self.send_message(self.messagemap['ping']())
+ self.show_debug_msg("Recv %s" % repr(message))
+ self.cb.deliver(self, message)
+
+ def disconnect_node(self):
+ self.disconnect = True
+
+
+class NetworkThread(Thread):
+ def run(self):
+ while mininode_socket_map:
+ # We check for whether to disconnect outside of the asyncore
+            # loop to work around the behavior of asyncore when using
+ # select
+ disconnected = []
+ for fd, obj in mininode_socket_map.items():
+ if obj.disconnect:
+ disconnected.append(obj)
+ [ obj.handle_close() for obj in disconnected ]
+ asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
+
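+# A rough wiring sketch (illustrative names): tests typically create one
+# callback per node, attach it to a NodeConn, then start the network thread:
+#     cb = NodeConnCB()
+#     connection = NodeConn('127.0.0.1', p2p_port(0), node_rpc, cb)
+#     NetworkThread().start()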
+
+# An exception we can raise if we detect a potential disconnect
+# (p2p or rpc) before the test is complete
+class EarlyDisconnectError(Exception):
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return repr(self.value)
diff --git a/qa/rpc-tests/script.py b/qa/rpc-tests/script.py
new file mode 100644
index 0000000000..03695b8635
--- /dev/null
+++ b/qa/rpc-tests/script.py
@@ -0,0 +1,896 @@
+#
+# script.py
+#
+# This file is modified from python-bitcoinlib.
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+"""Scripts
+
+Functionality to build scripts, as well as SignatureHash().
+"""
+
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+from mininode import CTransaction, CTxOut, hash256
+
+import sys
+bchr = chr
+bord = ord
+if sys.version > '3':
+ long = int
+ bchr = lambda x: bytes([x])
+ bord = lambda x: x
+
+import binascii
+import copy
+import struct
+
+import bignum
+
+MAX_SCRIPT_SIZE = 10000
+MAX_SCRIPT_ELEMENT_SIZE = 520
+MAX_SCRIPT_OPCODES = 201
+
+OPCODE_NAMES = {}
+
+_opcode_instances = []
+class CScriptOp(int):
+ """A single script opcode"""
+ __slots__ = []
+
+ @staticmethod
+ def encode_op_pushdata(d):
+ """Encode a PUSHDATA op, returning bytes"""
+ if len(d) < 0x4c:
+ return b'' + bchr(len(d)) + d # OP_PUSHDATA
+ elif len(d) <= 0xff:
+ return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1
+ elif len(d) <= 0xffff:
+ return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
+ elif len(d) <= 0xffffffff:
+ return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
+ else:
+ raise ValueError("Data too long to encode in a PUSHDATA op")
+
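+    # For example (illustrative): a 5-byte push is encoded as "\x05" + data, a
+    # 200-byte push as "\x4c\xc8" + data (OP_PUSHDATA1), and a 600-byte push as
+    # "\x4d\x58\x02" + data (OP_PUSHDATA2, little-endian length).
+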
+ @staticmethod
+ def encode_op_n(n):
+ """Encode a small integer op, returning an opcode"""
+ if not (0 <= n <= 16):
+ raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
+
+ if n == 0:
+ return OP_0
+ else:
+ return CScriptOp(OP_1 + n-1)
+
+ def decode_op_n(self):
+ """Decode a small integer opcode, returning an integer"""
+ if self == OP_0:
+ return 0
+
+ if not (self == OP_0 or OP_1 <= self <= OP_16):
+ raise ValueError('op %r is not an OP_N' % self)
+
+ return int(self - OP_1+1)
+
+ def is_small_int(self):
+ """Return true if the op pushes a small integer to the stack"""
+ if 0x51 <= self <= 0x60 or self == 0:
+ return True
+ else:
+ return False
+
+ def __str__(self):
+ return repr(self)
+
+ def __repr__(self):
+ if self in OPCODE_NAMES:
+ return OPCODE_NAMES[self]
+ else:
+ return 'CScriptOp(0x%x)' % self
+
+ def __new__(cls, n):
+ try:
+ return _opcode_instances[n]
+ except IndexError:
+ assert len(_opcode_instances) == n
+ _opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
+ return _opcode_instances[n]
+
+# Populate opcode instance table
+for n in range(0xff+1):
+ CScriptOp(n)
+
+
+# push value
+OP_0 = CScriptOp(0x00)
+OP_FALSE = OP_0
+OP_PUSHDATA1 = CScriptOp(0x4c)
+OP_PUSHDATA2 = CScriptOp(0x4d)
+OP_PUSHDATA4 = CScriptOp(0x4e)
+OP_1NEGATE = CScriptOp(0x4f)
+OP_RESERVED = CScriptOp(0x50)
+OP_1 = CScriptOp(0x51)
+OP_TRUE=OP_1
+OP_2 = CScriptOp(0x52)
+OP_3 = CScriptOp(0x53)
+OP_4 = CScriptOp(0x54)
+OP_5 = CScriptOp(0x55)
+OP_6 = CScriptOp(0x56)
+OP_7 = CScriptOp(0x57)
+OP_8 = CScriptOp(0x58)
+OP_9 = CScriptOp(0x59)
+OP_10 = CScriptOp(0x5a)
+OP_11 = CScriptOp(0x5b)
+OP_12 = CScriptOp(0x5c)
+OP_13 = CScriptOp(0x5d)
+OP_14 = CScriptOp(0x5e)
+OP_15 = CScriptOp(0x5f)
+OP_16 = CScriptOp(0x60)
+
+# control
+OP_NOP = CScriptOp(0x61)
+OP_VER = CScriptOp(0x62)
+OP_IF = CScriptOp(0x63)
+OP_NOTIF = CScriptOp(0x64)
+OP_VERIF = CScriptOp(0x65)
+OP_VERNOTIF = CScriptOp(0x66)
+OP_ELSE = CScriptOp(0x67)
+OP_ENDIF = CScriptOp(0x68)
+OP_VERIFY = CScriptOp(0x69)
+OP_RETURN = CScriptOp(0x6a)
+
+# stack ops
+OP_TOALTSTACK = CScriptOp(0x6b)
+OP_FROMALTSTACK = CScriptOp(0x6c)
+OP_2DROP = CScriptOp(0x6d)
+OP_2DUP = CScriptOp(0x6e)
+OP_3DUP = CScriptOp(0x6f)
+OP_2OVER = CScriptOp(0x70)
+OP_2ROT = CScriptOp(0x71)
+OP_2SWAP = CScriptOp(0x72)
+OP_IFDUP = CScriptOp(0x73)
+OP_DEPTH = CScriptOp(0x74)
+OP_DROP = CScriptOp(0x75)
+OP_DUP = CScriptOp(0x76)
+OP_NIP = CScriptOp(0x77)
+OP_OVER = CScriptOp(0x78)
+OP_PICK = CScriptOp(0x79)
+OP_ROLL = CScriptOp(0x7a)
+OP_ROT = CScriptOp(0x7b)
+OP_SWAP = CScriptOp(0x7c)
+OP_TUCK = CScriptOp(0x7d)
+
+# splice ops
+OP_CAT = CScriptOp(0x7e)
+OP_SUBSTR = CScriptOp(0x7f)
+OP_LEFT = CScriptOp(0x80)
+OP_RIGHT = CScriptOp(0x81)
+OP_SIZE = CScriptOp(0x82)
+
+# bit logic
+OP_INVERT = CScriptOp(0x83)
+OP_AND = CScriptOp(0x84)
+OP_OR = CScriptOp(0x85)
+OP_XOR = CScriptOp(0x86)
+OP_EQUAL = CScriptOp(0x87)
+OP_EQUALVERIFY = CScriptOp(0x88)
+OP_RESERVED1 = CScriptOp(0x89)
+OP_RESERVED2 = CScriptOp(0x8a)
+
+# numeric
+OP_1ADD = CScriptOp(0x8b)
+OP_1SUB = CScriptOp(0x8c)
+OP_2MUL = CScriptOp(0x8d)
+OP_2DIV = CScriptOp(0x8e)
+OP_NEGATE = CScriptOp(0x8f)
+OP_ABS = CScriptOp(0x90)
+OP_NOT = CScriptOp(0x91)
+OP_0NOTEQUAL = CScriptOp(0x92)
+
+OP_ADD = CScriptOp(0x93)
+OP_SUB = CScriptOp(0x94)
+OP_MUL = CScriptOp(0x95)
+OP_DIV = CScriptOp(0x96)
+OP_MOD = CScriptOp(0x97)
+OP_LSHIFT = CScriptOp(0x98)
+OP_RSHIFT = CScriptOp(0x99)
+
+OP_BOOLAND = CScriptOp(0x9a)
+OP_BOOLOR = CScriptOp(0x9b)
+OP_NUMEQUAL = CScriptOp(0x9c)
+OP_NUMEQUALVERIFY = CScriptOp(0x9d)
+OP_NUMNOTEQUAL = CScriptOp(0x9e)
+OP_LESSTHAN = CScriptOp(0x9f)
+OP_GREATERTHAN = CScriptOp(0xa0)
+OP_LESSTHANOREQUAL = CScriptOp(0xa1)
+OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
+OP_MIN = CScriptOp(0xa3)
+OP_MAX = CScriptOp(0xa4)
+
+OP_WITHIN = CScriptOp(0xa5)
+
+# crypto
+OP_RIPEMD160 = CScriptOp(0xa6)
+OP_SHA1 = CScriptOp(0xa7)
+OP_SHA256 = CScriptOp(0xa8)
+OP_HASH160 = CScriptOp(0xa9)
+OP_HASH256 = CScriptOp(0xaa)
+OP_CODESEPARATOR = CScriptOp(0xab)
+OP_CHECKSIG = CScriptOp(0xac)
+OP_CHECKSIGVERIFY = CScriptOp(0xad)
+OP_CHECKMULTISIG = CScriptOp(0xae)
+OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
+
+# expansion
+OP_NOP1 = CScriptOp(0xb0)
+OP_NOP2 = CScriptOp(0xb1)
+OP_NOP3 = CScriptOp(0xb2)
+OP_NOP4 = CScriptOp(0xb3)
+OP_NOP5 = CScriptOp(0xb4)
+OP_NOP6 = CScriptOp(0xb5)
+OP_NOP7 = CScriptOp(0xb6)
+OP_NOP8 = CScriptOp(0xb7)
+OP_NOP9 = CScriptOp(0xb8)
+OP_NOP10 = CScriptOp(0xb9)
+
+# template matching params
+OP_SMALLINTEGER = CScriptOp(0xfa)
+OP_PUBKEYS = CScriptOp(0xfb)
+OP_PUBKEYHASH = CScriptOp(0xfd)
+OP_PUBKEY = CScriptOp(0xfe)
+
+OP_INVALIDOPCODE = CScriptOp(0xff)
+
+VALID_OPCODES = {
+ OP_1NEGATE,
+ OP_RESERVED,
+ OP_1,
+ OP_2,
+ OP_3,
+ OP_4,
+ OP_5,
+ OP_6,
+ OP_7,
+ OP_8,
+ OP_9,
+ OP_10,
+ OP_11,
+ OP_12,
+ OP_13,
+ OP_14,
+ OP_15,
+ OP_16,
+
+ OP_NOP,
+ OP_VER,
+ OP_IF,
+ OP_NOTIF,
+ OP_VERIF,
+ OP_VERNOTIF,
+ OP_ELSE,
+ OP_ENDIF,
+ OP_VERIFY,
+ OP_RETURN,
+
+ OP_TOALTSTACK,
+ OP_FROMALTSTACK,
+ OP_2DROP,
+ OP_2DUP,
+ OP_3DUP,
+ OP_2OVER,
+ OP_2ROT,
+ OP_2SWAP,
+ OP_IFDUP,
+ OP_DEPTH,
+ OP_DROP,
+ OP_DUP,
+ OP_NIP,
+ OP_OVER,
+ OP_PICK,
+ OP_ROLL,
+ OP_ROT,
+ OP_SWAP,
+ OP_TUCK,
+
+ OP_CAT,
+ OP_SUBSTR,
+ OP_LEFT,
+ OP_RIGHT,
+ OP_SIZE,
+
+ OP_INVERT,
+ OP_AND,
+ OP_OR,
+ OP_XOR,
+ OP_EQUAL,
+ OP_EQUALVERIFY,
+ OP_RESERVED1,
+ OP_RESERVED2,
+
+ OP_1ADD,
+ OP_1SUB,
+ OP_2MUL,
+ OP_2DIV,
+ OP_NEGATE,
+ OP_ABS,
+ OP_NOT,
+ OP_0NOTEQUAL,
+
+ OP_ADD,
+ OP_SUB,
+ OP_MUL,
+ OP_DIV,
+ OP_MOD,
+ OP_LSHIFT,
+ OP_RSHIFT,
+
+ OP_BOOLAND,
+ OP_BOOLOR,
+ OP_NUMEQUAL,
+ OP_NUMEQUALVERIFY,
+ OP_NUMNOTEQUAL,
+ OP_LESSTHAN,
+ OP_GREATERTHAN,
+ OP_LESSTHANOREQUAL,
+ OP_GREATERTHANOREQUAL,
+ OP_MIN,
+ OP_MAX,
+
+ OP_WITHIN,
+
+ OP_RIPEMD160,
+ OP_SHA1,
+ OP_SHA256,
+ OP_HASH160,
+ OP_HASH256,
+ OP_CODESEPARATOR,
+ OP_CHECKSIG,
+ OP_CHECKSIGVERIFY,
+ OP_CHECKMULTISIG,
+ OP_CHECKMULTISIGVERIFY,
+
+ OP_NOP1,
+ OP_NOP2,
+ OP_NOP3,
+ OP_NOP4,
+ OP_NOP5,
+ OP_NOP6,
+ OP_NOP7,
+ OP_NOP8,
+ OP_NOP9,
+ OP_NOP10,
+
+ OP_SMALLINTEGER,
+ OP_PUBKEYS,
+ OP_PUBKEYHASH,
+ OP_PUBKEY,
+}
+
+OPCODE_NAMES.update({
+ OP_0 : 'OP_0',
+ OP_PUSHDATA1 : 'OP_PUSHDATA1',
+ OP_PUSHDATA2 : 'OP_PUSHDATA2',
+ OP_PUSHDATA4 : 'OP_PUSHDATA4',
+ OP_1NEGATE : 'OP_1NEGATE',
+ OP_RESERVED : 'OP_RESERVED',
+ OP_1 : 'OP_1',
+ OP_2 : 'OP_2',
+ OP_3 : 'OP_3',
+ OP_4 : 'OP_4',
+ OP_5 : 'OP_5',
+ OP_6 : 'OP_6',
+ OP_7 : 'OP_7',
+ OP_8 : 'OP_8',
+ OP_9 : 'OP_9',
+ OP_10 : 'OP_10',
+ OP_11 : 'OP_11',
+ OP_12 : 'OP_12',
+ OP_13 : 'OP_13',
+ OP_14 : 'OP_14',
+ OP_15 : 'OP_15',
+ OP_16 : 'OP_16',
+ OP_NOP : 'OP_NOP',
+ OP_VER : 'OP_VER',
+ OP_IF : 'OP_IF',
+ OP_NOTIF : 'OP_NOTIF',
+ OP_VERIF : 'OP_VERIF',
+ OP_VERNOTIF : 'OP_VERNOTIF',
+ OP_ELSE : 'OP_ELSE',
+ OP_ENDIF : 'OP_ENDIF',
+ OP_VERIFY : 'OP_VERIFY',
+ OP_RETURN : 'OP_RETURN',
+ OP_TOALTSTACK : 'OP_TOALTSTACK',
+ OP_FROMALTSTACK : 'OP_FROMALTSTACK',
+ OP_2DROP : 'OP_2DROP',
+ OP_2DUP : 'OP_2DUP',
+ OP_3DUP : 'OP_3DUP',
+ OP_2OVER : 'OP_2OVER',
+ OP_2ROT : 'OP_2ROT',
+ OP_2SWAP : 'OP_2SWAP',
+ OP_IFDUP : 'OP_IFDUP',
+ OP_DEPTH : 'OP_DEPTH',
+ OP_DROP : 'OP_DROP',
+ OP_DUP : 'OP_DUP',
+ OP_NIP : 'OP_NIP',
+ OP_OVER : 'OP_OVER',
+ OP_PICK : 'OP_PICK',
+ OP_ROLL : 'OP_ROLL',
+ OP_ROT : 'OP_ROT',
+ OP_SWAP : 'OP_SWAP',
+ OP_TUCK : 'OP_TUCK',
+ OP_CAT : 'OP_CAT',
+ OP_SUBSTR : 'OP_SUBSTR',
+ OP_LEFT : 'OP_LEFT',
+ OP_RIGHT : 'OP_RIGHT',
+ OP_SIZE : 'OP_SIZE',
+ OP_INVERT : 'OP_INVERT',
+ OP_AND : 'OP_AND',
+ OP_OR : 'OP_OR',
+ OP_XOR : 'OP_XOR',
+ OP_EQUAL : 'OP_EQUAL',
+ OP_EQUALVERIFY : 'OP_EQUALVERIFY',
+ OP_RESERVED1 : 'OP_RESERVED1',
+ OP_RESERVED2 : 'OP_RESERVED2',
+ OP_1ADD : 'OP_1ADD',
+ OP_1SUB : 'OP_1SUB',
+ OP_2MUL : 'OP_2MUL',
+ OP_2DIV : 'OP_2DIV',
+ OP_NEGATE : 'OP_NEGATE',
+ OP_ABS : 'OP_ABS',
+ OP_NOT : 'OP_NOT',
+ OP_0NOTEQUAL : 'OP_0NOTEQUAL',
+ OP_ADD : 'OP_ADD',
+ OP_SUB : 'OP_SUB',
+ OP_MUL : 'OP_MUL',
+ OP_DIV : 'OP_DIV',
+ OP_MOD : 'OP_MOD',
+ OP_LSHIFT : 'OP_LSHIFT',
+ OP_RSHIFT : 'OP_RSHIFT',
+ OP_BOOLAND : 'OP_BOOLAND',
+ OP_BOOLOR : 'OP_BOOLOR',
+ OP_NUMEQUAL : 'OP_NUMEQUAL',
+ OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
+ OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
+ OP_LESSTHAN : 'OP_LESSTHAN',
+ OP_GREATERTHAN : 'OP_GREATERTHAN',
+ OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
+ OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
+ OP_MIN : 'OP_MIN',
+ OP_MAX : 'OP_MAX',
+ OP_WITHIN : 'OP_WITHIN',
+ OP_RIPEMD160 : 'OP_RIPEMD160',
+ OP_SHA1 : 'OP_SHA1',
+ OP_SHA256 : 'OP_SHA256',
+ OP_HASH160 : 'OP_HASH160',
+ OP_HASH256 : 'OP_HASH256',
+ OP_CODESEPARATOR : 'OP_CODESEPARATOR',
+ OP_CHECKSIG : 'OP_CHECKSIG',
+ OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
+ OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
+ OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
+ OP_NOP1 : 'OP_NOP1',
+ OP_NOP2 : 'OP_NOP2',
+ OP_NOP3 : 'OP_NOP3',
+ OP_NOP4 : 'OP_NOP4',
+ OP_NOP5 : 'OP_NOP5',
+ OP_NOP6 : 'OP_NOP6',
+ OP_NOP7 : 'OP_NOP7',
+ OP_NOP8 : 'OP_NOP8',
+ OP_NOP9 : 'OP_NOP9',
+ OP_NOP10 : 'OP_NOP10',
+ OP_SMALLINTEGER : 'OP_SMALLINTEGER',
+ OP_PUBKEYS : 'OP_PUBKEYS',
+ OP_PUBKEYHASH : 'OP_PUBKEYHASH',
+ OP_PUBKEY : 'OP_PUBKEY',
+ OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
+})
+
+OPCODES_BY_NAME = {
+ 'OP_0' : OP_0,
+ 'OP_PUSHDATA1' : OP_PUSHDATA1,
+ 'OP_PUSHDATA2' : OP_PUSHDATA2,
+ 'OP_PUSHDATA4' : OP_PUSHDATA4,
+ 'OP_1NEGATE' : OP_1NEGATE,
+ 'OP_RESERVED' : OP_RESERVED,
+ 'OP_1' : OP_1,
+ 'OP_2' : OP_2,
+ 'OP_3' : OP_3,
+ 'OP_4' : OP_4,
+ 'OP_5' : OP_5,
+ 'OP_6' : OP_6,
+ 'OP_7' : OP_7,
+ 'OP_8' : OP_8,
+ 'OP_9' : OP_9,
+ 'OP_10' : OP_10,
+ 'OP_11' : OP_11,
+ 'OP_12' : OP_12,
+ 'OP_13' : OP_13,
+ 'OP_14' : OP_14,
+ 'OP_15' : OP_15,
+ 'OP_16' : OP_16,
+ 'OP_NOP' : OP_NOP,
+ 'OP_VER' : OP_VER,
+ 'OP_IF' : OP_IF,
+ 'OP_NOTIF' : OP_NOTIF,
+ 'OP_VERIF' : OP_VERIF,
+ 'OP_VERNOTIF' : OP_VERNOTIF,
+ 'OP_ELSE' : OP_ELSE,
+ 'OP_ENDIF' : OP_ENDIF,
+ 'OP_VERIFY' : OP_VERIFY,
+ 'OP_RETURN' : OP_RETURN,
+ 'OP_TOALTSTACK' : OP_TOALTSTACK,
+ 'OP_FROMALTSTACK' : OP_FROMALTSTACK,
+ 'OP_2DROP' : OP_2DROP,
+ 'OP_2DUP' : OP_2DUP,
+ 'OP_3DUP' : OP_3DUP,
+ 'OP_2OVER' : OP_2OVER,
+ 'OP_2ROT' : OP_2ROT,
+ 'OP_2SWAP' : OP_2SWAP,
+ 'OP_IFDUP' : OP_IFDUP,
+ 'OP_DEPTH' : OP_DEPTH,
+ 'OP_DROP' : OP_DROP,
+ 'OP_DUP' : OP_DUP,
+ 'OP_NIP' : OP_NIP,
+ 'OP_OVER' : OP_OVER,
+ 'OP_PICK' : OP_PICK,
+ 'OP_ROLL' : OP_ROLL,
+ 'OP_ROT' : OP_ROT,
+ 'OP_SWAP' : OP_SWAP,
+ 'OP_TUCK' : OP_TUCK,
+ 'OP_CAT' : OP_CAT,
+ 'OP_SUBSTR' : OP_SUBSTR,
+ 'OP_LEFT' : OP_LEFT,
+ 'OP_RIGHT' : OP_RIGHT,
+ 'OP_SIZE' : OP_SIZE,
+ 'OP_INVERT' : OP_INVERT,
+ 'OP_AND' : OP_AND,
+ 'OP_OR' : OP_OR,
+ 'OP_XOR' : OP_XOR,
+ 'OP_EQUAL' : OP_EQUAL,
+ 'OP_EQUALVERIFY' : OP_EQUALVERIFY,
+ 'OP_RESERVED1' : OP_RESERVED1,
+ 'OP_RESERVED2' : OP_RESERVED2,
+ 'OP_1ADD' : OP_1ADD,
+ 'OP_1SUB' : OP_1SUB,
+ 'OP_2MUL' : OP_2MUL,
+ 'OP_2DIV' : OP_2DIV,
+ 'OP_NEGATE' : OP_NEGATE,
+ 'OP_ABS' : OP_ABS,
+ 'OP_NOT' : OP_NOT,
+ 'OP_0NOTEQUAL' : OP_0NOTEQUAL,
+ 'OP_ADD' : OP_ADD,
+ 'OP_SUB' : OP_SUB,
+ 'OP_MUL' : OP_MUL,
+ 'OP_DIV' : OP_DIV,
+ 'OP_MOD' : OP_MOD,
+ 'OP_LSHIFT' : OP_LSHIFT,
+ 'OP_RSHIFT' : OP_RSHIFT,
+ 'OP_BOOLAND' : OP_BOOLAND,
+ 'OP_BOOLOR' : OP_BOOLOR,
+ 'OP_NUMEQUAL' : OP_NUMEQUAL,
+ 'OP_NUMEQUALVERIFY' : OP_NUMEQUALVERIFY,
+ 'OP_NUMNOTEQUAL' : OP_NUMNOTEQUAL,
+ 'OP_LESSTHAN' : OP_LESSTHAN,
+ 'OP_GREATERTHAN' : OP_GREATERTHAN,
+ 'OP_LESSTHANOREQUAL' : OP_LESSTHANOREQUAL,
+ 'OP_GREATERTHANOREQUAL' : OP_GREATERTHANOREQUAL,
+ 'OP_MIN' : OP_MIN,
+ 'OP_MAX' : OP_MAX,
+ 'OP_WITHIN' : OP_WITHIN,
+ 'OP_RIPEMD160' : OP_RIPEMD160,
+ 'OP_SHA1' : OP_SHA1,
+ 'OP_SHA256' : OP_SHA256,
+ 'OP_HASH160' : OP_HASH160,
+ 'OP_HASH256' : OP_HASH256,
+ 'OP_CODESEPARATOR' : OP_CODESEPARATOR,
+ 'OP_CHECKSIG' : OP_CHECKSIG,
+ 'OP_CHECKSIGVERIFY' : OP_CHECKSIGVERIFY,
+ 'OP_CHECKMULTISIG' : OP_CHECKMULTISIG,
+ 'OP_CHECKMULTISIGVERIFY' : OP_CHECKMULTISIGVERIFY,
+ 'OP_NOP1' : OP_NOP1,
+ 'OP_NOP2' : OP_NOP2,
+ 'OP_NOP3' : OP_NOP3,
+ 'OP_NOP4' : OP_NOP4,
+ 'OP_NOP5' : OP_NOP5,
+ 'OP_NOP6' : OP_NOP6,
+ 'OP_NOP7' : OP_NOP7,
+ 'OP_NOP8' : OP_NOP8,
+ 'OP_NOP9' : OP_NOP9,
+ 'OP_NOP10' : OP_NOP10,
+ 'OP_SMALLINTEGER' : OP_SMALLINTEGER,
+ 'OP_PUBKEYS' : OP_PUBKEYS,
+ 'OP_PUBKEYHASH' : OP_PUBKEYHASH,
+ 'OP_PUBKEY' : OP_PUBKEY,
+}
+
+class CScriptInvalidError(Exception):
+ """Base class for CScript exceptions"""
+ pass
+
+class CScriptTruncatedPushDataError(CScriptInvalidError):
+ """Invalid pushdata due to truncation"""
+ def __init__(self, msg, data):
+ self.data = data
+ super(CScriptTruncatedPushDataError, self).__init__(msg)
+
+# This is used, e.g., for block heights in coinbase scripts (BIP34)
+class CScriptNum(object):
+ def __init__(self, d=0):
+ self.value = d
+
+ @staticmethod
+ def encode(obj):
+ r = bytearray(0)
+ if obj.value == 0:
+ return bytes(r)
+ neg = obj.value < 0
+ absvalue = -obj.value if neg else obj.value
+ while (absvalue):
+ r.append(chr(absvalue & 0xff))
+ absvalue >>= 8
+ if r[-1] & 0x80:
+ r.append(0x80 if neg else 0)
+ elif neg:
+ r[-1] |= 0x80
+ return bytes(bchr(len(r)) + r)
+
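+# For example (illustrative): CScriptNum.encode(CScriptNum(1000)) produces
+# "\x02\xe8\x03" -- a length byte followed by 1000 in little-endian, minimally
+# encoded; negative values instead set the 0x80 sign bit on the final byte.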
+
+class CScript(bytes):
+ """Serialized script
+
+ A bytes subclass, so you can use this directly whenever bytes are accepted.
+ Note that this means that indexing does *not* work - you'll get an index by
+ byte rather than opcode. This format was chosen for efficiency so that the
+    general case would not require creating a lot of little CScriptOp objects.
+
+ iter(script) however does iterate by opcode.
+ """
+ @classmethod
+ def __coerce_instance(cls, other):
+ # Coerce other into bytes
+ if isinstance(other, CScriptOp):
+ other = bchr(other)
+ elif isinstance(other, CScriptNum):
+ if (other.value == 0):
+ other = bchr(CScriptOp(OP_0))
+ else:
+ other = CScriptNum.encode(other)
+ elif isinstance(other, (int, long)):
+ if 0 <= other <= 16:
+ other = bytes(bchr(CScriptOp.encode_op_n(other)))
+ elif other == -1:
+ other = bytes(bchr(OP_1NEGATE))
+ else:
+ other = CScriptOp.encode_op_pushdata(bignum.bn2vch(other))
+ elif isinstance(other, (bytes, bytearray)):
+ other = CScriptOp.encode_op_pushdata(other)
+ return other
+
+ def __add__(self, other):
+ # Do the coercion outside of the try block so that errors in it are
+ # noticed.
+ other = self.__coerce_instance(other)
+
+ try:
+ # bytes.__add__ always returns bytes instances unfortunately
+ return CScript(super(CScript, self).__add__(other))
+ except TypeError:
+ raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
+
+ def join(self, iterable):
+ # join makes no sense for a CScript()
+ raise NotImplementedError
+
+ def __new__(cls, value=b''):
+ if isinstance(value, bytes) or isinstance(value, bytearray):
+ return super(CScript, cls).__new__(cls, value)
+ else:
+ def coerce_iterable(iterable):
+ for instance in iterable:
+ yield cls.__coerce_instance(instance)
+ # Annoyingly on both python2 and python3 bytes.join() always
+ # returns a bytes instance even when subclassed.
+ return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
+
+ def raw_iter(self):
+ """Raw iteration
+
+ Yields tuples of (opcode, data, sop_idx) so that the different possible
+        PUSHDATA encodings can be accurately distinguished, and so that the
+        exact byte index of each opcode (sop_idx) is available.
+ """
+ i = 0
+ while i < len(self):
+ sop_idx = i
+ opcode = bord(self[i])
+ i += 1
+
+ if opcode > OP_PUSHDATA4:
+ yield (opcode, None, sop_idx)
+ else:
+ datasize = None
+ pushdata_type = None
+ if opcode < OP_PUSHDATA1:
+ pushdata_type = 'PUSHDATA(%d)' % opcode
+ datasize = opcode
+
+ elif opcode == OP_PUSHDATA1:
+ pushdata_type = 'PUSHDATA1'
+ if i >= len(self):
+ raise CScriptInvalidError('PUSHDATA1: missing data length')
+ datasize = bord(self[i])
+ i += 1
+
+ elif opcode == OP_PUSHDATA2:
+ pushdata_type = 'PUSHDATA2'
+ if i + 1 >= len(self):
+ raise CScriptInvalidError('PUSHDATA2: missing data length')
+ datasize = bord(self[i]) + (bord(self[i+1]) << 8)
+ i += 2
+
+ elif opcode == OP_PUSHDATA4:
+ pushdata_type = 'PUSHDATA4'
+ if i + 3 >= len(self):
+ raise CScriptInvalidError('PUSHDATA4: missing data length')
+ datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
+ i += 4
+
+ else:
+ assert False # shouldn't happen
+
+
+ data = bytes(self[i:i+datasize])
+
+ # Check for truncation
+ if len(data) < datasize:
+ raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
+
+ i += datasize
+
+ yield (opcode, data, sop_idx)
+
+ def __iter__(self):
+ """'Cooked' iteration
+
+ Returns either a CScriptOP instance, an integer, or bytes, as
+ appropriate.
+
+ See raw_iter() if you need to distinguish the different possible
+ PUSHDATA encodings.
+ """
+ for (opcode, data, sop_idx) in self.raw_iter():
+ if data is not None:
+ yield data
+ else:
+ opcode = CScriptOp(opcode)
+
+ if opcode.is_small_int():
+ yield opcode.decode_op_n()
+ else:
+ yield CScriptOp(opcode)
+
+ def __repr__(self):
+ # For Python3 compatibility add b before strings so testcases don't
+ # need to change
+ def _repr(o):
+ if isinstance(o, bytes):
+ return "x('%s')" % binascii.hexlify(o).decode('utf8')
+ else:
+ return repr(o)
+
+ ops = []
+ i = iter(self)
+ while True:
+ op = None
+ try:
+ op = _repr(next(i))
+ except CScriptTruncatedPushDataError as err:
+ op = '%s...<ERROR: %s>' % (_repr(err.data), err)
+ break
+ except CScriptInvalidError as err:
+ op = '<ERROR: %s>' % err
+ break
+ except StopIteration:
+ break
+ finally:
+ if op is not None:
+ ops.append(op)
+
+ return "CScript([%s])" % ', '.join(ops)
+
+ def GetSigOpCount(self, fAccurate):
+ """Get the SigOp count.
+
+ fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
+
+ Note that this is consensus-critical.
+ """
+ n = 0
+ lastOpcode = OP_INVALIDOPCODE
+ for (opcode, data, sop_idx) in self.raw_iter():
+ if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
+ n += 1
+ elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
+ if fAccurate and (OP_1 <= lastOpcode <= OP_16):
+ n += opcode.decode_op_n()
+ else:
+ n += 20
+ lastOpcode = opcode
+ return n
+
+
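+# A small construction sketch (illustrative; "pubkey_hash" is a hypothetical
+# 20-byte value): elements are coerced through __coerce_instance(), so
+#     CScript([OP_DUP, OP_HASH160, pubkey_hash, OP_EQUALVERIFY, OP_CHECKSIG])
+# builds a standard pay-to-pubkey-hash scriptPubKey.
+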
+SIGHASH_ALL = 1
+SIGHASH_NONE = 2
+SIGHASH_SINGLE = 3
+SIGHASH_ANYONECANPAY = 0x80
+
+def FindAndDelete(script, sig):
+ """Consensus critical, see FindAndDelete() in Satoshi codebase"""
+ r = b''
+ last_sop_idx = sop_idx = 0
+ skip = True
+ for (opcode, data, sop_idx) in script.raw_iter():
+ if not skip:
+ r += script[last_sop_idx:sop_idx]
+ last_sop_idx = sop_idx
+ if script[sop_idx:sop_idx + len(sig)] == sig:
+ skip = True
+ else:
+ skip = False
+ if not skip:
+ r += script[last_sop_idx:]
+ return CScript(r)
+
+
+def SignatureHash(script, txTo, inIdx, hashtype):
+ """Consensus-correct SignatureHash
+
+ Returns (hash, err) to precisely match the consensus-critical behavior of
+ the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
+ """
+ HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+
+ if inIdx >= len(txTo.vin):
+ return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
+ txtmp = CTransaction(txTo)
+
+ for txin in txtmp.vin:
+ txin.scriptSig = b''
+ txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
+
+ if (hashtype & 0x1f) == SIGHASH_NONE:
+ txtmp.vout = []
+
+ for i in range(len(txtmp.vin)):
+ if i != inIdx:
+ txtmp.vin[i].nSequence = 0
+
+ elif (hashtype & 0x1f) == SIGHASH_SINGLE:
+ outIdx = inIdx
+ if outIdx >= len(txtmp.vout):
+ return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
+
+ tmp = txtmp.vout[outIdx]
+ txtmp.vout = []
+ for i in range(outIdx):
+ txtmp.vout.append(CTxOut())
+ txtmp.vout.append(tmp)
+
+ for i in range(len(txtmp.vin)):
+ if i != inIdx:
+ txtmp.vin[i].nSequence = 0
+
+ if hashtype & SIGHASH_ANYONECANPAY:
+ tmp = txtmp.vin[inIdx]
+ txtmp.vin = []
+ txtmp.vin.append(tmp)
+
+ s = txtmp.serialize()
+ s += struct.pack(b"<I", hashtype)
+
+ hash = hash256(s)
+
+ return (hash, None)
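+
+# A rough usage sketch (illustrative; "spend_tx" and "script_pubkey" are
+# stand-ins): to sign input 0 of a transaction against the output being spent,
+#     (sighash, err) = SignatureHash(script_pubkey, spend_tx, 0, SIGHASH_ALL)
+# err is None on success; the caller would then sign sighash and append the
+# hashtype byte to the signature (not shown here).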
diff --git a/qa/rpc-tests/script_test.py b/qa/rpc-tests/script_test.py
new file mode 100755
index 0000000000..1ba3a478a8
--- /dev/null
+++ b/qa/rpc-tests/script_test.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env python2
+#
+# Distributed under the MIT/X11 software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+
+'''
+Test notes:
+This test uses the script_valid and script_invalid tests from the unittest
+framework to do end-to-end testing, checking that two nodes agree on
+whether blocks containing a given test script are valid.
+
+We generally ignore the script flags associated with each test (since we lack
+the precision to test each script using those flags in this framework), but
+for tests with SCRIPT_VERIFY_P2SH, we can use a block time after the BIP16
+switchover date to try to test with that flag enabled (and for tests without
+that flag, we use a block time before the switchover date).
+
+NOTE: This test is very slow and may take more than 40 minutes to run.
+'''
+
+from test_framework import ComparisonTestFramework
+from util import *
+from comptool import TestInstance, TestManager
+from mininode import *
+from blocktools import *
+from script import *
+import logging
+import copy
+import json
+
+script_valid_file = "../../src/test/data/script_valid.json"
+script_invalid_file = "../../src/test/data/script_invalid.json"
+
+# Pass in a set of json files to open.
+class ScriptTestFile(object):
+
+ def __init__(self, files):
+ self.files = files
+        self.index = 0
+ self.data = []
+
+ def load_files(self):
+ for f in self.files:
+ self.data.extend(json.loads(open(f).read()))
+
+ # Skip over records that are not long enough to be tests
+ def get_records(self):
+ while (self.index < len(self.data)):
+ if len(self.data[self.index]) >= 3:
+ yield self.data[self.index]
+ self.index += 1
+
+
+# Helper for parsing the flags specified in the .json files
+SCRIPT_VERIFY_NONE = 0
+SCRIPT_VERIFY_P2SH = 1
+SCRIPT_VERIFY_STRICTENC = 1 << 1
+SCRIPT_VERIFY_DERSIG = 1 << 2
+SCRIPT_VERIFY_LOW_S = 1 << 3
+SCRIPT_VERIFY_NULLDUMMY = 1 << 4
+SCRIPT_VERIFY_SIGPUSHONLY = 1 << 5
+SCRIPT_VERIFY_MINIMALDATA = 1 << 6
+SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_NOPS = 1 << 7
+SCRIPT_VERIFY_CLEANSTACK = 1 << 8
+
+flag_map = {
+ "": SCRIPT_VERIFY_NONE,
+ "NONE": SCRIPT_VERIFY_NONE,
+ "P2SH": SCRIPT_VERIFY_P2SH,
+ "STRICTENC": SCRIPT_VERIFY_STRICTENC,
+ "DERSIG": SCRIPT_VERIFY_DERSIG,
+ "LOW_S": SCRIPT_VERIFY_LOW_S,
+ "NULLDUMMY": SCRIPT_VERIFY_NULLDUMMY,
+ "SIGPUSHONLY": SCRIPT_VERIFY_SIGPUSHONLY,
+ "MINIMALDATA": SCRIPT_VERIFY_MINIMALDATA,
+ "DISCOURAGE_UPGRADABLE_NOPS": SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_NOPS,
+ "CLEANSTACK": SCRIPT_VERIFY_CLEANSTACK,
+}
+
+def ParseScriptFlags(flag_string):
+ flags = 0
+ for x in flag_string.split(","):
+ if x in flag_map:
+ flags |= flag_map[x]
+ else:
+ print "Error: unrecognized script flag: ", x
+ return flags
+
+'''
+Given a string that is a scriptsig or scriptpubkey from the .json files above,
+convert it to a CScript()
+'''
+# Replicates behavior from core_read.cpp
+def ParseScript(json_script):
+ script = json_script.split(" ")
+ parsed_script = CScript()
+ for x in script:
+ if len(x) == 0:
+ # Empty string, ignore.
+ pass
+ elif x.isdigit() or (len(x) >= 1 and x[0] == "-" and x[1:].isdigit()):
+ # Number
+ n = int(x, 0)
+ if (n == -1) or (n >= 1 and n <= 16):
+ parsed_script = CScript(bytes(parsed_script) + bytes(CScript([n])))
+ else:
+ parsed_script += CScriptNum(int(x, 0))
+ elif x.startswith("0x"):
+ # Raw hex data, inserted NOT pushed onto stack:
+ for i in xrange(2, len(x), 2):
+ parsed_script = CScript(bytes(parsed_script) + bytes(chr(int(x[i:i+2],16))))
+ elif x.startswith("'") and x.endswith("'") and len(x) >= 2:
+ # Single-quoted string, pushed as data.
+ parsed_script += CScript([x[1:-1]])
+ else:
+ # opcode, e.g. OP_ADD or ADD:
+ tryopname = "OP_" + x
+ if tryopname in OPCODES_BY_NAME:
+ parsed_script += CScriptOp(OPCODES_BY_NAME["OP_" + x])
+ else:
+ print "ParseScript: error parsing '%s'" % x
+ return ""
+ return parsed_script
+
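+# For example (illustrative): ParseScript("2 3 ADD 5 EQUAL") yields
+# CScript([OP_2, OP_3, OP_ADD, OP_5, OP_EQUAL]), while hex tokens such as
+# "0x51" are spliced into the script as raw bytes rather than pushed as data.
+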
+class TestBuilder(object):
+ def create_credit_tx(self, scriptPubKey):
+ # self.tx1 is a coinbase transaction, modeled after the one created by script_tests.cpp
+ # This allows us to reuse signatures created in the unit test framework.
+ self.tx1 = create_coinbase() # this has a bip34 scriptsig,
+ self.tx1.vin[0].scriptSig = CScript([0, 0]) # but this matches the unit tests
+ self.tx1.vout[0].nValue = 0
+ self.tx1.vout[0].scriptPubKey = scriptPubKey
+ self.tx1.rehash()
+ def create_spend_tx(self, scriptSig):
+ self.tx2 = create_transaction(self.tx1, 0, CScript(), 0)
+ self.tx2.vin[0].scriptSig = scriptSig
+ self.tx2.vout[0].scriptPubKey = CScript()
+ self.tx2.rehash()
+ def rehash(self):
+ self.tx1.rehash()
+ self.tx2.rehash()
+
+# This test uses the (default) two nodes provided by ComparisonTestFramework,
+# specified on the command line with --testbinary and --refbinary.
+# See comptool.py
+class ScriptTest(ComparisonTestFramework):
+
+ def run_test(self):
+ # Set up the comparison tool TestManager
+ test = TestManager(self, self.options.tmpdir)
+ test.add_all_connections(self.nodes)
+
+ # Load scripts
+ self.scripts = ScriptTestFile([script_valid_file, script_invalid_file])
+ self.scripts.load_files()
+
+ # Some variables we re-use between test instances (to build blocks)
+ self.tip = None
+ self.block_time = None
+
+ NetworkThread().start() # Start up network handling in another thread
+ test.run()
+
+ def generate_test_instance(self, pubkeystring, scriptsigstring):
+ scriptpubkey = ParseScript(pubkeystring)
+ scriptsig = ParseScript(scriptsigstring)
+
+ test = TestInstance(sync_every_block=False)
+ test_build = TestBuilder()
+ test_build.create_credit_tx(scriptpubkey)
+ test_build.create_spend_tx(scriptsig)
+ test_build.rehash()
+
+ block = create_block(self.tip, test_build.tx1, self.block_time)
+ self.block_time += 1
+ block.solve()
+ self.tip = block.sha256
+ test.blocks_and_transactions = [[block, True]]
+
+ for i in xrange(100):
+ block = create_block(self.tip, create_coinbase(), self.block_time)
+ self.block_time += 1
+ block.solve()
+ self.tip = block.sha256
+ test.blocks_and_transactions.append([block, True])
+
+ block = create_block(self.tip, create_coinbase(), self.block_time)
+ self.block_time += 1
+ block.vtx.append(test_build.tx2)
+ block.hashMerkleRoot = block.calc_merkle_root()
+ block.rehash()
+ block.solve()
+ test.blocks_and_transactions.append([block, None])
+ return test
+
+ # This generates the tests for TestManager.
+ def get_tests(self):
+ self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
+ self.block_time = 1333230000 # before the BIP16 switchover
+
+ '''
+ Create a new block with an anyone-can-spend coinbase
+ '''
+ block = create_block(self.tip, create_coinbase(), self.block_time)
+ self.block_time += 1
+ block.solve()
+ self.tip = block.sha256
+ yield TestInstance(objects=[[block, True]])
+
+ '''
+ Build out to 100 blocks total, maturing the coinbase.
+ '''
+ test = TestInstance(objects=[], sync_every_block=False, sync_every_tx=False)
+ for i in xrange(100):
+ b = create_block(self.tip, create_coinbase(), self.block_time)
+ b.solve()
+ test.blocks_and_transactions.append([b, True])
+ self.tip = b.sha256
+ self.block_time += 1
+ yield test
+
+ ''' Iterate through script tests. '''
+ counter = 0
+ for script_test in self.scripts.get_records():
+ ''' Reset the blockchain to genesis block + 100 blocks. '''
+ if self.nodes[0].getblockcount() > 101:
+ self.nodes[0].invalidateblock(self.nodes[0].getblockhash(102))
+ self.nodes[1].invalidateblock(self.nodes[1].getblockhash(102))
+
+ self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
+
+ [scriptsig, scriptpubkey, flags] = script_test[0:3]
+ flags = ParseScriptFlags(flags)
+
+ # We can use block time to determine whether the nodes should be
+ # enforcing BIP16.
+ #
+ # We intentionally let the block time grow by 1 each time.
+ # This forces the block hashes to differ between tests, so that
+ # a call to invalidateblock doesn't interfere with a later test.
+ if (flags & SCRIPT_VERIFY_P2SH):
+ self.block_time = 1333238400 + counter # Advance to enforcing BIP16
+ else:
+ self.block_time = 1333230000 + counter # Before the BIP16 switchover
+
+ print "Script test: [%s]" % script_test
+
+ yield self.generate_test_instance(scriptpubkey, scriptsig)
+ counter += 1
+
+if __name__ == '__main__':
+ ScriptTest().main()
diff --git a/qa/rpc-tests/test_framework.py b/qa/rpc-tests/test_framework.py
index 4c8a11b821..15a357a340 100755
--- a/qa/rpc-tests/test_framework.py
+++ b/qa/rpc-tests/test_framework.py
@@ -89,8 +89,10 @@ class BitcoinTestFramework(object):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
+ parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
+ help="Don't stop bitcoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
- help="Source directory containing bitcoind/bitcoin-cli (default: %default%)")
+ help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
@@ -128,10 +130,15 @@ class BitcoinTestFramework(object):
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
- if not self.options.nocleanup:
- print("Cleaning up")
+ if not self.options.noshutdown:
+ print("Stopping nodes")
stop_nodes(self.nodes)
wait_bitcoinds()
+ else:
+ print("Note: bitcoinds were not stopped and may still be running")
+
+ if not self.options.nocleanup and not self.options.noshutdown:
+ print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if success:
@@ -140,3 +147,34 @@ class BitcoinTestFramework(object):
else:
print("Failed")
sys.exit(1)
+
+
+# Test framework for doing p2p comparison testing, which sets up some bitcoind
+# binaries:
+# 1 binary: test binary
+# 2 binaries: 1 test binary, 1 ref binary
+# n>2 binaries: 1 test binary, n-1 ref binaries
+
+class ComparisonTestFramework(BitcoinTestFramework):
+
+ # Can override the num_nodes variable to indicate how many nodes to run.
+ def __init__(self):
+ self.num_nodes = 2
+
+ def add_options(self, parser):
+ parser.add_option("--testbinary", dest="testbinary",
+ default=os.getenv("BITCOIND", "bitcoind"),
+ help="bitcoind binary to test")
+ parser.add_option("--refbinary", dest="refbinary",
+ default=os.getenv("BITCOIND", "bitcoind"),
+ help="bitcoind binary to use for reference nodes (if any)")
+
+ def setup_chain(self):
+ print "Initializing test directory "+self.options.tmpdir
+ initialize_chain_clean(self.options.tmpdir, self.num_nodes)
+
+ def setup_network(self):
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
+ extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
+ binary=[self.options.testbinary] +
+ [self.options.refbinary]*(self.num_nodes-1))
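The ComparisonTestFramework above only wires up the binaries and a clean chain; a concrete test subclasses it and drives TestManager from run_test()/get_tests(), as script_test.py does. A minimal sketch of such a subclass (illustrative only; module paths and helper names are assumed from the scripts touched by this patch, not quoted from it):

    # Hypothetical minimal ComparisonTestFramework subclass (sketch, not part of the patch).
    from test_framework import ComparisonTestFramework
    from comptool import TestManager, TestInstance
    from mininode import NetworkThread

    class ExampleComparisonTest(ComparisonTestFramework):
        def __init__(self):
            self.num_nodes = 2          # one --testbinary node, one --refbinary node

        def run_test(self):
            test = TestManager(self, self.options.tmpdir)
            test.add_all_connections(self.nodes)
            NetworkThread().start()     # p2p handling runs on its own thread
            test.run()                  # pulls TestInstances from get_tests()

        def get_tests(self):
            # Each yielded TestInstance is delivered to every node, and the
            # resulting tips/mempools are compared across the binaries.
            yield TestInstance(objects=[])

    if __name__ == '__main__':
        ExampleComparisonTest().main()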
diff --git a/qa/rpc-tests/util.py b/qa/rpc-tests/util.py
index cf789f48e2..3b4a10e46b 100644
--- a/qa/rpc-tests/util.py
+++ b/qa/rpc-tests/util.py
@@ -88,8 +88,12 @@ def initialize_chain(test_dir):
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
+ if os.getenv("PYTHON_DEBUG", ""):
+ print "initialize_chain: bitcoind started, calling bitcoin-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
+ if os.getenv("PYTHON_DEBUG", ""):
+ print "initialize_chain: bitcoin-cli -rpcwait getblockcount completed"
devnull.close()
rpcs = []
for i in range(4):
@@ -158,18 +162,24 @@ def _rpchost_to_args(rpchost):
rv += ['-rpcport=' + rpcport]
return rv
-def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None):
+def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a bitcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
- args = [ os.getenv("BITCOIND", "bitcoind"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
+ if binary is None:
+ binary = os.getenv("BITCOIND", "bitcoind")
+ args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
+ if os.getenv("PYTHON_DEBUG", ""):
+ print "start_node: bitcoind started, calling bitcoin-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
+ if os.getenv("PYTHON_DEBUG", ""):
+ print "start_node: calling bitcoin-cli -rpcwait getblockcount returned"
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
if timewait is not None:
@@ -179,12 +189,13 @@ def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None):
proxy.url = url # store URL on proxy for info
return proxy
-def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
+def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple bitcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
- return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
+ if binary is None: binary = [ None for i in range(num_nodes) ]
+ return [ start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
@@ -272,7 +283,7 @@ def send_zeropri_transaction(from_node, to_node, amount, fee):
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
- then using it's output
+ then using its output
"""
# Create a send-to-self with confirmed inputs:
diff --git a/qa/rpc-tests/wallet.py b/qa/rpc-tests/wallet.py
index 08032fc538..b8965b3662 100755
--- a/qa/rpc-tests/wallet.py
+++ b/qa/rpc-tests/wallet.py
@@ -62,7 +62,7 @@ class WalletTest (BitcoinTestFramework):
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
- # Have node0 mine a block, thus they will collect their own fee.
+ # Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
diff --git a/src/addrman.h b/src/addrman.h
index 5badd4ef95..b72dda49d1 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -17,8 +17,8 @@
#include <stdint.h>
#include <vector>
-/**
- * Extended statistics about a CAddress
+/**
+ * Extended statistics about a CAddress
*/
class CAddrInfo : public CAddress
{
@@ -105,14 +105,14 @@ public:
/** Stochastic address manager
*
* Design goals:
- * * Keep the address tables in-memory, and asynchronously dump the entire to able in peers.dat.
+ * * Keep the address tables in-memory, and asynchronously dump the entire table to peers.dat.
* * Make sure no (localized) attacker can fill the entire table with his nodes/addresses.
*
* To that end:
* * Addresses are organized into buckets.
* * Address that have not yet been tried go into 1024 "new" buckets.
* * Based on the address range (/16 for IPv4) of source of the information, 64 buckets are selected at random
- * * The actual bucket is chosen from one of these, based on the range the address itself is located.
+ * * The actual bucket is chosen from one of these, based on the range in which the address itself is located.
* * One single address can occur in up to 8 different buckets, to increase selection chances for addresses that
* are seen frequently. The chance for increasing this multiplicity decreases exponentially.
* * When adding a new address to a full bucket, a randomly chosen entry (with a bias favoring less recently seen
diff --git a/src/base58.h b/src/base58.h
index 8de90046a9..787979c827 100644
--- a/src/base58.h
+++ b/src/base58.h
@@ -6,10 +6,10 @@
/**
* Why base-58 instead of standard base-64 encoding?
* - Don't want 0OIl characters that look the same in some fonts and
- * could be used to create visually identical looking account numbers.
- * - A string with non-alphanumeric characters is not as easily accepted as an account number.
+ * could be used to create visually identical looking data.
+ * - A string with non-alphanumeric characters is not as easily accepted as input.
* - E-mail usually won't line-break if there's no punctuation to break at.
- * - Double-clicking selects the whole number as one word if it's all alphanumeric.
+ * - Double-clicking selects the whole string as one word if it's all alphanumeric.
*/
#ifndef BITCOIN_BASE58_H
#define BITCOIN_BASE58_H
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index 2fa8de6fd8..1269d7a119 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -12,8 +12,6 @@
#include <boost/filesystem/operations.hpp>
-#define _(x) std::string(x) /* Keep the _() around in case gettext or such will be used later to translate non-UI */
-
using namespace std;
using namespace json_spirit;
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index f1c1c0ff8b..d024b48020 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -11,7 +11,6 @@
#include "primitives/transaction.h"
#include "script/script.h"
#include "script/sign.h"
-#include "ui_interface.h" // for _(...)
#include "univalue/univalue.h"
#include "util.h"
#include "utilmoneystr.h"
@@ -26,7 +25,6 @@ using namespace std;
static bool fCreateBlank;
static map<string,UniValue> registers;
-CClientUIInterface uiInterface;
static bool AppInitRawTx(int argc, char* argv[])
{
diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp
index 2172f4a21b..eeca8655c9 100644
--- a/src/bitcoind.cpp
+++ b/src/bitcoind.cpp
@@ -8,7 +8,6 @@
#include "init.h"
#include "main.h"
#include "noui.h"
-#include "ui_interface.h"
#include "util.h"
#include <boost/algorithm/string/predicate.hpp>
diff --git a/src/bloom.cpp b/src/bloom.cpp
index e60576f4b4..36cba491c4 100644
--- a/src/bloom.cpp
+++ b/src/bloom.cpp
@@ -21,22 +21,33 @@
using namespace std;
CBloomFilter::CBloomFilter(unsigned int nElements, double nFPRate, unsigned int nTweakIn, unsigned char nFlagsIn) :
-/**
- * The ideal size for a bloom filter with a given number of elements and false positive rate is:
- * - nElements * log(fp rate) / ln(2)^2
- * We ignore filter parameters which will create a bloom filter larger than the protocol limits
- */
-vData(min((unsigned int)(-1 / LN2SQUARED * nElements * log(nFPRate)), MAX_BLOOM_FILTER_SIZE * 8) / 8),
-/**
- * The ideal number of hash functions is filter size * ln(2) / number of elements
- * Again, we ignore filter parameters which will create a bloom filter with more hash functions than the protocol limits
- * See https://en.wikipedia.org/wiki/Bloom_filter for an explanation of these formulas
- */
-isFull(false),
-isEmpty(false),
-nHashFuncs(min((unsigned int)(vData.size() * 8 / nElements * LN2), MAX_HASH_FUNCS)),
-nTweak(nTweakIn),
-nFlags(nFlagsIn)
+ /**
+ * The ideal size for a bloom filter with a given number of elements and false positive rate is:
+ * - nElements * log(fp rate) / ln(2)^2
+ * We ignore filter parameters which will create a bloom filter larger than the protocol limits
+ */
+ vData(min((unsigned int)(-1 / LN2SQUARED * nElements * log(nFPRate)), MAX_BLOOM_FILTER_SIZE * 8) / 8),
+ /**
+ * The ideal number of hash functions is filter size * ln(2) / number of elements
+ * Again, we ignore filter parameters which will create a bloom filter with more hash functions than the protocol limits
+ * See https://en.wikipedia.org/wiki/Bloom_filter for an explanation of these formulas
+ */
+ isFull(false),
+ isEmpty(false),
+ nHashFuncs(min((unsigned int)(vData.size() * 8 / nElements * LN2), MAX_HASH_FUNCS)),
+ nTweak(nTweakIn),
+ nFlags(nFlagsIn)
+{
+}
+
+// Private constructor used by CRollingBloomFilter
+CBloomFilter::CBloomFilter(unsigned int nElements, double nFPRate, unsigned int nTweakIn) :
+ vData((unsigned int)(-1 / LN2SQUARED * nElements * log(nFPRate)) / 8),
+ isFull(false),
+ isEmpty(true),
+ nHashFuncs((unsigned int)(vData.size() * 8 / nElements * LN2)),
+ nTweak(nTweakIn),
+ nFlags(BLOOM_UPDATE_NONE)
{
}
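The two sizing formulas in the constructor above can be checked numerically. A small sketch (plain Python, constants copied from bloom.cpp, values chosen purely for illustration):

    import math

    LN2SQUARED = 0.4804530139182014   # ln(2)^2, as in bloom.cpp
    LN2 = 0.6931471805599453
    MAX_BLOOM_FILTER_SIZE = 36000     # bytes, protocol limit
    MAX_HASH_FUNCS = 50

    def bloom_parameters(n_elements, fp_rate):
        # Ideal size in bits: -n * ln(p) / ln(2)^2, capped at the protocol limit.
        n_bytes = min(int(-1 / LN2SQUARED * n_elements * math.log(fp_rate)),
                      MAX_BLOOM_FILTER_SIZE * 8) // 8
        # Ideal hash-function count: (filter bits per element) * ln(2), capped.
        n_hash_funcs = min(int(n_bytes * 8 / n_elements * LN2), MAX_HASH_FUNCS)
        return n_bytes, n_hash_funcs

    print(bloom_parameters(1000, 0.001))   # roughly (1797, 9)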
@@ -197,3 +208,43 @@ void CBloomFilter::UpdateEmptyFull()
isFull = full;
isEmpty = empty;
}
+
+CRollingBloomFilter::CRollingBloomFilter(unsigned int nElements, double fpRate, unsigned int nTweak) :
+ b1(nElements * 2, fpRate, nTweak), b2(nElements * 2, fpRate, nTweak)
+{
+ // Implemented using two bloom filters of 2 * nElements each.
+ // We fill them up, and clear them, staggered, every nElements
+ // inserted, so at least one always contains the last nElements
+ // inserted.
+ nBloomSize = nElements * 2;
+ nInsertions = 0;
+}
+
+void CRollingBloomFilter::insert(const std::vector<unsigned char>& vKey)
+{
+ if (nInsertions == 0) {
+ b1.clear();
+ } else if (nInsertions == nBloomSize / 2) {
+ b2.clear();
+ }
+ b1.insert(vKey);
+ b2.insert(vKey);
+ if (++nInsertions == nBloomSize) {
+ nInsertions = 0;
+ }
+}
+
+bool CRollingBloomFilter::contains(const std::vector<unsigned char>& vKey) const
+{
+ if (nInsertions < nBloomSize / 2) {
+ return b2.contains(vKey);
+ }
+ return b1.contains(vKey);
+}
+
+void CRollingBloomFilter::clear()
+{
+ b1.clear();
+ b2.clear();
+ nInsertions = 0;
+}
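The staggered two-filter scheme described in the CRollingBloomFilter comment can be sketched with plain Python sets standing in for the bloom filters (sets are exact, so this only shows the rolling-window bookkeeping, not the probabilistic part):

    class RollingWindowSketch(object):
        """Keeps at least the last n_elements inserted items, using two staggered
        containers of capacity 2 * n_elements, mirroring CRollingBloomFilter."""
        def __init__(self, n_elements):
            self.size = n_elements * 2
            self.insertions = 0
            self.b1 = set()
            self.b2 = set()

        def insert(self, key):
            if self.insertions == 0:
                self.b1.clear()
            elif self.insertions == self.size // 2:
                self.b2.clear()
            self.b1.add(key)
            self.b2.add(key)
            self.insertions += 1
            if self.insertions == self.size:
                self.insertions = 0

        def contains(self, key):
            # Query whichever filter was cleared less recently.
            if self.insertions < self.size // 2:
                return key in self.b2
            return key in self.b1

    w = RollingWindowSketch(3)
    for k in range(10):
        w.insert(k)
    assert all(w.contains(k) for k in (7, 8, 9))   # the last 3 are always present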
diff --git a/src/bloom.h b/src/bloom.h
index 191ffa19b3..bb17f59c86 100644
--- a/src/bloom.h
+++ b/src/bloom.h
@@ -32,14 +32,14 @@ enum bloomflags
/**
* BloomFilter is a probabilistic filter which SPV clients provide
- * so that we can filter the transactions we sends them.
+ * so that we can filter the transactions we send them.
*
* This allows for significantly more efficient transaction and block downloads.
*
- * Because bloom filters are probabilistic, an SPV node can increase the false-
- * positive rate, making us send them transactions which aren't actually theirs,
+ * Because bloom filters are probabilistic, an SPV node can increase the false-
+ * positive rate, making us send it transactions which aren't actually its,
* allowing clients to trade more bandwidth for more privacy by obfuscating which
- * keys are owned by them.
+ * keys are controlled by them.
*/
class CBloomFilter
{
@@ -53,6 +53,10 @@ private:
unsigned int Hash(unsigned int nHashNum, const std::vector<unsigned char>& vDataToHash) const;
+ // Private constructor for CRollingBloomFilter, no restrictions on size
+ CBloomFilter(unsigned int nElements, double nFPRate, unsigned int nTweak);
+ friend class CRollingBloomFilter;
+
public:
/**
* Creates a new bloom filter which will provide the given fp rate when filled with the given number of elements
@@ -97,4 +101,28 @@ public:
void UpdateEmptyFull();
};
+/**
+ * RollingBloomFilter is a probabilistic "keep track of most recently inserted" set.
+ * Construct it with the number of items to keep track of, and a false-positive rate.
+ *
+ * contains(item) will always return true if item was one of the last N things
+ * insert()'ed ... but may also return true for items that were not inserted.
+ */
+class CRollingBloomFilter
+{
+public:
+ CRollingBloomFilter(unsigned int nElements, double nFPRate, unsigned int nTweak);
+
+ void insert(const std::vector<unsigned char>& vKey);
+ bool contains(const std::vector<unsigned char>& vKey) const;
+
+ void clear();
+
+private:
+ unsigned int nBloomSize;
+ unsigned int nInsertions;
+ CBloomFilter b1, b2;
+};
+
+
#endif // BITCOIN_BLOOM_H
diff --git a/src/chain.h b/src/chain.h
index 02f53cd2f2..01be2d6e5c 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -74,7 +74,7 @@ enum BlockStatus {
*/
BLOCK_VALID_TRANSACTIONS = 3,
- //! Outputs do not overspend inputs, no double spends, coinbase output ok, immature coinbase spends, BIP30.
+ //! Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends, BIP30.
//! Implies all parents are also at least CHAIN.
BLOCK_VALID_CHAIN = 4,
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 589c7b5472..1e044ad491 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -5,7 +5,6 @@
#include "chainparams.h"
-#include "random.h"
#include "util.h"
#include "utilstrencodings.h"
@@ -15,35 +14,11 @@
using namespace std;
-struct SeedSpec6 {
- uint8_t addr[16];
- uint16_t port;
-};
-
#include "chainparamsseeds.h"
/**
* Main network
*/
-
-//! Convert the pnSeeds6 array into usable address objects.
-static void convertSeed6(std::vector<CAddress> &vSeedsOut, const SeedSpec6 *data, unsigned int count)
-{
- // It'll only connect to one or two seed nodes because once it connects,
- // it'll get a pile of addresses with newer timestamps.
- // Seed nodes are given a random 'last seen time' of between one and two
- // weeks ago.
- const int64_t nOneWeek = 7*24*60*60;
- for (unsigned int i = 0; i < count; i++)
- {
- struct in6_addr ip;
- memcpy(&ip, data[i].addr, sizeof(ip));
- CAddress addr(CService(ip, data[i].port));
- addr.nTime = GetTime() - GetRand(nOneWeek) - nOneWeek;
- vSeedsOut.push_back(addr);
- }
-}
-
/**
* What makes a good checkpoint block?
* + Is surrounded by blocks with reasonable timestamps
@@ -112,7 +87,7 @@ public:
/**
* The message start string is designed to be unlikely to occur in normal data.
* The characters are rarely used upper ASCII, not valid as UTF-8, and produce
- * a large 4-byte int at any alignment.
+ * a large 32-bit integer with any alignment.
*/
pchMessageStart[0] = 0xf9;
pchMessageStart[1] = 0xbe;
@@ -124,9 +99,10 @@ public:
nPruneAfterHeight = 100000;
/**
- * Build the genesis block. Note that the output of the genesis coinbase cannot
- * be spent as it did not originally exist in the database.
- *
+ * Build the genesis block. Note that the output of its generation
+ * transaction cannot be spent since it did not originally exist in the
+ * database.
+ *
* CBlock(hash=000000000019d6, ver=1, hashPrevBlock=00000000000000, hashMerkleRoot=4a5e1e, nTime=1231006505, nBits=1d00ffff, nNonce=2083236893, vtx=1)
* CTransaction(hash=4a5e1e, ver=1, vin.size=1, vout.size=1, nLockTime=0)
* CTxIn(COutPoint(000000, -1), coinbase 04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73)
@@ -164,7 +140,7 @@ public:
base58Prefixes[EXT_PUBLIC_KEY] = boost::assign::list_of(0x04)(0x88)(0xB2)(0x1E).convert_to_container<std::vector<unsigned char> >();
base58Prefixes[EXT_SECRET_KEY] = boost::assign::list_of(0x04)(0x88)(0xAD)(0xE4).convert_to_container<std::vector<unsigned char> >();
- convertSeed6(vFixedSeeds, pnSeed6_main, ARRAYLEN(pnSeed6_main));
+ vFixedSeeds = std::vector<SeedSpec6>(pnSeed6_main, pnSeed6_main + ARRAYLEN(pnSeed6_main));
fRequireRPCPassword = true;
fMiningRequiresPeers = true;
@@ -220,7 +196,7 @@ public:
base58Prefixes[EXT_PUBLIC_KEY] = boost::assign::list_of(0x04)(0x35)(0x87)(0xCF).convert_to_container<std::vector<unsigned char> >();
base58Prefixes[EXT_SECRET_KEY] = boost::assign::list_of(0x04)(0x35)(0x83)(0x94).convert_to_container<std::vector<unsigned char> >();
- convertSeed6(vFixedSeeds, pnSeed6_test, ARRAYLEN(pnSeed6_test));
+ vFixedSeeds = std::vector<SeedSpec6>(pnSeed6_test, pnSeed6_test + ARRAYLEN(pnSeed6_test));
fRequireRPCPassword = true;
fMiningRequiresPeers = true;
diff --git a/src/chainparams.h b/src/chainparams.h
index 5e974123dc..bfefe242b7 100644
--- a/src/chainparams.h
+++ b/src/chainparams.h
@@ -19,6 +19,12 @@ struct CDNSSeedData {
CDNSSeedData(const std::string &strName, const std::string &strHost) : name(strName), host(strHost) {}
};
+struct SeedSpec6 {
+ uint8_t addr[16];
+ uint16_t port;
+};
+
+
/**
* CChainParams defines various tweakable parameters of a given instance of the
* Bitcoin system. There are three: the main network on which people trade goods
@@ -56,7 +62,7 @@ public:
bool MiningRequiresPeers() const { return fMiningRequiresPeers; }
/** Default value for -checkmempool and -checkblockindex argument */
bool DefaultConsistencyChecks() const { return fDefaultConsistencyChecks; }
- /** Make standard checks */
+ /** Policy: Filter transactions that do not match well-defined patterns */
bool RequireStandard() const { return fRequireStandard; }
int64_t PruneAfterHeight() const { return nPruneAfterHeight; }
/** Make miner stop after a block is found. In RPC, don't return until nGenProcLimit blocks are generated */
@@ -67,7 +73,7 @@ public:
std::string NetworkIDString() const { return strNetworkID; }
const std::vector<CDNSSeedData>& DNSSeeds() const { return vSeeds; }
const std::vector<unsigned char>& Base58Prefix(Base58Type type) const { return base58Prefixes[type]; }
- const std::vector<CAddress>& FixedSeeds() const { return vFixedSeeds; }
+ const std::vector<SeedSpec6>& FixedSeeds() const { return vFixedSeeds; }
virtual const Checkpoints::CCheckpointData& Checkpoints() const = 0;
protected:
CChainParams() {}
@@ -83,7 +89,7 @@ protected:
std::vector<unsigned char> base58Prefixes[MAX_BASE58_TYPES];
std::string strNetworkID;
CBlock genesis;
- std::vector<CAddress> vFixedSeeds;
+ std::vector<SeedSpec6> vFixedSeeds;
bool fRequireRPCPassword;
bool fMiningRequiresPeers;
bool fDefaultConsistencyChecks;
@@ -93,8 +99,8 @@ protected:
};
/**
- * Return the currently selected parameters. This won't change after app startup
- * outside of the unit tests.
+ * Return the currently selected parameters. This won't change after app
+ * startup, except for unit tests.
*/
const CChainParams &Params();
diff --git a/src/chainparamsbase.h b/src/chainparamsbase.h
index 421a3a06ff..4369d0aef7 100644
--- a/src/chainparamsbase.h
+++ b/src/chainparamsbase.h
@@ -34,8 +34,8 @@ protected:
};
/**
- * Return the currently selected parameters. This won't change after app startup
- * outside of the unit tests.
+ * Return the currently selected parameters. This won't change after app
+ * startup, except for unit tests.
*/
const CBaseChainParams& BaseParams();
diff --git a/src/checkpoints.cpp b/src/checkpoints.cpp
index 71579bb309..e3d42c2957 100644
--- a/src/checkpoints.cpp
+++ b/src/checkpoints.cpp
@@ -15,9 +15,10 @@
namespace Checkpoints {
/**
- * How many times we expect transactions after the last checkpoint to
- * be slower. This number is a compromise, as it can't be accurate for
- * every system. When reindexing from a fast disk with a slow CPU, it
+ * How many times slower we expect checking transactions after the last
+ * checkpoint to be (from checking signatures, which is skipped up to the
+ * last checkpoint). This number is a compromise, as it can't be accurate
+ * for every system. When reindexing from a fast disk with a slow CPU, it
* can be up to 20, while when downloading from a slow network with a
* fast multicore CPU, it won't be much higher than 1.
*/
diff --git a/src/checkpoints.h b/src/checkpoints.h
index 29dc5f83a9..b0e5076788 100644
--- a/src/checkpoints.h
+++ b/src/checkpoints.h
@@ -11,7 +11,7 @@
class CBlockIndex;
-/**
+/**
* Block-chain checkpoints are compiled-in sanity checks.
* They are updated every release or three.
*/
diff --git a/src/checkqueue.h b/src/checkqueue.h
index 6f6b97e3a7..20ba25bb41 100644
--- a/src/checkqueue.h
+++ b/src/checkqueue.h
@@ -54,7 +54,7 @@ private:
/**
* Number of verifications that haven't completed yet.
- * This includes elements that are not anymore in queue, but still in
+ * This includes elements that are no longer queued, but still in the
* worker's own batches.
*/
unsigned int nTodo;
@@ -81,7 +81,7 @@ private:
fAllOk &= fOk;
nTodo -= nNow;
if (nTodo == 0 && !fMaster)
- // We processed the last element; inform the master he or she can exit and return the result
+ // We processed the last element; inform the master it can exit and return the result
condMaster.notify_one();
} else {
// first iteration
@@ -136,7 +136,7 @@ public:
Loop();
}
- //! Wait until execution finishes, and return whether all evaluations where successful.
+ //! Wait until execution finishes, and return whether all evaluations were successful.
bool Wait()
{
return Loop(true);
diff --git a/src/init.cpp b/src/init.cpp
index b648a237bf..f4caa4717f 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -52,7 +52,7 @@ bool fFeeEstimatesInitialized = false;
#ifdef WIN32
// Win32 LevelDB doesn't use filedescriptors, and the ones used for
-// accessing block files, don't count towards to fd_set size limit
+// accessing block files don't count towards the fd_set size limit
// anyway.
#define MIN_CORE_FILEDESCRIPTORS 0
#else
@@ -68,7 +68,7 @@ enum BindFlags {
};
static const char* FEE_ESTIMATES_FILENAME="fee_estimates.dat";
-CClientUIInterface uiInterface;
+CClientUIInterface uiInterface; // Declared but not defined in ui_interface.h
//////////////////////////////////////////////////////////////////////////////
//
@@ -334,7 +334,7 @@ strUsage += HelpMessageOpt("-reindex", _("Rebuild block chain index from current
strUsage += HelpMessageOpt("-sendfreetransactions", strprintf(_("Send transactions as zero-fee transactions if possible (default: %u)"), 0));
strUsage += HelpMessageOpt("-spendzeroconfchange", strprintf(_("Spend unconfirmed change when sending transactions (default: %u)"), 1));
strUsage += HelpMessageOpt("-txconfirmtarget=<n>", strprintf(_("If paytxfee is not set, include enough fee so transactions begin confirmation on average within n blocks (default: %u)"), 1));
- strUsage += HelpMessageOpt("-maxtxfee=<amt>", strprintf(_("Maximum total fees to use in a single wallet transaction, setting too low may abort large transactions (default: %s)"),
+ strUsage += HelpMessageOpt("-maxtxfee=<amt>", strprintf(_("Maximum total fees to use in a single wallet transaction; setting this too low may abort large transactions (default: %s)"),
FormatMoney(maxTxFee)));
strUsage += HelpMessageOpt("-upgradewallet", _("Upgrade wallet to latest format") + " " + _("on startup"));
strUsage += HelpMessageOpt("-wallet=<file>", _("Specify wallet file (within data directory)") + " " + strprintf(_("(default: %s)"), "wallet.dat"));
diff --git a/src/main.cpp b/src/main.cpp
index 2c12e08492..7d7e670119 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -142,8 +142,9 @@ namespace {
uint32_t nBlockSequenceId = 1;
/**
- * Sources of received blocks, to be able to send them reject messages or ban
- * them, if processing happens afterwards. Protected by cs_main.
+ * Sources of received blocks, saved to be able to send them reject
+ * messages or ban them when processing happens afterwards. Protected by
+ * cs_main.
*/
map<uint256, NodeId> mapBlockSource;
@@ -389,7 +390,7 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBl
}
// If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
- // of their current tip anymore. Go back enough to fix that.
+ // of its current tip anymore. Go back enough to fix that.
state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
return;
@@ -941,7 +942,7 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
// do all inputs exist?
// Note that this does not check for the presence of actual outputs (see the next check for that),
- // only helps filling in pfMissingInputs (to determine missing vs spent).
+ // and only helps with filling in pfMissingInputs (to determine missing vs spent).
BOOST_FOREACH(const CTxIn txin, tx.vin) {
if (!view.HaveCoins(txin.prevout.hash)) {
if (pfMissingInputs)
@@ -1277,8 +1278,8 @@ void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
pfork = pfork->pprev;
}
- // We define a condition which we should warn the user about as a fork of at least 7 blocks
- // who's tip is within 72 blocks (+/- 12 hours if no one mines it) of ours
+ // We define a condition which we should warn the user about as a fork of at least 7 blocks
+ // with a tip within 72 blocks (+/- 12 hours if no one mines it) of ours
// We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
// hash rate operating on the fork.
// or a chain that is entirely longer than ours and invalid (note that this should be detected by both)
@@ -1719,9 +1720,9 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
// See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
// This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
// already refuses previously-known transaction ids entirely.
- // This rule was originally applied all blocks whose timestamp was after March 15, 2012, 0:00 UTC.
+ // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
// Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
- // two in the chain that violate it. This prevents exploiting the issue against nodes in their
+ // two in the chain that violate it. This prevents exploiting the issue against nodes during their
// initial block download.
bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock invocations which don't have a hash.
!((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
@@ -1984,7 +1985,7 @@ void static UpdateTip(CBlockIndex *pindexNew) {
if (nUpgraded > 100/2)
{
// strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
- strMiscWarning = _("Warning: This version is obsolete, upgrade required!");
+ strMiscWarning = _("Warning: This version is obsolete; upgrade required!");
CAlert::Notify(strMiscWarning, true);
fWarned = true;
}
@@ -2315,7 +2316,7 @@ bool InvalidateBlock(CValidationState& state, CBlockIndex *pindex) {
}
// The resulting new best tip may not be in setBlockIndexCandidates anymore, so
- // add them again.
+ // add it again.
BlockMap::iterator it = mapBlockIndex.begin();
while (it != mapBlockIndex.end()) {
if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
@@ -3675,7 +3676,6 @@ bool static AlreadyHave(const CInv& inv)
return true;
}
-
void static ProcessGetData(CNode* pfrom)
{
std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
@@ -3703,11 +3703,13 @@ void static ProcessGetData(CNode* pfrom)
if (chainActive.Contains(mi->second)) {
send = true;
} else {
+ static const int nOneMonth = 30 * 24 * 60 * 60;
// To prevent fingerprinting attacks, only send blocks outside of the active
- // chain if they are valid, and no more than a month older than the best header
- // chain we know about.
+ // chain if they are valid, and no more than a month older (both in time, and in
+ // best equivalent proof of work) than the best header chain we know about.
send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
- (mi->second->GetBlockTime() > pindexBestHeader->GetBlockTime() - 30 * 24 * 60 * 60);
+ (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
+ (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, Params().GetConsensus()) < nOneMonth);
if (!send) {
LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
}
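The anti-fingerprinting check above now limits served stale blocks by both wall-clock age and proof-of-work-equivalent age. A condensed sketch of the decision (plain Python, simple integers standing in for the block index fields):

    ONE_MONTH = 30 * 24 * 60 * 60   # seconds, matching nOneMonth above

    def should_send_stale_block(is_valid_scripts, block_time, best_header_time,
                                proof_equivalent_time):
        # Serve a block that is off the active chain only if it is fully validated
        # and no more than roughly one month behind the best known header, both by
        # timestamp and by proof-of-work-equivalent time (GetBlockProofEquivalentTime).
        return (is_valid_scripts
                and best_header_time - block_time < ONE_MONTH
                and proof_equivalent_time < ONE_MONTH)

    # A two-week-old stale block is still served; a two-month-old one is not.
    assert should_send_stale_block(True, 0, 14 * 24 * 60 * 60, 14 * 24 * 60 * 60)
    assert not should_send_stale_block(True, 0, 60 * 24 * 60 * 60, 60 * 24 * 60 * 60)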
@@ -3732,7 +3734,7 @@ void static ProcessGetData(CNode* pfrom)
pfrom->PushMessage("merkleblock", merkleBlock);
// CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
// This avoids hurting performance by pointlessly requiring a round-trip
- // Note that there is currently no way for a node to request any single transactions we didnt send here -
+ // Note that there is currently no way for a node to request any single transactions we didn't send here -
// they must either disconnect and retry or request the full block.
// Thus, the protocol spec specified allows for us to provide duplicate txn here,
// however we MUST always provide at least what the remote peer needs
@@ -3745,7 +3747,7 @@ void static ProcessGetData(CNode* pfrom)
// no response
}
- // Trigger them to send a getblocks request for the next batch of inventory
+ // Trigger the peer node to send a getblocks request for the next batch of inventory
if (inv.hash == pfrom->hashContinue)
{
// Bypass PushInventory, this must send even if redundant,
@@ -3994,7 +3996,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
{
LOCK(cs_vNodes);
// Use deterministic randomness to send to the same nodes for 24 hours
- // at a time so the setAddrKnowns of the chosen nodes prevent repeats
+ // at a time so the addrKnowns of the chosen nodes prevent repeats
static uint256 hashSalt;
if (hashSalt.IsNull())
hashSalt = GetRandHash();
@@ -4059,7 +4061,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
if (inv.type == MSG_BLOCK) {
UpdateBlockAvailability(pfrom->GetId(), inv.hash);
if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
- // First request the headers preceeding the announced block. In the normal fully-synced
+ // First request the headers preceding the announced block. In the normal fully-synced
// case where a new block is announced that succeeds the current tip (no reorganization),
// there are no such headers.
// Secondly, and only when we are close to being synced, we request the announced block directly,
@@ -4141,8 +4143,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
if (--nLimit <= 0)
{
- // When this block is requested, we'll send an inv that'll make them
- // getblocks the next batch of inventory.
+ // When this block is requested, we'll send an inv that'll
+ // trigger the peer to getblocks the next batch of inventory.
LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
pfrom->hashContinue = pindex->GetBlockHash();
break;
@@ -4379,9 +4381,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// This asymmetric behavior for inbound and outbound connections was introduced
// to prevent a fingerprinting attack: an attacker can send specific fake addresses
- // to users' AddrMan and later request them by sending getaddr messages.
- // Making users (which are behind NAT and can only make outgoing connections) ignore
- // getaddr message mitigates the attack.
+ // to users' AddrMan and later request them by sending getaddr messages.
+ // Making nodes which are behind NAT and can only make outgoing connections ignore
+ // the getaddr message mitigates the attack.
else if ((strCommand == "getaddr") && (pfrom->fInbound))
{
pfrom->vAddrToSend.clear();
@@ -4466,7 +4468,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Nonce mismatches are normal when pings are overlapping
sProblem = "Nonce mismatch";
if (nonce == 0) {
- // This is most likely a bug in another implementation somewhere, cancel this ping
+ // This is most likely a bug in another implementation somewhere; cancel this ping
bPingFinished = true;
sProblem = "Nonce zero";
}
@@ -4475,7 +4477,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
sProblem = "Unsolicited pong without ping";
}
} else {
- // This is most likely a bug in another implementation somewhere, cancel this ping
+ // This is most likely a bug in another implementation somewhere; cancel this ping
bPingFinished = true;
sProblem = "Short payload";
}
@@ -4734,7 +4736,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
{
const Consensus::Params& consensusParams = Params().GetConsensus();
{
- // Don't send anything until we get their version message
+ // Don't send anything until we get its version message
if (pto->nVersion == 0)
return true;
@@ -4778,9 +4780,9 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
{
- // Periodically clear setAddrKnown to allow refresh broadcasts
+ // Periodically clear addrKnown to allow refresh broadcasts
if (nLastRebroadcast)
- pnode->setAddrKnown.clear();
+ pnode->addrKnown.clear();
// Rebroadcast our address
AdvertizeLocal(pnode);
@@ -4798,9 +4800,9 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
vAddr.reserve(pto->vAddrToSend.size());
BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
{
- // returns true if wasn't already contained in the set
- if (pto->setAddrKnown.insert(addr).second)
+ if (!pto->addrKnown.contains(addr.GetKey()))
{
+ pto->addrKnown.insert(addr.GetKey());
vAddr.push_back(addr);
// receiver rejects addr messages larger than 1000
if (vAddr.size() >= 1000)
@@ -4918,7 +4920,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
// In case there is a block that has been in flight from this peer for (2 + 0.5 * N) times the block interval
// (with N the number of validated blocks that were in flight at the time it was requested), disconnect due to
// timeout. We compensate for in-flight blocks to prevent killing off peers due to our own downstream link
- // being saturated. We only count validated in-flight blocks so peers can't advertize nonexisting block hashes
+ // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
// to unreasonably increase our timeout.
if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0 && state.vBlocksInFlight.front().nTime < nNow - 500000 * consensusParams.nPowTargetSpacing * (4 + state.vBlocksInFlight.front().nValidatedQueuedBefore)) {
LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", state.vBlocksInFlight.front().hash.ToString(), pto->id);
diff --git a/src/main.h b/src/main.h
index b0aec6662d..38d867675e 100644
--- a/src/main.h
+++ b/src/main.h
@@ -75,7 +75,7 @@ static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/** Timeout in seconds during which a peer must stall block download progress before being disconnected. */
static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
- * less than this number, we reached their tip. Changing this value is a protocol upgrade. */
+ * less than this number, we reached its tip. Changing this value is a protocol upgrade. */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
/** Size of the "block download window": how far ahead of our current height do we fetch?
* Larger windows tolerate larger download speed differences between peer, but increase the potential
diff --git a/src/merkleblock.cpp b/src/merkleblock.cpp
index dc87d0377a..4d90fd8cd7 100644
--- a/src/merkleblock.cpp
+++ b/src/merkleblock.cpp
@@ -119,8 +119,8 @@ uint256 CPartialMerkleTree::TraverseAndExtract(int height, unsigned int pos, uns
if (pos*2+1 < CalcTreeWidth(height-1)) {
right = TraverseAndExtract(height-1, pos*2+1, nBitsUsed, nHashUsed, vMatch);
if (right == left) {
- // If the left and right branch should never be identical as the transaction
- // hashes covered by them must be unique.
+ // The left and right branches should never be identical, as the transaction
+ // hashes covered by them must each be unique.
fBad = true;
}
} else {
diff --git a/src/merkleblock.h b/src/merkleblock.h
index d90face17c..904c22abc2 100644
--- a/src/merkleblock.h
+++ b/src/merkleblock.h
@@ -104,7 +104,7 @@ public:
}
}
- /** Construct a partial merkle tree from a list of transaction id's, and a mask that selects a subset of them */
+ /** Construct a partial merkle tree from a list of transaction ids, and a mask that selects a subset of them */
CPartialMerkleTree(const std::vector<uint256> &vTxid, const std::vector<bool> &vMatch);
CPartialMerkleTree();
diff --git a/src/mruset.h b/src/mruset.h
index 1969f419cb..398aa173bf 100644
--- a/src/mruset.h
+++ b/src/mruset.h
@@ -1,12 +1,12 @@
-// Copyright (c) 2012 The Bitcoin Core developers
+// Copyright (c) 2012-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_MRUSET_H
#define BITCOIN_MRUSET_H
-#include <deque>
#include <set>
+#include <vector>
#include <utility>
/** STL-like set container that only keeps the most recent N elements. */
@@ -22,11 +22,13 @@ public:
protected:
std::set<T> set;
- std::deque<T> queue;
- size_type nMaxSize;
+ std::vector<iterator> order;
+ size_type first_used;
+ size_type first_unused;
+ const size_type nMaxSize;
public:
- mruset(size_type nMaxSizeIn = 0) { nMaxSize = nMaxSizeIn; }
+ mruset(size_type nMaxSizeIn = 1) : nMaxSize(nMaxSizeIn) { clear(); }
iterator begin() const { return set.begin(); }
iterator end() const { return set.end(); }
size_type size() const { return set.size(); }
@@ -36,7 +38,9 @@ public:
void clear()
{
set.clear();
- queue.clear();
+ order.assign(nMaxSize, set.end());
+ first_used = 0;
+ first_unused = 0;
}
bool inline friend operator==(const mruset<T>& a, const mruset<T>& b) { return a.set == b.set; }
bool inline friend operator==(const mruset<T>& a, const std::set<T>& b) { return a.set == b; }
@@ -45,25 +49,17 @@ public:
{
std::pair<iterator, bool> ret = set.insert(x);
if (ret.second) {
- if (nMaxSize && queue.size() == nMaxSize) {
- set.erase(queue.front());
- queue.pop_front();
+ if (set.size() == nMaxSize + 1) {
+ set.erase(order[first_used]);
+ order[first_used] = set.end();
+ if (++first_used == nMaxSize) first_used = 0;
}
- queue.push_back(x);
+ order[first_unused] = ret.first;
+ if (++first_unused == nMaxSize) first_unused = 0;
}
return ret;
}
size_type max_size() const { return nMaxSize; }
- size_type max_size(size_type s)
- {
- if (s)
- while (queue.size() > s) {
- set.erase(queue.front());
- queue.pop_front();
- }
- nMaxSize = s;
- return nMaxSize;
- }
};
#endif // BITCOIN_MRUSET_H
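The reworked mruset keeps a fixed-size ring (order, first_used, first_unused) instead of a deque, evicting the oldest element once the set would exceed nMaxSize. A sketch of the same eviction behaviour (plain Python, storing keys in the ring where the C++ stores set iterators):

    class MRUSetSketch(object):
        """Set that keeps only the most recently inserted n_max elements,
        mirroring the ring-buffer bookkeeping of the new mruset."""
        def __init__(self, n_max=1):
            self.n_max = n_max
            self.clear()

        def clear(self):
            self.items = set()
            self.order = [None] * self.n_max   # ring buffer in insertion order
            self.first_used = 0                # oldest live slot
            self.first_unused = 0              # next slot to fill

        def insert(self, x):
            if x in self.items:
                return False                   # already present; order unchanged
            self.items.add(x)
            if len(self.items) == self.n_max + 1:
                # Evict the oldest element to stay at n_max.
                self.items.discard(self.order[self.first_used])
                self.order[self.first_used] = None
                self.first_used = (self.first_used + 1) % self.n_max
            self.order[self.first_unused] = x
            self.first_unused = (self.first_unused + 1) % self.n_max
            return True

    s = MRUSetSketch(3)
    for x in "abcd":
        s.insert(x)
    assert s.items == {"b", "c", "d"}          # "a" was evicted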
diff --git a/src/net.cpp b/src/net.cpp
index 45a06a105f..2de04fc574 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -142,6 +142,27 @@ bool GetLocal(CService& addr, const CNetAddr *paddrPeer)
return nBestScore >= 0;
}
+//! Convert the pnSeeds6 array into usable address objects.
+static std::vector<CAddress> convertSeed6(const std::vector<SeedSpec6> &vSeedsIn)
+{
+ // It'll only connect to one or two seed nodes because once it connects,
+ // it'll get a pile of addresses with newer timestamps.
+ // Seed nodes are given a random 'last seen time' of between one and two
+ // weeks ago.
+ const int64_t nOneWeek = 7*24*60*60;
+ std::vector<CAddress> vSeedsOut;
+ vSeedsOut.reserve(vSeedsIn.size());
+ for (std::vector<SeedSpec6>::const_iterator i(vSeedsIn.begin()); i != vSeedsIn.end(); ++i)
+ {
+ struct in6_addr ip;
+ memcpy(&ip, i->addr, sizeof(ip));
+ CAddress addr(CService(ip, i->port));
+ addr.nTime = GetTime() - GetRand(nOneWeek) - nOneWeek;
+ vSeedsOut.push_back(addr);
+ }
+ return vSeedsOut;
+}
+
// get best local address for a particular peer as a CAddress
// Otherwise, return the unroutable 0.0.0.0 but filled in with
// the normal parameters, since the IP may be changed to a useful
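convertSeed6 above stamps each hard-coded seed with a random "last seen" time between one and two weeks in the past, so addresses learned over the network, with newer timestamps, quickly take precedence. A small sketch of that timestamp assignment (plain Python, hypothetical names):

    import random
    import time

    ONE_WEEK = 7 * 24 * 60 * 60   # seconds

    def seed_last_seen(now=None):
        # Mirrors addr.nTime = GetTime() - GetRand(nOneWeek) - nOneWeek:
        # a uniformly random instant between one and two weeks ago.
        if now is None:
            now = int(time.time())
        return now - random.randrange(ONE_WEEK) - ONE_WEEK

    t = seed_last_seen(now=1000000000)
    assert 1000000000 - 2 * ONE_WEEK < t <= 1000000000 - ONE_WEEK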
@@ -1195,7 +1216,7 @@ void ThreadOpenConnections()
static bool done = false;
if (!done) {
LogPrintf("Adding fixed seed nodes as DNS doesn't seem to be available.\n");
- addrman.Add(Params().FixedSeeds(), CNetAddr("127.0.0.1"));
+ addrman.Add(convertSeed6(Params().FixedSeeds()), CNetAddr("127.0.0.1"));
done = true;
}
}
@@ -1884,7 +1905,10 @@ bool CAddrDB::Read(CAddrMan& addr)
unsigned int ReceiveFloodSize() { return 1000*GetArg("-maxreceivebuffer", 5*1000); }
unsigned int SendBufferSize() { return 1000*GetArg("-maxsendbuffer", 1*1000); }
-CNode::CNode(SOCKET hSocketIn, CAddress addrIn, std::string addrNameIn, bool fInboundIn) : ssSend(SER_NETWORK, INIT_PROTO_VERSION), setAddrKnown(5000)
+CNode::CNode(SOCKET hSocketIn, CAddress addrIn, std::string addrNameIn, bool fInboundIn) :
+ ssSend(SER_NETWORK, INIT_PROTO_VERSION),
+ addrKnown(5000, 0.001, insecure_rand()),
+ setInventoryKnown(SendBufferSize() / 1000)
{
nServices = 0;
hSocket = hSocketIn;
@@ -1913,7 +1937,6 @@ CNode::CNode(SOCKET hSocketIn, CAddress addrIn, std::string addrNameIn, bool fIn
nStartingHeight = -1;
fGetAddr = false;
fRelayTxes = false;
- setInventoryKnown.max_size(SendBufferSize() / 1000);
pfilter = new CBloomFilter();
nPingNonceSent = 0;
nPingUsecStart = 0;
diff --git a/src/net.h b/src/net.h
index 9fc6ce68d0..7c61a2be6c 100644
--- a/src/net.h
+++ b/src/net.h
@@ -271,8 +271,8 @@ public:
bool fDisconnect;
// We use fRelayTxes for two purposes -
// a) it allows us to not relay tx invs before receiving the peer's version message
- // b) the peer may tell us in their version message that we should not relay tx invs
- // until they have initialized their bloom filter.
+ // b) the peer may tell us in its version message that we should not relay tx invs
+ // until it has initialized its bloom filter.
bool fRelayTxes;
CSemaphoreGrant grantOutbound;
CCriticalSection cs_filter;
@@ -300,7 +300,7 @@ public:
// flood relay
std::vector<CAddress> vAddrToSend;
- mruset<CAddress> setAddrKnown;
+ CRollingBloomFilter addrKnown;
bool fGetAddr;
std::set<uint256> setKnown;
@@ -380,7 +380,7 @@ public:
void AddAddressKnown(const CAddress& addr)
{
- setAddrKnown.insert(addr);
+ addrKnown.insert(addr.GetKey());
}
void PushAddress(const CAddress& addr)
@@ -388,7 +388,7 @@ public:
// Known checking here is only to save space from duplicates.
// SendMessages will filter it again for knowns that were added
// after addresses were pushed.
- if (addr.IsValid() && !setAddrKnown.count(addr)) {
+ if (addr.IsValid() && !addrKnown.contains(addr.GetKey())) {
if (vAddrToSend.size() >= MAX_ADDR_TO_SEND) {
vAddrToSend[insecure_rand() % vAddrToSend.size()] = addr;
} else {
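With addrKnown now a rolling bloom filter rather than an mruset, both PushAddress and the addr-relay loop in SendMessages gate on contains()/insert(). A compact sketch of that gating (plain Python, with a set standing in for the probabilistic filter and hypothetical names):

    import random

    MAX_ADDR_TO_SEND = 1000

    class PeerSketch(object):
        def __init__(self):
            self.addr_known = set()    # stands in for CRollingBloomFilter(5000, 0.001, ...)
            self.addr_to_send = []

        def push_address(self, addr):
            # Mirrors CNode::PushAddress: skip addresses the peer already knows,
            # and overwrite a random slot once the outgoing queue is full.
            if addr in self.addr_known:
                return
            if len(self.addr_to_send) >= MAX_ADDR_TO_SEND:
                self.addr_to_send[random.randrange(len(self.addr_to_send))] = addr
            else:
                self.addr_to_send.append(addr)

        def flush_addr(self):
            # Mirrors the SendMessages loop: re-check addrKnown, mark as known, send.
            out = []
            for addr in self.addr_to_send:
                if addr not in self.addr_known:
                    self.addr_known.add(addr)
                    out.append(addr)
            self.addr_to_send = []
            return out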
diff --git a/src/pow.cpp b/src/pow.cpp
index fc6ed4f3d1..bb53ad204b 100644
--- a/src/pow.cpp
+++ b/src/pow.cpp
@@ -114,3 +114,20 @@ arith_uint256 GetBlockProof(const CBlockIndex& block)
// or ~bnTarget / (nTarget+1) + 1.
return (~bnTarget / (bnTarget + 1)) + 1;
}
+
+int64_t GetBlockProofEquivalentTime(const CBlockIndex& to, const CBlockIndex& from, const CBlockIndex& tip, const Consensus::Params& params)
+{
+ arith_uint256 r;
+ int sign = 1;
+ if (to.nChainWork > from.nChainWork) {
+ r = to.nChainWork - from.nChainWork;
+ } else {
+ r = from.nChainWork - to.nChainWork;
+ sign = -1;
+ }
+ r = r * arith_uint256(params.nPowTargetSpacing) / GetBlockProof(tip);
+ if (r.bits() > 63) {
+ return sign * std::numeric_limits<int64_t>::max();
+ }
+ return sign * r.GetLow64();
+}
diff --git a/src/pow.h b/src/pow.h
index a5d32db178..e864a474cc 100644
--- a/src/pow.h
+++ b/src/pow.h
@@ -22,4 +22,7 @@ unsigned int CalculateNextWorkRequired(const CBlockIndex* pindexLast, int64_t nF
bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params&);
arith_uint256 GetBlockProof(const CBlockIndex& block);
+/** Return the time it would take to redo the work difference between from and to, assuming the current hashrate corresponds to the difficulty at tip, in seconds. */
+int64_t GetBlockProofEquivalentTime(const CBlockIndex& to, const CBlockIndex& from, const CBlockIndex& tip, const Consensus::Params&);
+
#endif // BITCOIN_POW_H
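GetBlockProofEquivalentTime converts a chain-work difference into the time it would take to redo that work at the difficulty of tip. A numeric sketch (plain Python, small illustrative chain-work values):

    def block_proof_equivalent_time(work_to, work_from, work_per_block_at_tip,
                                    pow_target_spacing=600):
        """Seconds needed to redo the work difference between 'from' and 'to',
        assuming the network keeps producing work_per_block_at_tip (the value
        GetBlockProof returns for the tip) every pow_target_spacing seconds."""
        sign = 1
        r = work_to - work_from
        if r < 0:
            sign, r = -1, -r
        r = r * pow_target_spacing // work_per_block_at_tip
        if r >= 2**63:                 # mirrors the r.bits() > 63 clamp
            return sign * (2**63 - 1)
        return sign * r

    # A gap of 144 blocks' worth of work at the tip's difficulty corresponds to
    # roughly one day of proof-of-work-equivalent time.
    assert block_proof_equivalent_time(144 * 1000, 0, 1000) == 144 * 600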
diff --git a/src/qt/askpassphrasedialog.cpp b/src/qt/askpassphrasedialog.cpp
index 229139e65c..441814ff07 100644
--- a/src/qt/askpassphrasedialog.cpp
+++ b/src/qt/askpassphrasedialog.cpp
@@ -62,7 +62,7 @@ AskPassphraseDialog::AskPassphraseDialog(Mode mode, QWidget *parent) :
break;
case ChangePass: // Ask old passphrase + new passphrase x2
setWindowTitle(tr("Change passphrase"));
- ui->warningLabel->setText(tr("Enter the old and new passphrase to the wallet."));
+ ui->warningLabel->setText(tr("Enter the old passphrase and new passphrase to the wallet."));
break;
}
textChanged();
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index 069601ab67..018169cfdc 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -532,7 +532,7 @@ int main(int argc, char *argv[])
// Now that QSettings are accessible, initialize translations
QTranslator qtTranslatorBase, qtTranslator, translatorBase, translator;
initTranslations(qtTranslatorBase, qtTranslator, translatorBase, translator);
- uiInterface.Translate.connect(Translate);
+ translationInterface.Translate.connect(Translate);
// Show help message immediately after parsing command-line options (for "-lang") and setting locale,
// but before showing splash screen.
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index 670d54c7e3..70bf894599 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -684,7 +684,7 @@ void BitcoinGUI::setNumBlocks(int count, const QDateTime& blockDate)
QDateTime currentDate = QDateTime::currentDateTime();
qint64 secs = blockDate.secsTo(currentDate);
- tooltip = tr("Processed %n blocks of transaction history.", "", count);
+ tooltip = tr("Processed %n block(s) of transaction history.", "", count);
// Set icon state: spinning if catching up, tick otherwise
if(secs < 90*60)
diff --git a/src/qt/bitcoinstrings.cpp b/src/qt/bitcoinstrings.cpp
index 7f372debad..aeabfc424d 100644
--- a/src/qt/bitcoinstrings.cpp
+++ b/src/qt/bitcoinstrings.cpp
@@ -93,8 +93,8 @@ QT_TRANSLATE_NOOP("bitcoin-core", ""
"Maximum size of data in data carrier transactions we relay and mine "
"(default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
-"Maximum total fees to use in a single wallet transaction, setting too low "
-"may abort large transactions (default: %s)"),
+"Maximum total fees to use in a single wallet transaction; setting this too "
+"low may abort large transactions (default: %s)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
"Number of seconds to keep misbehaving peers from reconnecting (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
@@ -332,7 +332,7 @@ QT_TRANSLATE_NOOP("bitcoin-core", "Wallet %s resides outside data directory %s")
QT_TRANSLATE_NOOP("bitcoin-core", "Wallet needed to be rewritten: restart Bitcoin Core to complete"),
QT_TRANSLATE_NOOP("bitcoin-core", "Wallet options:"),
QT_TRANSLATE_NOOP("bitcoin-core", "Warning"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Warning: This version is obsolete, upgrade required!"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Warning: This version is obsolete; upgrade required!"),
QT_TRANSLATE_NOOP("bitcoin-core", "Warning: Unsupported argument -benchmark ignored, use -debug=bench."),
QT_TRANSLATE_NOOP("bitcoin-core", "Warning: Unsupported argument -debugnet ignored, use -debug=net."),
QT_TRANSLATE_NOOP("bitcoin-core", "You need to rebuild the database using -reindex to change -txindex"),
diff --git a/src/qt/coincontroldialog.cpp b/src/qt/coincontroldialog.cpp
index 0a60632bfa..7531fbddcb 100644
--- a/src/qt/coincontroldialog.cpp
+++ b/src/qt/coincontroldialog.cpp
@@ -129,11 +129,11 @@ CoinControlDialog::CoinControlDialog(QWidget *parent) :
ui->treeWidget->setColumnWidth(COLUMN_DATE, 110);
ui->treeWidget->setColumnWidth(COLUMN_CONFIRMATIONS, 100);
ui->treeWidget->setColumnWidth(COLUMN_PRIORITY, 100);
- ui->treeWidget->setColumnHidden(COLUMN_TXHASH, true); // store transacton hash in this column, but dont show it
- ui->treeWidget->setColumnHidden(COLUMN_VOUT_INDEX, true); // store vout index in this column, but dont show it
- ui->treeWidget->setColumnHidden(COLUMN_AMOUNT_INT64, true); // store amount int64 in this column, but dont show it
- ui->treeWidget->setColumnHidden(COLUMN_PRIORITY_INT64, true); // store priority int64 in this column, but dont show it
- ui->treeWidget->setColumnHidden(COLUMN_DATE_INT64, true); // store date int64 in this column, but dont show it
+ ui->treeWidget->setColumnHidden(COLUMN_TXHASH, true); // store transaction hash in this column, but don't show it
+ ui->treeWidget->setColumnHidden(COLUMN_VOUT_INDEX, true); // store vout index in this column, but don't show it
+ ui->treeWidget->setColumnHidden(COLUMN_AMOUNT_INT64, true); // store amount int64 in this column, but don't show it
+ ui->treeWidget->setColumnHidden(COLUMN_PRIORITY_INT64, true); // store priority int64 in this column, but don't show it
+ ui->treeWidget->setColumnHidden(COLUMN_DATE_INT64, true); // store date int64 in this column, but don't show it
// default view is sorted by amount desc
sortView(COLUMN_AMOUNT_INT64, Qt::DescendingOrder);
@@ -408,8 +408,8 @@ void CoinControlDialog::viewItemChanged(QTreeWidgetItem* item, int column)
}
// todo: this is a temporary qt5 fix: when clicking a parent node in tree mode, the parent node
- // including all childs are partially selected. But the parent node should be fully selected
- // as well as the childs. Childs should never be partially selected in the first place.
+ // and all its children are partially selected. But the parent node should be fully selected
+ // as well as the children. Children should never be partially selected in the first place.
// Please remove this ugly fix, once the bug is solved upstream.
#if QT_VERSION >= 0x050000
else if (column == COLUMN_CHECKBOX && item->childCount() > 0)
@@ -635,15 +635,15 @@ void CoinControlDialog::updateLabels(WalletModel *model, QDialog* dialog)
l7->setStyleSheet((fDust) ? "color:red;" : ""); // Dust = "yes"
// tool tips
- QString toolTip1 = tr("This label turns red, if the transaction size is greater than 1000 bytes.") + "<br /><br />";
+ QString toolTip1 = tr("This label turns red if the transaction size is greater than 1000 bytes.") + "<br /><br />";
toolTip1 += tr("This means a fee of at least %1 per kB is required.").arg(BitcoinUnits::formatWithUnit(nDisplayUnit, CWallet::minTxFee.GetFeePerK())) + "<br /><br />";
toolTip1 += tr("Can vary +/- 1 byte per input.");
QString toolTip2 = tr("Transactions with higher priority are more likely to get included into a block.") + "<br /><br />";
- toolTip2 += tr("This label turns red, if the priority is smaller than \"medium\".") + "<br /><br />";
+ toolTip2 += tr("This label turns red if the priority is smaller than \"medium\".") + "<br /><br />";
toolTip2 += tr("This means a fee of at least %1 per kB is required.").arg(BitcoinUnits::formatWithUnit(nDisplayUnit, CWallet::minTxFee.GetFeePerK()));
- QString toolTip3 = tr("This label turns red, if any recipient receives an amount smaller than %1.").arg(BitcoinUnits::formatWithUnit(nDisplayUnit, ::minRelayTxFee.GetFee(546)));
+ QString toolTip3 = tr("This label turns red if any recipient receives an amount smaller than %1.").arg(BitcoinUnits::formatWithUnit(nDisplayUnit, ::minRelayTxFee.GetFee(546)));
// how many satoshis the estimated fee can vary per byte we guess wrong
double dFeeVary;
diff --git a/src/qt/locale/bitcoin_en.ts b/src/qt/locale/bitcoin_en.ts
index 2555cdbb62..c581197dca 100644
--- a/src/qt/locale/bitcoin_en.ts
+++ b/src/qt/locale/bitcoin_en.ts
@@ -195,12 +195,7 @@
<translation>Change passphrase</translation>
</message>
<message>
- <location line="+1"/>
- <source>Enter the old and new passphrase to the wallet.</source>
- <translation>Enter the old and new passphrase to the wallet.</translation>
- </message>
- <message>
- <location line="+45"/>
+ <location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Confirm wallet encryption</translation>
</message>
@@ -242,7 +237,12 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+93"/>
+ <location line="+23"/>
+ <source>Enter the old passphrase and new passphrase to the wallet.</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+70"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
@@ -549,7 +549,15 @@
<translation>No block source available...</translation>
</message>
<message numerus="yes">
- <location line="+35"/>
+ <location line="+9"/>
+ <source>Processed %n block(s) of transaction history.</source>
+ <translation>
+ <numerusform>Processed %n block of transaction history.</numerusform>
+ <numerusform>Processed %n blocks of transaction history.</numerusform>
+ </translation>
+ </message>
+ <message numerus="yes">
+ <location line="+26"/>
<source>%n hour(s)</source>
<translation>
<numerusform>%n hour</numerusform>
@@ -621,16 +629,8 @@
<source>Up to date</source>
<translation>Up to date</translation>
</message>
- <message numerus="yes">
- <location line="-5"/>
- <source>Processed %n blocks of transaction history.</source>
- <translation type="unfinished">
- <numerusform></numerusform>
- <numerusform></numerusform>
- </translation>
- </message>
<message>
- <location line="+49"/>
+ <location line="+44"/>
<source>Catching up...</source>
<translation>Catching up...</translation>
</message>
@@ -912,7 +912,22 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+177"/>
+ <location line="+161"/>
+ <source>This label turns red if the transaction size is greater than 1000 bytes.</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+5"/>
+ <source>This label turns red if the priority is smaller than &quot;medium&quot;.</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+3"/>
+ <source>This label turns red if any recipient receives an amount smaller than %1.</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+8"/>
<source>Can vary +/- %1 satoshi(s) per input.</source>
<translation type="unfinished"></translation>
</message>
@@ -927,12 +942,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+16"/>
- <source>This label turns red, if the transaction size is greater than 1000 bytes.</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+1"/>
+ <location line="+17"/>
<location line="+5"/>
<source>This means a fee of at least %1 per kB is required.</source>
<translation type="unfinished"></translation>
@@ -948,17 +958,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+1"/>
- <source>This label turns red, if the priority is smaller than &quot;medium&quot;.</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+3"/>
- <source>This label turns red, if any recipient receives an amount smaller than %1.</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+55"/>
+ <location line="+59"/>
<location line="+61"/>
<source>(no label)</source>
<translation type="unfinished">(no label)</translation>
@@ -1437,7 +1437,7 @@
</message>
<message>
<location line="-29"/>
- <source>Client will be shutdown, do you want to proceed?</source>
+ <source>Client will be shut down. Do you want to proceed?</source>
<translation type="unfinished"></translation>
</message>
<message>
@@ -2557,12 +2557,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+193"/>
- <source>The recipient address is not valid, please recheck.</source>
- <translation>The recipient address is not valid, please recheck.</translation>
- </message>
- <message>
- <location line="+3"/>
+ <location line="+196"/>
<source>The amount to pay must be larger than 0.</source>
<translation>The amount to pay must be larger than 0.</translation>
</message>
@@ -2577,12 +2572,7 @@
<translation>The total exceeds your balance when the %1 transaction fee is included.</translation>
</message>
<message>
- <location line="+3"/>
- <source>Duplicate address found, can only send to each address once per send operation.</source>
- <translation>Duplicate address found, can only send to each address once per send operation.</translation>
- </message>
- <message>
- <location line="+3"/>
+ <location line="+6"/>
<source>Transaction creation failed!</source>
<translation type="unfinished"></translation>
</message>
@@ -2605,8 +2595,8 @@
<location line="+110"/>
<source>Estimated to begin confirmation within %n block(s).</source>
<translation type="unfinished">
- <numerusform></numerusform>
- <numerusform></numerusform>
+ <numerusform>Estimated to begin confirmation within %n block.</numerusform>
+ <numerusform>Estimated to begin confirmation within %n blocks.</numerusform>
</translation>
</message>
<message>
@@ -2615,7 +2605,17 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+129"/>
+ <location line="-114"/>
+ <source>The recipient address is not valid. Please recheck.</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+12"/>
+ <source>Duplicate address found: addresses should only be used once each.</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+231"/>
<source>Warning: Invalid Bitcoin address</source>
<translation type="unfinished"></translation>
</message>
@@ -2978,7 +2978,7 @@
<context>
<name>TransactionDesc</name>
<message>
- <location filename="../transactiondesc.cpp" line="+35"/>
+ <location filename="../transactiondesc.cpp" line="+34"/>
<source>Open until %1</source>
<translation>Open until %1</translation>
</message>
@@ -3659,7 +3659,12 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+36"/>
+ <location line="+14"/>
+ <source>Maximum total fees to use in a single wallet transaction; setting this too low may abort large transactions (default: %s)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+22"/>
<source>Set the number of script verification threads (%u to %d, 0 = auto, &lt;0 = leave that many cores free, default: %d)</source>
<translation type="unfinished"></translation>
</message>
@@ -3879,7 +3884,12 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+5"/>
+ <location line="+2"/>
+ <source>Warning: This version is obsolete; upgrade required!</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+3"/>
<source>You need to rebuild the database using -reindex to change -txindex</source>
<translation>You need to rebuild the database using -reindex to change -txindex</translation>
</message>
@@ -3964,12 +3974,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+3"/>
- <source>Maximum total fees to use in a single wallet transaction, setting too low may abort large transactions (default: %s)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+7"/>
+ <location line="+10"/>
<source>Prune configured below the minimum of %d MB. Please use a higher number.</source>
<translation type="unfinished"></translation>
</message>
@@ -4309,12 +4314,7 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation>Warning</translation>
</message>
<message>
- <location line="+1"/>
- <source>Warning: This version is obsolete, upgrade required!</source>
- <translation>Warning: This version is obsolete, upgrade required!</translation>
- </message>
- <message>
- <location line="+1"/>
+ <location line="+2"/>
<source>Warning: Unsupported argument -benchmark ignored, use -debug=bench.</source>
<translation type="unfinished"></translation>
</message>
diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp
index a9e4b339e4..efb2bf4158 100644
--- a/src/qt/optionsdialog.cpp
+++ b/src/qt/optionsdialog.cpp
@@ -208,7 +208,7 @@ void OptionsDialog::on_resetButton_clicked()
{
// confirmation dialog
QMessageBox::StandardButton btnRetVal = QMessageBox::question(this, tr("Confirm options reset"),
- tr("Client restart required to activate changes.") + "<br><br>" + tr("Client will be shutdown, do you want to proceed?"),
+ tr("Client restart required to activate changes.") + "<br><br>" + tr("Client will be shut down. Do you want to proceed?"),
QMessageBox::Yes | QMessageBox::Cancel, QMessageBox::Cancel);
if(btnRetVal == QMessageBox::Cancel)
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index 0360f160d8..7a33e3567b 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -505,7 +505,7 @@ void SendCoinsDialog::processSendCoinsReturn(const WalletModel::SendCoinsReturn
switch(sendCoinsReturn.status)
{
case WalletModel::InvalidAddress:
- msgParams.first = tr("The recipient address is not valid, please recheck.");
+ msgParams.first = tr("The recipient address is not valid. Please recheck.");
break;
case WalletModel::InvalidAmount:
msgParams.first = tr("The amount to pay must be larger than 0.");
@@ -517,7 +517,7 @@ void SendCoinsDialog::processSendCoinsReturn(const WalletModel::SendCoinsReturn
msgParams.first = tr("The total exceeds your balance when the %1 transaction fee is included.").arg(msgArg);
break;
case WalletModel::DuplicateAddress:
- msgParams.first = tr("Duplicate address found, can only send to each address once per send operation.");
+ msgParams.first = tr("Duplicate address found: addresses should only be used once each.");
break;
case WalletModel::TransactionCreationFailed:
msgParams.first = tr("Transaction creation failed!");
diff --git a/src/qt/sendcoinsentry.cpp b/src/qt/sendcoinsentry.cpp
index ea35ed1d53..6eec33ffd4 100644
--- a/src/qt/sendcoinsentry.cpp
+++ b/src/qt/sendcoinsentry.cpp
@@ -216,7 +216,7 @@ void SendCoinsEntry::setValue(const SendCoinsRecipient &value)
ui->addAsLabel->clear();
ui->payTo->setText(recipient.address); // this may set a label from addressbook
- if (!recipient.label.isEmpty()) // if a label had been set from the addressbook, dont overwrite with an empty label
+ if (!recipient.label.isEmpty()) // if a label had been set from the addressbook, don't overwrite with an empty label
ui->addAsLabel->setText(recipient.label);
ui->payAmount->setValue(recipient.amount);
}
diff --git a/src/qt/transactiondesc.cpp b/src/qt/transactiondesc.cpp
index 7214249435..4fffd03adf 100644
--- a/src/qt/transactiondesc.cpp
+++ b/src/qt/transactiondesc.cpp
@@ -14,7 +14,6 @@
#include "main.h"
#include "script/script.h"
#include "timedata.h"
-#include "ui_interface.h"
#include "util.h"
#include "wallet/db.h"
#include "wallet/wallet.h"
diff --git a/src/rpcclient.cpp b/src/rpcclient.cpp
index ad676f9edc..4b576b3707 100644
--- a/src/rpcclient.cpp
+++ b/src/rpcclient.cpp
@@ -7,7 +7,6 @@
#include "rpcprotocol.h"
#include "util.h"
-#include "ui_interface.h"
#include <set>
#include <stdint.h>
diff --git a/src/rpcserver.cpp b/src/rpcserver.cpp
index 61dda9125b..12a5c4aef9 100644
--- a/src/rpcserver.cpp
+++ b/src/rpcserver.cpp
@@ -912,8 +912,8 @@ static bool HTTPReq_JSONRPC(AcceptedConnection *conn,
{
LogPrintf("ThreadRPCServer incorrect password attempt from %s\n", conn->peer_address_to_string());
/* Deter brute-forcing
- If this results in a DoS the user really
- shouldn't have their RPC port exposed. */
+ We don't support exposing the RPC port, so this shouldn't result
+ in a DoS. */
MilliSleep(250);
conn->stream() << HTTPError(HTTP_UNAUTHORIZED, false) << std::flush;
diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp
index 73a146f05c..1bda8a7ea1 100644
--- a/src/test/bloom_tests.cpp
+++ b/src/test/bloom_tests.cpp
@@ -8,6 +8,7 @@
#include "clientversion.h"
#include "key.h"
#include "merkleblock.h"
+#include "random.h"
#include "serialize.h"
#include "streams.h"
#include "uint256.h"
@@ -459,4 +460,81 @@ BOOST_AUTO_TEST_CASE(merkle_block_4_test_update_none)
BOOST_CHECK(!filter.contains(COutPoint(uint256S("0x02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041"), 0)));
}
+static std::vector<unsigned char> RandomData()
+{
+ uint256 r = GetRandHash();
+ return std::vector<unsigned char>(r.begin(), r.end());
+}
+
+BOOST_AUTO_TEST_CASE(rolling_bloom)
+{
+ // last-100-entry, 1% false positive:
+ CRollingBloomFilter rb1(100, 0.01, 0);
+
+ // Overfill:
+ static const int DATASIZE=399;
+ std::vector<unsigned char> data[DATASIZE];
+ for (int i = 0; i < DATASIZE; i++) {
+ data[i] = RandomData();
+ rb1.insert(data[i]);
+ }
+ // Last 100 guaranteed to be remembered:
+ for (int i = 299; i < DATASIZE; i++) {
+ BOOST_CHECK(rb1.contains(data[i]));
+ }
+
+ // false positive rate is 1%, so we should get about 100 hits if
+ // testing 10,000 random keys. We get worst-case false positive
+ // behavior when the filter is as full as possible, which is
+ // when we've inserted one minus an integer multiple of nElement*2.
+ unsigned int nHits = 0;
+ for (int i = 0; i < 10000; i++) {
+ if (rb1.contains(RandomData()))
+ ++nHits;
+ }
+ // Run test_bitcoin with --log_level=message to see BOOST_TEST_MESSAGEs:
+ BOOST_TEST_MESSAGE("RollingBloomFilter got " << nHits << " false positives (~100 expected)");
+
+ // Insanely unlikely to get a fp count outside this range:
+ BOOST_CHECK(nHits > 25);
+ BOOST_CHECK(nHits < 175);
+
+ BOOST_CHECK(rb1.contains(data[DATASIZE-1]));
+ rb1.clear();
+ BOOST_CHECK(!rb1.contains(data[DATASIZE-1]));
+
+ // Now roll through data, make sure last 100 entries
+ // are always remembered:
+ for (int i = 0; i < DATASIZE; i++) {
+ if (i >= 100)
+ BOOST_CHECK(rb1.contains(data[i-100]));
+ rb1.insert(data[i]);
+ }
+
+ // Insert 999 more random entries:
+ for (int i = 0; i < 999; i++) {
+ rb1.insert(RandomData());
+ }
+ // Sanity check to make sure the filter isn't just filling up:
+ nHits = 0;
+ for (int i = 0; i < DATASIZE; i++) {
+ if (rb1.contains(data[i]))
+ ++nHits;
+ }
+ // Expect about 5 false positives, more than 100 means
+ // something is definitely broken.
+ BOOST_TEST_MESSAGE("RollingBloomFilter got " << nHits << " false positives (~5 expected)");
+ BOOST_CHECK(nHits < 100);
+
+ // last-1000-entry, 0.01% false positive:
+ CRollingBloomFilter rb2(1000, 0.001, 0);
+ for (int i = 0; i < DATASIZE; i++) {
+ rb2.insert(data[i]);
+ }
+ // ... room for all of them:
+ for (int i = 0; i < DATASIZE; i++) {
+ BOOST_CHECK(rb2.contains(data[i]));
+ }
+}
+
BOOST_AUTO_TEST_SUITE_END()
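
The new rolling_bloom test above exercises a filter that remembers roughly the last N inserted elements with a configurable false-positive rate. A self-contained sketch of the general two-generation rolling Bloom filter idea (an illustration with assumed parameters and hash choices, not CRollingBloomFilter's actual implementation):

    // Hedged sketch of a rolling Bloom filter: keep two fixed-size Bloom
    // filters, insert into the "current" one, and once it holds nPerGeneration
    // elements clear the older one and swap. Lookups consult both filters, so
    // at least the last nPerGeneration insertions are always remembered.
    #include <bitset>
    #include <cstddef>
    #include <functional>
    #include <string>

    class RollingBloomSketch
    {
    public:
        explicit RollingBloomSketch(size_t nPerGeneration) : nLimit(nPerGeneration) {}

        void insert(const std::string& key)
        {
            add(gen[cur], key);
            if (++nCount >= nLimit) {     // current generation is full:
                gen[1 - cur].reset();     // forget the oldest generation
                cur = 1 - cur;            // and start filling it afresh
                nCount = 0;
            }
        }

        bool contains(const std::string& key) const
        {
            return test(gen[0], key) || test(gen[1], key);
        }

    private:
        static const size_t BITS = 1 << 16;

        static void add(std::bitset<BITS>& b, const std::string& key)
        {
            for (int i = 0; i < 4; i++)
                b.set(std::hash<std::string>()(key + char('A' + i)) % BITS);
        }
        static bool test(const std::bitset<BITS>& b, const std::string& key)
        {
            for (int i = 0; i < 4; i++)
                if (!b.test(std::hash<std::string>()(key + char('A' + i)) % BITS))
                    return false;
            return true;
        }

        std::bitset<BITS> gen[2];
        size_t nLimit;
        size_t nCount = 0;
        int cur = 0;
    };

    int main()
    {
        RollingBloomSketch filter(100);
        for (int i = 0; i < 399; i++)
            filter.insert("key" + std::to_string(i));
        // The 100 most recent insertions are guaranteed to still be present.
        return filter.contains("key398") ? 0 : 1;
    }
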
diff --git a/src/test/data/script_invalid.json b/src/test/data/script_invalid.json
index 271bc70f73..7afa2abf49 100644
--- a/src/test/data/script_invalid.json
+++ b/src/test/data/script_invalid.json
@@ -141,6 +141,8 @@
["2 2 0 IF LSHIFT ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "LSHIFT disabled"],
["2 2 0 IF RSHIFT ELSE 1 ENDIF", "NOP", "P2SH,STRICTENC", "RSHIFT disabled"],
+["", "EQUAL NOT", "P2SH,STRICTENC", "EQUAL must error when there are no stack items"],
+["0", "EQUAL NOT", "P2SH,STRICTENC", "EQUAL must error when there are not 2 stack items"],
["0 1","EQUAL", "P2SH,STRICTENC"],
["1 1 ADD", "0 EQUAL", "P2SH,STRICTENC"],
["11 1 ADD 12 SUB", "11 EQUAL", "P2SH,STRICTENC"],
@@ -368,6 +370,16 @@
["NOP", "HASH160 1", "P2SH,STRICTENC"],
["NOP", "HASH256 1", "P2SH,STRICTENC"],
+["Increase CHECKSIG and CHECKMULTISIG negative test coverage"],
+["", "CHECKSIG NOT", "STRICTENC", "CHECKSIG must error when there are no stack items"],
+["0", "CHECKSIG NOT", "STRICTENC", "CHECKSIG must error when there are not 2 stack items"],
+["", "CHECKMULTISIG NOT", "STRICTENC", "CHECKMULTISIG must error when there are no stack items"],
+["", "-1 CHECKMULTISIG NOT", "STRICTENC", "CHECKMULTISIG must error when the specified number of pubkeys is negative"],
+["", "1 CHECKMULTISIG NOT", "STRICTENC", "CHECKMULTISIG must error when there are not enough pubkeys on the stack"],
+["", "-1 0 CHECKMULTISIG NOT", "STRICTENC", "CHECKMULTISIG must error when the specified number of signatures is negative"],
+["", "1 'pk1' 1 CHECKMULTISIG NOT", "STRICTENC", "CHECKMULTISIG must error when there are not enough signatures on the stack"],
+["", "'dummy' 'sig1' 1 'pk1' 1 CHECKMULTISIG IF 1 ENDIF", "", "CHECKMULTISIG must push false to stack when signature is invalid when NOT in strict enc mode"],
+
["",
"0 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 
CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG",
"P2SH,STRICTENC",
@@ -426,7 +438,7 @@
["0x4d 0xFF00 0x111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", "DROP 1", "MINIMALDATA",
"PUSHDATA2 of 255 bytes minimally represented by PUSHDATA1"],
-["0x4f 0x00100000 0x11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", "DROP 1", "MINIMALDATA",
+["0x4e 0x00010000 0x11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", "DROP 1", "MINIMALDATA",
"PUSHDATA4 of 256 bytes minimally represented by PUSHDATA2"],
@@ -780,6 +792,12 @@
"P2SH(P2PK) with non-push scriptSig"
],
[
+ "0 0x47 0x304402205451ce65ad844dbb978b8bdedf5082e33b43cae8279c30f2c74d9e9ee49a94f802203fe95a7ccf74da7a232ee523ef4a53cb4d14bdd16289680cdb97a63819b8f42f01 0x46 0x304402205451ce65ad844dbb978b8bdedf5082e33b43cae8279c30f2c74d9e9ee49a94f802203fe95a7ccf74da7a232ee523ef4a53cb4d14bdd16289680cdb97a63819b8f42f",
+ "2 0x21 0x02a673638cb9587cb68ea08dbef685c6f2d2a751a8b3c6f2a7e9a4999e6e4bfaf5 0x21 0x02a673638cb9587cb68ea08dbef685c6f2d2a751a8b3c6f2a7e9a4999e6e4bfaf5 0x21 0x02a673638cb9587cb68ea08dbef685c6f2d2a751a8b3c6f2a7e9a4999e6e4bfaf5 3 CHECKMULTISIG",
+ "P2SH,STRICTENC",
+ "2-of-3 with one valid and one invalid signature due to parse error, nSigs > validSigs"
+],
+[
"11 0x47 0x304402200a5c6163f07b8d3b013c4d1d6dba25e780b39658d79ba37af7057a3b7f15ffa102201fd9b4eaa9943f734928b99a83592c2e7bf342ea2680f6a2bb705167966b742001",
"0x41 0x0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8 CHECKSIG",
"CLEANSTACK,P2SH",
diff --git a/src/test/mruset_tests.cpp b/src/test/mruset_tests.cpp
index bd4e9c1d38..2b68f8899e 100644
--- a/src/test/mruset_tests.cpp
+++ b/src/test/mruset_tests.cpp
@@ -17,83 +17,65 @@
using namespace std;
-class mrutester
-{
-private:
- mruset<int> mru;
- std::set<int> set;
-
-public:
- mrutester() { mru.max_size(MAX_SIZE); }
- int size() const { return set.size(); }
-
- void insert(int n)
- {
- mru.insert(n);
- set.insert(n);
- BOOST_CHECK(mru == set);
- }
-};
-
BOOST_FIXTURE_TEST_SUITE(mruset_tests, BasicTestingSetup)
-// Test that an mruset behaves like a set, as long as no more than MAX_SIZE elements are in it
-BOOST_AUTO_TEST_CASE(mruset_like_set)
-{
-
- for (int nTest=0; nTest<NUM_TESTS; nTest++)
- {
- mrutester tester;
- while (tester.size() < MAX_SIZE)
- tester.insert(GetRandInt(2 * MAX_SIZE));
- }
-
-}
-
-// Test that an mruset's size never exceeds its max_size
-BOOST_AUTO_TEST_CASE(mruset_limited_size)
+BOOST_AUTO_TEST_CASE(mruset_test)
{
- for (int nTest=0; nTest<NUM_TESTS; nTest++)
- {
- mruset<int> mru(MAX_SIZE);
- for (int nAction=0; nAction<3*MAX_SIZE; nAction++)
- {
- int n = GetRandInt(2 * MAX_SIZE);
- mru.insert(n);
- BOOST_CHECK(mru.size() <= MAX_SIZE);
+ // The mruset being tested.
+ mruset<int> mru(5000);
+
+ // Run the test 10 times.
+ for (int test = 0; test < 10; test++) {
+ // Reset mru.
+ mru.clear();
+
+ // A deque + set to simulate the mruset.
+ std::deque<int> rep;
+ std::set<int> all;
+
+ // Insert 10000 random integers below 15000.
+ for (int j=0; j<10000; j++) {
+ int add = GetRandInt(15000);
+ mru.insert(add);
+
+ // Add the number to rep/all as well.
+ if (all.count(add) == 0) {
+ all.insert(add);
+ rep.push_back(add);
+ if (all.size() == 5001) {
+ all.erase(rep.front());
+ rep.pop_front();
+ }
+ }
+
+ // Do a full comparison between mru and the simulated mru every 1000 and every 5001 elements.
+ if (j % 1000 == 0 || j % 5001 == 0) {
+ mruset<int> mru2 = mru; // Also try making a copy
+
+ // Check that all elements that should be in there, are in there.
+ BOOST_FOREACH(int x, rep) {
+ BOOST_CHECK(mru.count(x));
+ BOOST_CHECK(mru2.count(x));
+ }
+
+ // Check that all elements that are in there, should be in there.
+ BOOST_FOREACH(int x, mru) {
+ BOOST_CHECK(all.count(x));
+ }
+
+ // Check that all elements that are in there, should be in there.
+ BOOST_FOREACH(int x, mru2) {
+ BOOST_CHECK(all.count(x));
+ }
+
+ for (int t = 0; t < 10; t++) {
+ int r = GetRandInt(15000);
+ BOOST_CHECK(all.count(r) == mru.count(r));
+ BOOST_CHECK(all.count(r) == mru2.count(r));
+ }
+ }
}
}
}
-// 16-bit permutation function
-int static permute(int n)
-{
- // hexadecimals of pi; verified to be linearly independent
- static const int table[16] = {0x243F, 0x6A88, 0x85A3, 0x08D3, 0x1319, 0x8A2E, 0x0370, 0x7344,
- 0xA409, 0x3822, 0x299F, 0x31D0, 0x082E, 0xFA98, 0xEC4E, 0x6C89};
-
- int ret = 0;
- for (int bit=0; bit<16; bit++)
- if (n & (1<<bit))
- ret ^= table[bit];
-
- return ret;
-}
-
-// Test that an mruset acts like a moving window, if no duplicate elements are added
-BOOST_AUTO_TEST_CASE(mruset_window)
-{
- mruset<int> mru(MAX_SIZE);
- for (int n=0; n<10*MAX_SIZE; n++)
- {
- mru.insert(permute(n));
-
- set<int> tester;
- for (int m=max(0,n-MAX_SIZE+1); m<=n; m++)
- tester.insert(permute(m));
-
- BOOST_CHECK(mru == tester);
- }
-}
-
BOOST_AUTO_TEST_SUITE_END()
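
The rewritten test above checks an mruset (a set that evicts its oldest entries once a size limit is reached) against a reference model built from a std::set plus a std::deque recording insertion order. A standalone sketch of that reference model as a container in its own right (assumed names; not the mruset template itself):

    // Hedged sketch of a bounded "most recently inserted" set, mirroring the
    // deque + set reference model used in the test above. When the size limit
    // is exceeded, the element inserted longest ago is evicted.
    #include <deque>
    #include <set>

    template <typename T>
    class BoundedRecentSet
    {
    public:
        explicit BoundedRecentSet(size_t nMaxSizeIn) : nMaxSize(nMaxSizeIn) {}

        void insert(const T& value)
        {
            if (!members.insert(value).second)
                return;                 // already present: nothing to evict
            order.push_back(value);
            if (members.size() > nMaxSize) {
                members.erase(order.front());
                order.pop_front();
            }
        }

        size_t count(const T& value) const { return members.count(value); }
        size_t size() const { return members.size(); }
        void clear() { members.clear(); order.clear(); }

    private:
        std::set<T> members;
        std::deque<T> order;
        size_t nMaxSize;
    };

    int main()
    {
        BoundedRecentSet<int> recent(3);
        for (int i = 0; i < 5; i++) recent.insert(i);
        // 0 and 1 were evicted; 2, 3 and 4 remain.
        return (recent.count(0) == 0 && recent.count(4) == 1) ? 0 : 1;
    }
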
diff --git a/src/test/pow_tests.cpp b/src/test/pow_tests.cpp
index 4ce1591c35..a436749287 100644
--- a/src/test/pow_tests.cpp
+++ b/src/test/pow_tests.cpp
@@ -69,4 +69,28 @@ BOOST_AUTO_TEST_CASE(get_next_work_upper_limit_actual)
BOOST_CHECK_EQUAL(CalculateNextWorkRequired(&pindexLast, nLastRetargetTime, params), 0x1d00e1fd);
}
+BOOST_AUTO_TEST_CASE(GetBlockProofEquivalentTime_test)
+{
+ SelectParams(CBaseChainParams::MAIN);
+ const Consensus::Params& params = Params().GetConsensus();
+
+ std::vector<CBlockIndex> blocks(10000);
+ for (int i = 0; i < 10000; i++) {
+ blocks[i].pprev = i ? &blocks[i - 1] : NULL;
+ blocks[i].nHeight = i;
+ blocks[i].nTime = 1269211443 + i * params.nPowTargetSpacing;
+ blocks[i].nBits = 0x207fffff; /* target 0x7fffff000... */
+ blocks[i].nChainWork = i ? blocks[i - 1].nChainWork + GetBlockProof(blocks[i - 1]) : arith_uint256(0);
+ }
+
+ for (int j = 0; j < 1000; j++) {
+ CBlockIndex *p1 = &blocks[GetRand(10000)];
+ CBlockIndex *p2 = &blocks[GetRand(10000)];
+ CBlockIndex *p3 = &blocks[GetRand(10000)];
+
+ int64_t tdiff = GetBlockProofEquivalentTime(*p1, *p2, *p3, params);
+ BOOST_CHECK_EQUAL(tdiff, p1->GetBlockTime() - p2->GetBlockTime());
+ }
+}
+
BOOST_AUTO_TEST_SUITE_END()
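
The new pow test above builds a chain with a constant difficulty target and constant spacing, and expects GetBlockProofEquivalentTime to equal the plain timestamp difference between the two blocks. The intuition is to express the work between two blocks in units of "seconds of hashing at the reference block's difficulty". A self-contained numerical sketch under those constant-difficulty assumptions (simplified integer arithmetic, not the real arith_uint256 implementation):

    // Hedged sketch: with identical per-block work W and target spacing S, the
    // proof-equivalent time between heights h_to and h_from, measured against
    // a tip that also has per-block work W, reduces to (h_to - h_from) * S,
    // i.e. the timestamp difference in the test's synthetic chain.
    #include <cassert>
    #include <cstdint>

    int64_t ProofEquivalentTime(int64_t workTo, int64_t workFrom,
                                int64_t workPerBlockAtTip, int64_t nPowTargetSpacing)
    {
        // Work difference converted to blocks at the tip's difficulty, then to seconds.
        return (workTo - workFrom) / workPerBlockAtTip * nPowTargetSpacing;
    }

    int main()
    {
        const int64_t W = 0x800000;    // arbitrary constant per-block work
        const int64_t S = 600;         // 10-minute target spacing
        // Block 120 vs block 100: 20 blocks of equivalent work = 20 * 600 seconds.
        assert(ProofEquivalentTime(120 * W, 100 * W, W, S) == 20 * S);
        return 0;
    }
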
diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp
index a2cb78c989..4057eccbed 100644
--- a/src/test/test_bitcoin.cpp
+++ b/src/test/test_bitcoin.cpp
@@ -20,7 +20,7 @@
#include <boost/test/unit_test.hpp>
#include <boost/thread.hpp>
-CClientUIInterface uiInterface;
+CClientUIInterface uiInterface; // Declared but not defined in ui_interface.h
CWallet* pwalletMain;
extern bool fPrintToConsole;
diff --git a/src/ui_interface.h b/src/ui_interface.h
index 3f11a1ddab..32a92a4b81 100644
--- a/src/ui_interface.h
+++ b/src/ui_interface.h
@@ -78,9 +78,6 @@ public:
/** Progress message during initialization. */
boost::signals2::signal<void (const std::string &message)> InitMessage;
- /** Translate a message to the native language of the user. */
- boost::signals2::signal<std::string (const char* psz)> Translate;
-
/** Number of network connections changed. */
boost::signals2::signal<void (int newNumConnections)> NotifyNumConnectionsChanged;
@@ -102,14 +99,4 @@ public:
extern CClientUIInterface uiInterface;
-/**
- * Translation function: Call Translate signal on UI interface, which returns a boost::optional result.
- * If no translation slot is registered, nothing is returned, and simply return the input.
- */
-inline std::string _(const char* psz)
-{
- boost::optional<std::string> rv = uiInterface.Translate(psz);
- return rv ? (*rv) : psz;
-}
-
#endif // BITCOIN_UI_INTERFACE_H
diff --git a/src/util.cpp b/src/util.cpp
index 1bb7df7085..c9e8242d47 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -7,7 +7,7 @@
#include "config/bitcoin-config.h"
#endif
-#if (defined(__FreeBSD__) || defined(__OpenBSD__))
+#if (defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__))
#include <pthread.h>
#include <pthread_np.h>
#endif
@@ -109,6 +109,7 @@ string strMiscWarning;
bool fLogTimestamps = false;
bool fLogIPs = false;
volatile bool fReopenDebugLog = false;
+CTranslationInterface translationInterface;
/** Init OpenSSL library multithreading support */
static CCriticalSection** ppmutexOpenSSL;
@@ -712,7 +713,7 @@ void RenameThread(const char* name)
#if defined(PR_SET_NAME)
// Only the first 15 characters are used (16 - NUL terminator)
::prctl(PR_SET_NAME, name, 0, 0, 0);
-#elif (defined(__FreeBSD__) || defined(__OpenBSD__))
+#elif (defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__))
pthread_set_name_np(pthread_self(), name);
#elif defined(MAC_OSX)
diff --git a/src/util.h b/src/util.h
index 9b5a4153dd..483d9d7858 100644
--- a/src/util.h
+++ b/src/util.h
@@ -25,8 +25,17 @@
#include <vector>
#include <boost/filesystem/path.hpp>
+#include <boost/signals2/signal.hpp>
#include <boost/thread/exceptions.hpp>
+/** Signals for translation. */
+class CTranslationInterface
+{
+public:
+ /** Translate a message to the native language of the user. */
+ boost::signals2::signal<std::string (const char* psz)> Translate;
+};
+
extern std::map<std::string, std::string> mapArgs;
extern std::map<std::string, std::vector<std::string> > mapMultiArgs;
extern bool fDebug;
@@ -37,6 +46,17 @@ extern std::string strMiscWarning;
extern bool fLogTimestamps;
extern bool fLogIPs;
extern volatile bool fReopenDebugLog;
+extern CTranslationInterface translationInterface;
+
+/**
+ * Translation function: Call Translate signal on UI interface, which returns a boost::optional result.
+ * If no translation slot is registered, nothing is returned, and simply return the input.
+ */
+inline std::string _(const char* psz)
+{
+ boost::optional<std::string> rv = translationInterface.Translate(psz);
+ return rv ? (*rv) : psz;
+}
void SetupEnvironment();
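
The util.h hunk above moves the Translate signal into a small CTranslationInterface so that _() no longer depends on the UI layer. A standalone sketch of how such a boost::signals2-based fallback translation hook behaves (stand-in names TranslationHook and Tr, not the real header):

    // Hedged sketch of the translation-hook pattern: if no slot is connected,
    // the signal returns an empty boost::optional and the untranslated input
    // is used; a GUI can connect a slot later to supply translations.
    #include <boost/optional.hpp>
    #include <boost/signals2/signal.hpp>
    #include <iostream>
    #include <string>

    struct TranslationHook {
        boost::signals2::signal<std::string(const char*)> Translate;
    };

    static TranslationHook g_hook;

    std::string Tr(const char* psz)
    {
        boost::optional<std::string> rv = g_hook.Translate(psz);
        return rv ? *rv : psz;
    }

    int main()
    {
        std::cout << Tr("Loading wallet...") << "\n";   // no slot: falls through
        g_hook.Translate.connect([](const char* psz) {  // pretend a GUI registered its translator
            return std::string("[translated] ") + psz;
        });
        std::cout << Tr("Loading wallet...") << "\n";   // now goes through the slot
    }
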
diff --git a/src/utiltime.cpp b/src/utiltime.cpp
index 8f0dcae29d..d316288999 100644
--- a/src/utiltime.cpp
+++ b/src/utiltime.cpp
@@ -30,13 +30,13 @@ void SetMockTime(int64_t nMockTimeIn)
int64_t GetTimeMillis()
{
- return (boost::posix_time::ptime(boost::posix_time::microsec_clock::universal_time()) -
+ return (boost::posix_time::microsec_clock::universal_time() -
boost::posix_time::ptime(boost::gregorian::date(1970,1,1))).total_milliseconds();
}
int64_t GetTimeMicros()
{
- return (boost::posix_time::ptime(boost::posix_time::microsec_clock::universal_time()) -
+ return (boost::posix_time::microsec_clock::universal_time() -
boost::posix_time::ptime(boost::gregorian::date(1970,1,1))).total_microseconds();
}
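
The utiltime.cpp change above drops a redundant ptime conversion when computing milliseconds and microseconds since the Unix epoch. For comparison, an equivalent standalone sketch using std::chrono (not what the patch uses; shown only to make the epoch-difference arithmetic explicit):

    // Hedged sketch: epoch times with std::chrono. On common implementations
    // system_clock's epoch is the Unix epoch; the patch itself stays with
    // boost::posix_time.
    #include <chrono>
    #include <cstdint>
    #include <iostream>

    int64_t TimeMillisSketch()
    {
        using namespace std::chrono;
        return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
    }

    int64_t TimeMicrosSketch()
    {
        using namespace std::chrono;
        return duration_cast<microseconds>(system_clock::now().time_since_epoch()).count();
    }

    int main()
    {
        std::cout << TimeMillisSketch() << " ms since epoch\n";
    }
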
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index c31c09d922..dd5240e3c0 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -2330,7 +2330,7 @@ Value listunspent(const Array& params, bool fHelp)
if (pk.IsPayToScriptHash()) {
CTxDestination address;
if (ExtractDestination(pk, address)) {
- const CScriptID& hash = boost::get<const CScriptID&>(address);
+ const CScriptID& hash = boost::get<CScriptID>(address);
CScript redeemScript;
if (pwalletMain->GetCScript(hash, redeemScript))
entry.push_back(Pair("redeemScript", HexStr(redeemScript.begin(), redeemScript.end())));
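
The rpcwallet.cpp fix above changes boost::get on the CTxDestination variant to request the alternative type itself rather than a const-reference type. A standalone sketch of that boost::variant access pattern (toy stand-in types, not the real CTxDestination or CScriptID):

    // Hedged sketch: boost::get<T>(v) names one of the variant's alternative
    // types; asking for the plain alternative type is the supported spelling.
    #include <boost/variant.hpp>
    #include <cassert>
    #include <string>

    struct NoDestination {};
    struct KeyId    { std::string hex; };
    struct ScriptId { std::string hex; };   // stand-in for CScriptID
    typedef boost::variant<NoDestination, KeyId, ScriptId> Destination;

    int main()
    {
        Destination dest = ScriptId{"a914"};
        const ScriptId& hash = boost::get<ScriptId>(dest); // get by alternative type
        assert(hash.hex == "a914");
    }
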
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 81a9932f36..cb20998d26 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -35,8 +35,8 @@ bool bSpendZeroConfChange = true;
bool fSendFreeTransactions = false;
bool fPayAtLeastCustomFee = true;
-/**
- * Fees smaller than this (in satoshi) are considered zero fee (for transaction creation)
+/**
+ * Fees smaller than this (in satoshi) are considered zero fee (for transaction creation)
* Override with -mintxfee
*/
CFeeRate CWallet::minTxFee = CFeeRate(1000);
@@ -529,7 +529,7 @@ bool CWallet::EncryptWallet(const SecureString& strWalletPassphrase)
delete pwalletdbEncryption;
}
// We now probably have half of our keys encrypted in memory, and half not...
- // die and let the user reload their unencrypted wallet.
+ // die and let the user reload the unencrypted wallet.
assert(false);
}
@@ -541,7 +541,7 @@ bool CWallet::EncryptWallet(const SecureString& strWalletPassphrase)
if (!pwalletdbEncryption->TxnCommit()) {
delete pwalletdbEncryption;
// We now have keys encrypted in memory, but not on disk...
- // die to avoid confusion and let the user reload their unencrypted wallet.
+ // die to avoid confusion and let the user reload the unencrypted wallet.
assert(false);
}
@@ -1097,10 +1097,13 @@ int CWallet::ScanForWalletTransactions(CBlockIndex* pindexStart, bool fUpdate)
void CWallet::ReacceptWalletTransactions()
{
- // If transcations aren't broadcasted, don't let them into local mempool either
+ // If transactions aren't being broadcasted, don't let them into local mempool either
if (!fBroadcastTransactions)
return;
LOCK2(cs_main, cs_wallet);
+ std::map<int64_t, CWalletTx*> mapSorted;
+
+ // Sort pending wallet transactions based on their initial wallet insertion order
BOOST_FOREACH(PAIRTYPE(const uint256, CWalletTx)& item, mapWallet)
{
const uint256& wtxid = item.first;
@@ -1109,13 +1112,19 @@ void CWallet::ReacceptWalletTransactions()
int nDepth = wtx.GetDepthInMainChain();
- if (!wtx.IsCoinBase() && nDepth < 0)
- {
- // Try to add to memory pool
- LOCK(mempool.cs);
- wtx.AcceptToMemoryPool(false);
+ if (!wtx.IsCoinBase() && nDepth < 0) {
+ mapSorted.insert(std::make_pair(wtx.nOrderPos, &wtx));
}
}
+
+ // Try to add wallet transactions to memory pool
+ BOOST_FOREACH(PAIRTYPE(const int64_t, CWalletTx*)& item, mapSorted)
+ {
+ CWalletTx& wtx = *(item.second);
+
+ LOCK(mempool.cs);
+ wtx.AcceptToMemoryPool(false);
+ }
}
bool CWalletTx::RelayWalletTransaction()