-rw-r--r--  .gitignore | 3
-rw-r--r--  depends/packages/openssl.mk | 6
-rw-r--r--  doc/release-notes.md | 67
-rw-r--r--  doc/release-notes/release-notes-0.9.3.md | 101
-rwxr-xr-x  qa/pull-tester/run-bitcoind-for-test.sh.in | 2
-rwxr-xr-x  qa/rpc-tests/forknotify.py | 35
-rwxr-xr-x  qa/rpc-tests/getblocktemplate.py | 20
-rwxr-xr-x  qa/rpc-tests/getchaintips.py | 50
-rwxr-xr-x  qa/rpc-tests/listtransactions.py | 50
-rwxr-xr-x  qa/rpc-tests/receivedby.py | 68
-rwxr-xr-x  qa/rpc-tests/send.sh | 2
-rwxr-xr-x  qa/rpc-tests/smartfees.py | 48
-rwxr-xr-x  qa/rpc-tests/test_framework.py | 77
-rw-r--r--  qa/rpc-tests/util.py | 4
-rw-r--r--  src/Makefile.am | 1
-rw-r--r--  src/Makefile.qt.include | 4
-rw-r--r--  src/addrman.cpp | 9
-rw-r--r--  src/addrman.h | 239
-rw-r--r--  src/allocators.h | 4
-rw-r--r--  src/bitcoin-cli.cpp | 6
-rw-r--r--  src/bloom.cpp | 1
-rw-r--r--  src/chain.cpp | 5
-rw-r--r--  src/chain.h | 37
-rw-r--r--  src/chainparams.cpp | 11
-rw-r--r--  src/chainparamsbase.cpp | 25
-rw-r--r--  src/chainparamsbase.h | 9
-rw-r--r--  src/core.cpp | 25
-rw-r--r--  src/core.h | 13
-rw-r--r--  src/core_read.cpp | 3
-rw-r--r--  src/core_write.cpp | 2
-rw-r--r--  src/crypter.cpp | 1
-rw-r--r--  src/db.h | 1
-rw-r--r--  src/init.cpp | 125
-rw-r--r--  src/key.cpp | 33
-rw-r--r--  src/key.h | 8
-rw-r--r--  src/keystore.cpp | 3
-rw-r--r--  src/keystore.h | 1
-rw-r--r--  src/leveldb/CONTRIBUTING.md | 36
-rw-r--r--  src/leveldb/Makefile | 28
-rw-r--r--  src/leveldb/README.md | 138
-rwxr-xr-x  src/leveldb/build_detect_platform | 15
-rw-r--r--  src/leveldb/db/db_bench.cc | 3
-rw-r--r--  src/leveldb/db/db_impl.cc | 4
-rw-r--r--  src/leveldb/db/db_test.cc | 2
-rw-r--r--  src/leveldb/db/dbformat.h | 6
-rw-r--r--  src/leveldb/db/dumpfile.cc | 225
-rw-r--r--  src/leveldb/db/leveldb_main.cc | 204
-rw-r--r--  src/leveldb/db/log_format.h | 4
-rw-r--r--  src/leveldb/db/log_reader.cc | 6
-rw-r--r--  src/leveldb/db/log_reader.h | 4
-rw-r--r--  src/leveldb/db/log_test.cc | 2
-rw-r--r--  src/leveldb/db/repair.cc | 2
-rw-r--r--  src/leveldb/db/skiplist.h | 5
-rw-r--r--  src/leveldb/db/write_batch_internal.h | 4
-rw-r--r--  src/leveldb/doc/bench/db_bench_tree_db.cc | 2
-rw-r--r--  src/leveldb/doc/impl.html | 4
-rw-r--r--  src/leveldb/doc/log_format.txt | 6
-rw-r--r--  src/leveldb/helpers/memenv/memenv.cc | 9
-rw-r--r--  src/leveldb/include/leveldb/cache.h | 2
-rw-r--r--  src/leveldb/include/leveldb/db.h | 2
-rw-r--r--  src/leveldb/include/leveldb/dumpfile.h | 25
-rw-r--r--  src/leveldb/include/leveldb/env.h | 2
-rw-r--r--  src/leveldb/include/leveldb/iterator.h | 2
-rw-r--r--  src/leveldb/include/leveldb/options.h | 2
-rw-r--r--  src/leveldb/port/atomic_pointer.h | 21
-rw-r--r--  src/leveldb/port/port_posix.h | 9
-rw-r--r--  src/leveldb/port/thread_annotations.h | 5
-rw-r--r--  src/leveldb/table/block.cc | 2
-rw-r--r--  src/leveldb/table/block_builder.h | 2
-rw-r--r--  src/leveldb/table/format.cc | 2
-rw-r--r--  src/leveldb/table/table.cc | 14
-rw-r--r--  src/leveldb/util/bloom.cc | 2
-rw-r--r--  src/leveldb/util/env_posix.cc | 10
-rw-r--r--  src/leveldb/util/hash.cc | 6
-rw-r--r--  src/leveldb/util/hash_test.cc | 54
-rw-r--r--  src/leveldb/util/logging.cc | 9
-rw-r--r--  src/leveldb/util/logging.h | 4
-rw-r--r--  src/leveldbwrapper.h | 1
-rw-r--r--  src/main.cpp | 871
-rw-r--r--  src/main.h | 55
-rw-r--r--  src/miner.cpp | 3
-rw-r--r--  src/net.cpp | 76
-rw-r--r--  src/net.h | 5
-rw-r--r--  src/noui.cpp | 6
-rw-r--r--  src/pow.cpp | 33
-rw-r--r--  src/pow.h | 2
-rw-r--r--  src/protocol.cpp | 1
-rw-r--r--  src/qt/bitcoinamountfield.h | 4
-rw-r--r--  src/qt/bitcoingui.cpp | 5
-rw-r--r--  src/qt/bitcoinstrings.cpp | 104
-rw-r--r--  src/qt/forms/rpcconsole.ui | 23
-rw-r--r--  src/qt/intro.cpp | 4
-rw-r--r--  src/qt/locale/bitcoin_en.ts | 673
-rw-r--r--  src/qt/monitoreddatamapper.cpp | 39
-rw-r--r--  src/qt/monitoreddatamapper.h | 34
-rw-r--r--  src/qt/optionsdialog.cpp | 4
-rw-r--r--  src/qt/optionsdialog.h | 4
-rw-r--r--  src/qt/recentrequeststablemodel.cpp | 1
-rw-r--r--  src/qt/rpcconsole.cpp | 1
-rw-r--r--  src/qt/test/paymentservertests.cpp | 1
-rw-r--r--  src/rpcblockchain.cpp | 5
-rw-r--r--  src/rpcmisc.cpp | 2
-rw-r--r--  src/rpcnet.cpp | 16
-rw-r--r--  src/rpcrawtransaction.cpp | 2
-rw-r--r--  src/rpcserver.cpp | 2
-rw-r--r--  src/rpcwallet.cpp | 2
-rw-r--r--  src/script/compressor.cpp | 3
-rw-r--r--  src/script/compressor.h | 5
-rw-r--r--  src/script/interpreter.cpp | 1
-rw-r--r--  src/script/script.cpp | 36
-rw-r--r--  src/script/script.h | 78
-rw-r--r--  src/script/sign.cpp | 2
-rw-r--r--  src/script/standard.cpp | 9
-rw-r--r--  src/script/standard.h | 12
-rw-r--r--  src/serialize.h | 661
-rw-r--r--  src/streams.h | 571
-rw-r--r--  src/test/DoS_tests.cpp | 45
-rw-r--r--  src/test/alert_tests.cpp | 1
-rw-r--r--  src/test/base58_tests.cpp | 1
-rw-r--r--  src/test/bloom_tests.cpp | 1
-rw-r--r--  src/test/checkblock_tests.cpp | 2
-rw-r--r--  src/test/key_tests.cpp | 2
-rw-r--r--  src/test/miner_tests.cpp | 2
-rw-r--r--  src/test/multisig_tests.cpp | 42
-rw-r--r--  src/test/script_P2SH_tests.cpp | 40
-rw-r--r--  src/test/script_tests.cpp | 90
-rw-r--r--  src/test/serialize_tests.cpp | 1
-rw-r--r--  src/test/sigopcount_tests.cpp | 10
-rw-r--r--  src/test/test_bitcoin.cpp | 2
-rw-r--r--  src/test/transaction_tests.cpp | 4
-rw-r--r--  src/test/util_tests.cpp | 12
-rw-r--r--  src/txdb.cpp | 2
-rw-r--r--  src/txmempool.cpp | 2
-rw-r--r--  src/txmempool.h | 15
-rw-r--r--  src/ui_interface.h | 3
-rw-r--r--  src/util.cpp | 12
-rw-r--r--  src/utilmoneystr.cpp | 1
-rw-r--r--  src/utilstrencodings.cpp | 8
-rw-r--r--  src/version.cpp | 10
-rw-r--r--  src/version.h | 5
-rw-r--r--  src/wallet.cpp | 2
-rw-r--r--  src/wallet.h | 2
-rw-r--r--  src/walletdb.cpp | 8
143 files changed, 3070 insertions, 2595 deletions
diff --git a/.gitignore b/.gitignore
index 7d00051f23..bafc5919c1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -75,6 +75,7 @@ Bitcoin-Qt.app
# Unit-tests
Makefile.test
bitcoin-qt_test
+src/test/buildenv.py
# Resources cpp
qrc_*.cpp
@@ -101,3 +102,5 @@ qa/pull-tester/run-bitcoind-for-test.sh
qa/pull-tester/build-tests.sh
!src/leveldb*/Makefile
+
+/doc/doxygen/
diff --git a/depends/packages/openssl.mk b/depends/packages/openssl.mk
index 3ccdaf6f2f..70b0b8d39f 100644
--- a/depends/packages/openssl.mk
+++ b/depends/packages/openssl.mk
@@ -1,14 +1,14 @@
package=openssl
-$(package)_version=1.0.1i
+$(package)_version=1.0.1j
$(package)_download_path=https://www.openssl.org/source
$(package)_file_name=$(package)-$($(package)_version).tar.gz
-$(package)_sha256_hash=3c179f46ca77069a6a0bac70212a9b3b838b2f66129cb52d568837fc79d8fcc7
+$(package)_sha256_hash=1b60ca8789ba6f03e8ef20da2293b8dc131c39d83814e775069f02d26354edf3
define $(package)_set_vars
$(package)_config_env=AR="$($(package)_ar)" RANLIB="$($(package)_ranlib)" CC="$($(package)_cc)"
$(package)_config_opts=--prefix=$(host_prefix) --openssldir=$(host_prefix)/etc/openssl no-zlib no-shared no-dso
$(package)_config_opts+=no-krb5 no-camellia no-capieng no-cast no-cms no-dtls1 no-gost no-gmp no-heartbeats no-idea no-jpake no-md2
-$(package)_config_opts+=no-mdc2 no-rc5 no-rdrand no-rfc3779 no-rsax no-sctp no-seed no-sha0 no-static_engine no-whirlpool no-rc2 no-rc4 no-ssl3
+$(package)_config_opts+=no-mdc2 no-rc5 no-rdrand no-rfc3779 no-rsax no-sctp no-seed no-sha0 no-static_engine no-whirlpool no-rc2 no-rc4 no-ssl2 no-ssl3
$(package)_config_opts+=$($(package)_cflags) $($(package)_cppflags)
$(package)_config_opts_x86_64_linux=-fPIC linux-x86_64
$(package)_config_opts_arm_linux=-fPIC linux-generic32
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 967a39a0e7..169ad71a0f 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -1,6 +1,27 @@
(note: this is a temporary file, to be added-to by anybody, and moved to
release-notes at release time)
+Block file backwards-compatibility warning
+===========================================
+
+Because release 0.10.0 makes use of headers-first synchronization and parallel
+block download, the block files and databases are not backwards-compatible
+with older versions of Bitcoin Core:
+
+* Blocks will be stored on disk out of order (in the order they are
+received, really), which makes it incompatible with some tools or
+other programs. Reindexing using earlier versions will also not work
+anymore as a result of this.
+
+* The block index database will now hold headers for which no block is
+stored on disk, which earlier versions won't support.
+
+If you want to be able to downgrade smoothly, make a backup of your entire data
+directory. Without this your node will need to start syncing (or importing from
+bootstrap.dat) anew afterwards.
+
+This does not affect wallet forward or backward compatibility.
+
Transaction fee changes
=======================
@@ -13,29 +34,53 @@ confirmation times.
Prior releases used hard-coded fees (and priorities), and would
sometimes create transactions that took a very long time to confirm.
+Statistics used to estimate fees and priorities are saved in the
+data directory in the `fee_estimates.dat` file just before
+program shutdown, and are read in at startup.
New Command Line Options
-========================
+---------------------------
--txconfirmtarget=n : create transactions that have enough fees (or priority)
+- `-txconfirmtarget=n` : create transactions that have enough fees (or priority)
so they are likely to confirm within n blocks (default: 1). This setting
is over-ridden by the -paytxfee option.
New RPC methods
-===============
+----------------
-Fee/Priority estimation
------------------------
-
-estimatefee nblocks : Returns approximate fee-per-1,000-bytes needed for
+- `estimatefee nblocks` : Returns approximate fee-per-1,000-bytes needed for
a transaction to be confirmed within nblocks. Returns -1 if not enough
transactions have been observed to compute a good estimate.
-estimatepriority nblocks : Returns approximate priority needed for
+- `estimatepriority nblocks` : Returns approximate priority needed for
a zero-fee transaction to confirm within nblocks. Returns -1 if not
enough free transactions have been observed to compute a good
estimate.
-Statistics used to estimate fees and priorities are saved in the
-data directory in the 'fee_estimates.dat' file just before
-program shutdown, and are read in at startup.
+RPC access control changes
+==========================================
+
+Subnet matching for the purpose of access control is now done
+by matching the binary network address, instead of with string wildcard matching.
+For the user this means that `-rpcallowip` takes a subnet specification, which can be
+
+- a single IP address (e.g. `1.2.3.4` or `fe80::0012:3456:789a:bcde`)
+- a network/CIDR (e.g. `1.2.3.0/24` or `fe80::0000/64`)
+- a network/netmask (e.g. `1.2.3.4/255.255.255.0` or `fe80::0012:3456:789a:bcde/ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff`)
+
+An arbitrary number of `-rpcallowip` arguments can be given. An incoming connection will be accepted if its origin address
+matches one of them.
+
+For example:
+
+| 0.9.x and before | 0.10.x |
+|--------------------------------------------|---------------------------------------|
+| `-rpcallowip=192.168.1.1` | `-rpcallowip=192.168.1.1` (unchanged) |
+| `-rpcallowip=192.168.1.*` | `-rpcallowip=192.168.1.0/24` |
+| `-rpcallowip=192.168.*` | `-rpcallowip=192.168.0.0/16` |
+| `-rpcallowip=*` (dangerous!) | `-rpcallowip=::/0` |
+
+Using wildcards will result in the rule being rejected with the following error in debug.log:
+
+ Error: Invalid -rpcallowip subnet specification: *. Valid are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24).
+
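As an aside on the binary subnet matching described in the release notes above, the following minimal sketch (illustrative only, using Python 3's standard `ipaddress` module rather than the C++ code this patch touches) shows how an `-rpcallowip` specification can be parsed into a network and matched against an incoming address instead of using string wildcards:

```python
import ipaddress

def rpc_origin_allowed(spec, origin):
    """Return True if origin falls inside the subnet given by spec.

    spec can be a single IP ("1.2.3.4"), a network/CIDR ("1.2.3.0/24"),
    or a network/netmask ("1.2.3.4/255.255.255.0"); a bare IP is treated
    as a /32 (or /128 for IPv6) network.
    """
    network = ipaddress.ip_network(spec, strict=False)
    return ipaddress.ip_address(origin) in network

# The old wildcard "192.168.1.*" corresponds to "192.168.1.0/24":
assert rpc_origin_allowed("192.168.1.0/24", "192.168.1.42")
assert not rpc_origin_allowed("192.168.1.0/24", "10.0.0.1")
```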
diff --git a/doc/release-notes/release-notes-0.9.3.md b/doc/release-notes/release-notes-0.9.3.md
new file mode 100644
index 0000000000..0765a360b2
--- /dev/null
+++ b/doc/release-notes/release-notes-0.9.3.md
@@ -0,0 +1,101 @@
+Bitcoin Core version 0.9.3 is now available from:
+
+ https://bitcoin.org/bin/0.9.3/
+
+This is a new minor version release, bringing only bug fixes and updated
+translations. Upgrading to this release is recommended.
+
+Please report bugs using the issue tracker at github:
+
+ https://github.com/bitcoin/bitcoin/issues
+
+Upgrading and downgrading
+==========================
+
+How to Upgrade
+--------------
+
+If you are running an older version, shut it down. Wait until it has completely
+shut down (which might take a few minutes for older versions), then run the
+installer (on Windows) or just copy over /Applications/Bitcoin-Qt (on Mac) or
+bitcoind/bitcoin-qt (on Linux).
+
+If you are upgrading from version 0.7.2 or earlier, the first time you run
+0.9.3 your blockchain files will be re-indexed, which will take anywhere from
+30 minutes to several hours, depending on the speed of your machine.
+
+Downgrading warnings
+--------------------
+
+The 'chainstate' for this release is not always compatible with previous
+releases, so if you run 0.9.x and then decide to switch back to a
+0.8.x release you might get a blockchain validation error when starting the
+old release (due to 'pruned outputs' being omitted from the index of
+unspent transaction outputs).
+
+Running the old release with the -reindex option will rebuild the chainstate
+data structures and correct the problem.
+
+Also, the first time you run a 0.8.x release on a 0.9 wallet it will rescan
+the blockchain for missing spent coins, which will take a long time (tens
+of minutes on a typical machine).
+
+0.9.3 Release notes
+=======================
+
+RPC:
+- Avoid a segfault on getblock if it can't read a block from disk
+- Add paranoid return value checks in base58
+
+Protocol and network code:
+- Don't poll showmyip.com, it doesn't exist anymore
+- Add a way to limit deserialized string lengths and use it
+- Add a new checkpoint at block 295,000
+- Increase IsStandard() scriptSig length
+- Avoid querying DNS seeds, if we have open connections
+- Remove a useless millisleep in socket handler
+- Stricter memory limits on CNode
+- Better orphan transaction handling
+- Add `-maxorphantx=<n>` and `-maxorphanblocks=<n>` options for control over the maximum orphan transactions and blocks
+
+Wallet:
+- Check redeemScript size does not exceed 520 byte limit
+- Ignore (and warn about) too-long redeemScripts while loading wallet
+
+GUI:
+- fix 'opens in testnet mode when presented with a BIP-72 link with no fallback'
+- AvailableCoins: acquire cs_main mutex
+- Fix unicode character display on MacOSX
+
+Miscellaneous:
+- key.cpp: fail with a friendlier message on missing ssl EC support
+- Remove bignum dependency for scripts
+- Upgrade OpenSSL to 1.0.1i (see https://www.openssl.org/news/secadv_20140806.txt - just to be sure, no critical issues for Bitcoin Core)
+- Upgrade miniupnpc to 1.9.20140701
+- Fix boost detection in build system on some platforms
+
+Credits
+--------
+
+Thanks to everyone who contributed to this release:
+
+- Andrew Poelstra
+- Cory Fields
+- Gavin Andresen
+- Jeff Garzik
+- Johnathan Corgan
+- Julian Haight
+- Michael Ford
+- Pavel Vasin
+- Peter Todd
+- phantomcircuit
+- Pieter Wuille
+- Rose Toomey
+- Ruben Dario Ponticelli
+- shshshsh
+- Trevin Hofmann
+- Warren Togami
+- Wladimir J. van der Laan
+- Zak Wilcox
+
+As well as everyone that helped translating on [Transifex](https://www.transifex.com/projects/p/bitcoin/).
diff --git a/qa/pull-tester/run-bitcoind-for-test.sh.in b/qa/pull-tester/run-bitcoind-for-test.sh.in
index 210fc3c42f..15363d09a6 100755
--- a/qa/pull-tester/run-bitcoind-for-test.sh.in
+++ b/qa/pull-tester/run-bitcoind-for-test.sh.in
@@ -10,7 +10,7 @@ touch "$DATADIR/regtest/debug.log"
tail -q -n 1 -F "$DATADIR/regtest/debug.log" | grep -m 1 -q "Done loading" &
WAITER=$!
PORT=`expr 10000 + $$ % 55536`
-"@abs_top_builddir@/src/bitcoind@EXEEXT@" -connect=0.0.0.0 -datadir="$DATADIR" -rpcuser=user -rpcpassword=pass -listen -keypool=3 -debug -debug=net -logtimestamps -port=$PORT -whitelist=127.0.0.1 -regtest -rpcport=`expr $PORT + 1` &
+"@abs_top_builddir@/src/bitcoind@EXEEXT@" -connect=0.0.0.0 -datadir="$DATADIR" -rpcuser=user -rpcpassword=pass -listen -keypool=3 -debug -debug=net -logtimestamps -checkmempool=0 -port=$PORT -whitelist=127.0.0.1 -regtest -rpcport=`expr $PORT + 1` &
BITCOIND=$!
#Install a watchdog.
diff --git a/qa/rpc-tests/forknotify.py b/qa/rpc-tests/forknotify.py
index a482f7cc5a..23bfb74175 100755
--- a/qa/rpc-tests/forknotify.py
+++ b/qa/rpc-tests/forknotify.py
@@ -17,31 +17,30 @@ class ForkNotifyTest(BitcoinTestFramework):
alert_filename = None # Set by setup_network
- def setup_network(self, test_dir):
- nodes = []
- self.alert_filename = os.path.join(test_dir, "alert.txt")
+ def setup_network(self):
+ self.nodes = []
+ self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
with open(self.alert_filename, 'w') as f:
pass # Just open then close to create zero-length file
- nodes.append(start_node(0, test_dir,
+ self.nodes.append(start_node(0, self.options.tmpdir,
["-blockversion=2", "-alertnotify=echo %s >> '" + self.alert_filename + "'"]))
# Node1 mines block.version=211 blocks
- nodes.append(start_node(1, test_dir,
+ self.nodes.append(start_node(1, self.options.tmpdir,
["-blockversion=211"]))
- connect_nodes(nodes[1], 0)
+ connect_nodes(self.nodes[1], 0)
- sync_blocks(nodes)
- return nodes
-
+ self.is_network_split = False
+ self.sync_all()
- def run_test(self, nodes):
+ def run_test(self):
# Mine 51 up-version blocks
- nodes[1].setgenerate(True, 51)
- sync_blocks(nodes)
+ self.nodes[1].setgenerate(True, 51)
+ self.sync_all()
# -alertnotify should trigger on the 51'st,
# but mine and sync another to give
# -alertnotify time to write
- nodes[1].setgenerate(True, 1)
- sync_blocks(nodes)
+ self.nodes[1].setgenerate(True, 1)
+ self.sync_all()
with open(self.alert_filename, 'r') as f:
alert_text = f.read()
@@ -50,10 +49,10 @@ class ForkNotifyTest(BitcoinTestFramework):
raise AssertionError("-alertnotify did not warn of up-version blocks")
# Mine more up-version blocks, should not get more alerts:
- nodes[1].setgenerate(True, 1)
- sync_blocks(nodes)
- nodes[1].setgenerate(True, 1)
- sync_blocks(nodes)
+ self.nodes[1].setgenerate(True, 1)
+ self.sync_all()
+ self.nodes[1].setgenerate(True, 1)
+ self.sync_all()
with open(self.alert_filename, 'r') as f:
alert_text2 = f.read()
diff --git a/qa/rpc-tests/getblocktemplate.py b/qa/rpc-tests/getblocktemplate.py
index 8d97719ec3..5ae5d09601 100755
--- a/qa/rpc-tests/getblocktemplate.py
+++ b/qa/rpc-tests/getblocktemplate.py
@@ -51,40 +51,40 @@ class GetBlockTemplateTest(BitcoinTestFramework):
Test longpolling with getblocktemplate.
'''
- def run_test(self, nodes):
+ def run_test(self):
print "Warning: this test will take about 70 seconds in the best case. Be patient."
- nodes[0].setgenerate(True, 10)
- templat = nodes[0].getblocktemplate()
+ self.nodes[0].setgenerate(True, 10)
+ templat = self.nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
- templat2 = nodes[0].getblocktemplate()
+ templat2 = self.nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
# Test 1: test that the longpolling wait if we do nothing
- thr = LongpollThread(nodes[0])
+ thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
- nodes[1].setgenerate(True, 1) # generate a block on another node
+ self.nodes[1].setgenerate(True, 1) # generate a block on another node
# check that thread will exit now that new transaction entered mempool
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
- thr = LongpollThread(nodes[0])
+ thr = LongpollThread(self.nodes[0])
thr.start()
- nodes[0].setgenerate(True, 1) # generate a block on another node
+ self.nodes[0].setgenerate(True, 1) # generate a block on another node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
- thr = LongpollThread(nodes[0])
+ thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
- (txid, txhex, fee) = random_transaction(nodes, Decimal("1.1"), Decimal("0.0"), Decimal("0.001"), 20)
+ (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), Decimal("0.0"), Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
diff --git a/qa/rpc-tests/getchaintips.py b/qa/rpc-tests/getchaintips.py
index a83c499743..842fcad2b2 100755
--- a/qa/rpc-tests/getchaintips.py
+++ b/qa/rpc-tests/getchaintips.py
@@ -3,22 +3,52 @@
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-# Exercise the getchaintips API.
-
-# Since the test framework does not generate orphan blocks, we can
-# unfortunately not check for them!
+# Exercise the getchaintips API. We introduce a network split, work
+# on chains of different lengths, and join the network together again.
+# This gives us two tips, verify that it works.
from test_framework import BitcoinTestFramework
from util import assert_equal
class GetChainTipsTest (BitcoinTestFramework):
- def run_test (self, nodes):
- res = nodes[0].getchaintips ()
- assert_equal (len (res), 1)
- res = res[0]
- assert_equal (res['branchlen'], 0)
- assert_equal (res['height'], 200)
+ def run_test (self):
+ BitcoinTestFramework.run_test (self)
+
+ tips = self.nodes[0].getchaintips ()
+ assert_equal (len (tips), 1)
+ assert_equal (tips[0]['branchlen'], 0)
+ assert_equal (tips[0]['height'], 200)
+
+ # Split the network and build two chains of different lengths.
+ self.split_network ()
+ self.nodes[0].setgenerate (True, 10);
+ self.nodes[2].setgenerate (True, 20);
+ self.sync_all ()
+
+ tips = self.nodes[1].getchaintips ()
+ assert_equal (len (tips), 1)
+ shortTip = tips[0]
+ assert_equal (shortTip['branchlen'], 0)
+ assert_equal (shortTip['height'], 210)
+
+ tips = self.nodes[3].getchaintips ()
+ assert_equal (len (tips), 1)
+ longTip = tips[0]
+ assert_equal (longTip['branchlen'], 0)
+ assert_equal (longTip['height'], 220)
+
+ # Join the network halves and check that we now have two tips
+ # (at least at the nodes that previously had the short chain).
+ self.join_network ()
+
+ tips = self.nodes[0].getchaintips ()
+ assert_equal (len (tips), 2)
+ assert_equal (tips[0], longTip)
+
+ assert_equal (tips[1]['branchlen'], 10)
+ tips[1]['branchlen'] = 0;
+ assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
diff --git a/qa/rpc-tests/listtransactions.py b/qa/rpc-tests/listtransactions.py
index 50385b4372..6102052a6f 100755
--- a/qa/rpc-tests/listtransactions.py
+++ b/qa/rpc-tests/listtransactions.py
@@ -33,62 +33,64 @@ def check_array_result(object_array, to_match, expected):
class ListTransactionsTest(BitcoinTestFramework):
- def run_test(self, nodes):
+ def run_test(self):
# Simple send, 0 to 1:
- txid = nodes[0].sendtoaddress(nodes[1].getnewaddress(), 0.1)
- sync_mempools(nodes)
- check_array_result(nodes[0].listtransactions(),
+ txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
+ self.sync_all()
+ check_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
- check_array_result(nodes[1].listtransactions(),
+ check_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
- nodes[0].setgenerate(True, 1)
- sync_blocks(nodes)
- check_array_result(nodes[0].listtransactions(),
+ self.nodes[0].setgenerate(True, 1)
+ self.sync_all()
+ check_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
- check_array_result(nodes[1].listtransactions(),
+ check_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
- txid = nodes[0].sendtoaddress(nodes[0].getnewaddress(), 0.2)
- check_array_result(nodes[0].listtransactions(),
+ txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
+ check_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
- check_array_result(nodes[0].listtransactions(),
+ check_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
- send_to = { nodes[0].getnewaddress() : 0.11, nodes[1].getnewaddress() : 0.22,
- nodes[0].getaccountaddress("from1") : 0.33, nodes[1].getaccountaddress("toself") : 0.44 }
- txid = nodes[1].sendmany("", send_to)
- sync_mempools(nodes)
- check_array_result(nodes[1].listtransactions(),
+ send_to = { self.nodes[0].getnewaddress() : 0.11,
+ self.nodes[1].getnewaddress() : 0.22,
+ self.nodes[0].getaccountaddress("from1") : 0.33,
+ self.nodes[1].getaccountaddress("toself") : 0.44 }
+ txid = self.nodes[1].sendmany("", send_to)
+ self.sync_all()
+ check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
- check_array_result(nodes[0].listtransactions(),
+ check_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
- check_array_result(nodes[1].listtransactions(),
+ check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
- check_array_result(nodes[1].listtransactions(),
+ check_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
- check_array_result(nodes[1].listtransactions(),
+ check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
- check_array_result(nodes[0].listtransactions(),
+ check_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
- check_array_result(nodes[1].listtransactions(),
+ check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
- check_array_result(nodes[1].listtransactions(),
+ check_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
diff --git a/qa/rpc-tests/receivedby.py b/qa/rpc-tests/receivedby.py
index 61f5e0452b..7170255242 100755
--- a/qa/rpc-tests/receivedby.py
+++ b/qa/rpc-tests/receivedby.py
@@ -54,36 +54,36 @@ def check_array_result(object_array, to_match, expected, should_not_find = False
class ReceivedByTest(BitcoinTestFramework):
- def run_test(self, nodes):
+ def run_test(self):
'''
listreceivedbyaddress Test
'''
# Send from node 0 to 1
- addr = nodes[1].getnewaddress()
- txid = nodes[0].sendtoaddress(addr, 0.1)
- sync_mempools(nodes)
+ addr = self.nodes[1].getnewaddress()
+ txid = self.nodes[0].sendtoaddress(addr, 0.1)
+ self.sync_all()
#Check not listed in listreceivedbyaddress because has 0 confirmations
- check_array_result(nodes[1].listreceivedbyaddress(),
+ check_array_result(self.nodes[1].listreceivedbyaddress(),
{"address":addr},
{ },
True)
#Bury Tx under 10 block so it will be returned by listreceivedbyaddress
- nodes[1].setgenerate(True, 10)
- sync_blocks(nodes)
- check_array_result(nodes[1].listreceivedbyaddress(),
+ self.nodes[1].setgenerate(True, 10)
+ self.sync_all()
+ check_array_result(self.nodes[1].listreceivedbyaddress(),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
#With min confidence < 10
- check_array_result(nodes[1].listreceivedbyaddress(5),
+ check_array_result(self.nodes[1].listreceivedbyaddress(5),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
#With min confidence > 10, should not find Tx
- check_array_result(nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)
+ check_array_result(self.nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)
#Empty Tx
- addr = nodes[1].getnewaddress()
- check_array_result(nodes[1].listreceivedbyaddress(0,True),
+ addr = self.nodes[1].getnewaddress()
+ check_array_result(self.nodes[1].listreceivedbyaddress(0,True),
{"address":addr},
{"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})
@@ -91,24 +91,24 @@ class ReceivedByTest(BitcoinTestFramework):
getreceivedbyaddress Test
'''
# Send from node 0 to 1
- addr = nodes[1].getnewaddress()
- txid = nodes[0].sendtoaddress(addr, 0.1)
- sync_mempools(nodes)
+ addr = self.nodes[1].getnewaddress()
+ txid = self.nodes[0].sendtoaddress(addr, 0.1)
+ self.sync_all()
#Check balance is 0 because of 0 confirmations
- balance = nodes[1].getreceivedbyaddress(addr)
+ balance = self.nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
#Check balance is 0.1
- balance = nodes[1].getreceivedbyaddress(addr,0)
+ balance = self.nodes[1].getreceivedbyaddress(addr,0)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
#Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress
- nodes[1].setgenerate(True, 10)
- sync_blocks(nodes)
- balance = nodes[1].getreceivedbyaddress(addr)
+ self.nodes[1].setgenerate(True, 10)
+ self.sync_all()
+ balance = self.nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
@@ -116,40 +116,40 @@ class ReceivedByTest(BitcoinTestFramework):
listreceivedbyaccount + getreceivedbyaccount Test
'''
#set pre-state
- addrArr = nodes[1].getnewaddress()
- account = nodes[1].getaccount(addrArr)
- received_by_account_json = get_sub_array_from_array(nodes[1].listreceivedbyaccount(),{"account":account})
+ addrArr = self.nodes[1].getnewaddress()
+ account = self.nodes[1].getaccount(addrArr)
+ received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
- balance_by_account = rec_by_accountArr = nodes[1].getreceivedbyaccount(account)
+ balance_by_account = rec_by_accountArr = self.nodes[1].getreceivedbyaccount(account)
- txid = nodes[0].sendtoaddress(addr, 0.1)
+ txid = self.nodes[0].sendtoaddress(addr, 0.1)
# listreceivedbyaccount should return received_by_account_json because of 0 confirmations
- check_array_result(nodes[1].listreceivedbyaccount(),
+ check_array_result(self.nodes[1].listreceivedbyaccount(),
{"account":account},
received_by_account_json)
# getreceivedbyaddress should return same balance because of 0 confirmations
- balance = nodes[1].getreceivedbyaccount(account)
+ balance = self.nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account:
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
- nodes[1].setgenerate(True, 10)
- sync_blocks(nodes)
+ self.nodes[1].setgenerate(True, 10)
+ self.sync_all()
# listreceivedbyaccount should return updated account balance
- check_array_result(nodes[1].listreceivedbyaccount(),
+ check_array_result(self.nodes[1].listreceivedbyaccount(),
{"account":account},
{"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})
# getreceivedbyaddress should return updates balance
- balance = nodes[1].getreceivedbyaccount(account)
+ balance = self.nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account + Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
#Create a new account named "mynewaccount" that has a 0 balance
- nodes[1].getaccountaddress("mynewaccount")
- received_by_account_json = get_sub_array_from_array(nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
+ self.nodes[1].getaccountaddress("mynewaccount")
+ received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
@@ -158,7 +158,7 @@ class ReceivedByTest(BitcoinTestFramework):
raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))
# Test getreceivedbyaccount for 0 amount accounts
- balance = nodes[1].getreceivedbyaccount("mynewaccount")
+ balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
diff --git a/qa/rpc-tests/send.sh b/qa/rpc-tests/send.sh
index 8c0f114590..37367865c1 100755
--- a/qa/rpc-tests/send.sh
+++ b/qa/rpc-tests/send.sh
@@ -16,7 +16,7 @@ fi
if [ $1 = "-STOP" ]; then
if [ -s ${PIDFILE} ]; then
- kill -s ${SIGNAL} $(<${PIDFILE})
+ kill -s ${SIGNAL} $(<$PIDFILE 2>/dev/null) 2>/dev/null
fi
exit 0
fi
diff --git a/qa/rpc-tests/smartfees.py b/qa/rpc-tests/smartfees.py
index 352a1de2d0..065bdb01c3 100755
--- a/qa/rpc-tests/smartfees.py
+++ b/qa/rpc-tests/smartfees.py
@@ -10,48 +10,48 @@ from util import *
class EstimateFeeTest(BitcoinTestFramework):
- def setup_network(self, test_dir):
- nodes = []
- nodes.append(start_node(0, test_dir,
+ def setup_network(self):
+ self.nodes = []
+ self.nodes.append(start_node(0, self.options.tmpdir,
["-debug=mempool", "-debug=estimatefee"]))
# Node1 mines small-but-not-tiny blocks, and allows free transactions.
# NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
# so blockmaxsize of 2,000 is really just 1,000 bytes (room enough for
# 6 or 7 transactions)
- nodes.append(start_node(1, test_dir,
+ self.nodes.append(start_node(1, self.options.tmpdir,
["-blockprioritysize=1500", "-blockmaxsize=2000",
"-debug=mempool", "-debug=estimatefee"]))
- connect_nodes(nodes[1], 0)
+ connect_nodes(self.nodes[1], 0)
# Node2 is a stingy miner, that
# produces very small blocks (room for only 3 or so transactions)
node2args = [ "-blockprioritysize=0", "-blockmaxsize=1500",
"-debug=mempool", "-debug=estimatefee"]
- nodes.append(start_node(2, test_dir, node2args))
- connect_nodes(nodes[2], 0)
+ self.nodes.append(start_node(2, self.options.tmpdir, node2args))
+ connect_nodes(self.nodes[2], 0)
- sync_blocks(nodes)
- return nodes
+ self.is_network_split = False
+ self.sync_all()
- def run_test(self, nodes):
+ def run_test(self):
# Prime the memory pool with pairs of transactions
# (high-priority, random fee and zero-priority, random fee)
min_fee = Decimal("0.001")
fees_per_kb = [];
for i in range(12):
- (txid, txhex, fee) = random_zeropri_transaction(nodes, Decimal("1.1"),
+ (txid, txhex, fee) = random_zeropri_transaction(self.nodes, Decimal("1.1"),
min_fee, min_fee, 20)
tx_kbytes = (len(txhex)/2)/1000.0
fees_per_kb.append(float(fee)/tx_kbytes)
# Mine blocks with node2 until the memory pool clears:
- count_start = nodes[2].getblockcount()
- while len(nodes[2].getrawmempool()) > 0:
- nodes[2].setgenerate(True, 1)
- sync_blocks(nodes)
+ count_start = self.nodes[2].getblockcount()
+ while len(self.nodes[2].getrawmempool()) > 0:
+ self.nodes[2].setgenerate(True, 1)
+ self.sync_all()
- all_estimates = [ nodes[0].estimatefee(i) for i in range(1,20) ]
+ all_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
print("Fee estimates, super-stingy miner: "+str([str(e) for e in all_estimates]))
# Estimates should be within the bounds of what transactions fees actually were:
@@ -63,25 +63,25 @@ class EstimateFeeTest(BitcoinTestFramework):
# Generate transactions while mining 30 more blocks, this time with node1:
for i in range(30):
for j in range(random.randrange(6-4,6+4)):
- (txid, txhex, fee) = random_transaction(nodes, Decimal("1.1"),
+ (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"),
Decimal("0.0"), min_fee, 20)
tx_kbytes = (len(txhex)/2)/1000.0
fees_per_kb.append(float(fee)/tx_kbytes)
- nodes[1].setgenerate(True, 1)
- sync_blocks(nodes)
+ self.nodes[1].setgenerate(True, 1)
+ self.sync_all()
- all_estimates = [ nodes[0].estimatefee(i) for i in range(1,20) ]
+ all_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
print("Fee estimates, more generous miner: "+str([ str(e) for e in all_estimates]))
for e in filter(lambda x: x >= 0, all_estimates):
if float(e)+delta < min(fees_per_kb) or float(e)-delta > max(fees_per_kb):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"%(float(e), min_fee_kb, max_fee_kb))
# Finish by mining a normal-sized block:
- while len(nodes[0].getrawmempool()) > 0:
- nodes[0].setgenerate(True, 1)
- sync_blocks(nodes)
+ while len(self.nodes[0].getrawmempool()) > 0:
+ self.nodes[0].setgenerate(True, 1)
+ self.sync_all()
- final_estimates = [ nodes[0].estimatefee(i) for i in range(1,20) ]
+ final_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
print("Final fee estimates: "+str([ str(e) for e in final_estimates]))
diff --git a/qa/rpc-tests/test_framework.py b/qa/rpc-tests/test_framework.py
index 5a18556655..8c8453054d 100755
--- a/qa/rpc-tests/test_framework.py
+++ b/qa/rpc-tests/test_framework.py
@@ -21,22 +21,64 @@ from util import *
class BitcoinTestFramework(object):
# These may be over-ridden by subclasses:
- def run_test(self, nodes):
+ def run_test(self):
+ for node in self.nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*50)
def add_options(self, parser):
pass
- def setup_chain(self, tmp_directory):
- print("Initializing test directory "+tmp_directory)
- initialize_chain(tmp_directory)
-
- def setup_network(self, tmp_directory):
- nodes = start_nodes(2, tmp_directory)
- connect_nodes(nodes[1], 0)
- sync_blocks(nodes)
- return nodes
+ def setup_chain(self):
+ print("Initializing test directory "+self.options.tmpdir)
+ initialize_chain(self.options.tmpdir)
+
+ def setup_network(self, split = False):
+ self.nodes = start_nodes(4, self.options.tmpdir)
+
+ # Connect the nodes as a "chain". This allows us
+ # to split the network between nodes 1 and 2 to get
+ # two halves that can work on competing chains.
+
+ # If we joined network halves, connect the nodes from the joint
+ # on outward. This ensures that chains are properly reorganised.
+ if not split:
+ connect_nodes_bi(self.nodes, 1, 2)
+ sync_blocks(self.nodes[1:2])
+ sync_mempools(self.nodes[1:2])
+
+ connect_nodes_bi(self.nodes, 0, 1)
+ connect_nodes_bi(self.nodes, 2, 3)
+ self.is_network_split = split
+ self.sync_all()
+
+ def split_network(self):
+ """
+ Split the network of four nodes into nodes 0/1 and 2/3.
+ """
+ assert not self.is_network_split
+ stop_nodes(self.nodes)
+ wait_bitcoinds()
+ self.setup_network(True)
+
+ def sync_all(self):
+ if self.is_network_split:
+ sync_blocks(self.nodes[:1])
+ sync_blocks(self.nodes[2:])
+ sync_mempools(self.nodes[:1])
+ sync_mempools(self.nodes[2:])
+ else:
+ sync_blocks(self.nodes)
+ sync_mempools(self.nodes)
+
+ def join_network(self):
+ """
+ Join the (previously split) network halves together.
+ """
+ assert self.is_network_split
+ stop_nodes(self.nodes)
+ wait_bitcoinds()
+ self.setup_network(False)
def main(self):
import optparse
@@ -48,23 +90,28 @@ class BitcoinTestFramework(object):
help="Source directory containing bitcoind/bitcoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
+ parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
+ help="Print out all RPC calls as they are made")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
+ if self.options.trace_rpc:
+ import logging
+ logging.basicConfig(level=logging.DEBUG)
+
os.environ['PATH'] = self.options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
- nodes = []
try:
if not os.path.isdir(self.options.tmpdir):
os.makedirs(self.options.tmpdir)
- self.setup_chain(self.options.tmpdir)
+ self.setup_chain()
- nodes = self.setup_network(self.options.tmpdir)
+ self.setup_network()
- self.run_test(nodes)
+ self.run_test()
success = True
@@ -80,7 +127,7 @@ class BitcoinTestFramework(object):
if not self.options.nocleanup:
print("Cleaning up")
- stop_nodes(nodes)
+ stop_nodes(self.nodes)
wait_bitcoinds()
shutil.rmtree(self.options.tmpdir)
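The reworked test framework above keeps the node handles on `self` and manages a four-node chain that can be split into two halves and rejoined. A minimal sketch of a test written against this interface (a hypothetical test following the pattern used by getchaintips.py and forknotify.py in this patch, not a file the patch adds):

```python
#!/usr/bin/env python
# Hypothetical example of a test using the reworked framework interface.
from test_framework import BitcoinTestFramework
from util import assert_equal

class ExampleSplitTest(BitcoinTestFramework):
    def run_test(self):
        # Nodes 0/1 and 2/3 work on competing chains while the network is split.
        self.split_network()
        self.nodes[0].setgenerate(True, 5)   # short side: 200 + 5 blocks
        self.nodes[2].setgenerate(True, 8)   # long side:  200 + 8 blocks
        self.sync_all()

        # Rejoining reconnects the halves; the short side reorganises
        # onto the longer chain.
        self.join_network()
        assert_equal(self.nodes[0].getblockcount(),
                     self.nodes[3].getblockcount())

if __name__ == '__main__':
    ExampleSplitTest().main()
```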
diff --git a/qa/rpc-tests/util.py b/qa/rpc-tests/util.py
index 87baadc5d6..036ac577e6 100644
--- a/qa/rpc-tests/util.py
+++ b/qa/rpc-tests/util.py
@@ -194,6 +194,10 @@ def connect_nodes(from_connection, node_num):
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
+def connect_nodes_bi(nodes, a, b):
+ connect_nodes(nodes[a], b)
+ connect_nodes(nodes[b], a)
+
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
diff --git a/src/Makefile.am b/src/Makefile.am
index 42ecda155c..42f325e09d 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -107,6 +107,7 @@ BITCOIN_CORE_H = \
script/sign.h \
script/standard.h \
serialize.h \
+ streams.h \
sync.h \
threadsafety.h \
timedata.h \
diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include
index 1c69773647..ac6d60df03 100644
--- a/src/Makefile.qt.include
+++ b/src/Makefile.qt.include
@@ -110,7 +110,6 @@ QT_MOC_CPP = \
qt/moc_intro.cpp \
qt/moc_macdockiconhandler.cpp \
qt/moc_macnotificationhandler.cpp \
- qt/moc_monitoreddatamapper.cpp \
qt/moc_notificator.cpp \
qt/moc_openuridialog.cpp \
qt/moc_optionsdialog.cpp \
@@ -177,7 +176,6 @@ BITCOIN_QT_H = \
qt/intro.h \
qt/macdockiconhandler.h \
qt/macnotificationhandler.h \
- qt/monitoreddatamapper.h \
qt/networkstyle.h \
qt/notificator.h \
qt/openuridialog.h \
@@ -269,7 +267,6 @@ BITCOIN_QT_CPP = \
qt/csvmodelwriter.cpp \
qt/guiutil.cpp \
qt/intro.cpp \
- qt/monitoreddatamapper.cpp \
qt/networkstyle.cpp \
qt/notificator.cpp \
qt/optionsdialog.cpp \
@@ -368,6 +365,7 @@ if USE_LIBSECP256K1
qt_bitcoin_qt_LDADD += secp256k1/libsecp256k1.la
endif
qt_bitcoin_qt_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(QT_LDFLAGS)
+qt_bitcoin_qt_LIBTOOLFLAGS = --tag CXX
#locale/foo.ts -> locale/foo.qm
QT_QM=$(QT_TS:.ts=.qm)
diff --git a/src/addrman.cpp b/src/addrman.cpp
index 7b674a66e7..1982db52ae 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -1,11 +1,12 @@
// Copyright (c) 2012 Pieter Wuille
-// Distributed under the MIT/X11 software license, see the accompanying
+// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "addrman.h"
#include "hash.h"
#include "serialize.h"
+#include "streams.h"
using namespace std;
@@ -39,7 +40,7 @@ int CAddrInfo::GetNewBucket(const std::vector<unsigned char>& nKey, const CNetAd
bool CAddrInfo::IsTerrible(int64_t nNow) const
{
- if (nLastTry && nLastTry >= nNow - 60) // never remove things tried the last minute
+ if (nLastTry && nLastTry >= nNow - 60) // never remove things tried in the last minute
return false;
if (nTime > nNow + 10 * 60) // came in a flying DeLorean
@@ -131,7 +132,7 @@ int CAddrMan::SelectTried(int nKBucket)
{
std::vector<int>& vTried = vvTried[nKBucket];
- // random shuffle the first few elements (using the entire list)
+ // randomly shuffle the first few elements (using the entire list)
// find the least recently tried among them
int64_t nOldest = -1;
int nOldestPos = -1;
@@ -211,7 +212,7 @@ void CAddrMan::MakeTried(CAddrInfo& info, int nId, int nOrigin)
assert(info.nRefCount == 0);
- // what tried bucket to move the entry to
+ // which tried bucket to move the entry to
int nKBucket = info.GetTriedBucket(nKey);
std::vector<int>& vTried = vvTried[nKBucket];
diff --git a/src/addrman.h b/src/addrman.h
index 5fd698f18a..914086fc76 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -1,5 +1,5 @@
// Copyright (c) 2012 Pieter Wuille
-// Distributed under the MIT/X11 software license, see the accompanying
+// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef _BITCOIN_ADDRMAN
@@ -17,29 +17,31 @@
#include <stdint.h>
#include <vector>
-/** Extended statistics about a CAddress */
+/**
+ * Extended statistics about a CAddress
+ */
class CAddrInfo : public CAddress
{
private:
- // where knowledge about this address first came from
+ //! where knowledge about this address first came from
CNetAddr source;
- // last successful connection by us
+ //! last successful connection by us
int64_t nLastSuccess;
- // last try whatsoever by us:
+ //! last try whatsoever by us:
// int64_t CAddress::nLastTry
- // connection attempts since last successful attempt
+ //! connection attempts since last successful attempt
int nAttempts;
- // reference count in new sets (memory only)
+ //! reference count in new sets (memory only)
int nRefCount;
- // in tried set? (memory only)
+ //! in tried set? (memory only)
bool fInTried;
- // position in vRandom
+ //! position in vRandom
int nRandomPos;
friend class CAddrMan;
@@ -76,200 +78,205 @@ public:
Init();
}
- // Calculate in which "tried" bucket this entry belongs
+ //! Calculate in which "tried" bucket this entry belongs
int GetTriedBucket(const std::vector<unsigned char> &nKey) const;
- // Calculate in which "new" bucket this entry belongs, given a certain source
+ //! Calculate in which "new" bucket this entry belongs, given a certain source
int GetNewBucket(const std::vector<unsigned char> &nKey, const CNetAddr& src) const;
- // Calculate in which "new" bucket this entry belongs, using its default source
+ //! Calculate in which "new" bucket this entry belongs, using its default source
int GetNewBucket(const std::vector<unsigned char> &nKey) const
{
return GetNewBucket(nKey, source);
}
- // Determine whether the statistics about this entry are bad enough so that it can just be deleted
+ //! Determine whether the statistics about this entry are bad enough so that it can just be deleted
bool IsTerrible(int64_t nNow = GetAdjustedTime()) const;
- // Calculate the relative chance this entry should be given when selecting nodes to connect to
+ //! Calculate the relative chance this entry should be given when selecting nodes to connect to
double GetChance(int64_t nNow = GetAdjustedTime()) const;
};
-// Stochastic address manager
-//
-// Design goals:
-// * Only keep a limited number of addresses around, so that addr.dat and memory requirements do not grow without bound.
-// * Keep the address tables in-memory, and asynchronously dump the entire to able in addr.dat.
-// * Make sure no (localized) attacker can fill the entire table with his nodes/addresses.
-//
-// To that end:
-// * Addresses are organized into buckets.
-// * Address that have not yet been tried go into 256 "new" buckets.
-// * Based on the address range (/16 for IPv4) of source of the information, 32 buckets are selected at random
-// * The actual bucket is chosen from one of these, based on the range the address itself is located.
-// * One single address can occur in up to 4 different buckets, to increase selection chances for addresses that
-// are seen frequently. The chance for increasing this multiplicity decreases exponentially.
-// * When adding a new address to a full bucket, a randomly chosen entry (with a bias favoring less recently seen
-// ones) is removed from it first.
-// * Addresses of nodes that are known to be accessible go into 64 "tried" buckets.
-// * Each address range selects at random 4 of these buckets.
-// * The actual bucket is chosen from one of these, based on the full address.
-// * When adding a new good address to a full bucket, a randomly chosen entry (with a bias favoring less recently
-// tried ones) is evicted from it, back to the "new" buckets.
-// * Bucket selection is based on cryptographic hashing, using a randomly-generated 256-bit key, which should not
-// be observable by adversaries.
-// * Several indexes are kept for high performance. Defining DEBUG_ADDRMAN will introduce frequent (and expensive)
-// consistency checks for the entire data structure.
-
-// total number of buckets for tried addresses
+/** Stochastic address manager
+ *
+ * Design goals:
+ * * Keep the address tables in-memory, and asynchronously dump the entire to able in peers.dat.
+ * * Make sure no (localized) attacker can fill the entire table with his nodes/addresses.
+ *
+ * To that end:
+ * * Addresses are organized into buckets.
+ * * Address that have not yet been tried go into 256 "new" buckets.
+ * * Based on the address range (/16 for IPv4) of source of the information, 32 buckets are selected at random
+ * * The actual bucket is chosen from one of these, based on the range the address itself is located.
+ * * One single address can occur in up to 4 different buckets, to increase selection chances for addresses that
+ * are seen frequently. The chance for increasing this multiplicity decreases exponentially.
+ * * When adding a new address to a full bucket, a randomly chosen entry (with a bias favoring less recently seen
+ * ones) is removed from it first.
+ * * Addresses of nodes that are known to be accessible go into 64 "tried" buckets.
+ * * Each address range selects at random 4 of these buckets.
+ * * The actual bucket is chosen from one of these, based on the full address.
+ * * When adding a new good address to a full bucket, a randomly chosen entry (with a bias favoring less recently
+ * tried ones) is evicted from it, back to the "new" buckets.
+ * * Bucket selection is based on cryptographic hashing, using a randomly-generated 256-bit key, which should not
+ * be observable by adversaries.
+ * * Several indexes are kept for high performance. Defining DEBUG_ADDRMAN will introduce frequent (and expensive)
+ * consistency checks for the entire data structure.
+ */
+
+//! total number of buckets for tried addresses
#define ADDRMAN_TRIED_BUCKET_COUNT 64
-// maximum allowed number of entries in buckets for tried addresses
+//! maximum allowed number of entries in buckets for tried addresses
#define ADDRMAN_TRIED_BUCKET_SIZE 64
-// total number of buckets for new addresses
+//! total number of buckets for new addresses
#define ADDRMAN_NEW_BUCKET_COUNT 256
-// maximum allowed number of entries in buckets for new addresses
+//! maximum allowed number of entries in buckets for new addresses
#define ADDRMAN_NEW_BUCKET_SIZE 64
-// over how many buckets entries with tried addresses from a single group (/16 for IPv4) are spread
+//! over how many buckets entries with tried addresses from a single group (/16 for IPv4) are spread
#define ADDRMAN_TRIED_BUCKETS_PER_GROUP 4
-// over how many buckets entries with new addresses originating from a single group are spread
+//! over how many buckets entries with new addresses originating from a single group are spread
#define ADDRMAN_NEW_BUCKETS_PER_SOURCE_GROUP 32
-// in how many buckets for entries with new addresses a single address may occur
+//! in how many buckets for entries with new addresses a single address may occur
#define ADDRMAN_NEW_BUCKETS_PER_ADDRESS 4
-// how many entries in a bucket with tried addresses are inspected, when selecting one to replace
+//! how many entries in a bucket with tried addresses are inspected, when selecting one to replace
#define ADDRMAN_TRIED_ENTRIES_INSPECT_ON_EVICT 4
-// how old addresses can maximally be
+//! how old addresses can maximally be
#define ADDRMAN_HORIZON_DAYS 30
-// after how many failed attempts we give up on a new node
+//! after how many failed attempts we give up on a new node
#define ADDRMAN_RETRIES 3
-// how many successive failures are allowed ...
+//! how many successive failures are allowed ...
#define ADDRMAN_MAX_FAILURES 10
-// ... in at least this many days
+//! ... in at least this many days
#define ADDRMAN_MIN_FAIL_DAYS 7
-// the maximum percentage of nodes to return in a getaddr call
+//! the maximum percentage of nodes to return in a getaddr call
#define ADDRMAN_GETADDR_MAX_PCT 23
-// the maximum number of nodes to return in a getaddr call
+//! the maximum number of nodes to return in a getaddr call
#define ADDRMAN_GETADDR_MAX 2500
-/** Stochastical (IP) address manager */
+/**
+ * Stochastical (IP) address manager
+ */
class CAddrMan
{
private:
- // critical section to protect the inner data structures
+ //! critical section to protect the inner data structures
mutable CCriticalSection cs;
- // secret key to randomize bucket select with
+ //! secret key to randomize bucket select with
std::vector<unsigned char> nKey;
- // last used nId
+ //! last used nId
int nIdCount;
- // table with information about all nIds
+ //! table with information about all nIds
std::map<int, CAddrInfo> mapInfo;
- // find an nId based on its network address
+ //! find an nId based on its network address
std::map<CNetAddr, int> mapAddr;
- // randomly-ordered vector of all nIds
+ //! randomly-ordered vector of all nIds
std::vector<int> vRandom;
// number of "tried" entries
int nTried;
- // list of "tried" buckets
+ //! list of "tried" buckets
std::vector<std::vector<int> > vvTried;
- // number of (unique) "new" entries
+ //! number of (unique) "new" entries
int nNew;
- // list of "new" buckets
+ //! list of "new" buckets
std::vector<std::set<int> > vvNew;
protected:
- // Find an entry.
+ //! Find an entry.
CAddrInfo* Find(const CNetAddr& addr, int *pnId = NULL);
- // find an entry, creating it if necessary.
- // nTime and nServices of found node is updated, if necessary.
+ //! find an entry, creating it if necessary.
+ //! nTime and nServices of the found node are updated, if necessary.
CAddrInfo* Create(const CAddress &addr, const CNetAddr &addrSource, int *pnId = NULL);
- // Swap two elements in vRandom.
+ //! Swap two elements in vRandom.
void SwapRandom(unsigned int nRandomPos1, unsigned int nRandomPos2);
- // Return position in given bucket to replace.
+ //! Return position in given bucket to replace.
int SelectTried(int nKBucket);
- // Remove an element from a "new" bucket.
- // This is the only place where actual deletes occur.
- // They are never deleted while in the "tried" table, only possibly evicted back to the "new" table.
+ //! Remove an element from a "new" bucket.
+ //! This is the only place where actual deletions occur.
+ //! Elements are never deleted while in the "tried" table, only possibly evicted back to the "new" table.
int ShrinkNew(int nUBucket);
- // Move an entry from the "new" table(s) to the "tried" table
- // @pre vvUnkown[nOrigin].count(nId) != 0
+ //! Move an entry from the "new" table(s) to the "tried" table
+ //! @pre vvUnkown[nOrigin].count(nId) != 0
void MakeTried(CAddrInfo& info, int nId, int nOrigin);
- // Mark an entry "good", possibly moving it from "new" to "tried".
+ //! Mark an entry "good", possibly moving it from "new" to "tried".
void Good_(const CService &addr, int64_t nTime);
- // Add an entry to the "new" table.
+ //! Add an entry to the "new" table.
bool Add_(const CAddress &addr, const CNetAddr& source, int64_t nTimePenalty);
- // Mark an entry as attempted to connect.
+ //! Mark an entry as attempted to connect.
void Attempt_(const CService &addr, int64_t nTime);
- // Select an address to connect to.
- // nUnkBias determines how much to favor new addresses over tried ones (min=0, max=100)
+ //! Select an address to connect to.
+ //! nUnkBias determines how much to favor new addresses over tried ones (min=0, max=100)
CAddress Select_(int nUnkBias);
#ifdef DEBUG_ADDRMAN
- // Perform consistency check. Returns an error code or zero.
+ //! Perform consistency check. Returns an error code or zero.
int Check_();
#endif
- // Select several addresses at once.
+ //! Select several addresses at once.
void GetAddr_(std::vector<CAddress> &vAddr);
- // Mark an entry as currently-connected-to.
+ //! Mark an entry as currently-connected-to.
void Connected_(const CService &addr, int64_t nTime);
public:
- // serialized format:
- // * version byte (currently 0)
- // * nKey
- // * nNew
- // * nTried
- // * number of "new" buckets
- // * all nNew addrinfos in vvNew
- // * all nTried addrinfos in vvTried
- // * for each bucket:
- // * number of elements
- // * for each element: index
- //
- // Notice that vvTried, mapAddr and vVector are never encoded explicitly;
- // they are instead reconstructed from the other information.
- //
- // vvNew is serialized, but only used if ADDRMAN_UNKOWN_BUCKET_COUNT didn't change,
- // otherwise it is reconstructed as well.
- //
- // This format is more complex, but significantly smaller (at most 1.5 MiB), and supports
- // changes to the ADDRMAN_ parameters without breaking the on-disk structure.
- //
- // We don't use ADD_SERIALIZE_METHODS since the serialization and deserialization code has
- // very little in common.
+ /**
+ * serialized format:
+ * * version byte (currently 0)
+ * * nKey
+ * * nNew
+ * * nTried
+ * * number of "new" buckets
+ * * all nNew addrinfos in vvNew
+ * * all nTried addrinfos in vvTried
+ * * for each bucket:
+ * * number of elements
+ * * for each element: index
+ *
+ * Notice that vvTried, mapAddr and vVector are never encoded explicitly;
+ * they are instead reconstructed from the other information.
+ *
+ * vvNew is serialized, but only used if ADDRMAN_UNKOWN_BUCKET_COUNT didn't change,
+ * otherwise it is reconstructed as well.
+ *
+ * This format is more complex, but significantly smaller (at most 1.5 MiB), and supports
+ * changes to the ADDRMAN_ parameters without breaking the on-disk structure.
+ *
+ * We don't use ADD_SERIALIZE_METHODS since the serialization and deserialization code has
+ * very little in common.
+ *
+ */
template<typename Stream>
void Serialize(Stream &s, int nType, int nVersionDummy) const
{
@@ -394,13 +401,13 @@ public:
nNew = 0;
}
- // Return the number of (unique) addresses in all tables.
+ //! Return the number of (unique) addresses in all tables.
int size()
{
return vRandom.size();
}
- // Consistency check
+ //! Consistency check
void Check()
{
#ifdef DEBUG_ADDRMAN
@@ -413,7 +420,7 @@ public:
#endif
}
- // Add a single address.
+ //! Add a single address.
bool Add(const CAddress &addr, const CNetAddr& source, int64_t nTimePenalty = 0)
{
bool fRet = false;
@@ -428,7 +435,7 @@ public:
return fRet;
}
- // Add multiple addresses.
+ //! Add multiple addresses.
bool Add(const std::vector<CAddress> &vAddr, const CNetAddr& source, int64_t nTimePenalty = 0)
{
int nAdd = 0;
@@ -444,7 +451,7 @@ public:
return nAdd > 0;
}
- // Mark an entry as accessible.
+ //! Mark an entry as accessible.
void Good(const CService &addr, int64_t nTime = GetAdjustedTime())
{
{
@@ -455,7 +462,7 @@ public:
}
}
- // Mark an entry as connection attempted to.
+ //! Mark an entry as connection attempted to.
void Attempt(const CService &addr, int64_t nTime = GetAdjustedTime())
{
{
@@ -466,8 +473,10 @@ public:
}
}
- // Choose an address to connect to.
- // nUnkBias determines how much "new" entries are favored over "tried" ones (0-100).
+ /**
+ * Choose an address to connect to.
+ * nUnkBias determines how much "new" entries are favored over "tried" ones (0-100).
+ */
CAddress Select(int nUnkBias = 50)
{
CAddress addrRet;
@@ -480,7 +489,7 @@ public:
return addrRet;
}
- // Return a bunch of addresses, selected at random.
+ //! Return a bunch of addresses, selected at random.
std::vector<CAddress> GetAddr()
{
Check();
@@ -493,7 +502,7 @@ public:
return vAddr;
}
- // Mark an entry as currently-connected-to.
+ //! Mark an entry as currently-connected-to.
void Connected(const CService &addr, int64_t nTime = GetAdjustedTime())
{
{
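
The serialized layout described in the comment block above can be read as a straight sequence of writes. The following is a minimal illustrative sketch only, not the patch's actual CAddrMan::Serialize: the parameter names (vNew, vTried, buckets) are placeholders standing in for the class's internal tables.

    // Illustrative sketch only: mirrors the documented field order; not the real CAddrMan::Serialize.
    template<typename Stream>
    void WriteAddrManLayoutSketch(Stream& s,
                                  unsigned char nVersion,
                                  const std::vector<unsigned char>& vchKey,
                                  const std::vector<CAddrInfo>& vNew,
                                  const std::vector<CAddrInfo>& vTried,
                                  const std::vector<std::vector<int> >& buckets)
    {
        s << nVersion;                                  // version byte (currently 0)
        s << vchKey;                                    // nKey
        s << (int)vNew.size();                          // nNew
        s << (int)vTried.size();                        // nTried
        s << (int)buckets.size();                       // number of "new" buckets
        for (size_t i = 0; i < vNew.size(); i++)        // all nNew addrinfos
            s << vNew[i];
        for (size_t i = 0; i < vTried.size(); i++)      // all nTried addrinfos
            s << vTried[i];
        for (size_t b = 0; b < buckets.size(); b++) {
            s << (int)buckets[b].size();                // number of elements in this bucket
            for (size_t j = 0; j < buckets[b].size(); j++)
                s << buckets[b][j];                     // index of each element
        }
    }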
diff --git a/src/allocators.h b/src/allocators.h
index 6b69e7ae69..78a3b76d0c 100644
--- a/src/allocators.h
+++ b/src/allocators.h
@@ -9,6 +9,7 @@
#include <map>
#include <string>
#include <string.h>
+#include <vector>
#include <boost/thread/mutex.hpp>
#include <boost/thread/once.hpp>
@@ -261,4 +262,7 @@ struct zero_after_free_allocator : public std::allocator<T> {
// This is exactly like std::string, but with a custom allocator.
typedef std::basic_string<char, std::char_traits<char>, secure_allocator<char> > SecureString;
+// Byte-vector that clears its contents before deletion.
+typedef std::vector<char, zero_after_free_allocator<char> > CSerializeData;
+
#endif // BITCOIN_ALLOCATORS_H
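
For context, the new CSerializeData typedef behaves exactly like a std::vector<char>, except that its allocator zeroes the buffer before handing it back to the heap. A minimal usage sketch follows; the function name is invented for illustration.

    // Sketch only: CSerializeData wipes its storage when freed, so transient
    // serialized data does not linger in deallocated memory.
    void HoldSerializedBytesSketch(const char* pbuf, size_t len)
    {
        CSerializeData data(pbuf, pbuf + len);  // fill like any std::vector<char>
        // ... hand the bytes to the network layer, etc. ...
    }   // on destruction the allocator zeroes the buffer before freeing it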
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index badb376cb3..aa5e285b10 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -25,13 +25,13 @@ std::string HelpMessageCli()
string strUsage;
strUsage += _("Options:") + "\n";
strUsage += " -? " + _("This help message") + "\n";
- strUsage += " -conf=<file> " + _("Specify configuration file (default: bitcoin.conf)") + "\n";
+ strUsage += " -conf=<file> " + strprintf(_("Specify configuration file (default: %s)"), "bitcoin.conf") + "\n";
strUsage += " -datadir=<dir> " + _("Specify data directory") + "\n";
strUsage += " -testnet " + _("Use the test network") + "\n";
strUsage += " -regtest " + _("Enter regression test mode, which uses a special chain in which blocks can be "
"solved instantly. This is intended for regression testing tools and app development.") + "\n";
- strUsage += " -rpcconnect=<ip> " + _("Send commands to node running on <ip> (default: 127.0.0.1)") + "\n";
- strUsage += " -rpcport=<port> " + _("Connect to JSON-RPC on <port> (default: 8332 or testnet: 18332)") + "\n";
+ strUsage += " -rpcconnect=<ip> " + strprintf(_("Send commands to node running on <ip> (default: %s)"), "127.0.0.1") + "\n";
+ strUsage += " -rpcport=<port> " + strprintf(_("Connect to JSON-RPC on <port> (default: %u or testnet: %u)"), 8332, 18332) + "\n";
strUsage += " -rpcwait " + _("Wait for RPC server to start") + "\n";
strUsage += " -rpcuser=<user> " + _("Username for JSON-RPC connections") + "\n";
strUsage += " -rpcpassword=<pw> " + _("Password for JSON-RPC connections") + "\n";
diff --git a/src/bloom.cpp b/src/bloom.cpp
index cef74a3a54..cac71fdbbf 100644
--- a/src/bloom.cpp
+++ b/src/bloom.cpp
@@ -7,6 +7,7 @@
#include "core.h"
#include "script/script.h"
#include "script/standard.h"
+#include "streams.h"
#include <math.h>
#include <stdlib.h>
diff --git a/src/chain.cpp b/src/chain.cpp
index 05427a4569..56ed22ce71 100644
--- a/src/chain.cpp
+++ b/src/chain.cpp
@@ -9,17 +9,16 @@ using namespace std;
// CChain implementation
-CBlockIndex *CChain::SetTip(CBlockIndex *pindex) {
+void CChain::SetTip(CBlockIndex *pindex) {
if (pindex == NULL) {
vChain.clear();
- return NULL;
+ return;
}
vChain.resize(pindex->nHeight + 1);
while (pindex && vChain[pindex->nHeight] != pindex) {
vChain[pindex->nHeight] = pindex;
pindex = pindex->pprev;
}
- return pindex;
}
CBlockLocator CChain::GetLocator(const CBlockIndex *pindex) const {
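
Since SetTip() no longer reports the forking point, a caller that still needs it can derive it afterwards. A hedged sketch, assuming the existing CChain::Contains() helper and an invented function name:

    // Sketch only: walk back from the previous tip until we reach a block that
    // the updated chain still contains; that block is the fork point.
    const CBlockIndex* FindForkAfterSetTipSketch(const CChain& chain, const CBlockIndex* pindexOld)
    {
        while (pindexOld && !chain.Contains(pindexOld))
            pindexOld = pindexOld->pprev;
        return pindexOld;   // NULL if the two chains share no ancestor
    }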
diff --git a/src/chain.h b/src/chain.h
index 0aafb40b98..290150476e 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -8,6 +8,7 @@
#include "core.h"
#include "pow.h"
+#include "tinyformat.h"
#include "uint256.h"
#include <vector>
@@ -49,12 +50,29 @@ struct CDiskBlockPos
};
enum BlockStatus {
+ // Unused.
BLOCK_VALID_UNKNOWN = 0,
- BLOCK_VALID_HEADER = 1, // parsed, version ok, hash satisfies claimed PoW, 1 <= vtx count <= max, timestamp not in future
- BLOCK_VALID_TREE = 2, // parent found, difficulty matches, timestamp >= median previous, checkpoint
- BLOCK_VALID_TRANSACTIONS = 3, // only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid, no duplicate txids, sigops, size, merkle root
- BLOCK_VALID_CHAIN = 4, // outputs do not overspend inputs, no double spends, coinbase output ok, immature coinbase spends, BIP30
- BLOCK_VALID_SCRIPTS = 5, // scripts/signatures ok
+
+ // Parsed, version ok, hash satisfies claimed PoW, 1 <= vtx count <= max, timestamp not in future
+ BLOCK_VALID_HEADER = 1,
+
+ // All parent headers found, difficulty matches, timestamp >= median previous, checkpoint. Implies all parents
+ // are also at least TREE.
+ BLOCK_VALID_TREE = 2,
+
+ // Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid, no duplicate txids,
+ // sigops, size, merkle root. Implies all parents are at least TREE but not necessarily TRANSACTIONS. When all
+ // parent blocks also have TRANSACTIONS, CBlockIndex::nChainTx will be set.
+ BLOCK_VALID_TRANSACTIONS = 3,
+
+ // Outputs do not overspend inputs, no double spends, coinbase output ok, immature coinbase spends, BIP30.
+ // Implies all parents are also at least CHAIN.
+ BLOCK_VALID_CHAIN = 4,
+
+ // Scripts & signatures ok. Implies all parents are also at least SCRIPTS.
+ BLOCK_VALID_SCRIPTS = 5,
+
+ // All validity bits.
BLOCK_VALID_MASK = BLOCK_VALID_HEADER | BLOCK_VALID_TREE | BLOCK_VALID_TRANSACTIONS |
BLOCK_VALID_CHAIN | BLOCK_VALID_SCRIPTS,
@@ -103,7 +121,8 @@ public:
// Note: in a potential headers-first mode, this number cannot be relied upon
unsigned int nTx;
- // (memory only) Number of transactions in the chain up to and including this block
+ // (memory only) Number of transactions in the chain up to and including this block.
+    // This value will be non-zero if and only if transactions for this block and all its parents are available.
unsigned int nChainTx; // change to 64-bit type when necessary; won't happen before 2030
// Verification status of this block. See enum BlockStatus
@@ -146,7 +165,7 @@ public:
SetNull();
}
- CBlockIndex(CBlockHeader& block)
+ CBlockIndex(const CBlockHeader& block)
{
SetNull();
@@ -377,8 +396,8 @@ public:
return vChain.size() - 1;
}
- /** Set/initialize a chain with a given tip. Returns the forking point. */
- CBlockIndex *SetTip(CBlockIndex *pindex);
+ /** Set/initialize a chain with a given tip. */
+ void SetTip(CBlockIndex *pindex);
/** Return a CBlockLocator that refers to a block in this chain (by default the tip). */
CBlockLocator GetLocator(const CBlockIndex *pindex = NULL) const;
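
Because the BLOCK_VALID_* values form an ordered ladder rather than independent flags, "is this block valid up to level X" reduces to a masked comparison. A minimal sketch of that check (it mirrors, but is not copied from, CBlockIndex::IsValid; BLOCK_FAILED_MASK is assumed to be defined elsewhere in the enum):

    // Sketch only: the low bits of nStatus hold the highest validity level reached.
    bool HasValidityLevelSketch(unsigned int nStatus, BlockStatus nUpTo)
    {
        if (nStatus & BLOCK_FAILED_MASK)                    // assumed failure bits
            return false;                                   // a known-bad block never qualifies
        return (nStatus & BLOCK_VALID_MASK) >= (unsigned int)nUpTo;
    }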
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index f2a14b8293..1ab292517a 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -7,6 +7,7 @@
#include "random.h"
#include "util.h"
+#include "utilstrencodings.h"
#include <assert.h>
@@ -265,6 +266,7 @@ public:
nDefaultPort = 18444;
assert(hashGenesisBlock == uint256("0x0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"));
+ vFixedSeeds.clear(); // Regtest mode doesn't have any fixed seeds.
vSeeds.clear(); // Regtest mode doesn't have any DNS seeds.
fRequireRPCPassword = false;
@@ -354,10 +356,13 @@ void SelectParams(CBaseChainParams::Network network) {
pCurrentParams = &Params(network);
}
-bool SelectParamsFromCommandLine() {
- if (!SelectBaseParamsFromCommandLine())
+bool SelectParamsFromCommandLine()
+{
+ CBaseChainParams::Network network = NetworkIdFromCommandLine();
+ if (network == CBaseChainParams::MAX_NETWORK_TYPES)
return false;
- SelectParams(BaseParams().NetworkID());
+ SelectBaseParams(network);
+ SelectParams(network);
return true;
}
diff --git a/src/chainparamsbase.cpp b/src/chainparamsbase.cpp
index e9d63197bd..5d9ec7927b 100644
--- a/src/chainparamsbase.cpp
+++ b/src/chainparamsbase.cpp
@@ -100,22 +100,27 @@ void SelectBaseParams(CBaseChainParams::Network network)
}
}
-bool SelectBaseParamsFromCommandLine()
+CBaseChainParams::Network NetworkIdFromCommandLine()
{
bool fRegTest = GetBoolArg("-regtest", false);
bool fTestNet = GetBoolArg("-testnet", false);
- if (fTestNet && fRegTest) {
+ if (fTestNet && fRegTest)
+ return CBaseChainParams::MAX_NETWORK_TYPES;
+ if (fRegTest)
+ return CBaseChainParams::REGTEST;
+ if (fTestNet)
+ return CBaseChainParams::TESTNET;
+ return CBaseChainParams::MAIN;
+}
+
+bool SelectBaseParamsFromCommandLine()
+{
+ CBaseChainParams::Network network = NetworkIdFromCommandLine();
+ if (network == CBaseChainParams::MAX_NETWORK_TYPES)
return false;
- }
- if (fRegTest) {
- SelectBaseParams(CBaseChainParams::REGTEST);
- } else if (fTestNet) {
- SelectBaseParams(CBaseChainParams::TESTNET);
- } else {
- SelectBaseParams(CBaseChainParams::MAIN);
- }
+ SelectBaseParams(network);
return true;
}
diff --git a/src/chainparamsbase.h b/src/chainparamsbase.h
index cc154cf501..911d1181ac 100644
--- a/src/chainparamsbase.h
+++ b/src/chainparamsbase.h
@@ -26,7 +26,6 @@ public:
const std::string& DataDir() const { return strDataDir; }
int RPCPort() const { return nRPCPort; }
- Network NetworkID() const { return networkID; }
protected:
CBaseChainParams() {}
@@ -46,7 +45,13 @@ const CBaseChainParams& BaseParams();
void SelectBaseParams(CBaseChainParams::Network network);
/**
- * Looks for -regtest or -testnet and then calls SelectParams as appropriate.
+ * Looks for -regtest or -testnet and returns the appropriate Network ID.
+ * Returns MAX_NETWORK_TYPES if an invalid combination is given.
+ */
+CBaseChainParams::Network NetworkIdFromCommandLine();
+
+/**
+ * Calls NetworkIdFromCommandLine() and then calls SelectParams as appropriate.
* Returns false if an invalid combination is given.
*/
bool SelectBaseParamsFromCommandLine();
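
A short caller-side sketch of the new split (illustrative only; the function name is invented): parsing the network and acting on it are now two separate steps, so callers can report an invalid -testnet/-regtest combination however they like.

    bool InitNetworkSketch()
    {
        CBaseChainParams::Network network = NetworkIdFromCommandLine();
        if (network == CBaseChainParams::MAX_NETWORK_TYPES)
            return false;               // -testnet and -regtest were both supplied
        SelectBaseParams(network);      // base parameters: RPC port, data directory
        return true;
    }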
diff --git a/src/core.cpp b/src/core.cpp
index 380b1c38e0..73e6de88e1 100644
--- a/src/core.cpp
+++ b/src/core.cpp
@@ -5,9 +5,9 @@
#include "core.h"
+#include "hash.h"
#include "tinyformat.h"
-
-#include <boost/foreach.hpp>
+#include "utilstrencodings.h"
std::string COutPoint::ToString() const
{
@@ -113,10 +113,10 @@ CTransaction& CTransaction::operator=(const CTransaction &tx) {
CAmount CTransaction::GetValueOut() const
{
CAmount nValueOut = 0;
- BOOST_FOREACH(const CTxOut& txout, vout)
+ for (std::vector<CTxOut>::const_iterator it(vout.begin()); it != vout.end(); ++it)
{
- nValueOut += txout.nValue;
- if (!MoneyRange(txout.nValue) || !MoneyRange(nValueOut))
+ nValueOut += it->nValue;
+ if (!MoneyRange(it->nValue) || !MoneyRange(nValueOut))
throw std::runtime_error("CTransaction::GetValueOut() : value out of range");
}
return nValueOut;
@@ -139,10 +139,9 @@ unsigned int CTransaction::CalculateModifiedSize(unsigned int nTxSize) const
// risk encouraging people to create junk outputs to redeem later.
if (nTxSize == 0)
nTxSize = ::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION);
-
- BOOST_FOREACH(const CTxIn& txin, vin)
+ for (std::vector<CTxIn>::const_iterator it(vin.begin()); it != vin.end(); ++it)
{
- unsigned int offset = 41U + std::min(110U, (unsigned int)txin.scriptSig.size());
+ unsigned int offset = 41U + std::min(110U, (unsigned int)it->scriptSig.size());
if (nTxSize > offset)
nTxSize -= offset;
}
@@ -263,8 +262,8 @@ uint256 CBlock::BuildMerkleTree(bool* fMutated) const
*/
vMerkleTree.clear();
vMerkleTree.reserve(vtx.size() * 2 + 16); // Safe upper bound for the number of total nodes.
- BOOST_FOREACH(const CTransaction& tx, vtx)
- vMerkleTree.push_back(tx.GetHash());
+ for (std::vector<CTransaction>::const_iterator it(vtx.begin()); it != vtx.end(); ++it)
+ vMerkleTree.push_back(it->GetHash());
int j = 0;
bool mutated = false;
for (int nSize = vtx.size(); nSize > 1; nSize = (nSize + 1) / 2)
@@ -307,12 +306,12 @@ uint256 CBlock::CheckMerkleBranch(uint256 hash, const std::vector<uint256>& vMer
{
if (nIndex == -1)
return 0;
- BOOST_FOREACH(const uint256& otherside, vMerkleBranch)
+ for (std::vector<uint256>::const_iterator it(vMerkleBranch.begin()); it != vMerkleBranch.end(); ++it)
{
if (nIndex & 1)
- hash = Hash(BEGIN(otherside), END(otherside), BEGIN(hash), END(hash));
+ hash = Hash(BEGIN(*it), END(*it), BEGIN(hash), END(hash));
else
- hash = Hash(BEGIN(hash), END(hash), BEGIN(otherside), END(otherside));
+ hash = Hash(BEGIN(hash), END(hash), BEGIN(*it), END(*it));
nIndex >>= 1;
}
return hash;
diff --git a/src/core.h b/src/core.h
index a348293578..a024dad740 100644
--- a/src/core.h
+++ b/src/core.h
@@ -61,19 +61,6 @@ public:
std::string ToString() const;
};
-/** An inpoint - a combination of a transaction and an index n into its vin */
-class CInPoint
-{
-public:
- const CTransaction* ptx;
- uint32_t n;
-
- CInPoint() { SetNull(); }
- CInPoint(const CTransaction* ptxIn, uint32_t nIn) { ptx = ptxIn; n = nIn; }
- void SetNull() { ptx = NULL; n = (uint32_t) -1; }
- bool IsNull() const { return (ptx == NULL && n == (uint32_t) -1); }
-};
-
/** An input of a transaction. It contains the location of the previous
* transaction's output that it claims and a signature that matches the
* output's public key.
diff --git a/src/core_read.cpp b/src/core_read.cpp
index 6bd3d9a4fa..dcbcf4b4f7 100644
--- a/src/core_read.cpp
+++ b/src/core_read.cpp
@@ -7,8 +7,11 @@
#include "core.h"
#include "script/script.h"
#include "serialize.h"
+#include "streams.h"
#include "univalue/univalue.h"
#include "util.h"
+#include "utilstrencodings.h"
+#include "version.h"
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/predicate.hpp>
diff --git a/src/core_write.cpp b/src/core_write.cpp
index 40d547fb33..b2b29fb367 100644
--- a/src/core_write.cpp
+++ b/src/core_write.cpp
@@ -9,9 +9,11 @@
#include "script/script.h"
#include "script/standard.h"
#include "serialize.h"
+#include "streams.h"
#include "univalue/univalue.h"
#include "util.h"
#include "utilmoneystr.h"
+#include "utilstrencodings.h"
#include <boost/foreach.hpp>
diff --git a/src/crypter.cpp b/src/crypter.cpp
index a872df7024..756538836d 100644
--- a/src/crypter.cpp
+++ b/src/crypter.cpp
@@ -5,6 +5,7 @@
#include "crypter.h"
#include "script/script.h"
+#include "script/standard.h"
#include "util.h"
#include <string>
diff --git a/src/db.h b/src/db.h
index d202399383..0cbdd8b91b 100644
--- a/src/db.h
+++ b/src/db.h
@@ -7,6 +7,7 @@
#define BITCOIN_DB_H
#include "serialize.h"
+#include "streams.h"
#include "sync.h"
#include "version.h"
diff --git a/src/init.cpp b/src/init.cpp
index 8dcd35fb8f..70ac5190d3 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -47,6 +47,7 @@ using namespace std;
#ifdef ENABLE_WALLET
CWallet* pwalletMain = NULL;
#endif
+bool fFeeEstimatesInitialized = false;
#ifdef WIN32
// Win32 LevelDB doesn't use filedescriptors, and the ones used for
@@ -119,6 +120,10 @@ void Shutdown()
if (!lockShutdown)
return;
+ /// Note: Shutdown() must be able to handle cases in which AppInit2() failed part of the way,
+ /// for example if the data directory was found to be locked.
+ /// Be sure that anything that writes files or flushes caches only does this if the respective
+ /// module was initialized.
RenameThread("bitcoin-shutoff");
mempool.AddTransactionsUpdated(1);
StopRPCThreads();
@@ -130,13 +135,15 @@ void Shutdown()
StopNode();
UnregisterNodeSignals(GetNodeSignals());
+ if (fFeeEstimatesInitialized)
{
boost::filesystem::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
CAutoFile est_fileout(fopen(est_path.string().c_str(), "wb"), SER_DISK, CLIENT_VERSION);
- if (est_fileout)
+ if (!est_fileout.IsNull())
mempool.WriteFeeEstimates(est_fileout);
else
LogPrintf("%s: Failed to write fee estimates to %s\n", __func__, est_path.string());
+ fFeeEstimatesInitialized = false;
}
{
@@ -163,7 +170,7 @@ void Shutdown()
#ifndef WIN32
boost::filesystem::remove(GetPidFile());
#endif
- UnregisterAllWallets();
+ UnregisterAllValidationInterfaces();
#ifdef ENABLE_WALLET
delete pwalletMain;
pwalletMain = NULL;
@@ -215,9 +222,9 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += " -? " + _("This help message") + "\n";
strUsage += " -alertnotify=<cmd> " + _("Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)") + "\n";
strUsage += " -blocknotify=<cmd> " + _("Execute command when the best block changes (%s in cmd is replaced by block hash)") + "\n";
- strUsage += " -checkblocks=<n> " + _("How many blocks to check at startup (default: 288, 0 = all)") + "\n";
- strUsage += " -checklevel=<n> " + _("How thorough the block verification of -checkblocks is (0-4, default: 3)") + "\n";
- strUsage += " -conf=<file> " + _("Specify configuration file (default: bitcoin.conf)") + "\n";
+ strUsage += " -checkblocks=<n> " + strprintf(_("How many blocks to check at startup (default: %u, 0 = all)"), 288) + "\n";
+ strUsage += " -checklevel=<n> " + strprintf(_("How thorough the block verification of -checkblocks is (0-4, default: %u)"), 3) + "\n";
+ strUsage += " -conf=<file> " + strprintf(_("Specify configuration file (default: %s)"), "bitcoin.conf") + "\n";
if (mode == HMM_BITCOIND)
{
#if !defined(WIN32)
@@ -231,33 +238,33 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += " -maxorphantx=<n> " + strprintf(_("Keep at most <n> unconnectable transactions in memory (default: %u)"), DEFAULT_MAX_ORPHAN_TRANSACTIONS) + "\n";
strUsage += " -par=<n> " + strprintf(_("Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)"), -(int)boost::thread::hardware_concurrency(), MAX_SCRIPTCHECK_THREADS, DEFAULT_SCRIPTCHECK_THREADS) + "\n";
#ifndef WIN32
- strUsage += " -pid=<file> " + _("Specify pid file (default: bitcoind.pid)") + "\n";
+ strUsage += " -pid=<file> " + strprintf(_("Specify pid file (default: %s)"), "bitcoind.pid") + "\n";
#endif
strUsage += " -reindex " + _("Rebuild block chain index from current blk000??.dat files") + " " + _("on startup") + "\n";
#if !defined(WIN32)
strUsage += " -sysperms " + _("Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)") + "\n";
#endif
- strUsage += " -txindex " + _("Maintain a full transaction index, used by the getrawtransaction rpc call (default: 0)") + "\n";
+ strUsage += " -txindex " + strprintf(_("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)"), 0) + "\n";
strUsage += "\n" + _("Connection options:") + "\n";
strUsage += " -addnode=<ip> " + _("Add a node to connect to and attempt to keep the connection open") + "\n";
- strUsage += " -banscore=<n> " + _("Threshold for disconnecting misbehaving peers (default: 100)") + "\n";
- strUsage += " -bantime=<n> " + _("Number of seconds to keep misbehaving peers from reconnecting (default: 86400)") + "\n";
+ strUsage += " -banscore=<n> " + strprintf(_("Threshold for disconnecting misbehaving peers (default: %u)"), 100) + "\n";
+ strUsage += " -bantime=<n> " + strprintf(_("Number of seconds to keep misbehaving peers from reconnecting (default: %u)"), 86400) + "\n";
strUsage += " -bind=<addr> " + _("Bind to given address and always listen on it. Use [host]:port notation for IPv6") + "\n";
strUsage += " -connect=<ip> " + _("Connect only to the specified node(s)") + "\n";
strUsage += " -discover " + _("Discover own IP address (default: 1 when listening and no -externalip)") + "\n";
strUsage += " -dns " + _("Allow DNS lookups for -addnode, -seednode and -connect") + " " + _("(default: 1)") + "\n";
strUsage += " -dnsseed " + _("Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)") + "\n";
strUsage += " -externalip=<ip> " + _("Specify your own public address") + "\n";
- strUsage += " -forcednsseed " + _("Always query for peer addresses via DNS lookup (default: 0)") + "\n";
+ strUsage += " -forcednsseed " + strprintf(_("Always query for peer addresses via DNS lookup (default: %u)"), 0) + "\n";
strUsage += " -listen " + _("Accept connections from outside (default: 1 if no -proxy or -connect)") + "\n";
- strUsage += " -maxconnections=<n> " + _("Maintain at most <n> connections to peers (default: 125)") + "\n";
- strUsage += " -maxreceivebuffer=<n> " + _("Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)") + "\n";
- strUsage += " -maxsendbuffer=<n> " + _("Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)") + "\n";
- strUsage += " -onion=<ip:port> " + _("Use separate SOCKS5 proxy to reach peers via Tor hidden services (default: -proxy)") + "\n";
+ strUsage += " -maxconnections=<n> " + strprintf(_("Maintain at most <n> connections to peers (default: %u)"), 125) + "\n";
+ strUsage += " -maxreceivebuffer=<n> " + strprintf(_("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)"), 5000) + "\n";
+ strUsage += " -maxsendbuffer=<n> " + strprintf(_("Maximum per-connection send buffer, <n>*1000 bytes (default: %u)"), 1000) + "\n";
+ strUsage += " -onion=<ip:port> " + strprintf(_("Use separate SOCKS5 proxy to reach peers via Tor hidden services (default: %s)"), "-proxy") + "\n";
strUsage += " -onlynet=<net> " + _("Only connect to nodes in network <net> (ipv4, ipv6 or onion)") + "\n";
- strUsage += " -permitbaremultisig " + _("Relay non-P2SH multisig (default: 1)") + "\n";
- strUsage += " -port=<port> " + _("Listen for connections on <port> (default: 8333 or testnet: 18333)") + "\n";
+ strUsage += " -permitbaremultisig " + strprintf(_("Relay non-P2SH multisig (default: %u)"), 1) + "\n";
+ strUsage += " -port=<port> " + strprintf(_("Listen for connections on <port> (default: %u or testnet: %u)"), 8333, 18333) + "\n";
strUsage += " -proxy=<ip:port> " + _("Connect through SOCKS5 proxy") + "\n";
strUsage += " -seednode=<ip> " + _("Connect to a node to retrieve peer addresses, and disconnect") + "\n";
strUsage += " -timeout=<n> " + strprintf(_("Specify connection timeout in milliseconds (minimum: 1, default: %d)"), DEFAULT_CONNECT_TIMEOUT) + "\n";
@@ -265,44 +272,44 @@ std::string HelpMessage(HelpMessageMode mode)
#if USE_UPNP
strUsage += " -upnp " + _("Use UPnP to map the listening port (default: 1 when listening)") + "\n";
#else
- strUsage += " -upnp " + _("Use UPnP to map the listening port (default: 0)") + "\n";
+ strUsage += " -upnp " + strprintf(_("Use UPnP to map the listening port (default: %u)"), 0) + "\n";
#endif
#endif
strUsage += " -whitebind=<addr> " + _("Bind to given address and whitelist peers connecting to it. Use [host]:port notation for IPv6") + "\n";
- strUsage += " -whitelist=<netmask> " + _("Whitelist peers connecting from the given netmask or ip. Can be specified multiple times.") + "\n";
+ strUsage += " -whitelist=<netmask> " + _("Whitelist peers connecting from the given netmask or IP address. Can be specified multiple times.") + "\n";
strUsage += " " + _("Whitelisted peers cannot be DoS banned and their transactions are always relayed, even if they are already in the mempool, useful e.g. for a gateway") + "\n";
#ifdef ENABLE_WALLET
strUsage += "\n" + _("Wallet options:") + "\n";
strUsage += " -disablewallet " + _("Do not load the wallet and disable wallet RPC calls") + "\n";
- strUsage += " -keypool=<n> " + _("Set key pool size to <n> (default: 100)") + "\n";
+ strUsage += " -keypool=<n> " + strprintf(_("Set key pool size to <n> (default: %u)"), 100) + "\n";
if (GetBoolArg("-help-debug", false))
strUsage += " -mintxfee=<amt> " + strprintf(_("Fees (in BTC/Kb) smaller than this are considered zero fee for transaction creation (default: %s)"), FormatMoney(CWallet::minTxFee.GetFeePerK())) + "\n";
strUsage += " -paytxfee=<amt> " + strprintf(_("Fee (in BTC/kB) to add to transactions you send (default: %s)"), FormatMoney(payTxFee.GetFeePerK())) + "\n";
strUsage += " -rescan " + _("Rescan the block chain for missing wallet transactions") + " " + _("on startup") + "\n";
strUsage += " -salvagewallet " + _("Attempt to recover private keys from a corrupt wallet.dat") + " " + _("on startup") + "\n";
- strUsage += " -spendzeroconfchange " + _("Spend unconfirmed change when sending transactions (default: 1)") + "\n";
- strUsage += " -txconfirmtarget=<n> " + _("If paytxfee is not set, include enough fee so transactions are confirmed on average within n blocks (default: 1)") + "\n";
+ strUsage += " -spendzeroconfchange " + strprintf(_("Spend unconfirmed change when sending transactions (default: %u)"), 1) + "\n";
+ strUsage += " -txconfirmtarget=<n> " + strprintf(_("If paytxfee is not set, include enough fee so transactions are confirmed on average within n blocks (default: %u)"), 1) + "\n";
strUsage += " -upgradewallet " + _("Upgrade wallet to latest format") + " " + _("on startup") + "\n";
- strUsage += " -wallet=<file> " + _("Specify wallet file (within data directory)") + " " + _("(default: wallet.dat)") + "\n";
+ strUsage += " -wallet=<file> " + _("Specify wallet file (within data directory)") + " " + strprintf(_("(default: %s)"), "wallet.dat") + "\n";
strUsage += " -walletnotify=<cmd> " + _("Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)") + "\n";
strUsage += " -zapwallettxes=<mode> " + _("Delete all wallet transactions and only recover those parts of the blockchain through -rescan on startup") + "\n";
- strUsage += " " + _("(default: 1, 1 = keep tx meta data e.g. account owner and payment request information, 2 = drop tx meta data)") + "\n";
+ strUsage += " " + _("(1 = keep tx meta data e.g. account owner and payment request information, 2 = drop tx meta data)") + "\n";
#endif
strUsage += "\n" + _("Debugging/Testing options:") + "\n";
if (GetBoolArg("-help-debug", false))
{
- strUsage += " -checkpoints " + _("Only accept block chain matching built-in checkpoints (default: 1)") + "\n";
- strUsage += " -dblogsize=<n> " + _("Flush database activity from memory pool to disk log every <n> megabytes (default: 100)") + "\n";
- strUsage += " -disablesafemode " + _("Disable safemode, override a real safe mode event (default: 0)") + "\n";
- strUsage += " -testsafemode " + _("Force safe mode (default: 0)") + "\n";
+ strUsage += " -checkpoints " + strprintf(_("Only accept block chain matching built-in checkpoints (default: %u)"), 1) + "\n";
+ strUsage += " -dblogsize=<n> " + strprintf(_("Flush database activity from memory pool to disk log every <n> megabytes (default: %u)"), 100) + "\n";
+ strUsage += " -disablesafemode " + strprintf(_("Disable safemode, override a real safe mode event (default: %u)"), 0) + "\n";
+ strUsage += " -testsafemode " + strprintf(_("Force safe mode (default: %u)"), 0) + "\n";
strUsage += " -dropmessagestest=<n> " + _("Randomly drop 1 of every <n> network messages") + "\n";
strUsage += " -fuzzmessagestest=<n> " + _("Randomly fuzz 1 of every <n> network messages") + "\n";
- strUsage += " -flushwallet " + _("Run a thread to flush wallet periodically (default: 1)") + "\n";
- strUsage += " -stopafterblockimport " + _("Stop running after importing blocks from disk (default: 0)") + "\n";
+ strUsage += " -flushwallet " + strprintf(_("Run a thread to flush wallet periodically (default: %u)"), 1) + "\n";
+ strUsage += " -stopafterblockimport " + strprintf(_("Stop running after importing blocks from disk (default: %u)"), 0) + "\n";
}
- strUsage += " -debug=<category> " + _("Output debugging information (default: 0, supplying <category> is optional)") + "\n";
+ strUsage += " -debug=<category> " + strprintf(_("Output debugging information (default: %u, supplying <category> is optional)"), 0) + "\n";
strUsage += " " + _("If <category> is not supplied, output all debugging information.") + "\n";
strUsage += " " + _("<category> can be:");
strUsage += " addrman, alert, bench, coindb, db, lock, rand, rpc, selectcoins, mempool, net"; // Don't translate these and qt below
@@ -310,25 +317,25 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += ", qt";
strUsage += ".\n";
#ifdef ENABLE_WALLET
- strUsage += " -gen " + _("Generate coins (default: 0)") + "\n";
- strUsage += " -genproclimit=<n> " + _("Set the processor limit for when generation is on (-1 = unlimited, default: -1)") + "\n";
+ strUsage += " -gen " + strprintf(_("Generate coins (default: %u)"), 0) + "\n";
+ strUsage += " -genproclimit=<n> " + strprintf(_("Set the processor limit for when generation is on (-1 = unlimited, default: %d)"), -1) + "\n";
#endif
strUsage += " -help-debug " + _("Show all debugging options (usage: --help -help-debug)") + "\n";
- strUsage += " -logips " + _("Include IP addresses in debug output (default: 0)") + "\n";
- strUsage += " -logtimestamps " + _("Prepend debug output with timestamp (default: 1)") + "\n";
+ strUsage += " -logips " + strprintf(_("Include IP addresses in debug output (default: %u)"), 0) + "\n";
+ strUsage += " -logtimestamps " + strprintf(_("Prepend debug output with timestamp (default: %u)"), 1) + "\n";
if (GetBoolArg("-help-debug", false))
{
- strUsage += " -limitfreerelay=<n> " + _("Continuously rate-limit free transactions to <n>*1000 bytes per minute (default:15)") + "\n";
- strUsage += " -maxsigcachesize=<n> " + _("Limit size of signature cache to <n> entries (default: 50000)") + "\n";
+ strUsage += " -limitfreerelay=<n> " + strprintf(_("Continuously rate-limit free transactions to <n>*1000 bytes per minute (default:%u)"), 15) + "\n";
+ strUsage += " -maxsigcachesize=<n> " + strprintf(_("Limit size of signature cache to <n> entries (default: %u)"), 50000) + "\n";
}
strUsage += " -minrelaytxfee=<amt> " + strprintf(_("Fees (in BTC/Kb) smaller than this are considered zero fee for relaying (default: %s)"), FormatMoney(::minRelayTxFee.GetFeePerK())) + "\n";
strUsage += " -printtoconsole " + _("Send trace/debug info to console instead of debug.log file") + "\n";
if (GetBoolArg("-help-debug", false))
{
strUsage += " -printblock=<hash> " + _("Print block on startup, if found in block index") + "\n";
- strUsage += " -printblocktree " + _("Print block tree on startup (default: 0)") + "\n";
- strUsage += " -printpriority " + _("Log transaction priority and fee per kB when mining blocks (default: 0)") + "\n";
- strUsage += " -privdb " + _("Sets the DB_PRIVATE flag in the wallet db environment (default: 1)") + "\n";
+ strUsage += " -printblocktree " + strprintf(_("Print block tree on startup (default: %u)"), 0) + "\n";
+ strUsage += " -printpriority " + strprintf(_("Log transaction priority and fee per kB when mining blocks (default: %u)"), 0) + "\n";
+ strUsage += " -privdb " + strprintf(_("Sets the DB_PRIVATE flag in the wallet db environment (default: %u)"), 1) + "\n";
strUsage += " -regtest " + _("Enter regression test mode, which uses a special chain in which blocks can be solved instantly.") + "\n";
strUsage += " " + _("This is intended for regression testing tools and app development.") + "\n";
strUsage += " " + _("In this mode -genproclimit controls how many blocks are generated immediately.") + "\n";
@@ -337,10 +344,10 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += " -testnet " + _("Use the test network") + "\n";
strUsage += "\n" + _("Node relay options:") + "\n";
- strUsage += " -datacarrier " + _("Relay and mine data carrier transactions (default: 1)") + "\n";
+ strUsage += " -datacarrier " + strprintf(_("Relay and mine data carrier transactions (default: %u)"), 1) + "\n";
strUsage += "\n" + _("Block creation options:") + "\n";
- strUsage += " -blockminsize=<n> " + _("Set minimum block size in bytes (default: 0)") + "\n";
+ strUsage += " -blockminsize=<n> " + strprintf(_("Set minimum block size in bytes (default: %u)"), 0) + "\n";
strUsage += " -blockmaxsize=<n> " + strprintf(_("Set maximum block size in bytes (default: %d)"), DEFAULT_BLOCK_MAX_SIZE) + "\n";
strUsage += " -blockprioritysize=<n> " + strprintf(_("Set maximum size of high-priority/low-fee transactions in bytes (default: %d)"), DEFAULT_BLOCK_PRIORITY_SIZE) + "\n";
@@ -349,15 +356,15 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += " -rpcbind=<addr> " + _("Bind to given address to listen for JSON-RPC connections. Use [host]:port notation for IPv6. This option can be specified multiple times (default: bind to all interfaces)") + "\n";
strUsage += " -rpcuser=<user> " + _("Username for JSON-RPC connections") + "\n";
strUsage += " -rpcpassword=<pw> " + _("Password for JSON-RPC connections") + "\n";
- strUsage += " -rpcport=<port> " + _("Listen for JSON-RPC connections on <port> (default: 8332 or testnet: 18332)") + "\n";
+ strUsage += " -rpcport=<port> " + strprintf(_("Listen for JSON-RPC connections on <port> (default: %u or testnet: %u)"), 8332, 18332) + "\n";
strUsage += " -rpcallowip=<ip> " + _("Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times") + "\n";
- strUsage += " -rpcthreads=<n> " + _("Set the number of threads to service RPC calls (default: 4)") + "\n";
+ strUsage += " -rpcthreads=<n> " + strprintf(_("Set the number of threads to service RPC calls (default: %d)"), 4) + "\n";
strUsage += "\n" + _("RPC SSL options: (see the Bitcoin Wiki for SSL setup instructions)") + "\n";
strUsage += " -rpcssl " + _("Use OpenSSL (https) for JSON-RPC connections") + "\n";
- strUsage += " -rpcsslcertificatechainfile=<file.cert> " + _("Server certificate file (default: server.cert)") + "\n";
- strUsage += " -rpcsslprivatekeyfile=<file.pem> " + _("Server private key (default: server.pem)") + "\n";
- strUsage += " -rpcsslciphers=<ciphers> " + _("Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH)") + "\n";
+ strUsage += " -rpcsslcertificatechainfile=<file.cert> " + strprintf(_("Server certificate file (default: %s)"), "server.cert") + "\n";
+ strUsage += " -rpcsslprivatekeyfile=<file.pem> " + strprintf(_("Server private key (default: %s)"), "server.pem") + "\n";
+ strUsage += " -rpcsslciphers=<ciphers> " + strprintf(_("Acceptable ciphers (default: %s)"), "TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH") + "\n";
return strUsage;
}
@@ -967,7 +974,7 @@ bool AppInit2(boost::thread_group& threadGroup)
// If the loaded chain has a wrong genesis, bail out immediately
// (we're likely using a testnet datadir, or the other way around).
- if (!mapBlockIndex.empty() && chainActive.Genesis() == NULL)
+ if (!mapBlockIndex.empty() && mapBlockIndex.count(Params().HashGenesisBlock()) == 0)
return InitError(_("Incorrect or no genesis block found. Wrong datadir for network?"));
// Initialize the block index (no-op if non-empty database was already loaded)
@@ -1057,8 +1064,9 @@ bool AppInit2(boost::thread_group& threadGroup)
boost::filesystem::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
CAutoFile est_filein(fopen(est_path.string().c_str(), "rb"), SER_DISK, CLIENT_VERSION);
// Allowed to fail as this file IS missing on first startup.
- if (est_filein)
+ if (!est_filein.IsNull())
mempool.ReadFeeEstimates(est_filein);
+ fFeeEstimatesInitialized = true;
// ********************************************************* Step 8: load wallet
#ifdef ENABLE_WALLET
@@ -1146,7 +1154,7 @@ bool AppInit2(boost::thread_group& threadGroup)
LogPrintf("%s", strErrors.str());
LogPrintf(" wallet %15dms\n", GetTimeMillis() - nStart);
- RegisterWallet(pwalletMain);
+ RegisterValidationInterface(pwalletMain);
CBlockIndex *pindexRescan = chainActive.Tip();
if (GetBoolArg("-rescan", false))
@@ -1215,22 +1223,7 @@ bool AppInit2(boost::thread_group& threadGroup)
}
threadGroup.create_thread(boost::bind(&ThreadImport, vImportFiles));
- // ********************************************************* Step 10: load peers
-
- uiInterface.InitMessage(_("Loading addresses..."));
-
- nStart = GetTimeMillis();
-
- {
- CAddrDB adb;
- if (!adb.Read(addrman))
- LogPrintf("Invalid or missing peers.dat; recreating\n");
- }
-
- LogPrintf("Loaded %i addresses from peers.dat %dms\n",
- addrman.size(), GetTimeMillis() - nStart);
-
- // ********************************************************* Step 11: start node
+ // ********************************************************* Step 10: start node
if (!CheckDiskSpace())
return false;
@@ -1259,7 +1252,7 @@ bool AppInit2(boost::thread_group& threadGroup)
GenerateBitcoins(GetBoolArg("-gen", false), pwalletMain, GetArg("-genproclimit", -1));
#endif
- // ********************************************************* Step 12: finished
+ // ********************************************************* Step 11: finished
uiInterface.InitMessage(_("Done loading"));
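
The fFeeEstimatesInitialized flag is one instance of the guard pattern that the new Shutdown() note asks for. A generic sketch of the pattern, with a hypothetical module name for illustration:

    static bool fExampleModuleInitialized = false;   // hypothetical flag, not part of this patch

    void InitExampleModule()
    {
        // ... open files, load caches ...
        fExampleModuleInitialized = true;            // set only once the load succeeded
    }

    void ShutdownExampleModule()
    {
        if (!fExampleModuleInitialized)
            return;                                  // init never got this far: nothing to flush
        // ... write caches back to disk ...
        fExampleModuleInitialized = false;
    }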
diff --git a/src/key.cpp b/src/key.cpp
index c2251b4f2a..079e2c6540 100644
--- a/src/key.cpp
+++ b/src/key.cpp
@@ -179,19 +179,17 @@ public:
BN_clear_free(&bn);
}
- void GetPrivKey(CPrivKey &privkey, bool fCompressed) {
+ int GetPrivKeySize(bool fCompressed) {
EC_KEY_set_conv_form(pkey, fCompressed ? POINT_CONVERSION_COMPRESSED : POINT_CONVERSION_UNCOMPRESSED);
- int nSize = i2d_ECPrivateKey(pkey, NULL);
- assert(nSize);
- privkey.resize(nSize);
- unsigned char* pbegin = &privkey[0];
- int nSize2 = i2d_ECPrivateKey(pkey, &pbegin);
- assert(nSize == nSize2);
+ return i2d_ECPrivateKey(pkey, NULL);
+ }
+ int GetPrivKey(unsigned char* privkey, bool fCompressed) {
+ EC_KEY_set_conv_form(pkey, fCompressed ? POINT_CONVERSION_COMPRESSED : POINT_CONVERSION_UNCOMPRESSED);
+ return i2d_ECPrivateKey(pkey, &privkey);
}
- bool SetPrivKey(const CPrivKey &privkey, bool fSkipCheck=false) {
- const unsigned char* pbegin = &privkey[0];
- if (d2i_ECPrivateKey(&pkey, &pbegin, privkey.size())) {
+ bool SetPrivKey(const unsigned char* privkey, size_t size, bool fSkipCheck=false) {
+ if (d2i_ECPrivateKey(&pkey, &privkey, size)) {
if(fSkipCheck)
return true;
@@ -424,7 +422,7 @@ bool CKey::SetPrivKey(const CPrivKey &privkey, bool fCompressedIn) {
return false;
#else
CECKey key;
- if (!key.SetPrivKey(privkey))
+ if (!key.SetPrivKey(&privkey[0], privkey.size()))
return false;
key.GetSecretBytes(vch);
#endif
@@ -436,16 +434,21 @@ bool CKey::SetPrivKey(const CPrivKey &privkey, bool fCompressedIn) {
CPrivKey CKey::GetPrivKey() const {
assert(fValid);
CPrivKey privkey;
+ int privkeylen, ret;
#ifdef USE_SECP256K1
privkey.resize(279);
- int privkeylen = 279;
- int ret = secp256k1_ecdsa_privkey_export(begin(), (unsigned char*)&privkey[0], &privkeylen, fCompressed);
+ privkeylen = 279;
+ ret = secp256k1_ecdsa_privkey_export(begin(), (unsigned char*)&privkey[0], &privkeylen, fCompressed);
assert(ret);
privkey.resize(privkeylen);
#else
CECKey key;
key.SetSecretBytes(vch);
- key.GetPrivKey(privkey, fCompressed);
+ privkeylen = key.GetPrivKeySize(fCompressed);
+ assert(privkeylen);
+ privkey.resize(privkeylen);
+ ret = key.GetPrivKey(&privkey[0], fCompressed);
+ assert(ret == (int)privkey.size());
#endif
return privkey;
}
@@ -517,7 +520,7 @@ bool CKey::Load(CPrivKey &privkey, CPubKey &vchPubKey, bool fSkipCheck=false) {
return false;
#else
CECKey key;
- if (!key.SetPrivKey(privkey, fSkipCheck))
+ if (!key.SetPrivKey(&privkey[0], privkey.size(), fSkipCheck))
return false;
key.GetSecretBytes(vch);
#endif
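
The refactored GetPrivKeySize()/GetPrivKey() pair leans on a standard OpenSSL idiom: i2d_ECPrivateKey() returns the DER length when the output pointer is NULL, and writes the encoding (advancing the caller's pointer) when it is not. A standalone sketch of that idiom, with an invented function name:

    #include <openssl/ec.h>
    #include <vector>

    std::vector<unsigned char> ExportDERPrivateKeySketch(EC_KEY* pkey)
    {
        std::vector<unsigned char> der;
        int nSize = i2d_ECPrivateKey(pkey, NULL);         // pass 1: query length only
        if (nSize <= 0)
            return der;                                   // export failed
        der.resize(nSize);
        unsigned char* pbegin = &der[0];
        int nWritten = i2d_ECPrivateKey(pkey, &pbegin);   // pass 2: write DER, pointer advances
        if (nWritten != nSize)
            der.clear();                                  // defensive: sizes should match
        return der;
    }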
diff --git a/src/key.h b/src/key.h
index f6f6d35d34..48b1652536 100644
--- a/src/key.h
+++ b/src/key.h
@@ -30,14 +30,6 @@ public:
CKeyID(const uint160& in) : uint160(in) {}
};
-/** A reference to a CScript: the Hash160 of its serialization (see script.h) */
-class CScriptID : public uint160
-{
-public:
- CScriptID() : uint160(0) {}
- CScriptID(const uint160& in) : uint160(in) {}
-};
-
/** An encapsulated public key. */
class CPubKey
{
diff --git a/src/keystore.cpp b/src/keystore.cpp
index 755defa26d..039c690625 100644
--- a/src/keystore.cpp
+++ b/src/keystore.cpp
@@ -8,6 +8,7 @@
#include "crypter.h"
#include "key.h"
#include "script/script.h"
+#include "script/standard.h"
#include "util.h"
#include <boost/foreach.hpp>
@@ -38,7 +39,7 @@ bool CBasicKeyStore::AddCScript(const CScript& redeemScript)
return error("CBasicKeyStore::AddCScript() : redeemScripts > %i bytes are invalid", MAX_SCRIPT_ELEMENT_SIZE);
LOCK(cs_KeyStore);
- mapScripts[redeemScript.GetID()] = redeemScript;
+ mapScripts[CScriptID(redeemScript)] = redeemScript;
return true;
}
diff --git a/src/keystore.h b/src/keystore.h
index d3478f7672..4f8189c8f5 100644
--- a/src/keystore.h
+++ b/src/keystore.h
@@ -13,6 +13,7 @@
#include <boost/variant.hpp>
class CScript;
+class CScriptID;
/** A virtual base class for key stores */
class CKeyStore
diff --git a/src/leveldb/CONTRIBUTING.md b/src/leveldb/CONTRIBUTING.md
new file mode 100644
index 0000000000..cd600ff46b
--- /dev/null
+++ b/src/leveldb/CONTRIBUTING.md
@@ -0,0 +1,36 @@
+# Contributing
+
+We'd love to accept your code patches! However, before we can take them, we
+have to jump a couple of legal hurdles.
+
+## Contributor License Agreements
+
+Please fill out either the individual or corporate Contributor License
+Agreement as appropriate.
+
+* If you are an individual writing original source code and you're sure you
+own the intellectual property, then sign an [individual CLA](https://developers.google.com/open-source/cla/individual).
+* If you work for a company that wants to allow you to contribute your work,
+then sign a [corporate CLA](https://developers.google.com/open-source/cla/corporate).
+
+Follow either of the two links above to access the appropriate CLA and
+instructions for how to sign and return it.
+
+## Submitting a Patch
+
+1. Sign the contributors license agreement above.
+2. Decide which code you want to submit. A submission should be a set of changes
+that addresses one issue in the [issue tracker](https://github.com/google/leveldb/issues).
+Please don't mix more than one logical change per submission, because it makes
+the history hard to follow. If you want to make a change
+(e.g. add a sample or feature) that doesn't have a corresponding issue in the
+issue tracker, please create one.
+3. **Submitting**: When you are ready to submit, send us a Pull Request. Be
+sure to include the issue number you fixed and the name you used to sign
+the CLA.
+
+## Writing Code ##
+
+If your contribution contains code, please make sure that it follows
+[the style guide](http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml).
+Otherwise we will have to ask you to make changes, and that's no fun for anyone.
diff --git a/src/leveldb/Makefile b/src/leveldb/Makefile
index f8903b69e4..2bd2cadcdd 100644
--- a/src/leveldb/Makefile
+++ b/src/leveldb/Makefile
@@ -6,9 +6,12 @@
# Uncomment exactly one of the lines labelled (A), (B), and (C) below
# to switch between compilation modes.
-OPT ?= -O2 -DNDEBUG # (A) Production use (optimized mode)
-# OPT ?= -g2 # (B) Debug mode, w/ full line-level debugging symbols
-# OPT ?= -O2 -g2 -DNDEBUG # (C) Profiling mode: opt, but w/debugging symbols
+# (A) Production use (optimized mode)
+OPT ?= -O2 -DNDEBUG
+# (B) Debug mode, w/ full line-level debugging symbols
+# OPT ?= -g2
+# (C) Profiling mode: opt, but w/debugging symbols
+# OPT ?= -O2 -g2 -DNDEBUG
#-----------------------------------------------
# detect what platform we're building on
@@ -29,6 +32,11 @@ MEMENVOBJECTS = $(MEMENV_SOURCES:.cc=.o)
TESTUTIL = ./util/testutil.o
TESTHARNESS = ./util/testharness.o $(TESTUTIL)
+# Note: iOS should probably be using libtool, not ar.
+ifeq ($(PLATFORM), IOS)
+AR=xcrun ar
+endif
+
TESTS = \
arena_test \
autocompact_test \
@@ -43,6 +51,7 @@ TESTS = \
env_test \
filename_test \
filter_block_test \
+ hash_test \
issue178_test \
issue200_test \
log_test \
@@ -72,7 +81,7 @@ SHARED = $(SHARED1)
else
# Update db.h if you change these.
SHARED_MAJOR = 1
-SHARED_MINOR = 17
+SHARED_MINOR = 18
SHARED1 = libleveldb.$(PLATFORM_SHARED_EXT)
SHARED2 = $(SHARED1).$(SHARED_MAJOR)
SHARED3 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR)
@@ -152,6 +161,9 @@ filename_test: db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS)
filter_block_test: table/filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS)
$(CXX) $(LDFLAGS) table/filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+hash_test: util/hash_test.o $(LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) util/hash_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
issue178_test: issues/issue178_test.o $(LIBOBJECTS) $(TESTHARNESS)
$(CXX) $(LDFLAGS) issues/issue178_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
@@ -194,17 +206,17 @@ IOSARCH=-arch armv6 -arch armv7 -arch armv7s -arch arm64
.cc.o:
mkdir -p ios-x86/$(dir $@)
- $(CXX) $(CXXFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
+ xcrun -sdk iphonesimulator $(CXX) $(CXXFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
mkdir -p ios-arm/$(dir $@)
xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk $(IOSARCH) -c $< -o ios-arm/$@
- lipo ios-x86/$@ ios-arm/$@ -create -output $@
+ xcrun lipo ios-x86/$@ ios-arm/$@ -create -output $@
.c.o:
mkdir -p ios-x86/$(dir $@)
- $(CC) $(CFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
+ xcrun -sdk iphonesimulator $(CC) $(CFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
mkdir -p ios-arm/$(dir $@)
xcrun -sdk iphoneos $(CC) $(CFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk $(IOSARCH) -c $< -o ios-arm/$@
- lipo ios-x86/$@ ios-arm/$@ -create -output $@
+ xcrun lipo ios-x86/$@ ios-arm/$@ -create -output $@
else
.cc.o:
diff --git a/src/leveldb/README.md b/src/leveldb/README.md
new file mode 100644
index 0000000000..480affb5ca
--- /dev/null
+++ b/src/leveldb/README.md
@@ -0,0 +1,138 @@
+**LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.**
+
+Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
+
+# Features
+ * Keys and values are arbitrary byte arrays.
+ * Data is stored sorted by key.
+ * Callers can provide a custom comparison function to override the sort order.
+ * The basic operations are `Put(key,value)`, `Get(key)`, `Delete(key)`.
+ * Multiple changes can be made in one atomic batch.
+ * Users can create a transient snapshot to get a consistent view of data.
+ * Forward and backward iteration is supported over the data.
+ * Data is automatically compressed using the [Snappy compression library](http://code.google.com/p/snappy).
+ * External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions.
+ * [Detailed documentation](http://htmlpreview.github.io/?https://github.com/google/leveldb/blob/master/doc/index.html) about how to use the library is included with the source code.
+
+
+# Limitations
+ * This is not a SQL database. It does not have a relational data model, it does not support SQL queries, and it has no support for indexes.
+ * Only a single process (possibly multi-threaded) can access a particular database at a time.
+ * There is no client-server support built into the library. An application that needs such support will have to wrap its own server around the library.
+
+# Performance
+
+Here is a performance report (with explanations) from the run of the
+included db_bench program. The results are somewhat noisy, but should
+be enough to get a ballpark performance estimate.
+
+## Setup
+
+We use a database with a million entries. Each entry has a 16 byte
+key, and a 100 byte value. Values used by the benchmark compress to
+about half their original size.
+
+ LevelDB: version 1.1
+ Date: Sun May 1 12:11:26 2011
+ CPU: 4 x Intel(R) Core(TM)2 Quad CPU Q6600 @ 2.40GHz
+ CPUCache: 4096 KB
+ Keys: 16 bytes each
+ Values: 100 bytes each (50 bytes after compression)
+ Entries: 1000000
+ Raw Size: 110.6 MB (estimated)
+ File Size: 62.9 MB (estimated)
+
+## Write performance
+
+The "fill" benchmarks create a brand new database, in either
+sequential, or random order. The "fillsync" benchmark flushes data
+from the operating system to the disk after every operation; the other
+write operations leave the data sitting in the operating system buffer
+cache for a while. The "overwrite" benchmark does random writes that
+update existing keys in the database.
+
+ fillseq : 1.765 micros/op; 62.7 MB/s
+ fillsync : 268.409 micros/op; 0.4 MB/s (10000 ops)
+ fillrandom : 2.460 micros/op; 45.0 MB/s
+ overwrite : 2.380 micros/op; 46.5 MB/s
+
+Each "op" above corresponds to a write of a single key/value pair.
+I.e., a random write benchmark goes at approximately 400,000 writes per second.
+
+Each "fillsync" operation costs much less (0.3 millisecond)
+than a disk seek (typically 10 milliseconds). We suspect that this is
+because the hard disk itself is buffering the update in its memory and
+responding before the data has been written to the platter. This may
+or may not be safe based on whether or not the hard disk has enough
+power to save its memory in the event of a power failure.
+
+## Read performance
+
+We list the performance of reading sequentially in both the forward
+and reverse direction, and also the performance of a random lookup.
+Note that the database created by the benchmark is quite small.
+Therefore the report characterizes the performance of leveldb when the
+working set fits in memory. The cost of reading a piece of data that
+is not present in the operating system buffer cache will be dominated
+by the one or two disk seeks needed to fetch the data from disk.
+Write performance will be mostly unaffected by whether or not the
+working set fits in memory.
+
+ readrandom : 16.677 micros/op; (approximately 60,000 reads per second)
+ readseq : 0.476 micros/op; 232.3 MB/s
+ readreverse : 0.724 micros/op; 152.9 MB/s
+
+LevelDB compacts its underlying storage data in the background to
+improve read performance. The results listed above were done
+immediately after a lot of random writes. The results after
+compactions (which are usually triggered automatically) are better.
+
+ readrandom : 11.602 micros/op; (approximately 85,000 reads per second)
+ readseq : 0.423 micros/op; 261.8 MB/s
+ readreverse : 0.663 micros/op; 166.9 MB/s
+
+Some of the high cost of reads comes from repeated decompression of blocks
+read from disk. If we supply enough cache to leveldb that it can hold the
+uncompressed blocks in memory, the read performance improves again:
+
+ readrandom : 9.775 micros/op; (approximately 100,000 reads per second before compaction)
+ readrandom : 5.215 micros/op; (approximately 190,000 reads per second after compaction)
+
+## Repository contents
+
+See doc/index.html for more explanation. See doc/impl.html for a brief overview of the implementation.
+
+The public interface is in include/*.h. Callers should not include or
+rely on the details of any other header files in this package. Those
+internal APIs may be changed without warning.
+
+Guide to header files:
+
+* **include/db.h**: Main interface to the DB: Start here
+
+* **include/options.h**: Control over the behavior of an entire database,
+and also control over the behavior of individual reads and writes.
+
+* **include/comparator.h**: Abstraction for user-specified comparison function.
+If you want just bytewise comparison of keys, you can use the default
+comparator, but clients can write their own comparator implementations if they
+want custom ordering (e.g. to handle different character encodings, etc.)
+
+* **include/iterator.h**: Interface for iterating over data. You can get
+an iterator from a DB object.
+
+* **include/write_batch.h**: Interface for atomically applying multiple
+updates to a database.
+
+* **include/slice.h**: A simple module for maintaining a pointer and a
+length into some other byte array.
+
+* **include/status.h**: Status is returned from many of the public interfaces
+and is used to report success and various kinds of errors.
+
+* **include/env.h**:
+Abstraction of the OS environment. A posix implementation of this interface is
+in util/env_posix.cc
+
+* **include/table.h, include/table_builder.h**: Lower-level modules that most
+clients probably won't use directly
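
A minimal usage sketch of the operations the README lists (Put/Get plus an atomic batch); the database path is arbitrary and error handling is reduced to asserts:

    #include <cassert>
    #include <string>
    #include "leveldb/db.h"
    #include "leveldb/write_batch.h"

    int main() {
      leveldb::DB* db;
      leveldb::Options options;
      options.create_if_missing = true;
      leveldb::Status status = leveldb::DB::Open(options, "/tmp/leveldb_example", &db);
      assert(status.ok());

      // Single-key operations.
      status = db->Put(leveldb::WriteOptions(), "key1", "value1");
      assert(status.ok());
      std::string value;
      status = db->Get(leveldb::ReadOptions(), "key1", &value);
      assert(status.ok());

      // Several changes applied as one atomic batch.
      leveldb::WriteBatch batch;
      batch.Delete("key1");
      batch.Put("key2", value);
      status = db->Write(leveldb::WriteOptions(), &batch);
      assert(status.ok());

      delete db;
      return 0;
    }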
diff --git a/src/leveldb/build_detect_platform b/src/leveldb/build_detect_platform
index 85b1ce0224..a1101c1bda 100755
--- a/src/leveldb/build_detect_platform
+++ b/src/leveldb/build_detect_platform
@@ -20,7 +20,7 @@
#
# The PLATFORM_CCFLAGS and PLATFORM_CXXFLAGS might include the following:
#
-# -DLEVELDB_CSTDATOMIC_PRESENT if <cstdatomic> is present
+# -DLEVELDB_ATOMIC_PRESENT if <atomic> is present
# -DLEVELDB_PLATFORM_POSIX for Posix-based platforms
# -DSNAPPY if the Snappy library is present
#
@@ -72,6 +72,12 @@ if [ "$CXX" = "g++" ]; then
fi
case "$TARGET_OS" in
+ CYGWIN_*)
+ PLATFORM=OS_LINUX
+ COMMON_FLAGS="$MEMCMP_FLAG -lpthread -DOS_LINUX -DCYGWIN"
+ PLATFORM_LDFLAGS="-lpthread"
+ PORT_FILE=port/port_posix.cc
+ ;;
Darwin)
PLATFORM=OS_MACOSX
COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX"
@@ -185,13 +191,14 @@ if [ "$CROSS_COMPILE" = "true" ]; then
else
CXXOUTPUT="${TMPDIR}/leveldb_build_detect_platform-cxx.$$"
- # If -std=c++0x works, use <cstdatomic>. Otherwise use port_posix.h.
+ # If -std=c++0x works, use <atomic> as fallback for when memory barriers
+ # are not available.
$CXX $CXXFLAGS -std=c++0x -x c++ - -o $CXXOUTPUT 2>/dev/null <<EOF
- #include <cstdatomic>
+ #include <atomic>
int main() {}
EOF
if [ "$?" = 0 ]; then
- COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX -DLEVELDB_CSTDATOMIC_PRESENT"
+ COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX -DLEVELDB_ATOMIC_PRESENT"
PLATFORM_CXXFLAGS="-std=c++0x"
else
COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX"
diff --git a/src/leveldb/db/db_bench.cc b/src/leveldb/db/db_bench.cc
index fc46d89693..705a170aae 100644
--- a/src/leveldb/db/db_bench.cc
+++ b/src/leveldb/db/db_bench.cc
@@ -431,7 +431,7 @@ class Benchmark {
benchmarks = sep + 1;
}
- // Reset parameters that may be overriddden bwlow
+ // Reset parameters that may be overridden below
num_ = FLAGS_num;
reads_ = (FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads);
value_size_ = FLAGS_value_size;
@@ -811,7 +811,6 @@ class Benchmark {
void SeekRandom(ThreadState* thread) {
ReadOptions options;
- std::string value;
int found = 0;
for (int i = 0; i < reads_; i++) {
Iterator* iter = db_->NewIterator(options);
diff --git a/src/leveldb/db/db_impl.cc b/src/leveldb/db/db_impl.cc
index faf5e7d7ba..49b95953b4 100644
--- a/src/leveldb/db/db_impl.cc
+++ b/src/leveldb/db/db_impl.cc
@@ -392,7 +392,7 @@ Status DBImpl::RecoverLogFile(uint64_t log_number,
reporter.info_log = options_.info_log;
reporter.fname = fname.c_str();
reporter.status = (options_.paranoid_checks ? &status : NULL);
- // We intentially make log::Reader do checksumming even if
+ // We intentionally make log::Reader do checksumming even if
// paranoid_checks==false so that corruptions cause entire commits
// to be skipped instead of propagating bad information (like overly
// large sequence numbers).
@@ -1267,7 +1267,7 @@ WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
break;
}
- // Append to *reuslt
+ // Append to *result
if (result == first->batch) {
// Switch to temporary batch instead of disturbing caller's batch
result = tmp_batch_;
diff --git a/src/leveldb/db/db_test.cc b/src/leveldb/db/db_test.cc
index 280b01c14b..0fed9137d5 100644
--- a/src/leveldb/db/db_test.cc
+++ b/src/leveldb/db/db_test.cc
@@ -626,7 +626,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
// * sstable B in level 2
// Then do enough Get() calls to arrange for an automatic compaction
// of sstable A. A bug would cause the compaction to be marked as
- // occuring at level 1 (instead of the correct level 0).
+ // occurring at level 1 (instead of the correct level 0).
// Step 1: First place sstables in levels 0 and 2
int compaction_count = 0;
diff --git a/src/leveldb/db/dbformat.h b/src/leveldb/db/dbformat.h
index 5d8a032bd3..ea897b13c0 100644
--- a/src/leveldb/db/dbformat.h
+++ b/src/leveldb/db/dbformat.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#ifndef STORAGE_LEVELDB_DB_FORMAT_H_
-#define STORAGE_LEVELDB_DB_FORMAT_H_
+#ifndef STORAGE_LEVELDB_DB_DBFORMAT_H_
+#define STORAGE_LEVELDB_DB_DBFORMAT_H_
#include <stdio.h>
#include "leveldb/comparator.h"
@@ -227,4 +227,4 @@ inline LookupKey::~LookupKey() {
} // namespace leveldb
-#endif // STORAGE_LEVELDB_DB_FORMAT_H_
+#endif // STORAGE_LEVELDB_DB_DBFORMAT_H_
diff --git a/src/leveldb/db/dumpfile.cc b/src/leveldb/db/dumpfile.cc
new file mode 100644
index 0000000000..61c47c2ff9
--- /dev/null
+++ b/src/leveldb/db/dumpfile.cc
@@ -0,0 +1,225 @@
+// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <stdio.h>
+#include "db/dbformat.h"
+#include "db/filename.h"
+#include "db/log_reader.h"
+#include "db/version_edit.h"
+#include "db/write_batch_internal.h"
+#include "leveldb/env.h"
+#include "leveldb/iterator.h"
+#include "leveldb/options.h"
+#include "leveldb/status.h"
+#include "leveldb/table.h"
+#include "leveldb/write_batch.h"
+#include "util/logging.h"
+
+namespace leveldb {
+
+namespace {
+
+bool GuessType(const std::string& fname, FileType* type) {
+ size_t pos = fname.rfind('/');
+ std::string basename;
+ if (pos == std::string::npos) {
+ basename = fname;
+ } else {
+ basename = std::string(fname.data() + pos + 1, fname.size() - pos - 1);
+ }
+ uint64_t ignored;
+ return ParseFileName(basename, &ignored, type);
+}
+
+// Notified when log reader encounters corruption.
+class CorruptionReporter : public log::Reader::Reporter {
+ public:
+ WritableFile* dst_;
+ virtual void Corruption(size_t bytes, const Status& status) {
+ std::string r = "corruption: ";
+ AppendNumberTo(&r, bytes);
+ r += " bytes; ";
+ r += status.ToString();
+ r.push_back('\n');
+ dst_->Append(r);
+ }
+};
+
+// Print contents of a log file. (*func)() is called on every record.
+Status PrintLogContents(Env* env, const std::string& fname,
+ void (*func)(uint64_t, Slice, WritableFile*),
+ WritableFile* dst) {
+ SequentialFile* file;
+ Status s = env->NewSequentialFile(fname, &file);
+ if (!s.ok()) {
+ return s;
+ }
+ CorruptionReporter reporter;
+ reporter.dst_ = dst;
+ log::Reader reader(file, &reporter, true, 0);
+ Slice record;
+ std::string scratch;
+ while (reader.ReadRecord(&record, &scratch)) {
+ (*func)(reader.LastRecordOffset(), record, dst);
+ }
+ delete file;
+ return Status::OK();
+}
+
+// Called on every item found in a WriteBatch.
+class WriteBatchItemPrinter : public WriteBatch::Handler {
+ public:
+ WritableFile* dst_;
+ virtual void Put(const Slice& key, const Slice& value) {
+ std::string r = " put '";
+ AppendEscapedStringTo(&r, key);
+ r += "' '";
+ AppendEscapedStringTo(&r, value);
+ r += "'\n";
+ dst_->Append(r);
+ }
+ virtual void Delete(const Slice& key) {
+ std::string r = " del '";
+ AppendEscapedStringTo(&r, key);
+ r += "'\n";
+ dst_->Append(r);
+ }
+};
+
+
+// Called on every log record (each one of which is a WriteBatch)
+// found in a kLogFile.
+static void WriteBatchPrinter(uint64_t pos, Slice record, WritableFile* dst) {
+ std::string r = "--- offset ";
+ AppendNumberTo(&r, pos);
+ r += "; ";
+ if (record.size() < 12) {
+ r += "log record length ";
+ AppendNumberTo(&r, record.size());
+ r += " is too small\n";
+ dst->Append(r);
+ return;
+ }
+ WriteBatch batch;
+ WriteBatchInternal::SetContents(&batch, record);
+ r += "sequence ";
+ AppendNumberTo(&r, WriteBatchInternal::Sequence(&batch));
+ r.push_back('\n');
+ dst->Append(r);
+ WriteBatchItemPrinter batch_item_printer;
+ batch_item_printer.dst_ = dst;
+ Status s = batch.Iterate(&batch_item_printer);
+ if (!s.ok()) {
+ dst->Append(" error: " + s.ToString() + "\n");
+ }
+}
+
+Status DumpLog(Env* env, const std::string& fname, WritableFile* dst) {
+ return PrintLogContents(env, fname, WriteBatchPrinter, dst);
+}
+
+// Called on every log record (each one of which is a WriteBatch)
+// found in a kDescriptorFile.
+static void VersionEditPrinter(uint64_t pos, Slice record, WritableFile* dst) {
+ std::string r = "--- offset ";
+ AppendNumberTo(&r, pos);
+ r += "; ";
+ VersionEdit edit;
+ Status s = edit.DecodeFrom(record);
+ if (!s.ok()) {
+ r += s.ToString();
+ r.push_back('\n');
+ } else {
+ r += edit.DebugString();
+ }
+ dst->Append(r);
+}
+
+Status DumpDescriptor(Env* env, const std::string& fname, WritableFile* dst) {
+ return PrintLogContents(env, fname, VersionEditPrinter, dst);
+}
+
+Status DumpTable(Env* env, const std::string& fname, WritableFile* dst) {
+ uint64_t file_size;
+ RandomAccessFile* file = NULL;
+ Table* table = NULL;
+ Status s = env->GetFileSize(fname, &file_size);
+ if (s.ok()) {
+ s = env->NewRandomAccessFile(fname, &file);
+ }
+ if (s.ok()) {
+ // We use the default comparator, which may or may not match the
+ // comparator used in this database. However this should not cause
+ // problems since we only use Table operations that do not require
+ // any comparisons. In particular, we do not call Seek or Prev.
+ s = Table::Open(Options(), file, file_size, &table);
+ }
+ if (!s.ok()) {
+ delete table;
+ delete file;
+ return s;
+ }
+
+ ReadOptions ro;
+ ro.fill_cache = false;
+ Iterator* iter = table->NewIterator(ro);
+ std::string r;
+ for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+ r.clear();
+ ParsedInternalKey key;
+ if (!ParseInternalKey(iter->key(), &key)) {
+ r = "badkey '";
+ AppendEscapedStringTo(&r, iter->key());
+ r += "' => '";
+ AppendEscapedStringTo(&r, iter->value());
+ r += "'\n";
+ dst->Append(r);
+ } else {
+ r = "'";
+ AppendEscapedStringTo(&r, key.user_key);
+ r += "' @ ";
+ AppendNumberTo(&r, key.sequence);
+ r += " : ";
+ if (key.type == kTypeDeletion) {
+ r += "del";
+ } else if (key.type == kTypeValue) {
+ r += "val";
+ } else {
+ AppendNumberTo(&r, key.type);
+ }
+ r += " => '";
+ AppendEscapedStringTo(&r, iter->value());
+ r += "'\n";
+ dst->Append(r);
+ }
+ }
+ s = iter->status();
+ if (!s.ok()) {
+ dst->Append("iterator error: " + s.ToString() + "\n");
+ }
+
+ delete iter;
+ delete table;
+ delete file;
+ return Status::OK();
+}
+
+} // namespace
+
+Status DumpFile(Env* env, const std::string& fname, WritableFile* dst) {
+ FileType ftype;
+ if (!GuessType(fname, &ftype)) {
+ return Status::InvalidArgument(fname + ": unknown file type");
+ }
+ switch (ftype) {
+ case kLogFile: return DumpLog(env, fname, dst);
+ case kDescriptorFile: return DumpDescriptor(env, fname, dst);
+ case kTableFile: return DumpTable(env, fname, dst);
+ default:
+ break;
+ }
+ return Status::InvalidArgument(fname + ": not a dump-able file type");
+}
+
+} // namespace leveldb
diff --git a/src/leveldb/db/leveldb_main.cc b/src/leveldb/db/leveldb_main.cc
index 995d76107a..9f4b7dd70c 100644
--- a/src/leveldb/db/leveldb_main.cc
+++ b/src/leveldb/db/leveldb_main.cc
@@ -3,212 +3,38 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <stdio.h>
-#include "db/dbformat.h"
-#include "db/filename.h"
-#include "db/log_reader.h"
-#include "db/version_edit.h"
-#include "db/write_batch_internal.h"
+#include "leveldb/dumpfile.h"
#include "leveldb/env.h"
-#include "leveldb/iterator.h"
-#include "leveldb/options.h"
#include "leveldb/status.h"
-#include "leveldb/table.h"
-#include "leveldb/write_batch.h"
-#include "util/logging.h"
namespace leveldb {
-
namespace {
-bool GuessType(const std::string& fname, FileType* type) {
- size_t pos = fname.rfind('/');
- std::string basename;
- if (pos == std::string::npos) {
- basename = fname;
- } else {
- basename = std::string(fname.data() + pos + 1, fname.size() - pos - 1);
- }
- uint64_t ignored;
- return ParseFileName(basename, &ignored, type);
-}
-
-// Notified when log reader encounters corruption.
-class CorruptionReporter : public log::Reader::Reporter {
- public:
- virtual void Corruption(size_t bytes, const Status& status) {
- printf("corruption: %d bytes; %s\n",
- static_cast<int>(bytes),
- status.ToString().c_str());
- }
-};
-
-// Print contents of a log file. (*func)() is called on every record.
-bool PrintLogContents(Env* env, const std::string& fname,
- void (*func)(Slice)) {
- SequentialFile* file;
- Status s = env->NewSequentialFile(fname, &file);
- if (!s.ok()) {
- fprintf(stderr, "%s\n", s.ToString().c_str());
- return false;
- }
- CorruptionReporter reporter;
- log::Reader reader(file, &reporter, true, 0);
- Slice record;
- std::string scratch;
- while (reader.ReadRecord(&record, &scratch)) {
- printf("--- offset %llu; ",
- static_cast<unsigned long long>(reader.LastRecordOffset()));
- (*func)(record);
- }
- delete file;
- return true;
-}
-
-// Called on every item found in a WriteBatch.
-class WriteBatchItemPrinter : public WriteBatch::Handler {
+class StdoutPrinter : public WritableFile {
public:
- uint64_t offset_;
- uint64_t sequence_;
-
- virtual void Put(const Slice& key, const Slice& value) {
- printf(" put '%s' '%s'\n",
- EscapeString(key).c_str(),
- EscapeString(value).c_str());
- }
- virtual void Delete(const Slice& key) {
- printf(" del '%s'\n",
- EscapeString(key).c_str());
+ virtual Status Append(const Slice& data) {
+ fwrite(data.data(), 1, data.size(), stdout);
+ return Status::OK();
}
+ virtual Status Close() { return Status::OK(); }
+ virtual Status Flush() { return Status::OK(); }
+ virtual Status Sync() { return Status::OK(); }
};
-
-// Called on every log record (each one of which is a WriteBatch)
-// found in a kLogFile.
-static void WriteBatchPrinter(Slice record) {
- if (record.size() < 12) {
- printf("log record length %d is too small\n",
- static_cast<int>(record.size()));
- return;
- }
- WriteBatch batch;
- WriteBatchInternal::SetContents(&batch, record);
- printf("sequence %llu\n",
- static_cast<unsigned long long>(WriteBatchInternal::Sequence(&batch)));
- WriteBatchItemPrinter batch_item_printer;
- Status s = batch.Iterate(&batch_item_printer);
- if (!s.ok()) {
- printf(" error: %s\n", s.ToString().c_str());
- }
-}
-
-bool DumpLog(Env* env, const std::string& fname) {
- return PrintLogContents(env, fname, WriteBatchPrinter);
-}
-
-// Called on every log record (each one of which is a WriteBatch)
-// found in a kDescriptorFile.
-static void VersionEditPrinter(Slice record) {
- VersionEdit edit;
- Status s = edit.DecodeFrom(record);
- if (!s.ok()) {
- printf("%s\n", s.ToString().c_str());
- return;
- }
- printf("%s", edit.DebugString().c_str());
-}
-
-bool DumpDescriptor(Env* env, const std::string& fname) {
- return PrintLogContents(env, fname, VersionEditPrinter);
-}
-
-bool DumpTable(Env* env, const std::string& fname) {
- uint64_t file_size;
- RandomAccessFile* file = NULL;
- Table* table = NULL;
- Status s = env->GetFileSize(fname, &file_size);
- if (s.ok()) {
- s = env->NewRandomAccessFile(fname, &file);
- }
- if (s.ok()) {
- // We use the default comparator, which may or may not match the
- // comparator used in this database. However this should not cause
- // problems since we only use Table operations that do not require
- // any comparisons. In particular, we do not call Seek or Prev.
- s = Table::Open(Options(), file, file_size, &table);
- }
- if (!s.ok()) {
- fprintf(stderr, "%s\n", s.ToString().c_str());
- delete table;
- delete file;
- return false;
- }
-
- ReadOptions ro;
- ro.fill_cache = false;
- Iterator* iter = table->NewIterator(ro);
- for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
- ParsedInternalKey key;
- if (!ParseInternalKey(iter->key(), &key)) {
- printf("badkey '%s' => '%s'\n",
- EscapeString(iter->key()).c_str(),
- EscapeString(iter->value()).c_str());
- } else {
- char kbuf[20];
- const char* type;
- if (key.type == kTypeDeletion) {
- type = "del";
- } else if (key.type == kTypeValue) {
- type = "val";
- } else {
- snprintf(kbuf, sizeof(kbuf), "%d", static_cast<int>(key.type));
- type = kbuf;
- }
- printf("'%s' @ %8llu : %s => '%s'\n",
- EscapeString(key.user_key).c_str(),
- static_cast<unsigned long long>(key.sequence),
- type,
- EscapeString(iter->value()).c_str());
- }
- }
- s = iter->status();
- if (!s.ok()) {
- printf("iterator error: %s\n", s.ToString().c_str());
- }
-
- delete iter;
- delete table;
- delete file;
- return true;
-}
-
-bool DumpFile(Env* env, const std::string& fname) {
- FileType ftype;
- if (!GuessType(fname, &ftype)) {
- fprintf(stderr, "%s: unknown file type\n", fname.c_str());
- return false;
- }
- switch (ftype) {
- case kLogFile: return DumpLog(env, fname);
- case kDescriptorFile: return DumpDescriptor(env, fname);
- case kTableFile: return DumpTable(env, fname);
-
- default: {
- fprintf(stderr, "%s: not a dump-able file type\n", fname.c_str());
- break;
- }
- }
- return false;
-}
-
bool HandleDumpCommand(Env* env, char** files, int num) {
+ StdoutPrinter printer;
bool ok = true;
for (int i = 0; i < num; i++) {
- ok &= DumpFile(env, files[i]);
+ Status s = DumpFile(env, files[i], &printer);
+ if (!s.ok()) {
+ fprintf(stderr, "%s\n", s.ToString().c_str());
+ ok = false;
+ }
}
return ok;
}
-}
+} // namespace
} // namespace leveldb
static void Usage() {
diff --git a/src/leveldb/db/log_format.h b/src/leveldb/db/log_format.h
index 2690cb9789..a8c06efe18 100644
--- a/src/leveldb/db/log_format.h
+++ b/src/leveldb/db/log_format.h
@@ -26,8 +26,8 @@ static const int kMaxRecordType = kLastType;
static const int kBlockSize = 32768;
-// Header is checksum (4 bytes), type (1 byte), length (2 bytes).
-static const int kHeaderSize = 4 + 1 + 2;
+// Header is checksum (4 bytes), length (2 bytes), type (1 byte).
+static const int kHeaderSize = 4 + 2 + 1;
} // namespace log
} // namespace leveldb
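The corrected comment reflects the on-disk order: checksum, then length, then type, seven bytes in all, with the multi-byte fields stored little-endian. A small decoding sketch of that header — the struct and helper are illustrative, not part of the library:

    #include <stdint.h>

    struct RecordHeader {
      uint32_t crc;     // crc32c of type and payload, little-endian
      uint16_t length;  // payload length, little-endian
      uint8_t  type;    // FULL / FIRST / MIDDLE / LAST
    };

    // Illustrative decoder for the 7-byte record header described above.
    inline RecordHeader DecodeRecordHeader(const unsigned char* p) {
      RecordHeader h;
      h.crc = static_cast<uint32_t>(p[0]) |
              (static_cast<uint32_t>(p[1]) << 8) |
              (static_cast<uint32_t>(p[2]) << 16) |
              (static_cast<uint32_t>(p[3]) << 24);
      h.length = static_cast<uint16_t>(p[4] | (p[5] << 8));
      h.type = p[6];
      return h;
    }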
diff --git a/src/leveldb/db/log_reader.cc b/src/leveldb/db/log_reader.cc
index 4919216d04..e44b66c85b 100644
--- a/src/leveldb/db/log_reader.cc
+++ b/src/leveldb/db/log_reader.cc
@@ -167,14 +167,14 @@ uint64_t Reader::LastRecordOffset() {
return last_record_offset_;
}
-void Reader::ReportCorruption(size_t bytes, const char* reason) {
+void Reader::ReportCorruption(uint64_t bytes, const char* reason) {
ReportDrop(bytes, Status::Corruption(reason));
}
-void Reader::ReportDrop(size_t bytes, const Status& reason) {
+void Reader::ReportDrop(uint64_t bytes, const Status& reason) {
if (reporter_ != NULL &&
end_of_buffer_offset_ - buffer_.size() - bytes >= initial_offset_) {
- reporter_->Corruption(bytes, reason);
+ reporter_->Corruption(static_cast<size_t>(bytes), reason);
}
}
diff --git a/src/leveldb/db/log_reader.h b/src/leveldb/db/log_reader.h
index 82d4bee68d..6aff791716 100644
--- a/src/leveldb/db/log_reader.h
+++ b/src/leveldb/db/log_reader.h
@@ -94,8 +94,8 @@ class Reader {
// Reports dropped bytes to the reporter.
// buffer_ must be updated to remove the dropped bytes prior to invocation.
- void ReportCorruption(size_t bytes, const char* reason);
- void ReportDrop(size_t bytes, const Status& reason);
+ void ReportCorruption(uint64_t bytes, const char* reason);
+ void ReportDrop(uint64_t bytes, const Status& reason);
// No copying allowed
Reader(const Reader&);
diff --git a/src/leveldb/db/log_test.cc b/src/leveldb/db/log_test.cc
index 91d3caafc3..dcf0562652 100644
--- a/src/leveldb/db/log_test.cc
+++ b/src/leveldb/db/log_test.cc
@@ -463,7 +463,7 @@ TEST(LogTest, ErrorJoinsRecords) {
ASSERT_EQ("correct", Read());
ASSERT_EQ("EOF", Read());
- const int dropped = DroppedBytes();
+ const size_t dropped = DroppedBytes();
ASSERT_LE(dropped, 2*kBlockSize + 100);
ASSERT_GE(dropped, 2*kBlockSize);
}
diff --git a/src/leveldb/db/repair.cc b/src/leveldb/db/repair.cc
index 7727fafc58..4cd4bb047f 100644
--- a/src/leveldb/db/repair.cc
+++ b/src/leveldb/db/repair.cc
@@ -186,7 +186,7 @@ class Repairer {
reporter.env = env_;
reporter.info_log = options_.info_log;
reporter.lognum = log;
- // We intentially make log::Reader do checksumming so that
+ // We intentionally make log::Reader do checksumming so that
// corruptions cause entire commits to be skipped instead of
// propagating bad information (like overly large sequence
// numbers).
diff --git a/src/leveldb/db/skiplist.h b/src/leveldb/db/skiplist.h
index af85be6d01..ed8b092203 100644
--- a/src/leveldb/db/skiplist.h
+++ b/src/leveldb/db/skiplist.h
@@ -1,3 +1,6 @@
+#ifndef STORAGE_LEVELDB_DB_SKIPLIST_H_
+#define STORAGE_LEVELDB_DB_SKIPLIST_H_
+
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
@@ -377,3 +380,5 @@ bool SkipList<Key,Comparator>::Contains(const Key& key) const {
}
} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_DB_SKIPLIST_H_
diff --git a/src/leveldb/db/write_batch_internal.h b/src/leveldb/db/write_batch_internal.h
index 4423a7f318..310a3c8912 100644
--- a/src/leveldb/db/write_batch_internal.h
+++ b/src/leveldb/db/write_batch_internal.h
@@ -21,10 +21,10 @@ class WriteBatchInternal {
// Set the count for the number of entries in the batch.
static void SetCount(WriteBatch* batch, int n);
- // Return the seqeunce number for the start of this batch.
+ // Return the sequence number for the start of this batch.
static SequenceNumber Sequence(const WriteBatch* batch);
- // Store the specified number as the seqeunce number for the start of
+ // Store the specified number as the sequence number for the start of
// this batch.
static void SetSequence(WriteBatch* batch, SequenceNumber seq);
diff --git a/src/leveldb/doc/bench/db_bench_tree_db.cc b/src/leveldb/doc/bench/db_bench_tree_db.cc
index ed86f031c2..4ca381f11f 100644
--- a/src/leveldb/doc/bench/db_bench_tree_db.cc
+++ b/src/leveldb/doc/bench/db_bench_tree_db.cc
@@ -338,7 +338,7 @@ class Benchmark {
bool write_sync = false;
if (name == Slice("fillseq")) {
Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1);
-
+ DBSynchronize(db_);
} else if (name == Slice("fillrandom")) {
Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1);
DBSynchronize(db_);
diff --git a/src/leveldb/doc/impl.html b/src/leveldb/doc/impl.html
index 28817fe0da..6a468be095 100644
--- a/src/leveldb/doc/impl.html
+++ b/src/leveldb/doc/impl.html
@@ -111,7 +111,7 @@ A compaction merges the contents of the picked files to produce a
sequence of level-(L+1) files. We switch to producing a new
level-(L+1) file after the current output file has reached the target
file size (2MB). We also switch to a new output file when the key
-range of the current output file has grown enough to overlap more then
+range of the current output file has grown enough to overlap more than
ten level-(L+2) files. This last rule ensures that a later compaction
of a level-(L+1) file will not pick up too much data from level-(L+2).
@@ -151,7 +151,7 @@ compaction cost will be approximately 0.5 second.
If we throttle the background writing to something small, say 10% of
the full 100MB/s speed, a compaction may take up to 5 seconds. If the
user is writing at 10MB/s, we might build up lots of level-0 files
-(~50 to hold the 5*10MB). This may signficantly increase the cost of
+(~50 to hold the 5*10MB). This may significantly increase the cost of
reads due to the overhead of merging more files together on every
read.
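Working the numbers: five seconds of throttled compaction with the user writing at 10 MB/s is 5 s × 10 MB/s = 50 MB of fresh level-0 data, which is where the figure of roughly fifty small files above comes from.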
diff --git a/src/leveldb/doc/log_format.txt b/src/leveldb/doc/log_format.txt
index 5228f624de..4cca5ef6ea 100644
--- a/src/leveldb/doc/log_format.txt
+++ b/src/leveldb/doc/log_format.txt
@@ -11,7 +11,7 @@ Each block consists of a sequence of records:
A record never starts within the last six bytes of a block (since it
won't fit). Any leftover bytes here form the trailer, which must
-consist entirely of zero bytes and must be skipped by readers.
+consist entirely of zero bytes and must be skipped by readers.
Aside: if exactly seven bytes are left in the current block, and a new
non-zero length record is added, the writer must emit a FIRST record
@@ -33,8 +33,8 @@ The FULL record contains the contents of an entire user record.
FIRST, MIDDLE, LAST are types used for user records that have been
split into multiple fragments (typically because of block boundaries).
FIRST is the type of the first fragment of a user record, LAST is the
-type of the last fragment of a user record, and MID is the type of all
-interior fragments of a user record.
+type of the last fragment of a user record, and MIDDLE is the type of
+all interior fragments of a user record.
Example: consider a sequence of user records:
A: length 1000
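The fragmentation rule can be sketched as follows (payload sizes only; constants as in db/log_format.h, function name illustrative). A record that fits in the space left in the current block becomes one FULL fragment; otherwise it is emitted as FIRST, zero or more MIDDLE fragments, then LAST:

    #include <algorithm>
    #include <vector>

    static const int kBlockSize = 32768;   // physical block size
    static const int kHeaderSize = 7;      // checksum + length + type

    // Payload sizes of the fragments a record of `length` bytes is split
    // into, given `left` bytes remaining in the current block.
    std::vector<int> FragmentSizes(int length, int left) {
      std::vector<int> sizes;
      do {
        if (left < kHeaderSize) left = kBlockSize;   // zero-filled trailer, move to a new block
        const int avail = left - kHeaderSize;
        const int frag = std::min(length, avail);
        sizes.push_back(frag);
        length -= frag;
        left -= kHeaderSize + frag;
      } while (length > 0);
      return sizes;
    }
    // FragmentSizes(1000, kBlockSize) == {1000}: a single FULL record.
    // FragmentSizes(n, kHeaderSize) starts with a zero-length FIRST fragment,
    // matching the aside above about exactly seven leftover bytes.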
diff --git a/src/leveldb/helpers/memenv/memenv.cc b/src/leveldb/helpers/memenv/memenv.cc
index 5879de1214..43ef2e0729 100644
--- a/src/leveldb/helpers/memenv/memenv.cc
+++ b/src/leveldb/helpers/memenv/memenv.cc
@@ -55,14 +55,15 @@ class FileState {
}
const uint64_t available = size_ - offset;
if (n > available) {
- n = available;
+ n = static_cast<size_t>(available);
}
if (n == 0) {
*result = Slice();
return Status::OK();
}
- size_t block = offset / kBlockSize;
+ assert(offset / kBlockSize <= SIZE_MAX);
+ size_t block = static_cast<size_t>(offset / kBlockSize);
size_t block_offset = offset % kBlockSize;
if (n <= kBlockSize - block_offset) {
@@ -167,7 +168,7 @@ class SequentialFileImpl : public SequentialFile {
if (pos_ > file_->Size()) {
return Status::IOError("pos_ > file_->Size()");
}
- const size_t available = file_->Size() - pos_;
+ const uint64_t available = file_->Size() - pos_;
if (n > available) {
n = available;
}
@@ -177,7 +178,7 @@ class SequentialFileImpl : public SequentialFile {
private:
FileState* file_;
- size_t pos_;
+ uint64_t pos_;
};
class RandomAccessFileImpl : public RandomAccessFile {
diff --git a/src/leveldb/include/leveldb/cache.h b/src/leveldb/include/leveldb/cache.h
index 5e3b47637d..1a201e5e0a 100644
--- a/src/leveldb/include/leveldb/cache.h
+++ b/src/leveldb/include/leveldb/cache.h
@@ -96,4 +96,4 @@ class Cache {
} // namespace leveldb
-#endif // STORAGE_LEVELDB_UTIL_CACHE_H_
+#endif // STORAGE_LEVELDB_INCLUDE_CACHE_H_
diff --git a/src/leveldb/include/leveldb/db.h b/src/leveldb/include/leveldb/db.h
index 40851b2aa8..4c169bf22e 100644
--- a/src/leveldb/include/leveldb/db.h
+++ b/src/leveldb/include/leveldb/db.h
@@ -14,7 +14,7 @@ namespace leveldb {
// Update Makefile if you change these
static const int kMajorVersion = 1;
-static const int kMinorVersion = 17;
+static const int kMinorVersion = 18;
struct Options;
struct ReadOptions;
diff --git a/src/leveldb/include/leveldb/dumpfile.h b/src/leveldb/include/leveldb/dumpfile.h
new file mode 100644
index 0000000000..3f97fda16b
--- /dev/null
+++ b/src/leveldb/include/leveldb/dumpfile.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2014 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_
+#define STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_
+
+#include <string>
+#include "leveldb/env.h"
+#include "leveldb/status.h"
+
+namespace leveldb {
+
+// Dump the contents of the file named by fname in text format to
+// *dst. Makes a sequence of dst->Append() calls; each call is passed
+// the newline-terminated text corresponding to a single item found
+// in the file.
+//
+// Returns a non-OK result if fname does not name a leveldb storage
+// file, or if the file cannot be read.
+Status DumpFile(Env* env, const std::string& fname, WritableFile* dst);
+
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_
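Because the new entry point writes through any WritableFile, the dump can be sent somewhere other than stdout. A sketch of capturing it in memory — StringSink and DumpToString are illustrative names, not library API:

    #include <string>
    #include "leveldb/dumpfile.h"
    #include "leveldb/env.h"
    #include "leveldb/slice.h"
    #include "leveldb/status.h"

    // WritableFile that collects the dump output in memory.
    class StringSink : public leveldb::WritableFile {
     public:
      std::string contents;
      virtual leveldb::Status Append(const leveldb::Slice& data) {
        contents.append(data.data(), data.size());
        return leveldb::Status::OK();
      }
      virtual leveldb::Status Close() { return leveldb::Status::OK(); }
      virtual leveldb::Status Flush() { return leveldb::Status::OK(); }
      virtual leveldb::Status Sync() { return leveldb::Status::OK(); }
    };

    // Dump one log/MANIFEST/table file into a string.
    leveldb::Status DumpToString(const std::string& fname, std::string* out) {
      StringSink sink;
      leveldb::Status s = leveldb::DumpFile(leveldb::Env::Default(), fname, &sink);
      out->swap(sink.contents);
      return s;
    }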
diff --git a/src/leveldb/include/leveldb/env.h b/src/leveldb/include/leveldb/env.h
index b2072d02c1..f709514da6 100644
--- a/src/leveldb/include/leveldb/env.h
+++ b/src/leveldb/include/leveldb/env.h
@@ -142,7 +142,7 @@ class Env {
// useful for computing deltas of time.
virtual uint64_t NowMicros() = 0;
- // Sleep/delay the thread for the perscribed number of micro-seconds.
+ // Sleep/delay the thread for the prescribed number of micro-seconds.
virtual void SleepForMicroseconds(int micros) = 0;
private:
diff --git a/src/leveldb/include/leveldb/iterator.h b/src/leveldb/include/leveldb/iterator.h
index ad543eb46c..76aced04bd 100644
--- a/src/leveldb/include/leveldb/iterator.h
+++ b/src/leveldb/include/leveldb/iterator.h
@@ -61,7 +61,7 @@ class Iterator {
// Return the value for the current entry. The underlying storage for
// the returned slice is valid only until the next modification of
// the iterator.
- // REQUIRES: !AtEnd() && !AtStart()
+ // REQUIRES: Valid()
virtual Slice value() const = 0;
// If an error has occurred, return it. Else return an ok status.
diff --git a/src/leveldb/include/leveldb/options.h b/src/leveldb/include/leveldb/options.h
index fdda718d30..7c9b973454 100644
--- a/src/leveldb/include/leveldb/options.h
+++ b/src/leveldb/include/leveldb/options.h
@@ -153,7 +153,7 @@ struct ReadOptions {
// If "snapshot" is non-NULL, read as of the supplied snapshot
// (which must belong to the DB that is being read and which must
- // not have been released). If "snapshot" is NULL, use an impliicit
+ // not have been released). If "snapshot" is NULL, use an implicit
// snapshot of the state at the beginning of this read operation.
// Default: NULL
const Snapshot* snapshot;
diff --git a/src/leveldb/port/atomic_pointer.h b/src/leveldb/port/atomic_pointer.h
index a9866b2302..9bf091f757 100644
--- a/src/leveldb/port/atomic_pointer.h
+++ b/src/leveldb/port/atomic_pointer.h
@@ -5,14 +5,13 @@
// AtomicPointer provides storage for a lock-free pointer.
// Platform-dependent implementation of AtomicPointer:
// - If the platform provides a cheap barrier, we use it with raw pointers
-// - If cstdatomic is present (on newer versions of gcc, it is), we use
-// a cstdatomic-based AtomicPointer. However we prefer the memory
+// - If <atomic> is present (on newer versions of gcc, it is), we use
+// a <atomic>-based AtomicPointer. However we prefer the memory
// barrier based version, because at least on a gcc 4.4 32-bit build
-// on linux, we have encountered a buggy <cstdatomic>
-// implementation. Also, some <cstdatomic> implementations are much
-// slower than a memory-barrier based implementation (~16ns for
-// <cstdatomic> based acquire-load vs. ~1ns for a barrier based
-// acquire-load).
+// on linux, we have encountered a buggy <atomic> implementation.
+// Also, some <atomic> implementations are much slower than a memory-barrier
+// based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for
+// a barrier based acquire-load).
// This code is based on atomicops-internals-* in Google's perftools:
// http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase
@@ -20,8 +19,8 @@
#define PORT_ATOMIC_POINTER_H_
#include <stdint.h>
-#ifdef LEVELDB_CSTDATOMIC_PRESENT
-#include <cstdatomic>
+#ifdef LEVELDB_ATOMIC_PRESENT
+#include <atomic>
#endif
#ifdef OS_WIN
#include <windows.h>
@@ -126,7 +125,7 @@ class AtomicPointer {
};
// AtomicPointer based on <cstdatomic>
-#elif defined(LEVELDB_CSTDATOMIC_PRESENT)
+#elif defined(LEVELDB_ATOMIC_PRESENT)
class AtomicPointer {
private:
std::atomic<void*> rep_;
@@ -207,7 +206,7 @@ class AtomicPointer {
inline void NoBarrier_Store(void* v) { rep_ = v; }
};
-// We have neither MemoryBarrier(), nor <cstdatomic>
+// We have neither MemoryBarrier(), nor <atomic>
#else
#error Please implement AtomicPointer for this platform.
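For comparison, the branch selected by LEVELDB_ATOMIC_PRESENT boils down to acquire/release operations on a std::atomic pointer, roughly as sketched below (assumed shape; as the comment above says, the barrier-based path remains the preferred one):

    #include <atomic>

    class AtomicPointerSketch {
     private:
      std::atomic<void*> rep_;
     public:
      AtomicPointerSketch() {}
      explicit AtomicPointerSketch(void* v) : rep_(v) {}
      void* Acquire_Load() const { return rep_.load(std::memory_order_acquire); }
      void Release_Store(void* v) { rep_.store(v, std::memory_order_release); }
      void* NoBarrier_Load() const { return rep_.load(std::memory_order_relaxed); }
      void NoBarrier_Store(void* v) { rep_.store(v, std::memory_order_relaxed); }
    };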
diff --git a/src/leveldb/port/port_posix.h b/src/leveldb/port/port_posix.h
index 21c845e211..ccca9939d3 100644
--- a/src/leveldb/port/port_posix.h
+++ b/src/leveldb/port/port_posix.h
@@ -21,14 +21,11 @@
#else
#define PLATFORM_IS_LITTLE_ENDIAN false
#endif
-#elif defined(OS_FREEBSD)
+#elif defined(OS_FREEBSD) || defined(OS_OPENBSD) ||\
+ defined(OS_NETBSD) || defined(OS_DRAGONFLYBSD)
#include <sys/types.h>
#include <sys/endian.h>
#define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
-#elif defined(OS_OPENBSD) || defined(OS_NETBSD) ||\
- defined(OS_DRAGONFLYBSD)
- #include <sys/types.h>
- #include <sys/endian.h>
#elif defined(OS_HPUX)
#define PLATFORM_IS_LITTLE_ENDIAN false
#elif defined(OS_ANDROID)
@@ -55,7 +52,7 @@
#if defined(OS_MACOSX) || defined(OS_SOLARIS) || defined(OS_FREEBSD) ||\
defined(OS_NETBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) ||\
- defined(OS_ANDROID) || defined(OS_HPUX)
+ defined(OS_ANDROID) || defined(OS_HPUX) || defined(CYGWIN)
// Use fread/fwrite/fflush on platforms without _unlocked variants
#define fread_unlocked fread
#define fwrite_unlocked fwrite
diff --git a/src/leveldb/port/thread_annotations.h b/src/leveldb/port/thread_annotations.h
index 6f9b6a7924..9470ef587c 100644
--- a/src/leveldb/port/thread_annotations.h
+++ b/src/leveldb/port/thread_annotations.h
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H
+#ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
+#define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
// Some environments provide custom macros to aid in static thread-safety
// analysis. Provide empty definitions of such macros unless they are already
@@ -56,4 +57,4 @@
#define NO_THREAD_SAFETY_ANALYSIS
#endif
-#endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H
+#endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
diff --git a/src/leveldb/table/block.cc b/src/leveldb/table/block.cc
index 79ea9d9ee5..43e402c9c0 100644
--- a/src/leveldb/table/block.cc
+++ b/src/leveldb/table/block.cc
@@ -46,7 +46,7 @@ Block::~Block() {
// Helper routine: decode the next block entry starting at "p",
// storing the number of shared key bytes, non_shared key bytes,
// and the length of the value in "*shared", "*non_shared", and
-// "*value_length", respectively. Will not derefence past "limit".
+// "*value_length", respectively. Will not dereference past "limit".
//
// If any errors are detected, returns NULL. Otherwise, returns a
// pointer to the key delta (just past the three decoded values).
diff --git a/src/leveldb/table/block_builder.h b/src/leveldb/table/block_builder.h
index 5b545bd1af..4fbcb33972 100644
--- a/src/leveldb/table/block_builder.h
+++ b/src/leveldb/table/block_builder.h
@@ -21,7 +21,7 @@ class BlockBuilder {
// Reset the contents as if the BlockBuilder was just constructed.
void Reset();
- // REQUIRES: Finish() has not been callled since the last call to Reset().
+ // REQUIRES: Finish() has not been called since the last call to Reset().
// REQUIRES: key is larger than any previously added key
void Add(const Slice& key, const Slice& value);
diff --git a/src/leveldb/table/format.cc b/src/leveldb/table/format.cc
index cda1decdf3..aa63144c9e 100644
--- a/src/leveldb/table/format.cc
+++ b/src/leveldb/table/format.cc
@@ -48,7 +48,7 @@ Status Footer::DecodeFrom(Slice* input) {
const uint64_t magic = ((static_cast<uint64_t>(magic_hi) << 32) |
(static_cast<uint64_t>(magic_lo)));
if (magic != kTableMagicNumber) {
- return Status::InvalidArgument("not an sstable (bad magic number)");
+ return Status::Corruption("not an sstable (bad magic number)");
}
Status result = metaindex_handle_.DecodeFrom(input);
diff --git a/src/leveldb/table/table.cc b/src/leveldb/table/table.cc
index 71c1756e5f..dff8a82590 100644
--- a/src/leveldb/table/table.cc
+++ b/src/leveldb/table/table.cc
@@ -41,7 +41,7 @@ Status Table::Open(const Options& options,
Table** table) {
*table = NULL;
if (size < Footer::kEncodedLength) {
- return Status::InvalidArgument("file is too short to be an sstable");
+ return Status::Corruption("file is too short to be an sstable");
}
char footer_space[Footer::kEncodedLength];
@@ -58,7 +58,11 @@ Status Table::Open(const Options& options,
BlockContents contents;
Block* index_block = NULL;
if (s.ok()) {
- s = ReadBlock(file, ReadOptions(), footer.index_handle(), &contents);
+ ReadOptions opt;
+ if (options.paranoid_checks) {
+ opt.verify_checksums = true;
+ }
+ s = ReadBlock(file, opt, footer.index_handle(), &contents);
if (s.ok()) {
index_block = new Block(contents);
}
@@ -92,6 +96,9 @@ void Table::ReadMeta(const Footer& footer) {
// TODO(sanjay): Skip this if footer.metaindex_handle() size indicates
// it is an empty block.
ReadOptions opt;
+ if (rep_->options.paranoid_checks) {
+ opt.verify_checksums = true;
+ }
BlockContents contents;
if (!ReadBlock(rep_->file, opt, footer.metaindex_handle(), &contents).ok()) {
// Do not propagate errors since meta info is not needed for operation
@@ -120,6 +127,9 @@ void Table::ReadFilter(const Slice& filter_handle_value) {
// We might want to unify with ReadBlock() if we start
// requiring checksum verification in Table::Open.
ReadOptions opt;
+ if (rep_->options.paranoid_checks) {
+ opt.verify_checksums = true;
+ }
BlockContents block;
if (!ReadBlock(rep_->file, opt, filter_handle, &block).ok()) {
return;
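With these hunks the existing paranoid_checks option also governs checksum verification of the index, metaindex and filter blocks read while opening a table. Enabling it from client code is just (sketch, path illustrative):

    #include <string>
    #include "leveldb/db.h"
    #include "leveldb/options.h"

    // Opening with paranoid_checks now also checksums the blocks read in
    // Table::Open, Table::ReadMeta and Table::ReadFilter.
    leveldb::Status OpenChecked(const std::string& path, leveldb::DB** db) {
      leveldb::Options options;
      options.paranoid_checks = true;
      return leveldb::DB::Open(options, path, db);
    }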
diff --git a/src/leveldb/util/bloom.cc b/src/leveldb/util/bloom.cc
index d7941cd21f..a27a2ace28 100644
--- a/src/leveldb/util/bloom.cc
+++ b/src/leveldb/util/bloom.cc
@@ -29,7 +29,7 @@ class BloomFilterPolicy : public FilterPolicy {
}
virtual const char* Name() const {
- return "leveldb.BuiltinBloomFilter";
+ return "leveldb.BuiltinBloomFilter2";
}
virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
diff --git a/src/leveldb/util/env_posix.cc b/src/leveldb/util/env_posix.cc
index 93eadb1a4f..ba2667864a 100644
--- a/src/leveldb/util/env_posix.cc
+++ b/src/leveldb/util/env_posix.cc
@@ -3,8 +3,6 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#if !defined(LEVELDB_PLATFORM_WINDOWS)
-#include <deque>
-#include <set>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
@@ -18,9 +16,8 @@
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
-#if defined(LEVELDB_PLATFORM_ANDROID)
-#include <sys/stat.h>
-#endif
+#include <deque>
+#include <set>
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "port/port.h"
@@ -296,7 +293,8 @@ class PosixEnv : public Env {
public:
PosixEnv();
virtual ~PosixEnv() {
- fprintf(stderr, "Destroying Env::Default()\n");
+ char msg[] = "Destroying Env::Default()\n";
+ fwrite(msg, 1, sizeof(msg), stderr);
abort();
}
diff --git a/src/leveldb/util/hash.cc b/src/leveldb/util/hash.cc
index 07cf022060..ed439ce7a2 100644
--- a/src/leveldb/util/hash.cc
+++ b/src/leveldb/util/hash.cc
@@ -34,13 +34,13 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) {
// Pick up remaining bytes
switch (limit - data) {
case 3:
- h += data[2] << 16;
+ h += static_cast<unsigned char>(data[2]) << 16;
FALLTHROUGH_INTENDED;
case 2:
- h += data[1] << 8;
+ h += static_cast<unsigned char>(data[1]) << 8;
FALLTHROUGH_INTENDED;
case 1:
- h += data[0];
+ h += static_cast<unsigned char>(data[0]);
h *= m;
h ^= (h >> r);
break;
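The casts matter on platforms where plain char is signed: a byte with the high bit set used to be sign-extended before being mixed into the hash, which is presumably also why the built-in bloom filter name is bumped above. A tiny illustration (assumes a signed-char platform); the new hash_test.cc below pins the expected hash values for exactly such inputs:

    #include <cstdio>

    int main() {
      const char data[] = "\x62\xc3";   // 0xc3 is one of the bytes in the new hash_test.cc
      // Old code promoted the plain char directly: on signed-char platforms
      // 0xc3 becomes -61, so it subtracts from the running hash value.
      std::printf("%d\n", static_cast<int>(data[1]));                               // -61 (typically)
      // New code widens through unsigned char first, keeping the byte value.
      std::printf("%d\n", static_cast<int>(static_cast<unsigned char>(data[1])));   // 195
      return 0;
    }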
diff --git a/src/leveldb/util/hash_test.cc b/src/leveldb/util/hash_test.cc
new file mode 100644
index 0000000000..eaa1c92c23
--- /dev/null
+++ b/src/leveldb/util/hash_test.cc
@@ -0,0 +1,54 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "util/hash.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+class HASH { };
+
+TEST(HASH, SignedUnsignedIssue) {
+ const unsigned char data1[1] = {0x62};
+ const unsigned char data2[2] = {0xc3, 0x97};
+ const unsigned char data3[3] = {0xe2, 0x99, 0xa5};
+ const unsigned char data4[4] = {0xe1, 0x80, 0xb9, 0x32};
+ const unsigned char data5[48] = {
+ 0x01, 0xc0, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x14, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14,
+ 0x00, 0x00, 0x00, 0x18,
+ 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ };
+
+ ASSERT_EQ(Hash(0, 0, 0xbc9f1d34), 0xbc9f1d34);
+ ASSERT_EQ(
+ Hash(reinterpret_cast<const char*>(data1), sizeof(data1), 0xbc9f1d34),
+ 0xef1345c4);
+ ASSERT_EQ(
+ Hash(reinterpret_cast<const char*>(data2), sizeof(data2), 0xbc9f1d34),
+ 0x5b663814);
+ ASSERT_EQ(
+ Hash(reinterpret_cast<const char*>(data3), sizeof(data3), 0xbc9f1d34),
+ 0x323c078f);
+ ASSERT_EQ(
+ Hash(reinterpret_cast<const char*>(data4), sizeof(data4), 0xbc9f1d34),
+ 0xed21633a);
+ ASSERT_EQ(
+ Hash(reinterpret_cast<const char*>(data5), sizeof(data5), 0x12345678),
+ 0xf333dabb);
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ return leveldb::test::RunAllTests();
+}
diff --git a/src/leveldb/util/logging.cc b/src/leveldb/util/logging.cc
index 22cf278512..ca6b324403 100644
--- a/src/leveldb/util/logging.cc
+++ b/src/leveldb/util/logging.cc
@@ -45,15 +45,6 @@ std::string EscapeString(const Slice& value) {
return r;
}
-bool ConsumeChar(Slice* in, char c) {
- if (!in->empty() && (*in)[0] == c) {
- in->remove_prefix(1);
- return true;
- } else {
- return false;
- }
-}
-
bool ConsumeDecimalNumber(Slice* in, uint64_t* val) {
uint64_t v = 0;
int digits = 0;
diff --git a/src/leveldb/util/logging.h b/src/leveldb/util/logging.h
index b0c5da813e..1b450d2480 100644
--- a/src/leveldb/util/logging.h
+++ b/src/leveldb/util/logging.h
@@ -32,10 +32,6 @@ extern std::string NumberToString(uint64_t num);
// Escapes any non-printable characters found in "value".
extern std::string EscapeString(const Slice& value);
-// If *in starts with "c", advances *in past the first character and
-// returns true. Otherwise, returns false.
-extern bool ConsumeChar(Slice* in, char c);
-
// Parse a human-readable number from "*in" into *value. On success,
// advances "*in" past the consumed number and sets "*val" to the
// numeric value. Otherwise, returns false and leaves *in in an
diff --git a/src/leveldbwrapper.h b/src/leveldbwrapper.h
index da5ba61c7b..d997d56e00 100644
--- a/src/leveldbwrapper.h
+++ b/src/leveldbwrapper.h
@@ -6,6 +6,7 @@
#define BITCOIN_LEVELDBWRAPPER_H
#include "serialize.h"
+#include "streams.h"
#include "util.h"
#include "version.h"
diff --git a/src/main.cpp b/src/main.cpp
index fc8167e40e..0cfe90beda 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -41,6 +41,7 @@ CCriticalSection cs_main;
BlockMap mapBlockIndex;
CChain chainActive;
+CBlockIndex *pindexBestHeader = NULL;
int64_t nTimeBestReceived = 0;
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
@@ -51,19 +52,12 @@ bool fTxIndex = false;
bool fIsBareMultisigStd = true;
unsigned int nCoinCacheSize = 5000;
+
/** Fees smaller than this (in satoshi) are considered zero fee (for relaying and mining) */
CFeeRate minRelayTxFee = CFeeRate(1000);
CTxMemPool mempool(::minRelayTxFee);
-struct COrphanBlock {
- uint256 hashBlock;
- uint256 hashPrev;
- vector<unsigned char> vchBlock;
-};
-map<uint256, COrphanBlock*> mapOrphanBlocks;
-multimap<uint256, COrphanBlock*> mapOrphanBlocksByPrev;
-
struct COrphanTx {
CTransaction tx;
NodeId fromPeer;
@@ -105,10 +99,14 @@ namespace {
// The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS or better that are at least
// as good as our current tip. Entries may be failed, though.
- set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexValid;
+ set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
+ // Number of nodes with fSyncStarted.
+ int nSyncStarted = 0;
+    // All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
+ multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;
CCriticalSection cs_LastBlockFile;
- CBlockFileInfo infoLastBlockFile;
+ std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;
// Every received block is assigned a unique and increasing identifier, so we
@@ -125,11 +123,10 @@ namespace {
// Protected by cs_main.
struct QueuedBlock {
uint256 hash;
+ CBlockIndex *pindex; // Optional.
int64_t nTime; // Time of "getdata" request in microseconds.
- int nQueuedBefore; // Number of blocks in flight at the time of request.
};
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;
- map<uint256, pair<NodeId, list<uint256>::iterator> > mapBlocksToDownload;
} // anon namespace
@@ -159,25 +156,25 @@ struct CMainSignals {
} // anon namespace
-void RegisterWallet(CWalletInterface* pwalletIn) {
- g_signals.SyncTransaction.connect(boost::bind(&CWalletInterface::SyncTransaction, pwalletIn, _1, _2));
- g_signals.EraseTransaction.connect(boost::bind(&CWalletInterface::EraseFromWallet, pwalletIn, _1));
- g_signals.UpdatedTransaction.connect(boost::bind(&CWalletInterface::UpdatedTransaction, pwalletIn, _1));
- g_signals.SetBestChain.connect(boost::bind(&CWalletInterface::SetBestChain, pwalletIn, _1));
- g_signals.Inventory.connect(boost::bind(&CWalletInterface::Inventory, pwalletIn, _1));
- g_signals.Broadcast.connect(boost::bind(&CWalletInterface::ResendWalletTransactions, pwalletIn));
+void RegisterValidationInterface(CValidationInterface* pwalletIn) {
+ g_signals.SyncTransaction.connect(boost::bind(&CValidationInterface::SyncTransaction, pwalletIn, _1, _2));
+ g_signals.EraseTransaction.connect(boost::bind(&CValidationInterface::EraseFromWallet, pwalletIn, _1));
+ g_signals.UpdatedTransaction.connect(boost::bind(&CValidationInterface::UpdatedTransaction, pwalletIn, _1));
+ g_signals.SetBestChain.connect(boost::bind(&CValidationInterface::SetBestChain, pwalletIn, _1));
+ g_signals.Inventory.connect(boost::bind(&CValidationInterface::Inventory, pwalletIn, _1));
+ g_signals.Broadcast.connect(boost::bind(&CValidationInterface::ResendWalletTransactions, pwalletIn));
}
-void UnregisterWallet(CWalletInterface* pwalletIn) {
- g_signals.Broadcast.disconnect(boost::bind(&CWalletInterface::ResendWalletTransactions, pwalletIn));
- g_signals.Inventory.disconnect(boost::bind(&CWalletInterface::Inventory, pwalletIn, _1));
- g_signals.SetBestChain.disconnect(boost::bind(&CWalletInterface::SetBestChain, pwalletIn, _1));
- g_signals.UpdatedTransaction.disconnect(boost::bind(&CWalletInterface::UpdatedTransaction, pwalletIn, _1));
- g_signals.EraseTransaction.disconnect(boost::bind(&CWalletInterface::EraseFromWallet, pwalletIn, _1));
- g_signals.SyncTransaction.disconnect(boost::bind(&CWalletInterface::SyncTransaction, pwalletIn, _1, _2));
+void UnregisterValidationInterface(CValidationInterface* pwalletIn) {
+ g_signals.Broadcast.disconnect(boost::bind(&CValidationInterface::ResendWalletTransactions, pwalletIn));
+ g_signals.Inventory.disconnect(boost::bind(&CValidationInterface::Inventory, pwalletIn, _1));
+ g_signals.SetBestChain.disconnect(boost::bind(&CValidationInterface::SetBestChain, pwalletIn, _1));
+ g_signals.UpdatedTransaction.disconnect(boost::bind(&CValidationInterface::UpdatedTransaction, pwalletIn, _1));
+ g_signals.EraseTransaction.disconnect(boost::bind(&CValidationInterface::EraseFromWallet, pwalletIn, _1));
+ g_signals.SyncTransaction.disconnect(boost::bind(&CValidationInterface::SyncTransaction, pwalletIn, _1, _2));
}
-void UnregisterAllWallets() {
+void UnregisterAllValidationInterfaces() {
g_signals.Broadcast.disconnect_all_slots();
g_signals.Inventory.disconnect_all_slots();
g_signals.SetBestChain.disconnect_all_slots();
@@ -220,22 +217,24 @@ struct CNodeState {
CBlockIndex *pindexBestKnownBlock;
// The hash of the last unknown block this peer has announced.
uint256 hashLastUnknownBlock;
+ // The last full block we both have.
+ CBlockIndex *pindexLastCommonBlock;
+ // Whether we've started headers synchronization with this peer.
+ bool fSyncStarted;
+ // Since when we're stalling block download progress (in microseconds), or 0.
+ int64_t nStallingSince;
list<QueuedBlock> vBlocksInFlight;
int nBlocksInFlight;
- list<uint256> vBlocksToDownload;
- int nBlocksToDownload;
- int64_t nLastBlockReceive;
- int64_t nLastBlockProcess;
CNodeState() {
nMisbehavior = 0;
fShouldBan = false;
pindexBestKnownBlock = NULL;
hashLastUnknownBlock = uint256(0);
- nBlocksToDownload = 0;
+ pindexLastCommonBlock = NULL;
+ fSyncStarted = false;
+ nStallingSince = 0;
nBlocksInFlight = 0;
- nLastBlockReceive = 0;
- nLastBlockProcess = 0;
}
};
@@ -266,64 +265,37 @@ void FinalizeNode(NodeId nodeid) {
LOCK(cs_main);
CNodeState *state = State(nodeid);
+ if (state->fSyncStarted)
+ nSyncStarted--;
+
BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight)
mapBlocksInFlight.erase(entry.hash);
- BOOST_FOREACH(const uint256& hash, state->vBlocksToDownload)
- mapBlocksToDownload.erase(hash);
EraseOrphansFor(nodeid);
mapNodeState.erase(nodeid);
}
// Requires cs_main.
-void MarkBlockAsReceived(const uint256 &hash, NodeId nodeFrom = -1) {
- map<uint256, pair<NodeId, list<uint256>::iterator> >::iterator itToDownload = mapBlocksToDownload.find(hash);
- if (itToDownload != mapBlocksToDownload.end()) {
- CNodeState *state = State(itToDownload->second.first);
- state->vBlocksToDownload.erase(itToDownload->second.second);
- state->nBlocksToDownload--;
- mapBlocksToDownload.erase(itToDownload);
- }
-
+void MarkBlockAsReceived(const uint256& hash) {
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
if (itInFlight != mapBlocksInFlight.end()) {
CNodeState *state = State(itInFlight->second.first);
state->vBlocksInFlight.erase(itInFlight->second.second);
state->nBlocksInFlight--;
- if (itInFlight->second.first == nodeFrom)
- state->nLastBlockReceive = GetTimeMicros();
+ state->nStallingSince = 0;
mapBlocksInFlight.erase(itInFlight);
}
}
// Requires cs_main.
-bool AddBlockToQueue(NodeId nodeid, const uint256 &hash) {
- if (mapBlocksToDownload.count(hash) || mapBlocksInFlight.count(hash))
- return false;
-
- CNodeState *state = State(nodeid);
- if (state == NULL)
- return false;
-
- list<uint256>::iterator it = state->vBlocksToDownload.insert(state->vBlocksToDownload.end(), hash);
- state->nBlocksToDownload++;
- if (state->nBlocksToDownload > 5000)
- Misbehaving(nodeid, 10);
- mapBlocksToDownload[hash] = std::make_pair(nodeid, it);
- return true;
-}
-
-// Requires cs_main.
-void MarkBlockAsInFlight(NodeId nodeid, const uint256 &hash) {
+void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, CBlockIndex *pindex = NULL) {
CNodeState *state = State(nodeid);
assert(state != NULL);
// Make sure it's not listed somewhere already.
MarkBlockAsReceived(hash);
- QueuedBlock newentry = {hash, GetTimeMicros(), state->nBlocksInFlight};
- if (state->nBlocksInFlight == 0)
- state->nLastBlockReceive = newentry.nTime; // Reset when a first request is sent.
+ QueuedBlock newentry = {hash, pindex, GetTimeMicros()};
list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
state->nBlocksInFlight++;
mapBlocksInFlight[hash] = std::make_pair(nodeid, it);
@@ -362,6 +334,104 @@ void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
}
}
+/** Find the last common ancestor two blocks have.
+ * Both pa and pb must be non-NULL. */
+CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
+ if (pa->nHeight > pb->nHeight) {
+ pa = pa->GetAncestor(pb->nHeight);
+ } else if (pb->nHeight > pa->nHeight) {
+ pb = pb->GetAncestor(pa->nHeight);
+ }
+
+ while (pa != pb && pa && pb) {
+ pa = pa->pprev;
+ pb = pb->pprev;
+ }
+
+ // Eventually all chain branches meet at the genesis block.
+ assert(pa == pb);
+ return pa;
+}
+
+/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
+ * at most count entries. */
+void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller) {
+ if (count == 0)
+ return;
+
+ vBlocks.reserve(vBlocks.size() + count);
+ CNodeState *state = State(nodeid);
+ assert(state != NULL);
+
+ // Make sure pindexBestKnownBlock is up to date, we'll need it.
+ ProcessBlockAvailability(nodeid);
+
+ if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) {
+ // This peer has nothing interesting.
+ return;
+ }
+
+ if (state->pindexLastCommonBlock == NULL) {
+ // Bootstrap quickly by guessing a parent of our best tip is the forking point.
+ // Guessing wrong in either direction is not a problem.
+ state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
+ }
+
+ // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
+ // of their current tip anymore. Go back enough to fix that.
+ state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
+ if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
+ return;
+
+ std::vector<CBlockIndex*> vToFetch;
+ CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
+ // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
+ // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
+ // download that next block if the window were 1 larger.
+ int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
+ int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
+ NodeId waitingfor = -1;
+ while (pindexWalk->nHeight < nMaxHeight) {
+ // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
+ // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
+ // as iterating over ~100 CBlockIndex* entries anyway.
+ int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
+ vToFetch.resize(nToFetch);
+ pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
+ vToFetch[nToFetch - 1] = pindexWalk;
+ for (unsigned int i = nToFetch - 1; i > 0; i--) {
+ vToFetch[i - 1] = vToFetch[i]->pprev;
+ }
+
+ // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
+ // are not yet downloaded and not in flight to vBlocks. In the mean time, update
+ // pindexLastCommonBlock as long as all ancestors are already downloaded.
+ BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
+ if (pindex->nStatus & BLOCK_HAVE_DATA) {
+ if (pindex->nChainTx)
+ state->pindexLastCommonBlock = pindex;
+ } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
+ // The block is not already downloaded, and not yet in flight.
+ if (pindex->nHeight > nWindowEnd) {
+ // We reached the end of the window.
+ if (vBlocks.size() == 0 && waitingfor != nodeid) {
+ // We aren't able to fetch anything, but we would be if the download window was one larger.
+ nodeStaller = waitingfor;
+ }
+ return;
+ }
+ vBlocks.push_back(pindex);
+ if (vBlocks.size() == count) {
+ return;
+ }
+ } else if (waitingfor == -1) {
+ // This is the first already-in-flight block.
+ waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
+ }
+ }
+ }
+}
+
} // anon namespace
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
@@ -371,6 +441,11 @@ bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
return false;
stats.nMisbehavior = state->nMisbehavior;
stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
+ stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
+ BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
+ if (queue.pindex)
+ stats.vHeightInFlight.push_back(queue.pindex->nHeight);
+ }
return true;
}
@@ -976,7 +1051,7 @@ bool GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock
CBlockHeader header;
try {
file >> header;
- fseek(file, postx.nTxOffset, SEEK_CUR);
+ fseek(file.Get(), postx.nTxOffset, SEEK_CUR);
file >> txOut;
} catch (std::exception &e) {
return error("%s : Deserialize or I/O error - %s", __func__, e.what());
@@ -1031,7 +1106,7 @@ bool WriteBlockToDisk(CBlock& block, CDiskBlockPos& pos)
{
// Open history file to append
CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
- if (!fileout)
+ if (fileout.IsNull())
return error("WriteBlockToDisk : OpenBlockFile failed");
// Write index header
@@ -1039,16 +1114,16 @@ bool WriteBlockToDisk(CBlock& block, CDiskBlockPos& pos)
fileout << FLATDATA(Params().MessageStart()) << nSize;
// Write block
- long fileOutPos = ftell(fileout);
+ long fileOutPos = ftell(fileout.Get());
if (fileOutPos < 0)
return error("WriteBlockToDisk : ftell failed");
pos.nPos = (unsigned int)fileOutPos;
fileout << block;
// Flush stdio buffers and commit to disk before returning
- fflush(fileout);
+ fflush(fileout.Get());
if (!IsInitialBlockDownload())
- FileCommit(fileout);
+ FileCommit(fileout.Get());
return true;
}
@@ -1059,7 +1134,7 @@ bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos)
// Open history file to read
CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
- if (!filein)
+ if (filein.IsNull())
return error("ReadBlockFromDisk : OpenBlockFile failed");
// Read block
@@ -1086,46 +1161,6 @@ bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex)
return true;
}
-uint256 static GetOrphanRoot(const uint256& hash)
-{
- map<uint256, COrphanBlock*>::iterator it = mapOrphanBlocks.find(hash);
- if (it == mapOrphanBlocks.end())
- return hash;
-
- // Work back to the first block in the orphan chain
- do {
- map<uint256, COrphanBlock*>::iterator it2 = mapOrphanBlocks.find(it->second->hashPrev);
- if (it2 == mapOrphanBlocks.end())
- return it->first;
- it = it2;
- } while(true);
-}
-
-// Remove a random orphan block (which does not have any dependent orphans).
-void static PruneOrphanBlocks()
-{
- if (mapOrphanBlocksByPrev.size() <= (size_t)std::max((int64_t)0, GetArg("-maxorphanblocks", DEFAULT_MAX_ORPHAN_BLOCKS)))
- return;
-
- // Pick a random orphan block.
- int pos = insecure_rand() % mapOrphanBlocksByPrev.size();
- std::multimap<uint256, COrphanBlock*>::iterator it = mapOrphanBlocksByPrev.begin();
- while (pos--) it++;
-
- // As long as this block has other orphans depending on it, move to one of those successors.
- do {
- std::multimap<uint256, COrphanBlock*>::iterator it2 = mapOrphanBlocksByPrev.find(it->second->hashBlock);
- if (it2 == mapOrphanBlocksByPrev.end())
- break;
- it = it2;
- } while(1);
-
- uint256 hash = it->second->hashBlock;
- delete it->second;
- mapOrphanBlocksByPrev.erase(it);
- mapOrphanBlocks.erase(hash);
-}
-
CAmount GetBlockValue(int nHeight, const CAmount& nFees)
{
int64_t nSubsidy = 50 * COIN;
@@ -1284,7 +1319,7 @@ void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state
if (!state.CorruptionPossible()) {
pindex->nStatus |= BLOCK_FAILED_VALID;
pblocktree->WriteBlockIndex(CDiskBlockIndex(pindex));
- setBlockIndexValid.erase(pindex);
+ setBlockIndexCandidates.erase(pindex);
InvalidChainFound(pindex);
}
}
@@ -1512,7 +1547,7 @@ void static FlushBlockFile(bool fFinalize = false)
FILE *fileOld = OpenBlockFile(posOld);
if (fileOld) {
if (fFinalize)
- TruncateFile(fileOld, infoLastBlockFile.nSize);
+ TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
FileCommit(fileOld);
fclose(fileOld);
}
@@ -1520,7 +1555,7 @@ void static FlushBlockFile(bool fFinalize = false)
fileOld = OpenUndoFile(posOld);
if (fileOld) {
if (fFinalize)
- TruncateFile(fileOld, infoLastBlockFile.nUndoSize);
+ TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
FileCommit(fileOld);
fclose(fileOld);
}
@@ -1664,11 +1699,6 @@ bool ConnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, C
if (fJustCheck)
return true;
- // Correct transaction counts.
- pindex->nTx = block.vtx.size();
- if (pindex->pprev)
- pindex->nChainTx = pindex->pprev->nChainTx + block.vtx.size();
-
// Write undo information to disk
if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS))
{
@@ -1889,8 +1919,8 @@ static CBlockIndex* FindMostWorkChain() {
// Find the best candidate header.
{
- std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexValid.rbegin();
- if (it == setBlockIndexValid.rend())
+ std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
+ if (it == setBlockIndexCandidates.rend())
return NULL;
pindexNew = *it;
}
@@ -1900,6 +1930,8 @@ static CBlockIndex* FindMostWorkChain() {
CBlockIndex *pindexTest = pindexNew;
bool fInvalidAncestor = false;
while (pindexTest && !chainActive.Contains(pindexTest)) {
+ assert(pindexTest->nStatus & BLOCK_HAVE_DATA);
+ assert(pindexTest->nChainTx || pindexTest->nHeight == 0);
if (pindexTest->nStatus & BLOCK_FAILED_MASK) {
// Candidate has an invalid ancestor, remove entire chain from the set.
if (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
@@ -1907,10 +1939,10 @@ static CBlockIndex* FindMostWorkChain() {
CBlockIndex *pindexFailed = pindexNew;
while (pindexTest != pindexFailed) {
pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
- setBlockIndexValid.erase(pindexFailed);
+ setBlockIndexCandidates.erase(pindexFailed);
pindexFailed = pindexFailed->pprev;
}
- setBlockIndexValid.erase(pindexTest);
+ setBlockIndexCandidates.erase(pindexTest);
fInvalidAncestor = true;
break;
}
@@ -1937,12 +1969,20 @@ static bool ActivateBestChainStep(CValidationState &state, CBlockIndex *pindexMo
// Build list of new blocks to connect.
std::vector<CBlockIndex*> vpindexToConnect;
- vpindexToConnect.reserve(pindexMostWork->nHeight - (pindexFork ? pindexFork->nHeight : -1));
- CBlockIndex *pindexIter = pindexMostWork;
- while (pindexIter && pindexIter != pindexFork) {
+ bool fContinue = true;
+ int nHeight = pindexFork ? pindexFork->nHeight : -1;
+ while (fContinue && nHeight != pindexMostWork->nHeight) {
+ // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
+ // a few blocks along the way.
+ int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
+ vpindexToConnect.clear();
+ vpindexToConnect.reserve(nTargetHeight - nHeight);
+ CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
+ while (pindexIter && pindexIter->nHeight != nHeight) {
vpindexToConnect.push_back(pindexIter);
pindexIter = pindexIter->pprev;
}
+ nHeight = nTargetHeight;
// Connect new blocks.
BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) {
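The batched connect above walks toward the most-work tip in slices of at most 32 blocks, so the lock can be released between slices. A minimal standalone sketch of building one such slice, assuming a simplified index type with only pprev/nHeight and a pprev-walking GetAncestor (all names here are illustrative, not the real CBlockIndex):

    #include <algorithm>
    #include <vector>

    struct CIndexSketch {
        CIndexSketch* pprev = nullptr;
        int nHeight = 0;
        CIndexSketch* GetAncestor(int height) {
            CIndexSketch* p = this;
            while (p && p->nHeight > height) p = p->pprev;  // linear walk; the real index also keeps skip pointers
            return p;
        }
    };

    // Collect at most 32 blocks past nForkHeight, oldest first, ready to be connected.
    std::vector<CIndexSketch*> NextBatch(CIndexSketch* pindexMostWork, int nForkHeight) {
        int nTargetHeight = std::min(nForkHeight + 32, pindexMostWork->nHeight);
        std::vector<CIndexSketch*> vpindexToConnect;
        for (CIndexSketch* p = pindexMostWork->GetAncestor(nTargetHeight); p && p->nHeight != nForkHeight; p = p->pprev)
            vpindexToConnect.push_back(p);
        std::reverse(vpindexToConnect.begin(), vpindexToConnect.end());
        return vpindexToConnect;
    }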
@@ -1953,27 +1993,30 @@ static bool ActivateBestChainStep(CValidationState &state, CBlockIndex *pindexMo
InvalidChainFound(vpindexToConnect.back());
state = CValidationState();
fInvalidFound = true;
+ fContinue = false;
break;
} else {
// A system error occurred (disk space, database error, ...).
return false;
}
} else {
- // Delete all entries in setBlockIndexValid that are worse than our new current block.
+ // Delete all entries in setBlockIndexCandidates that are worse than our new current block.
// Note that we can't delete the current block itself, as we may need to return to it later in case a
// reorganization to a better block fails.
- std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexValid.begin();
- while (setBlockIndexValid.value_comp()(*it, chainActive.Tip())) {
- setBlockIndexValid.erase(it++);
+ std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
+ while (setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
+ setBlockIndexCandidates.erase(it++);
}
- // Either the current tip or a successor of it we're working towards is left in setBlockIndexValid.
- assert(!setBlockIndexValid.empty());
+ // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
+ assert(!setBlockIndexCandidates.empty());
if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
// We're in a better position than we were. Return temporarily to release the lock.
+ fContinue = false;
break;
}
}
}
+ }
// Callbacks/notifications for a new best chain.
if (fInvalidFound)
@@ -2032,7 +2075,7 @@ bool ActivateBestChain(CValidationState &state, CBlock *pblock) {
return true;
}
-CBlockIndex* AddToBlockIndex(CBlockHeader& block)
+CBlockIndex* AddToBlockIndex(const CBlockHeader& block)
{
// Check for duplicate
uint256 hash = block.GetHash();
@@ -2043,10 +2086,10 @@ CBlockIndex* AddToBlockIndex(CBlockHeader& block)
// Construct new block index object
CBlockIndex* pindexNew = new CBlockIndex(block);
assert(pindexNew);
- {
- LOCK(cs_nBlockSequenceId);
- pindexNew->nSequenceId = nBlockSequenceId++;
- }
+ // We assign the sequence id to blocks only when the full data is available,
+ // to avoid miners withholding blocks while broadcasting only their headers
+ // in order to gain a competitive advantage.
+ pindexNew->nSequenceId = 0;
BlockMap::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);
BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
@@ -2058,6 +2101,11 @@ CBlockIndex* AddToBlockIndex(CBlockHeader& block)
}
pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + pindexNew->GetBlockWork();
pindexNew->RaiseValidity(BLOCK_VALID_TREE);
+ if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork)
+ pindexBestHeader = pindexNew;
+
+ // It is OK if this fails; we'll download the header again next time.
+ pblocktree->WriteBlockIndex(CDiskBlockIndex(pindexNew));
return pindexNew;
}
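Deferring nSequenceId until the block data arrives means that, among candidate tips with equal chain work, the tie goes to the block whose transactions were received first rather than whose header was announced first. A toy comparison, not the actual CBlockIndexWorkComparator, just to illustrate the tie-break (names and types are illustrative):

    struct CandidateSketch {
        unsigned long long work; // stand-in for nChainWork
        unsigned int seq;        // 0 until full block data arrives, then a rising counter
    };

    bool PreferredOver(const CandidateSketch& a, const CandidateSketch& b) {
        if (a.work != b.work) return a.work > b.work; // most accumulated work wins
        return a.seq < b.seq;                         // equal work: earliest completed block wins
    }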
@@ -2066,30 +2114,45 @@ CBlockIndex* AddToBlockIndex(CBlockHeader& block)
bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos)
{
pindexNew->nTx = block.vtx.size();
- if (pindexNew->pprev) {
- // Not the genesis block.
- if (pindexNew->pprev->nChainTx) {
- // This parent's block's total number transactions is known, so compute outs.
- pindexNew->nChainTx = pindexNew->pprev->nChainTx + pindexNew->nTx;
- } else {
- // The total number of transactions isn't known yet.
- // We will compute it when the block is connected.
- pindexNew->nChainTx = 0;
- }
- } else {
- // Genesis block.
- pindexNew->nChainTx = pindexNew->nTx;
- }
+ pindexNew->nChainTx = 0;
pindexNew->nFile = pos.nFile;
pindexNew->nDataPos = pos.nPos;
pindexNew->nUndoPos = 0;
pindexNew->nStatus |= BLOCK_HAVE_DATA;
+ pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
+ {
+ LOCK(cs_nBlockSequenceId);
+ pindexNew->nSequenceId = nBlockSequenceId++;
+ }
- if (pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS))
- setBlockIndexValid.insert(pindexNew);
-
- if (!pblocktree->WriteBlockIndex(CDiskBlockIndex(pindexNew)))
- return state.Abort("Failed to write block index");
+ if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) {
+ // pindexNew is the genesis block, or all of its ancestors already have BLOCK_VALID_TRANSACTIONS.
+ deque<CBlockIndex*> queue;
+ queue.push_back(pindexNew);
+
+ // Recursively process any descendant blocks that now may be eligible to be connected.
+ while (!queue.empty()) {
+ CBlockIndex *pindex = queue.front();
+ queue.pop_front();
+ pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
+ setBlockIndexCandidates.insert(pindex);
+ std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
+ while (range.first != range.second) {
+ std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
+ queue.push_back(it->second);
+ range.first++;
+ mapBlocksUnlinked.erase(it);
+ }
+ if (!pblocktree->WriteBlockIndex(CDiskBlockIndex(pindex)))
+ return state.Abort("Failed to write block index");
+ }
+ } else {
+ if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
+ mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
+ }
+ if (!pblocktree->WriteBlockIndex(CDiskBlockIndex(pindexNew)))
+ return state.Abort("Failed to write block index");
+ }
return true;
}
@@ -2100,32 +2163,32 @@ bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAdd
LOCK(cs_LastBlockFile);
- if (fKnown) {
- if (nLastBlockFile != pos.nFile) {
- nLastBlockFile = pos.nFile;
- infoLastBlockFile.SetNull();
- pblocktree->ReadBlockFileInfo(nLastBlockFile, infoLastBlockFile);
- fUpdatedLast = true;
- }
- } else {
- while (infoLastBlockFile.nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
- LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, infoLastBlockFile.ToString());
+ unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
+ if (vinfoBlockFile.size() <= nFile) {
+ vinfoBlockFile.resize(nFile + 1);
+ }
+
+ if (!fKnown) {
+ while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
+ LogPrintf("Leaving block file %i: %s\n", nFile, vinfoBlockFile[nFile].ToString());
FlushBlockFile(true);
- nLastBlockFile++;
- infoLastBlockFile.SetNull();
- pblocktree->ReadBlockFileInfo(nLastBlockFile, infoLastBlockFile); // check whether data for the new file somehow already exist; can fail just fine
+ nFile++;
+ if (vinfoBlockFile.size() <= nFile) {
+ vinfoBlockFile.resize(nFile + 1);
+ }
fUpdatedLast = true;
}
- pos.nFile = nLastBlockFile;
- pos.nPos = infoLastBlockFile.nSize;
+ pos.nFile = nFile;
+ pos.nPos = vinfoBlockFile[nFile].nSize;
}
- infoLastBlockFile.nSize += nAddSize;
- infoLastBlockFile.AddBlock(nHeight, nTime);
+ nLastBlockFile = nFile;
+ vinfoBlockFile[nFile].nSize += nAddSize;
+ vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
if (!fKnown) {
unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
- unsigned int nNewChunks = (infoLastBlockFile.nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
+ unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
if (nNewChunks > nOldChunks) {
if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
FILE *file = OpenBlockFile(pos);
@@ -2140,7 +2203,7 @@ bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAdd
}
}
- if (!pblocktree->WriteBlockFileInfo(nLastBlockFile, infoLastBlockFile))
+ if (!pblocktree->WriteBlockFileInfo(nLastBlockFile, vinfoBlockFile[nFile]))
return state.Abort("Failed to write file info");
if (fUpdatedLast)
pblocktree->WriteLastBlockFile(nLastBlockFile);
@@ -2155,19 +2218,10 @@ bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigne
LOCK(cs_LastBlockFile);
unsigned int nNewSize;
- if (nFile == nLastBlockFile) {
- pos.nPos = infoLastBlockFile.nUndoSize;
- nNewSize = (infoLastBlockFile.nUndoSize += nAddSize);
- if (!pblocktree->WriteBlockFileInfo(nLastBlockFile, infoLastBlockFile))
- return state.Abort("Failed to write block info");
- } else {
- CBlockFileInfo info;
- if (!pblocktree->ReadBlockFileInfo(nFile, info))
- return state.Abort("Failed to read block info");
- pos.nPos = info.nUndoSize;
- nNewSize = (info.nUndoSize += nAddSize);
- if (!pblocktree->WriteBlockFileInfo(nFile, info))
- return state.Abort("Failed to write block info");
+ pos.nPos = vinfoBlockFile[nFile].nUndoSize;
+ nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
+ if (!pblocktree->WriteBlockFileInfo(nLastBlockFile, vinfoBlockFile[nLastBlockFile])) {
+ return state.Abort("Failed to write block info");
}
unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
@@ -2205,12 +2259,31 @@ bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, bool f
bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bool fCheckMerkleRoot)
{
- // These are checks that are independent of context
- // that can be verified before saving an orphan block.
+ // These are checks that are independent of context.
if (!CheckBlockHeader(block, state, fCheckPOW))
return false;
+ // Check the merkle root.
+ if (fCheckMerkleRoot) {
+ bool mutated;
+ uint256 hashMerkleRoot2 = block.BuildMerkleTree(&mutated);
+ if (block.hashMerkleRoot != hashMerkleRoot2)
+ return state.DoS(100, error("CheckBlock() : hashMerkleRoot mismatch"),
+ REJECT_INVALID, "bad-txnmrklroot", true);
+
+ // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
+ // of transactions in a block without affecting the merkle root of a block,
+ // while still invalidating it.
+ if (mutated)
+ return state.DoS(100, error("CheckBlock() : duplicate transaction"),
+ REJECT_INVALID, "bad-txns-duplicate", true);
+ }
+
+ // All potential-corruption validation must be done before we do any
+ // transaction validation, as otherwise we may mark the header as invalid
+ // because we receive the wrong transactions for it.
+
// Size limits
if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
return state.DoS(100, error("CheckBlock() : size limits failed"),
@@ -2230,15 +2303,6 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo
if (!CheckTransaction(tx, state))
return error("CheckBlock() : CheckTransaction failed");
- // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
- // of transactions in a block without affecting the merkle root of a block,
- // while still invalidating it.
- bool mutated;
- uint256 hashMerkleRoot2 = block.BuildMerkleTree(&mutated);
- if (mutated)
- return state.DoS(100, error("CheckBlock() : duplicate transaction"),
- REJECT_INVALID, "bad-txns-duplicate", true);
-
unsigned int nSigOps = 0;
BOOST_FOREACH(const CTransaction& tx, block.vtx)
{
@@ -2248,15 +2312,10 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo
return state.DoS(100, error("CheckBlock() : out-of-bounds SigOpCount"),
REJECT_INVALID, "bad-blk-sigops", true);
- // Check merkle root
- if (fCheckMerkleRoot && block.hashMerkleRoot != hashMerkleRoot2)
- return state.DoS(100, error("CheckBlock() : hashMerkleRoot mismatch"),
- REJECT_INVALID, "bad-txnmrklroot", true);
-
return true;
}
-bool AcceptBlockHeader(CBlockHeader& block, CValidationState& state, CBlockIndex** ppindex)
+bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex** ppindex)
{
AssertLockHeld(cs_main);
// Check for duplicate
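The mutated flag guards against CVE-2012-2459: with an odd number of hashes on a merkle level, the last one is paired with itself, so a block whose transaction list repeats its tail produces the same merkle root as the legitimate block while still being invalid. A toy demonstration of that property, assuming a stand-in combiner instead of double-SHA256 (illustrative only):

    #include <functional>
    #include <string>
    #include <vector>

    static std::string Combine(const std::string& l, const std::string& r) {
        return std::to_string(std::hash<std::string>{}(l + r)); // toy stand-in for SHA256d
    }

    std::string ToyMerkleRoot(std::vector<std::string> level) {
        while (level.size() > 1) {
            if (level.size() % 2) level.push_back(level.back()); // odd level: duplicate the last entry
            std::vector<std::string> next;
            for (size_t i = 0; i + 1 < level.size(); i += 2)
                next.push_back(Combine(level[i], level[i + 1]));
            level = next;
        }
        return level.front();
    }
    // ToyMerkleRoot({"a","b","c"}) == ToyMerkleRoot({"a","b","c","c"}),
    // which is why the duplicate-transaction case must be rejected explicitly.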
@@ -2264,26 +2323,13 @@ bool AcceptBlockHeader(CBlockHeader& block, CValidationState& state, CBlockIndex
BlockMap::iterator miSelf = mapBlockIndex.find(hash);
CBlockIndex *pindex = NULL;
if (miSelf != mapBlockIndex.end()) {
+ // Block header is already known.
pindex = miSelf->second;
+ if (ppindex)
+ *ppindex = pindex;
if (pindex->nStatus & BLOCK_FAILED_MASK)
return state.Invalid(error("%s : block is marked invalid", __func__), 0, "duplicate");
- }
-
- CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint();
- if (pcheckpoint && block.hashPrevBlock != (chainActive.Tip() ? chainActive.Tip()->GetBlockHash() : uint256(0)))
- {
- // Extra checks to prevent "fill up memory by spamming with bogus blocks"
- int64_t deltaTime = block.GetBlockTime() - pcheckpoint->GetBlockTime();
- if (deltaTime < 0)
- {
- return state.DoS(100, error("%s : block with timestamp before last checkpoint", __func__),
- REJECT_CHECKPOINT, "time-too-old");
- }
- if (!CheckMinWork(block.nBits, pcheckpoint->nBits, deltaTime))
- {
- return state.DoS(100, error("%s : block with too little proof-of-work", __func__),
- REJECT_INVALID, "bad-diffbits");
- }
+ return true;
}
// Get prev block index
@@ -2344,6 +2390,12 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex,
if (!AcceptBlockHeader(block, state, &pindex))
return false;
+ if (pindex->nStatus & BLOCK_HAVE_DATA) {
+ // TODO: deal better with duplicate blocks.
+ // return state.DoS(20, error("AcceptBlock() : already have block %d %s", pindex->nHeight, pindex->GetBlockHash().ToString()), REJECT_DUPLICATE, "duplicate");
+ return true;
+ }
+
if (!CheckBlock(block, state)) {
if (state.IsInvalid() && !state.CorruptionPossible()) {
pindex->nStatus |= BLOCK_FAILED_VALID;
@@ -2456,93 +2508,26 @@ void CBlockIndex::BuildSkip()
pskip = pprev->GetAncestor(GetSkipHeight(nHeight));
}
-void PushGetBlocks(CNode* pnode, CBlockIndex* pindexBegin, uint256 hashEnd)
-{
- AssertLockHeld(cs_main);
- // Filter out duplicate requests
- if (pindexBegin == pnode->pindexLastGetBlocksBegin && hashEnd == pnode->hashLastGetBlocksEnd)
- return;
- pnode->pindexLastGetBlocksBegin = pindexBegin;
- pnode->hashLastGetBlocksEnd = hashEnd;
-
- pnode->PushMessage("getblocks", chainActive.GetLocator(pindexBegin), hashEnd);
-}
-
bool ProcessBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, CDiskBlockPos *dbp)
{
- // Check for duplicate
- uint256 hash = pblock->GetHash();
-
- {
- LOCK(cs_main);
- if (mapBlockIndex.count(hash))
- return state.Invalid(error("ProcessBlock() : already have block %d %s", mapBlockIndex[hash]->nHeight, hash.ToString()), 0, "duplicate");
- if (mapOrphanBlocks.count(hash))
- return state.Invalid(error("ProcessBlock() : already have block (orphan) %s", hash.ToString()), 0, "duplicate");
-
// Preliminary checks
- if (!CheckBlock(*pblock, state))
- return error("ProcessBlock() : CheckBlock FAILED");
+ bool checked = CheckBlock(*pblock, state);
- // If we don't already have its previous block (with full data), shunt it off to holding area until we get it
- BlockMap::iterator it = mapBlockIndex.find(pblock->hashPrevBlock);
- if (pblock->hashPrevBlock != 0 && (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)))
{
- LogPrintf("ProcessBlock: ORPHAN BLOCK %lu, prev=%s\n", (unsigned long)mapOrphanBlocks.size(), pblock->hashPrevBlock.ToString());
-
- // Accept orphans as long as there is a node to request its parents from
- if (pfrom) {
- PruneOrphanBlocks();
- COrphanBlock* pblock2 = new COrphanBlock();
- {
- CDataStream ss(SER_DISK, CLIENT_VERSION);
- ss << *pblock;
- pblock2->vchBlock = std::vector<unsigned char>(ss.begin(), ss.end());
- }
- pblock2->hashBlock = hash;
- pblock2->hashPrev = pblock->hashPrevBlock;
- mapOrphanBlocks.insert(make_pair(hash, pblock2));
- mapOrphanBlocksByPrev.insert(make_pair(pblock2->hashPrev, pblock2));
-
- // Ask this guy to fill in what we're missing
- PushGetBlocks(pfrom, chainActive.Tip(), GetOrphanRoot(hash));
+ LOCK(cs_main);
+ MarkBlockAsReceived(pblock->GetHash());
+ if (!checked) {
+ return error("ProcessBlock() : CheckBlock FAILED");
}
- return true;
- }
- // Store to disk
- CBlockIndex *pindex = NULL;
- bool ret = AcceptBlock(*pblock, state, &pindex, dbp);
- if (!ret)
- return error("ProcessBlock() : AcceptBlock FAILED");
-
- // Recursively process any orphan blocks that depended on this one
- vector<uint256> vWorkQueue;
- vWorkQueue.push_back(hash);
- for (unsigned int i = 0; i < vWorkQueue.size(); i++)
- {
- uint256 hashPrev = vWorkQueue[i];
- for (multimap<uint256, COrphanBlock*>::iterator mi = mapOrphanBlocksByPrev.lower_bound(hashPrev);
- mi != mapOrphanBlocksByPrev.upper_bound(hashPrev);
- ++mi)
- {
- CBlock block;
- {
- CDataStream ss(mi->second->vchBlock, SER_DISK, CLIENT_VERSION);
- ss >> block;
- }
- block.BuildMerkleTree();
- // Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan resolution (that is, feeding people an invalid block based on LegitBlockX in order to get anyone relaying LegitBlockX banned)
- CValidationState stateDummy;
- CBlockIndex *pindexChild = NULL;
- if (AcceptBlock(block, stateDummy, &pindexChild))
- vWorkQueue.push_back(mi->second->hashBlock);
- mapOrphanBlocks.erase(mi->second->hashBlock);
- delete mi->second;
+ // Store to disk
+ CBlockIndex *pindex = NULL;
+ bool ret = AcceptBlock(*pblock, state, &pindex, dbp);
+ if (pindex && pfrom) {
+ mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
}
- mapOrphanBlocksByPrev.erase(hashPrev);
- }
-
+ if (!ret)
+ return error("ProcessBlock() : AcceptBlock FAILED");
}
if (!ActivateBestChain(state, pblock))
@@ -2808,20 +2793,44 @@ bool static LoadBlockIndexDB()
{
CBlockIndex* pindex = item.second;
pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + pindex->GetBlockWork();
- pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
- if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS))
- setBlockIndexValid.insert(pindex);
+ if (pindex->nStatus & BLOCK_HAVE_DATA) {
+ if (pindex->pprev) {
+ if (pindex->pprev->nChainTx) {
+ pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
+ } else {
+ pindex->nChainTx = 0;
+ mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
+ }
+ } else {
+ pindex->nChainTx = pindex->nTx;
+ }
+ }
+ if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL))
+ setBlockIndexCandidates.insert(pindex);
if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
pindexBestInvalid = pindex;
if (pindex->pprev)
pindex->BuildSkip();
+ if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
+ pindexBestHeader = pindex;
}
// Load block file info
pblocktree->ReadLastBlockFile(nLastBlockFile);
- LogPrintf("LoadBlockIndexDB(): last block file = %i\n", nLastBlockFile);
- if (pblocktree->ReadBlockFileInfo(nLastBlockFile, infoLastBlockFile))
- LogPrintf("LoadBlockIndexDB(): last block file info: %s\n", infoLastBlockFile.ToString());
+ vinfoBlockFile.resize(nLastBlockFile + 1);
+ LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
+ for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
+ pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
+ }
+ LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
+ for (int nFile = nLastBlockFile + 1; true; nFile++) {
+ CBlockFileInfo info;
+ if (pblocktree->ReadBlockFileInfo(nFile, info)) {
+ vinfoBlockFile.push_back(info);
+ } else {
+ break;
+ }
+ }
// Check presence of blk files
LogPrintf("Checking all blk files are present...\n");
@@ -2836,7 +2845,7 @@ bool static LoadBlockIndexDB()
for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
{
CDiskBlockPos pos(*it, 0);
- if (!CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION)) {
+ if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
return false;
}
}
@@ -2952,7 +2961,7 @@ bool CVerifyDB::VerifyDB(CCoinsView *coinsview, int nCheckLevel, int nCheckDepth
void UnloadBlockIndex()
{
mapBlockIndex.clear();
- setBlockIndexValid.clear();
+ setBlockIndexCandidates.clear();
chainActive.SetTip(NULL);
pindexBestInvalid = NULL;
}
@@ -3075,21 +3084,14 @@ void PrintBlockTree()
bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
{
+ // Map of disk positions for blocks with unknown parent (only used for reindex)
+ static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
int64_t nStart = GetTimeMillis();
int nLoaded = 0;
try {
// This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE, MAX_BLOCK_SIZE+8, SER_DISK, CLIENT_VERSION);
- uint64_t nStartByte = 0;
- if (dbp) {
- // (try to) skip already indexed part
- CBlockFileInfo info;
- if (pblocktree->ReadBlockFileInfo(dbp->nFile, info)) {
- nStartByte = info.nSize;
- blkdat.Seek(info.nSize);
- }
- }
uint64_t nRewind = blkdat.GetPos();
while (!blkdat.eof()) {
boost::this_thread::interruption_point();
@@ -3117,21 +3119,57 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
try {
// read block
uint64_t nBlockPos = blkdat.GetPos();
+ if (dbp)
+ dbp->nPos = nBlockPos;
blkdat.SetLimit(nBlockPos + nSize);
+ blkdat.SetPos(nBlockPos);
CBlock block;
blkdat >> block;
nRewind = blkdat.GetPos();
- // process block
- if (nBlockPos >= nStartByte) {
+ // Detect out-of-order blocks, and store them for later
+ uint256 hash = block.GetHash();
+ if (hash != Params().HashGenesisBlock() && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) {
+ LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
+ block.hashPrevBlock.ToString());
if (dbp)
- dbp->nPos = nBlockPos;
+ mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
+ continue;
+ }
+
+ // process in case the block isn't known yet
+ if (mapBlockIndex.count(hash) == 0) {
CValidationState state;
if (ProcessBlock(state, NULL, &block, dbp))
nLoaded++;
if (state.IsError())
break;
}
+
+ // Recursively process earlier encountered successors of this block
+ deque<uint256> queue;
+ queue.push_back(hash);
+ while (!queue.empty()) {
+ uint256 head = queue.front();
+ queue.pop_front();
+ std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
+ while (range.first != range.second) {
+ std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
+ if (ReadBlockFromDisk(block, it->second))
+ {
+ LogPrintf("%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(),
+ head.ToString());
+ CValidationState dummy;
+ if (ProcessBlock(dummy, NULL, &block, &it->second))
+ {
+ nLoaded++;
+ queue.push_back(block.GetHash());
+ }
+ }
+ range.first++;
+ mapBlocksUnknownParent.erase(it);
+ }
+ }
} catch (std::exception &e) {
LogPrintf("%s : Deserialize or I/O error - %s", __func__, e.what());
}
@@ -3226,8 +3264,7 @@ bool static AlreadyHave(const CInv& inv)
pcoinsTip->HaveCoins(inv.hash);
}
case MSG_BLOCK:
- return mapBlockIndex.count(inv.hash) ||
- mapOrphanBlocks.count(inv.hash);
+ return mapBlockIndex.count(inv.hash);
}
// Don't know what it is, just say we already got one
return true;
@@ -3375,10 +3412,6 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
return true;
}
- {
- LOCK(cs_main);
- State(pfrom->GetId())->nLastBlockProcess = GetTimeMicros();
- }
@@ -3587,6 +3620,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
LOCK(cs_main);
+ std::vector<CInv> vToFetch;
+
for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
{
const CInv &inv = vInv[nInv];
@@ -3597,19 +3632,30 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
bool fAlreadyHave = AlreadyHave(inv);
LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
- if (!fAlreadyHave) {
- if (!fImporting && !fReindex) {
- if (inv.type == MSG_BLOCK)
- AddBlockToQueue(pfrom->GetId(), inv.hash);
- else
- pfrom->AskFor(inv);
- }
- } else if (inv.type == MSG_BLOCK && mapOrphanBlocks.count(inv.hash)) {
- PushGetBlocks(pfrom, chainActive.Tip(), GetOrphanRoot(inv.hash));
- }
+ if (!fAlreadyHave && !fImporting && !fReindex && inv.type != MSG_BLOCK)
+ pfrom->AskFor(inv);
- if (inv.type == MSG_BLOCK)
+ if (inv.type == MSG_BLOCK) {
UpdateBlockAvailability(pfrom->GetId(), inv.hash);
+ if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
+ // First request the headers preceding the announced block. In the normal fully-synced
+ // case where a new block is announced that succeeds the current tip (no reorganization),
+ // there are no such headers.
+ // Secondly, and only when we are close to being synced, we request the announced block directly,
+ // to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the
+ // time the block arrives, the header chain leading up to it is already validated. Not
+ // doing this will result in the received block being rejected as an orphan in case it is
+ // not a direct successor.
+ pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexBestHeader), inv.hash);
+ if (chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - Params().TargetSpacing() * 20) {
+ vToFetch.push_back(inv);
+ // Mark block as in flight already, even though the actual "getdata" message only goes out
+ // later (within the same cs_main lock, though).
+ MarkBlockAsInFlight(pfrom->GetId(), inv.hash);
+ }
+ LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
+ }
+ }
// Track requests for our stuff
g_signals.Inventory(inv.hash);
@@ -3619,6 +3665,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
return error("send buffer size() = %u", pfrom->nSendSize);
}
}
+
+ if (!vToFetch.empty())
+ pfrom->PushMessage("getdata", vToFetch);
}
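The direct-fetch condition above only short-circuits the headers round-trip when the local tip looks recent. Roughly, with the 10-minute target spacing that Params().TargetSpacing() returns on mainnet, the test reduces to something like this (illustrative helper, not part of the patch):

    #include <cstdint>

    bool CloseToSynced(int64_t nTipTime, int64_t nAdjustedTime, int64_t nTargetSpacing = 600) {
        // Fetch an announced block directly only if the tip is within ~20 block
        // intervals (about 3 hours 20 minutes on mainnet) of the current time.
        return nTipTime > nAdjustedTime - nTargetSpacing * 20;
    }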
@@ -3706,8 +3755,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
vector<CBlock> vHeaders;
- int nLimit = 2000;
- LogPrint("net", "getheaders %d to %s\n", (pindex ? pindex->nHeight : -1), hashStop.ToString());
+ int nLimit = MAX_HEADERS_RESULTS;
+ LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString(), pfrom->id);
for (; pindex; pindex = chainActive.Next(pindex))
{
vHeaders.push_back(pindex->GetBlockHeader());
@@ -3826,22 +3875,67 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
+ else if (strCommand == "headers" && !fImporting && !fReindex) // Ignore headers received while importing
+ {
+ std::vector<CBlockHeader> headers;
+
+ // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
+ unsigned int nCount = ReadCompactSize(vRecv);
+ if (nCount > MAX_HEADERS_RESULTS) {
+ Misbehaving(pfrom->GetId(), 20);
+ return error("headers message size = %u", nCount);
+ }
+ headers.resize(nCount);
+ for (unsigned int n = 0; n < nCount; n++) {
+ vRecv >> headers[n];
+ ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
+ }
+
+ LOCK(cs_main);
+
+ if (nCount == 0) {
+ // Nothing interesting. Stop asking this peer for more headers.
+ return true;
+ }
+
+ CBlockIndex *pindexLast = NULL;
+ BOOST_FOREACH(const CBlockHeader& header, headers) {
+ CValidationState state;
+ if (pindexLast != NULL && header.hashPrevBlock != pindexLast->GetBlockHash()) {
+ Misbehaving(pfrom->GetId(), 20);
+ return error("non-continuous headers sequence");
+ }
+ if (!AcceptBlockHeader(header, state, &pindexLast)) {
+ int nDoS;
+ if (state.IsInvalid(nDoS)) {
+ if (nDoS > 0)
+ Misbehaving(pfrom->GetId(), nDoS);
+ return error("invalid header received");
+ }
+ }
+ }
+
+ if (pindexLast)
+ UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
+
+ if (nCount == MAX_HEADERS_RESULTS && pindexLast) {
+ // Headers message had its maximum size; the peer may have more headers.
+ // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
+ // from there instead.
+ LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
+ pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexLast), uint256(0));
+ }
+ }
+
else if (strCommand == "block" && !fImporting && !fReindex) // Ignore blocks received while importing
{
CBlock block;
vRecv >> block;
- LogPrint("net", "received block %s peer=%d\n", block.GetHash().ToString(), pfrom->id);
-
CInv inv(MSG_BLOCK, block.GetHash());
- pfrom->AddInventoryKnown(inv);
+ LogPrint("net", "received block %s peer=%d\n", inv.hash.ToString(), pfrom->id);
- {
- LOCK(cs_main);
- // Remember who we got this block from.
- mapBlockSource[inv.hash] = pfrom->GetId();
- MarkBlockAsReceived(inv.hash, pfrom->GetId());
- }
+ pfrom->AddInventoryKnown(inv);
CValidationState state;
ProcessBlock(state, pfrom, &block);
@@ -4323,9 +4417,18 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
state.rejects.clear();
// Start block sync
- if (pto->fStartSync && !fImporting && !fReindex) {
- pto->fStartSync = false;
- PushGetBlocks(pto, chainActive.Tip(), uint256(0));
+ if (pindexBestHeader == NULL)
+ pindexBestHeader = chainActive.Tip();
+ bool fFetch = !pto->fInbound || (pindexBestHeader && (state.pindexLastCommonBlock ? state.pindexLastCommonBlock->nHeight : 0) + 144 > pindexBestHeader->nHeight);
+ if (!state.fSyncStarted && !pto->fClient && fFetch && !fImporting && !fReindex) {
+ // Only actively request headers from a single peer, unless our best header is already close to the current time.
+ if (nSyncStarted == 0 || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
+ state.fSyncStarted = true;
+ nSyncStarted++;
+ CBlockIndex *pindexStart = pindexBestHeader->pprev ? pindexBestHeader->pprev : pindexBestHeader;
+ LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
+ pto->PushMessage("getheaders", chainActive.GetLocator(pindexStart), uint256(0));
+ }
}
// Resend wallet transactions that haven't gotten in a block yet
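The fFetch expression above encodes the download policy: outbound peers are always eligible, while inbound peers are only used once the gap between the last block we share with them and our best known header is under 144 blocks (roughly one day at the 10-minute target). A compact restatement of that rule (illustrative names):

    bool ShouldFetchFrom(bool fInbound, int nLastCommonHeight, int nBestHeaderHeight) {
        if (!fInbound) return true;                          // outbound peers always qualify
        return nLastCommonHeight + 144 > nBestHeaderHeight;  // inbound peers only when nearly caught up
    }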
@@ -4384,35 +4487,35 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
if (!vInv.empty())
pto->PushMessage("inv", vInv);
-
- // Detect stalled peers. Require that blocks are in flight, we haven't
- // received a (requested) block in one minute, and that all blocks are
- // in flight for over two minutes, since we first had a chance to
- // process an incoming block.
+ // Detect whether we're stalling
int64_t nNow = GetTimeMicros();
- if (!pto->fDisconnect && state.nBlocksInFlight &&
- state.nLastBlockReceive < state.nLastBlockProcess - BLOCK_DOWNLOAD_TIMEOUT*1000000 &&
- state.vBlocksInFlight.front().nTime < state.nLastBlockProcess - 2*BLOCK_DOWNLOAD_TIMEOUT*1000000) {
- LogPrintf("Peer %s is stalling block download, disconnecting\n", state.name);
+ if (!pto->fDisconnect && state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
+ // Stalling only triggers when the block download window cannot move. During normal steady state,
+ // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
+ // should only happen during initial block download.
+ LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
pto->fDisconnect = true;
}
- // Update knowledge of peer's block availability.
- ProcessBlockAvailability(pto->GetId());
-
//
// Message: getdata (blocks)
//
vector<CInv> vGetData;
- while (!pto->fDisconnect && state.nBlocksToDownload && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- uint256 hash = state.vBlocksToDownload.front();
- vGetData.push_back(CInv(MSG_BLOCK, hash));
- MarkBlockAsInFlight(pto->GetId(), hash);
- LogPrint("net", "Requesting block %s peer=%d\n", hash.ToString(), pto->id);
- if (vGetData.size() >= 1000)
- {
- pto->PushMessage("getdata", vGetData);
- vGetData.clear();
+ if (!pto->fDisconnect && !pto->fClient && fFetch && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ vector<CBlockIndex*> vToDownload;
+ NodeId staller = -1;
+ FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);
+ BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
+ vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
+ MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex);
+ LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
+ pindex->nHeight, pto->id);
+ }
+ if (state.nBlocksInFlight == 0 && staller != -1) {
+ if (State(staller)->nStallingSince == 0) {
+ State(staller)->nStallingSince = nNow;
+ LogPrint("net", "Stall started peer=%d\n", staller);
+ }
}
}
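The stall check fires only for the single peer recorded as blocking the download window, and only after it has been blocking for longer than BLOCK_STALLING_TIMEOUT. The same condition, pulled out as a standalone predicate (illustrative, with times in microseconds as in the code above):

    #include <cstdint>

    bool ShouldDisconnectStaller(int64_t nStallingSinceMicros, int64_t nNowMicros,
                                 int64_t nTimeoutSeconds = 2) {
        return nStallingSinceMicros != 0 &&
               nStallingSinceMicros < nNowMicros - nTimeoutSeconds * 1000000;
    }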
@@ -4447,7 +4550,7 @@ bool CBlockUndo::WriteToDisk(CDiskBlockPos &pos, const uint256 &hashBlock)
{
// Open history file to append
CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
- if (!fileout)
+ if (fileout.IsNull())
return error("CBlockUndo::WriteToDisk : OpenUndoFile failed");
// Write index header
@@ -4455,7 +4558,7 @@ bool CBlockUndo::WriteToDisk(CDiskBlockPos &pos, const uint256 &hashBlock)
fileout << FLATDATA(Params().MessageStart()) << nSize;
// Write undo data
- long fileOutPos = ftell(fileout);
+ long fileOutPos = ftell(fileout.Get());
if (fileOutPos < 0)
return error("CBlockUndo::WriteToDisk : ftell failed");
pos.nPos = (unsigned int)fileOutPos;
@@ -4468,9 +4571,9 @@ bool CBlockUndo::WriteToDisk(CDiskBlockPos &pos, const uint256 &hashBlock)
fileout << hasher.GetHash();
// Flush stdio buffers and commit to disk before returning
- fflush(fileout);
+ fflush(fileout.Get());
if (!IsInitialBlockDownload())
- FileCommit(fileout);
+ FileCommit(fileout.Get());
return true;
}
@@ -4479,7 +4582,7 @@ bool CBlockUndo::ReadFromDisk(const CDiskBlockPos &pos, const uint256 &hashBlock
{
// Open history file to read
CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
- if (!filein)
+ if (filein.IsNull())
return error("CBlockUndo::ReadFromDisk : OpenBlockFile failed");
// Read block
@@ -4519,12 +4622,6 @@ public:
delete (*it1).second;
mapBlockIndex.clear();
- // orphan blocks
- std::map<uint256, COrphanBlock*>::iterator it2 = mapOrphanBlocks.begin();
- for (; it2 != mapOrphanBlocks.end(); it2++)
- delete (*it2).second;
- mapOrphanBlocks.clear();
-
// orphan transactions
mapOrphanTransactions.clear();
mapOrphanTransactionsByPrev.clear();
diff --git a/src/main.h b/src/main.h
index cad7eebfb7..1ef51918c5 100644
--- a/src/main.h
+++ b/src/main.h
@@ -20,6 +20,7 @@
#include "script/sigcache.h"
#include "script/standard.h"
#include "sync.h"
+#include "tinyformat.h"
#include "txmempool.h"
#include "uint256.h"
@@ -72,9 +73,17 @@ static const int MAX_SCRIPTCHECK_THREADS = 16;
/** -par default (number of script-checking threads, 0 = auto) */
static const int DEFAULT_SCRIPTCHECK_THREADS = 0;
/** Number of blocks that can be requested at any given time from a single peer. */
-static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 128;
-/** Timeout in seconds before considering a block download peer unresponsive. */
-static const unsigned int BLOCK_DOWNLOAD_TIMEOUT = 60;
+static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
+/** Timeout in seconds during which a peer must stall block download progress before being disconnected. */
+static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
+/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
+ * less than this number, we reached their tip. Changing this value is a protocol upgrade. */
+static const unsigned int MAX_HEADERS_RESULTS = 2000;
+/** Size of the "block download window": how far ahead of our current height do we fetch?
+ * Larger windows tolerate larger download speed differences between peers, but increase the potential
+ * degree of disordering of blocks on disk (which makes reindexing and, in the future, perhaps pruning
+ * harder). We'll probably want to make this a per-peer adaptive value at some point. */
+static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
/** "reject" message codes **/
static const unsigned char REJECT_MALFORMED = 0x01;
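Rough arithmetic for the values above: the window schedules at most 1024 blocks past the current tip and each peer carries at most 16 outstanding block requests, so on the order of 1024 / 16 = 64 peers can be kept busy before the window itself becomes the bottleneck; the 2-second stalling timeout then applies only to the one peer preventing the window from advancing. As a one-line sanity check (assuming the constants defined above are in scope):

    static const unsigned int MAX_SATURATED_PEERS = BLOCK_DOWNLOAD_WINDOW / MAX_BLOCKS_IN_TRANSIT_PER_PEER; // 1024 / 16 = 64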
@@ -110,6 +119,9 @@ extern bool fIsBareMultisigStd;
extern unsigned int nCoinCacheSize;
extern CFeeRate minRelayTxFee;
+// Best header we've seen so far (used for getheaders queries' starting points).
+extern CBlockIndex *pindexBestHeader;
+
// Minimum disk space required - used in CheckDiskSpace()
static const uint64_t nMinDiskSpace = 52428800;
@@ -118,17 +130,17 @@ class CBlockTreeDB;
class CTxUndo;
class CScriptCheck;
class CValidationState;
-class CWalletInterface;
+class CValidationInterface;
struct CNodeStateStats;
struct CBlockTemplate;
/** Register a wallet to receive updates from core */
-void RegisterWallet(CWalletInterface* pwalletIn);
+void RegisterValidationInterface(CValidationInterface* pwalletIn);
/** Unregister a wallet from core */
-void UnregisterWallet(CWalletInterface* pwalletIn);
+void UnregisterValidationInterface(CValidationInterface* pwalletIn);
/** Unregister all wallets from core */
-void UnregisterAllWallets();
+void UnregisterAllValidationInterfaces();
/** Push an updated transaction to all registered wallets */
void SyncWithWallets(const CTransaction& tx, const CBlock* pblock = NULL);
@@ -137,8 +149,6 @@ void RegisterNodeSignals(CNodeSignals& nodeSignals);
/** Unregister a network node */
void UnregisterNodeSignals(CNodeSignals& nodeSignals);
-void PushGetBlocks(CNode* pnode, CBlockIndex* pindexBegin, uint256 hashEnd);
-
/** Process an incoming block */
bool ProcessBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, CDiskBlockPos *dbp = NULL);
/** Check whether enough disk space is available for an incoming block */
@@ -193,6 +203,8 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
struct CNodeStateStats {
int nMisbehavior;
int nSyncHeight;
+ int nCommonHeight;
+ std::vector<int> vHeightInFlight;
};
struct CDiskTxPos : public CDiskBlockPos
@@ -439,9 +451,6 @@ bool DisconnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex
// Apply the effects of this block (with given index) on the UTXO set represented by coins
bool ConnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& coins, bool fJustCheck = false);
-// Add this block to the block index, and if necessary, switch the active block chain to this
-bool AddToBlockIndex(CBlock& block, CValidationState& state, const CDiskBlockPos& pos);
-
// Context-independent validity checks
bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, bool fCheckPOW = true);
bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW = true, bool fCheckMerkleRoot = true);
@@ -449,7 +458,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW = t
// Store block on disk
// if dbp is provided, the file is known to already reside on disk
bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex **pindex, CDiskBlockPos* dbp = NULL);
-bool AcceptBlockHeader(CBlockHeader& block, CValidationState& state, CBlockIndex **ppindex= NULL);
+bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex **ppindex= NULL);
@@ -632,17 +641,17 @@ public:
};
-class CWalletInterface {
+class CValidationInterface {
protected:
- virtual void SyncTransaction(const CTransaction &tx, const CBlock *pblock) =0;
- virtual void EraseFromWallet(const uint256 &hash) =0;
- virtual void SetBestChain(const CBlockLocator &locator) =0;
- virtual void UpdatedTransaction(const uint256 &hash) =0;
- virtual void Inventory(const uint256 &hash) =0;
- virtual void ResendWalletTransactions() =0;
- friend void ::RegisterWallet(CWalletInterface*);
- friend void ::UnregisterWallet(CWalletInterface*);
- friend void ::UnregisterAllWallets();
+ virtual void SyncTransaction(const CTransaction &tx, const CBlock *pblock) {};
+ virtual void EraseFromWallet(const uint256 &hash) {};
+ virtual void SetBestChain(const CBlockLocator &locator) {};
+ virtual void UpdatedTransaction(const uint256 &hash) {};
+ virtual void Inventory(const uint256 &hash) {};
+ virtual void ResendWalletTransactions() {};
+ friend void ::RegisterValidationInterface(CValidationInterface*);
+ friend void ::UnregisterValidationInterface(CValidationInterface*);
+ friend void ::UnregisterAllValidationInterfaces();
};
#endif // BITCOIN_MAIN_H
diff --git a/src/miner.cpp b/src/miner.cpp
index c2762bf44e..eefccfd641 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -17,6 +17,7 @@
#endif
#include <boost/thread.hpp>
+#include <boost/tuple/tuple.hpp>
using namespace std;
@@ -398,7 +399,7 @@ CBlockTemplate* CreateNewBlockWithKey(CReserveKey& reservekey)
if (!reservekey.GetReservedKey(pubkey))
return NULL;
- CScript scriptPubKey = CScript() << pubkey << OP_CHECKSIG;
+ CScript scriptPubKey = CScript() << ToByteVector(pubkey) << OP_CHECKSIG;
return CreateNewBlock(scriptPubKey);
}
diff --git a/src/net.cpp b/src/net.cpp
index 866bac2c0e..6cf64f51c3 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -73,11 +73,11 @@ map<CNetAddr, LocalServiceInfo> mapLocalHost;
static bool vfReachable[NET_MAX] = {};
static bool vfLimited[NET_MAX] = {};
static CNode* pnodeLocalHost = NULL;
-static CNode* pnodeSync = NULL;
uint64_t nLocalHostNonce = 0;
static std::vector<ListenSocket> vhListenSocket;
CAddrMan addrman;
int nMaxConnections = 125;
+bool fAddressesInitialized = false;
vector<CNode*> vNodes;
CCriticalSection cs_vNodes;
@@ -518,10 +518,6 @@ void CNode::CloseSocketDisconnect()
TRY_LOCK(cs_vRecvMsg, lockRecv);
if (lockRecv)
vRecvMsg.clear();
-
- // if this was the sync node, we'll need a new one
- if (this == pnodeSync)
- pnodeSync = NULL;
}
void CNode::PushVersion()
@@ -614,7 +610,6 @@ void CNode::copyStats(CNodeStats &stats)
X(nSendBytes);
X(nRecvBytes);
X(fWhitelisted);
- stats.fSyncNode = (this == pnodeSync);
// It is common for nodes with good ping times to suddenly become lagged,
// due to a new block arriving or other large transfer.
@@ -1486,61 +1481,20 @@ bool OpenNetworkConnection(const CAddress& addrConnect, CSemaphoreGrant *grantOu
}
-// for now, use a very simple selection metric: the node from which we received
-// most recently
-static int64_t NodeSyncScore(const CNode *pnode) {
- return pnode->nLastRecv;
-}
-
-void static StartSync(const vector<CNode*> &vNodes) {
- CNode *pnodeNewSync = NULL;
- int64_t nBestScore = 0;
-
- int nBestHeight = g_signals.GetHeight().get_value_or(0);
-
- // Iterate over all nodes
- BOOST_FOREACH(CNode* pnode, vNodes) {
- // check preconditions for allowing a sync
- if (!pnode->fClient && !pnode->fOneShot &&
- !pnode->fDisconnect && pnode->fSuccessfullyConnected &&
- (pnode->nStartingHeight > (nBestHeight - 144)) &&
- (pnode->nVersion < NOBLKS_VERSION_START || pnode->nVersion >= NOBLKS_VERSION_END)) {
- // if ok, compare node's score with the best so far
- int64_t nScore = NodeSyncScore(pnode);
- if (pnodeNewSync == NULL || nScore > nBestScore) {
- pnodeNewSync = pnode;
- nBestScore = nScore;
- }
- }
- }
- // if a new sync candidate was found, start sync!
- if (pnodeNewSync) {
- pnodeNewSync->fStartSync = true;
- pnodeSync = pnodeNewSync;
- }
-}
-
void ThreadMessageHandler()
{
SetThreadPriority(THREAD_PRIORITY_BELOW_NORMAL);
while (true)
{
- bool fHaveSyncNode = false;
-
vector<CNode*> vNodesCopy;
{
LOCK(cs_vNodes);
vNodesCopy = vNodes;
BOOST_FOREACH(CNode* pnode, vNodesCopy) {
pnode->AddRef();
- if (pnode == pnodeSync)
- fHaveSyncNode = true;
}
}
- if (!fHaveSyncNode)
- StartSync(vNodesCopy);
-
// Poll the connected nodes for messages
CNode* pnodeTrickle = NULL;
if (!vNodesCopy.empty())
@@ -1739,6 +1693,18 @@ void static Discover(boost::thread_group& threadGroup)
void StartNode(boost::thread_group& threadGroup)
{
+ uiInterface.InitMessage(_("Loading addresses..."));
+ // Load addresses for peers.dat
+ int64_t nStart = GetTimeMillis();
+ {
+ CAddrDB adb;
+ if (!adb.Read(addrman))
+ LogPrintf("Invalid or missing peers.dat; recreating\n");
+ }
+ LogPrintf("Loaded %i addresses from peers.dat %dms\n",
+ addrman.size(), GetTimeMillis() - nStart);
+ fAddressesInitialized = true;
+
if (semOutbound == NULL) {
// initialize semaphore
int nMaxOutbound = min(MAX_OUTBOUND_CONNECTIONS, nMaxConnections);
@@ -1785,7 +1751,12 @@ bool StopNode()
if (semOutbound)
for (int i=0; i<MAX_OUTBOUND_CONNECTIONS; i++)
semOutbound->post();
- DumpAddresses();
+
+ if (fAddressesInitialized)
+ {
+ DumpAddresses();
+ fAddressesInitialized = false;
+ }
return true;
}
@@ -1958,7 +1929,7 @@ bool CAddrDB::Write(const CAddrMan& addr)
boost::filesystem::path pathTmp = GetDataDir() / tmpfn;
FILE *file = fopen(pathTmp.string().c_str(), "wb");
CAutoFile fileout(file, SER_DISK, CLIENT_VERSION);
- if (!fileout)
+ if (fileout.IsNull())
return error("%s : Failed to open file %s", __func__, pathTmp.string());
// Write and commit header, data
@@ -1968,7 +1939,7 @@ bool CAddrDB::Write(const CAddrMan& addr)
catch (std::exception &e) {
return error("%s : Serialize or I/O error - %s", __func__, e.what());
}
- FileCommit(fileout);
+ FileCommit(fileout.Get());
fileout.fclose();
// replace existing peers.dat, if any, with new peers.dat.XXXX
@@ -1983,7 +1954,7 @@ bool CAddrDB::Read(CAddrMan& addr)
// open input file, and associate with CAutoFile
FILE *file = fopen(pathAddr.string().c_str(), "rb");
CAutoFile filein(file, SER_DISK, CLIENT_VERSION);
- if (!filein)
+ if (filein.IsNull())
return error("%s : Failed to open file %s", __func__, pathAddr.string());
// use file size to size memory buffer
@@ -2060,10 +2031,7 @@ CNode::CNode(SOCKET hSocketIn, CAddress addrIn, std::string addrNameIn, bool fIn
nSendSize = 0;
nSendOffset = 0;
hashContinue = 0;
- pindexLastGetBlocksBegin = 0;
- hashLastGetBlocksEnd = 0;
nStartingHeight = -1;
- fStartSync = false;
fGetAddr = false;
fRelayTxes = false;
setInventoryKnown.max_size(SendBufferSize() / 1000);
diff --git a/src/net.h b/src/net.h
index ad0a1df7e2..340158512d 100644
--- a/src/net.h
+++ b/src/net.h
@@ -14,6 +14,7 @@
#include "netbase.h"
#include "protocol.h"
#include "random.h"
+#include "streams.h"
#include "sync.h"
#include "uint256.h"
#include "utilstrencodings.h"
@@ -158,7 +159,6 @@ public:
int nStartingHeight;
uint64_t nSendBytes;
uint64_t nRecvBytes;
- bool fSyncNode;
bool fWhitelisted;
double dPingTime;
double dPingWait;
@@ -276,10 +276,7 @@ protected:
public:
uint256 hashContinue;
- CBlockIndex* pindexLastGetBlocksBegin;
- uint256 hashLastGetBlocksEnd;
int nStartingHeight;
- bool fStartSync;
// flood relay
std::vector<CAddress> vAddrToSend;
diff --git a/src/noui.cpp b/src/noui.cpp
index f786a20db5..8f3b0275b0 100644
--- a/src/noui.cpp
+++ b/src/noui.cpp
@@ -14,6 +14,9 @@
static bool noui_ThreadSafeMessageBox(const std::string& message, const std::string& caption, unsigned int style)
{
+ bool fSecure = style & CClientUIInterface::SECURE;
+ style &= ~CClientUIInterface::SECURE;
+
std::string strCaption;
// Check for usage of predefined caption
switch (style) {
@@ -30,7 +33,8 @@ static bool noui_ThreadSafeMessageBox(const std::string& message, const std::str
strCaption += caption; // Use supplied caption (can be empty)
}
- LogPrintf("%s: %s\n", strCaption, message);
+ if (!fSecure)
+ LogPrintf("%s: %s\n", strCaption, message);
fprintf(stderr, "%s: %s\n", strCaption.c_str(), message.c_str());
return false;
}
diff --git a/src/pow.cpp b/src/pow.cpp
index d50222849c..75fbfc6a6d 100644
--- a/src/pow.cpp
+++ b/src/pow.cpp
@@ -98,39 +98,6 @@ bool CheckProofOfWork(uint256 hash, unsigned int nBits)
return true;
}
-//
-// true if nBits is greater than the minimum amount of work that could
-// possibly be required deltaTime after minimum work required was nBase
-//
-bool CheckMinWork(unsigned int nBits, unsigned int nBase, int64_t deltaTime)
-{
- bool fOverflow = false;
- uint256 bnNewBlock;
- bnNewBlock.SetCompact(nBits, NULL, &fOverflow);
- if (fOverflow)
- return false;
-
- const uint256 &bnLimit = Params().ProofOfWorkLimit();
- // Testnet has min-difficulty blocks
- // after Params().TargetSpacing()*2 time between blocks:
- if (Params().AllowMinDifficultyBlocks() && deltaTime > Params().TargetSpacing()*2)
- return bnNewBlock <= bnLimit;
-
- uint256 bnResult;
- bnResult.SetCompact(nBase);
- while (deltaTime > 0 && bnResult < bnLimit)
- {
- // Maximum 400% adjustment...
- bnResult *= 4;
- // ... in best-case exactly 4-times-normal target time
- deltaTime -= Params().TargetTimespan()*4;
- }
- if (bnResult > bnLimit)
- bnResult = bnLimit;
-
- return bnNewBlock <= bnResult;
-}
-
void UpdateTime(CBlockHeader* pblock, const CBlockIndex* pindexPrev)
{
pblock->nTime = std::max(pindexPrev->GetMedianTimePast()+1, GetAdjustedTime());
diff --git a/src/pow.h b/src/pow.h
index 5d91108ac4..233d1f3795 100644
--- a/src/pow.h
+++ b/src/pow.h
@@ -16,8 +16,6 @@ unsigned int GetNextWorkRequired(const CBlockIndex* pindexLast, const CBlockHead
/** Check whether a block hash satisfies the proof-of-work requirement specified by nBits */
bool CheckProofOfWork(uint256 hash, unsigned int nBits);
-/** Check the work is more than the minimum a received block needs, without knowing its direct parent */
-bool CheckMinWork(unsigned int nBits, unsigned int nBase, int64_t deltaTime);
void UpdateTime(CBlockHeader* block, const CBlockIndex* pindexPrev);
diff --git a/src/protocol.cpp b/src/protocol.cpp
index 0e28f3abbd..72fdd753a8 100644
--- a/src/protocol.cpp
+++ b/src/protocol.cpp
@@ -7,6 +7,7 @@
#include "chainparams.h"
#include "util.h"
+#include "utilstrencodings.h"
#ifndef WIN32
# include <arpa/inet.h>
diff --git a/src/qt/bitcoinamountfield.h b/src/qt/bitcoinamountfield.h
index e52feeb46e..040a234177 100644
--- a/src/qt/bitcoinamountfield.h
+++ b/src/qt/bitcoinamountfield.h
@@ -21,7 +21,9 @@ class BitcoinAmountField: public QWidget
{
Q_OBJECT
- Q_PROPERTY(CAmount value READ value WRITE setValue NOTIFY valueChanged USER true)
+ // ugly hack: for some unknown reason CAmount (instead of qint64) does not work here as expected
+ // discussion: https://github.com/bitcoin/bitcoin/pull/5117
+ Q_PROPERTY(qint64 value READ value WRITE setValue NOTIFY valueChanged USER true)
public:
explicit BitcoinAmountField(QWidget *parent = 0);
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index 8a945606dc..f0471c32f9 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -665,7 +665,7 @@ void BitcoinGUI::setNumBlocks(int count)
QDateTime currentDate = QDateTime::currentDateTime();
int secs = lastBlockDate.secsTo(currentDate);
- tooltip = tr("Processed %1 blocks of transaction history.").arg(count);
+ tooltip = tr("Processed %n blocks of transaction history.", "", count);
// Set icon state: spinning if catching up, tick otherwise
if(secs < 90*60)
@@ -992,6 +992,9 @@ void BitcoinGUI::showProgress(const QString &title, int nProgress)
static bool ThreadSafeMessageBox(BitcoinGUI *gui, const std::string& message, const std::string& caption, unsigned int style)
{
bool modal = (style & CClientUIInterface::MODAL);
+ // The SECURE flag has no effect in the Qt GUI.
+ // bool secure = (style & CClientUIInterface::SECURE);
+ style &= ~CClientUIInterface::SECURE;
bool ret = false;
// In case of modal message, use blocking connection to wait for user to click a button
QMetaObject::invokeMethod(gui, "message",
diff --git a/src/qt/bitcoinstrings.cpp b/src/qt/bitcoinstrings.cpp
index 25c811183f..1073b6a472 100644
--- a/src/qt/bitcoinstrings.cpp
+++ b/src/qt/bitcoinstrings.cpp
@@ -22,11 +22,8 @@ QT_TRANSLATE_NOOP("bitcoin-core", ""
"It is also recommended to set alertnotify so you are notified of problems;\n"
"for example: alertnotify=echo %%s | mail -s \"Bitcoin Alert\" admin@foo.com\n"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
-"(default: 1, 1 = keep tx meta data e.g. account owner and payment request "
-"information, 2 = drop tx meta data)"),
-QT_TRANSLATE_NOOP("bitcoin-core", ""
-"Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!"
-"3DES:@STRENGTH)"),
+"(1 = keep tx meta data e.g. account owner and payment request information, 2 "
+"= drop tx meta data)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
"Allow JSON-RPC connections from specified source. Valid for <ip> are a "
"single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or "
@@ -49,7 +46,7 @@ QT_TRANSLATE_NOOP("bitcoin-core", ""
"running."),
QT_TRANSLATE_NOOP("bitcoin-core", ""
"Continuously rate-limit free transactions to <n>*1000 bytes per minute "
-"(default:15)"),
+"(default:%u)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
"Create new files with system default permissions, instead of umask 077 (only "
"effective with disabled wallet functionality)"),
@@ -75,9 +72,6 @@ QT_TRANSLATE_NOOP("bitcoin-core", ""
"Error: Unsupported argument -socks found. Setting SOCKS version isn't "
"possible anymore, only SOCKS5 proxies are supported."),
QT_TRANSLATE_NOOP("bitcoin-core", ""
-"Execute command when a network tx respends wallet tx input (%s=respend TxID, "
-"%t=wallet TxID)"),
-QT_TRANSLATE_NOOP("bitcoin-core", ""
"Execute command when a relevant alert is received or we see a really long "
"fork (%s in cmd is replaced by message)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
@@ -94,22 +88,24 @@ QT_TRANSLATE_NOOP("bitcoin-core", ""
"creation (default: %s)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
"Flush database activity from memory pool to disk log every <n> megabytes "
-"(default: 100)"),
+"(default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
-"How thorough the block verification of -checkblocks is (0-4, default: 3)"),
+"How thorough the block verification of -checkblocks is (0-4, default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
"If paytxfee is not set, include enough fee so transactions are confirmed on "
-"average within n blocks (default: 1)"),
+"average within n blocks (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
"In this mode -genproclimit controls how many blocks are generated "
"immediately."),
QT_TRANSLATE_NOOP("bitcoin-core", ""
-"Listen for JSON-RPC connections on <port> (default: 8332 or testnet: 18332)"),
+"Log transaction priority and fee per kB when mining blocks (default: %u)"),
+QT_TRANSLATE_NOOP("bitcoin-core", ""
+"Maintain a full transaction index, used by the getrawtransaction rpc call "
+"(default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
-"Number of seconds to keep misbehaving peers from reconnecting (default: "
-"86400)"),
+"Number of seconds to keep misbehaving peers from reconnecting (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
-"Output debugging information (default: 0, supplying <category> is optional)"),
+"Output debugging information (default: %u, supplying <category> is optional)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
"Query for peer addresses via DNS lookup, if low on addresses (default: 1 "
"unless -connect)"),
@@ -120,7 +116,7 @@ QT_TRANSLATE_NOOP("bitcoin-core", ""
"leave that many cores free, default: %d)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
"Set the processor limit for when generation is on (-1 = unlimited, default: "
-"-1)"),
+"%d)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
"This is a pre-release test build - use at your own risk - do not use for "
"mining or merchant applications"),
@@ -132,8 +128,8 @@ QT_TRANSLATE_NOOP("bitcoin-core", ""
"Unable to bind to %s on this computer. Bitcoin Core is probably already "
"running."),
QT_TRANSLATE_NOOP("bitcoin-core", ""
-"Use separate SOCKS5 proxy to reach peers via Tor hidden services (default: -"
-"proxy)"),
+"Use separate SOCKS5 proxy to reach peers via Tor hidden services (default: "
+"%s)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
"Warning: -paytxfee is set very high! This is the transaction fee you will "
"pay if you send a transaction."),
@@ -154,19 +150,20 @@ QT_TRANSLATE_NOOP("bitcoin-core", ""
"wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect "
"you should restore from a backup."),
QT_TRANSLATE_NOOP("bitcoin-core", ""
-"Whitelist peers connecting from the given netmask or ip. Can be specified "
-"multiple times."),
+"Whitelist peers connecting from the given netmask or IP address. Can be "
+"specified multiple times."),
QT_TRANSLATE_NOOP("bitcoin-core", ""
"Whitelisted peers cannot be DoS banned and their transactions are always "
"relayed, even if they are already in the mempool, useful e.g. for a gateway"),
+QT_TRANSLATE_NOOP("bitcoin-core", "(default: %s)"),
QT_TRANSLATE_NOOP("bitcoin-core", "(default: 1)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "(default: wallet.dat)"),
QT_TRANSLATE_NOOP("bitcoin-core", "<category> can be:"),
QT_TRANSLATE_NOOP("bitcoin-core", "Accept command line and JSON-RPC commands"),
QT_TRANSLATE_NOOP("bitcoin-core", "Accept connections from outside (default: 1 if no -proxy or -connect)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Acceptable ciphers (default: %s)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Add a node to connect to and attempt to keep the connection open"),
QT_TRANSLATE_NOOP("bitcoin-core", "Allow DNS lookups for -addnode, -seednode and -connect"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Always query for peer addresses via DNS lookup (default: 0)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Always query for peer addresses via DNS lookup (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Attempt to recover private keys from a corrupt wallet.dat"),
QT_TRANSLATE_NOOP("bitcoin-core", "Block creation options:"),
QT_TRANSLATE_NOOP("bitcoin-core", "Cannot downgrade wallet"),
@@ -182,7 +179,7 @@ QT_TRANSLATE_NOOP("bitcoin-core", "Copyright (C) 2009-%i The Bitcoin Core Develo
QT_TRANSLATE_NOOP("bitcoin-core", "Corrupted block database detected"),
QT_TRANSLATE_NOOP("bitcoin-core", "Could not parse -rpcbind value %s as network address"),
QT_TRANSLATE_NOOP("bitcoin-core", "Debugging/Testing options:"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Disable safemode, override a real safe mode event (default: 0)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Disable safemode, override a real safe mode event (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Discover own IP address (default: 1 when listening and no -externalip)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Do not load the wallet and disable wallet RPC calls"),
QT_TRANSLATE_NOOP("bitcoin-core", "Do you want to rebuild the block database now?"),
@@ -201,13 +198,13 @@ QT_TRANSLATE_NOOP("bitcoin-core", "Error: Unsupported argument -tor found, use -
QT_TRANSLATE_NOOP("bitcoin-core", "Error: Wallet locked, unable to create transaction!"),
QT_TRANSLATE_NOOP("bitcoin-core", "Failed to listen on any port. Use -listen=0 if you want this."),
QT_TRANSLATE_NOOP("bitcoin-core", "Fee (in BTC/kB) to add to transactions you send (default: %s)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Force safe mode (default: 0)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Generate coins (default: 0)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "How many blocks to check at startup (default: 288, 0 = all)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Force safe mode (default: %u)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Generate coins (default: %u)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "How many blocks to check at startup (default: %u, 0 = all)"),
QT_TRANSLATE_NOOP("bitcoin-core", "If <category> is not supplied, output all debugging information."),
QT_TRANSLATE_NOOP("bitcoin-core", "Importing..."),
QT_TRANSLATE_NOOP("bitcoin-core", "Imports blocks from external blk000??.dat file"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Include IP addresses in debug output (default: 0)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Include IP addresses in debug output (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Incorrect or no genesis block found. Wrong datadir for network?"),
QT_TRANSLATE_NOOP("bitcoin-core", "Information"),
QT_TRANSLATE_NOOP("bitcoin-core", "Initialization sanity check failed. Bitcoin Core is shutting down."),
@@ -222,61 +219,60 @@ QT_TRANSLATE_NOOP("bitcoin-core", "Invalid amount"),
QT_TRANSLATE_NOOP("bitcoin-core", "Invalid netmask specified in -whitelist: '%s'"),
QT_TRANSLATE_NOOP("bitcoin-core", "Keep at most <n> unconnectable blocks in memory (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Keep at most <n> unconnectable transactions in memory (default: %u)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Limit size of signature cache to <n> entries (default: 50000)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Listen for connections on <port> (default: 8333 or testnet: 18333)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Limit size of signature cache to <n> entries (default: %u)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Listen for JSON-RPC connections on <port> (default: %u or testnet: %u)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Listen for connections on <port> (default: %u or testnet: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Loading addresses..."),
QT_TRANSLATE_NOOP("bitcoin-core", "Loading block index..."),
QT_TRANSLATE_NOOP("bitcoin-core", "Loading wallet..."),
-QT_TRANSLATE_NOOP("bitcoin-core", "Log transaction priority and fee per kB when mining blocks (default: 0)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Maintain a full transaction index (default: 0)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Maintain at most <n> connections to peers (default: 125)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Maintain at most <n> connections to peers (default: %u)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Maximum per-connection send buffer, <n>*1000 bytes (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Need to specify a port with -whitebind: '%s'"),
QT_TRANSLATE_NOOP("bitcoin-core", "Node relay options:"),
QT_TRANSLATE_NOOP("bitcoin-core", "Not enough file descriptors available."),
-QT_TRANSLATE_NOOP("bitcoin-core", "Only accept block chain matching built-in checkpoints (default: 1)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Only accept block chain matching built-in checkpoints (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Only connect to nodes in network <net> (ipv4, ipv6 or onion)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Options:"),
QT_TRANSLATE_NOOP("bitcoin-core", "Password for JSON-RPC connections"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Prepend debug output with timestamp (default: 1)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Prepend debug output with timestamp (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Print block on startup, if found in block index"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Print block tree on startup (default: 0)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Print block tree on startup (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "RPC SSL options: (see the Bitcoin Wiki for SSL setup instructions)"),
QT_TRANSLATE_NOOP("bitcoin-core", "RPC server options:"),
QT_TRANSLATE_NOOP("bitcoin-core", "Randomly drop 1 of every <n> network messages"),
QT_TRANSLATE_NOOP("bitcoin-core", "Randomly fuzz 1 of every <n> network messages"),
QT_TRANSLATE_NOOP("bitcoin-core", "Rebuild block chain index from current blk000??.dat files"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Relay and mine data carrier transactions (default: 1)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Relay non-P2SH multisig (default: 1)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Relay and mine data carrier transactions (default: %u)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Relay non-P2SH multisig (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Rescan the block chain for missing wallet transactions"),
QT_TRANSLATE_NOOP("bitcoin-core", "Rescanning..."),
-QT_TRANSLATE_NOOP("bitcoin-core", "Run a thread to flush wallet periodically (default: 1)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Run a thread to flush wallet periodically (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Run in the background as a daemon and accept commands"),
QT_TRANSLATE_NOOP("bitcoin-core", "Send trace/debug info to console instead of debug.log file"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Server certificate file (default: server.cert)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Server private key (default: server.pem)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Server certificate file (default: %s)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Server private key (default: %s)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Set database cache size in megabytes (%d to %d, default: %d)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Set key pool size to <n> (default: 100)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Set key pool size to <n> (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Set maximum block size in bytes (default: %d)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Set minimum block size in bytes (default: 0)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Set the number of threads to service RPC calls (default: 4)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Sets the DB_PRIVATE flag in the wallet db environment (default: 1)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Set minimum block size in bytes (default: %u)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Set the number of threads to service RPC calls (default: %d)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Sets the DB_PRIVATE flag in the wallet db environment (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Show all debugging options (usage: --help -help-debug)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Shrink debug.log file on client startup (default: 1 when no -debug)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Signing transaction failed"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Specify configuration file (default: bitcoin.conf)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Specify connection timeout in milliseconds (default: 5000)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Specify configuration file (default: %s)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Specify connection timeout in milliseconds (minimum: 1, default: %d)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Specify data directory"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Specify pid file (default: bitcoind.pid)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Specify pid file (default: %s)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Specify wallet file (within data directory)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Specify your own public address"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Spend unconfirmed change when sending transactions (default: 1)"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Stop running after importing blocks from disk (default: 0)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Spend unconfirmed change when sending transactions (default: %u)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Stop running after importing blocks from disk (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "This help message"),
QT_TRANSLATE_NOOP("bitcoin-core", "This is experimental software."),
QT_TRANSLATE_NOOP("bitcoin-core", "This is intended for regression testing tools and app development."),
-QT_TRANSLATE_NOOP("bitcoin-core", "Threshold for disconnecting misbehaving peers (default: 100)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Threshold for disconnecting misbehaving peers (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "To use the %s option"),
QT_TRANSLATE_NOOP("bitcoin-core", "Transaction amount too small"),
QT_TRANSLATE_NOOP("bitcoin-core", "Transaction amounts must be positive"),
@@ -285,7 +281,7 @@ QT_TRANSLATE_NOOP("bitcoin-core", "Unable to bind to %s on this computer (bind r
QT_TRANSLATE_NOOP("bitcoin-core", "Unknown network specified in -onlynet: '%s'"),
QT_TRANSLATE_NOOP("bitcoin-core", "Upgrade wallet to latest format"),
QT_TRANSLATE_NOOP("bitcoin-core", "Use OpenSSL (https) for JSON-RPC connections"),
-QT_TRANSLATE_NOOP("bitcoin-core", "Use UPnP to map the listening port (default: 0)"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Use UPnP to map the listening port (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Use UPnP to map the listening port (default: 1 when listening)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Use the test network"),
QT_TRANSLATE_NOOP("bitcoin-core", "Username for JSON-RPC connections"),
diff --git a/src/qt/forms/rpcconsole.ui b/src/qt/forms/rpcconsole.ui
index 7f28209c9a..898df2b080 100644
--- a/src/qt/forms/rpcconsole.ui
+++ b/src/qt/forms/rpcconsole.ui
@@ -836,29 +836,6 @@
</property>
</widget>
</item>
- <item row="4" column="0">
- <widget class="QLabel" name="label_25">
- <property name="text">
- <string>Sync Node</string>
- </property>
- </widget>
- </item>
- <item row="4" column="2">
- <widget class="QLabel" name="peerSyncNode">
- <property name="cursor">
- <cursorShape>IBeamCursor</cursorShape>
- </property>
- <property name="text">
- <string>N/A</string>
- </property>
- <property name="textFormat">
- <enum>Qt::PlainText</enum>
- </property>
- <property name="textInteractionFlags">
- <set>Qt::LinksAccessibleByMouse|Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse</set>
- </property>
- </widget>
- </item>
<item row="5" column="0">
<widget class="QLabel" name="label_29">
<property name="text">
diff --git a/src/qt/intro.cpp b/src/qt/intro.cpp
index d469c9a0bd..7618bff69d 100644
--- a/src/qt/intro.cpp
+++ b/src/qt/intro.cpp
@@ -215,10 +215,10 @@ void Intro::setStatus(int status, const QString &message, quint64 bytesAvailable
{
ui->freeSpace->setText("");
} else {
- QString freeString = QString::number(bytesAvailable/GB_BYTES) + tr("GB of free space available");
+ QString freeString = tr("%n GB of free space available", "", bytesAvailable/GB_BYTES);
if(bytesAvailable < BLOCK_CHAIN_SIZE)
{
- freeString += " " + tr("(of %1GB needed)").arg(BLOCK_CHAIN_SIZE/GB_BYTES);
+ freeString += " " + tr("(of %n GB needed)", "", BLOCK_CHAIN_SIZE/GB_BYTES);
ui->freeSpace->setStyleSheet("QLabel { color: #800000 }");
} else {
ui->freeSpace->setStyleSheet("");
diff --git a/src/qt/locale/bitcoin_en.ts b/src/qt/locale/bitcoin_en.ts
index 5c3abef2e7..df285441e1 100644
--- a/src/qt/locale/bitcoin_en.ts
+++ b/src/qt/locale/bitcoin_en.ts
@@ -286,27 +286,27 @@
<context>
<name>BitcoinGUI</name>
<message>
- <location filename="../bitcoingui.cpp" line="+327"/>
+ <location filename="../bitcoingui.cpp" line="+309"/>
<source>Sign &amp;message...</source>
<translation>Sign &amp;message...</translation>
</message>
<message>
- <location line="+348"/>
+ <location line="+339"/>
<source>Synchronizing with network...</source>
<translation>Synchronizing with network...</translation>
</message>
<message>
- <location line="-420"/>
+ <location line="-405"/>
<source>&amp;Overview</source>
<translation>&amp;Overview</translation>
</message>
<message>
- <location line="-142"/>
+ <location line="-129"/>
<source>Node</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+143"/>
+ <location line="+130"/>
<source>Show general overview of wallet</source>
<translation>Show general overview of wallet</translation>
</message>
@@ -331,7 +331,7 @@
<translation>Quit application</translation>
</message>
<message>
- <location line="+10"/>
+ <location line="+7"/>
<location line="+2"/>
<source>About &amp;Qt</source>
<translation>About &amp;Qt</translation>
@@ -347,7 +347,7 @@
<translation>&amp;Options...</translation>
</message>
<message>
- <location line="+9"/>
+ <location line="+6"/>
<source>&amp;Encrypt Wallet...</source>
<translation>&amp;Encrypt Wallet...</translation>
</message>
@@ -377,13 +377,12 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+175"/>
- <location line="+5"/>
+ <location line="+172"/>
<source>Bitcoin Core client</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+158"/>
+ <location line="+157"/>
<source>Importing blocks from disk...</source>
<translation>Importing blocks from disk...</translation>
</message>
@@ -393,17 +392,17 @@
<translation>Reindexing blocks on disk...</translation>
</message>
<message>
- <location line="-418"/>
+ <location line="-403"/>
<source>Send coins to a Bitcoin address</source>
<translation>Send coins to a Bitcoin address</translation>
</message>
<message>
- <location line="+49"/>
+ <location line="+46"/>
<source>Modify configuration options for Bitcoin</source>
<translation>Modify configuration options for Bitcoin</translation>
</message>
<message>
- <location line="+12"/>
+ <location line="+9"/>
<source>Backup wallet to another location</source>
<translation>Backup wallet to another location</translation>
</message>
@@ -428,17 +427,17 @@
<translation>&amp;Verify message...</translation>
</message>
<message>
- <location line="+446"/>
+ <location line="+437"/>
<source>Bitcoin</source>
<translation>Bitcoin</translation>
</message>
<message>
- <location line="-664"/>
+ <location line="-636"/>
<source>Wallet</source>
<translation>Wallet</translation>
</message>
<message>
- <location line="+151"/>
+ <location line="+138"/>
<source>&amp;Send</source>
<translation>&amp;Send</translation>
</message>
@@ -448,13 +447,12 @@
<translation>&amp;Receive</translation>
</message>
<message>
- <location line="+33"/>
+ <location line="+30"/>
<source>Show information about Bitcoin Core</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+13"/>
- <location line="+2"/>
+ <location line="+12"/>
<source>&amp;Show / Hide</source>
<translation>&amp;Show / Hide</translation>
</message>
@@ -499,29 +497,22 @@
<translation>Tabs toolbar</translation>
</message>
<message>
- <location line="-289"/>
- <location line="+393"/>
- <source>[testnet]</source>
- <translation>[testnet]</translation>
- </message>
- <message>
- <location line="-418"/>
+ <location line="-295"/>
<source>Bitcoin Core</source>
<translation type="unfinished">Bitcoin Core</translation>
</message>
<message>
- <location line="+168"/>
+ <location line="+155"/>
<source>Request payments (generates QR codes and bitcoin: URIs)</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+29"/>
- <location line="+2"/>
+ <location line="+28"/>
<source>&amp;About Bitcoin Core</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+35"/>
+ <location line="+32"/>
<source>Show the list of used sending addresses and labels</source>
<translation type="unfinished"></translation>
</message>
@@ -546,7 +537,7 @@
<translation type="unfinished"></translation>
</message>
<message numerus="yes">
- <location line="+316"/>
+ <location line="+307"/>
<source>%n active connection(s) to Bitcoin network</source>
<translation>
<numerusform>%n active connection to Bitcoin network</numerusform>
@@ -558,13 +549,8 @@
<source>No block source available...</source>
<translation>No block source available...</translation>
</message>
- <message>
- <location line="+10"/>
- <source>Processed %1 blocks of transaction history.</source>
- <translation>Processed %1 blocks of transaction history.</translation>
- </message>
<message numerus="yes">
- <location line="+26"/>
+ <location line="+36"/>
<source>%n hour(s)</source>
<translation>
<numerusform>%n hour</numerusform>
@@ -636,8 +622,16 @@
<source>Up to date</source>
<translation>Up to date</translation>
</message>
+ <message numerus="yes">
+ <location line="-5"/>
+ <source>Processed %n blocks of transaction history.</source>
+ <translation type="unfinished">
+ <numerusform></numerusform>
+ <numerusform></numerusform>
+ </translation>
+ </message>
<message>
- <location line="+44"/>
+ <location line="+49"/>
<source>Catching up...</source>
<translation>Catching up...</translation>
</message>
@@ -1172,15 +1166,21 @@ Address: %4
<source>Error</source>
<translation>Error</translation>
</message>
- <message>
+ <message numerus="yes">
<location line="+9"/>
- <source>GB of free space available</source>
- <translation>GB of free space available</translation>
+ <source>%n GB of free space available</source>
+ <translation type="unfinished">
+ <numerusform></numerusform>
+ <numerusform></numerusform>
+ </translation>
</message>
- <message>
+ <message numerus="yes">
<location line="+3"/>
- <source>(of %1GB needed)</source>
- <translation>(of %1GB needed)</translation>
+ <source>(of %n GB needed)</source>
+ <translation type="unfinished">
+ <numerusform></numerusform>
+ <numerusform></numerusform>
+ </translation>
</message>
</context>
<context>
@@ -1813,12 +1813,11 @@ Address: %4
<location line="+23"/>
<location line="+23"/>
<location line="+23"/>
- <location line="+23"/>
<source>N/A</source>
<translation>N/A</translation>
</message>
<message>
- <location line="-990"/>
+ <location line="-967"/>
<source>Client version</source>
<translation>Client version</translation>
</message>
@@ -1921,11 +1920,6 @@ Address: %4
</message>
<message>
<location line="+23"/>
- <source>Sync Node</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+23"/>
<source>Starting Height</source>
<translation type="unfinished"></translation>
</message>
@@ -1970,7 +1964,7 @@ Address: %4
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-764"/>
+ <location line="-741"/>
<source>Last block time</source>
<translation>Last block time</translation>
</message>
@@ -2086,17 +2080,7 @@ Address: %4
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+2"/>
- <source>Yes</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+0"/>
- <source>No</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+12"/>
+ <location line="+13"/>
<source>Unknown</source>
<translation type="unfinished"></translation>
</message>
@@ -2853,7 +2837,7 @@ Address: %4
<context>
<name>SplashScreen</name>
<message>
- <location filename="../splashscreen.cpp" line="+34"/>
+ <location filename="../splashscreen.cpp" line="+35"/>
<source>Bitcoin Core</source>
<translation type="unfinished">Bitcoin Core</translation>
</message>
@@ -2863,7 +2847,7 @@ Address: %4
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+1"/>
+ <location filename="../networkstyle.cpp" line="+19"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
@@ -3418,7 +3402,7 @@ Address: %4
<context>
<name>UnitDisplayStatusBarControl</name>
<message>
- <location filename="../bitcoingui.cpp" line="+103"/>
+ <location filename="../bitcoingui.cpp" line="+106"/>
<source>Unit to show amounts in. Click to select another unit.</source>
<translation type="unfinished"></translation>
</message>
@@ -3485,62 +3469,27 @@ Address: %4
<context>
<name>bitcoin-core</name>
<message>
- <location filename="../bitcoinstrings.cpp" line="+240"/>
+ <location filename="../bitcoinstrings.cpp" line="+236"/>
<source>Options:</source>
<translation>Options:</translation>
</message>
<message>
- <location line="+28"/>
- <source>Specify configuration file (default: bitcoin.conf)</source>
- <translation>Specify configuration file (default: bitcoin.conf)</translation>
- </message>
- <message>
- <location line="+3"/>
- <source>Specify pid file (default: bitcoind.pid)</source>
- <translation>Specify pid file (default: bitcoind.pid)</translation>
- </message>
- <message>
- <location line="-1"/>
+ <location line="+30"/>
<source>Specify data directory</source>
<translation>Specify data directory</translation>
</message>
<message>
- <location line="-44"/>
- <source>Listen for connections on &lt;port&gt; (default: 8333 or testnet: 18333)</source>
- <translation>Listen for connections on &lt;port&gt; (default: 8333 or testnet: 18333)</translation>
- </message>
- <message>
- <location line="+6"/>
- <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source>
- <translation>Maintain at most &lt;n&gt; connections to peers (default: 125)</translation>
- </message>
- <message>
- <location line="-53"/>
+ <location line="-90"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Connect to a node to retrieve peer addresses, and disconnect</translation>
</message>
<message>
- <location line="+94"/>
+ <location line="+93"/>
<source>Specify your own public address</source>
<translation>Specify your own public address</translation>
</message>
<message>
- <location line="+6"/>
- <source>Threshold for disconnecting misbehaving peers (default: 100)</source>
- <translation>Threshold for disconnecting misbehaving peers (default: 100)</translation>
- </message>
- <message>
- <location line="-171"/>
- <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
- <translation>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</translation>
- </message>
- <message>
- <location line="-2"/>
- <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 8332 or testnet: 18332)</source>
- <translation>Listen for JSON-RPC connections on &lt;port&gt; (default: 8332 or testnet: 18332)</translation>
- </message>
- <message>
- <location line="+59"/>
+ <location line="-108"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Accept command line and JSON-RPC commands</translation>
</message>
@@ -3560,7 +3509,7 @@ Address: %4
<translation>Accept connections from outside (default: 1 if no -proxy or -connect)</translation>
</message>
<message>
- <location line="-154"/>
+ <location line="-150"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
@@ -3585,22 +3534,12 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
</translation>
</message>
<message>
- <location line="+15"/>
- <source>Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+10"/>
+ <location line="+22"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation>Bind to given address and always listen on it. Use [host]:port notation for IPv6</translation>
</message>
<message>
- <location line="+13"/>
- <source>Continuously rate-limit free transactions to &lt;n&gt;*1000 bytes per minute (default:15)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+6"/>
+ <location line="+19"/>
<source>Delete all wallet transactions and only recover those parts of the blockchain through -rescan on startup</source>
<translation type="unfinished"></translation>
</message>
@@ -3620,37 +3559,22 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</translation>
</message>
<message>
- <location line="+12"/>
+ <location line="+9"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</translation>
</message>
<message>
- <location line="+12"/>
- <source>Flush database activity from memory pool to disk log every &lt;n&gt; megabytes (default: 100)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+3"/>
- <source>How thorough the block verification of -checkblocks is (0-4, default: 3)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+5"/>
+ <location line="+20"/>
<source>In this mode -genproclimit controls how many blocks are generated immediately.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+15"/>
+ <location line="+17"/>
<source>Set the number of script verification threads (%u to %d, 0 = auto, &lt;0 = leave that many cores free, default: %d)</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+3"/>
- <source>Set the processor limit for when generation is on (-1 = unlimited, default: -1)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+3"/>
+ <location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</translation>
</message>
@@ -3660,12 +3584,7 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+3"/>
- <source>Use separate SOCKS5 proxy to reach peers via Tor hidden services (default: -proxy)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+3"/>
+ <location line="+6"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</translation>
</message>
@@ -3690,13 +3609,13 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</translation>
</message>
<message>
- <location line="+10"/>
- <source>(default: 1)</source>
+ <location line="+4"/>
+ <source>Whitelist peers connecting from the given netmask or IP address. Can be specified multiple times.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+1"/>
- <source>(default: wallet.dat)</source>
+ <location line="+7"/>
+ <source>(default: 1)</source>
<translation type="unfinished"></translation>
</message>
<message>
@@ -3705,7 +3624,7 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+6"/>
+ <location line="+7"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Attempt to recover private keys from a corrupt wallet.dat</translation>
</message>
@@ -3735,12 +3654,7 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+1"/>
- <source>Disable safemode, override a real safe mode event (default: 0)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+1"/>
+ <location line="+2"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Discover own IP address (default: 1 when listening and no -externalip)</translation>
</message>
@@ -3795,22 +3709,7 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation>Failed to listen on any port. Use -listen=0 if you want this.</translation>
</message>
<message>
- <location line="+2"/>
- <source>Force safe mode (default: 0)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+1"/>
- <source>Generate coins (default: 0)</source>
- <translation>Generate coins (default: 0)</translation>
- </message>
- <message>
- <location line="+1"/>
- <source>How many blocks to check at startup (default: 288, 0 = all)</source>
- <translation>How many blocks to check at startup (default: 288, 0 = all)</translation>
- </message>
- <message>
- <location line="+1"/>
+ <location line="+5"/>
<source>If &lt;category&gt; is not supplied, output all debugging information.</source>
<translation type="unfinished"></translation>
</message>
@@ -3830,7 +3729,7 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+22"/>
+ <location line="+21"/>
<source>Not enough file descriptors available.</source>
<translation>Not enough file descriptors available.</translation>
</message>
@@ -3840,12 +3739,7 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+3"/>
- <source>Prepend debug output with timestamp (default: 1)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+7"/>
+ <location line="+10"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation>Rebuild block chain index from current blk000??.dat files</translation>
</message>
@@ -3860,32 +3754,22 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+2"/>
- <source>Set the number of threads to service RPC calls (default: 4)</source>
- <translation>Set the number of threads to service RPC calls (default: 4)</translation>
- </message>
- <message>
- <location line="+9"/>
+ <location line="+11"/>
<source>Specify wallet file (within data directory)</source>
<translation>Specify wallet file (within data directory)</translation>
</message>
<message>
- <location line="+2"/>
- <source>Spend unconfirmed change when sending transactions (default: 1)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+1"/>
- <source>Stop running after importing blocks from disk (default: 0)</source>
+ <location line="+6"/>
+ <source>This is intended for regression testing tools and app development.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+3"/>
- <source>This is intended for regression testing tools and app development.</source>
+ <location line="+10"/>
+ <source>Use UPnP to map the listening port (default: %u)</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+14"/>
+ <location line="+4"/>
<source>Verifying blocks...</source>
<translation>Verifying blocks...</translation>
</message>
@@ -3910,17 +3794,12 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation>You need to rebuild the database using -reindex to change -txindex</translation>
</message>
<message>
- <location line="-92"/>
+ <location line="-91"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation>Imports blocks from external blk000??.dat file</translation>
</message>
<message>
- <location line="-185"/>
- <source>(default: 1, 1 = keep tx meta data e.g. account owner and payment request information, 2 = drop tx meta data)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+6"/>
+ <location line="-179"/>
<source>Allow JSON-RPC connections from specified source. Valid for &lt;ip&gt; are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times</source>
<translation type="unfinished"></translation>
</message>
@@ -3945,7 +3824,12 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+6"/>
+ <location line="+3"/>
+ <source>Continuously rate-limit free transactions to &lt;n&gt;*1000 bytes per minute (default:%u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+3"/>
<source>Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)</source>
<translation type="unfinished"></translation>
</message>
@@ -3966,11 +3850,6 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
</message>
<message>
<location line="+3"/>
- <source>Execute command when a network tx respends wallet tx input (%s=respend TxID, %t=wallet TxID)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+3"/>
<source>Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)</source>
<translation>Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)</translation>
</message>
@@ -3985,17 +3864,7 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+8"/>
- <source>If paytxfee is not set, include enough fee so transactions are confirmed on average within n blocks (default: 1)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+11"/>
- <source>Output debugging information (default: 0, supplying &lt;category&gt; is optional)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+2"/>
+ <location line="+23"/>
<source>Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)</source>
<translation type="unfinished"></translation>
</message>
@@ -4015,22 +3884,12 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+16"/>
- <source>Whitelist peers connecting from the given netmask or ip. Can be specified multiple times.</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+3"/>
+ <location line="+19"/>
<source>Whitelisted peers cannot be DoS banned and their transactions are always relayed, even if they are already in the mempool, useful e.g. for a gateway</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+10"/>
- <source>Always query for peer addresses via DNS lookup (default: 0)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+6"/>
+ <location line="+17"/>
<source>Cannot resolve -whitebind address: &apos;%s&apos;</source>
<translation type="unfinished"></translation>
</message>
@@ -4065,12 +3924,7 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+7"/>
- <source>Include IP addresses in debug output (default: 0)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+2"/>
+ <location line="+9"/>
<source>Information</source>
<translation>Information</translation>
</message>
@@ -4110,32 +3964,7 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+1"/>
- <source>Limit size of signature cache to &lt;n&gt; entries (default: 50000)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+5"/>
- <source>Log transaction priority and fee per kB when mining blocks (default: 0)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+1"/>
- <source>Maintain a full transaction index (default: 0)</source>
- <translation>Maintain a full transaction index (default: 0)</translation>
- </message>
- <message>
- <location line="+2"/>
- <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source>
- <translation>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</translation>
- </message>
- <message>
- <location line="+1"/>
- <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source>
- <translation>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</translation>
- </message>
- <message>
- <location line="+1"/>
+ <location line="+10"/>
<source>Need to specify a port with -whitebind: &apos;%s&apos;</source>
<translation type="unfinished"></translation>
</message>
@@ -4145,22 +3974,12 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+2"/>
- <source>Only accept block chain matching built-in checkpoints (default: 1)</source>
- <translation>Only accept block chain matching built-in checkpoints (default: 1)</translation>
- </message>
- <message>
- <location line="+5"/>
+ <location line="+7"/>
<source>Print block on startup, if found in block index</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+1"/>
- <source>Print block tree on startup (default: 0)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+1"/>
+ <location line="+2"/>
<source>RPC SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation type="unfinished"></translation>
</message>
@@ -4180,37 +3999,12 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+2"/>
- <source>Relay and mine data carrier transactions (default: 1)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+1"/>
- <source>Relay non-P2SH multisig (default: 1)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+3"/>
- <source>Run a thread to flush wallet periodically (default: 1)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+2"/>
+ <location line="+8"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Send trace/debug info to console instead of debug.log file</translation>
</message>
<message>
- <location line="+6"/>
- <source>Set minimum block size in bytes (default: 0)</source>
- <translation>Set minimum block size in bytes (default: 0)</translation>
- </message>
- <message>
- <location line="+2"/>
- <source>Sets the DB_PRIVATE flag in the wallet db environment (default: 1)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+1"/>
+ <location line="+9"/>
<source>Show all debugging options (usage: --help -help-debug)</source>
<translation type="unfinished"></translation>
</message>
@@ -4225,12 +4019,7 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation>Signing transaction failed</translation>
</message>
<message>
- <location line="+2"/>
- <source>Specify connection timeout in milliseconds (default: 5000)</source>
- <translation>Specify connection timeout in milliseconds (default: 5000)</translation>
- </message>
- <message>
- <location line="+8"/>
+ <location line="+10"/>
<source>This is experimental software.</source>
<translation type="unfinished"></translation>
</message>
@@ -4255,12 +4044,7 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+4"/>
- <source>Use UPnP to map the listening port (default: 0)</source>
- <translation>Use UPnP to map the listening port (default: 0)</translation>
- </message>
- <message>
- <location line="+1"/>
+ <location line="+5"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Use UPnP to map the listening port (default: 1 when listening)</translation>
</message>
@@ -4315,22 +4099,17 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation>Password for JSON-RPC connections</translation>
</message>
<message>
- <location line="-155"/>
+ <location line="-157"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Execute command when the best block changes (%s in cmd is replaced by block hash)</translation>
</message>
<message>
- <location line="+200"/>
+ <location line="+202"/>
<source>Upgrade wallet to latest format</source>
<translation>Upgrade wallet to latest format</translation>
</message>
<message>
- <location line="-26"/>
- <source>Set key pool size to &lt;n&gt; (default: 100)</source>
- <translation>Set key pool size to &lt;n&gt; (default: 100)</translation>
- </message>
- <message>
- <location line="-8"/>
+ <location line="-34"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Rescan the block chain for missing wallet transactions</translation>
</message>
@@ -4340,52 +4119,252 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation>Use OpenSSL (https) for JSON-RPC connections</translation>
</message>
<message>
- <location line="-30"/>
- <source>Server certificate file (default: server.cert)</source>
- <translation>Server certificate file (default: server.cert)</translation>
- </message>
- <message>
- <location line="+1"/>
- <source>Server private key (default: server.pem)</source>
- <translation>Server private key (default: server.pem)</translation>
- </message>
- <message>
- <location line="+18"/>
+ <location line="-11"/>
<source>This help message</source>
<translation>This help message</translation>
</message>
<message>
- <location line="-108"/>
+ <location line="-107"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Allow DNS lookups for -addnode, -seednode and -connect</translation>
</message>
<message>
- <location line="+59"/>
+ <location line="+60"/>
<source>Loading addresses...</source>
<translation>Loading addresses...</translation>
</message>
<message>
- <location line="-33"/>
+ <location line="-34"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Error loading wallet.dat: Wallet corrupted</translation>
</message>
<message>
- <location line="-1"/>
+ <location line="-167"/>
+ <source>(1 = keep tx meta data e.g. account owner and payment request information, 2 = drop tx meta data)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+65"/>
+ <source>Flush database activity from memory pool to disk log every &lt;n&gt; megabytes (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+3"/>
+ <source>How thorough the block verification of -checkblocks is (0-4, default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+2"/>
+ <source>If paytxfee is not set, include enough fee so transactions are confirmed on average within n blocks (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+6"/>
+ <source>Log transaction priority and fee per kB when mining blocks (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+2"/>
+ <source>Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+3"/>
+ <source>Number of seconds to keep misbehaving peers from reconnecting (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+2"/>
+ <source>Output debugging information (default: %u, supplying &lt;category&gt; is optional)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+10"/>
+ <source>Set the processor limit for when generation is on (-1 = unlimited, default: %d)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+13"/>
+ <source>Use separate SOCKS5 proxy to reach peers via Tor hidden services (default: %s)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+28"/>
+ <source>(default: %s)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+5"/>
+ <source>Acceptable ciphers (default: %s)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+3"/>
+ <source>Always query for peer addresses via DNS lookup (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+16"/>
+ <source>Disable safemode, override a real safe mode event (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+8"/>
<source>Error loading wallet.dat</source>
<translation>Error loading wallet.dat</translation>
</message>
<message>
- <location line="+23"/>
+ <location line="+11"/>
+ <source>Force safe mode (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Generate coins (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>How many blocks to check at startup (default: %u, 0 = all)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+4"/>
+ <source>Include IP addresses in debug output (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+6"/>
<source>Invalid -proxy address: &apos;%s&apos;</source>
<translation>Invalid -proxy address: &apos;%s&apos;</translation>
</message>
<message>
- <location line="+69"/>
+ <location line="+9"/>
+ <source>Limit size of signature cache to &lt;n&gt; entries (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Listen for JSON-RPC connections on &lt;port&gt; (default: %u or testnet: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Listen for connections on &lt;port&gt; (default: %u or testnet: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+4"/>
+ <source>Maintain at most &lt;n&gt; connections to peers (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+4"/>
+ <source>Only accept block chain matching built-in checkpoints (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+4"/>
+ <source>Prepend debug output with timestamp (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+2"/>
+ <source>Print block tree on startup (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+6"/>
+ <source>Relay and mine data carrier transactions (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Relay non-P2SH multisig (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+3"/>
+ <source>Run a thread to flush wallet periodically (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+3"/>
+ <source>Server certificate file (default: %s)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Server private key (default: %s)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+2"/>
+ <source>Set key pool size to &lt;n&gt; (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+2"/>
+ <source>Set minimum block size in bytes (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Set the number of threads to service RPC calls (default: %d)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Sets the DB_PRIVATE flag in the wallet db environment (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+4"/>
+ <source>Specify configuration file (default: %s)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Specify connection timeout in milliseconds (minimum: 1, default: %d)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+2"/>
+ <source>Specify pid file (default: %s)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+3"/>
+ <source>Spend unconfirmed change when sending transactions (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Stop running after importing blocks from disk (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+4"/>
+ <source>Threshold for disconnecting misbehaving peers (default: %u)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+6"/>
<source>Unknown network specified in -onlynet: &apos;%s&apos;</source>
<translation>Unknown network specified in -onlynet: &apos;%s&apos;</translation>
</message>
<message>
- <location line="-112"/>
+ <location line="-111"/>
<source>Cannot resolve -bind address: &apos;%s&apos;</source>
<translation>Cannot resolve -bind address: &apos;%s&apos;</translation>
</message>
@@ -4410,22 +4389,22 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation>Insufficient funds</translation>
</message>
<message>
- <location line="+14"/>
+ <location line="+15"/>
<source>Loading block index...</source>
<translation>Loading block index...</translation>
</message>
<message>
- <location line="-61"/>
+ <location line="-62"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Add a node to connect to and attempt to keep the connection open</translation>
</message>
<message>
- <location line="+62"/>
+ <location line="+63"/>
<source>Loading wallet...</source>
<translation>Loading wallet...</translation>
</message>
<message>
- <location line="-57"/>
+ <location line="-58"/>
<source>Cannot downgrade wallet</source>
<translation>Cannot downgrade wallet</translation>
</message>
@@ -4435,22 +4414,22 @@ for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.
<translation>Cannot write default address</translation>
</message>
<message>
- <location line="+77"/>
+ <location line="+76"/>
<source>Rescanning...</source>
<translation>Rescanning...</translation>
</message>
<message>
- <location line="-64"/>
+ <location line="-63"/>
<source>Done loading</source>
<translation>Done loading</translation>
</message>
<message>
- <location line="+91"/>
+ <location line="+90"/>
<source>To use the %s option</source>
<translation>To use the %s option</translation>
</message>
<message>
- <location line="-83"/>
+ <location line="-82"/>
<source>Error</source>
<translation>Error</translation>
</message>
diff --git a/src/qt/monitoreddatamapper.cpp b/src/qt/monitoreddatamapper.cpp
deleted file mode 100644
index 5931c53872..0000000000
--- a/src/qt/monitoreddatamapper.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2011-2013 The Bitcoin developers
-// Distributed under the MIT/X11 software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include "monitoreddatamapper.h"
-
-#include <QMetaObject>
-#include <QMetaProperty>
-#include <QWidget>
-
-MonitoredDataMapper::MonitoredDataMapper(QObject *parent) :
- QDataWidgetMapper(parent)
-{
-}
-
-void MonitoredDataMapper::addMapping(QWidget *widget, int section)
-{
- QDataWidgetMapper::addMapping(widget, section);
- addChangeMonitor(widget);
-}
-
-void MonitoredDataMapper::addMapping(QWidget *widget, int section, const QByteArray &propertyName)
-{
- QDataWidgetMapper::addMapping(widget, section, propertyName);
- addChangeMonitor(widget);
-}
-
-void MonitoredDataMapper::addChangeMonitor(QWidget *widget)
-{
- // Watch user property of widget for changes, and connect
- // the signal to our viewModified signal.
- QMetaProperty prop = widget->metaObject()->userProperty();
- int signal = prop.notifySignalIndex();
- int method = this->metaObject()->indexOfMethod("viewModified()");
- if(signal != -1 && method != -1)
- {
- QMetaObject::connect(widget, signal, this, method);
- }
-}
diff --git a/src/qt/monitoreddatamapper.h b/src/qt/monitoreddatamapper.h
deleted file mode 100644
index b3237d3e09..0000000000
--- a/src/qt/monitoreddatamapper.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2011-2013 The Bitcoin developers
-// Distributed under the MIT/X11 software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef MONITOREDDATAMAPPER_H
-#define MONITOREDDATAMAPPER_H
-
-#include <QDataWidgetMapper>
-
-QT_BEGIN_NAMESPACE
-class QWidget;
-QT_END_NAMESPACE
-
-/** Data to Widget mapper that watches for edits and notifies listeners when a field is edited.
- This can be used, for example, to enable a commit/apply button in a configuration dialog.
- */
-class MonitoredDataMapper : public QDataWidgetMapper
-{
- Q_OBJECT
-
-public:
- explicit MonitoredDataMapper(QObject *parent=0);
-
- void addMapping(QWidget *widget, int section);
- void addMapping(QWidget *widget, int section, const QByteArray &propertyName);
-
-private:
- void addChangeMonitor(QWidget *widget);
-
-signals:
- void viewModified();
-};
-
-#endif // MONITOREDDATAMAPPER_H
diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp
index 279467129f..67be174d55 100644
--- a/src/qt/optionsdialog.cpp
+++ b/src/qt/optionsdialog.cpp
@@ -11,7 +11,6 @@
#include "bitcoinunits.h"
#include "guiutil.h"
-#include "monitoreddatamapper.h"
#include "optionsmodel.h"
#include "main.h" // for MAX_SCRIPTCHECK_THREADS
@@ -24,6 +23,7 @@
#include <boost/thread.hpp>
+#include <QDataWidgetMapper>
#include <QDir>
#include <QIntValidator>
#include <QLocale>
@@ -105,7 +105,7 @@ OptionsDialog::OptionsDialog(QWidget *parent) :
#endif
/* Widget-to-option mapper */
- mapper = new MonitoredDataMapper(this);
+ mapper = new QDataWidgetMapper(this);
mapper->setSubmitPolicy(QDataWidgetMapper::ManualSubmit);
mapper->setOrientation(Qt::Vertical);
diff --git a/src/qt/optionsdialog.h b/src/qt/optionsdialog.h
index 6b62069660..39c53f4391 100644
--- a/src/qt/optionsdialog.h
+++ b/src/qt/optionsdialog.h
@@ -7,7 +7,7 @@
#include <QDialog>
-class MonitoredDataMapper;
+class QDataWidgetMapper;
class OptionsModel;
class QValidatedLineEdit;
@@ -52,7 +52,7 @@ signals:
private:
Ui::OptionsDialog *ui;
OptionsModel *model;
- MonitoredDataMapper *mapper;
+ QDataWidgetMapper *mapper;
bool fProxyIpValid;
};
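
The options dialog now relies on a stock QDataWidgetMapper instead of the removed MonitoredDataMapper wrapper. A minimal sketch of the equivalent setup, with hypothetical widget and model names (not the actual OptionsDialog members):

    #include <QAbstractItemModel>
    #include <QDataWidgetMapper>
    #include <QSpinBox>

    void setupOptionsMapper(QObject* parent, QAbstractItemModel* optionsModel, QSpinBox* dbCacheSpin)
    {
        QDataWidgetMapper* mapper = new QDataWidgetMapper(parent);
        mapper->setSubmitPolicy(QDataWidgetMapper::ManualSubmit); // commit only on Apply/OK
        mapper->setOrientation(Qt::Vertical);                     // one option per model row
        mapper->setModel(optionsModel);
        mapper->addMapping(dbCacheSpin, /*section=*/0);           // hypothetical column index
        mapper->toFirst();
    }
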
diff --git a/src/qt/recentrequeststablemodel.cpp b/src/qt/recentrequeststablemodel.cpp
index 0e5802922c..5deac8007c 100644
--- a/src/qt/recentrequeststablemodel.cpp
+++ b/src/qt/recentrequeststablemodel.cpp
@@ -7,6 +7,7 @@
#include "bitcoinunits.h"
#include "guiutil.h"
#include "optionsmodel.h"
+#include "streams.h"
#include <boost/foreach.hpp>
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index 8129353d4b..2d2d448b49 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -611,7 +611,6 @@ void RPCConsole::updateNodeDetail(const CNodeCombinedStats *stats)
ui->peerSubversion->setText(QString::fromStdString(stats->nodeStats.cleanSubVer));
ui->peerDirection->setText(stats->nodeStats.fInbound ? tr("Inbound") : tr("Outbound"));
ui->peerHeight->setText(QString("%1").arg(stats->nodeStats.nStartingHeight));
- ui->peerSyncNode->setText(stats->nodeStats.fSyncNode ? tr("Yes") : tr("No"));
// This check fails for example if the lock was busy and
// nodeStateStats couldn't be fetched.
diff --git a/src/qt/test/paymentservertests.cpp b/src/qt/test/paymentservertests.cpp
index 5d7fe96285..84cab01c50 100644
--- a/src/qt/test/paymentservertests.cpp
+++ b/src/qt/test/paymentservertests.cpp
@@ -8,6 +8,7 @@
#include "paymentrequestdata.h"
#include "util.h"
+#include "utilstrencodings.h"
#include <openssl/x509.h>
#include <openssl/x509_vfy.h>
diff --git a/src/rpcblockchain.cpp b/src/rpcblockchain.cpp
index 24175215bf..78f5569895 100644
--- a/src/rpcblockchain.cpp
+++ b/src/rpcblockchain.cpp
@@ -225,7 +225,7 @@ Value getblockhash(const Array& params, bool fHelp)
int nHeight = params[0].get_int();
if (nHeight < 0 || nHeight > chainActive.Height())
- throw runtime_error("Block number out of range.");
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Block height out of range");
CBlockIndex* pblockindex = chainActive[nHeight];
return pblockindex->GetBlockHash().GetHex();
@@ -319,6 +319,7 @@ Value gettxoutsetinfo(const Array& params, bool fHelp)
Object ret;
CCoinsStats stats;
+ pcoinsTip->Flush();
if (pcoinsTip->GetStats(stats)) {
ret.push_back(Pair("height", (int64_t)stats.nHeight));
ret.push_back(Pair("bestblock", stats.hashBlock.GetHex()));
@@ -445,6 +446,7 @@ Value getblockchaininfo(const Array& params, bool fHelp)
"{\n"
" \"chain\": \"xxxx\", (string) current network name as defined in BIP70 (main, test, regtest)\n"
" \"blocks\": xxxxxx, (numeric) the current number of blocks processed in the server\n"
+ " \"headers\": xxxxxx, (numeric) the current number of headers we have validated\n"
" \"bestblockhash\": \"...\", (string) the hash of the currently best block\n"
" \"difficulty\": xxxxxx, (numeric) the current difficulty\n"
" \"verificationprogress\": xxxx, (numeric) estimate of verification progress [0..1]\n"
@@ -458,6 +460,7 @@ Value getblockchaininfo(const Array& params, bool fHelp)
Object obj;
obj.push_back(Pair("chain", Params().NetworkIDString()));
obj.push_back(Pair("blocks", (int)chainActive.Height()));
+ obj.push_back(Pair("headers", pindexBestHeader ? pindexBestHeader->nHeight : -1));
obj.push_back(Pair("bestblockhash", chainActive.Tip()->GetBlockHash().GetHex()));
obj.push_back(Pair("difficulty", (double)GetDifficulty()));
obj.push_back(Pair("verificationprogress", Checkpoints::GuessVerificationProgress(chainActive.Tip())));
diff --git a/src/rpcmisc.cpp b/src/rpcmisc.cpp
index 8be14b567c..92ed1c3e2b 100644
--- a/src/rpcmisc.cpp
+++ b/src/rpcmisc.cpp
@@ -292,7 +292,7 @@ Value createmultisig(const Array& params, bool fHelp)
// Construct using pay-to-script-hash:
CScript inner = _createmultisig_redeemScript(params);
- CScriptID innerID = inner.GetID();
+ CScriptID innerID(inner);
CBitcoinAddress address(innerID);
Object result;
diff --git a/src/rpcnet.cpp b/src/rpcnet.cpp
index bc19d1372a..12dcd5b540 100644
--- a/src/rpcnet.cpp
+++ b/src/rpcnet.cpp
@@ -97,7 +97,12 @@ Value getpeerinfo(const Array& params, bool fHelp)
" \"inbound\": true|false, (boolean) Inbound (true) or Outbound (false)\n"
" \"startingheight\": n, (numeric) The starting height (block) of the peer\n"
" \"banscore\": n, (numeric) The ban score\n"
- " \"syncnode\": true|false (boolean) if sync node\n"
+ " \"synced_headers\": n, (numeric) The last header we have in common with this peer\n"
+ " \"synced_blocks\": n, (numeric) The last block we have in common with this peer\n"
+ " \"inflight\": [\n"
+ " n, (numeric) The heights of blocks we're currently asking from this peer\n"
+ " ...\n"
+ " ]\n"
" }\n"
" ,...\n"
"]\n"
@@ -137,9 +142,14 @@ Value getpeerinfo(const Array& params, bool fHelp)
obj.push_back(Pair("startingheight", stats.nStartingHeight));
if (fStateStats) {
obj.push_back(Pair("banscore", statestats.nMisbehavior));
- obj.push_back(Pair("syncheight", statestats.nSyncHeight));
+ obj.push_back(Pair("synced_headers", statestats.nSyncHeight));
+ obj.push_back(Pair("synced_blocks", statestats.nCommonHeight));
+ Array heights;
+ BOOST_FOREACH(int height, statestats.vHeightInFlight) {
+ heights.push_back(height);
+ }
+ obj.push_back(Pair("inflight", heights));
}
- obj.push_back(Pair("syncnode", stats.fSyncNode));
obj.push_back(Pair("whitelisted", stats.fWhitelisted));
ret.push_back(obj);
diff --git a/src/rpcrawtransaction.cpp b/src/rpcrawtransaction.cpp
index 78372da685..fdfcb59eeb 100644
--- a/src/rpcrawtransaction.cpp
+++ b/src/rpcrawtransaction.cpp
@@ -480,7 +480,7 @@ Value decodescript(const Array& params, bool fHelp)
}
ScriptPubKeyToJSON(script, r, false);
- r.push_back(Pair("p2sh", CBitcoinAddress(script.GetID()).ToString()));
+ r.push_back(Pair("p2sh", CBitcoinAddress(CScriptID(script)).ToString()));
return r;
}
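
With CScript::GetID() removed, P2SH addresses are derived by constructing a CScriptID directly from the script. A minimal sketch of the new path, assuming the usual base58.h declaration of CBitcoinAddress (the helper name is illustrative, not part of the tree):

    #include "base58.h"          // CBitcoinAddress
    #include "script/script.h"   // CScript
    #include "script/standard.h" // CScriptID

    std::string P2SHAddressFor(const CScript& redeemScript)
    {
        CScriptID id(redeemScript);            // Hash160 of the serialized script
        return CBitcoinAddress(id).ToString(); // Base58Check-encoded P2SH address
    }
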
diff --git a/src/rpcserver.cpp b/src/rpcserver.cpp
index 1a41344da5..9668c78831 100644
--- a/src/rpcserver.cpp
+++ b/src/rpcserver.cpp
@@ -581,7 +581,7 @@ void StartRPCThreads()
strWhatAmI,
GetConfigFile().string(),
EncodeBase58(&rand_pwd[0],&rand_pwd[0]+32)),
- "", CClientUIInterface::MSG_ERROR);
+ "", CClientUIInterface::MSG_ERROR | CClientUIInterface::SECURE);
StartShutdown();
return;
}
diff --git a/src/rpcwallet.cpp b/src/rpcwallet.cpp
index d11455e389..68bb4068b8 100644
--- a/src/rpcwallet.cpp
+++ b/src/rpcwallet.cpp
@@ -918,7 +918,7 @@ Value addmultisigaddress(const Array& params, bool fHelp)
// Construct using pay-to-script-hash:
CScript inner = _createmultisig_redeemScript(params);
- CScriptID innerID = inner.GetID();
+ CScriptID innerID(inner);
pwalletMain->AddCScript(inner);
pwalletMain->SetAddressBook(innerID, strAccount, "send");
diff --git a/src/script/compressor.cpp b/src/script/compressor.cpp
index 51a3cf6025..af1acf48db 100644
--- a/src/script/compressor.cpp
+++ b/src/script/compressor.cpp
@@ -5,6 +5,9 @@
#include "compressor.h"
+#include "key.h"
+#include "script/standard.h"
+
bool CScriptCompressor::IsToKeyID(CKeyID &hash) const
{
if (script.size() == 25 && script[0] == OP_DUP && script[1] == OP_HASH160
diff --git a/src/script/compressor.h b/src/script/compressor.h
index 53c6bf3ecc..154e0b2662 100644
--- a/src/script/compressor.h
+++ b/src/script/compressor.h
@@ -7,6 +7,11 @@
#define H_BITCOIN_SCRIPT_COMPRESSOR
#include "script/script.h"
+#include "serialize.h"
+
+class CKeyID;
+class CPubKey;
+class CScriptID;
/** Compact serializer for scripts.
*
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index ae66217b7c..cd73b88210 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -9,6 +9,7 @@
#include "crypto/ripemd160.h"
#include "crypto/sha1.h"
#include "crypto/sha2.h"
+#include "key.h"
#include "script/script.h"
#include "uint256.h"
#include "util.h"
diff --git a/src/script/script.cpp b/src/script/script.cpp
index a5126e7cc2..3e19d0c2bf 100644
--- a/src/script/script.cpp
+++ b/src/script/script.cpp
@@ -5,7 +5,18 @@
#include "script.h"
-#include <boost/foreach.hpp>
+#include "tinyformat.h"
+#include "utilstrencodings.h"
+
+namespace {
+inline std::string ValueString(const std::vector<unsigned char>& vch)
+{
+ if (vch.size() <= 4)
+ return strprintf("%d", CScriptNum(vch).getint());
+ else
+ return HexStr(vch);
+}
+} // anon namespace
using namespace std;
@@ -253,3 +264,26 @@ bool CScript::HasCanonicalPushes() const
}
return true;
}
+
+std::string CScript::ToString() const
+{
+ std::string str;
+ opcodetype opcode;
+ std::vector<unsigned char> vch;
+ const_iterator pc = begin();
+ while (pc < end())
+ {
+ if (!str.empty())
+ str += " ";
+ if (!GetOp(pc, opcode, vch))
+ {
+ str += "[error]";
+ return str;
+ }
+ if (0 <= opcode && opcode <= OP_PUSHDATA4)
+ str += ValueString(vch);
+ else
+ str += GetOpName(opcode);
+ }
+ return str;
+}
diff --git a/src/script/script.h b/src/script/script.h
index caf176476f..d450db5cad 100644
--- a/src/script/script.h
+++ b/src/script/script.h
@@ -6,16 +6,23 @@
#ifndef H_BITCOIN_SCRIPT
#define H_BITCOIN_SCRIPT
-#include "key.h"
-#include "tinyformat.h"
-#include "utilstrencodings.h"
-
+#include <assert.h>
+#include <climits>
+#include <limits>
#include <stdexcept>
-
-#include <boost/variant.hpp>
+#include <stdint.h>
+#include <string.h>
+#include <string>
+#include <vector>
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE = 520; // bytes
+template <typename T>
+std::vector<unsigned char> ToByteVector(const T& in)
+{
+ return std::vector<unsigned char>(in.begin(), in.end());
+}
+
/** Script opcodes */
enum opcodetype
{
@@ -312,13 +319,6 @@ private:
int64_t m_value;
};
-inline std::string ValueString(const std::vector<unsigned char>& vch)
-{
- if (vch.size() <= 4)
- return strprintf("%d", CScriptNum(vch).getint());
- else
- return HexStr(vch);
-}
/** Serialized script, used inside transaction inputs and outputs */
class CScript : public std::vector<unsigned char>
@@ -358,7 +358,6 @@ public:
CScript(int64_t b) { operator<<(b); }
explicit CScript(opcodetype b) { operator<<(b); }
- explicit CScript(const uint256& b) { operator<<(b); }
explicit CScript(const CScriptNum& b) { operator<<(b); }
explicit CScript(const std::vector<unsigned char>& b) { operator<<(b); }
@@ -373,28 +372,6 @@ public:
return *this;
}
- CScript& operator<<(const uint160& b)
- {
- insert(end(), sizeof(b));
- insert(end(), (unsigned char*)&b, (unsigned char*)&b + sizeof(b));
- return *this;
- }
-
- CScript& operator<<(const uint256& b)
- {
- insert(end(), sizeof(b));
- insert(end(), (unsigned char*)&b, (unsigned char*)&b + sizeof(b));
- return *this;
- }
-
- CScript& operator<<(const CPubKey& key)
- {
- assert(key.size() < OP_PUSHDATA1);
- insert(end(), (unsigned char)key.size());
- insert(end(), key.begin(), key.end());
- return *this;
- }
-
CScript& operator<<(const CScriptNum& b)
{
*this << b.getvch();
@@ -588,34 +565,7 @@ public:
return (size() > 0 && *begin() == OP_RETURN);
}
- std::string ToString() const
- {
- std::string str;
- opcodetype opcode;
- std::vector<unsigned char> vch;
- const_iterator pc = begin();
- while (pc < end())
- {
- if (!str.empty())
- str += " ";
- if (!GetOp(pc, opcode, vch))
- {
- str += "[error]";
- return str;
- }
- if (0 <= opcode && opcode <= OP_PUSHDATA4)
- str += ValueString(vch);
- else
- str += GetOpName(opcode);
- }
- return str;
- }
-
- CScriptID GetID() const
- {
- return CScriptID(Hash160(*this));
- }
-
+ std::string ToString() const;
void clear()
{
// The default std::vector::clear() does not release memory.
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index da77e7d1f1..bf98c40394 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -78,7 +78,7 @@ bool Solver(const CKeyStore& keystore, const CScript& scriptPubKey, uint256 hash
{
CPubKey vch;
keystore.GetPubKey(keyID, vch);
- scriptSigRet << vch;
+ scriptSigRet << ToByteVector(vch);
}
return true;
case TX_SCRIPTHASH:
diff --git a/src/script/standard.cpp b/src/script/standard.cpp
index 53ae254d59..05938961bc 100644
--- a/src/script/standard.cpp
+++ b/src/script/standard.cpp
@@ -7,6 +7,7 @@
#include "script/script.h"
#include "util.h"
+#include "utilstrencodings.h"
#include <boost/foreach.hpp>
@@ -14,6 +15,8 @@ using namespace std;
typedef vector<unsigned char> valtype;
+CScriptID::CScriptID(const CScript& in) : uint160(in.size() ? Hash160(in.begin(), in.end()) : 0) {}
+
const char* GetTxnOutputType(txnouttype t)
{
switch (t)
@@ -280,13 +283,13 @@ public:
bool operator()(const CKeyID &keyID) const {
script->clear();
- *script << OP_DUP << OP_HASH160 << keyID << OP_EQUALVERIFY << OP_CHECKSIG;
+ *script << OP_DUP << OP_HASH160 << ToByteVector(keyID) << OP_EQUALVERIFY << OP_CHECKSIG;
return true;
}
bool operator()(const CScriptID &scriptID) const {
script->clear();
- *script << OP_HASH160 << scriptID << OP_EQUAL;
+ *script << OP_HASH160 << ToByteVector(scriptID) << OP_EQUAL;
return true;
}
};
@@ -306,7 +309,7 @@ CScript GetScriptForMultisig(int nRequired, const std::vector<CPubKey>& keys)
script << CScript::EncodeOP_N(nRequired);
BOOST_FOREACH(const CPubKey& key, keys)
- script << key;
+ script << ToByteVector(key);
script << CScript::EncodeOP_N(keys.size()) << OP_CHECKMULTISIG;
return script;
}
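
Scripts are now assembled by converting key and script hashes to plain byte vectors first: the uint160/uint256/CPubKey stream operators are gone, and ToByteVector() feeds CScript::operator<<, which emits the proper push opcode. A short sketch mirroring the CScriptVisitor change above (function name illustrative):

    #include "script/script.h"
    #include "script/standard.h" // CKeyID via key.h

    CScript P2PKHScript(const CKeyID& keyID)
    {
        CScript script;
        script << OP_DUP << OP_HASH160 << ToByteVector(keyID) << OP_EQUALVERIFY << OP_CHECKSIG;
        return script;
    }
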
diff --git a/src/script/standard.h b/src/script/standard.h
index ead79b82a2..961b214c89 100644
--- a/src/script/standard.h
+++ b/src/script/standard.h
@@ -6,13 +6,25 @@
#ifndef H_BITCOIN_SCRIPT_STANDARD
#define H_BITCOIN_SCRIPT_STANDARD
+#include "key.h"
#include "script/script.h"
#include "script/interpreter.h"
+#include <boost/variant.hpp>
+
#include <stdint.h>
class CScript;
+/** A reference to a CScript: the Hash160 of its serialization (see script.h) */
+class CScriptID : public uint160
+{
+public:
+ CScriptID() : uint160(0) {}
+ CScriptID(const CScript& in);
+ CScriptID(const uint160& in) : uint160(in) {}
+};
+
static const unsigned int MAX_OP_RETURN_RELAY = 40; // bytes
// Mandatory script verification flags that all new blocks must comply with for
diff --git a/src/serialize.h b/src/serialize.h
index ff11edc06c..877ef8640a 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -6,8 +6,6 @@
#ifndef BITCOIN_SERIALIZE_H
#define BITCOIN_SERIALIZE_H
-#include "allocators.h"
-
#include <algorithm>
#include <assert.h>
#include <ios>
@@ -20,11 +18,6 @@
#include <utility>
#include <vector>
-#include <boost/tuple/tuple.hpp>
-#include <boost/type_traits/is_fundamental.hpp>
-
-class CAutoFile;
-class CDataStream;
class CScript;
static const unsigned int MAX_SIZE = 0x02000000;
@@ -432,14 +425,15 @@ template<typename Stream, typename C> void Serialize(Stream& os, const std::basi
template<typename Stream, typename C> void Unserialize(Stream& is, std::basic_string<C>& str, int, int=0);
// vector
-template<typename T, typename A> unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const boost::true_type&);
-template<typename T, typename A> unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const boost::false_type&);
+// vectors of unsigned char are a special case and are intended to be serialized as a single opaque blob.
+template<typename T, typename A> unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const unsigned char&);
+template<typename T, typename A, typename V> unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const V&);
template<typename T, typename A> inline unsigned int GetSerializeSize(const std::vector<T, A>& v, int nType, int nVersion);
-template<typename Stream, typename T, typename A> void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const boost::true_type&);
-template<typename Stream, typename T, typename A> void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const boost::false_type&);
+template<typename Stream, typename T, typename A> void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const unsigned char&);
+template<typename Stream, typename T, typename A, typename V> void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const V&);
template<typename Stream, typename T, typename A> inline void Serialize(Stream& os, const std::vector<T, A>& v, int nType, int nVersion);
-template<typename Stream, typename T, typename A> void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const boost::true_type&);
-template<typename Stream, typename T, typename A> void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const boost::false_type&);
+template<typename Stream, typename T, typename A> void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const unsigned char&);
+template<typename Stream, typename T, typename A, typename V> void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const V&);
template<typename Stream, typename T, typename A> inline void Unserialize(Stream& is, std::vector<T, A>& v, int nType, int nVersion);
// others derived from vector
@@ -452,16 +446,6 @@ template<typename K, typename T> unsigned int GetSerializeSize(const std::pair<K
template<typename Stream, typename K, typename T> void Serialize(Stream& os, const std::pair<K, T>& item, int nType, int nVersion);
template<typename Stream, typename K, typename T> void Unserialize(Stream& is, std::pair<K, T>& item, int nType, int nVersion);
-// 3 tuple
-template<typename T0, typename T1, typename T2> unsigned int GetSerializeSize(const boost::tuple<T0, T1, T2>& item, int nType, int nVersion);
-template<typename Stream, typename T0, typename T1, typename T2> void Serialize(Stream& os, const boost::tuple<T0, T1, T2>& item, int nType, int nVersion);
-template<typename Stream, typename T0, typename T1, typename T2> void Unserialize(Stream& is, boost::tuple<T0, T1, T2>& item, int nType, int nVersion);
-
-// 4 tuple
-template<typename T0, typename T1, typename T2, typename T3> unsigned int GetSerializeSize(const boost::tuple<T0, T1, T2, T3>& item, int nType, int nVersion);
-template<typename Stream, typename T0, typename T1, typename T2, typename T3> void Serialize(Stream& os, const boost::tuple<T0, T1, T2, T3>& item, int nType, int nVersion);
-template<typename Stream, typename T0, typename T1, typename T2, typename T3> void Unserialize(Stream& is, boost::tuple<T0, T1, T2, T3>& item, int nType, int nVersion);
-
// map
template<typename K, typename T, typename Pred, typename A> unsigned int GetSerializeSize(const std::map<K, T, Pred, A>& m, int nType, int nVersion);
template<typename Stream, typename K, typename T, typename Pred, typename A> void Serialize(Stream& os, const std::map<K, T, Pred, A>& m, int nType, int nVersion);
@@ -536,13 +520,13 @@ void Unserialize(Stream& is, std::basic_string<C>& str, int, int)
// vector
//
template<typename T, typename A>
-unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const boost::true_type&)
+unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const unsigned char&)
{
return (GetSizeOfCompactSize(v.size()) + v.size() * sizeof(T));
}
-template<typename T, typename A>
-unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const boost::false_type&)
+template<typename T, typename A, typename V>
+unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nVersion, const V&)
{
unsigned int nSize = GetSizeOfCompactSize(v.size());
for (typename std::vector<T, A>::const_iterator vi = v.begin(); vi != v.end(); ++vi)
@@ -553,20 +537,20 @@ unsigned int GetSerializeSize_impl(const std::vector<T, A>& v, int nType, int nV
template<typename T, typename A>
inline unsigned int GetSerializeSize(const std::vector<T, A>& v, int nType, int nVersion)
{
- return GetSerializeSize_impl(v, nType, nVersion, boost::is_fundamental<T>());
+ return GetSerializeSize_impl(v, nType, nVersion, T());
}
template<typename Stream, typename T, typename A>
-void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const boost::true_type&)
+void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const unsigned char&)
{
WriteCompactSize(os, v.size());
if (!v.empty())
os.write((char*)&v[0], v.size() * sizeof(T));
}
-template<typename Stream, typename T, typename A>
-void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const boost::false_type&)
+template<typename Stream, typename T, typename A, typename V>
+void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVersion, const V&)
{
WriteCompactSize(os, v.size());
for (typename std::vector<T, A>::const_iterator vi = v.begin(); vi != v.end(); ++vi)
@@ -576,12 +560,12 @@ void Serialize_impl(Stream& os, const std::vector<T, A>& v, int nType, int nVers
template<typename Stream, typename T, typename A>
inline void Serialize(Stream& os, const std::vector<T, A>& v, int nType, int nVersion)
{
- Serialize_impl(os, v, nType, nVersion, boost::is_fundamental<T>());
+ Serialize_impl(os, v, nType, nVersion, T());
}
template<typename Stream, typename T, typename A>
-void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const boost::true_type&)
+void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const unsigned char&)
{
// Limit size per read so bogus size value won't cause out of memory
v.clear();
@@ -596,8 +580,8 @@ void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion,
}
}
-template<typename Stream, typename T, typename A>
-void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const boost::false_type&)
+template<typename Stream, typename T, typename A, typename V>
+void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion, const V&)
{
v.clear();
unsigned int nSize = ReadCompactSize(is);
@@ -617,7 +601,7 @@ void Unserialize_impl(Stream& is, std::vector<T, A>& v, int nType, int nVersion,
template<typename Stream, typename T, typename A>
inline void Unserialize(Stream& is, std::vector<T, A>& v, int nType, int nVersion)
{
- Unserialize_impl(is, v, nType, nVersion, boost::is_fundamental<T>());
+ Unserialize_impl(is, v, nType, nVersion, T());
}
@@ -670,71 +654,6 @@ void Unserialize(Stream& is, std::pair<K, T>& item, int nType, int nVersion)
//
-// 3 tuple
-//
-template<typename T0, typename T1, typename T2>
-unsigned int GetSerializeSize(const boost::tuple<T0, T1, T2>& item, int nType, int nVersion)
-{
- unsigned int nSize = 0;
- nSize += GetSerializeSize(boost::get<0>(item), nType, nVersion);
- nSize += GetSerializeSize(boost::get<1>(item), nType, nVersion);
- nSize += GetSerializeSize(boost::get<2>(item), nType, nVersion);
- return nSize;
-}
-
-template<typename Stream, typename T0, typename T1, typename T2>
-void Serialize(Stream& os, const boost::tuple<T0, T1, T2>& item, int nType, int nVersion)
-{
- Serialize(os, boost::get<0>(item), nType, nVersion);
- Serialize(os, boost::get<1>(item), nType, nVersion);
- Serialize(os, boost::get<2>(item), nType, nVersion);
-}
-
-template<typename Stream, typename T0, typename T1, typename T2>
-void Unserialize(Stream& is, boost::tuple<T0, T1, T2>& item, int nType, int nVersion)
-{
- Unserialize(is, boost::get<0>(item), nType, nVersion);
- Unserialize(is, boost::get<1>(item), nType, nVersion);
- Unserialize(is, boost::get<2>(item), nType, nVersion);
-}
-
-
-
-//
-// 4 tuple
-//
-template<typename T0, typename T1, typename T2, typename T3>
-unsigned int GetSerializeSize(const boost::tuple<T0, T1, T2, T3>& item, int nType, int nVersion)
-{
- unsigned int nSize = 0;
- nSize += GetSerializeSize(boost::get<0>(item), nType, nVersion);
- nSize += GetSerializeSize(boost::get<1>(item), nType, nVersion);
- nSize += GetSerializeSize(boost::get<2>(item), nType, nVersion);
- nSize += GetSerializeSize(boost::get<3>(item), nType, nVersion);
- return nSize;
-}
-
-template<typename Stream, typename T0, typename T1, typename T2, typename T3>
-void Serialize(Stream& os, const boost::tuple<T0, T1, T2, T3>& item, int nType, int nVersion)
-{
- Serialize(os, boost::get<0>(item), nType, nVersion);
- Serialize(os, boost::get<1>(item), nType, nVersion);
- Serialize(os, boost::get<2>(item), nType, nVersion);
- Serialize(os, boost::get<3>(item), nType, nVersion);
-}
-
-template<typename Stream, typename T0, typename T1, typename T2, typename T3>
-void Unserialize(Stream& is, boost::tuple<T0, T1, T2, T3>& item, int nType, int nVersion)
-{
- Unserialize(is, boost::get<0>(item), nType, nVersion);
- Unserialize(is, boost::get<1>(item), nType, nVersion);
- Unserialize(is, boost::get<2>(item), nType, nVersion);
- Unserialize(is, boost::get<3>(item), nType, nVersion);
-}
-
-
-
-//
// map
//
template<typename K, typename T, typename Pred, typename A>
@@ -838,8 +757,6 @@ inline void SerReadWrite(Stream& s, T& obj, int nType, int nVersion, CSerActionU
-typedef std::vector<char, zero_after_free_allocator<char> > CSerializeData;
-
class CSizeComputer
{
protected:
@@ -869,544 +786,4 @@ public:
}
};
-/** Double ended buffer combining vector and stream-like interfaces.
- *
- * >> and << read and write unformatted data using the above serialization templates.
- * Fills with data in linear time; some stringstream implementations take N^2 time.
- */
-class CDataStream
-{
-protected:
- typedef CSerializeData vector_type;
- vector_type vch;
- unsigned int nReadPos;
-public:
- int nType;
- int nVersion;
-
- typedef vector_type::allocator_type allocator_type;
- typedef vector_type::size_type size_type;
- typedef vector_type::difference_type difference_type;
- typedef vector_type::reference reference;
- typedef vector_type::const_reference const_reference;
- typedef vector_type::value_type value_type;
- typedef vector_type::iterator iterator;
- typedef vector_type::const_iterator const_iterator;
- typedef vector_type::reverse_iterator reverse_iterator;
-
- explicit CDataStream(int nTypeIn, int nVersionIn)
- {
- Init(nTypeIn, nVersionIn);
- }
-
- CDataStream(const_iterator pbegin, const_iterator pend, int nTypeIn, int nVersionIn) : vch(pbegin, pend)
- {
- Init(nTypeIn, nVersionIn);
- }
-
-#if !defined(_MSC_VER) || _MSC_VER >= 1300
- CDataStream(const char* pbegin, const char* pend, int nTypeIn, int nVersionIn) : vch(pbegin, pend)
- {
- Init(nTypeIn, nVersionIn);
- }
-#endif
-
- CDataStream(const vector_type& vchIn, int nTypeIn, int nVersionIn) : vch(vchIn.begin(), vchIn.end())
- {
- Init(nTypeIn, nVersionIn);
- }
-
- CDataStream(const std::vector<char>& vchIn, int nTypeIn, int nVersionIn) : vch(vchIn.begin(), vchIn.end())
- {
- Init(nTypeIn, nVersionIn);
- }
-
- CDataStream(const std::vector<unsigned char>& vchIn, int nTypeIn, int nVersionIn) : vch(vchIn.begin(), vchIn.end())
- {
- Init(nTypeIn, nVersionIn);
- }
-
- void Init(int nTypeIn, int nVersionIn)
- {
- nReadPos = 0;
- nType = nTypeIn;
- nVersion = nVersionIn;
- }
-
- CDataStream& operator+=(const CDataStream& b)
- {
- vch.insert(vch.end(), b.begin(), b.end());
- return *this;
- }
-
- friend CDataStream operator+(const CDataStream& a, const CDataStream& b)
- {
- CDataStream ret = a;
- ret += b;
- return (ret);
- }
-
- std::string str() const
- {
- return (std::string(begin(), end()));
- }
-
-
- //
- // Vector subset
- //
- const_iterator begin() const { return vch.begin() + nReadPos; }
- iterator begin() { return vch.begin() + nReadPos; }
- const_iterator end() const { return vch.end(); }
- iterator end() { return vch.end(); }
- size_type size() const { return vch.size() - nReadPos; }
- bool empty() const { return vch.size() == nReadPos; }
- void resize(size_type n, value_type c=0) { vch.resize(n + nReadPos, c); }
- void reserve(size_type n) { vch.reserve(n + nReadPos); }
- const_reference operator[](size_type pos) const { return vch[pos + nReadPos]; }
- reference operator[](size_type pos) { return vch[pos + nReadPos]; }
- void clear() { vch.clear(); nReadPos = 0; }
- iterator insert(iterator it, const char& x=char()) { return vch.insert(it, x); }
- void insert(iterator it, size_type n, const char& x) { vch.insert(it, n, x); }
-
- void insert(iterator it, std::vector<char>::const_iterator first, std::vector<char>::const_iterator last)
- {
- assert(last - first >= 0);
- if (it == vch.begin() + nReadPos && (unsigned int)(last - first) <= nReadPos)
- {
- // special case for inserting at the front when there's room
- nReadPos -= (last - first);
- memcpy(&vch[nReadPos], &first[0], last - first);
- }
- else
- vch.insert(it, first, last);
- }
-
-#if !defined(_MSC_VER) || _MSC_VER >= 1300
- void insert(iterator it, const char* first, const char* last)
- {
- assert(last - first >= 0);
- if (it == vch.begin() + nReadPos && (unsigned int)(last - first) <= nReadPos)
- {
- // special case for inserting at the front when there's room
- nReadPos -= (last - first);
- memcpy(&vch[nReadPos], &first[0], last - first);
- }
- else
- vch.insert(it, first, last);
- }
-#endif
-
- iterator erase(iterator it)
- {
- if (it == vch.begin() + nReadPos)
- {
- // special case for erasing from the front
- if (++nReadPos >= vch.size())
- {
- // whenever we reach the end, we take the opportunity to clear the buffer
- nReadPos = 0;
- return vch.erase(vch.begin(), vch.end());
- }
- return vch.begin() + nReadPos;
- }
- else
- return vch.erase(it);
- }
-
- iterator erase(iterator first, iterator last)
- {
- if (first == vch.begin() + nReadPos)
- {
- // special case for erasing from the front
- if (last == vch.end())
- {
- nReadPos = 0;
- return vch.erase(vch.begin(), vch.end());
- }
- else
- {
- nReadPos = (last - vch.begin());
- return last;
- }
- }
- else
- return vch.erase(first, last);
- }
-
- inline void Compact()
- {
- vch.erase(vch.begin(), vch.begin() + nReadPos);
- nReadPos = 0;
- }
-
- bool Rewind(size_type n)
- {
- // Rewind by n characters if the buffer hasn't been compacted yet
- if (n > nReadPos)
- return false;
- nReadPos -= n;
- return true;
- }
-
-
- //
- // Stream subset
- //
- bool eof() const { return size() == 0; }
- CDataStream* rdbuf() { return this; }
- int in_avail() { return size(); }
-
- void SetType(int n) { nType = n; }
- int GetType() { return nType; }
- void SetVersion(int n) { nVersion = n; }
- int GetVersion() { return nVersion; }
- void ReadVersion() { *this >> nVersion; }
- void WriteVersion() { *this << nVersion; }
-
- CDataStream& read(char* pch, size_t nSize)
- {
- // Read from the beginning of the buffer
- unsigned int nReadPosNext = nReadPos + nSize;
- if (nReadPosNext >= vch.size())
- {
- if (nReadPosNext > vch.size())
- {
- throw std::ios_base::failure("CDataStream::read() : end of data");
- }
- memcpy(pch, &vch[nReadPos], nSize);
- nReadPos = 0;
- vch.clear();
- return (*this);
- }
- memcpy(pch, &vch[nReadPos], nSize);
- nReadPos = nReadPosNext;
- return (*this);
- }
-
- CDataStream& ignore(int nSize)
- {
- // Ignore from the beginning of the buffer
- assert(nSize >= 0);
- unsigned int nReadPosNext = nReadPos + nSize;
- if (nReadPosNext >= vch.size())
- {
- if (nReadPosNext > vch.size())
- throw std::ios_base::failure("CDataStream::ignore() : end of data");
- nReadPos = 0;
- vch.clear();
- return (*this);
- }
- nReadPos = nReadPosNext;
- return (*this);
- }
-
- CDataStream& write(const char* pch, size_t nSize)
- {
- // Write to the end of the buffer
- vch.insert(vch.end(), pch, pch + nSize);
- return (*this);
- }
-
- template<typename Stream>
- void Serialize(Stream& s, int nType, int nVersion) const
- {
- // Special case: stream << stream concatenates like stream += stream
- if (!vch.empty())
- s.write((char*)&vch[0], vch.size() * sizeof(vch[0]));
- }
-
- template<typename T>
- unsigned int GetSerializeSize(const T& obj)
- {
- // Tells the size of the object if serialized to this stream
- return ::GetSerializeSize(obj, nType, nVersion);
- }
-
- template<typename T>
- CDataStream& operator<<(const T& obj)
- {
- // Serialize to this stream
- ::Serialize(*this, obj, nType, nVersion);
- return (*this);
- }
-
- template<typename T>
- CDataStream& operator>>(T& obj)
- {
- // Unserialize from this stream
- ::Unserialize(*this, obj, nType, nVersion);
- return (*this);
- }
-
- void GetAndClear(CSerializeData &data) {
- data.insert(data.end(), begin(), end());
- clear();
- }
-};
-
-
-
-
-
-
-
-
-
-
-/** Non-refcounted RAII wrapper for FILE*
- *
- * Will automatically close the file when it goes out of scope if not null.
- * If you're returning the file pointer, return file.release().
- * If you need to close the file early, use file.fclose() instead of fclose(file).
- */
-class CAutoFile
-{
-private:
- // Disallow copies
- CAutoFile(const CAutoFile&);
- CAutoFile& operator=(const CAutoFile&);
-
- int nType;
- int nVersion;
-
- FILE* file;
-
-public:
- CAutoFile(FILE* filenew, int nTypeIn, int nVersionIn)
- {
- file = filenew;
- nType = nTypeIn;
- nVersion = nVersionIn;
- }
-
- ~CAutoFile()
- {
- fclose();
- }
-
- void fclose()
- {
- if (file) {
- ::fclose(file);
- file = NULL;
- }
- }
-
- FILE* release() { FILE* ret = file; file = NULL; return ret; }
- operator FILE*() { return file; }
- FILE* operator->() { return file; }
- FILE& operator*() { return *file; }
- FILE** operator&() { return &file; }
- FILE* operator=(FILE* pnew) { return file = pnew; }
- bool operator!() { return (file == NULL); }
-
-
- //
- // Stream subset
- //
- void SetType(int n) { nType = n; }
- int GetType() { return nType; }
- void SetVersion(int n) { nVersion = n; }
- int GetVersion() { return nVersion; }
- void ReadVersion() { *this >> nVersion; }
- void WriteVersion() { *this << nVersion; }
-
- CAutoFile& read(char* pch, size_t nSize)
- {
- if (!file)
- throw std::ios_base::failure("CAutoFile::read : file handle is NULL");
- if (fread(pch, 1, nSize, file) != nSize)
- throw std::ios_base::failure(feof(file) ? "CAutoFile::read : end of file" : "CAutoFile::read : fread failed");
- return (*this);
- }
-
- CAutoFile& write(const char* pch, size_t nSize)
- {
- if (!file)
- throw std::ios_base::failure("CAutoFile::write : file handle is NULL");
- if (fwrite(pch, 1, nSize, file) != nSize)
- throw std::ios_base::failure("CAutoFile::write : write failed");
- return (*this);
- }
-
- template<typename T>
- unsigned int GetSerializeSize(const T& obj)
- {
- // Tells the size of the object if serialized to this stream
- return ::GetSerializeSize(obj, nType, nVersion);
- }
-
- template<typename T>
- CAutoFile& operator<<(const T& obj)
- {
- // Serialize to this stream
- if (!file)
- throw std::ios_base::failure("CAutoFile::operator<< : file handle is NULL");
- ::Serialize(*this, obj, nType, nVersion);
- return (*this);
- }
-
- template<typename T>
- CAutoFile& operator>>(T& obj)
- {
- // Unserialize from this stream
- if (!file)
- throw std::ios_base::failure("CAutoFile::operator>> : file handle is NULL");
- ::Unserialize(*this, obj, nType, nVersion);
- return (*this);
- }
-};
-
-/** Non-refcounted RAII wrapper around a FILE* that implements a ring buffer to
- * deserialize from. It guarantees the ability to rewind a given number of bytes.
- *
- * Will automatically close the file when it goes out of scope if not null.
- * If you need to close the file early, use file.fclose() instead of fclose(file).
- */
-class CBufferedFile
-{
-private:
- // Disallow copies
- CBufferedFile(const CBufferedFile&);
- CBufferedFile& operator=(const CBufferedFile&);
-
- int nType;
- int nVersion;
-
- FILE *src; // source file
- uint64_t nSrcPos; // how many bytes have been read from source
- uint64_t nReadPos; // how many bytes have been read from this
- uint64_t nReadLimit; // up to which position we're allowed to read
- uint64_t nRewind; // how many bytes we guarantee to rewind
- std::vector<char> vchBuf; // the buffer
-
-protected:
- // read data from the source to fill the buffer
- bool Fill() {
- unsigned int pos = nSrcPos % vchBuf.size();
- unsigned int readNow = vchBuf.size() - pos;
- unsigned int nAvail = vchBuf.size() - (nSrcPos - nReadPos) - nRewind;
- if (nAvail < readNow)
- readNow = nAvail;
- if (readNow == 0)
- return false;
- size_t read = fread((void*)&vchBuf[pos], 1, readNow, src);
- if (read == 0) {
- throw std::ios_base::failure(feof(src) ? "CBufferedFile::Fill : end of file" : "CBufferedFile::Fill : fread failed");
- } else {
- nSrcPos += read;
- return true;
- }
- }
-
-public:
- CBufferedFile(FILE *fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nTypeIn, int nVersionIn) :
- nSrcPos(0), nReadPos(0), nReadLimit((uint64_t)(-1)), nRewind(nRewindIn), vchBuf(nBufSize, 0)
- {
- src = fileIn;
- nType = nTypeIn;
- nVersion = nVersionIn;
- }
-
- ~CBufferedFile()
- {
- fclose();
- }
-
- void fclose()
- {
- if (src) {
- ::fclose(src);
- src = NULL;
- }
- }
-
- // check whether we're at the end of the source file
- bool eof() const {
- return nReadPos == nSrcPos && feof(src);
- }
-
- // read a number of bytes
- CBufferedFile& read(char *pch, size_t nSize) {
- if (nSize + nReadPos > nReadLimit)
- throw std::ios_base::failure("Read attempted past buffer limit");
- if (nSize + nRewind > vchBuf.size())
- throw std::ios_base::failure("Read larger than buffer size");
- while (nSize > 0) {
- if (nReadPos == nSrcPos)
- Fill();
- unsigned int pos = nReadPos % vchBuf.size();
- size_t nNow = nSize;
- if (nNow + pos > vchBuf.size())
- nNow = vchBuf.size() - pos;
- if (nNow + nReadPos > nSrcPos)
- nNow = nSrcPos - nReadPos;
- memcpy(pch, &vchBuf[pos], nNow);
- nReadPos += nNow;
- pch += nNow;
- nSize -= nNow;
- }
- return (*this);
- }
-
- // return the current reading position
- uint64_t GetPos() {
- return nReadPos;
- }
-
- // rewind to a given reading position
- bool SetPos(uint64_t nPos) {
- nReadPos = nPos;
- if (nReadPos + nRewind < nSrcPos) {
- nReadPos = nSrcPos - nRewind;
- return false;
- } else if (nReadPos > nSrcPos) {
- nReadPos = nSrcPos;
- return false;
- } else {
- return true;
- }
- }
-
- bool Seek(uint64_t nPos) {
- long nLongPos = nPos;
- if (nPos != (uint64_t)nLongPos)
- return false;
- if (fseek(src, nLongPos, SEEK_SET))
- return false;
- nLongPos = ftell(src);
- nSrcPos = nLongPos;
- nReadPos = nLongPos;
- return true;
- }
-
- // prevent reading beyond a certain position
- // no argument removes the limit
- bool SetLimit(uint64_t nPos = (uint64_t)(-1)) {
- if (nPos < nReadPos)
- return false;
- nReadLimit = nPos;
- return true;
- }
-
- template<typename T>
- CBufferedFile& operator>>(T& obj) {
- // Unserialize from this stream
- ::Unserialize(*this, obj, nType, nVersion);
- return (*this);
- }
-
- // search for a given byte in the stream, and remain positioned on it
- void FindByte(char ch) {
- while (true) {
- if (nReadPos == nSrcPos)
- Fill();
- if (vchBuf[nReadPos % vchBuf.size()] == ch)
- break;
- nReadPos++;
- }
- }
-};
-
#endif // BITCOIN_SERIALIZE_H
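
The vector serializers no longer key their fast path on boost::is_fundamental; instead a dummy value of the element type is passed so overload resolution selects the opaque-blob path only for std::vector<unsigned char>. A sketch of that dispatch pattern, with illustrative names rather than the serialize.h identifiers:

    #include <cstdio>
    #include <vector>

    template<typename T, typename A>
    void WriteVector_impl(const std::vector<T, A>& v, const unsigned char&)
    {
        std::printf("blob of %zu bytes\n", v.size());       // fast path: one contiguous write
    }

    template<typename T, typename A, typename V>
    void WriteVector_impl(const std::vector<T, A>& v, const V&)
    {
        std::printf("element-wise, %zu items\n", v.size()); // generic path: per-element serialization
    }

    template<typename T, typename A>
    void WriteVector(const std::vector<T, A>& v)
    {
        WriteVector_impl(v, T()); // dummy T() picks the overload at compile time
    }
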
diff --git a/src/streams.h b/src/streams.h
new file mode 100644
index 0000000000..b07b11eb3d
--- /dev/null
+++ b/src/streams.h
@@ -0,0 +1,571 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2013 The Bitcoin developers
+// Distributed under the MIT/X11 software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_STREAMS_H
+#define BITCOIN_STREAMS_H
+
+#include "allocators.h"
+#include "serialize.h"
+
+#include <algorithm>
+#include <assert.h>
+#include <ios>
+#include <limits>
+#include <map>
+#include <set>
+#include <stdint.h>
+#include <string>
+#include <string.h>
+#include <utility>
+#include <vector>
+
+/** Double ended buffer combining vector and stream-like interfaces.
+ *
+ * >> and << read and write unformatted data using the above serialization templates.
+ * Fills with data in linear time; some stringstream implementations take N^2 time.
+ */
+class CDataStream
+{
+protected:
+ typedef CSerializeData vector_type;
+ vector_type vch;
+ unsigned int nReadPos;
+public:
+ int nType;
+ int nVersion;
+
+ typedef vector_type::allocator_type allocator_type;
+ typedef vector_type::size_type size_type;
+ typedef vector_type::difference_type difference_type;
+ typedef vector_type::reference reference;
+ typedef vector_type::const_reference const_reference;
+ typedef vector_type::value_type value_type;
+ typedef vector_type::iterator iterator;
+ typedef vector_type::const_iterator const_iterator;
+ typedef vector_type::reverse_iterator reverse_iterator;
+
+ explicit CDataStream(int nTypeIn, int nVersionIn)
+ {
+ Init(nTypeIn, nVersionIn);
+ }
+
+ CDataStream(const_iterator pbegin, const_iterator pend, int nTypeIn, int nVersionIn) : vch(pbegin, pend)
+ {
+ Init(nTypeIn, nVersionIn);
+ }
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1300
+ CDataStream(const char* pbegin, const char* pend, int nTypeIn, int nVersionIn) : vch(pbegin, pend)
+ {
+ Init(nTypeIn, nVersionIn);
+ }
+#endif
+
+ CDataStream(const vector_type& vchIn, int nTypeIn, int nVersionIn) : vch(vchIn.begin(), vchIn.end())
+ {
+ Init(nTypeIn, nVersionIn);
+ }
+
+ CDataStream(const std::vector<char>& vchIn, int nTypeIn, int nVersionIn) : vch(vchIn.begin(), vchIn.end())
+ {
+ Init(nTypeIn, nVersionIn);
+ }
+
+ CDataStream(const std::vector<unsigned char>& vchIn, int nTypeIn, int nVersionIn) : vch(vchIn.begin(), vchIn.end())
+ {
+ Init(nTypeIn, nVersionIn);
+ }
+
+ void Init(int nTypeIn, int nVersionIn)
+ {
+ nReadPos = 0;
+ nType = nTypeIn;
+ nVersion = nVersionIn;
+ }
+
+ CDataStream& operator+=(const CDataStream& b)
+ {
+ vch.insert(vch.end(), b.begin(), b.end());
+ return *this;
+ }
+
+ friend CDataStream operator+(const CDataStream& a, const CDataStream& b)
+ {
+ CDataStream ret = a;
+ ret += b;
+ return (ret);
+ }
+
+ std::string str() const
+ {
+ return (std::string(begin(), end()));
+ }
+
+
+ //
+ // Vector subset
+ //
+ const_iterator begin() const { return vch.begin() + nReadPos; }
+ iterator begin() { return vch.begin() + nReadPos; }
+ const_iterator end() const { return vch.end(); }
+ iterator end() { return vch.end(); }
+ size_type size() const { return vch.size() - nReadPos; }
+ bool empty() const { return vch.size() == nReadPos; }
+ void resize(size_type n, value_type c=0) { vch.resize(n + nReadPos, c); }
+ void reserve(size_type n) { vch.reserve(n + nReadPos); }
+ const_reference operator[](size_type pos) const { return vch[pos + nReadPos]; }
+ reference operator[](size_type pos) { return vch[pos + nReadPos]; }
+ void clear() { vch.clear(); nReadPos = 0; }
+ iterator insert(iterator it, const char& x=char()) { return vch.insert(it, x); }
+ void insert(iterator it, size_type n, const char& x) { vch.insert(it, n, x); }
+
+ void insert(iterator it, std::vector<char>::const_iterator first, std::vector<char>::const_iterator last)
+ {
+ assert(last - first >= 0);
+ if (it == vch.begin() + nReadPos && (unsigned int)(last - first) <= nReadPos)
+ {
+ // special case for inserting at the front when there's room
+ nReadPos -= (last - first);
+ memcpy(&vch[nReadPos], &first[0], last - first);
+ }
+ else
+ vch.insert(it, first, last);
+ }
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1300
+ void insert(iterator it, const char* first, const char* last)
+ {
+ assert(last - first >= 0);
+ if (it == vch.begin() + nReadPos && (unsigned int)(last - first) <= nReadPos)
+ {
+ // special case for inserting at the front when there's room
+ nReadPos -= (last - first);
+ memcpy(&vch[nReadPos], &first[0], last - first);
+ }
+ else
+ vch.insert(it, first, last);
+ }
+#endif
+
+ iterator erase(iterator it)
+ {
+ if (it == vch.begin() + nReadPos)
+ {
+ // special case for erasing from the front
+ if (++nReadPos >= vch.size())
+ {
+ // whenever we reach the end, we take the opportunity to clear the buffer
+ nReadPos = 0;
+ return vch.erase(vch.begin(), vch.end());
+ }
+ return vch.begin() + nReadPos;
+ }
+ else
+ return vch.erase(it);
+ }
+
+ iterator erase(iterator first, iterator last)
+ {
+ if (first == vch.begin() + nReadPos)
+ {
+ // special case for erasing from the front
+ if (last == vch.end())
+ {
+ nReadPos = 0;
+ return vch.erase(vch.begin(), vch.end());
+ }
+ else
+ {
+ nReadPos = (last - vch.begin());
+ return last;
+ }
+ }
+ else
+ return vch.erase(first, last);
+ }
+
+ inline void Compact()
+ {
+ vch.erase(vch.begin(), vch.begin() + nReadPos);
+ nReadPos = 0;
+ }
+
+ bool Rewind(size_type n)
+ {
+ // Rewind by n characters if the buffer hasn't been compacted yet
+ if (n > nReadPos)
+ return false;
+ nReadPos -= n;
+ return true;
+ }
+
+
+ //
+ // Stream subset
+ //
+ bool eof() const { return size() == 0; }
+ CDataStream* rdbuf() { return this; }
+ int in_avail() { return size(); }
+
+ void SetType(int n) { nType = n; }
+ int GetType() { return nType; }
+ void SetVersion(int n) { nVersion = n; }
+ int GetVersion() { return nVersion; }
+ void ReadVersion() { *this >> nVersion; }
+ void WriteVersion() { *this << nVersion; }
+
+ CDataStream& read(char* pch, size_t nSize)
+ {
+ // Read from the beginning of the buffer
+ unsigned int nReadPosNext = nReadPos + nSize;
+ if (nReadPosNext >= vch.size())
+ {
+ if (nReadPosNext > vch.size())
+ {
+ throw std::ios_base::failure("CDataStream::read() : end of data");
+ }
+ memcpy(pch, &vch[nReadPos], nSize);
+ nReadPos = 0;
+ vch.clear();
+ return (*this);
+ }
+ memcpy(pch, &vch[nReadPos], nSize);
+ nReadPos = nReadPosNext;
+ return (*this);
+ }
+
+ CDataStream& ignore(int nSize)
+ {
+ // Ignore from the beginning of the buffer
+ assert(nSize >= 0);
+ unsigned int nReadPosNext = nReadPos + nSize;
+ if (nReadPosNext >= vch.size())
+ {
+ if (nReadPosNext > vch.size())
+ throw std::ios_base::failure("CDataStream::ignore() : end of data");
+ nReadPos = 0;
+ vch.clear();
+ return (*this);
+ }
+ nReadPos = nReadPosNext;
+ return (*this);
+ }
+
+ CDataStream& write(const char* pch, size_t nSize)
+ {
+ // Write to the end of the buffer
+ vch.insert(vch.end(), pch, pch + nSize);
+ return (*this);
+ }
+
+ template<typename Stream>
+ void Serialize(Stream& s, int nType, int nVersion) const
+ {
+ // Special case: stream << stream concatenates like stream += stream
+ if (!vch.empty())
+ s.write((char*)&vch[0], vch.size() * sizeof(vch[0]));
+ }
+
+ template<typename T>
+ unsigned int GetSerializeSize(const T& obj)
+ {
+ // Tells the size of the object if serialized to this stream
+ return ::GetSerializeSize(obj, nType, nVersion);
+ }
+
+ template<typename T>
+ CDataStream& operator<<(const T& obj)
+ {
+ // Serialize to this stream
+ ::Serialize(*this, obj, nType, nVersion);
+ return (*this);
+ }
+
+ template<typename T>
+ CDataStream& operator>>(T& obj)
+ {
+ // Unserialize from this stream
+ ::Unserialize(*this, obj, nType, nVersion);
+ return (*this);
+ }
+
+ void GetAndClear(CSerializeData &data) {
+ data.insert(data.end(), begin(), end());
+ clear();
+ }
+};
+
+
+
+
+
+
+
+
+
+
+/** Non-refcounted RAII wrapper for FILE*
+ *
+ * Will automatically close the file when it goes out of scope if not null.
+ * If you're returning the file pointer, return file.release().
+ * If you need to close the file early, use file.fclose() instead of fclose(file).
+ */
+class CAutoFile
+{
+private:
+ // Disallow copies
+ CAutoFile(const CAutoFile&);
+ CAutoFile& operator=(const CAutoFile&);
+
+ int nType;
+ int nVersion;
+
+ FILE* file;
+
+public:
+ CAutoFile(FILE* filenew, int nTypeIn, int nVersionIn)
+ {
+ file = filenew;
+ nType = nTypeIn;
+ nVersion = nVersionIn;
+ }
+
+ ~CAutoFile()
+ {
+ fclose();
+ }
+
+ void fclose()
+ {
+ if (file) {
+ ::fclose(file);
+ file = NULL;
+ }
+ }
+
+ /** Get wrapped FILE* with transfer of ownership.
+ * @note This will invalidate the CAutoFile object, and makes it the responsibility of the caller
+ * of this function to clean up the returned FILE*.
+ */
+ FILE* release() { FILE* ret = file; file = NULL; return ret; }
+
+ /** Get wrapped FILE* without transfer of ownership.
+ * @note Ownership of the FILE* will remain with this class. Use this only if the scope of the
+ * CAutoFile outlives use of the passed pointer.
+ */
+ FILE* Get() const { return file; }
+
+ /** Return true if the wrapped FILE* is NULL, false otherwise.
+ */
+ bool IsNull() const { return (file == NULL); }
+
+ //
+ // Stream subset
+ //
+ void SetType(int n) { nType = n; }
+ int GetType() { return nType; }
+ void SetVersion(int n) { nVersion = n; }
+ int GetVersion() { return nVersion; }
+ void ReadVersion() { *this >> nVersion; }
+ void WriteVersion() { *this << nVersion; }
+
+ CAutoFile& read(char* pch, size_t nSize)
+ {
+ if (!file)
+ throw std::ios_base::failure("CAutoFile::read : file handle is NULL");
+ if (fread(pch, 1, nSize, file) != nSize)
+ throw std::ios_base::failure(feof(file) ? "CAutoFile::read : end of file" : "CAutoFile::read : fread failed");
+ return (*this);
+ }
+
+ CAutoFile& write(const char* pch, size_t nSize)
+ {
+ if (!file)
+ throw std::ios_base::failure("CAutoFile::write : file handle is NULL");
+ if (fwrite(pch, 1, nSize, file) != nSize)
+ throw std::ios_base::failure("CAutoFile::write : write failed");
+ return (*this);
+ }
+
+ template<typename T>
+ unsigned int GetSerializeSize(const T& obj)
+ {
+ // Tells the size of the object if serialized to this stream
+ return ::GetSerializeSize(obj, nType, nVersion);
+ }
+
+ template<typename T>
+ CAutoFile& operator<<(const T& obj)
+ {
+ // Serialize to this stream
+ if (!file)
+ throw std::ios_base::failure("CAutoFile::operator<< : file handle is NULL");
+ ::Serialize(*this, obj, nType, nVersion);
+ return (*this);
+ }
+
+ template<typename T>
+ CAutoFile& operator>>(T& obj)
+ {
+ // Unserialize from this stream
+ if (!file)
+ throw std::ios_base::failure("CAutoFile::operator>> : file handle is NULL");
+ ::Unserialize(*this, obj, nType, nVersion);
+ return (*this);
+ }
+};
+
+/** Non-refcounted RAII wrapper around a FILE* that implements a ring buffer to
+ * deserialize from. It guarantees the ability to rewind a given number of bytes.
+ *
+ * Will automatically close the file when it goes out of scope if not null.
+ * If you need to close the file early, use file.fclose() instead of fclose(file).
+ */
+class CBufferedFile
+{
+private:
+ // Disallow copies
+ CBufferedFile(const CBufferedFile&);
+ CBufferedFile& operator=(const CBufferedFile&);
+
+ int nType;
+ int nVersion;
+
+ FILE *src; // source file
+ uint64_t nSrcPos; // how many bytes have been read from source
+ uint64_t nReadPos; // how many bytes have been read from this
+ uint64_t nReadLimit; // up to which position we're allowed to read
+ uint64_t nRewind; // how many bytes we guarantee to rewind
+ std::vector<char> vchBuf; // the buffer
+
+protected:
+ // read data from the source to fill the buffer
+ bool Fill() {
+ unsigned int pos = nSrcPos % vchBuf.size();
+ unsigned int readNow = vchBuf.size() - pos;
+ unsigned int nAvail = vchBuf.size() - (nSrcPos - nReadPos) - nRewind;
+ if (nAvail < readNow)
+ readNow = nAvail;
+ if (readNow == 0)
+ return false;
+ size_t read = fread((void*)&vchBuf[pos], 1, readNow, src);
+ if (read == 0) {
+ throw std::ios_base::failure(feof(src) ? "CBufferedFile::Fill : end of file" : "CBufferedFile::Fill : fread failed");
+ } else {
+ nSrcPos += read;
+ return true;
+ }
+ }
+
+public:
+ CBufferedFile(FILE *fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nTypeIn, int nVersionIn) :
+ nSrcPos(0), nReadPos(0), nReadLimit((uint64_t)(-1)), nRewind(nRewindIn), vchBuf(nBufSize, 0)
+ {
+ src = fileIn;
+ nType = nTypeIn;
+ nVersion = nVersionIn;
+ }
+
+ ~CBufferedFile()
+ {
+ fclose();
+ }
+
+ void fclose()
+ {
+ if (src) {
+ ::fclose(src);
+ src = NULL;
+ }
+ }
+
+ // check whether we're at the end of the source file
+ bool eof() const {
+ return nReadPos == nSrcPos && feof(src);
+ }
+
+ // read a number of bytes
+ CBufferedFile& read(char *pch, size_t nSize) {
+ if (nSize + nReadPos > nReadLimit)
+ throw std::ios_base::failure("Read attempted past buffer limit");
+ if (nSize + nRewind > vchBuf.size())
+ throw std::ios_base::failure("Read larger than buffer size");
+ while (nSize > 0) {
+ if (nReadPos == nSrcPos)
+ Fill();
+ unsigned int pos = nReadPos % vchBuf.size();
+ size_t nNow = nSize;
+ if (nNow + pos > vchBuf.size())
+ nNow = vchBuf.size() - pos;
+ if (nNow + nReadPos > nSrcPos)
+ nNow = nSrcPos - nReadPos;
+ memcpy(pch, &vchBuf[pos], nNow);
+ nReadPos += nNow;
+ pch += nNow;
+ nSize -= nNow;
+ }
+ return (*this);
+ }
+
+ // return the current reading position
+ uint64_t GetPos() {
+ return nReadPos;
+ }
+
+ // rewind to a given reading position
+ bool SetPos(uint64_t nPos) {
+ nReadPos = nPos;
+ if (nReadPos + nRewind < nSrcPos) {
+ nReadPos = nSrcPos - nRewind;
+ return false;
+ } else if (nReadPos > nSrcPos) {
+ nReadPos = nSrcPos;
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ bool Seek(uint64_t nPos) {
+ long nLongPos = nPos;
+ if (nPos != (uint64_t)nLongPos)
+ return false;
+ if (fseek(src, nLongPos, SEEK_SET))
+ return false;
+ nLongPos = ftell(src);
+ nSrcPos = nLongPos;
+ nReadPos = nLongPos;
+ return true;
+ }
+
+ // prevent reading beyond a certain position
+ // no argument removes the limit
+ bool SetLimit(uint64_t nPos = (uint64_t)(-1)) {
+ if (nPos < nReadPos)
+ return false;
+ nReadLimit = nPos;
+ return true;
+ }
+
+ template<typename T>
+ CBufferedFile& operator>>(T& obj) {
+ // Unserialize from this stream
+ ::Unserialize(*this, obj, nType, nVersion);
+ return (*this);
+ }
+
+ // search for a given byte in the stream, and remain positioned on it
+ void FindByte(char ch) {
+ while (true) {
+ if (nReadPos == nSrcPos)
+ Fill();
+ if (vchBuf[nReadPos % vchBuf.size()] == ch)
+ break;
+ nReadPos++;
+ }
+ }
+};
+
+#endif // BITCOIN_STREAMS_H
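For orientation, a minimal usage sketch of the CBufferedFile class added above (not part of the patch). It assumes the serialization helpers from serialize.h; the file name, the length-prefixed record format and the buffer sizes are hypothetical.

// Minimal sketch: read length-prefixed records through CBufferedFile,
// keeping 1 KiB of guaranteed rewind available to the caller.
#include "serialize.h"   // SER_DISK, Unserialize
#include "streams.h"     // CBufferedFile
#include "version.h"     // CLIENT_VERSION
#include <ios>
#include <stdint.h>
#include <stdio.h>
#include <vector>

void ReadRecords(const char* path)
{
    FILE* fp = fopen(path, "rb");
    if (!fp)
        return;
    // 64 KiB buffer with 1 KiB of rewind; CBufferedFile owns fp from here on.
    CBufferedFile blkdat(fp, 1 << 16, 1 << 10, SER_DISK, CLIENT_VERSION);
    try {
        while (!blkdat.eof()) {
            uint32_t nSize = 0;
            blkdat >> nSize;                           // length prefix
            blkdat.SetLimit(blkdat.GetPos() + nSize);  // refuse reads past this record
            std::vector<char> payload(nSize);
            if (nSize > 0)
                blkdat.read(&payload[0], nSize);
            blkdat.SetLimit();                         // lift the limit again
            // ... process payload; on a parse error the caller could SetPos()
            // back to the start of the record and resynchronise, as long as
            // the target still lies within the rewind window.
        }
    } catch (const std::ios_base::failure&) {
        // Fill() throws on EOF or a short read instead of returning partial
        // data, so a truncated final record ends up here.
    }
    blkdat.fclose();  // per the class comment: file.fclose(), not fclose(file)
}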
diff --git a/src/test/DoS_tests.cpp b/src/test/DoS_tests.cpp
index 7bec12b665..f9746fdaa5 100644
--- a/src/test/DoS_tests.cpp
+++ b/src/test/DoS_tests.cpp
@@ -106,51 +106,6 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
BOOST_CHECK(!CNode::IsBanned(addr));
}
-static bool CheckNBits(unsigned int nbits1, int64_t time1, unsigned int nbits2, int64_t time2)
-{
- if (time1 > time2)
- return CheckNBits(nbits2, time2, nbits1, time1);
- int64_t deltaTime = time2-time1;
-
- return CheckMinWork(nbits2, nbits1, deltaTime);
-}
-
-BOOST_AUTO_TEST_CASE(DoS_checknbits)
-{
- using namespace boost::assign; // for 'map_list_of()'
-
- // Timestamps,nBits from the bitcoin block chain.
- // These are the block-chain checkpoint blocks
- typedef std::map<int64_t, unsigned int> BlockData;
- BlockData chainData =
- map_list_of(1239852051,486604799)(1262749024,486594666)
- (1279305360,469854461)(1280200847,469830746)(1281678674,469809688)
- (1296207707,453179945)(1302624061,453036989)(1309640330,437004818)
- (1313172719,436789733);
-
- // Make sure CheckNBits considers every combination of block-chain-lock-in-points
- // "sane":
- BOOST_FOREACH(const BlockData::value_type& i, chainData)
- {
- BOOST_FOREACH(const BlockData::value_type& j, chainData)
- {
- BOOST_CHECK(CheckNBits(i.second, i.first, j.second, j.first));
- }
- }
-
- // Test a couple of insane combinations:
- BlockData::value_type firstcheck = *(chainData.begin());
- BlockData::value_type lastcheck = *(chainData.rbegin());
-
- // First checkpoint difficulty at or a while after the last checkpoint time should fail when
- // compared to last checkpoint
- BOOST_CHECK(!CheckNBits(firstcheck.second, lastcheck.first+60*10, lastcheck.second, lastcheck.first));
- BOOST_CHECK(!CheckNBits(firstcheck.second, lastcheck.first+60*60*24*14, lastcheck.second, lastcheck.first));
-
- // ... but OK if enough time passed for difficulty to adjust downward:
- BOOST_CHECK(CheckNBits(firstcheck.second, lastcheck.first+60*60*24*365*4, lastcheck.second, lastcheck.first));
-}
-
CTransaction RandomOrphan()
{
std::map<uint256, COrphanTx>::iterator it;
diff --git a/src/test/alert_tests.cpp b/src/test/alert_tests.cpp
index 4af87cf8ef..28610f0d2e 100644
--- a/src/test/alert_tests.cpp
+++ b/src/test/alert_tests.cpp
@@ -10,6 +10,7 @@
#include "data/alertTests.raw.h"
#include "serialize.h"
+#include "streams.h"
#include "util.h"
#include "utilstrencodings.h"
#include "version.h"
diff --git a/src/test/base58_tests.cpp b/src/test/base58_tests.cpp
index c298c805da..e495435b81 100644
--- a/src/test/base58_tests.cpp
+++ b/src/test/base58_tests.cpp
@@ -12,6 +12,7 @@
#include "script/script.h"
#include "uint256.h"
#include "util.h"
+#include "utilstrencodings.h"
#include <boost/foreach.hpp>
#include <boost/test/unit_test.hpp>
diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp
index 2cdafa4bdd..99b21a23a0 100644
--- a/src/test/bloom_tests.cpp
+++ b/src/test/bloom_tests.cpp
@@ -14,6 +14,7 @@
#include <vector>
#include <boost/test/unit_test.hpp>
+#include <boost/tuple/tuple.hpp>
using namespace std;
using namespace boost::tuples;
diff --git a/src/test/checkblock_tests.cpp b/src/test/checkblock_tests.cpp
index 67d40a45c7..9151fdc0c8 100644
--- a/src/test/checkblock_tests.cpp
+++ b/src/test/checkblock_tests.cpp
@@ -36,7 +36,7 @@ bool read_block(const std::string& filename, CBlock& block)
fseek(fp, 8, SEEK_SET); // skip msgheader/size
CAutoFile filein(fp, SER_DISK, CLIENT_VERSION);
- if (!filein) return false;
+ if (filein.IsNull()) return false;
filein >> block;
diff --git a/src/test/key_tests.cpp b/src/test/key_tests.cpp
index 203c20731a..b32f3774fe 100644
--- a/src/test/key_tests.cpp
+++ b/src/test/key_tests.cpp
@@ -75,7 +75,7 @@ BOOST_AUTO_TEST_CASE(key_test1)
CKey key1C = bsecret1C.GetKey();
BOOST_CHECK(key1C.IsCompressed() == true);
CKey key2C = bsecret2C.GetKey();
- BOOST_CHECK(key1C.IsCompressed() == true);
+ BOOST_CHECK(key2C.IsCompressed() == true);
CPubKey pubkey1 = key1. GetPubKey();
CPubKey pubkey2 = key2. GetPubKey();
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index bad5c13ac2..93b7fe189a 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -170,7 +170,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
tx.vin[0].scriptSig = CScript() << OP_1;
tx.vout[0].nValue = 4900000000LL;
script = CScript() << OP_0;
- tx.vout[0].scriptPubKey = GetScriptForDestination(script.GetID());
+ tx.vout[0].scriptPubKey = GetScriptForDestination(CScriptID(script));
hash = tx.GetHash();
mempool.addUnchecked(hash, CTxMemPoolEntry(tx, 11, GetTime(), 111.0, 11));
tx.vin[0].prevout.hash = hash;
diff --git a/src/test/multisig_tests.cpp b/src/test/multisig_tests.cpp
index 5a2ec1cb31..e9fc86779a 100644
--- a/src/test/multisig_tests.cpp
+++ b/src/test/multisig_tests.cpp
@@ -51,13 +51,13 @@ BOOST_AUTO_TEST_CASE(multisig_verify)
key[i].MakeNewKey(true);
CScript a_and_b;
- a_and_b << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ a_and_b << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
CScript a_or_b;
- a_or_b << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ a_or_b << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
CScript escrow;
- escrow << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << OP_3 << OP_CHECKMULTISIG;
+ escrow << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << ToByteVector(key[2].GetPubKey()) << OP_3 << OP_CHECKMULTISIG;
CMutableTransaction txFrom; // Funding transaction
txFrom.vout.resize(3);
@@ -138,28 +138,28 @@ BOOST_AUTO_TEST_CASE(multisig_IsStandard)
txnouttype whichType;
CScript a_and_b;
- a_and_b << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ a_and_b << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
BOOST_CHECK(::IsStandard(a_and_b, whichType));
CScript a_or_b;
- a_or_b << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ a_or_b << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
BOOST_CHECK(::IsStandard(a_or_b, whichType));
CScript escrow;
- escrow << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << OP_3 << OP_CHECKMULTISIG;
+ escrow << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << ToByteVector(key[2].GetPubKey()) << OP_3 << OP_CHECKMULTISIG;
BOOST_CHECK(::IsStandard(escrow, whichType));
CScript one_of_four;
- one_of_four << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << key[3].GetPubKey() << OP_4 << OP_CHECKMULTISIG;
+ one_of_four << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << ToByteVector(key[2].GetPubKey()) << ToByteVector(key[3].GetPubKey()) << OP_4 << OP_CHECKMULTISIG;
BOOST_CHECK(!::IsStandard(one_of_four, whichType));
CScript malformed[6];
- malformed[0] << OP_3 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
- malformed[1] << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_3 << OP_CHECKMULTISIG;
- malformed[2] << OP_0 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
- malformed[3] << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_0 << OP_CHECKMULTISIG;
- malformed[4] << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_CHECKMULTISIG;
- malformed[5] << OP_1 << key[0].GetPubKey() << key[1].GetPubKey();
+ malformed[0] << OP_3 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
+ malformed[1] << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_3 << OP_CHECKMULTISIG;
+ malformed[2] << OP_0 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
+ malformed[3] << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_0 << OP_CHECKMULTISIG;
+ malformed[4] << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_CHECKMULTISIG;
+ malformed[5] << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey());
for (int i = 0; i < 6; i++)
BOOST_CHECK(!::IsStandard(malformed[i], whichType));
@@ -192,7 +192,7 @@ BOOST_AUTO_TEST_CASE(multisig_Solver1)
vector<valtype> solutions;
txnouttype whichType;
CScript s;
- s << key[0].GetPubKey() << OP_CHECKSIG;
+ s << ToByteVector(key[0].GetPubKey()) << OP_CHECKSIG;
BOOST_CHECK(Solver(s, whichType, solutions));
BOOST_CHECK(solutions.size() == 1);
CTxDestination addr;
@@ -207,7 +207,7 @@ BOOST_AUTO_TEST_CASE(multisig_Solver1)
vector<valtype> solutions;
txnouttype whichType;
CScript s;
- s << OP_DUP << OP_HASH160 << key[0].GetPubKey().GetID() << OP_EQUALVERIFY << OP_CHECKSIG;
+ s << OP_DUP << OP_HASH160 << ToByteVector(key[0].GetPubKey().GetID()) << OP_EQUALVERIFY << OP_CHECKSIG;
BOOST_CHECK(Solver(s, whichType, solutions));
BOOST_CHECK(solutions.size() == 1);
CTxDestination addr;
@@ -222,7 +222,7 @@ BOOST_AUTO_TEST_CASE(multisig_Solver1)
vector<valtype> solutions;
txnouttype whichType;
CScript s;
- s << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ s << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
BOOST_CHECK(Solver(s, whichType, solutions));
BOOST_CHECK_EQUAL(solutions.size(), 4U);
CTxDestination addr;
@@ -237,7 +237,7 @@ BOOST_AUTO_TEST_CASE(multisig_Solver1)
vector<valtype> solutions;
txnouttype whichType;
CScript s;
- s << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ s << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
BOOST_CHECK(Solver(s, whichType, solutions));
BOOST_CHECK_EQUAL(solutions.size(), 4U);
vector<CTxDestination> addrs;
@@ -256,7 +256,7 @@ BOOST_AUTO_TEST_CASE(multisig_Solver1)
vector<valtype> solutions;
txnouttype whichType;
CScript s;
- s << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << OP_3 << OP_CHECKMULTISIG;
+ s << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << ToByteVector(key[2].GetPubKey()) << OP_3 << OP_CHECKMULTISIG;
BOOST_CHECK(Solver(s, whichType, solutions));
BOOST_CHECK(solutions.size() == 5);
}
@@ -274,13 +274,13 @@ BOOST_AUTO_TEST_CASE(multisig_Sign)
}
CScript a_and_b;
- a_and_b << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ a_and_b << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
CScript a_or_b;
- a_or_b << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ a_or_b << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
CScript escrow;
- escrow << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << OP_3 << OP_CHECKMULTISIG;
+ escrow << OP_2 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << ToByteVector(key[2].GetPubKey()) << OP_3 << OP_CHECKMULTISIG;
CMutableTransaction txFrom; // Funding transaction
txFrom.vout.resize(3);
diff --git a/src/test/script_P2SH_tests.cpp b/src/test/script_P2SH_tests.cpp
index f8361a0dc8..fcab652783 100644
--- a/src/test/script_P2SH_tests.cpp
+++ b/src/test/script_P2SH_tests.cpp
@@ -67,15 +67,15 @@ BOOST_AUTO_TEST_CASE(sign)
// 8 Scripts: checking all combinations of
// different keys, straight/P2SH, pubkey/pubkeyhash
CScript standardScripts[4];
- standardScripts[0] << key[0].GetPubKey() << OP_CHECKSIG;
+ standardScripts[0] << ToByteVector(key[0].GetPubKey()) << OP_CHECKSIG;
standardScripts[1] = GetScriptForDestination(key[1].GetPubKey().GetID());
- standardScripts[2] << key[1].GetPubKey() << OP_CHECKSIG;
+ standardScripts[2] << ToByteVector(key[1].GetPubKey()) << OP_CHECKSIG;
standardScripts[3] = GetScriptForDestination(key[2].GetPubKey().GetID());
CScript evalScripts[4];
for (int i = 0; i < 4; i++)
{
keystore.AddCScript(standardScripts[i]);
- evalScripts[i] = GetScriptForDestination(standardScripts[i].GetID());
+ evalScripts[i] = GetScriptForDestination(CScriptID(standardScripts[i]));
}
CMutableTransaction txFrom; // Funding transaction:
@@ -129,7 +129,7 @@ BOOST_AUTO_TEST_CASE(norecurse)
CScript invalidAsScript;
invalidAsScript << OP_INVALIDOPCODE << OP_INVALIDOPCODE;
- CScript p2sh = GetScriptForDestination(invalidAsScript.GetID());
+ CScript p2sh = GetScriptForDestination(CScriptID(invalidAsScript));
CScript scriptSig;
scriptSig << Serialize(invalidAsScript);
@@ -139,7 +139,7 @@ BOOST_AUTO_TEST_CASE(norecurse)
// Try to recur, and verification should succeed because
// the inner HASH160 <> EQUAL should only check the hash:
- CScript p2sh2 = GetScriptForDestination(p2sh.GetID());
+ CScript p2sh2 = GetScriptForDestination(CScriptID(p2sh));
CScript scriptSig2;
scriptSig2 << Serialize(invalidAsScript) << Serialize(p2sh);
@@ -169,7 +169,7 @@ BOOST_AUTO_TEST_CASE(set)
CScript outer[4];
for (int i = 0; i < 4; i++)
{
- outer[i] = GetScriptForDestination(inner[i].GetID());
+ outer[i] = GetScriptForDestination(CScriptID(inner[i]));
keystore.AddCScript(inner[i]);
}
@@ -206,9 +206,9 @@ BOOST_AUTO_TEST_CASE(set)
BOOST_AUTO_TEST_CASE(is)
{
// Test CScript::IsPayToScriptHash()
- uint160 dummy;
+ uint160 dummy(0);
CScript p2sh;
- p2sh << OP_HASH160 << dummy << OP_EQUAL;
+ p2sh << OP_HASH160 << ToByteVector(dummy) << OP_EQUAL;
BOOST_CHECK(p2sh.IsPayToScriptHash());
// Not considered pay-to-script-hash if using one of the OP_PUSHDATA opcodes:
@@ -224,13 +224,13 @@ BOOST_AUTO_TEST_CASE(is)
CScript not_p2sh;
BOOST_CHECK(!not_p2sh.IsPayToScriptHash());
- not_p2sh.clear(); not_p2sh << OP_HASH160 << dummy << dummy << OP_EQUAL;
+ not_p2sh.clear(); not_p2sh << OP_HASH160 << ToByteVector(dummy) << ToByteVector(dummy) << OP_EQUAL;
BOOST_CHECK(!not_p2sh.IsPayToScriptHash());
- not_p2sh.clear(); not_p2sh << OP_NOP << dummy << OP_EQUAL;
+ not_p2sh.clear(); not_p2sh << OP_NOP << ToByteVector(dummy) << OP_EQUAL;
BOOST_CHECK(!not_p2sh.IsPayToScriptHash());
- not_p2sh.clear(); not_p2sh << OP_HASH160 << dummy << OP_CHECKSIG;
+ not_p2sh.clear(); not_p2sh << OP_HASH160 << ToByteVector(dummy) << OP_CHECKSIG;
BOOST_CHECK(!not_p2sh.IsPayToScriptHash());
}
@@ -242,7 +242,7 @@ BOOST_AUTO_TEST_CASE(switchover)
CScript scriptSig;
scriptSig << Serialize(notValid);
- CScript fund = GetScriptForDestination(notValid.GetID());
+ CScript fund = GetScriptForDestination(CScriptID(notValid));
// Validation should succeed under old rules (hash is correct):
@@ -275,7 +275,7 @@ BOOST_AUTO_TEST_CASE(AreInputsStandard)
keystore.AddCScript(pay1);
CScript pay1of3 = GetScriptForMultisig(1, keys);
- txFrom.vout[0].scriptPubKey = GetScriptForDestination(pay1.GetID()); // P2SH (OP_CHECKSIG)
+ txFrom.vout[0].scriptPubKey = GetScriptForDestination(CScriptID(pay1)); // P2SH (OP_CHECKSIG)
txFrom.vout[0].nValue = 1000;
txFrom.vout[1].scriptPubKey = pay1; // ordinary OP_CHECKSIG
txFrom.vout[1].nValue = 2000;
@@ -285,31 +285,31 @@ BOOST_AUTO_TEST_CASE(AreInputsStandard)
// vout[3] is complicated 1-of-3 AND 2-of-3
// ... that is OK if wrapped in P2SH:
CScript oneAndTwo;
- oneAndTwo << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey();
+ oneAndTwo << OP_1 << ToByteVector(key[0].GetPubKey()) << ToByteVector(key[1].GetPubKey()) << ToByteVector(key[2].GetPubKey());
oneAndTwo << OP_3 << OP_CHECKMULTISIGVERIFY;
- oneAndTwo << OP_2 << key[3].GetPubKey() << key[4].GetPubKey() << key[5].GetPubKey();
+ oneAndTwo << OP_2 << ToByteVector(key[3].GetPubKey()) << ToByteVector(key[4].GetPubKey()) << ToByteVector(key[5].GetPubKey());
oneAndTwo << OP_3 << OP_CHECKMULTISIG;
keystore.AddCScript(oneAndTwo);
- txFrom.vout[3].scriptPubKey = GetScriptForDestination(oneAndTwo.GetID());
+ txFrom.vout[3].scriptPubKey = GetScriptForDestination(CScriptID(oneAndTwo));
txFrom.vout[3].nValue = 4000;
// vout[4] is max sigops:
CScript fifteenSigops; fifteenSigops << OP_1;
for (unsigned i = 0; i < MAX_P2SH_SIGOPS; i++)
- fifteenSigops << key[i%3].GetPubKey();
+ fifteenSigops << ToByteVector(key[i%3].GetPubKey());
fifteenSigops << OP_15 << OP_CHECKMULTISIG;
keystore.AddCScript(fifteenSigops);
- txFrom.vout[4].scriptPubKey = GetScriptForDestination(fifteenSigops.GetID());
+ txFrom.vout[4].scriptPubKey = GetScriptForDestination(CScriptID(fifteenSigops));
txFrom.vout[4].nValue = 5000;
// vout[5/6] are non-standard because they exceed MAX_P2SH_SIGOPS
CScript sixteenSigops; sixteenSigops << OP_16 << OP_CHECKMULTISIG;
keystore.AddCScript(sixteenSigops);
- txFrom.vout[5].scriptPubKey = GetScriptForDestination(fifteenSigops.GetID());
+ txFrom.vout[5].scriptPubKey = GetScriptForDestination(CScriptID(fifteenSigops));
txFrom.vout[5].nValue = 5000;
CScript twentySigops; twentySigops << OP_CHECKMULTISIG;
keystore.AddCScript(twentySigops);
- txFrom.vout[6].scriptPubKey = GetScriptForDestination(twentySigops.GetID());
+ txFrom.vout[6].scriptPubKey = GetScriptForDestination(CScriptID(twentySigops));
txFrom.vout[6].nValue = 6000;
coins.ModifyCoins(txFrom.GetHash())->FromTx(txFrom, 0);
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index a4b0212494..d3fc673a79 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -162,7 +162,7 @@ public:
TestBuilder(const CScript& redeemScript, const std::string& comment_, int flags_, bool P2SH = false) : scriptPubKey(redeemScript), havePush(false), comment(comment_), flags(flags_)
{
if (P2SH) {
- creditTx = BuildCreditingTransaction(CScript() << OP_HASH160 << redeemScript.GetID() << OP_EQUAL);
+ creditTx = BuildCreditingTransaction(CScript() << OP_HASH160 << ToByteVector(CScriptID(redeemScript)) << OP_EQUAL);
} else {
creditTx = BuildCreditingTransaction(redeemScript);
}
@@ -270,135 +270,135 @@ BOOST_AUTO_TEST_CASE(script_build)
std::vector<TestBuilder> good;
std::vector<TestBuilder> bad;
- good.push_back(TestBuilder(CScript() << keys.pubkey0 << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2PK", 0
).PushSig(keys.key0));
- bad.push_back(TestBuilder(CScript() << keys.pubkey0 << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0) << OP_CHECKSIG,
"P2PK, bad sig", 0
).PushSig(keys.key0).DamagePush(10));
- good.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << keys.pubkey1C.GetID() << OP_EQUALVERIFY << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << ToByteVector(keys.pubkey1C.GetID()) << OP_EQUALVERIFY << OP_CHECKSIG,
"P2PKH", 0
).PushSig(keys.key1).Push(keys.pubkey1C));
- bad.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << keys.pubkey2C.GetID() << OP_EQUALVERIFY << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << ToByteVector(keys.pubkey2C.GetID()) << OP_EQUALVERIFY << OP_CHECKSIG,
"P2PKH, bad pubkey", 0
).PushSig(keys.key2).Push(keys.pubkey2C).DamagePush(5));
- good.push_back(TestBuilder(CScript() << keys.pubkey1 << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"P2PK anyonecanpay", 0
).PushSig(keys.key1, SIGHASH_ALL | SIGHASH_ANYONECANPAY));
- bad.push_back(TestBuilder(CScript() << keys.pubkey1 << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"P2PK anyonecanpay marked with normal hashtype", 0
).PushSig(keys.key1, SIGHASH_ALL | SIGHASH_ANYONECANPAY).EditPush(70, "81", "01"));
- good.push_back(TestBuilder(CScript() << keys.pubkey0C << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0C) << OP_CHECKSIG,
"P2SH(P2PK)", SCRIPT_VERIFY_P2SH, true
).PushSig(keys.key0).PushRedeem());
- bad.push_back(TestBuilder(CScript() << keys.pubkey0C << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0C) << OP_CHECKSIG,
"P2SH(P2PK), bad redeemscript", SCRIPT_VERIFY_P2SH, true
).PushSig(keys.key0).PushRedeem().DamagePush(10));
- good.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << keys.pubkey1.GetID() << OP_EQUALVERIFY << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << ToByteVector(keys.pubkey1.GetID()) << OP_EQUALVERIFY << OP_CHECKSIG,
"P2SH(P2PKH), bad sig but no VERIFY_P2SH", 0, true
).PushSig(keys.key0).DamagePush(10).PushRedeem());
- bad.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << keys.pubkey1.GetID() << OP_EQUALVERIFY << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << OP_DUP << OP_HASH160 << ToByteVector(keys.pubkey1.GetID()) << OP_EQUALVERIFY << OP_CHECKSIG,
"P2SH(P2PKH), bad sig", SCRIPT_VERIFY_P2SH, true
).PushSig(keys.key0).DamagePush(10).PushRedeem());
- good.push_back(TestBuilder(CScript() << OP_3 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG,
+ good.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG,
"3-of-3", 0
).Num(0).PushSig(keys.key0).PushSig(keys.key1).PushSig(keys.key2));
- bad.push_back(TestBuilder(CScript() << OP_3 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG,
+ bad.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG,
"3-of-3, 2 sigs", 0
).Num(0).PushSig(keys.key0).PushSig(keys.key1).Num(0));
- good.push_back(TestBuilder(CScript() << OP_2 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG,
+ good.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG,
"P2SH(2-of-3)", SCRIPT_VERIFY_P2SH, true
).Num(0).PushSig(keys.key1).PushSig(keys.key2).PushRedeem());
- bad.push_back(TestBuilder(CScript() << OP_2 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG,
+ bad.push_back(TestBuilder(CScript() << OP_2 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG,
"P2SH(2-of-3), 1 sig", SCRIPT_VERIFY_P2SH, true
).Num(0).PushSig(keys.key1).Num(0).PushRedeem());
- good.push_back(TestBuilder(CScript() << keys.pubkey1C << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too much R padding but no DERSIG", 0
).PushSig(keys.key1, SIGHASH_ALL, 31, 32).EditPush(1, "43021F", "44022000"));
- bad.push_back(TestBuilder(CScript() << keys.pubkey1C << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too much R padding", SCRIPT_VERIFY_DERSIG
).PushSig(keys.key1, SIGHASH_ALL, 31, 32).EditPush(1, "43021F", "44022000"));
- good.push_back(TestBuilder(CScript() << keys.pubkey1C << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too much S padding but no DERSIG", 0
).PushSig(keys.key1, SIGHASH_ALL).EditPush(1, "44", "45").EditPush(37, "20", "2100"));
- bad.push_back(TestBuilder(CScript() << keys.pubkey1C << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too much S padding", SCRIPT_VERIFY_DERSIG
).PushSig(keys.key1, SIGHASH_ALL).EditPush(1, "44", "45").EditPush(37, "20", "2100"));
- good.push_back(TestBuilder(CScript() << keys.pubkey1C << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too little R padding but no DERSIG", 0
).PushSig(keys.key1, SIGHASH_ALL, 33, 32).EditPush(1, "45022100", "440220"));
- bad.push_back(TestBuilder(CScript() << keys.pubkey1C << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1C) << OP_CHECKSIG,
"P2PK with too little R padding", SCRIPT_VERIFY_DERSIG
).PushSig(keys.key1, SIGHASH_ALL, 33, 32).EditPush(1, "45022100", "440220"));
- good.push_back(TestBuilder(CScript() << keys.pubkey2C << OP_CHECKSIG << OP_NOT,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with bad sig with too much R padding but no DERSIG", 0
).PushSig(keys.key2, SIGHASH_ALL, 31, 32).EditPush(1, "43021F", "44022000").DamagePush(10));
- bad.push_back(TestBuilder(CScript() << keys.pubkey2C << OP_CHECKSIG << OP_NOT,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with bad sig with too much R padding", SCRIPT_VERIFY_DERSIG
).PushSig(keys.key2, SIGHASH_ALL, 31, 32).EditPush(1, "43021F", "44022000").DamagePush(10));
- bad.push_back(TestBuilder(CScript() << keys.pubkey2C << OP_CHECKSIG << OP_NOT,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with too much R padding but no DERSIG", 0
).PushSig(keys.key2, SIGHASH_ALL, 31, 32).EditPush(1, "43021F", "44022000"));
- bad.push_back(TestBuilder(CScript() << keys.pubkey2C << OP_CHECKSIG << OP_NOT,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with too much R padding", SCRIPT_VERIFY_DERSIG
).PushSig(keys.key2, SIGHASH_ALL, 31, 32).EditPush(1, "43021F", "44022000"));
- good.push_back(TestBuilder(CScript() << keys.pubkey2C << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG,
"P2PK with high S but no LOW_S", 0
).PushSig(keys.key2, SIGHASH_ALL, 32, 33));
- bad.push_back(TestBuilder(CScript() << keys.pubkey2C << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey2C) << OP_CHECKSIG,
"P2PK with high S", SCRIPT_VERIFY_LOW_S
).PushSig(keys.key2, SIGHASH_ALL, 32, 33));
- good.push_back(TestBuilder(CScript() << keys.pubkey0H << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG,
"P2PK with hybrid pubkey but no STRICTENC", 0
).PushSig(keys.key0, SIGHASH_ALL));
- bad.push_back(TestBuilder(CScript() << keys.pubkey0H << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG,
"P2PK with hybrid pubkey", SCRIPT_VERIFY_STRICTENC
).PushSig(keys.key0, SIGHASH_ALL));
- bad.push_back(TestBuilder(CScript() << keys.pubkey0H << OP_CHECKSIG << OP_NOT,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with hybrid pubkey but no STRICTENC", 0
).PushSig(keys.key0, SIGHASH_ALL));
- good.push_back(TestBuilder(CScript() << keys.pubkey0H << OP_CHECKSIG << OP_NOT,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with hybrid pubkey", SCRIPT_VERIFY_STRICTENC
).PushSig(keys.key0, SIGHASH_ALL));
- good.push_back(TestBuilder(CScript() << keys.pubkey0H << OP_CHECKSIG << OP_NOT,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with invalid hybrid pubkey but no STRICTENC", 0
).PushSig(keys.key0, SIGHASH_ALL).DamagePush(10));
- good.push_back(TestBuilder(CScript() << keys.pubkey0H << OP_CHECKSIG << OP_NOT,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey0H) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with invalid hybrid pubkey", SCRIPT_VERIFY_STRICTENC
).PushSig(keys.key0, SIGHASH_ALL).DamagePush(10));
- good.push_back(TestBuilder(CScript() << keys.pubkey1 << OP_CHECKSIG,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"P2PK with undefined hashtype but no STRICTENC", 0
).PushSig(keys.key1, 5));
- bad.push_back(TestBuilder(CScript() << keys.pubkey1 << OP_CHECKSIG,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG,
"P2PK with undefined hashtype", SCRIPT_VERIFY_STRICTENC
).PushSig(keys.key1, 5));
- good.push_back(TestBuilder(CScript() << keys.pubkey1 << OP_CHECKSIG << OP_NOT,
+ good.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with invalid sig and undefined hashtype but no STRICTENC", 0
).PushSig(keys.key1, 5).DamagePush(10));
- bad.push_back(TestBuilder(CScript() << keys.pubkey1 << OP_CHECKSIG << OP_NOT,
+ bad.push_back(TestBuilder(CScript() << ToByteVector(keys.pubkey1) << OP_CHECKSIG << OP_NOT,
"P2PK NOT with invalid sig and undefined hashtype", SCRIPT_VERIFY_STRICTENC
).PushSig(keys.key1, 5).DamagePush(10));
- good.push_back(TestBuilder(CScript() << OP_3 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG,
+ good.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG,
"3-of-3 with nonzero dummy but no NULLDUMMY", 0
).Num(1).PushSig(keys.key0).PushSig(keys.key1).PushSig(keys.key2));
- bad.push_back(TestBuilder(CScript() << OP_3 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG,
+ bad.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG,
"3-of-3 with nonzero dummy", SCRIPT_VERIFY_NULLDUMMY
).Num(1).PushSig(keys.key0).PushSig(keys.key1).PushSig(keys.key2));
- good.push_back(TestBuilder(CScript() << OP_3 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG << OP_NOT,
+ good.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG << OP_NOT,
"3-of-3 NOT with invalid sig and nonzero dummy but no NULLDUMMY", 0
).Num(1).PushSig(keys.key0).PushSig(keys.key1).PushSig(keys.key2).DamagePush(10));
- bad.push_back(TestBuilder(CScript() << OP_3 << keys.pubkey0C << keys.pubkey1C << keys.pubkey2C << OP_3 << OP_CHECKMULTISIG << OP_NOT,
+ bad.push_back(TestBuilder(CScript() << OP_3 << ToByteVector(keys.pubkey0C) << ToByteVector(keys.pubkey1C) << ToByteVector(keys.pubkey2C) << OP_3 << OP_CHECKMULTISIG << OP_NOT,
"3-of-3 NOT with invalid sig with nonzero dummy", SCRIPT_VERIFY_NULLDUMMY
).Num(1).PushSig(keys.key0).PushSig(keys.key1).PushSig(keys.key2).DamagePush(10));
@@ -582,7 +582,7 @@ BOOST_AUTO_TEST_CASE(script_CHECKMULTISIG12)
key3.MakeNewKey(true);
CScript scriptPubKey12;
- scriptPubKey12 << OP_1 << key1.GetPubKey() << key2.GetPubKey() << OP_2 << OP_CHECKMULTISIG;
+ scriptPubKey12 << OP_1 << ToByteVector(key1.GetPubKey()) << ToByteVector(key2.GetPubKey()) << OP_2 << OP_CHECKMULTISIG;
CMutableTransaction txFrom12 = BuildCreditingTransaction(scriptPubKey12);
CMutableTransaction txTo12 = BuildSpendingTransaction(CScript(), txFrom12);
@@ -608,7 +608,7 @@ BOOST_AUTO_TEST_CASE(script_CHECKMULTISIG23)
key4.MakeNewKey(false);
CScript scriptPubKey23;
- scriptPubKey23 << OP_2 << key1.GetPubKey() << key2.GetPubKey() << key3.GetPubKey() << OP_3 << OP_CHECKMULTISIG;
+ scriptPubKey23 << OP_2 << ToByteVector(key1.GetPubKey()) << ToByteVector(key2.GetPubKey()) << ToByteVector(key3.GetPubKey()) << OP_3 << OP_CHECKMULTISIG;
CMutableTransaction txFrom23 = BuildCreditingTransaction(scriptPubKey23);
CMutableTransaction txTo23 = BuildSpendingTransaction(CScript(), txFrom23);
@@ -695,9 +695,9 @@ BOOST_AUTO_TEST_CASE(script_combineSigs)
BOOST_CHECK(combined == scriptSigCopy || combined == scriptSig);
// P2SH, single-signature case:
- CScript pkSingle; pkSingle << keys[0].GetPubKey() << OP_CHECKSIG;
+ CScript pkSingle; pkSingle << ToByteVector(keys[0].GetPubKey()) << OP_CHECKSIG;
keystore.AddCScript(pkSingle);
- scriptPubKey = GetScriptForDestination(pkSingle.GetID());
+ scriptPubKey = GetScriptForDestination(CScriptID(pkSingle));
SignSignature(keystore, txFrom, txTo, 0);
combined = CombineSignatures(scriptPubKey, txTo, 0, scriptSig, empty);
BOOST_CHECK(combined == scriptSig);
diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp
index 867a7df888..59e95f2fd1 100644
--- a/src/test/serialize_tests.cpp
+++ b/src/test/serialize_tests.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "serialize.h"
+#include "streams.h"
#include <stdint.h>
diff --git a/src/test/sigopcount_tests.cpp b/src/test/sigopcount_tests.cpp
index 62a6cd63d6..7b27703b62 100644
--- a/src/test/sigopcount_tests.cpp
+++ b/src/test/sigopcount_tests.cpp
@@ -31,14 +31,14 @@ BOOST_AUTO_TEST_CASE(GetSigOpCount)
BOOST_CHECK_EQUAL(s1.GetSigOpCount(false), 0U);
BOOST_CHECK_EQUAL(s1.GetSigOpCount(true), 0U);
- uint160 dummy;
- s1 << OP_1 << dummy << dummy << OP_2 << OP_CHECKMULTISIG;
+ uint160 dummy(0);
+ s1 << OP_1 << ToByteVector(dummy) << ToByteVector(dummy) << OP_2 << OP_CHECKMULTISIG;
BOOST_CHECK_EQUAL(s1.GetSigOpCount(true), 2U);
s1 << OP_IF << OP_CHECKSIG << OP_ENDIF;
BOOST_CHECK_EQUAL(s1.GetSigOpCount(true), 3U);
BOOST_CHECK_EQUAL(s1.GetSigOpCount(false), 21U);
- CScript p2sh = GetScriptForDestination(s1.GetID());
+ CScript p2sh = GetScriptForDestination(CScriptID(s1));
CScript scriptSig;
scriptSig << OP_0 << Serialize(s1);
BOOST_CHECK_EQUAL(p2sh.GetSigOpCount(scriptSig), 3U);
@@ -54,11 +54,11 @@ BOOST_AUTO_TEST_CASE(GetSigOpCount)
BOOST_CHECK_EQUAL(s2.GetSigOpCount(true), 3U);
BOOST_CHECK_EQUAL(s2.GetSigOpCount(false), 20U);
- p2sh = GetScriptForDestination(s2.GetID());
+ p2sh = GetScriptForDestination(CScriptID(s2));
BOOST_CHECK_EQUAL(p2sh.GetSigOpCount(true), 0U);
BOOST_CHECK_EQUAL(p2sh.GetSigOpCount(false), 0U);
CScript scriptSig2;
- scriptSig2 << OP_1 << dummy << dummy << Serialize(s2);
+ scriptSig2 << OP_1 << ToByteVector(dummy) << ToByteVector(dummy) << Serialize(s2);
BOOST_CHECK_EQUAL(p2sh.GetSigOpCount(scriptSig2), 3U);
}
diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp
index afd63d2717..e50218d8ef 100644
--- a/src/test/test_bitcoin.cpp
+++ b/src/test/test_bitcoin.cpp
@@ -47,7 +47,7 @@ struct TestingSetup {
bool fFirstRun;
pwalletMain = new CWallet("wallet.dat");
pwalletMain->LoadWallet(fFirstRun);
- RegisterWallet(pwalletMain);
+ RegisterValidationInterface(pwalletMain);
#endif
nScriptCheckThreads = 3;
for (int i=0; i < nScriptCheckThreads-1; i++)
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index 18cb8f3d1b..41ccaaac94 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -259,9 +259,9 @@ SetupDummyInputs(CBasicKeyStore& keystoreRet, CCoinsViewCache& coinsRet)
// Create some dummy input transactions
dummyTransactions[0].vout.resize(2);
dummyTransactions[0].vout[0].nValue = 11*CENT;
- dummyTransactions[0].vout[0].scriptPubKey << key[0].GetPubKey() << OP_CHECKSIG;
+ dummyTransactions[0].vout[0].scriptPubKey << ToByteVector(key[0].GetPubKey()) << OP_CHECKSIG;
dummyTransactions[0].vout[1].nValue = 50*CENT;
- dummyTransactions[0].vout[1].scriptPubKey << key[1].GetPubKey() << OP_CHECKSIG;
+ dummyTransactions[0].vout[1].scriptPubKey << ToByteVector(key[1].GetPubKey()) << OP_CHECKSIG;
coinsRet.ModifyCoins(dummyTransactions[0].GetHash())->FromTx(dummyTransactions[0], 0);
dummyTransactions[1].vout.resize(2);
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index 6378bd0941..61daa0a3fe 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -9,6 +9,7 @@
#include "sync.h"
#include "utilstrencodings.h"
#include "utilmoneystr.h"
+#include "version.h"
#include <stdint.h>
#include <vector>
@@ -341,4 +342,15 @@ BOOST_AUTO_TEST_CASE(test_FormatParagraph)
BOOST_CHECK_EQUAL(FormatParagraph("test test", 4, 4), "test\n test");
}
+BOOST_AUTO_TEST_CASE(test_FormatSubVersion)
+{
+ std::vector<std::string> comments;
+ comments.push_back(std::string("comment1"));
+ std::vector<std::string> comments2;
+ comments2.push_back(std::string("comment1"));
+ comments2.push_back(std::string("comment2"));
+ BOOST_CHECK_EQUAL(FormatSubVersion("Test", 99900, std::vector<std::string>()),std::string("/Test:0.9.99/"));
+ BOOST_CHECK_EQUAL(FormatSubVersion("Test", 99900, comments),std::string("/Test:0.9.99(comment1)/"));
+ BOOST_CHECK_EQUAL(FormatSubVersion("Test", 99900, comments2),std::string("/Test:0.9.99(comment1; comment2)/"));
+}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/txdb.cpp b/src/txdb.cpp
index cb9f150011..8a73ce961c 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -104,7 +104,7 @@ bool CCoinsViewDB::GetStats(CCoinsStats &stats) const {
/* It seems that there are no "const iterators" for LevelDB. Since we
only need read operations on it, use a const-cast to get around
that restriction. */
- leveldb::Iterator *pcursor = const_cast<CLevelDBWrapper*>(&db)->NewIterator();
+ boost::scoped_ptr<leveldb::Iterator> pcursor(const_cast<CLevelDBWrapper*>(&db)->NewIterator());
pcursor->SeekToFirst();
CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION);
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index fa1802ad31..b0d6b4aefa 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -6,8 +6,10 @@
#include "txmempool.h"
#include "core.h"
+#include "streams.h"
#include "util.h"
#include "utilmoneystr.h"
+#include "version.h"
#include <boost/circular_buffer.hpp>
diff --git a/src/txmempool.h b/src/txmempool.h
index c63fd6f590..85cf5310ff 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -12,6 +12,8 @@
#include "core.h"
#include "sync.h"
+class CAutoFile;
+
inline bool AllowFree(double dPriority)
{
// Large (in bytes) low-priority (new, small-coin) transactions
@@ -52,6 +54,19 @@ public:
class CMinerPolicyEstimator;
+/** An inpoint - a combination of a transaction and an index n into its vin */
+class CInPoint
+{
+public:
+ const CTransaction* ptx;
+ uint32_t n;
+
+ CInPoint() { SetNull(); }
+ CInPoint(const CTransaction* ptxIn, uint32_t nIn) { ptx = ptxIn; n = nIn; }
+ void SetNull() { ptx = NULL; n = (uint32_t) -1; }
+ bool IsNull() const { return (ptx == NULL && n == (uint32_t) -1); }
+};
+
/*
* CTxMemPool stores valid-according-to-the-current-best-chain
* transactions that may be included in the next block.
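A short sketch (not part of this patch) of how the relocated CInPoint is typically keyed; the mapNextTx spent-output index shown here is an assumption about the surrounding mempool code, not something this hunk adds.

// Sketch: index which in-mempool transaction spends each outpoint, using the
// CInPoint helper moved into txmempool.h above.
#include "core.h"       // CTransaction, COutPoint
#include "txmempool.h"  // CInPoint
#include <map>

std::map<COutPoint, CInPoint> mapNextTx;  // outpoint -> (spending tx, vin index)

void TrackSpends(const CTransaction& tx)
{
    for (unsigned int i = 0; i < tx.vin.size(); i++)
        mapNextTx[tx.vin[i].prevout] = CInPoint(&tx, i);
}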
diff --git a/src/ui_interface.h b/src/ui_interface.h
index f5224ba57d..1231d5ed0b 100644
--- a/src/ui_interface.h
+++ b/src/ui_interface.h
@@ -63,6 +63,9 @@ public:
/** Force blocking, modal message box dialog (not just OS notification) */
MODAL = 0x10000000U,
+ /** Do not print contents of message to debug log */
+ SECURE = 0x40000000U,
+
/** Predefined combinations for certain default usage cases */
MSG_INFORMATION = ICON_INFORMATION,
MSG_WARNING = (ICON_WARNING | BTN_OK | MODAL),
diff --git a/src/util.cpp b/src/util.cpp
index 632d0965bf..544ffc98b8 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -395,7 +395,8 @@ boost::filesystem::path GetDefaultDataDir()
#endif
}
-static boost::filesystem::path pathCached[CBaseChainParams::MAX_NETWORK_TYPES+1];
+static boost::filesystem::path pathCached;
+static boost::filesystem::path pathCachedNetSpecific;
static CCriticalSection csPathCached;
const boost::filesystem::path &GetDataDir(bool fNetSpecific)
@@ -404,10 +405,7 @@ const boost::filesystem::path &GetDataDir(bool fNetSpecific)
LOCK(csPathCached);
- int nNet = CBaseChainParams::MAX_NETWORK_TYPES;
- if (fNetSpecific) nNet = BaseParams().NetworkID();
-
- fs::path &path = pathCached[nNet];
+ fs::path &path = fNetSpecific ? pathCachedNetSpecific : pathCached;
// This can be called during exceptions by LogPrintf(), so we cache the
// value so we don't have to do memory allocations after that.
@@ -433,8 +431,8 @@ const boost::filesystem::path &GetDataDir(bool fNetSpecific)
void ClearDatadirCache()
{
- std::fill(&pathCached[0], &pathCached[CBaseChainParams::MAX_NETWORK_TYPES+1],
- boost::filesystem::path());
+ pathCached = boost::filesystem::path();
+ pathCachedNetSpecific = boost::filesystem::path();
}
boost::filesystem::path GetConfigFile()
diff --git a/src/utilmoneystr.cpp b/src/utilmoneystr.cpp
index 1a5635bfb8..95be06aa18 100644
--- a/src/utilmoneystr.cpp
+++ b/src/utilmoneystr.cpp
@@ -7,6 +7,7 @@
#include "core.h"
#include "tinyformat.h"
+#include "utilstrencodings.h"
using namespace std;
diff --git a/src/utilstrencodings.cpp b/src/utilstrencodings.cpp
index b9e64c5fe1..81e156f43f 100644
--- a/src/utilstrencodings.cpp
+++ b/src/utilstrencodings.cpp
@@ -9,8 +9,8 @@
#include <errno.h>
#include <limits>
-
-#include <boost/foreach.hpp>
+#include <cstdlib>
+#include <cstring>
using namespace std;
@@ -53,9 +53,9 @@ signed char HexDigit(char c)
bool IsHex(const string& str)
{
- BOOST_FOREACH(char c, str)
+ for(std::string::const_iterator it(str.begin()); it != str.end(); ++it)
{
- if (HexDigit(c) < 0)
+ if (HexDigit(*it) < 0)
return false;
}
return (str.size() > 0) && (str.size()%2 == 0);
diff --git a/src/version.cpp b/src/version.cpp
index 95632fdab7..d12b681e5c 100644
--- a/src/version.cpp
+++ b/src/version.cpp
@@ -8,8 +8,6 @@
#include <string>
-#include <boost/algorithm/string/join.hpp>
-
// Name of client reported in the 'version' message. Report the same name
// for both bitcoind and bitcoin-qt, to make it harder for attackers to
// target servers or GUI users specifically.
@@ -94,7 +92,13 @@ std::string FormatSubVersion(const std::string& name, int nClientVersion, const
ss << "/";
ss << name << ":" << FormatVersion(nClientVersion);
if (!comments.empty())
- ss << "(" << boost::algorithm::join(comments, "; ") << ")";
+ {
+ std::vector<std::string>::const_iterator it(comments.begin());
+ ss << "(" << *it;
+ for(++it; it != comments.end(); ++it)
+ ss << "; " << *it;
+ ss << ")";
+ }
ss << "/";
return ss.str();
}
diff --git a/src/version.h b/src/version.h
index 75cbec39b7..a1e440de24 100644
--- a/src/version.h
+++ b/src/version.h
@@ -33,8 +33,11 @@ static const int PROTOCOL_VERSION = 70002;
// initial proto version, to be increased after version/verack negotiation
static const int INIT_PROTO_VERSION = 209;
+// In this version, 'getheaders' was introduced.
+static const int GETHEADERS_VERSION = 31800;
+
// disconnect from peers older than this proto version
-static const int MIN_PEER_PROTO_VERSION = 209;
+static const int MIN_PEER_PROTO_VERSION = GETHEADERS_VERSION;
// nTime field added to CAddress, starting with this version;
// if possible, avoid requesting addresses nodes older than this
diff --git a/src/wallet.cpp b/src/wallet.cpp
index 19e43f6ec2..65944587f8 100644
--- a/src/wallet.cpp
+++ b/src/wallet.cpp
@@ -158,7 +158,7 @@ bool CWallet::LoadCScript(const CScript& redeemScript)
* these. Do not add them to the wallet and warn. */
if (redeemScript.size() > MAX_SCRIPT_ELEMENT_SIZE)
{
- std::string strAddr = CBitcoinAddress(redeemScript.GetID()).ToString();
+ std::string strAddr = CBitcoinAddress(CScriptID(redeemScript)).ToString();
LogPrintf("%s: Warning: This wallet contains a redeemScript of size %i which exceeds maximum size %i thus can never be redeemed. Do not use address %s.\n",
__func__, redeemScript.size(), MAX_SCRIPT_ELEMENT_SIZE, strAddr);
return true;
diff --git a/src/wallet.h b/src/wallet.h
index fa8a94dfc1..06706655f8 100644
--- a/src/wallet.h
+++ b/src/wallet.h
@@ -95,7 +95,7 @@ public:
/** A CWallet is an extension of a keystore, which also maintains a set of transactions and balances,
* and provides the ability to create new transactions.
*/
-class CWallet : public CCryptoKeyStore, public CWalletInterface
+class CWallet : public CCryptoKeyStore, public CValidationInterface
{
private:
bool SelectCoins(const CAmount& nTargetValue, std::set<std::pair<const CWalletTx*,unsigned int> >& setCoinsRet, CAmount& nValueRet, const CCoinControl *coinControl = NULL) const;
diff --git a/src/walletdb.cpp b/src/walletdb.cpp
index 783f766f6f..ffddd8106b 100644
--- a/src/walletdb.cpp
+++ b/src/walletdb.cpp
@@ -185,7 +185,7 @@ bool CWalletDB::WriteAccount(const string& strAccount, const CAccount& account)
bool CWalletDB::WriteAccountingEntry(const uint64_t nAccEntryNum, const CAccountingEntry& acentry)
{
- return Write(boost::make_tuple(string("acentry"), acentry.strAccount, nAccEntryNum), acentry);
+ return Write(std::make_pair(std::string("acentry"), std::make_pair(acentry.strAccount, nAccEntryNum)), acentry);
}
bool CWalletDB::WriteAccountingEntry(const CAccountingEntry& acentry)
@@ -218,7 +218,7 @@ void CWalletDB::ListAccountCreditDebit(const string& strAccount, list<CAccountin
// Read next record
CDataStream ssKey(SER_DISK, CLIENT_VERSION);
if (fFlags == DB_SET_RANGE)
- ssKey << boost::make_tuple(string("acentry"), (fAllAccounts? string("") : strAccount), uint64_t(0));
+ ssKey << std::make_pair(std::string("acentry"), std::make_pair((fAllAccounts ? string("") : strAccount), uint64_t(0)));
CDataStream ssValue(SER_DISK, CLIENT_VERSION);
int ret = ReadAtCursor(pcursor, ssKey, ssValue, fFlags);
fFlags = DB_NEXT;
@@ -977,11 +977,11 @@ bool CWalletDB::Recover(CDBEnv& dbenv, std::string filename)
bool CWalletDB::WriteDestData(const std::string &address, const std::string &key, const std::string &value)
{
nWalletDBUpdated++;
- return Write(boost::make_tuple(std::string("destdata"), address, key), value);
+ return Write(std::make_pair(std::string("destdata"), std::make_pair(address, key)), value);
}
bool CWalletDB::EraseDestData(const std::string &address, const std::string &key)
{
nWalletDBUpdated++;
- return Erase(boost::make_tuple(string("destdata"), address, key));
+ return Erase(std::make_pair(std::string("destdata"), std::make_pair(address, key)));
}