-rw-r--r--  .gitignore | 3
-rw-r--r--  .travis.yml | 1
-rw-r--r--  configure.ac | 1
-rw-r--r--  doc/README.md | 1
-rw-r--r--  doc/fuzzing.md | 66
-rwxr-xr-x  qa/pull-tester/rpc-tests.py | 56
-rw-r--r--  qa/pull-tester/run-bitcoind-for-test.sh.in | 36
-rwxr-xr-x  qa/rpc-tests/bip68-112-113-p2p.py | 1
-rwxr-xr-x  qa/rpc-tests/fundrawtransaction.py | 19
-rwxr-xr-x  qa/rpc-tests/import-rescan.py | 155
-rwxr-xr-x  qa/rpc-tests/maxuploadtarget.py | 7
-rwxr-xr-x  qa/rpc-tests/mempool_limit.py | 6
-rwxr-xr-x  qa/rpc-tests/mempool_packages.py | 4
-rwxr-xr-x  qa/rpc-tests/mempool_reorg.py | 2
-rwxr-xr-x  qa/rpc-tests/p2p-compactblocks.py | 52
-rwxr-xr-x  qa/rpc-tests/prioritise_transaction.py | 2
-rwxr-xr-x  qa/rpc-tests/pruning.py | 15
-rwxr-xr-x  qa/rpc-tests/replace-by-fee.py | 2
-rwxr-xr-x  qa/rpc-tests/segwit.py | 19
-rwxr-xr-x  qa/rpc-tests/smartfees.py | 6
-rw-r--r--  qa/rpc-tests/test_framework/util.py | 14
-rwxr-xr-x  qa/rpc-tests/wallet.py | 25
-rw-r--r--  src/Makefile.am | 4
-rw-r--r--  src/Makefile.qttest.include | 3
-rw-r--r--  src/Makefile.test.include | 26
-rw-r--r--  src/bench/coin_selection.cpp | 2
-rw-r--r--  src/bench/crypto_hash.cpp | 10
-rw-r--r--  src/bench/verify_script.cpp | 2
-rw-r--r--  src/coins.h | 1
-rw-r--r--  src/compat/byteswap.h | 19
-rw-r--r--  src/core_io.h | 4
-rw-r--r--  src/core_write.cpp | 4
-rw-r--r--  src/crypto/ctaes/ctaes.c | 8
-rw-r--r--  src/cuckoocache.h | 457
-rw-r--r--  src/dbwrapper.h | 11
-rw-r--r--  src/init.cpp | 10
-rw-r--r--  src/leveldb/.travis.yml | 13
-rw-r--r--  src/leveldb/Makefile | 467
-rw-r--r--  src/leveldb/README | 51
-rw-r--r--  src/leveldb/README.md | 39
-rwxr-xr-x  src/leveldb/build_detect_platform | 2
-rw-r--r--  src/leveldb/db/corruption_test.cc | 2
-rw-r--r--  src/leveldb/db/db_bench.cc | 24
-rw-r--r--  src/leveldb/db/db_impl.cc | 194
-rw-r--r--  src/leveldb/db/db_impl.h | 8
-rw-r--r--  src/leveldb/db/db_test.cc | 32
-rw-r--r--  src/leveldb/db/fault_injection_test.cc | 554
-rw-r--r--  src/leveldb/db/leveldbutil.cc (renamed from src/leveldb/db/leveldb_main.cc) | 0
-rw-r--r--  src/leveldb/db/log_reader.cc | 22
-rw-r--r--  src/leveldb/db/log_reader.h | 5
-rw-r--r--  src/leveldb/db/log_test.cc | 101
-rw-r--r--  src/leveldb/db/log_writer.cc | 17
-rw-r--r--  src/leveldb/db/log_writer.h | 6
-rw-r--r--  src/leveldb/db/memtable.h | 5
-rw-r--r--  src/leveldb/db/recovery_test.cc | 324
-rw-r--r--  src/leveldb/db/skiplist.h | 8
-rw-r--r--  src/leveldb/db/skiplist_test.cc | 2
-rw-r--r--  src/leveldb/db/snapshot.h | 1
-rw-r--r--  src/leveldb/db/version_set.cc | 40
-rw-r--r--  src/leveldb/db/version_set.h | 4
-rw-r--r--  src/leveldb/db/write_batch_internal.h | 1
-rw-r--r--  src/leveldb/doc/index.html | 2
-rw-r--r--  src/leveldb/helpers/memenv/memenv.cc | 13
-rw-r--r--  src/leveldb/helpers/memenv/memenv_test.cc | 13
-rw-r--r--  src/leveldb/include/leveldb/cache.h | 11
-rw-r--r--  src/leveldb/include/leveldb/db.h | 4
-rw-r--r--  src/leveldb/include/leveldb/env.h | 18
-rw-r--r--  src/leveldb/include/leveldb/iterator.h | 2
-rw-r--r--  src/leveldb/include/leveldb/options.h | 6
-rw-r--r--  src/leveldb/include/leveldb/status.h | 6
-rw-r--r--  src/leveldb/port/atomic_pointer.h | 19
-rw-r--r--  src/leveldb/port/port_posix.cc | 1
-rw-r--r--  src/leveldb/table/filter_block.cc | 4
-rw-r--r--  src/leveldb/table/format.cc | 3
-rw-r--r--  src/leveldb/table/iterator_wrapper.h | 3
-rw-r--r--  src/leveldb/table/table.cc | 2
-rw-r--r--  src/leveldb/table/table_test.cc | 20
-rw-r--r--  src/leveldb/util/arena.cc | 6
-rw-r--r--  src/leveldb/util/arena.h | 10
-rw-r--r--  src/leveldb/util/bloom.cc | 2
-rw-r--r--  src/leveldb/util/bloom_test.cc | 3
-rw-r--r--  src/leveldb/util/cache.cc | 136
-rw-r--r--  src/leveldb/util/cache_test.cc | 42
-rw-r--r--  src/leveldb/util/env.cc | 4
-rw-r--r--  src/leveldb/util/env_posix.cc | 13
-rw-r--r--  src/leveldb/util/env_win.cc | 37
-rw-r--r--  src/leveldb/util/options.cc | 2
-rw-r--r--  src/leveldb/util/testutil.h | 10
-rw-r--r--  src/net_processing.cpp | 62
-rw-r--r--  src/netbase.cpp | 6
-rw-r--r--  src/primitives/transaction.h | 2
-rw-r--r--  src/qt/bitcoin.cpp | 7
-rw-r--r--  src/qt/bitcoingui.cpp | 6
-rw-r--r--  src/qt/guiutil.cpp | 9
-rw-r--r--  src/qt/guiutil.h | 43
-rw-r--r--  src/qt/modaloverlay.cpp | 7
-rw-r--r--  src/qt/modaloverlay.h | 2
-rw-r--r--  src/qt/paymentrequestplus.cpp | 20
-rw-r--r--  src/qt/rpcconsole.cpp | 61
-rw-r--r--  src/qt/rpcconsole.h | 2
-rw-r--r--  src/qt/test/compattests.cpp | 23
-rw-r--r--  src/qt/test/compattests.h | 19
-rw-r--r--  src/qt/test/rpcnestedtests.cpp | 59
-rw-r--r--  src/qt/test/test_main.cpp | 4
-rw-r--r--  src/qt/walletmodel.cpp | 3
-rw-r--r--  src/random.cpp | 7
-rw-r--r--  src/rest.cpp | 4
-rw-r--r--  src/rpc/blockchain.cpp | 2
-rw-r--r--  src/rpc/mining.cpp | 4
-rw-r--r--  src/rpc/rawtransaction.cpp | 2
-rw-r--r--  src/rpc/server.cpp | 8
-rw-r--r--  src/rpc/server.h | 5
-rw-r--r--  src/script/interpreter.cpp | 10
-rw-r--r--  src/script/sigcache.cpp | 77
-rw-r--r--  src/script/sigcache.h | 9
-rw-r--r--  src/secp256k1/.gitignore | 1
-rw-r--r--  src/secp256k1/.travis.yml | 9
-rw-r--r--  src/secp256k1/Makefile.am | 18
-rw-r--r--  src/secp256k1/build-aux/m4/bitcoin_secp.m4 | 4
-rw-r--r--  src/secp256k1/configure.ac | 30
-rw-r--r--  src/secp256k1/include/secp256k1.h | 14
-rw-r--r--  src/secp256k1/include/secp256k1_schnorr.h | 173
-rw-r--r--  src/secp256k1/src/ecdsa_impl.h | 18
-rw-r--r--  src/secp256k1/src/ecmult_const_impl.h | 2
-rw-r--r--  src/secp256k1/src/ecmult_gen_impl.h | 2
-rw-r--r--  src/secp256k1/src/ecmult_impl.h | 23
-rw-r--r--  src/secp256k1/src/field.h | 7
-rw-r--r--  src/secp256k1/src/field_impl.h | 2
-rw-r--r--  src/secp256k1/src/group.h | 4
-rw-r--r--  src/secp256k1/src/group_impl.h | 78
-rw-r--r--  src/secp256k1/src/java/org/bitcoin/NativeSecp256k1.java | 34
-rw-r--r--  src/secp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java | 21
-rw-r--r--  src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c | 34
-rw-r--r--  src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h | 8
-rwxr-xr-x[-rw-r--r--]  src/secp256k1/src/modules/recovery/main_impl.h | 4
-rw-r--r--  src/secp256k1/src/modules/schnorr/Makefile.am.include | 10
-rw-r--r--  src/secp256k1/src/modules/schnorr/main_impl.h | 164
-rw-r--r--  src/secp256k1/src/modules/schnorr/schnorr.h | 20
-rw-r--r--  src/secp256k1/src/modules/schnorr/schnorr_impl.h | 207
-rw-r--r--  src/secp256k1/src/modules/schnorr/tests_impl.h | 175
-rw-r--r--  src/secp256k1/src/scalar.h | 4
-rw-r--r--  src/secp256k1/src/scalar_4x64_impl.h | 26
-rw-r--r--  src/secp256k1/src/scalar_impl.h | 39
-rw-r--r--  src/secp256k1/src/scalar_low.h | 15
-rw-r--r--  src/secp256k1/src/scalar_low_impl.h | 114
-rwxr-xr-x[-rw-r--r--]  src/secp256k1/src/secp256k1.c | 4
-rw-r--r--  src/secp256k1/src/tests.c | 20
-rw-r--r--  src/secp256k1/src/tests_exhaustive.c | 329
-rw-r--r--  src/serialize.h | 36
-rw-r--r--  src/test/DoS_tests.cpp | 3
-rw-r--r--  src/test/base58_tests.cpp | 2
-rw-r--r--  src/test/blockencodings_tests.cpp | 6
-rw-r--r--  src/test/bswap_tests.cpp | 26
-rw-r--r--  src/test/coins_tests.cpp | 364
-rw-r--r--  src/test/cuckoocache_tests.cpp | 394
-rw-r--r--  src/test/data/bitcoin-util-test.json | 47
-rw-r--r--  src/test/data/blanktxv1.hex (renamed from src/test/data/blanktx.hex) | 0
-rw-r--r--  src/test/data/blanktxv2.hex | 1
-rw-r--r--  src/test/data/blanktxv2.json | 11
-rw-r--r--  src/test/data/tx_valid.json | 2
-rw-r--r--  src/test/data/txcreate1.hex | 2
-rw-r--r--  src/test/data/txcreate1.json | 8
-rw-r--r--  src/test/data/txcreate2.hex | 2
-rw-r--r--  src/test/data/txcreate2.json | 8
-rw-r--r--  src/test/data/txcreatedata1.hex | 2
-rw-r--r--  src/test/data/txcreatedata2.hex | 2
-rw-r--r--  src/test/data/txcreatedata2.json | 8
-rw-r--r--  src/test/data/txcreatedata_seq0.hex | 2
-rw-r--r--  src/test/data/txcreatedata_seq0.json | 8
-rw-r--r--  src/test/data/txcreatesignv1.hex (renamed from src/test/data/txcreatesign.hex) | 0
-rw-r--r--  src/test/data/txcreatesignv1.json (renamed from src/test/data/txcreatesign.json) | 0
-rw-r--r--  src/test/data/txcreatesignv2.hex | 1
-rw-r--r--  src/test/hash_tests.cpp | 6
-rw-r--r--  src/test/mempool_tests.cpp | 55
-rw-r--r--  src/test/miner_tests.cpp | 2
-rw-r--r--  src/test/script_tests.cpp | 6
-rw-r--r--  src/test/test_bitcoin.cpp | 4
-rw-r--r--  src/test/test_bitcoin_fuzzy.cpp | 256
-rw-r--r--  src/test/txvalidationcache_tests.cpp | 1
-rw-r--r--  src/timedata.cpp | 5
-rw-r--r--  src/torcontrol.cpp | 8
-rw-r--r--  src/txmempool.cpp | 22
-rw-r--r--  src/txmempool.h | 9
-rw-r--r--  src/util.cpp | 6
-rw-r--r--  src/util.h | 2
-rw-r--r--  src/validation.cpp | 102
-rw-r--r--  src/validation.h | 4
-rw-r--r--  src/wallet/rpcwallet.cpp | 4
-rw-r--r--  src/wallet/test/crypto_tests.cpp | 32
-rw-r--r--  src/wallet/test/wallet_tests.cpp | 74
-rw-r--r--  src/wallet/wallet.cpp | 60
-rw-r--r--  src/wallet/wallet.h | 5
-rw-r--r--  src/warnings.cpp | 89
-rw-r--r--  src/warnings.h | 21
-rw-r--r--  src/zmq/zmqpublishnotifier.cpp | 5
195 files changed, 5329 insertions, 1828 deletions
diff --git a/.gitignore b/.gitignore
index b9db7cbd39..8ace3c7123 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@ src/bitcoind
src/bitcoin-cli
src/bitcoin-tx
src/test/test_bitcoin
+src/test/test_bitcoin_fuzzy
src/qt/test/test_bitcoin-qt
# autoreconf
@@ -55,7 +56,6 @@ src/qt/test/moc*.cpp
*.o
*.o-*
*.patch
-.bitcoin
*.a
*.pb.cc
*.pb.h
@@ -101,7 +101,6 @@ coverage_percent.txt
linux-coverage-build
linux-build
win32-build
-qa/pull-tester/run-bitcoind-for-test.sh
qa/pull-tester/tests_config.py
qa/cache/*
diff --git a/.travis.yml b/.travis.yml
index d201cea732..6570e6571b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -60,6 +60,7 @@ script:
- mkdir build && cd build
- ../configure $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false)
- make $MAKEJOBS $GOAL || ( echo "Build failure. Verbose build follows." && make $GOAL V=1 ; false )
+ - make distdir
- export LD_LIBRARY_PATH=$TRAVIS_BUILD_DIR/depends/$HOST/lib
- if [ "$RUN_TESTS" = "true" ]; then make $MAKEJOBS check VERBOSE=1; fi
- if [ "$RUN_TESTS" = "true" ]; then qa/pull-tester/rpc-tests.py --coverage; fi
diff --git a/configure.ac b/configure.ac
index 7357e4ec9f..ced258e02e 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1077,7 +1077,6 @@ AC_SUBST(ZMQ_LIBS)
AC_SUBST(PROTOBUF_LIBS)
AC_SUBST(QR_LIBS)
AC_CONFIG_FILES([Makefile src/Makefile doc/man/Makefile share/setup.nsi share/qt/Info.plist src/test/buildenv.py])
-AC_CONFIG_FILES([qa/pull-tester/run-bitcoind-for-test.sh],[chmod +x qa/pull-tester/run-bitcoind-for-test.sh])
AC_CONFIG_FILES([qa/pull-tester/tests_config.py],[chmod +x qa/pull-tester/tests_config.py])
AC_CONFIG_FILES([contrib/devtools/split-debug.sh],[chmod +x contrib/devtools/split-debug.sh])
AC_CONFIG_LINKS([qa/pull-tester/rpc-tests.py:qa/pull-tester/rpc-tests.py])
diff --git a/doc/README.md b/doc/README.md
index 8b9c0ea262..36684e5401 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -68,6 +68,7 @@ The Bitcoin repo's [root README](/README.md) contains relevant information on th
### Miscellaneous
- [Assets Attribution](assets-attribution.md)
- [Files](files.md)
+- [Fuzz-testing](fuzzing.md)
- [Reduce Traffic](reduce-traffic.md)
- [Tor Support](tor.md)
- [Init Scripts (systemd/upstart/openrc)](init.md)
diff --git a/doc/fuzzing.md b/doc/fuzzing.md
new file mode 100644
index 0000000000..bf3ad17861
--- /dev/null
+++ b/doc/fuzzing.md
@@ -0,0 +1,66 @@
+Fuzz-testing Bitcoin Core
+==========================
+
+A special test harness, `test_bitcoin_fuzzy`, is included to provide an easy
+entry point for fuzzers and the like. This document describes how to use it
+with AFL.
+
+Building AFL
+-------------
+
+It is recommended to always use the latest version of afl:
+```
+wget http://lcamtuf.coredump.cx/afl/releases/afl-latest.tgz
+tar -zxvf afl-latest.tgz
+cd afl-<version>
+make
+export AFLPATH=$PWD
+```
+
+Instrumentation
+----------------
+
+To build Bitcoin Core using AFL instrumentation (this assumes that the
+`AFLPATH` was set as above):
+```
+./configure --disable-ccache --disable-shared --enable-tests CC=${AFLPATH}/afl-gcc CXX=${AFLPATH}/afl-g++
+export AFL_HARDEN=1
+cd src/
+make test/test_bitcoin_fuzzy
+```
+We disable ccache because we don't want to pollute the ccache with instrumented
+objects, and similarly don't want to use non-instrumented cached objects linked
+in.
+
+Preparing fuzzing
+------------------
+
+AFL needs an input directory with example inputs, and an output directory where
+it will place the test cases it finds. These can be anywhere in the file system;
+we'll define environment variables to make them easy to reference.
+
+```
+mkdir inputs
+AFLIN=$PWD/inputs
+mkdir outputs
+AFLOUT=$PWD/outputs
+```
+
+Example inputs are available from:
+
+- https://download.visucore.com/bitcoin/bitcoin_fuzzy_in.tar.xz
+- http://strateman.ninja/fuzzing.tar.xz
+
+Extract these (or other starting inputs) into the `inputs` directory before starting fuzzing.
+
+Fuzzing
+--------
+
+To start the actual fuzzing use:
+```
+$AFLPATH/afl-fuzz -i ${AFLIN} -o ${AFLOUT} -m52 -- test/test_bitcoin_fuzzy
+```
+
+You may have to change a few kernel parameters for optimal fuzzing performance;
+`afl-fuzz` will print an error and a suggestion if so.
+
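The harness described above is a stdin-driven program: AFL feeds each generated test case on standard input and watches for crashes or sanitizer aborts. Below is a minimal, self-contained sketch of that general shape. It is illustrative only; the names are hypothetical and it does not reproduce the actual `src/test/test_bitcoin_fuzzy.cpp` added by this change, which attempts to deserialize real Bitcoin data structures from the input.

```cpp
// Illustrative stdin-driven fuzz target; names and structure are hypothetical,
// not the contents of src/test/test_bitcoin_fuzzy.cpp.
#include <cstdint>
#include <iostream>
#include <iterator>
#include <vector>

// Stand-in "parser" for a real deserializer (e.g. a transaction decoder).
static bool parse_input(const std::vector<uint8_t>& buf)
{
    if (buf.empty()) return false;
    // A real harness would pick a target type from the first bytes and then
    // try to deserialize the remainder, catching expected decode errors.
    uint8_t selector = buf[0];
    size_t sum = 0;
    for (size_t i = 1; i < buf.size(); ++i) sum += buf[i] ^ selector;
    return (sum & 1) == 0;
}

int main()
{
    // AFL hands the generated test case on stdin; read all of it.
    std::vector<uint8_t> input((std::istreambuf_iterator<char>(std::cin)),
                               std::istreambuf_iterator<char>());
    parse_input(input); // Crashes and aborts here are what AFL is looking for.
    return 0;
}
```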
diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py
index 58bd00fdfc..be31cbbdd3 100755
--- a/qa/pull-tester/rpc-tests.py
+++ b/qa/pull-tester/rpc-tests.py
@@ -100,78 +100,88 @@ if ENABLE_ZMQ:
testScripts = [
# longest test should go first, to favor running tests in parallel
- 'p2p-fullblocktest.py',
+ 'wallet-hd.py',
'walletbackup.py',
- 'bip68-112-113-p2p.py',
+ # vv Tests less than 5m vv
+ 'p2p-fullblocktest.py',
+ 'fundrawtransaction.py',
+ 'p2p-compactblocks.py',
+ 'segwit.py',
+ # vv Tests less than 2m vv
'wallet.py',
'wallet-accounts.py',
- 'wallet-hd.py',
+ 'p2p-segwit.py',
'wallet-dump.py',
'listtransactions.py',
+ # vv Tests less than 60s vv
+ 'sendheaders.py',
+ 'zapwallettxes.py',
+ 'importmulti.py',
+ 'mempool_limit.py',
+ 'merkle_blocks.py',
'receivedby.py',
+ 'abandonconflict.py',
+ 'bip68-112-113-p2p.py',
+ 'rawtransactions.py',
+ 'reindex.py',
+ # vv Tests less than 30s vv
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
- 'p2p-segwit.py',
- 'segwit.py',
'txn_clone.py',
'getchaintips.py',
- 'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
- 'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
- 'zapwallettxes.py',
'proxy_test.py',
- 'merkle_blocks.py',
- 'fundrawtransaction.py',
'signrawtransactions.py',
'nodehandling.py',
- 'reindex.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
- 'sendheaders.py',
'keypool.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
- 'abandonconflict.py',
'p2p-versionbits-warning.py',
'preciousblock.py',
'importprunedfunds.py',
'signmessages.py',
- 'p2p-compactblocks.py',
'nulldummy.py',
- 'importmulti.py',
+ 'import-rescan.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
+ 'pruning.py',
+ # vv Tests less than 20m vv
+ 'smartfees.py',
+ # vv Tests less than 5m vv
+ 'maxuploadtarget.py',
+ 'mempool_packages.py',
+ # vv Tests less than 2m vv
+ 'bip68-sequence.py',
+ 'getblocktemplate_longpoll.py',
+ # vv Tests less than 60s vv
'bip9-softforks.py',
+ 'p2p-feefilter.py',
+ 'rpcbind_test.py',
+ # vv Tests less than 30s vv
'bip65-cltv.py',
'bip65-cltv-p2p.py',
- 'bip68-sequence.py',
'bipdersig-p2p.py',
'bipdersig.py',
- 'getblocktemplate_longpoll.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
- 'rpcbind_test.py',
- 'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
- 'mempool_packages.py',
- 'maxuploadtarget.py',
'replace-by-fee.py',
- 'p2p-feefilter.py',
- 'pruning.py', # leave pruning last as it takes a REALLY long time
]
diff --git a/qa/pull-tester/run-bitcoind-for-test.sh.in b/qa/pull-tester/run-bitcoind-for-test.sh.in
deleted file mode 100644
index 14ae08e4e5..0000000000
--- a/qa/pull-tester/run-bitcoind-for-test.sh.in
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-# Copyright (c) 2013-2014 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#
-DATADIR="@abs_top_builddir@/.bitcoin"
-rm -rf "$DATADIR"
-mkdir -p "$DATADIR"/regtest
-touch "$DATADIR/regtest/debug.log"
-tail -q -n 1 -F "$DATADIR/regtest/debug.log" | grep -m 1 -q "Done loading" &
-WAITER=$!
-PORT=`expr 10000 + $$ % 55536`
-"@abs_top_builddir@/src/bitcoind@EXEEXT@" -connect=0.0.0.0 -datadir="$DATADIR" -rpcuser=user -rpcpassword=pass -listen -keypool=3 -debug -debug=net -logtimestamps -checkmempool=0 -relaypriority=0 -port=$PORT -whitelist=127.0.0.1 -regtest -rpcport=`expr $PORT + 1` &
-BITCOIND=$!
-
-#Install a watchdog.
-(sleep 10 && kill -0 $WAITER 2>/dev/null && kill -9 $BITCOIND $$)&
-wait $WAITER
-
-if [ -n "$TIMEOUT" ]; then
- timeout "$TIMEOUT"s "$@" $PORT
- RETURN=$?
-else
- "$@" $PORT
- RETURN=$?
-fi
-
-(sleep 15 && kill -0 $BITCOIND 2>/dev/null && kill -9 $BITCOIND $$)&
-kill $BITCOIND && wait $BITCOIND
-
-# timeout returns 124 on timeout, otherwise the return value of the child
-
-# If $RETURN is not 0, the test failed. Dump the tail of the debug log.
-if [ $RETURN -ne 0 ]; then tail -n 200 $DATADIR/regtest/debug.log; fi
-
-exit $RETURN
diff --git a/qa/rpc-tests/bip68-112-113-p2p.py b/qa/rpc-tests/bip68-112-113-p2p.py
index 55b3e2a04a..fc3efb1abf 100755
--- a/qa/rpc-tests/bip68-112-113-p2p.py
+++ b/qa/rpc-tests/bip68-112-113-p2p.py
@@ -289,6 +289,7 @@ class BIP68_112_113Test(ComparisonTestFramework):
# BIP113 test transaction will be modified before each use to put in appropriate block time
bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
+ bip113tx_v1.nVersion = 1
bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2.nVersion = 2
diff --git a/qa/rpc-tests/fundrawtransaction.py b/qa/rpc-tests/fundrawtransaction.py
index 8c45578fcf..82e148c55a 100755
--- a/qa/rpc-tests/fundrawtransaction.py
+++ b/qa/rpc-tests/fundrawtransaction.py
@@ -484,6 +484,23 @@ class RawTransactionsTest(BitcoinTestFramework):
self.is_network_split=False
self.sync_all()
+ # drain the keypool
+ self.nodes[1].getnewaddress()
+ inputs = []
+ outputs = {self.nodes[0].getnewaddress():1.1}
+ rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
+ # fund a transaction that requires a new key for the change output
+ # creating the key must be impossible because the wallet is locked
+ try:
+ fundedTx = self.nodes[1].fundrawtransaction(rawTx)
+ raise AssertionError("Wallet unlocked without passphrase")
+ except JSONRPCException as e:
+ assert('Keypool ran out' in e.error['message'])
+
+ #refill the keypool
+ self.nodes[1].walletpassphrase("test", 100)
+ self.nodes[1].walletlock()
+
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.2)
raise AssertionError("Wallet unlocked without passphrase")
@@ -498,7 +515,7 @@ class RawTransactionsTest(BitcoinTestFramework):
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
- self.nodes[1].walletpassphrase("test", 100)
+ self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
diff --git a/qa/rpc-tests/import-rescan.py b/qa/rpc-tests/import-rescan.py
new file mode 100755
index 0000000000..e683df26db
--- /dev/null
+++ b/qa/rpc-tests/import-rescan.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (start_nodes, connect_nodes, sync_blocks, assert_equal)
+from decimal import Decimal
+
+import collections
+import enum
+import itertools
+import functools
+
+Call = enum.Enum("Call", "single multi")
+Data = enum.Enum("Data", "address pub priv")
+ImportNode = collections.namedtuple("ImportNode", "rescan")
+
+
+def call_import_rpc(call, data, address, scriptPubKey, pubkey, key, label, node, rescan):
+ """Helper that calls a wallet import RPC on a bitcoin node."""
+ watchonly = data != Data.priv
+ if call == Call.single:
+ if data == Data.address:
+ response = node.importaddress(address, label, rescan)
+ elif data == Data.pub:
+ response = node.importpubkey(pubkey, label, rescan)
+ elif data == Data.priv:
+ response = node.importprivkey(key, label, rescan)
+ assert_equal(response, None)
+ elif call == Call.multi:
+ response = node.importmulti([{
+ "scriptPubKey": {
+ "address": address
+ },
+ "pubkeys": [pubkey] if data == Data.pub else [],
+ "keys": [key] if data == Data.priv else [],
+ "label": label,
+ "watchonly": watchonly
+ }], {"rescan": rescan})
+ assert_equal(response, [{"success": True}])
+ return watchonly
+
+
+# List of RPCs that import a wallet key or address in various ways.
+IMPORT_RPCS = [functools.partial(call_import_rpc, call, data) for call, data in itertools.product(Call, Data)]
+
+# List of bitcoind nodes that will import keys.
+IMPORT_NODES = [
+ ImportNode(rescan=True),
+ ImportNode(rescan=False),
+]
+
+
+class ImportRescanTest(BitcoinTestFramework):
+ def __init__(self):
+ super().__init__()
+ self.num_nodes = 1 + len(IMPORT_NODES)
+
+ def setup_network(self):
+ extra_args = [["-debug=1"] for _ in range(self.num_nodes)]
+ self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
+ for i in range(1, self.num_nodes):
+ connect_nodes(self.nodes[i], 0)
+
+ def run_test(self):
+ # Create one transaction on node 0 with a unique amount and label for
+ # each possible type of wallet import RPC.
+ import_rpc_variants = []
+ for i, import_rpc in enumerate(IMPORT_RPCS):
+ label = "label{}".format(i)
+ addr = self.nodes[0].validateaddress(self.nodes[0].getnewaddress(label))
+ key = self.nodes[0].dumpprivkey(addr["address"])
+ amount = 24.9375 - i * .0625
+ txid = self.nodes[0].sendtoaddress(addr["address"], amount)
+ import_rpc = functools.partial(import_rpc, addr["address"], addr["scriptPubKey"], addr["pubkey"], key,
+ label)
+ import_rpc_variants.append((import_rpc, label, amount, txid, addr))
+
+ self.nodes[0].generate(1)
+ assert_equal(self.nodes[0].getrawmempool(), [])
+ sync_blocks(self.nodes)
+
+ # For each importing node and variation of wallet import RPC, invoke
+ # the RPC and check the results from getbalance and listtransactions.
+ for node, import_node in zip(self.nodes[1:], IMPORT_NODES):
+ for import_rpc, label, amount, txid, addr in import_rpc_variants:
+ watchonly = import_rpc(node, import_node.rescan)
+
+ balance = node.getbalance(label, 0, True)
+ if import_node.rescan:
+ assert_equal(balance, amount)
+ else:
+ assert_equal(balance, 0)
+
+ txs = node.listtransactions(label, 10000, 0, True)
+ if import_node.rescan:
+ assert_equal(len(txs), 1)
+ assert_equal(txs[0]["account"], label)
+ assert_equal(txs[0]["address"], addr["address"])
+ assert_equal(txs[0]["amount"], amount)
+ assert_equal(txs[0]["category"], "receive")
+ assert_equal(txs[0]["label"], label)
+ assert_equal(txs[0]["txid"], txid)
+ assert_equal(txs[0]["confirmations"], 1)
+ assert_equal("trusted" not in txs[0], True)
+ if watchonly:
+ assert_equal(txs[0]["involvesWatchonly"], True)
+ else:
+ assert_equal("involvesWatchonly" not in txs[0], True)
+ else:
+ assert_equal(len(txs), 0)
+
+ # Create spends for all the imported addresses.
+ spend_txids = []
+ fee = self.nodes[0].getnetworkinfo()["relayfee"]
+ for import_rpc, label, amount, txid, addr in import_rpc_variants:
+ raw_tx = self.nodes[0].getrawtransaction(txid)
+ decoded_tx = self.nodes[0].decoderawtransaction(raw_tx)
+ input_vout = next(out["n"] for out in decoded_tx["vout"]
+ if out["scriptPubKey"]["addresses"] == [addr["address"]])
+ inputs = [{"txid": txid, "vout": input_vout}]
+ outputs = {self.nodes[0].getnewaddress(): Decimal(amount) - fee}
+ raw_spend_tx = self.nodes[0].createrawtransaction(inputs, outputs)
+ signed_spend_tx = self.nodes[0].signrawtransaction(raw_spend_tx)
+ spend_txid = self.nodes[0].sendrawtransaction(signed_spend_tx["hex"])
+ spend_txids.append(spend_txid)
+
+ self.nodes[0].generate(1)
+ assert_equal(self.nodes[0].getrawmempool(), [])
+ sync_blocks(self.nodes)
+
+ # Check the results from getbalance and listtransactions after the spends.
+ for node, import_node in zip(self.nodes[1:], IMPORT_NODES):
+ txs = node.listtransactions("*", 10000, 0, True)
+ for (import_rpc, label, amount, txid, addr), spend_txid in zip(import_rpc_variants, spend_txids):
+ balance = node.getbalance(label, 0, True)
+ spend_tx = [tx for tx in txs if tx["txid"] == spend_txid]
+ if import_node.rescan:
+ assert_equal(balance, amount)
+ assert_equal(len(spend_tx), 1)
+ assert_equal(spend_tx[0]["account"], "")
+ assert_equal(spend_tx[0]["amount"] + spend_tx[0]["fee"], -amount)
+ assert_equal(spend_tx[0]["category"], "send")
+ assert_equal("label" not in spend_tx[0], True)
+ assert_equal(spend_tx[0]["confirmations"], 1)
+ assert_equal("trusted" not in spend_tx[0], True)
+ assert_equal("involvesWatchonly" not in txs[0], True)
+ else:
+ assert_equal(balance, 0)
+ assert_equal(spend_tx, [])
+
+
+if __name__ == "__main__":
+ ImportRescanTest().main()
diff --git a/qa/rpc-tests/maxuploadtarget.py b/qa/rpc-tests/maxuploadtarget.py
index 83168a7ce7..9340e899eb 100755
--- a/qa/rpc-tests/maxuploadtarget.py
+++ b/qa/rpc-tests/maxuploadtarget.py
@@ -86,6 +86,9 @@ class MaxUploadTest(BitcoinTestFramework):
self.setup_clean_chain = True
self.num_nodes = 1
+ # Cache for utxos, as the listunspent may take a long time later in the test
+ self.utxo_cache = []
+
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
@@ -118,7 +121,7 @@ class MaxUploadTest(BitcoinTestFramework):
# Test logic begins here
# Now mine a big block
- mine_large_block(self.nodes[0])
+ mine_large_block(self.nodes[0], self.utxo_cache)
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
@@ -129,7 +132,7 @@ class MaxUploadTest(BitcoinTestFramework):
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
- mine_large_block(self.nodes[0])
+ mine_large_block(self.nodes[0], self.utxo_cache)
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
diff --git a/qa/rpc-tests/mempool_limit.py b/qa/rpc-tests/mempool_limit.py
index 4438c152df..154ae59c26 100755
--- a/qa/rpc-tests/mempool_limit.py
+++ b/qa/rpc-tests/mempool_limit.py
@@ -26,7 +26,7 @@ class MempoolLimitTest(BitcoinTestFramework):
def run_test(self):
txids = []
- utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 90)
+ utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 91)
#create a mempool tx that will be evicted
us0 = utxos.pop()
@@ -41,9 +41,9 @@ class MempoolLimitTest(BitcoinTestFramework):
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
base_fee = relayfee*100
- for i in range (4):
+ for i in range (3):
txids.append([])
- txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[30*i:30*i+30], (i+1)*base_fee)
+ txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
# by now, the tx should be evicted, check confirmation state
assert(txid not in self.nodes[0].getrawmempool())
diff --git a/qa/rpc-tests/mempool_packages.py b/qa/rpc-tests/mempool_packages.py
index 45dc0e65c4..f605e7524f 100755
--- a/qa/rpc-tests/mempool_packages.py
+++ b/qa/rpc-tests/mempool_packages.py
@@ -20,8 +20,8 @@ class MempoolPackagesTest(BitcoinTestFramework):
def setup_network(self):
self.nodes = []
- self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-relaypriority=0", "-debug"]))
- self.nodes.append(start_node(1, self.options.tmpdir, ["-maxorphantx=1000", "-relaypriority=0", "-limitancestorcount=5", "-debug"]))
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug"]))
+ self.nodes.append(start_node(1, self.options.tmpdir, ["-maxorphantx=1000", "-limitancestorcount=5", "-debug"]))
connect_nodes(self.nodes[0], 1)
self.is_network_split = False
self.sync_all()
diff --git a/qa/rpc-tests/mempool_reorg.py b/qa/rpc-tests/mempool_reorg.py
index 301b094eb0..dd88aae4f2 100755
--- a/qa/rpc-tests/mempool_reorg.py
+++ b/qa/rpc-tests/mempool_reorg.py
@@ -55,7 +55,7 @@ class MempoolCoinbaseTest(BitcoinTestFramework):
# Create a block-height-locked transaction which will be invalid after reorg
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99})
# Set the time lock
- timelock_tx = timelock_tx.replace("ffffffff", "11111111", 1)
+ timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
diff --git a/qa/rpc-tests/p2p-compactblocks.py b/qa/rpc-tests/p2p-compactblocks.py
index ab4b809ded..fc1f16c6d2 100755
--- a/qa/rpc-tests/p2p-compactblocks.py
+++ b/qa/rpc-tests/p2p-compactblocks.py
@@ -764,6 +764,54 @@ class CompactBlocksTest(BitcoinTestFramework):
msg.announce = True
peer.send_and_ping(msg)
+ def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
+ assert(len(self.utxos))
+
+ def announce_cmpct_block(node, peer):
+ utxo = self.utxos.pop(0)
+ block = self.build_block_with_transactions(node, utxo, 5)
+
+ cmpct_block = HeaderAndShortIDs()
+ cmpct_block.initialize_from_block(block)
+ msg = msg_cmpctblock(cmpct_block.to_p2p())
+ peer.send_and_ping(msg)
+ with mininode_lock:
+ assert(peer.last_getblocktxn is not None)
+ return block, cmpct_block
+
+ block, cmpct_block = announce_cmpct_block(node, stalling_peer)
+
+ for tx in block.vtx[1:]:
+ delivery_peer.send_message(msg_tx(tx))
+ delivery_peer.sync_with_ping()
+ mempool = node.getrawmempool()
+ for tx in block.vtx[1:]:
+ assert(tx.hash in mempool)
+
+ delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
+ assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+
+ # Now test that delivering an invalid compact block won't break relay
+
+ block, cmpct_block = announce_cmpct_block(node, stalling_peer)
+ for tx in block.vtx[1:]:
+ delivery_peer.send_message(msg_tx(tx))
+ delivery_peer.sync_with_ping()
+
+ cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ]
+ cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
+
+ cmpct_block.use_witness = True
+ delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
+ assert(int(node.getbestblockhash(), 16) != block.sha256)
+
+ msg = msg_blocktxn()
+ msg.block_transactions.blockhash = block.sha256
+ msg.block_transactions.transactions = block.vtx[1:]
+ stalling_peer.send_and_ping(msg)
+ assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
# Setup the p2p connections and start up the network thread.
@@ -848,6 +896,10 @@ class CompactBlocksTest(BitcoinTestFramework):
self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)
+ print("\tTesting reconstructing compact blocks from all peers...")
+ self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node)
+ sync_blocks(self.nodes)
+
# Advance to segwit activation
print ("\nAdvancing to segwit activation\n")
self.activate_segwit(self.nodes[1])
diff --git a/qa/rpc-tests/prioritise_transaction.py b/qa/rpc-tests/prioritise_transaction.py
index 85afeab2e3..b7459c80cb 100755
--- a/qa/rpc-tests/prioritise_transaction.py
+++ b/qa/rpc-tests/prioritise_transaction.py
@@ -39,7 +39,7 @@ class PrioritiseTransactionTest(BitcoinTestFramework):
txids.append([])
start_range = i * range_size
end_range = start_range + range_size
- txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], (i+1)*base_fee)
+ txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
# Make sure that the size of each group of transactions exceeds
# MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
diff --git a/qa/rpc-tests/pruning.py b/qa/rpc-tests/pruning.py
index 6635b0dff2..78b8938e4a 100755
--- a/qa/rpc-tests/pruning.py
+++ b/qa/rpc-tests/pruning.py
@@ -13,6 +13,9 @@
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
+import time
+import os
+
def calc_usage(blockdir):
return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
@@ -24,6 +27,10 @@ class PruneTest(BitcoinTestFramework):
self.setup_clean_chain = True
self.num_nodes = 3
+ # Cache for utxos, as the listunspent may take a long time later in the test
+ self.utxo_cache_0 = []
+ self.utxo_cache_1 = []
+
def setup_network(self):
self.nodes = []
self.is_network_split = False
@@ -48,7 +55,7 @@ class PruneTest(BitcoinTestFramework):
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data
for i in range(645):
- mine_large_block(self.nodes[0])
+ mine_large_block(self.nodes[0], self.utxo_cache_0)
sync_blocks(self.nodes[0:3])
@@ -60,7 +67,7 @@ class PruneTest(BitcoinTestFramework):
print("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
for i in range(25):
- mine_large_block(self.nodes[0])
+ mine_large_block(self.nodes[0], self.utxo_cache_0)
waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"):
@@ -87,13 +94,13 @@ class PruneTest(BitcoinTestFramework):
# Mine 24 blocks in node 1
for i in range(24):
if j == 0:
- mine_large_block(self.nodes[1])
+ mine_large_block(self.nodes[1], self.utxo_cache_1)
else:
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0
for i in range(25):
- mine_large_block(self.nodes[0])
+ mine_large_block(self.nodes[0], self.utxo_cache_0)
# Create connections in the order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0)
diff --git a/qa/rpc-tests/replace-by-fee.py b/qa/rpc-tests/replace-by-fee.py
index 34c0f9d795..8aba06c60c 100755
--- a/qa/rpc-tests/replace-by-fee.py
+++ b/qa/rpc-tests/replace-by-fee.py
@@ -76,7 +76,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug",
- "-relaypriority=0", "-whitelist=127.0.0.1",
+ "-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
diff --git a/qa/rpc-tests/segwit.py b/qa/rpc-tests/segwit.py
index 41a1b3b20f..be6fae5088 100755
--- a/qa/rpc-tests/segwit.py
+++ b/qa/rpc-tests/segwit.py
@@ -13,6 +13,7 @@ from test_framework.mininode import sha256, ripemd160, CTransaction, CTxIn, COut
from test_framework.address import script_to_p2sh, key_to_p2pkh
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG
from io import BytesIO
+from test_framework.mininode import FromHex
NODE_0 = 0
NODE_1 = 1
@@ -84,8 +85,8 @@ class SegWitTest(BitcoinTestFramework):
def setup_network(self):
self.nodes = []
- self.nodes.append(start_node(0, self.options.tmpdir, ["-logtimemicros", "-debug", "-walletprematurewitness"]))
- self.nodes.append(start_node(1, self.options.tmpdir, ["-logtimemicros", "-debug", "-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness"]))
+ self.nodes.append(start_node(0, self.options.tmpdir, ["-logtimemicros", "-debug", "-walletprematurewitness", "-rpcserialversion=0"]))
+ self.nodes.append(start_node(1, self.options.tmpdir, ["-logtimemicros", "-debug", "-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-logtimemicros", "-debug", "-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness"]))
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 1)
@@ -211,7 +212,19 @@ class SegWitTest(BitcoinTestFramework):
block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[2].getrawmempool()), 0)
- assert_equal(len(self.nodes[2].getblock(block[0])["tx"]), 5)
+ segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
+ assert_equal(len(segwit_tx_list), 5)
+
+ print("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
+ assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
+ assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
+ for i in range(len(segwit_tx_list)):
+ tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
+ assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))
+ assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))
+ assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
+ assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
+ assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))
print("Verify witness txs without witness data are invalid after the fork")
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False)
diff --git a/qa/rpc-tests/smartfees.py b/qa/rpc-tests/smartfees.py
index 74a74f679a..2c56f954a2 100755
--- a/qa/rpc-tests/smartfees.py
+++ b/qa/rpc-tests/smartfees.py
@@ -159,7 +159,7 @@ class EstimateFeeTest(BitcoinTestFramework):
self.nodes = []
# Use node0 to mine blocks for input splitting
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
- "-relaypriority=0", "-whitelist=127.0.0.1"]))
+ "-whitelist=127.0.0.1"]))
print("This test is time consuming, please be patient")
print("Splitting inputs to small size so we can generate low priority tx's")
@@ -197,12 +197,12 @@ class EstimateFeeTest(BitcoinTestFramework):
# (17k is room enough for 110 or so transactions)
self.nodes.append(start_node(1, self.options.tmpdir,
["-blockprioritysize=1500", "-blockmaxsize=17000",
- "-maxorphantx=1000", "-relaypriority=0", "-debug=estimatefee"]))
+ "-maxorphantx=1000", "-debug=estimatefee"]))
connect_nodes(self.nodes[1], 0)
# Node2 is a stingy miner, that
# produces too small blocks (room for only 55 or so transactions)
- node2args = ["-blockprioritysize=0", "-blockmaxsize=8000", "-maxorphantx=1000", "-relaypriority=0"]
+ node2args = ["-blockprioritysize=0", "-blockmaxsize=8000", "-maxorphantx=1000"]
self.nodes.append(start_node(2, self.options.tmpdir, node2args))
connect_nodes(self.nodes[0], 2)
diff --git a/qa/rpc-tests/test_framework/util.py b/qa/rpc-tests/test_framework/util.py
index 1d6c9aa230..85898d9f32 100644
--- a/qa/rpc-tests/test_framework/util.py
+++ b/qa/rpc-tests/test_framework/util.py
@@ -654,10 +654,10 @@ def create_tx(node, coinbase, to_address, amount):
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
-def create_lots_of_big_transactions(node, txouts, utxos, fee):
+def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
- for _ in range(len(utxos)):
+ for _ in range(num):
t = utxos.pop()
inputs=[{ "txid" : t["txid"], "vout" : t["vout"]}]
outputs = {}
@@ -672,13 +672,17 @@ def create_lots_of_big_transactions(node, txouts, utxos, fee):
txids.append(txid)
return txids
-def mine_large_block(node):
+def mine_large_block(node, utxos=None):
# generate a 66k transaction,
# and 14 of them is close to the 1MB block limit
+ num = 14
txouts = gen_return_txouts()
- utxos = node.listunspent()[:14]
+ utxos = utxos if utxos is not None else []
+ if len(utxos) < num:
+ utxos.clear()
+ utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
- create_lots_of_big_transactions(node, txouts, utxos, fee=fee)
+ create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def get_bip9_status(node, key):
diff --git a/qa/rpc-tests/wallet.py b/qa/rpc-tests/wallet.py
index 3c0dc0f4ea..992fb8a2d6 100755
--- a/qa/rpc-tests/wallet.py
+++ b/qa/rpc-tests/wallet.py
@@ -330,10 +330,12 @@ class WalletTest (BitcoinTestFramework):
# disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463
# '-salvagewallet',
]
+ chainlimit = 6
for m in maintenance:
print("check " + m)
stop_nodes(self.nodes)
- self.nodes = start_nodes(3, self.options.tmpdir, [[m]] * 3)
+ # set lower ancestor limit for later
+ self.nodes = start_nodes(3, self.options.tmpdir, [[m, "-limitancestorcount="+str(chainlimit)]] * 3)
while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
# reindex will leave rpc warm up "early"; Wait for it to finish
time.sleep(0.1)
@@ -346,5 +348,26 @@ class WalletTest (BitcoinTestFramework):
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
+ # ==Check that wallet prefers to use coins that don't exceed mempool limits =====
+
+ # Get all non-zero utxos together
+ chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()]
+ singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
+ self.nodes[0].generate(1)
+ node0_balance = self.nodes[0].getbalance()
+ # Split into two chains
+ rawtx = self.nodes[0].createrawtransaction([{"txid":singletxid, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), chain_addrs[1]:node0_balance/2-Decimal('0.01')})
+ signedtx = self.nodes[0].signrawtransaction(rawtx)
+ singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"])
+ txids = [singletxid, singletxid]
+ self.nodes[0].generate(1)
+
+ # Make a long chain of unconfirmed payments without hitting mempool limit
+ txid_list = []
+ for i in range(chainlimit*2):
+ txid_list.append(self.nodes[0].sendtoaddress(chain_addrs[0], Decimal('0.0001')))
+ assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit*2)
+ assert_equal(len(txid_list), chainlimit*2)
+
if __name__ == '__main__':
WalletTest().main()
diff --git a/src/Makefile.am b/src/Makefile.am
index 8c12aee217..89b90e6dff 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -61,6 +61,7 @@ EXTRA_LIBRARIES += \
lib_LTLIBRARIES = $(LIBBITCOINCONSENSUS)
bin_PROGRAMS =
+noinst_PROGRAMS =
TESTS =
BENCHMARKS =
@@ -96,6 +97,7 @@ BITCOIN_CORE_H = \
consensus/consensus.h \
core_io.h \
core_memusage.h \
+ cuckoocache.h \
httprpc.h \
httpserver.h \
indirectmap.h \
@@ -154,6 +156,7 @@ BITCOIN_CORE_H = \
wallet/rpcwallet.h \
wallet/wallet.h \
wallet/walletdb.h \
+ warnings.h \
zmq/zmqabstractnotifier.h \
zmq/zmqconfig.h\
zmq/zmqnotificationinterface.h \
@@ -305,6 +308,7 @@ libbitcoin_common_a_SOURCES = \
scheduler.cpp \
script/sign.cpp \
script/standard.cpp \
+ warnings.cpp \
$(BITCOIN_CORE_H)
# util: shared between all executables.
diff --git a/src/Makefile.qttest.include b/src/Makefile.qttest.include
index cb310d5a1b..039f8ac547 100644
--- a/src/Makefile.qttest.include
+++ b/src/Makefile.qttest.include
@@ -6,6 +6,7 @@ bin_PROGRAMS += qt/test/test_bitcoin-qt
TESTS += qt/test/test_bitcoin-qt
TEST_QT_MOC_CPP = \
+ qt/test/moc_compattests.cpp \
qt/test/moc_rpcnestedtests.cpp \
qt/test/moc_uritests.cpp
@@ -14,6 +15,7 @@ TEST_QT_MOC_CPP += qt/test/moc_paymentservertests.cpp
endif
TEST_QT_H = \
+ qt/test/compattests.h \
qt/test/rpcnestedtests.h \
qt/test/uritests.h \
qt/test/paymentrequestdata.h \
@@ -23,6 +25,7 @@ qt_test_test_bitcoin_qt_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(BITCOIN_
$(QT_INCLUDES) $(QT_TEST_INCLUDES) $(PROTOBUF_CFLAGS)
qt_test_test_bitcoin_qt_SOURCES = \
+ qt/test/compattests.cpp \
qt/test/rpcnestedtests.cpp \
qt/test/test_main.cpp \
qt/test/uritests.cpp \
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index a14adc7876..7a5bb07b6c 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -4,6 +4,7 @@
TESTS += test/test_bitcoin
bin_PROGRAMS += test/test_bitcoin
+noinst_PROGRAMS += test/test_bitcoin_fuzzy
TEST_SRCDIR = test
TEST_BINARY=test/test_bitcoin$(EXEEXT)
@@ -12,7 +13,6 @@ EXTRA_DIST += \
test/bctest.py \
test/bitcoin-util-test.py \
test/data/bitcoin-util-test.json \
- test/data/blanktx.hex \
test/data/tt-delin1-out.hex \
test/data/tt-delout1-out.hex \
test/data/tt-locktime317000-out.hex \
@@ -21,7 +21,7 @@ EXTRA_DIST += \
test/data/txcreate2.hex \
test/data/txcreatedata1.hex \
test/data/txcreatedata2.hex \
- test/data/txcreatesign.hex \
+ test/data/txcreatesignv1.hex \
test/data/txcreatedata_seq0.hex \
test/data/txcreatedata_seq1.hex
@@ -38,6 +38,7 @@ RAW_TEST_FILES =
GENERATED_TEST_FILES = $(JSON_TEST_FILES:.json=.json.h) $(RAW_TEST_FILES:.raw=.raw.h)
+# test_bitcoin binary #
BITCOIN_TESTS =\
test/arith_uint256_tests.cpp \
test/scriptnum10.h \
@@ -50,9 +51,11 @@ BITCOIN_TESTS =\
test/bip32_tests.cpp \
test/blockencodings_tests.cpp \
test/bloom_tests.cpp \
+ test/bswap_tests.cpp \
test/coins_tests.cpp \
test/compress_tests.cpp \
test/crypto_tests.cpp \
+ test/cuckoocache_tests.cpp \
test/DoS_tests.cpp \
test/getarg_tests.cpp \
test/hash_tests.cpp \
@@ -119,6 +122,25 @@ test_test_bitcoin_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) -s
if ENABLE_ZMQ
test_test_bitcoin_LDADD += $(ZMQ_LIBS)
endif
+#
+
+# test_bitcoin_fuzzy binary #
+test_test_bitcoin_fuzzy_SOURCES = test/test_bitcoin_fuzzy.cpp
+test_test_bitcoin_fuzzy_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_test_bitcoin_fuzzy_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_test_bitcoin_fuzzy_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+
+test_test_bitcoin_fuzzy_LDADD = \
+ $(LIBUNIVALUE) \
+ $(LIBBITCOIN_SERVER) \
+ $(LIBBITCOIN_COMMON) \
+ $(LIBBITCOIN_UTIL) \
+ $(LIBBITCOIN_CONSENSUS) \
+ $(LIBBITCOIN_CRYPTO) \
+ $(LIBSECP256K1)
+
+test_test_bitcoin_fuzzy_LDADD += $(BOOST_LIBS) $(CRYPTO_LIBS)
+#
nodist_test_test_bitcoin_SOURCES = $(GENERATED_TEST_FILES)
diff --git a/src/bench/coin_selection.cpp b/src/bench/coin_selection.cpp
index 32690fe484..cacead4a59 100644
--- a/src/bench/coin_selection.cpp
+++ b/src/bench/coin_selection.cpp
@@ -52,7 +52,7 @@ static void CoinSelection(benchmark::State& state)
set<pair<const CWalletTx*, unsigned int> > setCoinsRet;
CAmount nValueRet;
- bool success = wallet.SelectCoinsMinConf(1003 * COIN, 1, 6, vCoins, setCoinsRet, nValueRet);
+ bool success = wallet.SelectCoinsMinConf(1003 * COIN, 1, 6, 0, vCoins, setCoinsRet, nValueRet);
assert(success);
assert(nValueRet == 1003 * COIN);
assert(setCoinsRet.size() == 2);
diff --git a/src/bench/crypto_hash.cpp b/src/bench/crypto_hash.cpp
index 168006154f..737d3572ae 100644
--- a/src/bench/crypto_hash.cpp
+++ b/src/bench/crypto_hash.cpp
@@ -22,7 +22,7 @@ static void RIPEMD160(benchmark::State& state)
uint8_t hash[CRIPEMD160::OUTPUT_SIZE];
std::vector<uint8_t> in(BUFFER_SIZE,0);
while (state.KeepRunning())
- CRIPEMD160().Write(begin_ptr(in), in.size()).Finalize(hash);
+ CRIPEMD160().Write(in.data(), in.size()).Finalize(hash);
}
static void SHA1(benchmark::State& state)
@@ -30,7 +30,7 @@ static void SHA1(benchmark::State& state)
uint8_t hash[CSHA1::OUTPUT_SIZE];
std::vector<uint8_t> in(BUFFER_SIZE,0);
while (state.KeepRunning())
- CSHA1().Write(begin_ptr(in), in.size()).Finalize(hash);
+ CSHA1().Write(in.data(), in.size()).Finalize(hash);
}
static void SHA256(benchmark::State& state)
@@ -38,7 +38,7 @@ static void SHA256(benchmark::State& state)
uint8_t hash[CSHA256::OUTPUT_SIZE];
std::vector<uint8_t> in(BUFFER_SIZE,0);
while (state.KeepRunning())
- CSHA256().Write(begin_ptr(in), in.size()).Finalize(hash);
+ CSHA256().Write(in.data(), in.size()).Finalize(hash);
}
static void SHA256_32b(benchmark::State& state)
@@ -46,7 +46,7 @@ static void SHA256_32b(benchmark::State& state)
std::vector<uint8_t> in(32,0);
while (state.KeepRunning()) {
for (int i = 0; i < 1000000; i++) {
- CSHA256().Write(begin_ptr(in), in.size()).Finalize(&in[0]);
+ CSHA256().Write(in.data(), in.size()).Finalize(&in[0]);
}
}
}
@@ -56,7 +56,7 @@ static void SHA512(benchmark::State& state)
uint8_t hash[CSHA512::OUTPUT_SIZE];
std::vector<uint8_t> in(BUFFER_SIZE,0);
while (state.KeepRunning())
- CSHA512().Write(begin_ptr(in), in.size()).Finalize(hash);
+ CSHA512().Write(in.data(), in.size()).Finalize(hash);
}
static void SipHash_32b(benchmark::State& state)
diff --git a/src/bench/verify_script.cpp b/src/bench/verify_script.cpp
index 4aca1c7eb3..bf04f90e0e 100644
--- a/src/bench/verify_script.cpp
+++ b/src/bench/verify_script.cpp
@@ -90,7 +90,7 @@ static void VerifyScriptBench(benchmark::State& state)
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << txSpend;
int csuccess = bitcoinconsensus_verify_script_with_amount(
- begin_ptr(txCredit.vout[0].scriptPubKey),
+ txCredit.vout[0].scriptPubKey.data(),
txCredit.vout[0].scriptPubKey.size(),
txCredit.vout[0].nValue,
(const unsigned char*)&stream[0], stream.size(), 0, flags, nullptr);
diff --git a/src/coins.h b/src/coins.h
index d295b3c940..dd6ef6cc3a 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -467,7 +467,6 @@ public:
friend class CCoinsModifier;
private:
- CCoinsMap::iterator FetchCoins(const uint256 &txid);
CCoinsMap::const_iterator FetchCoins(const uint256 &txid) const;
/**
diff --git a/src/compat/byteswap.h b/src/compat/byteswap.h
index 07ca535728..56f4240e49 100644
--- a/src/compat/byteswap.h
+++ b/src/compat/byteswap.h
@@ -15,6 +15,23 @@
#include <byteswap.h>
#endif
+#if defined(__APPLE__)
+
+#if !defined(bswap_16)
+
+// Mac OS X / Darwin features; we include a check for bswap_16 because if it is already defined, protobuf has
+// defined these macros for us already; if it isn't, we do it ourselves. In either case, we get the exact same
+// result regardless of which path was taken
+#include <libkern/OSByteOrder.h>
+#define bswap_16(x) OSSwapInt16(x)
+#define bswap_32(x) OSSwapInt32(x)
+#define bswap_64(x) OSSwapInt64(x)
+
+#endif // !defined(bswap_16)
+
+#else
+// Non-Mac OS X / non-Darwin
+
#if HAVE_DECL_BSWAP_16 == 0
inline uint16_t bswap_16(uint16_t x)
{
@@ -44,4 +61,6 @@ inline uint64_t bswap_64(uint64_t x)
}
#endif // HAVE_DECL_BSWAP64
+#endif // defined(__APPLE__)
+
#endif // BITCOIN_COMPAT_BYTESWAP_H
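Whichever branch of this header is taken, `bswap_16`/`bswap_32`/`bswap_64` all perform the same operation: reversing the byte order of a fixed-width integer. A small standalone check of that behaviour, using a generic shift-and-mask implementation equivalent in effect to the non-Darwin fallback (not the header above itself):

```cpp
// Standalone illustration of what a 32-bit byte swap computes.
#include <cassert>
#include <cstdint>

static uint32_t bswap32_example(uint32_t x)
{
    return ((x & 0xff000000u) >> 24) |
           ((x & 0x00ff0000u) >> 8)  |
           ((x & 0x0000ff00u) << 8)  |
           ((x & 0x000000ffu) << 24);
}

int main()
{
    // 0x01020304 becomes 0x04030201 when its bytes are reversed.
    assert(bswap32_example(0x01020304u) == 0x04030201u);
    // Swapping twice restores the original value (the operation is an involution).
    assert(bswap32_example(bswap32_example(0xdeadbeefu)) == 0xdeadbeefu);
    return 0;
}
```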
diff --git a/src/core_io.h b/src/core_io.h
index 4344290bb8..7642bc6d6e 100644
--- a/src/core_io.h
+++ b/src/core_io.h
@@ -11,7 +11,7 @@
class CBlock;
class CScript;
class CTransaction;
-class CMutableTransaction;
+struct CMutableTransaction;
class uint256;
class UniValue;
@@ -26,7 +26,7 @@ std::vector<unsigned char> ParseHexUV(const UniValue& v, const std::string& strN
// core_write.cpp
std::string FormatScript(const CScript& script);
-std::string EncodeHexTx(const CTransaction& tx);
+std::string EncodeHexTx(const CTransaction& tx, const int serializeFlags = 0);
void ScriptPubKeyToUniv(const CScript& scriptPubKey, UniValue& out, bool fIncludeHex);
void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry);
diff --git a/src/core_write.cpp b/src/core_write.cpp
index 1125718597..9eed2270d5 100644
--- a/src/core_write.cpp
+++ b/src/core_write.cpp
@@ -116,9 +116,9 @@ string ScriptToAsmStr(const CScript& script, const bool fAttemptSighashDecode)
return str;
}
-string EncodeHexTx(const CTransaction& tx)
+string EncodeHexTx(const CTransaction& tx, const int serialFlags)
{
- CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
+ CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION | serialFlags);
ssTx << tx;
return HexStr(ssTx.begin(), ssTx.end());
}
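The point of `PROTOCOL_VERSION | serialFlags` is that serialization flag bits ride along in the stream's version word, so downstream serialization code can branch on them; this is what lets callers request, for example, a transaction encoding without witness data. The following is a standalone sketch of that pattern with made-up names, not Bitcoin Core's actual stream or transaction classes:

```cpp
// Sketch of "flags OR'd into the stream version"; all names here are
// hypothetical stand-ins, not Bitcoin Core's actual classes or constants.
#include <cstdint>
#include <iostream>
#include <string>

static const int EXAMPLE_NO_WITNESS = 0x40000000; // a high flag bit

struct ExampleStream {
    int version;      // protocol version with optional flag bits OR'd in
    std::string out;
    void write(const std::string& s) { out += s; }
};

struct ExampleTx {
    std::string base = "base";
    std::string witness = "wit";
    void Serialize(ExampleStream& s) const
    {
        s.write(base);
        // The serializer inspects the flag bits carried in the stream version.
        if (!(s.version & EXAMPLE_NO_WITNESS)) {
            s.write(witness);
        }
    }
};

int main()
{
    ExampleTx tx;
    ExampleStream with_witness{70014, ""};
    ExampleStream without_witness{70014 | EXAMPLE_NO_WITNESS, ""};
    tx.Serialize(with_witness);
    tx.Serialize(without_witness);
    std::cout << with_witness.out << "\n"    // "basewit"
              << without_witness.out << "\n"; // "base"
    return 0;
}
```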
diff --git a/src/crypto/ctaes/ctaes.c b/src/crypto/ctaes/ctaes.c
index 2389fc0bb2..55962bf252 100644
--- a/src/crypto/ctaes/ctaes.c
+++ b/src/crypto/ctaes/ctaes.c
@@ -134,7 +134,7 @@ static void SubBytes(AES_state *s, int inv) {
D = U7;
}
- /* Non-linear transformation (identical to the code in SubBytes) */
+ /* Non-linear transformation (shared between the forward and backward case) */
M1 = T13 & T6;
M6 = T3 & T16;
M11 = T1 & T15;
@@ -469,9 +469,9 @@ static void AES_encrypt(const AES_state* rounds, int nrounds, unsigned char* cip
static void AES_decrypt(const AES_state* rounds, int nrounds, unsigned char* plain16, const unsigned char* cipher16) {
/* Most AES decryption implementations use the alternate scheme
- * (the Equivalent Inverse Cipher), which looks more like encryption, but
- * needs different round constants. We can't reuse any code here anyway, so
- * don't bother. */
+ * (the Equivalent Inverse Cipher), which allows for more code reuse between
+ * the encryption and decryption code, but requires separate setup for both.
+ */
AES_state s = {{0}};
int round;
diff --git a/src/cuckoocache.h b/src/cuckoocache.h
new file mode 100644
index 0000000000..efd6a820b5
--- /dev/null
+++ b/src/cuckoocache.h
@@ -0,0 +1,457 @@
+// Copyright (c) 2016 Jeremy Rubin
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef _BITCOIN_CUCKOOCACHE_H_
+#define _BITCOIN_CUCKOOCACHE_H_
+
+#include <array>
+#include <algorithm>
+#include <atomic>
+#include <cstring>
+#include <cmath>
+#include <memory>
+#include <vector>
+
+
+/** namespace CuckooCache provides high performance cache primitives
+ *
+ * Summary:
+ *
+ * 1) bit_packed_atomic_flags is bit-packed atomic flags for garbage collection
+ *
+ * 2) cache is a cache which is performant in memory usage and lookup speed. It
+ * is lockfree for erase operations. Elements are lazily erased on the next
+ * insert.
+ */
+namespace CuckooCache
+{
+/** bit_packed_atomic_flags implements a container for garbage collection flags
+ * that is only thread unsafe on calls to setup. This class bit-packs collection
+ * flags for memory efficiency.
+ *
+ * All operations are std::memory_order_relaxed so external mechanisms must
+ * ensure that writes and reads are properly synchronized.
+ *
+ * On setup(n), all bits up to n are marked as collected.
+ *
+ * Under the hood, because it is an 8-bit type, it makes sense to use a multiple
+ * of 8 for setup, but it will be safe if that is not the case as well.
+ *
+ */
+class bit_packed_atomic_flags
+{
+ std::unique_ptr<std::atomic<uint8_t>[]> mem;
+
+public:
+ /** No default constructor as there must be some size */
+ bit_packed_atomic_flags() = delete;
+
+ /**
+ * bit_packed_atomic_flags constructor creates memory to sufficiently
+ * keep track of garbage collection information for size entries.
+ *
+ * @param size the number of elements to allocate space for
+ *
+ * @post bit_set, bit_unset, and bit_is_set function properly forall x. x <
+ * size
+ * @post All calls to bit_is_set (without subsequent bit_unset) will return
+ * true.
+ */
+ bit_packed_atomic_flags(uint32_t size)
+ {
+ // pad out the size if needed
+ size = (size + 7) / 8;
+ mem.reset(new std::atomic<uint8_t>[size]);
+ for (uint32_t i = 0; i < size; ++i)
+ mem[i].store(0xFF);
+ };
+
+ /** setup marks all entries and ensures that bit_packed_atomic_flags can store
+ * at least size entries
+ *
+ * @param b the number of elements to allocate space for
+ * @post bit_set, bit_unset, and bit_is_set function properly forall x. x <
+ * b
+ * @post All calls to bit_is_set (without subsequent bit_unset) will return
+ * true.
+ */
+ inline void setup(uint32_t b)
+ {
+ bit_packed_atomic_flags d(b);
+ std::swap(mem, d.mem);
+ }
+
+ /** bit_set sets an entry as discardable.
+ *
+ * @param s the index of the entry to bit_set.
+ * @post immediately subsequent call (assuming proper external memory
+ * ordering) to bit_is_set(s) == true.
+ *
+ */
+ inline void bit_set(uint32_t s)
+ {
+ mem[s >> 3].fetch_or(1 << (s & 7), std::memory_order_relaxed);
+ }
+
+ /** bit_unset marks an entry as something that should not be overwritten
+ *
+ * @param s the index of the entry to bit_unset.
+ * @post immediately subsequent call (assuming proper external memory
+ * ordering) to bit_is_set(s) == false.
+ */
+ inline void bit_unset(uint32_t s)
+ {
+ mem[s >> 3].fetch_and(~(1 << (s & 7)), std::memory_order_relaxed);
+ }
+
+ /** bit_is_set queries the table for discardability at s
+ *
+ * @param s the index of the entry to read.
+ * @returns if the bit at index s was set.
+ * */
+ inline bool bit_is_set(uint32_t s) const
+ {
+ return (1 << (s & 7)) & mem[s >> 3].load(std::memory_order_relaxed);
+ }
+};
+
+/** cache implements a cache with properties similar to a cuckoo-set
+ *
+ * The cache is able to hold up to (~(uint32_t)0) - 1 elements.
+ *
+ * Read Operations:
+ * - contains(*, false)
+ *
+ * Read+Erase Operations:
+ * - contains(*, true)
+ *
+ * Erase Operations:
+ * - allow_erase()
+ *
+ * Write Operations:
+ * - setup()
+ * - setup_bytes()
+ * - insert()
+ * - please_keep()
+ *
+ * Synchronization Free Operations:
+ * - invalid()
+ * - compute_hashes()
+ *
+ * User Must Guarantee:
+ *
+ * 1) Write requires synchronized access (e.g., a lock).
+ * 2) Read requires no concurrent Write, synchronized with the last insert.
+ * 3) Erase requires no concurrent Write, synchronized with the last insert.
+ * 4) An Erase caller must release all memory before allowing a new Writer.
+ *
+ *
+ * Note on function names:
+ * - The name "allow_erase" is used because the real discard happens later.
+ * - The name "please_keep" is used because elements may be erased anyways on insert.
+ *
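+ * A minimal usage sketch (illustrative only; ElementType and EightWayHasher
+ * are hypothetical stand-ins for whatever the caller supplies):
+ *
+ *     CuckooCache::cache<ElementType, EightWayHasher> set;
+ *     set.setup_bytes(32 << 20);          // size the table once, before use
+ *     ElementType e = ...;                // element to be cached
+ *     set.insert(e);                      // Write: needs external synchronization
+ *     bool hit = set.contains(e, true);   // Read+Erase: no concurrent Write allowed
+ *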
+ * @tparam Element should be a movable and copyable type
+ * @tparam Hash should be a function/callable which takes a template parameter
+ * hash_select and an Element and extracts a hash from it. Should return
+ * high-entropy hashes for `Hash h; h<0>(e) ... h<7>(e)`.
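+ *
+ * A conforming Hash could be sketched roughly as below (illustrative only:
+ * the uint8_t type of hash_select is an assumption, and a byte-slicing hash
+ * is far too weak for real use -- supply salted, high-entropy hashes):
+ *
+ *     struct ByteSliceHasher {
+ *         template <uint8_t hash_select>
+ *         uint32_t operator()(const uint64_t& e) const {
+ *             // shift in a different byte offset of e per hash select
+ *             return static_cast<uint32_t>(e >> (hash_select * 8));
+ *         }
+ *     };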
+ */
+template <typename Element, typename Hash>
+class cache
+{
+private:
+ /** table stores all the elements */
+ std::vector<Element> table;
+
+ /** size stores the total available slots in the hash table */
+ uint32_t size;
+
+ /** The bit_packed_atomic_flags array is marked mutable because we want
+ * garbage collection to be allowed to occur from const methods */
+ mutable bit_packed_atomic_flags collection_flags;
+
+ /** epoch_flags tracks how recently an element was inserted into
+ * the cache. true denotes recent, false denotes not-recent. See insert()
+ * method for full semantics.
+ */
+ mutable std::vector<bool> epoch_flags;
+
+ /** epoch_heuristic_counter is used to determine when an epoch might be aged
+ * and an expensive scan should be done. epoch_heuristic_counter is
+ * decremented on insert and, when it reaches zero, reset to the number of
+ * inserts which would cause the current epoch to reach epoch_size.
+ */
+ uint32_t epoch_heuristic_counter;
+
+ /** epoch_size is set to be the number of elements supposed to be in an
+ * epoch. When the number of non-erased elements in an epoch
+ * exceeds epoch_size, a new epoch should be started and all
+ * current entries demoted. epoch_size is set to be 45% of size because
+ * we want to keep load around 90%, and we support 3 epochs at once --
+ * one "dead" which has been erased, one "dying" which has been marked to be
+ * erased next, and one "living" which new inserts add to.
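+ *
+ * As an illustrative check of the arithmetic (not normative): with
+ * size = 2^20 = 1048576 slots, epoch_size is (45 * 1048576) / 100 = 471859,
+ * so the "living" and "dying" epochs together hold about 943718 entries,
+ * i.e. roughly 90% load, while the "dead" epoch is reclaimed lazily.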
+ */
+ uint32_t epoch_size;
+
+ /** hash_mask should be set to appropriately mask out a hash such that every
+ * masked hash is in [0, size); e.g., if floor(log2(size)) == 20, then hash_mask
+ * should be (1<<20)-1.
+ */
+ uint32_t hash_mask;
+
+ /** depth_limit determines how many elements insert should try to replace.
+ * Should be set to log2(n). */
+ uint8_t depth_limit;
+
+ /** hash_function is a const instance of the hash function. It cannot be
+ * static or initialized at call time as it may have internal state (such as
+ * a nonce).
+ */
+ const Hash hash_function;
+
+ /** compute_hashes is convenience for not having to write out this
+ * expression everywhere we use the hash values of an Element.
+ *
+ * @param e the element whose hashes will be returned
+ * @returns std::array<uint32_t, 8> of deterministic hashes derived from e
+ */
+ inline std::array<uint32_t, 8> compute_hashes(const Element& e) const
+ {
+ return {{hash_function.template operator()<0>(e) & hash_mask,
+ hash_function.template operator()<1>(e) & hash_mask,
+ hash_function.template operator()<2>(e) & hash_mask,
+ hash_function.template operator()<3>(e) & hash_mask,
+ hash_function.template operator()<4>(e) & hash_mask,
+ hash_function.template operator()<5>(e) & hash_mask,
+ hash_function.template operator()<6>(e) & hash_mask,
+ hash_function.template operator()<7>(e) & hash_mask}};
+ }
+
+ /** invalid is the equivalent of an "end" sentinel for this table
+ * @returns a constexpr index that can never be inserted to */
+ constexpr uint32_t invalid() const
+ {
+ return ~(uint32_t)0;
+ }
+
+ /** allow_erase marks the element at index n as discardable. Threadsafe
+ * without any concurrent insert.
+ * @param n the index to allow erasure of
+ */
+ inline void allow_erase(uint32_t n) const
+ {
+ collection_flags.bit_set(n);
+ }
+
+ /** please_keep marks the element at index n as an entry that should be kept.
+ * Threadsafe without any concurrent insert.
+ * @param n the index to prioritize keeping
+ */
+ inline void please_keep(uint32_t n) const
+ {
+ collection_flags.bit_unset(n);
+ }
+
+ /** epoch_check handles the changing of epochs for elements stored in the
+ * cache. epoch_check should be run before every insert.
+ *
+ * First, epoch_check decrements and checks the cheap heuristic, and then does
+ * a more expensive scan if the cheap heuristic runs out. If the expensive
+ * scan succeeds, the epochs are aged and old elements are allow_erased. The
+ * cheap heuristic is reset to retrigger after the worst case growth of the
+ * current epoch's elements would exceed the epoch_size.
+ */
+ void epoch_check()
+ {
+ if (epoch_heuristic_counter != 0) {
+ --epoch_heuristic_counter;
+ return;
+ }
+ // count the number of elements from the latest epoch which
+ // have not been erased.
+ uint32_t epoch_unused_count = 0;
+ for (uint32_t i = 0; i < size; ++i)
+ epoch_unused_count += epoch_flags[i] &&
+ !collection_flags.bit_is_set(i);
+ // If there are more non-deleted entries in the current epoch than the
+ // epoch size, then allow_erase on all elements in the old epoch (marked
+ // false) and move all elements in the current epoch to the old epoch
+ // but do not call allow_erase on their indices.
+ if (epoch_unused_count >= epoch_size) {
+ for (uint32_t i = 0; i < size; ++i)
+ if (epoch_flags[i])
+ epoch_flags[i] = false;
+ else
+ allow_erase(i);
+ epoch_heuristic_counter = epoch_size;
+ } else
+ // reset the epoch_heuristic_counter to next do a scan when worst
+ // case behavior (no intermittent erases) would exceed epoch size,
+ // with a reasonable minimum scan size.
+ // Ordinarily, we would have to sanity check std::min(epoch_size,
+ // epoch_unused_count), but we already know that `epoch_unused_count
+ // < epoch_size` in this branch
+ epoch_heuristic_counter = std::max(1u, std::max(epoch_size / 16,
+ epoch_size - epoch_unused_count));
+ }
+
+public:
+ /** The cache must always be given a size via a subsequent call to setup or
+ * setup_bytes before use, otherwise operations may segfault.
+ */
+ cache() : table(), size(), collection_flags(0), epoch_flags(),
+ epoch_heuristic_counter(), epoch_size(), depth_limit(0), hash_function()
+ {
+ }
+
+ /** setup initializes the container to store no more than new_size
+ * elements. setup rounds down to a power of two size.
+ *
+ * setup should only be called once.
+ *
+ * @param new_size the desired number of elements to store
+ * @returns the maximum number of elements storable
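+ *
+ * For example (illustrative): setup(3000000) floors to 2^21, so 2097152
+ * slots are allocated and 2097152 is returned.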
+ **/
+ uint32_t setup(uint32_t new_size)
+ {
+ // depth_limit must be at least one otherwise errors can occur.
+ depth_limit = static_cast<uint8_t>(std::log2(static_cast<float>(std::max((uint32_t)2, new_size))));
+ size = 1 << depth_limit;
+ hash_mask = size-1;
+ table.resize(size);
+ collection_flags.setup(size);
+ epoch_flags.resize(size);
+ // Set to 45% as described above
+ epoch_size = std::max((uint32_t)1, (45 * size) / 100);
+ // Initially set to wait for a whole epoch
+ epoch_heuristic_counter = epoch_size;
+ return size;
+ }
+
+ /** setup_bytes is a convenience function which accounts for internal memory
+ * usage when deciding how many elements to store. It isn't perfect because
+ * it doesn't account for any overhead (struct size, MallocUsage, collection
+ * and epoch flags). This was done to simplify selecting a power of two
+ * size. In the expected use case, an extra two bits per entry should be
+ * negligible compared to the size of the elements.
+ *
+ * @param bytes the approximate number of bytes to use for this data
+ * structure.
+ * @returns the maximum number of elements storable (see setup()
+ * documentation for more detail)
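+ *
+ * For example (illustrative): with a 32-byte Element, setup_bytes(33554432)
+ * computes 33554432 / 32 = 1048576 candidate entries and calls setup() with
+ * that value, which here is already a power of two (2^20).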
+ */
+ uint32_t setup_bytes(size_t bytes)
+ {
+ return setup(bytes/sizeof(Element));
+ }
+
+ /** insert loops at most depth_limit times trying to insert a hash
+ * at various locations in the table via a variant of the Cuckoo Algorithm
+ * with eight hash locations.
+ *
+ * It drops the last tried element if it runs out of depth before
+ * encountering an open slot.
+ *
+ * Thus
+ *
+ * insert(x);
+ * return contains(x, false);
+ *
+ * is not guaranteed to return true.
+ *
+ * @param e the element to insert
+ * @post one of the following holds: all previously inserted elements and e
+ * are now in the table; one previously inserted element has been evicted
+ * from the table; or the entry attempted to be inserted has been evicted.
+ *
+ */
+ inline void insert(Element e)
+ {
+ epoch_check();
+ uint32_t last_loc = invalid();
+ bool last_epoch = true;
+ std::array<uint32_t, 8> locs = compute_hashes(e);
+ // Make sure we have not already inserted this element
+ // If we have, make sure that it does not get deleted
+ for (uint32_t loc : locs)
+ if (table[loc] == e) {
+ please_keep(loc);
+ epoch_flags[loc] = last_epoch;
+ return;
+ }
+ for (uint8_t depth = 0; depth < depth_limit; ++depth) {
+ // First try to insert to an empty slot, if one exists
+ for (uint32_t loc : locs) {
+ if (!collection_flags.bit_is_set(loc))
+ continue;
+ table[loc] = std::move(e);
+ please_keep(loc);
+ epoch_flags[loc] = last_epoch;
+ return;
+ }
+ /** Swap with the element at the location that was
+ * not the last one looked at. Example:
+ *
+ * 1) On first iteration, last_loc == invalid(), find returns last, so
+ * last_loc defaults to locs[0].
+ * 2) On further iterations, where last_loc == locs[k], last_loc will
+ * go to locs[(k+1) % 8], i.e., the next of the 8 indices, wrapping around
+ * to 0 if needed.
+ *
+ * This prevents moving the element we just put in.
+ *
+ * The swap is not a move -- we must switch onto the evicted element
+ * for the next iteration.
+ */
+ last_loc = locs[(1 + (std::find(locs.begin(), locs.end(), last_loc) - locs.begin())) & 7];
+ std::swap(table[last_loc], e);
+ // Can't std::swap a std::vector<bool>::reference and a bool&.
+ bool epoch = last_epoch;
+ last_epoch = epoch_flags[last_loc];
+ epoch_flags[last_loc] = epoch;
+
+ // Recompute the locs -- unfortunately happens one too many times!
+ locs = compute_hashes(e);
+ }
+ }
+
+ /** contains iterates through the hash locations for a given element
+ * and checks to see if it is present.
+ *
+ * contains does not check garbage collected state (in other words,
+ * garbage is only collected when the space is needed), so:
+ *
+ * insert(x);
+ * if (contains(x, true))
+ * return contains(x, false);
+ * else
+ * return true;
+ *
+ * executed on a single thread will always return true!
+ *
+ * This is a great property for re-org performance for example.
+ *
+ * contains returns a bool set true if the element was found.
+ *
+ * @param e the element to check
+ * @param erase
+ *
+ * @post if erase is true and the element is found, then the garbage collect
+ * flag is set
+ * @returns true if the element is found, false otherwise
+ */
+ inline bool contains(const Element& e, const bool erase) const
+ {
+ std::array<uint32_t, 8> locs = compute_hashes(e);
+ for (uint32_t loc : locs)
+ if (table[loc] == e) {
+ if (erase)
+ allow_erase(loc);
+ return true;
+ }
+ return false;
+ }
+};
+} // namespace CuckooCache
+
+#endif
diff --git a/src/dbwrapper.h b/src/dbwrapper.h
index 4a79bbd17d..08c72c25dd 100644
--- a/src/dbwrapper.h
+++ b/src/dbwrapper.h
@@ -53,38 +53,41 @@ private:
const CDBWrapper &parent;
leveldb::WriteBatch batch;
+ CDataStream ssKey;
+ CDataStream ssValue;
+
public:
/**
* @param[in] _parent CDBWrapper that this batch is to be submitted to
*/
- CDBBatch(const CDBWrapper &_parent) : parent(_parent) { };
+ CDBBatch(const CDBWrapper &_parent) : parent(_parent), ssKey(SER_DISK, CLIENT_VERSION), ssValue(SER_DISK, CLIENT_VERSION) { };
template <typename K, typename V>
void Write(const K& key, const V& value)
{
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey(&ssKey[0], ssKey.size());
- CDataStream ssValue(SER_DISK, CLIENT_VERSION);
ssValue.reserve(DBWRAPPER_PREALLOC_VALUE_SIZE);
ssValue << value;
ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent));
leveldb::Slice slValue(&ssValue[0], ssValue.size());
batch.Put(slKey, slValue);
+ ssKey.clear();
+ ssValue.clear();
}
template <typename K>
void Erase(const K& key)
{
- CDataStream ssKey(SER_DISK, CLIENT_VERSION);
ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE);
ssKey << key;
leveldb::Slice slKey(&ssKey[0], ssKey.size());
batch.Delete(slKey);
+ ssKey.clear();
}
};
diff --git a/src/init.cpp b/src/init.cpp
index ba5fe4152a..e642729a99 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -41,6 +41,7 @@
#ifdef ENABLE_WALLET
#include "wallet/wallet.h"
#endif
+#include "warnings.h"
#include <stdint.h>
#include <stdio.h>
#include <memory>
@@ -380,6 +381,7 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-port=<port>", strprintf(_("Listen for connections on <port> (default: %u or testnet: %u)"), Params(CBaseChainParams::MAIN).GetDefaultPort(), Params(CBaseChainParams::TESTNET).GetDefaultPort()));
strUsage += HelpMessageOpt("-proxy=<ip:port>", _("Connect through SOCKS5 proxy"));
strUsage += HelpMessageOpt("-proxyrandomize", strprintf(_("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)"), DEFAULT_PROXYRANDOMIZE));
+ strUsage += HelpMessageOpt("-rpcserialversion", strprintf(_("Sets the serialization of raw transaction or block hex returned in non-verbose mode, non-segwit(0) or segwit(1) (default: %d)"), DEFAULT_RPC_SERIALIZE_VERSION));
strUsage += HelpMessageOpt("-seednode=<ip>", _("Connect to a node to retrieve peer addresses, and disconnect"));
strUsage += HelpMessageOpt("-timeout=<n>", strprintf(_("Specify connection timeout in milliseconds (minimum: 1, default: %d)"), DEFAULT_CONNECT_TIMEOUT));
strUsage += HelpMessageOpt("-torcontrol=<ip>:<port>", strprintf(_("Tor control port to use if onion listening enabled (default: %s)"), DEFAULT_TOR_CONTROL));
@@ -984,6 +986,12 @@ bool AppInitParameterInteraction()
if (GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS))
nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM);
+ if (GetArg("-rpcserialversion", DEFAULT_RPC_SERIALIZE_VERSION) < 0)
+ return InitError("rpcserialversion must be non-negative.");
+
+ if (GetArg("-rpcserialversion", DEFAULT_RPC_SERIALIZE_VERSION) > 1)
+ return InitError("unknown rpcserialversion requested.");
+
nMaxTipAge = GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE);
fEnableReplacement = GetBoolArg("-mempoolreplacement", DEFAULT_ENABLE_REPLACEMENT);
@@ -1099,6 +1107,8 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
LogPrintf("Using config file %s\n", GetConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME)).string());
LogPrintf("Using at most %i connections (%i file descriptors available)\n", nMaxConnections, nFD);
+ InitSignatureCache();
+
LogPrintf("Using %u threads for script verification\n", nScriptCheckThreads);
if (nScriptCheckThreads) {
for (int i=0; i<nScriptCheckThreads-1; i++)
diff --git a/src/leveldb/.travis.yml b/src/leveldb/.travis.yml
new file mode 100644
index 0000000000..f5bd74c454
--- /dev/null
+++ b/src/leveldb/.travis.yml
@@ -0,0 +1,13 @@
+language: cpp
+compiler:
+- clang
+- gcc
+os:
+- linux
+- osx
+sudo: false
+before_install:
+- echo $LANG
+- echo $LC_ALL
+script:
+- make -j 4 check
diff --git a/src/leveldb/Makefile b/src/leveldb/Makefile
index 2bd2cadcdd..07a5a1ead6 100644
--- a/src/leveldb/Makefile
+++ b/src/leveldb/Makefile
@@ -20,208 +20,395 @@ $(shell CC="$(CC)" CXX="$(CXX)" TARGET_OS="$(TARGET_OS)" \
# this file is generated by the previous line to set build flags and sources
include build_config.mk
+TESTS = \
+ db/autocompact_test \
+ db/c_test \
+ db/corruption_test \
+ db/db_test \
+ db/dbformat_test \
+ db/fault_injection_test \
+ db/filename_test \
+ db/log_test \
+ db/recovery_test \
+ db/skiplist_test \
+ db/version_edit_test \
+ db/version_set_test \
+ db/write_batch_test \
+ helpers/memenv/memenv_test \
+ issues/issue178_test \
+ issues/issue200_test \
+ table/filter_block_test \
+ table/table_test \
+ util/arena_test \
+ util/bloom_test \
+ util/cache_test \
+ util/coding_test \
+ util/crc32c_test \
+ util/env_test \
+ util/hash_test
+
+UTILS = \
+ db/db_bench \
+ db/leveldbutil
+
+# Put the object files in a subdirectory, but the application at the top of the object dir.
+PROGNAMES := $(notdir $(TESTS) $(UTILS))
+
+# On Linux may need libkyotocabinet-dev for dependency.
+BENCHMARKS = \
+ doc/bench/db_bench_sqlite3 \
+ doc/bench/db_bench_tree_db
+
CFLAGS += -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
CXXFLAGS += -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT)
LDFLAGS += $(PLATFORM_LDFLAGS)
LIBS += $(PLATFORM_LIBS)
-LIBOBJECTS = $(SOURCES:.cc=.o)
-MEMENVOBJECTS = $(MEMENV_SOURCES:.cc=.o)
-
-TESTUTIL = ./util/testutil.o
-TESTHARNESS = ./util/testharness.o $(TESTUTIL)
+SIMULATOR_OUTDIR=out-ios-x86
+DEVICE_OUTDIR=out-ios-arm
-# Note: iOS should probably be using libtool, not ar.
ifeq ($(PLATFORM), IOS)
+# Note: iOS should probably be using libtool, not ar.
AR=xcrun ar
+SIMULATORSDK=$(shell xcrun -sdk iphonesimulator --show-sdk-path)
+DEVICESDK=$(shell xcrun -sdk iphoneos --show-sdk-path)
+DEVICE_CFLAGS = -isysroot "$(DEVICESDK)" -arch armv6 -arch armv7 -arch armv7s -arch arm64
+SIMULATOR_CFLAGS = -isysroot "$(SIMULATORSDK)" -arch i686 -arch x86_64
+STATIC_OUTDIR=out-ios-universal
+else
+STATIC_OUTDIR=out-static
+SHARED_OUTDIR=out-shared
+STATIC_PROGRAMS := $(addprefix $(STATIC_OUTDIR)/, $(PROGNAMES))
+SHARED_PROGRAMS := $(addprefix $(SHARED_OUTDIR)/, db_bench)
endif
-TESTS = \
- arena_test \
- autocompact_test \
- bloom_test \
- c_test \
- cache_test \
- coding_test \
- corruption_test \
- crc32c_test \
- db_test \
- dbformat_test \
- env_test \
- filename_test \
- filter_block_test \
- hash_test \
- issue178_test \
- issue200_test \
- log_test \
- memenv_test \
- skiplist_test \
- table_test \
- version_edit_test \
- version_set_test \
- write_batch_test
-
-PROGRAMS = db_bench leveldbutil $(TESTS)
-BENCHMARKS = db_bench_sqlite3 db_bench_tree_db
-
-LIBRARY = libleveldb.a
-MEMENVLIBRARY = libmemenv.a
+STATIC_LIBOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(SOURCES:.cc=.o))
+STATIC_MEMENVOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
+
+DEVICE_LIBOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(SOURCES:.cc=.o))
+DEVICE_MEMENVOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
+
+SIMULATOR_LIBOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(SOURCES:.cc=.o))
+SIMULATOR_MEMENVOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
+
+SHARED_LIBOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(SOURCES:.cc=.o))
+SHARED_MEMENVOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
+
+TESTUTIL := $(STATIC_OUTDIR)/util/testutil.o
+TESTHARNESS := $(STATIC_OUTDIR)/util/testharness.o $(TESTUTIL)
+
+STATIC_TESTOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(TESTS)))
+STATIC_UTILOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(UTILS)))
+STATIC_ALLOBJS := $(STATIC_LIBOBJECTS) $(STATIC_MEMENVOBJECTS) $(STATIC_TESTOBJS) $(STATIC_UTILOBJS) $(TESTHARNESS)
+DEVICE_ALLOBJS := $(DEVICE_LIBOBJECTS) $(DEVICE_MEMENVOBJECTS)
+SIMULATOR_ALLOBJS := $(SIMULATOR_LIBOBJECTS) $(SIMULATOR_MEMENVOBJECTS)
default: all
# Should we build shared libraries?
ifneq ($(PLATFORM_SHARED_EXT),)
+# Many leveldb test apps use non-exported API's. Only build a subset for testing.
+SHARED_ALLOBJS := $(SHARED_LIBOBJECTS) $(SHARED_MEMENVOBJECTS) $(TESTHARNESS)
+
ifneq ($(PLATFORM_SHARED_VERSIONED),true)
-SHARED1 = libleveldb.$(PLATFORM_SHARED_EXT)
-SHARED2 = $(SHARED1)
-SHARED3 = $(SHARED1)
-SHARED = $(SHARED1)
+SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT)
+SHARED_LIB2 = $(SHARED_LIB1)
+SHARED_LIB3 = $(SHARED_LIB1)
+SHARED_LIBS = $(SHARED_LIB1)
+SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a
else
# Update db.h if you change these.
-SHARED_MAJOR = 1
-SHARED_MINOR = 18
-SHARED1 = libleveldb.$(PLATFORM_SHARED_EXT)
-SHARED2 = $(SHARED1).$(SHARED_MAJOR)
-SHARED3 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR)
-SHARED = $(SHARED1) $(SHARED2) $(SHARED3)
-$(SHARED1): $(SHARED3)
- ln -fs $(SHARED3) $(SHARED1)
-$(SHARED2): $(SHARED3)
- ln -fs $(SHARED3) $(SHARED2)
+SHARED_VERSION_MAJOR = 1
+SHARED_VERSION_MINOR = 19
+SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT)
+SHARED_LIB2 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR)
+SHARED_LIB3 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR).$(SHARED_VERSION_MINOR)
+SHARED_LIBS = $(SHARED_OUTDIR)/$(SHARED_LIB1) $(SHARED_OUTDIR)/$(SHARED_LIB2) $(SHARED_OUTDIR)/$(SHARED_LIB3)
+$(SHARED_OUTDIR)/$(SHARED_LIB1): $(SHARED_OUTDIR)/$(SHARED_LIB3)
+ ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB1)
+$(SHARED_OUTDIR)/$(SHARED_LIB2): $(SHARED_OUTDIR)/$(SHARED_LIB3)
+ ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB2)
+SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a
endif
-$(SHARED3):
- $(CXX) $(LDFLAGS) $(PLATFORM_SHARED_LDFLAGS)$(SHARED2) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(SOURCES) -o $(SHARED3) $(LIBS)
+$(SHARED_OUTDIR)/$(SHARED_LIB3): $(SHARED_LIBOBJECTS)
+ $(CXX) $(LDFLAGS) $(PLATFORM_SHARED_LDFLAGS)$(SHARED_LIB2) $(SHARED_LIBOBJECTS) -o $(SHARED_OUTDIR)/$(SHARED_LIB3) $(LIBS)
endif # PLATFORM_SHARED_EXT
-all: $(SHARED) $(LIBRARY)
+all: $(SHARED_LIBS) $(SHARED_PROGRAMS) $(STATIC_OUTDIR)/libleveldb.a $(STATIC_OUTDIR)/libmemenv.a $(STATIC_PROGRAMS)
-check: all $(PROGRAMS) $(TESTS)
- for t in $(TESTS); do echo "***** Running $$t"; ./$$t || exit 1; done
+check: $(STATIC_PROGRAMS)
+ for t in $(notdir $(TESTS)); do echo "***** Running $$t"; $(STATIC_OUTDIR)/$$t || exit 1; done
clean:
- -rm -f $(PROGRAMS) $(BENCHMARKS) $(LIBRARY) $(SHARED) $(MEMENVLIBRARY) */*.o */*/*.o ios-x86/*/*.o ios-arm/*/*.o build_config.mk
- -rm -rf ios-x86/* ios-arm/*
+ -rm -rf out-static out-shared out-ios-x86 out-ios-arm out-ios-universal
+ -rm -f build_config.mk
+ -rm -rf ios-x86 ios-arm
-$(LIBRARY): $(LIBOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(LIBOBJECTS)
+$(STATIC_OUTDIR):
+ mkdir $@
-db_bench: db/db_bench.o $(LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) db/db_bench.o $(LIBOBJECTS) $(TESTUTIL) -o $@ $(LIBS)
+$(STATIC_OUTDIR)/db: | $(STATIC_OUTDIR)
+ mkdir $@
-db_bench_sqlite3: doc/bench/db_bench_sqlite3.o $(LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) doc/bench/db_bench_sqlite3.o $(LIBOBJECTS) $(TESTUTIL) -o $@ -lsqlite3 $(LIBS)
+$(STATIC_OUTDIR)/helpers/memenv: | $(STATIC_OUTDIR)
+ mkdir -p $@
-db_bench_tree_db: doc/bench/db_bench_tree_db.o $(LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) doc/bench/db_bench_tree_db.o $(LIBOBJECTS) $(TESTUTIL) -o $@ -lkyotocabinet $(LIBS)
+$(STATIC_OUTDIR)/port: | $(STATIC_OUTDIR)
+ mkdir $@
-leveldbutil: db/leveldb_main.o $(LIBOBJECTS)
- $(CXX) $(LDFLAGS) db/leveldb_main.o $(LIBOBJECTS) -o $@ $(LIBS)
+$(STATIC_OUTDIR)/table: | $(STATIC_OUTDIR)
+ mkdir $@
-arena_test: util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(STATIC_OUTDIR)/util: | $(STATIC_OUTDIR)
+ mkdir $@
-autocompact_test: db/autocompact_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/autocompact_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+.PHONY: STATIC_OBJDIRS
+STATIC_OBJDIRS: \
+ $(STATIC_OUTDIR)/db \
+ $(STATIC_OUTDIR)/port \
+ $(STATIC_OUTDIR)/table \
+ $(STATIC_OUTDIR)/util \
+ $(STATIC_OUTDIR)/helpers/memenv
-bloom_test: util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SHARED_OUTDIR):
+ mkdir $@
-c_test: db/c_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/c_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SHARED_OUTDIR)/db: | $(SHARED_OUTDIR)
+ mkdir $@
-cache_test: util/cache_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) util/cache_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SHARED_OUTDIR)/helpers/memenv: | $(SHARED_OUTDIR)
+ mkdir -p $@
-coding_test: util/coding_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) util/coding_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SHARED_OUTDIR)/port: | $(SHARED_OUTDIR)
+ mkdir $@
-corruption_test: db/corruption_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/corruption_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SHARED_OUTDIR)/table: | $(SHARED_OUTDIR)
+ mkdir $@
-crc32c_test: util/crc32c_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) util/crc32c_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SHARED_OUTDIR)/util: | $(SHARED_OUTDIR)
+ mkdir $@
-db_test: db/db_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/db_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+.PHONY: SHARED_OBJDIRS
+SHARED_OBJDIRS: \
+ $(SHARED_OUTDIR)/db \
+ $(SHARED_OUTDIR)/port \
+ $(SHARED_OUTDIR)/table \
+ $(SHARED_OUTDIR)/util \
+ $(SHARED_OUTDIR)/helpers/memenv
-dbformat_test: db/dbformat_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/dbformat_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(DEVICE_OUTDIR):
+ mkdir $@
-env_test: util/env_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) util/env_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(DEVICE_OUTDIR)/db: | $(DEVICE_OUTDIR)
+ mkdir $@
-filename_test: db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(DEVICE_OUTDIR)/helpers/memenv: | $(DEVICE_OUTDIR)
+ mkdir -p $@
-filter_block_test: table/filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) table/filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(DEVICE_OUTDIR)/port: | $(DEVICE_OUTDIR)
+ mkdir $@
-hash_test: util/hash_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) util/hash_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(DEVICE_OUTDIR)/table: | $(DEVICE_OUTDIR)
+ mkdir $@
-issue178_test: issues/issue178_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) issues/issue178_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(DEVICE_OUTDIR)/util: | $(DEVICE_OUTDIR)
+ mkdir $@
-issue200_test: issues/issue200_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) issues/issue200_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+.PHONY: DEVICE_OBJDIRS
+DEVICE_OBJDIRS: \
+ $(DEVICE_OUTDIR)/db \
+ $(DEVICE_OUTDIR)/port \
+ $(DEVICE_OUTDIR)/table \
+ $(DEVICE_OUTDIR)/util \
+ $(DEVICE_OUTDIR)/helpers/memenv
-log_test: db/log_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/log_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SIMULATOR_OUTDIR):
+ mkdir $@
-table_test: table/table_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) table/table_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SIMULATOR_OUTDIR)/db: | $(SIMULATOR_OUTDIR)
+ mkdir $@
-skiplist_test: db/skiplist_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/skiplist_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SIMULATOR_OUTDIR)/helpers/memenv: | $(SIMULATOR_OUTDIR)
+ mkdir -p $@
-version_edit_test: db/version_edit_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/version_edit_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SIMULATOR_OUTDIR)/port: | $(SIMULATOR_OUTDIR)
+ mkdir $@
-version_set_test: db/version_set_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/version_set_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SIMULATOR_OUTDIR)/table: | $(SIMULATOR_OUTDIR)
+ mkdir $@
-write_batch_test: db/write_batch_test.o $(LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) db/write_batch_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+$(SIMULATOR_OUTDIR)/util: | $(SIMULATOR_OUTDIR)
+ mkdir $@
-$(MEMENVLIBRARY) : $(MEMENVOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(MEMENVOBJECTS)
+.PHONY: SIMULATOR_OBJDIRS
+SIMULATOR_OBJDIRS: \
+ $(SIMULATOR_OUTDIR)/db \
+ $(SIMULATOR_OUTDIR)/port \
+ $(SIMULATOR_OUTDIR)/table \
+ $(SIMULATOR_OUTDIR)/util \
+ $(SIMULATOR_OUTDIR)/helpers/memenv
-memenv_test : helpers/memenv/memenv_test.o $(MEMENVLIBRARY) $(LIBRARY) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) helpers/memenv/memenv_test.o $(MEMENVLIBRARY) $(LIBRARY) $(TESTHARNESS) -o $@ $(LIBS)
+$(STATIC_ALLOBJS): | STATIC_OBJDIRS
+$(DEVICE_ALLOBJS): | DEVICE_OBJDIRS
+$(SIMULATOR_ALLOBJS): | SIMULATOR_OBJDIRS
+$(SHARED_ALLOBJS): | SHARED_OBJDIRS
ifeq ($(PLATFORM), IOS)
-# For iOS, create universal object files to be used on both the simulator and
+$(DEVICE_OUTDIR)/libleveldb.a: $(DEVICE_LIBOBJECTS)
+ rm -f $@
+ $(AR) -rs $@ $(DEVICE_LIBOBJECTS)
+
+$(SIMULATOR_OUTDIR)/libleveldb.a: $(SIMULATOR_LIBOBJECTS)
+ rm -f $@
+ $(AR) -rs $@ $(SIMULATOR_LIBOBJECTS)
+
+$(DEVICE_OUTDIR)/libmemenv.a: $(DEVICE_MEMENVOBJECTS)
+ rm -f $@
+ $(AR) -rs $@ $(DEVICE_MEMENVOBJECTS)
+
+$(SIMULATOR_OUTDIR)/libmemenv.a: $(SIMULATOR_MEMENVOBJECTS)
+ rm -f $@
+ $(AR) -rs $@ $(SIMULATOR_MEMENVOBJECTS)
+
+# For iOS, create universal object libraries to be used on both the simulator and
# a device.
-PLATFORMSROOT=/Applications/Xcode.app/Contents/Developer/Platforms
-SIMULATORROOT=$(PLATFORMSROOT)/iPhoneSimulator.platform/Developer
-DEVICEROOT=$(PLATFORMSROOT)/iPhoneOS.platform/Developer
-IOSVERSION=$(shell defaults read $(PLATFORMSROOT)/iPhoneOS.platform/version CFBundleShortVersionString)
-IOSARCH=-arch armv6 -arch armv7 -arch armv7s -arch arm64
-
-.cc.o:
- mkdir -p ios-x86/$(dir $@)
- xcrun -sdk iphonesimulator $(CXX) $(CXXFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
- mkdir -p ios-arm/$(dir $@)
- xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk $(IOSARCH) -c $< -o ios-arm/$@
- xcrun lipo ios-x86/$@ ios-arm/$@ -create -output $@
-
-.c.o:
- mkdir -p ios-x86/$(dir $@)
- xcrun -sdk iphonesimulator $(CC) $(CFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
- mkdir -p ios-arm/$(dir $@)
- xcrun -sdk iphoneos $(CC) $(CFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk $(IOSARCH) -c $< -o ios-arm/$@
- xcrun lipo ios-x86/$@ ios-arm/$@ -create -output $@
+$(STATIC_OUTDIR)/libleveldb.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a
+ lipo -create $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a -output $@
+$(STATIC_OUTDIR)/libmemenv.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a
+ lipo -create $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a -output $@
else
-.cc.o:
+$(STATIC_OUTDIR)/libleveldb.a:$(STATIC_LIBOBJECTS)
+ rm -f $@
+ $(AR) -rs $@ $(STATIC_LIBOBJECTS)
+
+$(STATIC_OUTDIR)/libmemenv.a:$(STATIC_MEMENVOBJECTS)
+ rm -f $@
+ $(AR) -rs $@ $(STATIC_MEMENVOBJECTS)
+endif
+
+$(SHARED_MEMENVLIB):$(SHARED_MEMENVOBJECTS)
+ rm -f $@
+ $(AR) -rs $@ $(SHARED_MEMENVOBJECTS)
+
+$(STATIC_OUTDIR)/db_bench:db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/db_bench_sqlite3:doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lsqlite3 $(LIBS)
+
+$(STATIC_OUTDIR)/db_bench_tree_db:doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lkyotocabinet $(LIBS)
+
+$(STATIC_OUTDIR)/leveldbutil:db/leveldbutil.cc $(STATIC_LIBOBJECTS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/leveldbutil.cc $(STATIC_LIBOBJECTS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/arena_test:util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/autocompact_test:db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/bloom_test:util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/c_test:$(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/cache_test:util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/coding_test:util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/corruption_test:db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/crc32c_test:util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/db_test:db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/dbformat_test:db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/env_test:util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/fault_injection_test:db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/filename_test:db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/filter_block_test:table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/hash_test:util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/issue178_test:issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/issue200_test:issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/log_test:db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/recovery_test:db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/table_test:table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/skiplist_test:db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/version_edit_test:db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/version_set_test:db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/write_batch_test:db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
+ $(CXX) $(LDFLAGS) $(CXXFLAGS) db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
+
+$(STATIC_OUTDIR)/memenv_test:$(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS)
+ $(XCRUN) $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS) -o $@ $(LIBS)
+
+$(SHARED_OUTDIR)/db_bench:$(SHARED_OUTDIR)/db/db_bench.o $(SHARED_LIBS) $(TESTUTIL)
+ $(XCRUN) $(CXX) $(LDFLAGS) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(SHARED_OUTDIR)/db/db_bench.o $(TESTUTIL) $(SHARED_OUTDIR)/$(SHARED_LIB3) -o $@ $(LIBS)
+
+.PHONY: run-shared
+run-shared: $(SHARED_OUTDIR)/db_bench
+ LD_LIBRARY_PATH=$(SHARED_OUTDIR) $(SHARED_OUTDIR)/db_bench
+
+$(SIMULATOR_OUTDIR)/%.o: %.cc
+ xcrun -sdk iphonesimulator $(CXX) $(CXXFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@
+
+$(DEVICE_OUTDIR)/%.o: %.cc
+ xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) $(DEVICE_CFLAGS) -c $< -o $@
+
+$(SIMULATOR_OUTDIR)/%.o: %.c
+ xcrun -sdk iphonesimulator $(CC) $(CFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@
+
+$(DEVICE_OUTDIR)/%.o: %.c
+ xcrun -sdk iphoneos $(CC) $(CFLAGS) $(DEVICE_CFLAGS) -c $< -o $@
+
+$(STATIC_OUTDIR)/%.o: %.cc
$(CXX) $(CXXFLAGS) -c $< -o $@
-.c.o:
+$(STATIC_OUTDIR)/%.o: %.c
$(CC) $(CFLAGS) -c $< -o $@
-endif
+
+$(SHARED_OUTDIR)/%.o: %.cc
+ $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@
+
+$(SHARED_OUTDIR)/%.o: %.c
+ $(CC) $(CFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@
diff --git a/src/leveldb/README b/src/leveldb/README
deleted file mode 100644
index 3618adeeed..0000000000
--- a/src/leveldb/README
+++ /dev/null
@@ -1,51 +0,0 @@
-leveldb: A key-value store
-Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
-
-The code under this directory implements a system for maintaining a
-persistent key/value store.
-
-See doc/index.html for more explanation.
-See doc/impl.html for a brief overview of the implementation.
-
-The public interface is in include/*.h. Callers should not include or
-rely on the details of any other header files in this package. Those
-internal APIs may be changed without warning.
-
-Guide to header files:
-
-include/db.h
- Main interface to the DB: Start here
-
-include/options.h
- Control over the behavior of an entire database, and also
- control over the behavior of individual reads and writes.
-
-include/comparator.h
- Abstraction for user-specified comparison function. If you want
- just bytewise comparison of keys, you can use the default comparator,
- but clients can write their own comparator implementations if they
- want custom ordering (e.g. to handle different character
- encodings, etc.)
-
-include/iterator.h
- Interface for iterating over data. You can get an iterator
- from a DB object.
-
-include/write_batch.h
- Interface for atomically applying multiple updates to a database.
-
-include/slice.h
- A simple module for maintaining a pointer and a length into some
- other byte array.
-
-include/status.h
- Status is returned from many of the public interfaces and is used
- to report success and various kinds of errors.
-
-include/env.h
- Abstraction of the OS environment. A posix implementation of
- this interface is in util/env_posix.cc
-
-include/table.h
-include/table_builder.h
- Lower-level modules that most clients probably won't use directly
diff --git a/src/leveldb/README.md b/src/leveldb/README.md
index 480affb5ca..c75b185e0e 100644
--- a/src/leveldb/README.md
+++ b/src/leveldb/README.md
@@ -1,5 +1,7 @@
**LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.**
+[![Build Status](https://travis-ci.org/google/leveldb.svg?branch=master)](https://travis-ci.org/google/leveldb)
+
Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
# Features
@@ -10,9 +12,11 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
* Multiple changes can be made in one atomic batch.
* Users can create a transient snapshot to get a consistent view of data.
* Forward and backward iteration is supported over the data.
- * Data is automatically compressed using the [Snappy compression library](http://code.google.com/p/snappy).
+ * Data is automatically compressed using the [Snappy compression library](http://google.github.io/snappy/).
* External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions.
- * [Detailed documentation](http://htmlpreview.github.io/?https://github.com/google/leveldb/blob/master/doc/index.html) about how to use the library is included with the source code.
+
+# Documentation
+ [LevelDB library documentation](https://rawgit.com/google/leveldb/master/doc/index.html) is online and bundled with the source code.
# Limitations
@@ -20,6 +24,37 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
* Only a single process (possibly multi-threaded) can access a particular database at a time.
* There is no client-server support builtin to the library. An application that needs such support will have to wrap their own server around the library.
+# Contributing to the leveldb Project
+The leveldb project welcomes contributions. leveldb's primary goal is to be
+a reliable and fast key/value store. Changes that are in line with the
+features/limitations outlined above, and meet the requirements below,
+will be considered.
+
+Contribution requirements:
+
+1. **POSIX only**. We _generally_ will only accept changes that are both
+ compiled, and tested on a POSIX platform - usually Linux. Very small
+ changes will sometimes be accepted, but consider that more of an
+ exception than the rule.
+
+2. **Stable API**. We strive very hard to maintain a stable API. Changes that
+ require changes for projects using leveldb _might_ be rejected without
+ sufficient benefit to the project.
+
+3. **Tests**: All changes must be accompanied by a new (or changed) test, or
+ a sufficient explanation as to why a new (or changed) test is not required.
+
+## Submitting a Pull Request
+Before any pull request will be accepted the author must first sign a
+Contributor License Agreement (CLA) at https://cla.developers.google.com/.
+
+In order to keep the commit timeline linear
+[squash](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History#Squashing-Commits)
+your changes down to a single commit and [rebase](https://git-scm.com/docs/git-rebase)
+on google/leveldb/master. This keeps the commit timeline linear and more easily sync'ed
+with the internal repository at Google. More information at GitHub's
+[About Git rebase](https://help.github.com/articles/about-git-rebase/) page.
+
# Performance
Here is a performance report (with explanations) from the run of the
diff --git a/src/leveldb/build_detect_platform b/src/leveldb/build_detect_platform
index a1101c1bda..d7edab1d87 100755
--- a/src/leveldb/build_detect_platform
+++ b/src/leveldb/build_detect_platform
@@ -175,7 +175,7 @@ DIRS="$PREFIX/db $PREFIX/util $PREFIX/table"
set -f # temporarily disable globbing so that our patterns aren't expanded
PRUNE_TEST="-name *test*.cc -prune"
PRUNE_BENCH="-name *_bench.cc -prune"
-PRUNE_TOOL="-name leveldb_main.cc -prune"
+PRUNE_TOOL="-name leveldbutil.cc -prune"
PORTABLE_FILES=`find $DIRS $PRUNE_TEST -o $PRUNE_BENCH -o $PRUNE_TOOL -o -name '*.cc' -print | sort | sed "s,^$PREFIX/,," | tr "\n" " "`
set +f # re-enable globbing
diff --git a/src/leveldb/db/corruption_test.cc b/src/leveldb/db/corruption_test.cc
index 96afc68913..37a484d25f 100644
--- a/src/leveldb/db/corruption_test.cc
+++ b/src/leveldb/db/corruption_test.cc
@@ -36,7 +36,7 @@ class CorruptionTest {
tiny_cache_ = NewLRUCache(100);
options_.env = &env_;
options_.block_cache = tiny_cache_;
- dbname_ = test::TmpDir() + "/db_test";
+ dbname_ = test::TmpDir() + "/corruption_test";
DestroyDB(dbname_, options_);
db_ = NULL;
diff --git a/src/leveldb/db/db_bench.cc b/src/leveldb/db/db_bench.cc
index 705a170aae..7a0f5e08cd 100644
--- a/src/leveldb/db/db_bench.cc
+++ b/src/leveldb/db/db_bench.cc
@@ -33,6 +33,7 @@
// readmissing -- read N missing keys in random order
// readhot -- read N times in random order from 1% section of DB
// seekrandom -- N random seeks
+// open -- cost of opening a DB
// crc32c -- repeated crc32c of 4K of data
// acquireload -- load N*1000 times
// Meta operations:
@@ -99,6 +100,9 @@ static int FLAGS_bloom_bits = -1;
// benchmark will fail.
static bool FLAGS_use_existing_db = false;
+// If true, reuse existing log/MANIFEST files when re-opening a database.
+static bool FLAGS_reuse_logs = false;
+
// Use the db with the following name.
static const char* FLAGS_db = NULL;
@@ -138,6 +142,7 @@ class RandomGenerator {
}
};
+#if defined(__linux)
static Slice TrimSpace(Slice s) {
size_t start = 0;
while (start < s.size() && isspace(s[start])) {
@@ -149,6 +154,7 @@ static Slice TrimSpace(Slice s) {
}
return Slice(s.data() + start, limit - start);
}
+#endif
static void AppendWithSpace(std::string* str, Slice msg) {
if (msg.empty()) return;
@@ -442,7 +448,11 @@ class Benchmark {
bool fresh_db = false;
int num_threads = FLAGS_threads;
- if (name == Slice("fillseq")) {
+ if (name == Slice("open")) {
+ method = &Benchmark::OpenBench;
+ num_ /= 10000;
+ if (num_ < 1) num_ = 1;
+ } else if (name == Slice("fillseq")) {
fresh_db = true;
method = &Benchmark::WriteSeq;
} else if (name == Slice("fillbatch")) {
@@ -695,6 +705,7 @@ class Benchmark {
options.write_buffer_size = FLAGS_write_buffer_size;
options.max_open_files = FLAGS_open_files;
options.filter_policy = filter_policy_;
+ options.reuse_logs = FLAGS_reuse_logs;
Status s = DB::Open(options, FLAGS_db, &db_);
if (!s.ok()) {
fprintf(stderr, "open error: %s\n", s.ToString().c_str());
@@ -702,6 +713,14 @@ class Benchmark {
}
}
+ void OpenBench(ThreadState* thread) {
+ for (int i = 0; i < num_; i++) {
+ delete db_;
+ Open();
+ thread->stats.FinishedSingleOp();
+ }
+ }
+
void WriteSeq(ThreadState* thread) {
DoWrite(thread, true);
}
@@ -941,6 +960,9 @@ int main(int argc, char** argv) {
} else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
(n == 0 || n == 1)) {
FLAGS_use_existing_db = n;
+ } else if (sscanf(argv[i], "--reuse_logs=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_reuse_logs = n;
} else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
FLAGS_num = n;
} else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
diff --git a/src/leveldb/db/db_impl.cc b/src/leveldb/db/db_impl.cc
index 49b95953b4..60f4e66e55 100644
--- a/src/leveldb/db/db_impl.cc
+++ b/src/leveldb/db/db_impl.cc
@@ -125,7 +125,7 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
db_lock_(NULL),
shutting_down_(NULL),
bg_cv_(&mutex_),
- mem_(new MemTable(internal_comparator_)),
+ mem_(NULL),
imm_(NULL),
logfile_(NULL),
logfile_number_(0),
@@ -134,7 +134,6 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
tmp_batch_(new WriteBatch),
bg_compaction_scheduled_(false),
manual_compaction_(NULL) {
- mem_->Ref();
has_imm_.Release_Store(NULL);
// Reserve ten files or so for other uses and give the rest to TableCache.
@@ -271,7 +270,7 @@ void DBImpl::DeleteObsoleteFiles() {
}
}
-Status DBImpl::Recover(VersionEdit* edit) {
+Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) {
mutex_.AssertHeld();
// Ignore error from CreateDir since the creation of the DB is
@@ -301,66 +300,69 @@ Status DBImpl::Recover(VersionEdit* edit) {
}
}
- s = versions_->Recover();
- if (s.ok()) {
- SequenceNumber max_sequence(0);
-
- // Recover from all newer log files than the ones named in the
- // descriptor (new log files may have been added by the previous
- // incarnation without registering them in the descriptor).
- //
- // Note that PrevLogNumber() is no longer used, but we pay
- // attention to it in case we are recovering a database
- // produced by an older version of leveldb.
- const uint64_t min_log = versions_->LogNumber();
- const uint64_t prev_log = versions_->PrevLogNumber();
- std::vector<std::string> filenames;
- s = env_->GetChildren(dbname_, &filenames);
+ s = versions_->Recover(save_manifest);
+ if (!s.ok()) {
+ return s;
+ }
+ SequenceNumber max_sequence(0);
+
+ // Recover from all newer log files than the ones named in the
+ // descriptor (new log files may have been added by the previous
+ // incarnation without registering them in the descriptor).
+ //
+ // Note that PrevLogNumber() is no longer used, but we pay
+ // attention to it in case we are recovering a database
+ // produced by an older version of leveldb.
+ const uint64_t min_log = versions_->LogNumber();
+ const uint64_t prev_log = versions_->PrevLogNumber();
+ std::vector<std::string> filenames;
+ s = env_->GetChildren(dbname_, &filenames);
+ if (!s.ok()) {
+ return s;
+ }
+ std::set<uint64_t> expected;
+ versions_->AddLiveFiles(&expected);
+ uint64_t number;
+ FileType type;
+ std::vector<uint64_t> logs;
+ for (size_t i = 0; i < filenames.size(); i++) {
+ if (ParseFileName(filenames[i], &number, &type)) {
+ expected.erase(number);
+ if (type == kLogFile && ((number >= min_log) || (number == prev_log)))
+ logs.push_back(number);
+ }
+ }
+ if (!expected.empty()) {
+ char buf[50];
+ snprintf(buf, sizeof(buf), "%d missing files; e.g.",
+ static_cast<int>(expected.size()));
+ return Status::Corruption(buf, TableFileName(dbname_, *(expected.begin())));
+ }
+
+ // Recover in the order in which the logs were generated
+ std::sort(logs.begin(), logs.end());
+ for (size_t i = 0; i < logs.size(); i++) {
+ s = RecoverLogFile(logs[i], (i == logs.size() - 1), save_manifest, edit,
+ &max_sequence);
if (!s.ok()) {
return s;
}
- std::set<uint64_t> expected;
- versions_->AddLiveFiles(&expected);
- uint64_t number;
- FileType type;
- std::vector<uint64_t> logs;
- for (size_t i = 0; i < filenames.size(); i++) {
- if (ParseFileName(filenames[i], &number, &type)) {
- expected.erase(number);
- if (type == kLogFile && ((number >= min_log) || (number == prev_log)))
- logs.push_back(number);
- }
- }
- if (!expected.empty()) {
- char buf[50];
- snprintf(buf, sizeof(buf), "%d missing files; e.g.",
- static_cast<int>(expected.size()));
- return Status::Corruption(buf, TableFileName(dbname_, *(expected.begin())));
- }
-
- // Recover in the order in which the logs were generated
- std::sort(logs.begin(), logs.end());
- for (size_t i = 0; i < logs.size(); i++) {
- s = RecoverLogFile(logs[i], edit, &max_sequence);
- // The previous incarnation may not have written any MANIFEST
- // records after allocating this log number. So we manually
- // update the file number allocation counter in VersionSet.
- versions_->MarkFileNumberUsed(logs[i]);
- }
+ // The previous incarnation may not have written any MANIFEST
+ // records after allocating this log number. So we manually
+ // update the file number allocation counter in VersionSet.
+ versions_->MarkFileNumberUsed(logs[i]);
+ }
- if (s.ok()) {
- if (versions_->LastSequence() < max_sequence) {
- versions_->SetLastSequence(max_sequence);
- }
- }
+ if (versions_->LastSequence() < max_sequence) {
+ versions_->SetLastSequence(max_sequence);
}
- return s;
+ return Status::OK();
}
-Status DBImpl::RecoverLogFile(uint64_t log_number,
- VersionEdit* edit,
+Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
+ bool* save_manifest, VersionEdit* edit,
SequenceNumber* max_sequence) {
struct LogReporter : public log::Reader::Reporter {
Env* env;
@@ -405,6 +407,7 @@ Status DBImpl::RecoverLogFile(uint64_t log_number,
std::string scratch;
Slice record;
WriteBatch batch;
+ int compactions = 0;
MemTable* mem = NULL;
while (reader.ReadRecord(&record, &scratch) &&
status.ok()) {
@@ -432,25 +435,52 @@ Status DBImpl::RecoverLogFile(uint64_t log_number,
}
if (mem->ApproximateMemoryUsage() > options_.write_buffer_size) {
+ compactions++;
+ *save_manifest = true;
status = WriteLevel0Table(mem, edit, NULL);
+ mem->Unref();
+ mem = NULL;
if (!status.ok()) {
// Reflect errors immediately so that conditions like full
// file-systems cause the DB::Open() to fail.
break;
}
- mem->Unref();
- mem = NULL;
}
}
- if (status.ok() && mem != NULL) {
- status = WriteLevel0Table(mem, edit, NULL);
- // Reflect errors immediately so that conditions like full
- // file-systems cause the DB::Open() to fail.
+ delete file;
+
+ // See if we should keep reusing the last log file.
+ if (status.ok() && options_.reuse_logs && last_log && compactions == 0) {
+ assert(logfile_ == NULL);
+ assert(log_ == NULL);
+ assert(mem_ == NULL);
+ uint64_t lfile_size;
+ if (env_->GetFileSize(fname, &lfile_size).ok() &&
+ env_->NewAppendableFile(fname, &logfile_).ok()) {
+ Log(options_.info_log, "Reusing old log %s \n", fname.c_str());
+ log_ = new log::Writer(logfile_, lfile_size);
+ logfile_number_ = log_number;
+ if (mem != NULL) {
+ mem_ = mem;
+ mem = NULL;
+ } else {
+ // mem can be NULL if lognum exists but was empty.
+ mem_ = new MemTable(internal_comparator_);
+ mem_->Ref();
+ }
+ }
+ }
+
+ if (mem != NULL) {
+ // mem did not get reused; compact it.
+ if (status.ok()) {
+ *save_manifest = true;
+ status = WriteLevel0Table(mem, edit, NULL);
+ }
+ mem->Unref();
}
- if (mem != NULL) mem->Unref();
- delete file;
return status;
}
@@ -821,8 +851,9 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
delete iter;
if (s.ok()) {
Log(options_.info_log,
- "Generated table #%llu: %lld keys, %lld bytes",
+ "Generated table #%llu@%d: %lld keys, %lld bytes",
(unsigned long long) output_number,
+ compact->compaction->level(),
(unsigned long long) current_entries,
(unsigned long long) current_bytes);
}
@@ -1395,6 +1426,19 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
} else if (in == "sstables") {
*value = versions_->current()->DebugString();
return true;
+ } else if (in == "approximate-memory-usage") {
+ size_t total_usage = options_.block_cache->TotalCharge();
+ if (mem_) {
+ total_usage += mem_->ApproximateMemoryUsage();
+ }
+ if (imm_) {
+ total_usage += imm_->ApproximateMemoryUsage();
+ }
+ char buf[50];
+ snprintf(buf, sizeof(buf), "%llu",
+ static_cast<unsigned long long>(total_usage));
+ value->append(buf);
+ return true;
}
return false;
@@ -1449,8 +1493,11 @@ Status DB::Open(const Options& options, const std::string& dbname,
DBImpl* impl = new DBImpl(options, dbname);
impl->mutex_.Lock();
VersionEdit edit;
- Status s = impl->Recover(&edit); // Handles create_if_missing, error_if_exists
- if (s.ok()) {
+ // Recover handles create_if_missing, error_if_exists
+ bool save_manifest = false;
+ Status s = impl->Recover(&edit, &save_manifest);
+ if (s.ok() && impl->mem_ == NULL) {
+ // Create new log and a corresponding memtable.
uint64_t new_log_number = impl->versions_->NewFileNumber();
WritableFile* lfile;
s = options.env->NewWritableFile(LogFileName(dbname, new_log_number),
@@ -1460,15 +1507,22 @@ Status DB::Open(const Options& options, const std::string& dbname,
impl->logfile_ = lfile;
impl->logfile_number_ = new_log_number;
impl->log_ = new log::Writer(lfile);
- s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
- }
- if (s.ok()) {
- impl->DeleteObsoleteFiles();
- impl->MaybeScheduleCompaction();
+ impl->mem_ = new MemTable(impl->internal_comparator_);
+ impl->mem_->Ref();
}
}
+ if (s.ok() && save_manifest) {
+ edit.SetPrevLogNumber(0); // No older logs needed after recovery.
+ edit.SetLogNumber(impl->logfile_number_);
+ s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
+ }
+ if (s.ok()) {
+ impl->DeleteObsoleteFiles();
+ impl->MaybeScheduleCompaction();
+ }
impl->mutex_.Unlock();
if (s.ok()) {
+ assert(impl->mem_ != NULL);
*dbptr = impl;
} else {
delete impl;
diff --git a/src/leveldb/db/db_impl.h b/src/leveldb/db/db_impl.h
index cfc998164a..8ff323e728 100644
--- a/src/leveldb/db/db_impl.h
+++ b/src/leveldb/db/db_impl.h
@@ -78,7 +78,8 @@ class DBImpl : public DB {
// Recover the descriptor from persistent storage. May do a significant
// amount of work to recover recently logged updates. Any changes to
// be made to the descriptor are added to *edit.
- Status Recover(VersionEdit* edit) EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ Status Recover(VersionEdit* edit, bool* save_manifest)
+ EXCLUSIVE_LOCKS_REQUIRED(mutex_);
void MaybeIgnoreError(Status* s) const;
@@ -90,9 +91,8 @@ class DBImpl : public DB {
// Errors are recorded in bg_error_.
void CompactMemTable() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
- Status RecoverLogFile(uint64_t log_number,
- VersionEdit* edit,
- SequenceNumber* max_sequence)
+ Status RecoverLogFile(uint64_t log_number, bool last_log, bool* save_manifest,
+ VersionEdit* edit, SequenceNumber* max_sequence)
EXCLUSIVE_LOCKS_REQUIRED(mutex_);
Status WriteLevel0Table(MemTable* mem, VersionEdit* edit, Version* base)
diff --git a/src/leveldb/db/db_test.cc b/src/leveldb/db/db_test.cc
index 0fed9137d5..a0b08bc19c 100644
--- a/src/leveldb/db/db_test.cc
+++ b/src/leveldb/db/db_test.cc
@@ -193,6 +193,7 @@ class DBTest {
// Sequence of option configurations to try
enum OptionConfig {
kDefault,
+ kReuse,
kFilter,
kUncompressed,
kEnd
@@ -237,7 +238,11 @@ class DBTest {
// Return the current option configuration.
Options CurrentOptions() {
Options options;
+ options.reuse_logs = false;
switch (option_config_) {
+ case kReuse:
+ options.reuse_logs = true;
+ break;
case kFilter:
options.filter_policy = filter_policy_;
break;
@@ -558,6 +563,17 @@ TEST(DBTest, GetFromVersions) {
} while (ChangeOptions());
}
+TEST(DBTest, GetMemUsage) {
+ do {
+ ASSERT_OK(Put("foo", "v1"));
+ std::string val;
+ ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
+ int mem_usage = atoi(val.c_str());
+ ASSERT_GT(mem_usage, 0);
+ ASSERT_LT(mem_usage, 5*1024*1024);
+ } while (ChangeOptions());
+}
+
TEST(DBTest, GetSnapshot) {
do {
// Try with both a short key and a long key
@@ -1080,6 +1096,14 @@ TEST(DBTest, ApproximateSizes) {
// 0 because GetApproximateSizes() does not account for memtable space
ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));
+ if (options.reuse_logs) {
+ // Recovery will reuse memtable, and GetApproximateSizes() does not
+ // account for memtable usage;
+ Reopen(&options);
+ ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));
+ continue;
+ }
+
// Check sizes across recovery by reopening a few times
for (int run = 0; run < 3; run++) {
Reopen(&options);
@@ -1123,6 +1147,11 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
ASSERT_OK(Put(Key(6), RandomString(&rnd, 300000)));
ASSERT_OK(Put(Key(7), RandomString(&rnd, 10000)));
+ if (options.reuse_logs) {
+ // Need to force a memtable compaction since recovery does not do so.
+ ASSERT_OK(dbfull()->TEST_CompactMemTable());
+ }
+
// Check sizes across recovery by reopening a few times
for (int run = 0; run < 3; run++) {
Reopen(&options);
@@ -2084,7 +2113,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
InternalKeyComparator cmp(BytewiseComparator());
Options options;
VersionSet vset(dbname, &options, NULL, &cmp);
- ASSERT_OK(vset.Recover());
+ bool save_manifest;
+ ASSERT_OK(vset.Recover(&save_manifest));
VersionEdit vbase;
uint64_t fnum = 1;
for (int i = 0; i < num_base_files; i++) {
diff --git a/src/leveldb/db/fault_injection_test.cc b/src/leveldb/db/fault_injection_test.cc
new file mode 100644
index 0000000000..875dfe81ee
--- /dev/null
+++ b/src/leveldb/db/fault_injection_test.cc
@@ -0,0 +1,554 @@
+// Copyright 2014 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+// This test uses a custom Env to keep track of the state of a filesystem as of
+// the last "sync". It then checks for data loss errors by purposely dropping
+// file data (or entire files) not protected by a "sync".
+
+#include "leveldb/db.h"
+
+#include <map>
+#include <set>
+#include "db/db_impl.h"
+#include "db/filename.h"
+#include "db/log_format.h"
+#include "db/version_set.h"
+#include "leveldb/cache.h"
+#include "leveldb/env.h"
+#include "leveldb/table.h"
+#include "leveldb/write_batch.h"
+#include "util/logging.h"
+#include "util/mutexlock.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+
+namespace leveldb {
+
+static const int kValueSize = 1000;
+static const int kMaxNumValues = 2000;
+static const size_t kNumIterations = 3;
+
+class FaultInjectionTestEnv;
+
+namespace {
+
+// Assume a filename, and not a directory name like "/foo/bar/"
+static std::string GetDirName(const std::string filename) {
+ size_t found = filename.find_last_of("/\\");
+ if (found == std::string::npos) {
+ return "";
+ } else {
+ return filename.substr(0, found);
+ }
+}
+
+Status SyncDir(const std::string& dir) {
+ // As this is a test it isn't required to *actually* sync this directory.
+ return Status::OK();
+}
+
+// A basic file truncation function suitable for this test.
+Status Truncate(const std::string& filename, uint64_t length) {
+ leveldb::Env* env = leveldb::Env::Default();
+
+ SequentialFile* orig_file;
+ Status s = env->NewSequentialFile(filename, &orig_file);
+ if (!s.ok())
+ return s;
+
+ char* scratch = new char[length];
+ leveldb::Slice result;
+ s = orig_file->Read(length, &result, scratch);
+ delete orig_file;
+ if (s.ok()) {
+ std::string tmp_name = GetDirName(filename) + "/truncate.tmp";
+ WritableFile* tmp_file;
+ s = env->NewWritableFile(tmp_name, &tmp_file);
+ if (s.ok()) {
+ s = tmp_file->Append(result);
+ delete tmp_file;
+ if (s.ok()) {
+ s = env->RenameFile(tmp_name, filename);
+ } else {
+ env->DeleteFile(tmp_name);
+ }
+ }
+ }
+
+ delete[] scratch;
+
+ return s;
+}
+
+struct FileState {
+ std::string filename_;
+ ssize_t pos_;
+ ssize_t pos_at_last_sync_;
+ ssize_t pos_at_last_flush_;
+
+ FileState(const std::string& filename)
+ : filename_(filename),
+ pos_(-1),
+ pos_at_last_sync_(-1),
+ pos_at_last_flush_(-1) { }
+
+ FileState() : pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {}
+
+ bool IsFullySynced() const { return pos_ <= 0 || pos_ == pos_at_last_sync_; }
+
+ Status DropUnsyncedData() const;
+};
+
+} // anonymous namespace
+
+// A wrapper around WritableFile which informs another Env whenever this file
+// is written to or sync'ed.
+class TestWritableFile : public WritableFile {
+ public:
+ TestWritableFile(const FileState& state,
+ WritableFile* f,
+ FaultInjectionTestEnv* env);
+ virtual ~TestWritableFile();
+ virtual Status Append(const Slice& data);
+ virtual Status Close();
+ virtual Status Flush();
+ virtual Status Sync();
+
+ private:
+ FileState state_;
+ WritableFile* target_;
+ bool writable_file_opened_;
+ FaultInjectionTestEnv* env_;
+
+ Status SyncParent();
+};
+
+class FaultInjectionTestEnv : public EnvWrapper {
+ public:
+ FaultInjectionTestEnv() : EnvWrapper(Env::Default()), filesystem_active_(true) {}
+ virtual ~FaultInjectionTestEnv() { }
+ virtual Status NewWritableFile(const std::string& fname,
+ WritableFile** result);
+ virtual Status NewAppendableFile(const std::string& fname,
+ WritableFile** result);
+ virtual Status DeleteFile(const std::string& f);
+ virtual Status RenameFile(const std::string& s, const std::string& t);
+
+ void WritableFileClosed(const FileState& state);
+ Status DropUnsyncedFileData();
+ Status DeleteFilesCreatedAfterLastDirSync();
+ void DirWasSynced();
+ bool IsFileCreatedSinceLastDirSync(const std::string& filename);
+ void ResetState();
+ void UntrackFile(const std::string& f);
+ // Setting the filesystem to inactive is the test equivalent to simulating a
+ // system reset. Setting to inactive will freeze our saved filesystem state so
+ // that it will stop being recorded. It can then be reset back to the state at
+ // the time of the reset.
+ bool IsFilesystemActive() const { return filesystem_active_; }
+ void SetFilesystemActive(bool active) { filesystem_active_ = active; }
+
+ private:
+ port::Mutex mutex_;
+ std::map<std::string, FileState> db_file_state_;
+ std::set<std::string> new_files_since_last_dir_sync_;
+ bool filesystem_active_; // Record flushes, syncs, writes
+};
+
+TestWritableFile::TestWritableFile(const FileState& state,
+ WritableFile* f,
+ FaultInjectionTestEnv* env)
+ : state_(state),
+ target_(f),
+ writable_file_opened_(true),
+ env_(env) {
+ assert(f != NULL);
+}
+
+TestWritableFile::~TestWritableFile() {
+ if (writable_file_opened_) {
+ Close();
+ }
+ delete target_;
+}
+
+Status TestWritableFile::Append(const Slice& data) {
+ Status s = target_->Append(data);
+ if (s.ok() && env_->IsFilesystemActive()) {
+ state_.pos_ += data.size();
+ }
+ return s;
+}
+
+Status TestWritableFile::Close() {
+ writable_file_opened_ = false;
+ Status s = target_->Close();
+ if (s.ok()) {
+ env_->WritableFileClosed(state_);
+ }
+ return s;
+}
+
+Status TestWritableFile::Flush() {
+ Status s = target_->Flush();
+ if (s.ok() && env_->IsFilesystemActive()) {
+ state_.pos_at_last_flush_ = state_.pos_;
+ }
+ return s;
+}
+
+Status TestWritableFile::SyncParent() {
+ Status s = SyncDir(GetDirName(state_.filename_));
+ if (s.ok()) {
+ env_->DirWasSynced();
+ }
+ return s;
+}
+
+Status TestWritableFile::Sync() {
+ if (!env_->IsFilesystemActive()) {
+ return Status::OK();
+ }
+ // Ensure new files referred to by the manifest are in the filesystem.
+ Status s = target_->Sync();
+ if (s.ok()) {
+ state_.pos_at_last_sync_ = state_.pos_;
+ }
+ if (env_->IsFileCreatedSinceLastDirSync(state_.filename_)) {
+ Status ps = SyncParent();
+ if (s.ok() && !ps.ok()) {
+ s = ps;
+ }
+ }
+ return s;
+}
+
+Status FaultInjectionTestEnv::NewWritableFile(const std::string& fname,
+ WritableFile** result) {
+ WritableFile* actual_writable_file;
+ Status s = target()->NewWritableFile(fname, &actual_writable_file);
+ if (s.ok()) {
+ FileState state(fname);
+ state.pos_ = 0;
+ *result = new TestWritableFile(state, actual_writable_file, this);
+ // NewWritableFile doesn't append to files, so if the same file is
+ // opened again then it will be truncated - so forget our saved
+ // state.
+ UntrackFile(fname);
+ MutexLock l(&mutex_);
+ new_files_since_last_dir_sync_.insert(fname);
+ }
+ return s;
+}
+
+Status FaultInjectionTestEnv::NewAppendableFile(const std::string& fname,
+ WritableFile** result) {
+ WritableFile* actual_writable_file;
+ Status s = target()->NewAppendableFile(fname, &actual_writable_file);
+ if (s.ok()) {
+ FileState state(fname);
+ state.pos_ = 0;
+ {
+ MutexLock l(&mutex_);
+ if (db_file_state_.count(fname) == 0) {
+ new_files_since_last_dir_sync_.insert(fname);
+ } else {
+ state = db_file_state_[fname];
+ }
+ }
+ *result = new TestWritableFile(state, actual_writable_file, this);
+ }
+ return s;
+}
+
+Status FaultInjectionTestEnv::DropUnsyncedFileData() {
+ Status s;
+ MutexLock l(&mutex_);
+ for (std::map<std::string, FileState>::const_iterator it =
+ db_file_state_.begin();
+ s.ok() && it != db_file_state_.end(); ++it) {
+ const FileState& state = it->second;
+ if (!state.IsFullySynced()) {
+ s = state.DropUnsyncedData();
+ }
+ }
+ return s;
+}
+
+void FaultInjectionTestEnv::DirWasSynced() {
+ MutexLock l(&mutex_);
+ new_files_since_last_dir_sync_.clear();
+}
+
+bool FaultInjectionTestEnv::IsFileCreatedSinceLastDirSync(
+ const std::string& filename) {
+ MutexLock l(&mutex_);
+ return new_files_since_last_dir_sync_.find(filename) !=
+ new_files_since_last_dir_sync_.end();
+}
+
+void FaultInjectionTestEnv::UntrackFile(const std::string& f) {
+ MutexLock l(&mutex_);
+ db_file_state_.erase(f);
+ new_files_since_last_dir_sync_.erase(f);
+}
+
+Status FaultInjectionTestEnv::DeleteFile(const std::string& f) {
+ Status s = EnvWrapper::DeleteFile(f);
+ ASSERT_OK(s);
+ if (s.ok()) {
+ UntrackFile(f);
+ }
+ return s;
+}
+
+Status FaultInjectionTestEnv::RenameFile(const std::string& s,
+ const std::string& t) {
+ Status ret = EnvWrapper::RenameFile(s, t);
+
+ if (ret.ok()) {
+ MutexLock l(&mutex_);
+ if (db_file_state_.find(s) != db_file_state_.end()) {
+ db_file_state_[t] = db_file_state_[s];
+ db_file_state_.erase(s);
+ }
+
+ if (new_files_since_last_dir_sync_.erase(s) != 0) {
+ assert(new_files_since_last_dir_sync_.find(t) ==
+ new_files_since_last_dir_sync_.end());
+ new_files_since_last_dir_sync_.insert(t);
+ }
+ }
+
+ return ret;
+}
+
+void FaultInjectionTestEnv::ResetState() {
+ // Since we are not destroying the database, the existing files
+ // should keep their recorded synced/flushed state. Therefore
+ // we do not reset db_file_state_ and new_files_since_last_dir_sync_.
+ MutexLock l(&mutex_);
+ SetFilesystemActive(true);
+}
+
+Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() {
+  // Because DeleteFile accesses this container, make a copy to avoid deadlock
+ mutex_.Lock();
+ std::set<std::string> new_files(new_files_since_last_dir_sync_.begin(),
+ new_files_since_last_dir_sync_.end());
+ mutex_.Unlock();
+ Status s;
+ std::set<std::string>::const_iterator it;
+ for (it = new_files.begin(); s.ok() && it != new_files.end(); ++it) {
+ s = DeleteFile(*it);
+ }
+ return s;
+}
+
+void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) {
+ MutexLock l(&mutex_);
+ db_file_state_[state.filename_] = state;
+}
+
+Status FileState::DropUnsyncedData() const {
+ ssize_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
+ return Truncate(filename_, sync_pos);
+}
+
+class FaultInjectionTest {
+ public:
+ enum ExpectedVerifResult { VAL_EXPECT_NO_ERROR, VAL_EXPECT_ERROR };
+ enum ResetMethod { RESET_DROP_UNSYNCED_DATA, RESET_DELETE_UNSYNCED_FILES };
+
+ FaultInjectionTestEnv* env_;
+ std::string dbname_;
+ Cache* tiny_cache_;
+ Options options_;
+ DB* db_;
+
+ FaultInjectionTest()
+ : env_(new FaultInjectionTestEnv),
+ tiny_cache_(NewLRUCache(100)),
+ db_(NULL) {
+ dbname_ = test::TmpDir() + "/fault_test";
+ DestroyDB(dbname_, Options()); // Destroy any db from earlier run
+ options_.reuse_logs = true;
+ options_.env = env_;
+ options_.paranoid_checks = true;
+ options_.block_cache = tiny_cache_;
+ options_.create_if_missing = true;
+ }
+
+ ~FaultInjectionTest() {
+ CloseDB();
+ DestroyDB(dbname_, Options());
+ delete tiny_cache_;
+ delete env_;
+ }
+
+ void ReuseLogs(bool reuse) {
+ options_.reuse_logs = reuse;
+ }
+
+ void Build(int start_idx, int num_vals) {
+ std::string key_space, value_space;
+ WriteBatch batch;
+ for (int i = start_idx; i < start_idx + num_vals; i++) {
+ Slice key = Key(i, &key_space);
+ batch.Clear();
+ batch.Put(key, Value(i, &value_space));
+ WriteOptions options;
+ ASSERT_OK(db_->Write(options, &batch));
+ }
+ }
+
+ Status ReadValue(int i, std::string* val) const {
+ std::string key_space, value_space;
+ Slice key = Key(i, &key_space);
+ Value(i, &value_space);
+ ReadOptions options;
+ return db_->Get(options, key, val);
+ }
+
+ Status Verify(int start_idx, int num_vals,
+ ExpectedVerifResult expected) const {
+ std::string val;
+ std::string value_space;
+ Status s;
+ for (int i = start_idx; i < start_idx + num_vals && s.ok(); i++) {
+ Value(i, &value_space);
+ s = ReadValue(i, &val);
+ if (expected == VAL_EXPECT_NO_ERROR) {
+ if (s.ok()) {
+ ASSERT_EQ(value_space, val);
+ }
+ } else if (s.ok()) {
+ fprintf(stderr, "Expected an error at %d, but was OK\n", i);
+ s = Status::IOError(dbname_, "Expected value error:");
+ } else {
+ s = Status::OK(); // An expected error
+ }
+ }
+ return s;
+ }
+
+ // Return the ith key
+ Slice Key(int i, std::string* storage) const {
+ char buf[100];
+ snprintf(buf, sizeof(buf), "%016d", i);
+ storage->assign(buf, strlen(buf));
+ return Slice(*storage);
+ }
+
+ // Return the value to associate with the specified key
+ Slice Value(int k, std::string* storage) const {
+ Random r(k);
+ return test::RandomString(&r, kValueSize, storage);
+ }
+
+ Status OpenDB() {
+ delete db_;
+ db_ = NULL;
+ env_->ResetState();
+ return DB::Open(options_, dbname_, &db_);
+ }
+
+ void CloseDB() {
+ delete db_;
+ db_ = NULL;
+ }
+
+ void DeleteAllData() {
+ Iterator* iter = db_->NewIterator(ReadOptions());
+ WriteOptions options;
+ for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+ ASSERT_OK(db_->Delete(WriteOptions(), iter->key()));
+ }
+
+ delete iter;
+ }
+
+ void ResetDBState(ResetMethod reset_method) {
+ switch (reset_method) {
+ case RESET_DROP_UNSYNCED_DATA:
+ ASSERT_OK(env_->DropUnsyncedFileData());
+ break;
+ case RESET_DELETE_UNSYNCED_FILES:
+ ASSERT_OK(env_->DeleteFilesCreatedAfterLastDirSync());
+ break;
+ default:
+ assert(false);
+ }
+ }
+
+ void PartialCompactTestPreFault(int num_pre_sync, int num_post_sync) {
+ DeleteAllData();
+ Build(0, num_pre_sync);
+ db_->CompactRange(NULL, NULL);
+ Build(num_pre_sync, num_post_sync);
+ }
+
+ void PartialCompactTestReopenWithFault(ResetMethod reset_method,
+ int num_pre_sync,
+ int num_post_sync) {
+ env_->SetFilesystemActive(false);
+ CloseDB();
+ ResetDBState(reset_method);
+ ASSERT_OK(OpenDB());
+ ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR));
+ ASSERT_OK(Verify(num_pre_sync, num_post_sync, FaultInjectionTest::VAL_EXPECT_ERROR));
+ }
+
+ void NoWriteTestPreFault() {
+ }
+
+ void NoWriteTestReopenWithFault(ResetMethod reset_method) {
+ CloseDB();
+ ResetDBState(reset_method);
+ ASSERT_OK(OpenDB());
+ }
+
+ void DoTest() {
+ Random rnd(0);
+ ASSERT_OK(OpenDB());
+ for (size_t idx = 0; idx < kNumIterations; idx++) {
+ int num_pre_sync = rnd.Uniform(kMaxNumValues);
+ int num_post_sync = rnd.Uniform(kMaxNumValues);
+
+ PartialCompactTestPreFault(num_pre_sync, num_post_sync);
+ PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA,
+ num_pre_sync,
+ num_post_sync);
+
+ NoWriteTestPreFault();
+ NoWriteTestReopenWithFault(RESET_DROP_UNSYNCED_DATA);
+
+ PartialCompactTestPreFault(num_pre_sync, num_post_sync);
+ // No new files created so we expect all values since no files will be
+ // dropped.
+ PartialCompactTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES,
+ num_pre_sync + num_post_sync,
+ 0);
+
+ NoWriteTestPreFault();
+ NoWriteTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES);
+ }
+ }
+};
+
+TEST(FaultInjectionTest, FaultTestNoLogReuse) {
+ ReuseLogs(false);
+ DoTest();
+}
+
+TEST(FaultInjectionTest, FaultTestWithLogReuse) {
+ ReuseLogs(true);
+ DoTest();
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ return leveldb::test::RunAllTests();
+}
diff --git a/src/leveldb/db/leveldb_main.cc b/src/leveldb/db/leveldbutil.cc
index 9f4b7dd70c..9f4b7dd70c 100644
--- a/src/leveldb/db/leveldb_main.cc
+++ b/src/leveldb/db/leveldbutil.cc
diff --git a/src/leveldb/db/log_reader.cc b/src/leveldb/db/log_reader.cc
index e44b66c85b..a6d304545d 100644
--- a/src/leveldb/db/log_reader.cc
+++ b/src/leveldb/db/log_reader.cc
@@ -25,7 +25,8 @@ Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
eof_(false),
last_record_offset_(0),
end_of_buffer_offset_(0),
- initial_offset_(initial_offset) {
+ initial_offset_(initial_offset),
+ resyncing_(initial_offset > 0) {
}
Reader::~Reader() {
@@ -72,8 +73,25 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
Slice fragment;
while (true) {
- uint64_t physical_record_offset = end_of_buffer_offset_ - buffer_.size();
const unsigned int record_type = ReadPhysicalRecord(&fragment);
+
+ // ReadPhysicalRecord may have only had an empty trailer remaining in its
+ // internal buffer. Calculate the offset of the next physical record now
+ // that it has returned, properly accounting for its header size.
+ uint64_t physical_record_offset =
+ end_of_buffer_offset_ - buffer_.size() - kHeaderSize - fragment.size();
+
+ if (resyncing_) {
+ if (record_type == kMiddleType) {
+ continue;
+ } else if (record_type == kLastType) {
+ resyncing_ = false;
+ continue;
+ } else {
+ resyncing_ = false;
+ }
+ }
+
switch (record_type) {
case kFullType:
if (in_fragmented_record) {
diff --git a/src/leveldb/db/log_reader.h b/src/leveldb/db/log_reader.h
index 6aff791716..8389d61f8f 100644
--- a/src/leveldb/db/log_reader.h
+++ b/src/leveldb/db/log_reader.h
@@ -73,6 +73,11 @@ class Reader {
// Offset at which to start looking for the first record to return
uint64_t const initial_offset_;
+ // True if we are resynchronizing after a seek (initial_offset_ > 0). In
+ // particular, a run of kMiddleType and kLastType records can be silently
+ // skipped in this mode
+ bool resyncing_;
+
// Extend record types with the following special values
enum {
kEof = kMaxRecordType + 1,
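
The resyncing_ flag added above lets the reader silently drop middle/last fragments of a record that began before initial_offset. A standalone sketch of that skip decision, using the same record-type values as db/log_format.h; the helper name is illustrative:

    enum RecordType { kFullType = 1, kFirstType = 2, kMiddleType = 3, kLastType = 4 };

    // Returns true if this physical record should be dropped without reporting
    // an error while the reader is still resynchronizing after a seek.
    bool SkipWhileResyncing(RecordType type, bool* resyncing) {
      if (!*resyncing) return false;
      if (type == kMiddleType) return true;    // still inside the earlier record
      if (type == kLastType) {                 // the earlier record ends here
        *resyncing = false;
        return true;
      }
      *resyncing = false;                      // kFirstType/kFullType: a fresh record starts
      return false;
    }
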
diff --git a/src/leveldb/db/log_test.cc b/src/leveldb/db/log_test.cc
index dcf0562652..48a5928657 100644
--- a/src/leveldb/db/log_test.cc
+++ b/src/leveldb/db/log_test.cc
@@ -79,7 +79,7 @@ class LogTest {
virtual Status Skip(uint64_t n) {
if (n > contents_.size()) {
contents_.clear();
- return Status::NotFound("in-memory file skipepd past end");
+ return Status::NotFound("in-memory file skipped past end");
}
contents_.remove_prefix(n);
@@ -104,23 +104,34 @@ class LogTest {
StringSource source_;
ReportCollector report_;
bool reading_;
- Writer writer_;
- Reader reader_;
+ Writer* writer_;
+ Reader* reader_;
// Record metadata for testing initial offset functionality
static size_t initial_offset_record_sizes_[];
static uint64_t initial_offset_last_record_offsets_[];
+ static int num_initial_offset_records_;
public:
LogTest() : reading_(false),
- writer_(&dest_),
- reader_(&source_, &report_, true/*checksum*/,
- 0/*initial_offset*/) {
+ writer_(new Writer(&dest_)),
+ reader_(new Reader(&source_, &report_, true/*checksum*/,
+ 0/*initial_offset*/)) {
+ }
+
+ ~LogTest() {
+ delete writer_;
+ delete reader_;
+ }
+
+ void ReopenForAppend() {
+ delete writer_;
+ writer_ = new Writer(&dest_, dest_.contents_.size());
}
void Write(const std::string& msg) {
ASSERT_TRUE(!reading_) << "Write() after starting to read";
- writer_.AddRecord(Slice(msg));
+ writer_->AddRecord(Slice(msg));
}
size_t WrittenBytes() const {
@@ -134,7 +145,7 @@ class LogTest {
}
std::string scratch;
Slice record;
- if (reader_.ReadRecord(&record, &scratch)) {
+ if (reader_->ReadRecord(&record, &scratch)) {
return record.ToString();
} else {
return "EOF";
@@ -182,13 +193,18 @@ class LogTest {
}
void WriteInitialOffsetLog() {
- for (int i = 0; i < 4; i++) {
+ for (int i = 0; i < num_initial_offset_records_; i++) {
std::string record(initial_offset_record_sizes_[i],
static_cast<char>('a' + i));
Write(record);
}
}
+ void StartReadingAt(uint64_t initial_offset) {
+ delete reader_;
+ reader_ = new Reader(&source_, &report_, true/*checksum*/, initial_offset);
+ }
+
void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
WriteInitialOffsetLog();
reading_ = true;
@@ -208,32 +224,48 @@ class LogTest {
source_.contents_ = Slice(dest_.contents_);
Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
initial_offset);
- Slice record;
- std::string scratch;
- ASSERT_TRUE(offset_reader->ReadRecord(&record, &scratch));
- ASSERT_EQ(initial_offset_record_sizes_[expected_record_offset],
- record.size());
- ASSERT_EQ(initial_offset_last_record_offsets_[expected_record_offset],
- offset_reader->LastRecordOffset());
- ASSERT_EQ((char)('a' + expected_record_offset), record.data()[0]);
+
+ // Read all records from expected_record_offset through the last one.
+ ASSERT_LT(expected_record_offset, num_initial_offset_records_);
+ for (; expected_record_offset < num_initial_offset_records_;
+ ++expected_record_offset) {
+ Slice record;
+ std::string scratch;
+ ASSERT_TRUE(offset_reader->ReadRecord(&record, &scratch));
+ ASSERT_EQ(initial_offset_record_sizes_[expected_record_offset],
+ record.size());
+ ASSERT_EQ(initial_offset_last_record_offsets_[expected_record_offset],
+ offset_reader->LastRecordOffset());
+ ASSERT_EQ((char)('a' + expected_record_offset), record.data()[0]);
+ }
delete offset_reader;
}
-
};
size_t LogTest::initial_offset_record_sizes_[] =
{10000, // Two sizable records in first block
10000,
2 * log::kBlockSize - 1000, // Span three blocks
- 1};
+ 1,
+ 13716, // Consume all but two bytes of block 3.
+ log::kBlockSize - kHeaderSize, // Consume the entirety of block 4.
+ };
uint64_t LogTest::initial_offset_last_record_offsets_[] =
{0,
kHeaderSize + 10000,
2 * (kHeaderSize + 10000),
2 * (kHeaderSize + 10000) +
- (2 * log::kBlockSize - 1000) + 3 * kHeaderSize};
+ (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
+ 2 * (kHeaderSize + 10000) +
+ (2 * log::kBlockSize - 1000) + 3 * kHeaderSize
+ + kHeaderSize + 1,
+ 3 * log::kBlockSize,
+ };
+// LogTest::initial_offset_last_record_offsets_ must be defined before this.
+int LogTest::num_initial_offset_records_ =
+ sizeof(LogTest::initial_offset_last_record_offsets_)/sizeof(uint64_t);
TEST(LogTest, Empty) {
ASSERT_EQ("EOF", Read());
@@ -318,6 +350,15 @@ TEST(LogTest, AlignedEof) {
ASSERT_EQ("EOF", Read());
}
+TEST(LogTest, OpenForAppend) {
+ Write("hello");
+ ReopenForAppend();
+ Write("world");
+ ASSERT_EQ("hello", Read());
+ ASSERT_EQ("world", Read());
+ ASSERT_EQ("EOF", Read());
+}
+
TEST(LogTest, RandomRead) {
const int N = 500;
Random write_rnd(301);
@@ -445,6 +486,22 @@ TEST(LogTest, PartialLastIsIgnored) {
ASSERT_EQ(0, DroppedBytes());
}
+TEST(LogTest, SkipIntoMultiRecord) {
+ // Consider a fragmented record:
+ // first(R1), middle(R1), last(R1), first(R2)
+ // If initial_offset points to a record after first(R1) but before first(R2)
+ // incomplete fragment errors are not actual errors, and must be suppressed
+ // until a new first or full record is encountered.
+ Write(BigString("foo", 3*kBlockSize));
+ Write("correct");
+ StartReadingAt(kBlockSize);
+
+ ASSERT_EQ("correct", Read());
+ ASSERT_EQ("", ReportMessage());
+ ASSERT_EQ(0, DroppedBytes());
+ ASSERT_EQ("EOF", Read());
+}
+
TEST(LogTest, ErrorJoinsRecords) {
// Consider two fragmented records:
// first(R1) last(R1) first(R2) last(R2)
@@ -514,6 +571,10 @@ TEST(LogTest, ReadFourthStart) {
3);
}
+TEST(LogTest, ReadInitialOffsetIntoBlockPadding) {
+ CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5);
+}
+
TEST(LogTest, ReadEnd) {
CheckOffsetPastEndReturnsNoRecords(0);
}
diff --git a/src/leveldb/db/log_writer.cc b/src/leveldb/db/log_writer.cc
index 2da99ac088..74a03270da 100644
--- a/src/leveldb/db/log_writer.cc
+++ b/src/leveldb/db/log_writer.cc
@@ -12,15 +12,24 @@
namespace leveldb {
namespace log {
-Writer::Writer(WritableFile* dest)
- : dest_(dest),
- block_offset_(0) {
+static void InitTypeCrc(uint32_t* type_crc) {
for (int i = 0; i <= kMaxRecordType; i++) {
char t = static_cast<char>(i);
- type_crc_[i] = crc32c::Value(&t, 1);
+ type_crc[i] = crc32c::Value(&t, 1);
}
}
+Writer::Writer(WritableFile* dest)
+ : dest_(dest),
+ block_offset_(0) {
+ InitTypeCrc(type_crc_);
+}
+
+Writer::Writer(WritableFile* dest, uint64_t dest_length)
+ : dest_(dest), block_offset_(dest_length % kBlockSize) {
+ InitTypeCrc(type_crc_);
+}
+
Writer::~Writer() {
}
diff --git a/src/leveldb/db/log_writer.h b/src/leveldb/db/log_writer.h
index a3a954d967..9e7cc4705b 100644
--- a/src/leveldb/db/log_writer.h
+++ b/src/leveldb/db/log_writer.h
@@ -22,6 +22,12 @@ class Writer {
// "*dest" must be initially empty.
// "*dest" must remain live while this Writer is in use.
explicit Writer(WritableFile* dest);
+
+ // Create a writer that will append data to "*dest".
+ // "*dest" must have initial length "dest_length".
+ // "*dest" must remain live while this Writer is in use.
+ Writer(WritableFile* dest, uint64_t dest_length);
+
~Writer();
Status AddRecord(const Slice& slice);
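
The new two-argument Writer constructor exists so a partially written log can be reopened for append with correct block alignment. A rough sketch of the internal usage pattern, mirroring what DBImpl::RecoverLogFile does when reuse_logs is enabled; error handling is trimmed and the function name is illustrative:

    #include <string>
    #include "db/log_writer.h"
    #include "leveldb/env.h"

    leveldb::Status ReopenLogForAppend(leveldb::Env* env, const std::string& fname,
                                       leveldb::WritableFile** file,
                                       leveldb::log::Writer** writer) {
      uint64_t size = 0;
      leveldb::Status s = env->GetFileSize(fname, &size);
      if (s.ok()) s = env->NewAppendableFile(fname, file);  // may be NotSupported
      if (s.ok()) {
        // block_offset_ is seeded with size % kBlockSize, so appended records
        // keep the 32KB block framing intact.
        *writer = new leveldb::log::Writer(*file, size);
      }
      return s;
    }
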
diff --git a/src/leveldb/db/memtable.h b/src/leveldb/db/memtable.h
index 92e90bb099..9f41567cde 100644
--- a/src/leveldb/db/memtable.h
+++ b/src/leveldb/db/memtable.h
@@ -36,10 +36,7 @@ class MemTable {
}
// Returns an estimate of the number of bytes of data in use by this
- // data structure.
- //
- // REQUIRES: external synchronization to prevent simultaneous
- // operations on the same MemTable.
+ // data structure. It is safe to call when MemTable is being modified.
size_t ApproximateMemoryUsage();
// Return an iterator that yields the contents of the memtable.
diff --git a/src/leveldb/db/recovery_test.cc b/src/leveldb/db/recovery_test.cc
new file mode 100644
index 0000000000..9596f4288a
--- /dev/null
+++ b/src/leveldb/db/recovery_test.cc
@@ -0,0 +1,324 @@
+// Copyright (c) 2014 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/db_impl.h"
+#include "db/filename.h"
+#include "db/version_set.h"
+#include "db/write_batch_internal.h"
+#include "leveldb/db.h"
+#include "leveldb/env.h"
+#include "leveldb/write_batch.h"
+#include "util/logging.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+
+namespace leveldb {
+
+class RecoveryTest {
+ public:
+ RecoveryTest() : env_(Env::Default()), db_(NULL) {
+ dbname_ = test::TmpDir() + "/recovery_test";
+ DestroyDB(dbname_, Options());
+ Open();
+ }
+
+ ~RecoveryTest() {
+ Close();
+ DestroyDB(dbname_, Options());
+ }
+
+ DBImpl* dbfull() const { return reinterpret_cast<DBImpl*>(db_); }
+ Env* env() const { return env_; }
+
+ bool CanAppend() {
+ WritableFile* tmp;
+ Status s = env_->NewAppendableFile(CurrentFileName(dbname_), &tmp);
+ delete tmp;
+ if (s.IsNotSupportedError()) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ void Close() {
+ delete db_;
+ db_ = NULL;
+ }
+
+ void Open(Options* options = NULL) {
+ Close();
+ Options opts;
+ if (options != NULL) {
+ opts = *options;
+ } else {
+ opts.reuse_logs = true; // TODO(sanjay): test both ways
+ opts.create_if_missing = true;
+ }
+ if (opts.env == NULL) {
+ opts.env = env_;
+ }
+ ASSERT_OK(DB::Open(opts, dbname_, &db_));
+ ASSERT_EQ(1, NumLogs());
+ }
+
+ Status Put(const std::string& k, const std::string& v) {
+ return db_->Put(WriteOptions(), k, v);
+ }
+
+ std::string Get(const std::string& k, const Snapshot* snapshot = NULL) {
+ std::string result;
+ Status s = db_->Get(ReadOptions(), k, &result);
+ if (s.IsNotFound()) {
+ result = "NOT_FOUND";
+ } else if (!s.ok()) {
+ result = s.ToString();
+ }
+ return result;
+ }
+
+ std::string ManifestFileName() {
+ std::string current;
+ ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), &current));
+ size_t len = current.size();
+ if (len > 0 && current[len-1] == '\n') {
+ current.resize(len - 1);
+ }
+ return dbname_ + "/" + current;
+ }
+
+ std::string LogName(uint64_t number) {
+ return LogFileName(dbname_, number);
+ }
+
+ size_t DeleteLogFiles() {
+ std::vector<uint64_t> logs = GetFiles(kLogFile);
+ for (size_t i = 0; i < logs.size(); i++) {
+ ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]);
+ }
+ return logs.size();
+ }
+
+ uint64_t FirstLogFile() {
+ return GetFiles(kLogFile)[0];
+ }
+
+ std::vector<uint64_t> GetFiles(FileType t) {
+ std::vector<std::string> filenames;
+ ASSERT_OK(env_->GetChildren(dbname_, &filenames));
+ std::vector<uint64_t> result;
+ for (size_t i = 0; i < filenames.size(); i++) {
+ uint64_t number;
+ FileType type;
+ if (ParseFileName(filenames[i], &number, &type) && type == t) {
+ result.push_back(number);
+ }
+ }
+ return result;
+ }
+
+ int NumLogs() {
+ return GetFiles(kLogFile).size();
+ }
+
+ int NumTables() {
+ return GetFiles(kTableFile).size();
+ }
+
+ uint64_t FileSize(const std::string& fname) {
+ uint64_t result;
+ ASSERT_OK(env_->GetFileSize(fname, &result)) << fname;
+ return result;
+ }
+
+ void CompactMemTable() {
+ dbfull()->TEST_CompactMemTable();
+ }
+
+ // Directly construct a log file that sets key to val.
+ void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
+ std::string fname = LogFileName(dbname_, lognum);
+ WritableFile* file;
+ ASSERT_OK(env_->NewWritableFile(fname, &file));
+ log::Writer writer(file);
+ WriteBatch batch;
+ batch.Put(key, val);
+ WriteBatchInternal::SetSequence(&batch, seq);
+ ASSERT_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch)));
+ ASSERT_OK(file->Flush());
+ delete file;
+ }
+
+ private:
+ std::string dbname_;
+ Env* env_;
+ DB* db_;
+};
+
+TEST(RecoveryTest, ManifestReused) {
+ if (!CanAppend()) {
+ fprintf(stderr, "skipping test because env does not support appending\n");
+ return;
+ }
+ ASSERT_OK(Put("foo", "bar"));
+ Close();
+ std::string old_manifest = ManifestFileName();
+ Open();
+ ASSERT_EQ(old_manifest, ManifestFileName());
+ ASSERT_EQ("bar", Get("foo"));
+ Open();
+ ASSERT_EQ(old_manifest, ManifestFileName());
+ ASSERT_EQ("bar", Get("foo"));
+}
+
+TEST(RecoveryTest, LargeManifestCompacted) {
+ if (!CanAppend()) {
+ fprintf(stderr, "skipping test because env does not support appending\n");
+ return;
+ }
+ ASSERT_OK(Put("foo", "bar"));
+ Close();
+ std::string old_manifest = ManifestFileName();
+
+ // Pad with zeroes to make manifest file very big.
+ {
+ uint64_t len = FileSize(old_manifest);
+ WritableFile* file;
+ ASSERT_OK(env()->NewAppendableFile(old_manifest, &file));
+ std::string zeroes(3*1048576 - static_cast<size_t>(len), 0);
+ ASSERT_OK(file->Append(zeroes));
+ ASSERT_OK(file->Flush());
+ delete file;
+ }
+
+ Open();
+ std::string new_manifest = ManifestFileName();
+ ASSERT_NE(old_manifest, new_manifest);
+ ASSERT_GT(10000, FileSize(new_manifest));
+ ASSERT_EQ("bar", Get("foo"));
+
+ Open();
+ ASSERT_EQ(new_manifest, ManifestFileName());
+ ASSERT_EQ("bar", Get("foo"));
+}
+
+TEST(RecoveryTest, NoLogFiles) {
+ ASSERT_OK(Put("foo", "bar"));
+ ASSERT_EQ(1, DeleteLogFiles());
+ Open();
+ ASSERT_EQ("NOT_FOUND", Get("foo"));
+ Open();
+ ASSERT_EQ("NOT_FOUND", Get("foo"));
+}
+
+TEST(RecoveryTest, LogFileReuse) {
+ if (!CanAppend()) {
+ fprintf(stderr, "skipping test because env does not support appending\n");
+ return;
+ }
+ for (int i = 0; i < 2; i++) {
+ ASSERT_OK(Put("foo", "bar"));
+ if (i == 0) {
+ // Compact to ensure current log is empty
+ CompactMemTable();
+ }
+ Close();
+ ASSERT_EQ(1, NumLogs());
+ uint64_t number = FirstLogFile();
+ if (i == 0) {
+ ASSERT_EQ(0, FileSize(LogName(number)));
+ } else {
+ ASSERT_LT(0, FileSize(LogName(number)));
+ }
+ Open();
+ ASSERT_EQ(1, NumLogs());
+ ASSERT_EQ(number, FirstLogFile()) << "did not reuse log file";
+ ASSERT_EQ("bar", Get("foo"));
+ Open();
+ ASSERT_EQ(1, NumLogs());
+ ASSERT_EQ(number, FirstLogFile()) << "did not reuse log file";
+ ASSERT_EQ("bar", Get("foo"));
+ }
+}
+
+TEST(RecoveryTest, MultipleMemTables) {
+ // Make a large log.
+ const int kNum = 1000;
+ for (int i = 0; i < kNum; i++) {
+ char buf[100];
+ snprintf(buf, sizeof(buf), "%050d", i);
+ ASSERT_OK(Put(buf, buf));
+ }
+ ASSERT_EQ(0, NumTables());
+ Close();
+ ASSERT_EQ(0, NumTables());
+ ASSERT_EQ(1, NumLogs());
+ uint64_t old_log_file = FirstLogFile();
+
+ // Force creation of multiple memtables by reducing the write buffer size.
+ Options opt;
+ opt.reuse_logs = true;
+ opt.write_buffer_size = (kNum*100) / 2;
+ Open(&opt);
+ ASSERT_LE(2, NumTables());
+ ASSERT_EQ(1, NumLogs());
+ ASSERT_NE(old_log_file, FirstLogFile()) << "must not reuse log";
+ for (int i = 0; i < kNum; i++) {
+ char buf[100];
+ snprintf(buf, sizeof(buf), "%050d", i);
+ ASSERT_EQ(buf, Get(buf));
+ }
+}
+
+TEST(RecoveryTest, MultipleLogFiles) {
+ ASSERT_OK(Put("foo", "bar"));
+ Close();
+ ASSERT_EQ(1, NumLogs());
+
+ // Make a bunch of uncompacted log files.
+ uint64_t old_log = FirstLogFile();
+ MakeLogFile(old_log+1, 1000, "hello", "world");
+ MakeLogFile(old_log+2, 1001, "hi", "there");
+ MakeLogFile(old_log+3, 1002, "foo", "bar2");
+
+ // Recover and check that all log files were processed.
+ Open();
+ ASSERT_LE(1, NumTables());
+ ASSERT_EQ(1, NumLogs());
+ uint64_t new_log = FirstLogFile();
+ ASSERT_LE(old_log+3, new_log);
+ ASSERT_EQ("bar2", Get("foo"));
+ ASSERT_EQ("world", Get("hello"));
+ ASSERT_EQ("there", Get("hi"));
+
+ // Test that previous recovery produced recoverable state.
+ Open();
+ ASSERT_LE(1, NumTables());
+ ASSERT_EQ(1, NumLogs());
+ if (CanAppend()) {
+ ASSERT_EQ(new_log, FirstLogFile());
+ }
+ ASSERT_EQ("bar2", Get("foo"));
+ ASSERT_EQ("world", Get("hello"));
+ ASSERT_EQ("there", Get("hi"));
+
+ // Check that introducing an older log file does not cause it to be re-read.
+ Close();
+ MakeLogFile(old_log+1, 2000, "hello", "stale write");
+ Open();
+ ASSERT_LE(1, NumTables());
+ ASSERT_EQ(1, NumLogs());
+ if (CanAppend()) {
+ ASSERT_EQ(new_log, FirstLogFile());
+ }
+ ASSERT_EQ("bar2", Get("foo"));
+ ASSERT_EQ("world", Get("hello"));
+ ASSERT_EQ("there", Get("hi"));
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ return leveldb::test::RunAllTests();
+}
diff --git a/src/leveldb/db/skiplist.h b/src/leveldb/db/skiplist.h
index ed8b092203..8bd77764d8 100644
--- a/src/leveldb/db/skiplist.h
+++ b/src/leveldb/db/skiplist.h
@@ -1,10 +1,10 @@
-#ifndef STORAGE_LEVELDB_DB_SKIPLIST_H_
-#define STORAGE_LEVELDB_DB_SKIPLIST_H_
-
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
+
+#ifndef STORAGE_LEVELDB_DB_SKIPLIST_H_
+#define STORAGE_LEVELDB_DB_SKIPLIST_H_
+
// Thread safety
// -------------
//
diff --git a/src/leveldb/db/skiplist_test.cc b/src/leveldb/db/skiplist_test.cc
index c78f4b4fb1..aee1461e1b 100644
--- a/src/leveldb/db/skiplist_test.cc
+++ b/src/leveldb/db/skiplist_test.cc
@@ -250,7 +250,7 @@ class ConcurrentTest {
// Note that generation 0 is never inserted, so it is ok if
// <*,0,*> is missing.
ASSERT_TRUE((gen(pos) == 0) ||
- (gen(pos) > initial_state.Get(key(pos)))
+ (gen(pos) > static_cast<Key>(initial_state.Get(key(pos))))
) << "key: " << key(pos)
<< "; gen: " << gen(pos)
<< "; initgen: "
diff --git a/src/leveldb/db/snapshot.h b/src/leveldb/db/snapshot.h
index e7f8fd2c37..6ed413c42d 100644
--- a/src/leveldb/db/snapshot.h
+++ b/src/leveldb/db/snapshot.h
@@ -5,6 +5,7 @@
#ifndef STORAGE_LEVELDB_DB_SNAPSHOT_H_
#define STORAGE_LEVELDB_DB_SNAPSHOT_H_
+#include "db/dbformat.h"
#include "leveldb/db.h"
namespace leveldb {
diff --git a/src/leveldb/db/version_set.cc b/src/leveldb/db/version_set.cc
index aa83df55e4..a5e0f77a6a 100644
--- a/src/leveldb/db/version_set.cc
+++ b/src/leveldb/db/version_set.cc
@@ -893,7 +893,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
return s;
}
-Status VersionSet::Recover() {
+Status VersionSet::Recover(bool *save_manifest) {
struct LogReporter : public log::Reader::Reporter {
Status* status;
virtual void Corruption(size_t bytes, const Status& s) {
@@ -1003,11 +1003,49 @@ Status VersionSet::Recover() {
last_sequence_ = last_sequence;
log_number_ = log_number;
prev_log_number_ = prev_log_number;
+
+ // See if we can reuse the existing MANIFEST file.
+ if (ReuseManifest(dscname, current)) {
+ // No need to save new manifest
+ } else {
+ *save_manifest = true;
+ }
}
return s;
}
+bool VersionSet::ReuseManifest(const std::string& dscname,
+ const std::string& dscbase) {
+ if (!options_->reuse_logs) {
+ return false;
+ }
+ FileType manifest_type;
+ uint64_t manifest_number;
+ uint64_t manifest_size;
+ if (!ParseFileName(dscbase, &manifest_number, &manifest_type) ||
+ manifest_type != kDescriptorFile ||
+ !env_->GetFileSize(dscname, &manifest_size).ok() ||
+ // Make new compacted MANIFEST if old one is too big
+ manifest_size >= kTargetFileSize) {
+ return false;
+ }
+
+ assert(descriptor_file_ == NULL);
+ assert(descriptor_log_ == NULL);
+ Status r = env_->NewAppendableFile(dscname, &descriptor_file_);
+ if (!r.ok()) {
+ Log(options_->info_log, "Reuse MANIFEST: %s\n", r.ToString().c_str());
+ assert(descriptor_file_ == NULL);
+ return false;
+ }
+
+ Log(options_->info_log, "Reusing MANIFEST %s\n", dscname.c_str());
+ descriptor_log_ = new log::Writer(descriptor_file_, manifest_size);
+ manifest_file_number_ = manifest_number;
+ return true;
+}
+
void VersionSet::MarkFileNumberUsed(uint64_t number) {
if (next_file_number_ <= number) {
next_file_number_ = number + 1;
diff --git a/src/leveldb/db/version_set.h b/src/leveldb/db/version_set.h
index 8dc14b8e01..1dec745673 100644
--- a/src/leveldb/db/version_set.h
+++ b/src/leveldb/db/version_set.h
@@ -179,7 +179,7 @@ class VersionSet {
EXCLUSIVE_LOCKS_REQUIRED(mu);
// Recover the last saved descriptor from persistent storage.
- Status Recover();
+ Status Recover(bool *save_manifest);
// Return the current version.
Version* current() const { return current_; }
@@ -274,6 +274,8 @@ class VersionSet {
friend class Compaction;
friend class Version;
+ bool ReuseManifest(const std::string& dscname, const std::string& dscbase);
+
void Finalize(Version* v);
void GetRange(const std::vector<FileMetaData*>& inputs,
diff --git a/src/leveldb/db/write_batch_internal.h b/src/leveldb/db/write_batch_internal.h
index 310a3c8912..9448ef7b21 100644
--- a/src/leveldb/db/write_batch_internal.h
+++ b/src/leveldb/db/write_batch_internal.h
@@ -5,6 +5,7 @@
#ifndef STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_
#define STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_
+#include "db/dbformat.h"
#include "leveldb/write_batch.h"
namespace leveldb {
diff --git a/src/leveldb/doc/index.html b/src/leveldb/doc/index.html
index 3ed0ed9d9e..2155192581 100644
--- a/src/leveldb/doc/index.html
+++ b/src/leveldb/doc/index.html
@@ -22,7 +22,7 @@ directory. The following example shows how to open a database,
creating it if necessary:
<p>
<pre>
- #include &lt;assert&gt;
+ #include &lt;cassert&gt;
#include "leveldb/db.h"
leveldb::DB* db;
diff --git a/src/leveldb/helpers/memenv/memenv.cc b/src/leveldb/helpers/memenv/memenv.cc
index 43ef2e0729..9a98884daf 100644
--- a/src/leveldb/helpers/memenv/memenv.cc
+++ b/src/leveldb/helpers/memenv/memenv.cc
@@ -277,6 +277,19 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
+ virtual Status NewAppendableFile(const std::string& fname,
+ WritableFile** result) {
+ MutexLock lock(&mutex_);
+ FileState** sptr = &file_map_[fname];
+ FileState* file = *sptr;
+ if (file == NULL) {
+ file = new FileState();
+ file->Ref();
+ }
+ *result = new WritableFileImpl(file);
+ return Status::OK();
+ }
+
virtual bool FileExists(const std::string& fname) {
MutexLock lock(&mutex_);
return file_map_.find(fname) != file_map_.end();
diff --git a/src/leveldb/helpers/memenv/memenv_test.cc b/src/leveldb/helpers/memenv/memenv_test.cc
index a44310fed8..5cff77613f 100644
--- a/src/leveldb/helpers/memenv/memenv_test.cc
+++ b/src/leveldb/helpers/memenv/memenv_test.cc
@@ -40,6 +40,8 @@ TEST(MemEnvTest, Basics) {
// Create a file.
ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
+ ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
+ ASSERT_EQ(0, file_size);
delete writable_file;
// Check that the file exists.
@@ -55,9 +57,16 @@ TEST(MemEnvTest, Basics) {
ASSERT_OK(writable_file->Append("abc"));
delete writable_file;
- // Check for expected size.
+ // Check that append works.
+ ASSERT_OK(env_->NewAppendableFile("/dir/f", &writable_file));
ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
ASSERT_EQ(3, file_size);
+ ASSERT_OK(writable_file->Append("hello"));
+ delete writable_file;
+
+ // Check for expected size.
+ ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
+ ASSERT_EQ(8, file_size);
// Check that renaming works.
ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok());
@@ -65,7 +74,7 @@ TEST(MemEnvTest, Basics) {
ASSERT_TRUE(!env_->FileExists("/dir/f"));
ASSERT_TRUE(env_->FileExists("/dir/g"));
ASSERT_OK(env_->GetFileSize("/dir/g", &file_size));
- ASSERT_EQ(3, file_size);
+ ASSERT_EQ(8, file_size);
// Check that opening non-existent file fails.
SequentialFile* seq_file;
diff --git a/src/leveldb/include/leveldb/cache.h b/src/leveldb/include/leveldb/cache.h
index 1a201e5e0a..6819d5bc49 100644
--- a/src/leveldb/include/leveldb/cache.h
+++ b/src/leveldb/include/leveldb/cache.h
@@ -81,6 +81,17 @@ class Cache {
// its cache keys.
virtual uint64_t NewId() = 0;
+ // Remove all cache entries that are not actively in use. Memory-constrained
+ // applications may wish to call this method to reduce memory usage.
+ // Default implementation of Prune() does nothing. Subclasses are strongly
+ // encouraged to override the default implementation. A future release of
+ // leveldb may change Prune() to a pure abstract method.
+ virtual void Prune() {}
+
+ // Return an estimate of the combined charges of all elements stored in the
+ // cache.
+ virtual size_t TotalCharge() const = 0;
+
private:
void LRU_Remove(Handle* e);
void LRU_Append(Handle* e);
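
Prune() and TotalCharge() are the new public Cache methods backing the memory-usage property. A small usage sketch; the cache size, key, and charge values are arbitrary:

    #include <cstdio>
    #include "leveldb/cache.h"
    #include "leveldb/slice.h"

    static void NoopDeleter(const leveldb::Slice& key, void* value) {}

    int main() {
      leveldb::Cache* cache = leveldb::NewLRUCache(8 << 20);  // 8 MB capacity
      leveldb::Cache::Handle* h =
          cache->Insert("key", NULL, 1024 /*charge*/, &NoopDeleter);
      std::printf("charge: %zu\n", cache->TotalCharge());      // roughly 1024
      cache->Release(h);   // entry is now referenced only by the cache
      cache->Prune();      // evicts everything not actively in use
      std::printf("after prune: %zu\n", cache->TotalCharge()); // 0
      delete cache;
      return 0;
    }
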
diff --git a/src/leveldb/include/leveldb/db.h b/src/leveldb/include/leveldb/db.h
index 4c169bf22e..9752cbad51 100644
--- a/src/leveldb/include/leveldb/db.h
+++ b/src/leveldb/include/leveldb/db.h
@@ -14,7 +14,7 @@ namespace leveldb {
// Update Makefile if you change these
static const int kMajorVersion = 1;
-static const int kMinorVersion = 18;
+static const int kMinorVersion = 19;
struct Options;
struct ReadOptions;
@@ -115,6 +115,8 @@ class DB {
// about the internal operation of the DB.
// "leveldb.sstables" - returns a multi-line string that describes all
// of the sstables that make up the db contents.
+ // "leveldb.approximate-memory-usage" - returns the approximate number of
+ // bytes of memory in use by the DB.
virtual bool GetProperty(const Slice& property, std::string* value) = 0;
// For each i in [0,n-1], store in "sizes[i]", the approximate
diff --git a/src/leveldb/include/leveldb/env.h b/src/leveldb/include/leveldb/env.h
index f709514da6..99b6c21414 100644
--- a/src/leveldb/include/leveldb/env.h
+++ b/src/leveldb/include/leveldb/env.h
@@ -69,6 +69,21 @@ class Env {
virtual Status NewWritableFile(const std::string& fname,
WritableFile** result) = 0;
+ // Create an object that either appends to an existing file, or
+ // writes to a new file (if the file does not exist to begin with).
+ // On success, stores a pointer to the new file in *result and
+ // returns OK. On failure stores NULL in *result and returns
+ // non-OK.
+ //
+ // The returned file will only be accessed by one thread at a time.
+ //
+ // May return an IsNotSupportedError error if this Env does
+ // not allow appending to an existing file. Users of Env (including
+ // the leveldb implementation) must be prepared to deal with
+ // an Env that does not support appending.
+ virtual Status NewAppendableFile(const std::string& fname,
+ WritableFile** result);
+
// Returns true iff the named file exists.
virtual bool FileExists(const std::string& fname) = 0;
@@ -289,6 +304,9 @@ class EnvWrapper : public Env {
Status NewWritableFile(const std::string& f, WritableFile** r) {
return target_->NewWritableFile(f, r);
}
+ Status NewAppendableFile(const std::string& f, WritableFile** r) {
+ return target_->NewAppendableFile(f, r);
+ }
bool FileExists(const std::string& f) { return target_->FileExists(f); }
Status GetChildren(const std::string& dir, std::vector<std::string>* r) {
return target_->GetChildren(dir, r);
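
NewAppendableFile() is optional for Env implementations, so callers have to probe for support. A sketch modelled on RecoveryTest::CanAppend() in this patch; the probe file is assumed to be a file the caller already has, such as the CURRENT file:

    #include <string>
    #include "leveldb/env.h"
    #include "leveldb/status.h"

    bool EnvSupportsAppend(leveldb::Env* env, const std::string& existing_file) {
      leveldb::WritableFile* f = NULL;
      leveldb::Status s = env->NewAppendableFile(existing_file, &f);
      delete f;  // NULL-safe; f is only non-NULL on success
      return !s.IsNotSupportedError();
    }
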
diff --git a/src/leveldb/include/leveldb/iterator.h b/src/leveldb/include/leveldb/iterator.h
index 76aced04bd..da631ed9d8 100644
--- a/src/leveldb/include/leveldb/iterator.h
+++ b/src/leveldb/include/leveldb/iterator.h
@@ -37,7 +37,7 @@ class Iterator {
// Valid() after this call iff the source is not empty.
virtual void SeekToLast() = 0;
- // Position at the first key in the source that at or past target
+ // Position at the first key in the source that is at or past target.
// The iterator is Valid() after this call iff the source contains
// an entry that comes at or past target.
virtual void Seek(const Slice& target) = 0;
diff --git a/src/leveldb/include/leveldb/options.h b/src/leveldb/include/leveldb/options.h
index 7c9b973454..83a1ef39a4 100644
--- a/src/leveldb/include/leveldb/options.h
+++ b/src/leveldb/include/leveldb/options.h
@@ -128,6 +128,12 @@ struct Options {
// efficiently detect that and will switch to uncompressed mode.
CompressionType compression;
+ // EXPERIMENTAL: If true, append to existing MANIFEST and log files
+ // when a database is opened. This can significantly speed up open.
+ //
+ // Default: currently false, but may become true later.
+ bool reuse_logs;
+
// If non-NULL, use the specified filter policy to reduce disk reads.
// Many applications will benefit from passing the result of
// NewBloomFilterPolicy() here.
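
A minimal sketch of opting in to the experimental reuse_logs option; the database path is illustrative, and setting the flag explicitly documents intent since the default may change in a later release:

    #include <cassert>
    #include "leveldb/db.h"

    int main() {
      leveldb::Options options;
      options.create_if_missing = true;
      options.reuse_logs = true;   // append to the existing MANIFEST and log on open
      leveldb::DB* db = NULL;
      leveldb::Status s = leveldb::DB::Open(options, "/tmp/reuse_demo", &db);
      assert(s.ok());
      delete db;
      return 0;
    }
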
diff --git a/src/leveldb/include/leveldb/status.h b/src/leveldb/include/leveldb/status.h
index 11dbd4b47e..d9575f9753 100644
--- a/src/leveldb/include/leveldb/status.h
+++ b/src/leveldb/include/leveldb/status.h
@@ -60,6 +60,12 @@ class Status {
// Returns true iff the status indicates an IOError.
bool IsIOError() const { return code() == kIOError; }
+ // Returns true iff the status indicates a NotSupportedError.
+ bool IsNotSupportedError() const { return code() == kNotSupported; }
+
+ // Returns true iff the status indicates an InvalidArgument.
+ bool IsInvalidArgument() const { return code() == kInvalidArgument; }
+
// Return a string representation of this status suitable for printing.
// Returns the string "OK" for success.
std::string ToString() const;
diff --git a/src/leveldb/port/atomic_pointer.h b/src/leveldb/port/atomic_pointer.h
index 9bf091f757..1c4c7aafc6 100644
--- a/src/leveldb/port/atomic_pointer.h
+++ b/src/leveldb/port/atomic_pointer.h
@@ -35,8 +35,12 @@
#define ARCH_CPU_X86_FAMILY 1
#elif defined(__ARMEL__)
#define ARCH_CPU_ARM_FAMILY 1
+#elif defined(__aarch64__)
+#define ARCH_CPU_ARM64_FAMILY 1
#elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__)
#define ARCH_CPU_PPC_FAMILY 1
+#elif defined(__mips__)
+#define ARCH_CPU_MIPS_FAMILY 1
#endif
namespace leveldb {
@@ -92,6 +96,13 @@ inline void MemoryBarrier() {
}
#define LEVELDB_HAVE_MEMORY_BARRIER
+// ARM64
+#elif defined(ARCH_CPU_ARM64_FAMILY)
+inline void MemoryBarrier() {
+ asm volatile("dmb sy" : : : "memory");
+}
+#define LEVELDB_HAVE_MEMORY_BARRIER
+
// PPC
#elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__)
inline void MemoryBarrier() {
@@ -101,6 +112,13 @@ inline void MemoryBarrier() {
}
#define LEVELDB_HAVE_MEMORY_BARRIER
+// MIPS
+#elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__)
+inline void MemoryBarrier() {
+ __asm__ __volatile__("sync" : : : "memory");
+}
+#define LEVELDB_HAVE_MEMORY_BARRIER
+
#endif
// AtomicPointer built using platform-specific MemoryBarrier()
@@ -215,6 +233,7 @@ class AtomicPointer {
#undef LEVELDB_HAVE_MEMORY_BARRIER
#undef ARCH_CPU_X86_FAMILY
#undef ARCH_CPU_ARM_FAMILY
+#undef ARCH_CPU_ARM64_FAMILY
#undef ARCH_CPU_PPC_FAMILY
} // namespace port
diff --git a/src/leveldb/port/port_posix.cc b/src/leveldb/port/port_posix.cc
index 5ba127a5b9..30e8007ae3 100644
--- a/src/leveldb/port/port_posix.cc
+++ b/src/leveldb/port/port_posix.cc
@@ -7,7 +7,6 @@
#include <cstdlib>
#include <stdio.h>
#include <string.h>
-#include "util/logging.h"
namespace leveldb {
namespace port {
diff --git a/src/leveldb/table/filter_block.cc b/src/leveldb/table/filter_block.cc
index 203e15c8bc..4e78b954f8 100644
--- a/src/leveldb/table/filter_block.cc
+++ b/src/leveldb/table/filter_block.cc
@@ -68,7 +68,7 @@ void FilterBlockBuilder::GenerateFilter() {
// Generate filter for current set of keys and append to result_.
filter_offsets_.push_back(result_.size());
- policy_->CreateFilter(&tmp_keys_[0], num_keys, &result_);
+ policy_->CreateFilter(&tmp_keys_[0], static_cast<int>(num_keys), &result_);
tmp_keys_.clear();
keys_.clear();
@@ -97,7 +97,7 @@ bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
if (index < num_) {
uint32_t start = DecodeFixed32(offset_ + index*4);
uint32_t limit = DecodeFixed32(offset_ + index*4 + 4);
- if (start <= limit && limit <= (offset_ - data_)) {
+ if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) {
Slice filter = Slice(data_ + start, limit - start);
return policy_->KeyMayMatch(key, filter);
} else if (start == limit) {
diff --git a/src/leveldb/table/format.cc b/src/leveldb/table/format.cc
index aa63144c9e..24e4e02445 100644
--- a/src/leveldb/table/format.cc
+++ b/src/leveldb/table/format.cc
@@ -30,15 +30,14 @@ Status BlockHandle::DecodeFrom(Slice* input) {
}
void Footer::EncodeTo(std::string* dst) const {
-#ifndef NDEBUG
const size_t original_size = dst->size();
-#endif
metaindex_handle_.EncodeTo(dst);
index_handle_.EncodeTo(dst);
dst->resize(2 * BlockHandle::kMaxEncodedLength); // Padding
PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber & 0xffffffffu));
PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber >> 32));
assert(dst->size() == original_size + kEncodedLength);
+ (void)original_size; // Disable unused variable warning.
}
Status Footer::DecodeFrom(Slice* input) {
diff --git a/src/leveldb/table/iterator_wrapper.h b/src/leveldb/table/iterator_wrapper.h
index 9e16b3dbed..f410c3fabe 100644
--- a/src/leveldb/table/iterator_wrapper.h
+++ b/src/leveldb/table/iterator_wrapper.h
@@ -5,6 +5,9 @@
#ifndef STORAGE_LEVELDB_TABLE_ITERATOR_WRAPPER_H_
#define STORAGE_LEVELDB_TABLE_ITERATOR_WRAPPER_H_
+#include "leveldb/iterator.h"
+#include "leveldb/slice.h"
+
namespace leveldb {
// A internal wrapper class with an interface similar to Iterator that
diff --git a/src/leveldb/table/table.cc b/src/leveldb/table/table.cc
index dff8a82590..decf8082cc 100644
--- a/src/leveldb/table/table.cc
+++ b/src/leveldb/table/table.cc
@@ -82,7 +82,7 @@ Status Table::Open(const Options& options,
*table = new Table(rep);
(*table)->ReadMeta(footer);
} else {
- if (index_block) delete index_block;
+ delete index_block;
}
return s;
diff --git a/src/leveldb/table/table_test.cc b/src/leveldb/table/table_test.cc
index c723bf84cf..abf6e246ff 100644
--- a/src/leveldb/table/table_test.cc
+++ b/src/leveldb/table/table_test.cc
@@ -853,12 +853,20 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {
options.compression = kSnappyCompression;
c.Finish(options, &keys, &kvmap);
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 2000, 3000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 2000, 3000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 4000, 6000));
+ // Expected upper and lower bounds of space used by compressible strings.
+ static const int kSlop = 1000; // Compressor effectiveness varies.
+ const int expected = 2500; // 10000 * compression ratio (0.25)
+ const int min_z = expected - kSlop;
+ const int max_z = expected + kSlop;
+
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, kSlop));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, kSlop));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, kSlop));
+ // Have now emitted a large compressible string, so adjust expected offset.
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), min_z, max_z));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), min_z, max_z));
+ // Have now emitted two large compressible strings, so adjust expected offset.
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 2 * min_z, 2 * max_z));
}
} // namespace leveldb
diff --git a/src/leveldb/util/arena.cc b/src/leveldb/util/arena.cc
index 9367f71492..74078213ee 100644
--- a/src/leveldb/util/arena.cc
+++ b/src/leveldb/util/arena.cc
@@ -9,8 +9,7 @@ namespace leveldb {
static const int kBlockSize = 4096;
-Arena::Arena() {
- blocks_memory_ = 0;
+Arena::Arena() : memory_usage_(0) {
alloc_ptr_ = NULL; // First allocation will allocate a block
alloc_bytes_remaining_ = 0;
}
@@ -60,8 +59,9 @@ char* Arena::AllocateAligned(size_t bytes) {
char* Arena::AllocateNewBlock(size_t block_bytes) {
char* result = new char[block_bytes];
- blocks_memory_ += block_bytes;
blocks_.push_back(result);
+ memory_usage_.NoBarrier_Store(
+ reinterpret_cast<void*>(MemoryUsage() + block_bytes + sizeof(char*)));
return result;
}
diff --git a/src/leveldb/util/arena.h b/src/leveldb/util/arena.h
index 73bbf1cb9b..48bab33741 100644
--- a/src/leveldb/util/arena.h
+++ b/src/leveldb/util/arena.h
@@ -9,6 +9,7 @@
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
+#include "port/port.h"
namespace leveldb {
@@ -24,10 +25,9 @@ class Arena {
char* AllocateAligned(size_t bytes);
// Returns an estimate of the total memory usage of data allocated
- // by the arena (including space allocated but not yet used for user
- // allocations).
+ // by the arena.
size_t MemoryUsage() const {
- return blocks_memory_ + blocks_.capacity() * sizeof(char*);
+ return reinterpret_cast<uintptr_t>(memory_usage_.NoBarrier_Load());
}
private:
@@ -41,8 +41,8 @@ class Arena {
// Array of new[] allocated memory blocks
std::vector<char*> blocks_;
- // Bytes of memory in blocks allocated so far
- size_t blocks_memory_;
+ // Total memory usage of the arena.
+ port::AtomicPointer memory_usage_;
// No copying allowed
Arena(const Arena&);
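
Arena::MemoryUsage() now reads a counter kept in a port::AtomicPointer, which is what makes MemTable::ApproximateMemoryUsage() safe to call without external synchronization. A rough stand-in for that single-writer counter using std::atomic, not the pre-C++11 code in the patch:

    #include <atomic>
    #include <cstddef>

    class UsageCounter {
     public:
      UsageCounter() : bytes_(0) {}
      // Only the allocating thread calls Add(), so a plain load+store suffices.
      void Add(size_t n) {
        bytes_.store(bytes_.load(std::memory_order_relaxed) + n,
                     std::memory_order_relaxed);
      }
      // Any thread may read; the value may lag the writer slightly.
      size_t Get() const { return bytes_.load(std::memory_order_relaxed); }
     private:
      std::atomic<size_t> bytes_;
    };
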
diff --git a/src/leveldb/util/bloom.cc b/src/leveldb/util/bloom.cc
index a27a2ace28..bf3e4ca6e9 100644
--- a/src/leveldb/util/bloom.cc
+++ b/src/leveldb/util/bloom.cc
@@ -47,7 +47,7 @@ class BloomFilterPolicy : public FilterPolicy {
dst->resize(init_size + bytes, 0);
dst->push_back(static_cast<char>(k_)); // Remember # of probes in filter
char* array = &(*dst)[init_size];
- for (size_t i = 0; i < n; i++) {
+ for (int i = 0; i < n; i++) {
// Use double-hashing to generate a sequence of hash values.
// See analysis in [Kirsch,Mitzenmacher 2006].
uint32_t h = BloomHash(keys[i]);
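
The loop shown above (only the index type changes here) implements the double-hashing trick from Kirsch & Mitzenmacher: the k probe positions are derived from one base hash by repeatedly adding a second, derived hash. A standalone sketch of that scheme; the bit-array layout and rotation constant follow bloom.cc but should be treated as illustrative:

    #include <stdint.h>
    #include <vector>

    // Sets the k bloom-filter bits for a key whose base hash is h.
    // Assumes bits is non-empty.
    void AddKeyDoubleHash(uint32_t h, int k, std::vector<uint8_t>* bits) {
      const uint32_t nbits = static_cast<uint32_t>(bits->size() * 8);
      const uint32_t delta = (h >> 17) | (h << 15);  // rotate right 17 bits
      for (int j = 0; j < k; j++) {
        const uint32_t bitpos = h % nbits;
        (*bits)[bitpos / 8] |= static_cast<uint8_t>(1u << (bitpos % 8));
        h += delta;
      }
    }
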
diff --git a/src/leveldb/util/bloom_test.cc b/src/leveldb/util/bloom_test.cc
index 77fb1b3159..1b87a2be3f 100644
--- a/src/leveldb/util/bloom_test.cc
+++ b/src/leveldb/util/bloom_test.cc
@@ -46,7 +46,8 @@ class BloomTest {
key_slices.push_back(Slice(keys_[i]));
}
filter_.clear();
- policy_->CreateFilter(&key_slices[0], key_slices.size(), &filter_);
+ policy_->CreateFilter(&key_slices[0], static_cast<int>(key_slices.size()),
+ &filter_);
keys_.clear();
if (kVerbose >= 2) DumpFilter();
}
diff --git a/src/leveldb/util/cache.cc b/src/leveldb/util/cache.cc
index 8b197bc02a..ce46886171 100644
--- a/src/leveldb/util/cache.cc
+++ b/src/leveldb/util/cache.cc
@@ -19,6 +19,23 @@ Cache::~Cache() {
namespace {
// LRU cache implementation
+//
+// Cache entries have an "in_cache" boolean indicating whether the cache has a
+// reference on the entry. The only ways that this can become false without the
+// entry being passed to its "deleter" are via Erase(), via Insert() when
+// an element with a duplicate key is inserted, or on destruction of the cache.
+//
+// The cache keeps two linked lists of items in the cache. All items in the
+// cache are in one list or the other, and never both. Items still referenced
+// by clients but erased from the cache are in neither list. The lists are:
+// - in-use: contains the items currently referenced by clients, in no
+// particular order. (This list is used for invariant checking. If we
+// removed the check, elements that would otherwise be on this list could be
+// left as disconnected singleton lists.)
+// - LRU: contains the items not currently referenced by clients, in LRU order
+// Elements are moved between these lists by the Ref() and Unref() methods,
+// when they detect an element in the cache acquiring or losing its only
+// external reference.
// An entry is a variable length heap-allocated structure. Entries
// are kept in a circular doubly linked list ordered by access time.
@@ -30,7 +47,8 @@ struct LRUHandle {
LRUHandle* prev;
size_t charge; // TODO(opt): Only allow uint32_t?
size_t key_length;
- uint32_t refs;
+ bool in_cache; // Whether entry is in the cache.
+ uint32_t refs; // References, including cache reference, if present.
uint32_t hash; // Hash of key(); used for fast sharding and comparisons
char key_data[1]; // Beginning of key
@@ -147,49 +165,77 @@ class LRUCache {
Cache::Handle* Lookup(const Slice& key, uint32_t hash);
void Release(Cache::Handle* handle);
void Erase(const Slice& key, uint32_t hash);
+ void Prune();
+ size_t TotalCharge() const {
+ MutexLock l(&mutex_);
+ return usage_;
+ }
private:
void LRU_Remove(LRUHandle* e);
- void LRU_Append(LRUHandle* e);
+ void LRU_Append(LRUHandle* list, LRUHandle* e);
+ void Ref(LRUHandle* e);
void Unref(LRUHandle* e);
+ bool FinishErase(LRUHandle* e);
// Initialized before use.
size_t capacity_;
// mutex_ protects the following state.
- port::Mutex mutex_;
+ mutable port::Mutex mutex_;
size_t usage_;
// Dummy head of LRU list.
// lru.prev is newest entry, lru.next is oldest entry.
+ // Entries have refs==1 and in_cache==true.
LRUHandle lru_;
+ // Dummy head of in-use list.
+ // Entries are in use by clients, and have refs >= 2 and in_cache==true.
+ LRUHandle in_use_;
+
HandleTable table_;
};
LRUCache::LRUCache()
: usage_(0) {
- // Make empty circular linked list
+ // Make empty circular linked lists.
lru_.next = &lru_;
lru_.prev = &lru_;
+ in_use_.next = &in_use_;
+ in_use_.prev = &in_use_;
}
LRUCache::~LRUCache() {
+ assert(in_use_.next == &in_use_); // Error if caller has an unreleased handle
for (LRUHandle* e = lru_.next; e != &lru_; ) {
LRUHandle* next = e->next;
- assert(e->refs == 1); // Error if caller has an unreleased handle
+ assert(e->in_cache);
+ e->in_cache = false;
+ assert(e->refs == 1); // Invariant of lru_ list.
Unref(e);
e = next;
}
}
+void LRUCache::Ref(LRUHandle* e) {
+ if (e->refs == 1 && e->in_cache) { // If on lru_ list, move to in_use_ list.
+ LRU_Remove(e);
+ LRU_Append(&in_use_, e);
+ }
+ e->refs++;
+}
+
void LRUCache::Unref(LRUHandle* e) {
assert(e->refs > 0);
e->refs--;
- if (e->refs <= 0) {
- usage_ -= e->charge;
+ if (e->refs == 0) { // Deallocate.
+ assert(!e->in_cache);
(*e->deleter)(e->key(), e->value);
free(e);
+ } else if (e->in_cache && e->refs == 1) { // No longer in use; move to lru_ list.
+ LRU_Remove(e);
+ LRU_Append(&lru_, e);
}
}
@@ -198,10 +244,10 @@ void LRUCache::LRU_Remove(LRUHandle* e) {
e->prev->next = e->next;
}
-void LRUCache::LRU_Append(LRUHandle* e) {
- // Make "e" newest entry by inserting just before lru_
- e->next = &lru_;
- e->prev = lru_.prev;
+void LRUCache::LRU_Append(LRUHandle* list, LRUHandle* e) {
+ // Make "e" newest entry by inserting just before *list
+ e->next = list;
+ e->prev = list->prev;
e->prev->next = e;
e->next->prev = e;
}
@@ -210,9 +256,7 @@ Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
MutexLock l(&mutex_);
LRUHandle* e = table_.Lookup(key, hash);
if (e != NULL) {
- e->refs++;
- LRU_Remove(e);
- LRU_Append(e);
+ Ref(e);
}
return reinterpret_cast<Cache::Handle*>(e);
}
@@ -234,34 +278,58 @@ Cache::Handle* LRUCache::Insert(
e->charge = charge;
e->key_length = key.size();
e->hash = hash;
- e->refs = 2; // One from LRUCache, one for the returned handle
+ e->in_cache = false;
+ e->refs = 1; // for the returned handle.
memcpy(e->key_data, key.data(), key.size());
- LRU_Append(e);
- usage_ += charge;
- LRUHandle* old = table_.Insert(e);
- if (old != NULL) {
- LRU_Remove(old);
- Unref(old);
- }
+ if (capacity_ > 0) {
+ e->refs++; // for the cache's reference.
+ e->in_cache = true;
+ LRU_Append(&in_use_, e);
+ usage_ += charge;
+ FinishErase(table_.Insert(e));
+ } // else don't cache. (Tests use capacity_==0 to turn off caching.)
while (usage_ > capacity_ && lru_.next != &lru_) {
LRUHandle* old = lru_.next;
- LRU_Remove(old);
- table_.Remove(old->key(), old->hash);
- Unref(old);
+ assert(old->refs == 1);
+ bool erased = FinishErase(table_.Remove(old->key(), old->hash));
+ if (!erased) { // to avoid unused variable when compiled with NDEBUG
+ assert(erased);
+ }
}
return reinterpret_cast<Cache::Handle*>(e);
}
-void LRUCache::Erase(const Slice& key, uint32_t hash) {
- MutexLock l(&mutex_);
- LRUHandle* e = table_.Remove(key, hash);
+// If e != NULL, finish removing *e from the cache; it has already been removed
+// from the hash table. Return whether e != NULL. Requires mutex_ held.
+bool LRUCache::FinishErase(LRUHandle* e) {
if (e != NULL) {
+ assert(e->in_cache);
LRU_Remove(e);
+ e->in_cache = false;
+ usage_ -= e->charge;
Unref(e);
}
+ return e != NULL;
+}
+
+void LRUCache::Erase(const Slice& key, uint32_t hash) {
+ MutexLock l(&mutex_);
+ FinishErase(table_.Remove(key, hash));
+}
+
+void LRUCache::Prune() {
+ MutexLock l(&mutex_);
+ while (lru_.next != &lru_) {
+ LRUHandle* e = lru_.next;
+ assert(e->refs == 1);
+ bool erased = FinishErase(table_.Remove(e->key(), e->hash));
+ if (!erased) { // to avoid unused variable when compiled with NDEBUG
+ assert(erased);
+ }
+ }
}
static const int kNumShardBits = 4;
@@ -314,6 +382,18 @@ class ShardedLRUCache : public Cache {
MutexLock l(&id_mutex_);
return ++(last_id_);
}
+ virtual void Prune() {
+ for (int s = 0; s < kNumShards; s++) {
+ shard_[s].Prune();
+ }
+ }
+ virtual size_t TotalCharge() const {
+ size_t total = 0;
+ for (int s = 0; s < kNumShards; s++) {
+ total += shard_[s].TotalCharge();
+ }
+ return total;
+ }
};
} // end anonymous namespace
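
The comment block and code above introduce the two-list bookkeeping; the essential invariant is that entries with refs==1 sit on lru_ (evictable), entries with refs>=2 sit on in_use_, and refs==0 means deallocation. Here is a simplified, single-threaded sketch of just that invariant, with std::list standing in for the intrusive circular lists and no hash table.

    #include <cassert>
    #include <list>

    // Sketch only: refs==1 entries live on "lru", refs>=2 entries live on
    // "in_use", refs==0 entries are deleted.
    struct Handle { int refs = 1; bool in_cache = true; };

    struct TwoListCache {
      std::list<Handle*> lru, in_use;

      void Ref(Handle* h) {
        if (h->refs == 1 && h->in_cache) {  // first external ref: lru -> in_use
          lru.remove(h);
          in_use.push_back(h);
        }
        h->refs++;
      }
      void Unref(Handle* h) {
        assert(h->refs > 0);
        h->refs--;
        if (h->refs == 0) {                        // no references left: deallocate
          assert(!h->in_cache);
          delete h;
        } else if (h->in_cache && h->refs == 1) {  // only the cache reference remains
          in_use.remove(h);
          lru.push_back(h);                        // becomes eligible for eviction
        }
      }
    };
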
diff --git a/src/leveldb/util/cache_test.cc b/src/leveldb/util/cache_test.cc
index 43716715a8..468f7a6425 100644
--- a/src/leveldb/util/cache_test.cc
+++ b/src/leveldb/util/cache_test.cc
@@ -59,6 +59,11 @@ class CacheTest {
&CacheTest::Deleter));
}
+ Cache::Handle* InsertAndReturnHandle(int key, int value, int charge = 1) {
+ return cache_->Insert(EncodeKey(key), EncodeValue(value), charge,
+ &CacheTest::Deleter);
+ }
+
void Erase(int key) {
cache_->Erase(EncodeKey(key));
}
@@ -135,8 +140,11 @@ TEST(CacheTest, EntriesArePinned) {
TEST(CacheTest, EvictionPolicy) {
Insert(100, 101);
Insert(200, 201);
+ Insert(300, 301);
+ Cache::Handle* h = cache_->Lookup(EncodeKey(300));
- // Frequently used entry must be kept around
+ // Frequently used entry must be kept around,
+ // as must things that are still in use.
for (int i = 0; i < kCacheSize + 100; i++) {
Insert(1000+i, 2000+i);
ASSERT_EQ(2000+i, Lookup(1000+i));
@@ -144,6 +152,25 @@ TEST(CacheTest, EvictionPolicy) {
}
ASSERT_EQ(101, Lookup(100));
ASSERT_EQ(-1, Lookup(200));
+ ASSERT_EQ(301, Lookup(300));
+ cache_->Release(h);
+}
+
+TEST(CacheTest, UseExceedsCacheSize) {
+ // Overfill the cache, keeping handles on all inserted entries.
+ std::vector<Cache::Handle*> h;
+ for (int i = 0; i < kCacheSize + 100; i++) {
+ h.push_back(InsertAndReturnHandle(1000+i, 2000+i));
+ }
+
+ // Check that all the entries can be found in the cache.
+ for (int i = 0; i < h.size(); i++) {
+ ASSERT_EQ(2000+i, Lookup(1000+i));
+ }
+
+ for (int i = 0; i < h.size(); i++) {
+ cache_->Release(h[i]);
+ }
}
TEST(CacheTest, HeavyEntries) {
@@ -179,6 +206,19 @@ TEST(CacheTest, NewId) {
ASSERT_NE(a, b);
}
+TEST(CacheTest, Prune) {
+ Insert(1, 100);
+ Insert(2, 200);
+
+ Cache::Handle* handle = cache_->Lookup(EncodeKey(1));
+ ASSERT_TRUE(handle);
+ cache_->Prune();
+ cache_->Release(handle);
+
+ ASSERT_EQ(100, Lookup(1));
+ ASSERT_EQ(-1, Lookup(2));
+}
+
} // namespace leveldb
int main(int argc, char** argv) {
diff --git a/src/leveldb/util/env.cc b/src/leveldb/util/env.cc
index c2600e964a..c58a0821ef 100644
--- a/src/leveldb/util/env.cc
+++ b/src/leveldb/util/env.cc
@@ -9,6 +9,10 @@ namespace leveldb {
Env::~Env() {
}
+Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) {
+ return Status::NotSupported("NewAppendableFile", fname);
+}
+
SequentialFile::~SequentialFile() {
}
diff --git a/src/leveldb/util/env_posix.cc b/src/leveldb/util/env_posix.cc
index ba2667864a..e0fca52f46 100644
--- a/src/leveldb/util/env_posix.cc
+++ b/src/leveldb/util/env_posix.cc
@@ -351,6 +351,19 @@ class PosixEnv : public Env {
return s;
}
+ virtual Status NewAppendableFile(const std::string& fname,
+ WritableFile** result) {
+ Status s;
+ FILE* f = fopen(fname.c_str(), "a");
+ if (f == NULL) {
+ *result = NULL;
+ s = IOError(fname, errno);
+ } else {
+ *result = new PosixWritableFile(fname, f);
+ }
+ return s;
+ }
+
virtual bool FileExists(const std::string& fname) {
return access(fname.c_str(), F_OK) == 0;
}
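
The two hunks above add NewAppendableFile: the Env base class gains a virtual that defaults to NotSupported, so existing Env implementations keep compiling, and the POSIX env overrides it with fopen(fname, "a"). A compact sketch of that pattern follows; Writer, EnvBase and PosixLikeEnv are illustrative names rather than the leveldb types, and Status is reduced to a bool.

    #include <cstdio>
    #include <string>

    struct Writer {
      FILE* f;
      explicit Writer(FILE* ff) : f(ff) {}
      ~Writer() { if (f) std::fclose(f); }
    };

    struct EnvBase {
      // Default: report lack of support instead of forcing every subclass to implement it.
      virtual bool NewAppendableFile(const std::string&, Writer** result) {
        *result = nullptr;
        return false;  // stands in for Status::NotSupported(...)
      }
      virtual ~EnvBase() {}
    };

    struct PosixLikeEnv : EnvBase {
      bool NewAppendableFile(const std::string& fname, Writer** result) override {
        FILE* f = std::fopen(fname.c_str(), "a");  // create if missing, append otherwise
        if (f == nullptr) { *result = nullptr; return false; /* IOError(errno) */ }
        *result = new Writer(f);
        return true;
      }
    };
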
diff --git a/src/leveldb/util/env_win.cc b/src/leveldb/util/env_win.cc
index e11a96b791..b074b7579e 100644
--- a/src/leveldb/util/env_win.cc
+++ b/src/leveldb/util/env_win.cc
@@ -106,7 +106,7 @@ private:
class Win32WritableFile : public WritableFile
{
public:
- Win32WritableFile(const std::string& fname);
+ Win32WritableFile(const std::string& fname, bool append);
~Win32WritableFile();
virtual Status Append(const Slice& data);
@@ -158,6 +158,8 @@ public:
RandomAccessFile** result);
virtual Status NewWritableFile(const std::string& fname,
WritableFile** result);
+ virtual Status NewAppendableFile(const std::string& fname,
+ WritableFile** result);
virtual bool FileExists(const std::string& fname);
@@ -423,17 +425,23 @@ void Win32RandomAccessFile::_CleanUp()
}
}
-Win32WritableFile::Win32WritableFile(const std::string& fname)
+Win32WritableFile::Win32WritableFile(const std::string& fname, bool append)
: filename_(fname)
{
std::wstring path;
ToWidePath(fname, path);
- DWORD Flag = PathFileExistsW(path.c_str()) ? OPEN_EXISTING : CREATE_ALWAYS;
+ // NewAppendableFile: append to an existing file, or create a new one
+ // if none exists - this is OPEN_ALWAYS behavior, with
+ // FILE_APPEND_DATA to avoid having to manually position the file
+ // pointer at the end of the file.
+ // NewWritableFile: create a new file, delete if it exists - this is
+ // CREATE_ALWAYS behavior. This file is used for writing only so
+ // use GENERIC_WRITE.
_hFile = CreateFileW(path.c_str(),
- GENERIC_READ | GENERIC_WRITE,
+ append ? FILE_APPEND_DATA : GENERIC_WRITE,
FILE_SHARE_READ|FILE_SHARE_DELETE|FILE_SHARE_WRITE,
NULL,
- Flag,
+ append ? OPEN_ALWAYS : CREATE_ALWAYS,
FILE_ATTRIBUTE_NORMAL,
NULL);
// CreateFileW returns INVALID_HANDLE_VALUE in case of error, always check isEnable() before use
@@ -823,7 +831,9 @@ Status Win32Env::NewLogger( const std::string& fname, Logger** result )
{
Status sRet;
std::string path = fname;
- Win32WritableFile* pMapFile = new Win32WritableFile(ModifyPath(path));
+ // Logs are opened with write semantics, not with append semantics
+ // (see PosixEnv::NewLogger)
+ Win32WritableFile* pMapFile = new Win32WritableFile(ModifyPath(path), false);
if(!pMapFile->isEnable()){
delete pMapFile;
*result = NULL;
@@ -837,7 +847,20 @@ Status Win32Env::NewWritableFile( const std::string& fname, WritableFile** resul
{
Status sRet;
std::string path = fname;
- Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path));
+ Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path), false);
+ if(!pFile->isEnable()){
+ *result = NULL;
+ sRet = Status::IOError(fname,Win32::GetLastErrSz());
+ }else
+ *result = pFile;
+ return sRet;
+}
+
+Status Win32Env::NewAppendableFile( const std::string& fname, WritableFile** result )
+{
+ Status sRet;
+ std::string path = fname;
+ Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path), true);
if(!pFile->isEnable()){
*result = NULL;
sRet = Status::IOError(fname,Win32::GetLastErrSz());
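
On Windows the same feature maps onto CreateFileW flags, as the comment in the hunk explains: OPEN_ALWAYS plus FILE_APPEND_DATA opens or creates the file and appends without repositioning the file pointer, while CREATE_ALWAYS plus GENERIC_WRITE recreates the file for plain writing. The flag selection, factored into a hypothetical helper purely for illustration:

    #include <windows.h>
    #include <string>

    // Sketch: the caller must still check for INVALID_HANDLE_VALUE.
    static HANDLE OpenForWrite(const std::wstring& path, bool append) {
      return CreateFileW(path.c_str(),
                         append ? FILE_APPEND_DATA : GENERIC_WRITE,
                         FILE_SHARE_READ | FILE_SHARE_DELETE | FILE_SHARE_WRITE,
                         NULL,
                         append ? OPEN_ALWAYS : CREATE_ALWAYS,
                         FILE_ATTRIBUTE_NORMAL,
                         NULL);
    }
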
diff --git a/src/leveldb/util/options.cc b/src/leveldb/util/options.cc
index 76af5b9302..8b618fb1ae 100644
--- a/src/leveldb/util/options.cc
+++ b/src/leveldb/util/options.cc
@@ -22,8 +22,8 @@ Options::Options()
block_size(4096),
block_restart_interval(16),
compression(kSnappyCompression),
+ reuse_logs(false),
filter_policy(NULL) {
}
-
} // namespace leveldb
diff --git a/src/leveldb/util/testutil.h b/src/leveldb/util/testutil.h
index adad3fc1ea..d7e4583702 100644
--- a/src/leveldb/util/testutil.h
+++ b/src/leveldb/util/testutil.h
@@ -45,6 +45,16 @@ class ErrorEnv : public EnvWrapper {
}
return target()->NewWritableFile(fname, result);
}
+
+ virtual Status NewAppendableFile(const std::string& fname,
+ WritableFile** result) {
+ if (writable_file_error_) {
+ ++num_writable_file_errors_;
+ *result = NULL;
+ return Status::IOError(fname, "fake error");
+ }
+ return target()->NewAppendableFile(fname, result);
+ }
};
} // namespace test
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 3ff5ccc18f..87d367c2fb 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -50,6 +50,7 @@ struct IteratorComparator
};
struct COrphanTx {
+ // When modifying, adapt the copy of this definition in tests/DoS_tests.
CTransaction tx;
NodeId fromPeer;
int64_t nTimeExpire;
@@ -1517,7 +1518,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
CBlock block;
- assert(ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()));
+ bool ret = ReadBlockFromDisk(block, it->second, chainparams.GetConsensus());
+ assert(ret);
BlockTransactions resp(req);
for (size_t i = 0; i < req.indexes.size(); i++) {
@@ -1780,6 +1782,11 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
}
+ // Keep a CBlock for "optimistic" compactblock reconstructions (see
+ // below)
+ std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
+ bool fBlockReconstructed = false;
+
LOCK(cs_main);
// If AcceptBlockHeader returned true, it set pindex
assert(pindex);
@@ -1868,6 +1875,23 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
req.blockhash = pindex->GetBlockHash();
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
}
+ } else {
+ // This block is either already in flight from a different
+ // peer, or this peer has too many blocks outstanding to
+ // download from.
+ // Optimistically try to reconstruct anyway since we might be
+ // able to without any round trips.
+ PartiallyDownloadedBlock tempBlock(&mempool);
+ ReadStatus status = tempBlock.InitData(cmpctblock);
+ if (status != READ_STATUS_OK) {
+ // TODO: don't ignore failures
+ return true;
+ }
+ std::vector<CTransactionRef> dummy;
+ status = tempBlock.FillBlock(*pblock, dummy);
+ if (status == READ_STATUS_OK) {
+ fBlockReconstructed = true;
+ }
}
} else {
if (fAlreadyInFlight) {
@@ -1887,6 +1911,29 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman);
}
}
+
+ if (fBlockReconstructed) {
+ // If we got here, we were able to optimistically reconstruct a
+ // block that is in flight from some other peer.
+ {
+ LOCK(cs_main);
+ mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom->GetId(), false));
+ }
+ bool fNewBlock = false;
+ ProcessNewBlock(chainparams, pblock, true, &fNewBlock);
+ if (fNewBlock)
+ pfrom->nLastBlockTime = GetTime();
+
+ LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
+ if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
+ // Clear download state for this block, which is in
+ // process from some other peer. We do this after calling
+ // ProcessNewBlock so that a malleated cmpctblock announcement
+ // can't be used to interfere with block relay.
+ MarkBlockAsReceived(pblock->GetHash());
+ }
+ }
+
}
else if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing
@@ -1950,7 +1997,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
bool fNewBlock = false;
// Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
// even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
- ProcessNewBlock(chainparams, pblock, true, NULL, &fNewBlock);
+ ProcessNewBlock(chainparams, pblock, true, &fNewBlock);
if (fNewBlock)
pfrom->nLastBlockTime = GetTime();
}
@@ -2132,7 +2179,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true));
}
bool fNewBlock = false;
- ProcessNewBlock(chainparams, pblock, forceProcessing, NULL, &fNewBlock);
+ ProcessNewBlock(chainparams, pblock, forceProcessing, &fNewBlock);
if (fNewBlock)
pfrom->nLastBlockTime = GetTime();
}
@@ -2729,7 +2776,8 @@ bool SendMessages(CNode* pto, CConnman& connman)
vHeaders.front().GetHash().ToString(), pto->id);
//TODO: Shouldn't need to reload block from disk, but requires refactor
CBlock block;
- assert(ReadBlockFromDisk(block, pBestIndex, consensusParams));
+ bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
+ assert(ret);
CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
@@ -2994,8 +3042,12 @@ bool SendMessages(CNode* pto, CConnman& connman)
CAmount currentFilter = mempool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
int64_t timeNow = GetTimeMicros();
if (timeNow > pto->nextSendTimeFeeFilter) {
- static FeeFilterRounder filterRounder(::minRelayTxFee);
+ static CFeeRate default_feerate(DEFAULT_MIN_RELAY_TX_FEE);
+ static FeeFilterRounder filterRounder(default_feerate);
CAmount filterToSend = filterRounder.round(currentFilter);
+ // If we don't allow free transactions, then we always have a fee filter of at least minRelayTxFee
+ if (GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) <= 0)
+ filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
if (filterToSend != pto->lastSentFeeFilter) {
connman.PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
pto->lastSentFeeFilter = filterToSend;
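
Two behavioural notes on the net_processing hunks: compact blocks already in flight from another peer are now reconstructed optimistically from the mempool when possible, and the feefilter value is floored at minRelayTxFee whenever free relay is disabled. The fee floor is simple enough to show as a standalone sketch; the types here are illustrative, with CAmount being an int64_t satoshi value in the real code.

    #include <algorithm>
    #include <cstdint>

    // Sketch of the feefilter floor added above.
    int64_t ClampFeeFilter(int64_t rounded_filter, int64_t min_relay_fee_per_k,
                           int64_t limit_free_relay) {
      if (limit_free_relay <= 0)
        return std::max(rounded_filter, min_relay_fee_per_k);
      return rounded_filter;
    }
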
diff --git a/src/netbase.cpp b/src/netbase.cpp
index 9118584b80..da94fd4d13 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -292,7 +292,7 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials
vSocks5Init.push_back(0x01); // # METHODS
vSocks5Init.push_back(0x00); // X'00' NO AUTHENTICATION REQUIRED
}
- ssize_t ret = send(hSocket, (const char*)begin_ptr(vSocks5Init), vSocks5Init.size(), MSG_NOSIGNAL);
+ ssize_t ret = send(hSocket, (const char*)vSocks5Init.data(), vSocks5Init.size(), MSG_NOSIGNAL);
if (ret != (ssize_t)vSocks5Init.size()) {
CloseSocket(hSocket);
return error("Error sending to proxy");
@@ -317,7 +317,7 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials
vAuth.insert(vAuth.end(), auth->username.begin(), auth->username.end());
vAuth.push_back(auth->password.size());
vAuth.insert(vAuth.end(), auth->password.begin(), auth->password.end());
- ret = send(hSocket, (const char*)begin_ptr(vAuth), vAuth.size(), MSG_NOSIGNAL);
+ ret = send(hSocket, (const char*)vAuth.data(), vAuth.size(), MSG_NOSIGNAL);
if (ret != (ssize_t)vAuth.size()) {
CloseSocket(hSocket);
return error("Error sending authentication to proxy");
@@ -347,7 +347,7 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials
vSocks5.insert(vSocks5.end(), strDest.begin(), strDest.end());
vSocks5.push_back((port >> 8) & 0xFF);
vSocks5.push_back((port >> 0) & 0xFF);
- ret = send(hSocket, (const char*)begin_ptr(vSocks5), vSocks5.size(), MSG_NOSIGNAL);
+ ret = send(hSocket, (const char*)vSocks5.data(), vSocks5.size(), MSG_NOSIGNAL);
if (ret != (ssize_t)vSocks5.size()) {
CloseSocket(hSocket);
return error("Error sending to proxy");
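
The netbase, random, guiutil and script/interpreter hunks all make the same mechanical substitution: the pre-C++11 begin_ptr()/end_ptr() helpers from serialize.h are replaced by std::vector::data(). A tiny illustration of the replacement, on a hypothetical buffer rather than Bitcoin code:

    #include <cstdint>
    #include <vector>

    void Example() {
      std::vector<uint8_t> buf{0x05, 0x01, 0x00};
      // data() is well defined even for an empty vector (the pointer is simply
      // never dereferenced in that case).
      const char* p = reinterpret_cast<const char*>(buf.data());  // was begin_ptr(buf)
      const char* end = p + buf.size();                           // was end_ptr(buf)
      (void)p; (void)end;
    }
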
diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h
index 79393fa949..4c4a238dd5 100644
--- a/src/primitives/transaction.h
+++ b/src/primitives/transaction.h
@@ -303,7 +303,7 @@ class CTransaction
{
public:
// Default transaction version.
- static const int32_t CURRENT_VERSION=1;
+ static const int32_t CURRENT_VERSION=2;
// Changing the default transaction version requires a two step process: first
// adapting relay policy by bumping MAX_STANDARD_VERSION, and then later date
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index 4f48e21a22..cfd0955a74 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -30,6 +30,7 @@
#include "scheduler.h"
#include "ui_interface.h"
#include "util.h"
+#include "warnings.h"
#ifdef ENABLE_WALLET
#include "wallet/wallet.h"
@@ -260,7 +261,7 @@ BitcoinCore::BitcoinCore():
void BitcoinCore::handleRunawayException(const std::exception *e)
{
PrintExceptionContinue(e, "Runaway exception");
- Q_EMIT runawayException(QString::fromStdString(strMiscWarning));
+ Q_EMIT runawayException(QString::fromStdString(GetWarnings("gui")));
}
void BitcoinCore::initialize()
@@ -691,10 +692,10 @@ int main(int argc, char *argv[])
app.exec();
} catch (const std::exception& e) {
PrintExceptionContinue(&e, "Runaway exception");
- app.handleRunawayException(QString::fromStdString(strMiscWarning));
+ app.handleRunawayException(QString::fromStdString(GetWarnings("gui")));
} catch (...) {
PrintExceptionContinue(NULL, "Runaway exception");
- app.handleRunawayException(QString::fromStdString(strMiscWarning));
+ app.handleRunawayException(QString::fromStdString(GetWarnings("gui")));
}
return app.getReturnValue();
}
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index 54ed867de0..651ff84293 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -46,7 +46,6 @@
#include <QMenuBar>
#include <QMessageBox>
#include <QMimeData>
-#include <QProgressBar>
#include <QProgressDialog>
#include <QSettings>
#include <QShortcut>
@@ -251,6 +250,7 @@ BitcoinGUI::BitcoinGUI(const PlatformStyle *_platformStyle, const NetworkStyle *
if(enableWallet) {
connect(walletFrame, SIGNAL(requestedSyncWarningInfo()), this, SLOT(showModalOverlay()));
connect(labelBlocksIcon, SIGNAL(clicked(QPoint)), this, SLOT(showModalOverlay()));
+ connect(progressBar, SIGNAL(clicked(QPoint)), this, SLOT(showModalOverlay()));
}
#endif
}
@@ -1138,8 +1138,8 @@ void BitcoinGUI::setTrayIconVisible(bool fHideTrayIcon)
void BitcoinGUI::showModalOverlay()
{
- if (modalOverlay)
- modalOverlay->showHide(false, true);
+ if (modalOverlay && (progressBar->isVisible() || modalOverlay->isLayerVisible()))
+ modalOverlay->toggleVisibility();
}
static bool ThreadSafeMessageBox(BitcoinGUI *gui, const std::string& message, const std::string& caption, unsigned int style)
diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp
index 4806e41439..8132e4fe0d 100644
--- a/src/qt/guiutil.cpp
+++ b/src/qt/guiutil.cpp
@@ -117,7 +117,7 @@ static std::string DummyAddress(const CChainParams &params)
std::vector<unsigned char> sourcedata = params.Base58Prefix(CChainParams::PUBKEY_ADDRESS);
sourcedata.insert(sourcedata.end(), dummydata, dummydata + sizeof(dummydata));
for(int i=0; i<256; ++i) { // Try every trailing byte
- std::string s = EncodeBase58(begin_ptr(sourcedata), end_ptr(sourcedata));
+ std::string s = EncodeBase58(sourcedata.data(), sourcedata.data() + sourcedata.size());
if (!CBitcoinAddress(s).IsValid())
return s;
sourcedata[sourcedata.size()-1] += 1;
@@ -988,7 +988,12 @@ QString formateNiceTimeOffset(qint64 secs)
return timeBehindText;
}
-void ClickableLabel::mousePressEvent(QMouseEvent *event)
+void ClickableLabel::mouseReleaseEvent(QMouseEvent *event)
+{
+ Q_EMIT clicked(event->pos());
+}
+
+void ClickableProgressBar::mouseReleaseEvent(QMouseEvent *event)
{
Q_EMIT clicked(event->pos());
}
diff --git a/src/qt/guiutil.h b/src/qt/guiutil.h
index 9a17d24f07..4ea2aa36e7 100644
--- a/src/qt/guiutil.h
+++ b/src/qt/guiutil.h
@@ -202,20 +202,6 @@ namespace GUIUtil
QString formateNiceTimeOffset(qint64 secs);
-#if defined(Q_OS_MAC) && QT_VERSION >= 0x050000
- // workaround for Qt OSX Bug:
- // https://bugreports.qt-project.org/browse/QTBUG-15631
- // QProgressBar uses around 10% CPU even when app is in background
- class ProgressBar : public QProgressBar
- {
- bool event(QEvent *e) {
- return (e->type() != QEvent::StyleAnimationUpdate) ? QProgressBar::event(e) : false;
- }
- };
-#else
- typedef QProgressBar ProgressBar;
-#endif
-
class ClickableLabel : public QLabel
{
Q_OBJECT
@@ -226,8 +212,35 @@ namespace GUIUtil
*/
void clicked(const QPoint& point);
protected:
- void mousePressEvent(QMouseEvent *event);
+ void mouseReleaseEvent(QMouseEvent *event);
+ };
+
+ class ClickableProgressBar : public QProgressBar
+ {
+ Q_OBJECT
+
+ Q_SIGNALS:
+ /** Emitted when the progressbar is clicked. The relative mouse coordinates of the click are
+ * passed to the signal.
+ */
+ void clicked(const QPoint& point);
+ protected:
+ void mouseReleaseEvent(QMouseEvent *event);
+ };
+
+#if defined(Q_OS_MAC) && QT_VERSION >= 0x050000
+ // workaround for Qt OSX Bug:
+ // https://bugreports.qt-project.org/browse/QTBUG-15631
+ // QProgressBar uses around 10% CPU even when app is in background
+ class ProgressBar : public ClickableProgressBar
+ {
+ bool event(QEvent *e) {
+ return (e->type() != QEvent::StyleAnimationUpdate) ? QProgressBar::event(e) : false;
+ }
};
+#else
+ typedef ClickableProgressBar ProgressBar;
+#endif
} // namespace GUIUtil
diff --git a/src/qt/modaloverlay.cpp b/src/qt/modaloverlay.cpp
index 1a843a07ac..3e8282e63d 100644
--- a/src/qt/modaloverlay.cpp
+++ b/src/qt/modaloverlay.cpp
@@ -137,6 +137,13 @@ void ModalOverlay::tipUpdate(int count, const QDateTime& blockDate, double nVeri
}
}
+void ModalOverlay::toggleVisibility()
+{
+ showHide(layerIsVisible, true);
+ if (!layerIsVisible)
+ userClosed = true;
+}
+
void ModalOverlay::showHide(bool hide, bool userRequested)
{
if ( (layerIsVisible && !hide) || (!layerIsVisible && hide) || (!hide && userClosed && !userRequested))
diff --git a/src/qt/modaloverlay.h b/src/qt/modaloverlay.h
index 66c0aa78cf..70d37b87ad 100644
--- a/src/qt/modaloverlay.h
+++ b/src/qt/modaloverlay.h
@@ -25,9 +25,11 @@ public Q_SLOTS:
void tipUpdate(int count, const QDateTime& blockDate, double nVerificationProgress);
void setKnownBestHeight(int count, const QDateTime& blockDate);
+ void toggleVisibility();
// will show or hide the modal layer
void showHide(bool hide = false, bool userRequested = false);
void closeClicked();
+ bool isLayerVisible() { return layerIsVisible; }
protected:
bool eventFilter(QObject * obj, QEvent * ev);
diff --git a/src/qt/paymentrequestplus.cpp b/src/qt/paymentrequestplus.cpp
index 20e1f79ffa..82be4d831f 100644
--- a/src/qt/paymentrequestplus.cpp
+++ b/src/qt/paymentrequestplus.cpp
@@ -159,14 +159,24 @@ bool PaymentRequestPlus::getMerchant(X509_STORE* certStore, QString& merchant) c
std::string data_to_verify; // Everything but the signature
rcopy.SerializeToString(&data_to_verify);
- EVP_MD_CTX ctx;
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ EVP_MD_CTX *ctx = EVP_MD_CTX_new();
+ if (!ctx) throw SSLVerifyError("Error allocating OpenSSL context.");
+#else
+ EVP_MD_CTX _ctx;
+ EVP_MD_CTX *ctx;
+ ctx = &_ctx;
+#endif
EVP_PKEY *pubkey = X509_get_pubkey(signing_cert);
- EVP_MD_CTX_init(&ctx);
- if (!EVP_VerifyInit_ex(&ctx, digestAlgorithm, NULL) ||
- !EVP_VerifyUpdate(&ctx, data_to_verify.data(), data_to_verify.size()) ||
- !EVP_VerifyFinal(&ctx, (const unsigned char*)paymentRequest.signature().data(), (unsigned int)paymentRequest.signature().size(), pubkey)) {
+ EVP_MD_CTX_init(ctx);
+ if (!EVP_VerifyInit_ex(ctx, digestAlgorithm, NULL) ||
+ !EVP_VerifyUpdate(ctx, data_to_verify.data(), data_to_verify.size()) ||
+ !EVP_VerifyFinal(ctx, (const unsigned char*)paymentRequest.signature().data(), (unsigned int)paymentRequest.signature().size(), pubkey)) {
throw SSLVerifyError("Bad signature, invalid payment request.");
}
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ EVP_MD_CTX_free(ctx);
+#endif
// OpenSSL API for getting human printable strings from certs is baroque.
int textlen = X509_NAME_get_text_by_NID(certname, NID_commonName, NULL, 0);
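
The paymentrequestplus change is a compatibility shim: OpenSSL 1.1 made EVP_MD_CTX opaque, so the context must be allocated with EVP_MD_CTX_new() and released with EVP_MD_CTX_free(), while older OpenSSL still allows a stack object. A reduced sketch of the pattern, with the verification calls elided:

    #include <openssl/evp.h>

    bool VerifyWithCtx() {
    #if OPENSSL_VERSION_NUMBER >= 0x10100000L
      EVP_MD_CTX* ctx = EVP_MD_CTX_new();   // heap-allocated, already initialized
      if (!ctx) return false;
    #else
      EVP_MD_CTX _ctx;                      // pre-1.1: stack object is still allowed
      EVP_MD_CTX* ctx = &_ctx;
      EVP_MD_CTX_init(ctx);
    #endif
      // ... EVP_VerifyInit_ex / EVP_VerifyUpdate / EVP_VerifyFinal on ctx ...
    #if OPENSSL_VERSION_NUMBER >= 0x10100000L
      EVP_MD_CTX_free(ctx);
    #endif
      return true;
    }
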
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index 520d229901..cfe197de2a 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -137,6 +137,8 @@ bool RPCConsole::RPCExecuteCommandLine(std::string &strResult, const std::string
enum CmdParseState
{
STATE_EATING_SPACES,
+ STATE_EATING_SPACES_IN_ARG,
+ STATE_EATING_SPACES_IN_BRACKETS,
STATE_ARGUMENT,
STATE_SINGLEQUOTED,
STATE_DOUBLEQUOTED,
@@ -220,6 +222,8 @@ bool RPCConsole::RPCExecuteCommandLine(std::string &strResult, const std::string
break;
}
case STATE_ARGUMENT: // In or after argument
+ case STATE_EATING_SPACES_IN_ARG:
+ case STATE_EATING_SPACES_IN_BRACKETS:
case STATE_EATING_SPACES: // Handle runs of whitespace
switch(ch)
{
@@ -227,19 +231,20 @@ bool RPCConsole::RPCExecuteCommandLine(std::string &strResult, const std::string
case '\'': state = STATE_SINGLEQUOTED; break;
case '\\': state = STATE_ESCAPE_OUTER; break;
case '(': case ')': case '\n':
+ if (state == STATE_EATING_SPACES_IN_ARG)
+ throw std::runtime_error("Invalid Syntax");
if (state == STATE_ARGUMENT)
{
if (ch == '(' && stack.size() && stack.back().size() > 0)
stack.push_back(std::vector<std::string>());
- if (curarg.size())
- {
- // don't allow commands after executed commands on baselevel
- if (!stack.size())
- throw std::runtime_error("Invalid Syntax");
- stack.back().push_back(curarg);
- }
+
+ // don't allow commands after executed commands on baselevel
+ if (!stack.size())
+ throw std::runtime_error("Invalid Syntax");
+
+ stack.back().push_back(curarg);
curarg.clear();
- state = STATE_EATING_SPACES;
+ state = STATE_EATING_SPACES_IN_BRACKETS;
}
if ((ch == ')' || ch == '\n') && stack.size() > 0)
{
@@ -256,12 +261,19 @@ bool RPCConsole::RPCExecuteCommandLine(std::string &strResult, const std::string
}
break;
case ' ': case ',': case '\t':
- if(state == STATE_ARGUMENT) // Space ends argument
+ if(state == STATE_EATING_SPACES_IN_ARG && curarg.empty() && ch == ',')
+ throw std::runtime_error("Invalid Syntax");
+
+ else if(state == STATE_ARGUMENT) // Space ends argument
{
- if (curarg.size())
- stack.back().push_back(curarg);
+ stack.back().push_back(curarg);
curarg.clear();
}
+ if ((state == STATE_EATING_SPACES_IN_BRACKETS || state == STATE_ARGUMENT) && ch == ',')
+ {
+ state = STATE_EATING_SPACES_IN_ARG;
+ break;
+ }
state = STATE_EATING_SPACES;
break;
default: curarg += ch; state = STATE_ARGUMENT;
@@ -515,7 +527,7 @@ void RPCConsole::setClientModel(ClientModel *model)
// peer table signal handling - update peer details when new nodes are added to the model
connect(model->getPeerTableModel(), SIGNAL(layoutChanged()), this, SLOT(peerLayoutChanged()));
// peer table signal handling - cache selected node ids
- connect(model->getPeerTableModel(), SIGNAL(layoutAboutToChange()), this, SLOT(peerLayoutAboutToChange()));
+ connect(model->getPeerTableModel(), SIGNAL(layoutAboutToBeChanged()), this, SLOT(peerLayoutAboutToChange()));
// set up ban table
ui->banlistWidget->setModel(model->getBanTableModel());
@@ -650,13 +662,18 @@ void RPCConsole::clear(bool clearHistory)
"td.message { font-family: %1; font-size: %2; white-space:pre-wrap; } "
"td.cmd-request { color: #006060; } "
"td.cmd-error { color: red; } "
+ ".secwarning { color: red; }"
"b { color: #006060; } "
).arg(fixedFontInfo.family(), QString("%1pt").arg(consoleFontSize))
);
message(CMD_REPLY, (tr("Welcome to the %1 RPC console.").arg(tr(PACKAGE_NAME)) + "<br>" +
tr("Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.") + "<br>" +
- tr("Type <b>help</b> for an overview of available commands.")), true);
+ tr("Type <b>help</b> for an overview of available commands.")) +
+ "<br><span class=\"secwarning\">" +
+ tr("WARNING: Scammers have been active, telling users to type commands here, stealing their wallet contents. Do not use this console without fully understanding the ramification of a command.") +
+ "</span>",
+ true);
}
void RPCConsole::keyPressEvent(QKeyEvent *event)
@@ -778,7 +795,6 @@ void RPCConsole::startExecutor()
connect(this, SIGNAL(stopExecutor()), &thread, SLOT(quit()));
// - queue executor for deletion (in execution thread)
connect(&thread, SIGNAL(finished()), executor, SLOT(deleteLater()), Qt::DirectConnection);
- connect(&thread, SIGNAL(finished()), this, SLOT(test()), Qt::DirectConnection);
// Default implementation of QThread::run() simply spins up an event loop in the thread,
// which is what we want.
@@ -1008,11 +1024,11 @@ void RPCConsole::disconnectSelectedNode()
return;
// Get selected peer addresses
- QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->peerWidget, 0);
+ QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->peerWidget, PeerTableModel::NetNodeId);
for(int i = 0; i < nodes.count(); i++)
{
// Get currently selected peer address
- NodeId id = nodes.at(i).data(PeerTableModel::NetNodeId).toInt();
+ NodeId id = nodes.at(i).data().toInt();
// Find the node, disconnect it and clear the selected node
if(g_connman->DisconnectNode(id))
clearSelectedNode();
@@ -1025,11 +1041,11 @@ void RPCConsole::banSelectedNode(int bantime)
return;
// Get selected peer addresses
- QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->peerWidget, 0);
+ QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->peerWidget, PeerTableModel::NetNodeId);
for(int i = 0; i < nodes.count(); i++)
{
// Get currently selected peer address
- NodeId id = nodes.at(i).data(PeerTableModel::NetNodeId).toInt();
+ NodeId id = nodes.at(i).data().toInt();
// Get currently selected peer address
int detailNodeRow = clientModel->getPeerTableModel()->getRowByNodeId(id);
@@ -1052,11 +1068,11 @@ void RPCConsole::unbanSelectedNode()
return;
// Get selected ban addresses
- QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->banlistWidget, 0);
+ QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->banlistWidget, BanTableModel::Address);
for(int i = 0; i < nodes.count(); i++)
{
// Get currently selected ban address
- QString strNode = nodes.at(i).data(BanTableModel::Address).toString();
+ QString strNode = nodes.at(i).data().toString();
CSubNet possibleSubnet;
LookupSubNet(strNode.toStdString().c_str(), possibleSubnet);
@@ -1090,8 +1106,3 @@ void RPCConsole::setTabFocus(enum TabTypes tabType)
{
ui->tabWidget->setCurrentIndex(tabType);
}
-
-void RPCConsole::on_toggleNetworkActiveButton_clicked()
-{
- clientModel->setNetworkActive(!clientModel->getNetworkActive());
-}
diff --git a/src/qt/rpcconsole.h b/src/qt/rpcconsole.h
index 344d5ecb98..ab8c3dc914 100644
--- a/src/qt/rpcconsole.h
+++ b/src/qt/rpcconsole.h
@@ -62,8 +62,6 @@ protected:
private Q_SLOTS:
void on_lineEdit_returnPressed();
void on_tabWidget_currentChanged(int index);
- /** toggle network activity */
- void on_toggleNetworkActiveButton_clicked();
/** open the debug.log from the current datadir */
void on_openDebugLogfileButton_clicked();
/** change the time range of the network traffic graph */
diff --git a/src/qt/test/compattests.cpp b/src/qt/test/compattests.cpp
new file mode 100644
index 0000000000..2a7284b5b2
--- /dev/null
+++ b/src/qt/test/compattests.cpp
@@ -0,0 +1,23 @@
+// Copyright (c) 2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "paymentrequestplus.h" // this includes protobuf's port.h which defines its own bswap macos
+
+#include "compattests.h"
+
+#include "compat/byteswap.h"
+
+void CompatTests::bswapTests()
+{
+ // Sibling in bitcoin/src/test/bswap_tests.cpp
+ uint16_t u1 = 0x1234;
+ uint32_t u2 = 0x56789abc;
+ uint64_t u3 = 0xdef0123456789abc;
+ uint16_t e1 = 0x3412;
+ uint32_t e2 = 0xbc9a7856;
+ uint64_t e3 = 0xbc9a78563412f0de;
+ QVERIFY(bswap_16(u1) == e1);
+ QVERIFY(bswap_32(u2) == e2);
+ QVERIFY(bswap_64(u3) == e3);
+}
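
The test vectors above exercise compat/byteswap.h. For reference, here is a portable fallback of the kind such a header provides when no compiler or platform intrinsic (for example __builtin_bswap32) is available; this is a sketch, not the header's actual contents.

    #include <cstdint>

    inline uint16_t bswap16_fallback(uint16_t x) {
      return static_cast<uint16_t>((x >> 8) | (x << 8));
    }
    inline uint32_t bswap32_fallback(uint32_t x) {
      return ((x & 0xff000000u) >> 24) | ((x & 0x00ff0000u) >> 8) |
             ((x & 0x0000ff00u) << 8)  | ((x & 0x000000ffu) << 24);
    }
    // bswap32_fallback(0x56789abc) == 0xbc9a7856, matching the test vector above.
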
diff --git a/src/qt/test/compattests.h b/src/qt/test/compattests.h
new file mode 100644
index 0000000000..35dede7743
--- /dev/null
+++ b/src/qt/test/compattests.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2009-2015 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_QT_TEST_COMPATTESTS_H
+#define BITCOIN_QT_TEST_COMPATTESTS_H
+
+#include <QObject>
+#include <QTest>
+
+class CompatTests : public QObject
+{
+ Q_OBJECT
+
+private Q_SLOTS:
+ void bswapTests();
+};
+
+#endif // BITCOIN_QT_TEST_COMPATTESTS_H
diff --git a/src/qt/test/rpcnestedtests.cpp b/src/qt/test/rpcnestedtests.cpp
index 69757f9a93..26ac5e2114 100644
--- a/src/qt/test/rpcnestedtests.cpp
+++ b/src/qt/test/rpcnestedtests.cpp
@@ -15,9 +15,23 @@
#include "util.h"
#include <QDir>
+#include <QtGlobal>
#include <boost/filesystem.hpp>
+static UniValue rpcNestedTest_rpc(const JSONRPCRequest& request)
+{
+ if (request.fHelp) {
+ return "help message";
+ }
+ return request.params.write(0, 0);
+}
+
+static const CRPCCommand vRPCCommands[] =
+{
+ { "test", "rpcNestedTest", &rpcNestedTest_rpc, true },
+};
+
void RPCNestedTests::rpcNestedTests()
{
UniValue jsonRPCError;
@@ -26,6 +40,7 @@ void RPCNestedTests::rpcNestedTests()
// could be moved to a more generic place when we add more tests on QT level
const CChainParams& chainparams = Params();
RegisterAllCoreRPCCommands(tableRPC);
+ tableRPC.appendCommand("rpcNestedTest", &vRPCCommands[0]);
ClearDatadirCache();
std::string path = QDir::tempPath().toStdString() + "/" + strprintf("test_bitcoin_qt_%lu_%i", (unsigned long)GetTime(), (int)(GetRand(100000)));
QDir dir(QString::fromStdString(path));
@@ -63,16 +78,6 @@ void RPCNestedTests::rpcNestedTests()
RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo "); //whitespace at the end will be tolerated
QVERIFY(result.substr(0,1) == "{");
-#if QT_VERSION >= 0x050300
- // do the QVERIFY_EXCEPTION_THROWN checks only with Qt5.3 and higher (QVERIFY_EXCEPTION_THROWN was introduced in Qt5.3)
- QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo() .\n"), std::runtime_error); //invalid syntax
- QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo() getblockchaininfo()"), std::runtime_error); //invalid syntax
- (RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo(")); //tolerate non closing brackets if we have no arguments
- (RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo()()()")); //tolerate non command brackts
- QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo(True)"), UniValue); //invalid argument
- QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(result, "a(getblockchaininfo(True))"), UniValue); //method not found
-#endif
-
(RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo()[\"chain\"]")); //Quoted path identifiers are allowed, but look for a child containing the quotes in the key
QVERIFY(result == "null");
@@ -85,6 +90,40 @@ void RPCNestedTests::rpcNestedTests()
RPCConsole::RPCExecuteCommandLine(result, "getblock(getbestblockhash())[tx][0]");
QVERIFY(result == "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b");
+ RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest");
+ QVERIFY(result == "[]");
+ RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest ''");
+ QVERIFY(result == "[\"\"]");
+ RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest \"\"");
+ QVERIFY(result == "[\"\"]");
+ RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest '' abc");
+ QVERIFY(result == "[\"\",\"abc\"]");
+ RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest abc '' abc");
+ QVERIFY(result == "[\"abc\",\"\",\"abc\"]");
+ RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest abc abc");
+ QVERIFY(result == "[\"abc\",\"abc\"]");
+ RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest abc\t\tabc");
+ QVERIFY(result == "[\"abc\",\"abc\"]");
+ RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest(abc )");
+ QVERIFY(result == "[\"abc\"]");
+ RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest( abc )");
+ QVERIFY(result == "[\"abc\"]");
+ RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest( abc , cba )");
+ QVERIFY(result == "[\"abc\",\"cba\"]");
+
+#if QT_VERSION >= 0x050300
+ // do the QVERIFY_EXCEPTION_THROWN checks only with Qt5.3 and higher (QVERIFY_EXCEPTION_THROWN was introduced in Qt5.3)
+ QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo() .\n"), std::runtime_error); //invalid syntax
+ QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo() getblockchaininfo()"), std::runtime_error); //invalid syntax
+ (RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo(")); //tolerate non closing brackets if we have no arguments
+ (RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo()()()")); //tolerate non-command brackets
+ QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(result, "getblockchaininfo(True)"), UniValue); //invalid argument
+ QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(result, "a(getblockchaininfo(True))"), UniValue); //method not found
+ QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest abc,,abc"), std::runtime_error); //don't tolerate empty arguments when using ,
+ QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest(abc,,abc)"), std::runtime_error); //don't tolerate empty arguments when using ,
+ QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(result, "rpcNestedTest(abc,,)"), std::runtime_error); //don't tolerate empty arguments when using ,
+#endif
+
delete pcoinsTip;
delete pcoinsdbview;
delete pblocktree;
diff --git a/src/qt/test/test_main.cpp b/src/qt/test/test_main.cpp
index dbaab54fb6..d44d711315 100644
--- a/src/qt/test/test_main.cpp
+++ b/src/qt/test/test_main.cpp
@@ -11,6 +11,7 @@
#include "rpcnestedtests.h"
#include "util.h"
#include "uritests.h"
+#include "compattests.h"
#ifdef ENABLE_WALLET
#include "paymentservertests.h"
@@ -61,6 +62,9 @@ int main(int argc, char *argv[])
RPCNestedTests test3;
if (QTest::qExec(&test3) != 0)
fInvalid = true;
+ CompatTests test4;
+ if (QTest::qExec(&test4) != 0)
+ fInvalid = true;
ECC_Stop();
return fInvalid;
diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp
index a78fc90d2c..afc72fae69 100644
--- a/src/qt/walletmodel.cpp
+++ b/src/qt/walletmodel.cpp
@@ -335,9 +335,8 @@ WalletModel::SendCoinsReturn WalletModel::sendCoins(WalletModelTransaction &tran
if(!wallet->CommitTransaction(*newTx, *keyChange, g_connman.get(), state))
return SendCoinsReturn(TransactionCommitFailed, QString::fromStdString(state.GetRejectReason()));
- CTransaction* t = (CTransaction*)newTx;
CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
- ssTx << *t;
+ ssTx << *newTx->tx;
transaction_array.append(&(ssTx[0]), ssTx.size());
}
diff --git a/src/random.cpp b/src/random.cpp
index aa027e49c4..c2605b45bd 100644
--- a/src/random.cpp
+++ b/src/random.cpp
@@ -11,7 +11,6 @@
#include "compat.h" // for Windows API
#include <wincrypt.h>
#endif
-#include "serialize.h" // for begin_ptr(vec)
#include "util.h" // for LogPrint()
#include "utilstrencodings.h" // for GetTime()
@@ -72,15 +71,15 @@ static void RandAddSeedPerfmon()
const size_t nMaxSize = 10000000; // Bail out at more than 10MB of performance data
while (true) {
nSize = vData.size();
- ret = RegQueryValueExA(HKEY_PERFORMANCE_DATA, "Global", NULL, NULL, begin_ptr(vData), &nSize);
+ ret = RegQueryValueExA(HKEY_PERFORMANCE_DATA, "Global", NULL, NULL, vData.data(), &nSize);
if (ret != ERROR_MORE_DATA || vData.size() >= nMaxSize)
break;
vData.resize(std::max((vData.size() * 3) / 2, nMaxSize)); // Grow size of buffer exponentially
}
RegCloseKey(HKEY_PERFORMANCE_DATA);
if (ret == ERROR_SUCCESS) {
- RAND_add(begin_ptr(vData), nSize, nSize / 100.0);
- memory_cleanse(begin_ptr(vData), nSize);
+ RAND_add(vData.data(), nSize, nSize / 100.0);
+ memory_cleanse(vData.data(), nSize);
LogPrint("rand", "%s: %lu bytes\n", __func__, nSize);
} else {
static bool warned = false; // Warn only once
diff --git a/src/rest.cpp b/src/rest.cpp
index 6379061f8f..6b6b7401f6 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -228,7 +228,7 @@ static bool rest_block(HTTPRequest* req,
return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found");
}
- CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION);
+ CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags());
ssBlock << block;
switch (rf) {
@@ -368,7 +368,7 @@ static bool rest_tx(HTTPRequest* req, const std::string& strURIPart)
if (!GetTransaction(hash, tx, Params().GetConsensus(), hashBlock, true))
return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found");
- CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
+ CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags());
ssTx << tx;
switch (rf) {
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index e6d80f06a3..1139c2aa5c 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -751,7 +751,7 @@ UniValue getblock(const JSONRPCRequest& request)
if (!fVerbose)
{
- CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION);
+ CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags());
ssBlock << block;
std::string strHex = HexStr(ssBlock.begin(), ssBlock.end());
return strHex;
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index cb22dec342..73797e2019 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -132,7 +132,7 @@ UniValue generateBlocks(boost::shared_ptr<CReserveScript> coinbaseScript, int nG
continue;
}
std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock);
- if (!ProcessNewBlock(Params(), shared_pblock, true, NULL, NULL))
+ if (!ProcessNewBlock(Params(), shared_pblock, true, NULL))
throw JSONRPCError(RPC_INTERNAL_ERROR, "ProcessNewBlock, block not accepted");
++nHeight;
blockHashes.push_back(pblock->GetHash().GetHex());
@@ -760,7 +760,7 @@ UniValue submitblock(const JSONRPCRequest& request)
submitblock_StateCatcher sc(block.GetHash());
RegisterValidationInterface(&sc);
- bool fAccepted = ProcessNewBlock(Params(), blockptr, true, NULL, NULL);
+ bool fAccepted = ProcessNewBlock(Params(), blockptr, true, NULL);
UnregisterValidationInterface(&sc);
if (fBlockPresent)
{
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index b13aa4de34..21af449466 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -220,7 +220,7 @@ UniValue getrawtransaction(const JSONRPCRequest& request)
if (!GetTransaction(hash, tx, Params().GetConsensus(), hashBlock, true))
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "No information available about transaction");
- string strHex = EncodeHexTx(*tx);
+ string strHex = EncodeHexTx(*tx, RPCSerializationFlags());
if (!fVerbose)
return strHex;
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index 164e0f00e2..2122819398 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -497,4 +497,12 @@ void RPCRunLater(const std::string& name, boost::function<void(void)> func, int6
deadlineTimers.emplace(name, std::unique_ptr<RPCTimerBase>(timerInterface->NewTimer(func, nSeconds*1000)));
}
+int RPCSerializationFlags()
+{
+ int flag = 0;
+ if (GetArg("-rpcserialversion", DEFAULT_RPC_SERIALIZE_VERSION) == 0)
+ flag |= SERIALIZE_TRANSACTION_NO_WITNESS;
+ return flag;
+}
+
CRPCTable tableRPC;
diff --git a/src/rpc/server.h b/src/rpc/server.h
index c59886222c..4fac68a51f 100644
--- a/src/rpc/server.h
+++ b/src/rpc/server.h
@@ -19,6 +19,8 @@
#include <univalue.h>
+static const unsigned int DEFAULT_RPC_SERIALIZE_VERSION = 1;
+
class CRPCCommand;
namespace RPCServer
@@ -198,4 +200,7 @@ void StopRPC();
std::string JSONRPCExecBatch(const UniValue& vReq);
void RPCNotifyBlockChange(bool ibd, const CBlockIndex *);
+// Retrieves any serialization flags requested in command line argument
+int RPCSerializationFlags();
+
#endif // BITCOIN_RPCSERVER_H
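
RPCSerializationFlags() ties the new -rpcserialversion option to the serialization changes earlier in this diff: with -rpcserialversion=0 the REST and RPC block and transaction dumps are produced without witness data by OR-ing SERIALIZE_TRANSACTION_NO_WITNESS into the stream version. A self-contained sketch of that flag logic; the placeholder constant below stands in for the real value defined elsewhere in the tree.

    #include <cstdint>

    // Illustrative only; the real constant lives in the transaction serializer.
    static const int SERIALIZE_TRANSACTION_NO_WITNESS_SKETCH = 0x40000000;

    int RPCSerializationFlagsSketch(int64_t rpcserialversion) {
      int flag = 0;
      if (rpcserialversion == 0)
        flag |= SERIALIZE_TRANSACTION_NO_WITNESS_SKETCH;
      return flag;  // then OR'd into the stream version, e.g. PROTOCOL_VERSION | flag
    }
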
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index a6403f9363..1410d0b73f 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -856,15 +856,15 @@ bool EvalScript(vector<vector<unsigned char> >& stack, const CScript& script, un
valtype& vch = stacktop(-1);
valtype vchHash((opcode == OP_RIPEMD160 || opcode == OP_SHA1 || opcode == OP_HASH160) ? 20 : 32);
if (opcode == OP_RIPEMD160)
- CRIPEMD160().Write(begin_ptr(vch), vch.size()).Finalize(begin_ptr(vchHash));
+ CRIPEMD160().Write(vch.data(), vch.size()).Finalize(vchHash.data());
else if (opcode == OP_SHA1)
- CSHA1().Write(begin_ptr(vch), vch.size()).Finalize(begin_ptr(vchHash));
+ CSHA1().Write(vch.data(), vch.size()).Finalize(vchHash.data());
else if (opcode == OP_SHA256)
- CSHA256().Write(begin_ptr(vch), vch.size()).Finalize(begin_ptr(vchHash));
+ CSHA256().Write(vch.data(), vch.size()).Finalize(vchHash.data());
else if (opcode == OP_HASH160)
- CHash160().Write(begin_ptr(vch), vch.size()).Finalize(begin_ptr(vchHash));
+ CHash160().Write(vch.data(), vch.size()).Finalize(vchHash.data());
else if (opcode == OP_HASH256)
- CHash256().Write(begin_ptr(vch), vch.size()).Finalize(begin_ptr(vchHash));
+ CHash256().Write(vch.data(), vch.size()).Finalize(vchHash.data());
popstack(stack);
stack.push_back(vchHash);
}
diff --git a/src/script/sigcache.cpp b/src/script/sigcache.cpp
index bdc0bfdc1c..b78d7b607f 100644
--- a/src/script/sigcache.cpp
+++ b/src/script/sigcache.cpp
@@ -11,20 +11,29 @@
#include "uint256.h"
#include "util.h"
+#include "cuckoocache.h"
#include <boost/thread.hpp>
-#include <boost/unordered_set.hpp>
namespace {
/**
* We're hashing a nonce into the entries themselves, so we don't need extra
* blinding in the set hash computation.
+ *
+ * This may exhibit platform endian dependent behavior but because these are
+ * nonced hashes (random) and this state is only ever used locally it is safe.
+ * All that matters is local consistency.
*/
-class CSignatureCacheHasher
+class SignatureCacheHasher
{
public:
- size_t operator()(const uint256& key) const {
- return key.GetCheapHash();
+ template <uint8_t hash_select>
+ uint32_t operator()(const uint256& key) const
+ {
+ static_assert(hash_select <8, "SignatureCacheHasher only has 8 hashes available.");
+ uint32_t u;
+ std::memcpy(&u, key.begin()+4*hash_select, 4);
+ return u;
}
};
@@ -38,11 +47,10 @@ class CSignatureCache
private:
//! Entries are SHA256(nonce || signature hash || public key || signature):
uint256 nonce;
- typedef boost::unordered_set<uint256, CSignatureCacheHasher> map_type;
+ typedef CuckooCache::cache<uint256, SignatureCacheHasher> map_type;
map_type setValid;
boost::shared_mutex cs_sigcache;
-
public:
CSignatureCache()
{
@@ -56,58 +64,51 @@ public:
}
bool
- Get(const uint256& entry)
+ Get(const uint256& entry, const bool erase)
{
boost::shared_lock<boost::shared_mutex> lock(cs_sigcache);
- return setValid.count(entry);
+ return setValid.contains(entry, erase);
}
- void Erase(const uint256& entry)
+ void Set(uint256& entry)
{
boost::unique_lock<boost::shared_mutex> lock(cs_sigcache);
- setValid.erase(entry);
+ setValid.insert(entry);
}
-
- void Set(const uint256& entry)
+ uint32_t setup_bytes(size_t n)
{
- size_t nMaxCacheSize = GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
- if (nMaxCacheSize <= 0) return;
-
- boost::unique_lock<boost::shared_mutex> lock(cs_sigcache);
- while (memusage::DynamicUsage(setValid) > nMaxCacheSize)
- {
- map_type::size_type s = GetRand(setValid.bucket_count());
- map_type::local_iterator it = setValid.begin(s);
- if (it != setValid.end(s)) {
- setValid.erase(*it);
- }
- }
-
- setValid.insert(entry);
+ return setValid.setup_bytes(n);
}
};
+/* In previous versions of this code, signatureCache was a local static variable
+ * in CachingTransactionSignatureChecker::VerifySignature. We initialize
+ * signatureCache outside of VerifySignature to avoid the atomic operation per
+ * call overhead associated with local static variables even though
+ * signatureCache could be made local to VerifySignature.
+*/
+static CSignatureCache signatureCache;
}
-bool CachingTransactionSignatureChecker::VerifySignature(const std::vector<unsigned char>& vchSig, const CPubKey& pubkey, const uint256& sighash) const
+// To be called once in AppInit2/TestingSetup to initialize the signatureCache
+void InitSignatureCache()
{
- static CSignatureCache signatureCache;
+ size_t nMaxCacheSize = GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
+ if (nMaxCacheSize <= 0) return;
+ size_t nElems = signatureCache.setup_bytes(nMaxCacheSize);
+ LogPrintf("Using %zu MiB out of %zu requested for signature cache, able to store %zu elements\n",
+ (nElems*sizeof(uint256)) >>20, nMaxCacheSize>>20, nElems);
+}
+bool CachingTransactionSignatureChecker::VerifySignature(const std::vector<unsigned char>& vchSig, const CPubKey& pubkey, const uint256& sighash) const
+{
uint256 entry;
signatureCache.ComputeEntry(entry, sighash, vchSig, pubkey);
-
- if (signatureCache.Get(entry)) {
- if (!store) {
- signatureCache.Erase(entry);
- }
+ if (signatureCache.Get(entry, !store))
return true;
- }
-
if (!TransactionSignatureChecker::VerifySignature(vchSig, pubkey, sighash))
return false;
-
- if (store) {
+ if (store)
signatureCache.Set(entry);
- }
return true;
}
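
The new SignatureCacheHasher relies on the cache key already being a uniformly random 32-byte hash, so each of the eight hash functions required by the cuckoo cache can simply read a different 4-byte window of the key instead of rehashing it. A standalone sketch of that idea, with std::array standing in for uint256:

    #include <array>
    #include <cstdint>
    #include <cstring>

    template <uint8_t hash_select>
    uint32_t SelectWord(const std::array<uint8_t, 32>& key) {
      static_assert(hash_select < 8, "only 8 disjoint 4-byte windows in a 32-byte key");
      uint32_t u;
      std::memcpy(&u, key.data() + 4 * hash_select, 4);  // window hash_select of the key
      return u;
    }
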
diff --git a/src/script/sigcache.h b/src/script/sigcache.h
index 44551ec2bc..5243fc0a42 100644
--- a/src/script/sigcache.h
+++ b/src/script/sigcache.h
@@ -10,9 +10,10 @@
#include <vector>
-// DoS prevention: limit cache size to less than 40MB (over 500000
-// entries on 64-bit systems).
-static const unsigned int DEFAULT_MAX_SIG_CACHE_SIZE = 40;
+// DoS prevention: limit cache size to 32MB (over 1000000 entries on 64-bit
+// systems). Due to how we count cache size, actual memory usage is slightly
+// more (~32.25 MB)
+static const unsigned int DEFAULT_MAX_SIG_CACHE_SIZE = 32;
class CPubKey;
@@ -27,4 +28,6 @@ public:
bool VerifySignature(const std::vector<unsigned char>& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const;
};
+void InitSignatureCache();
+
#endif // BITCOIN_SCRIPT_SIGCACHE_H
diff --git a/src/secp256k1/.gitignore b/src/secp256k1/.gitignore
index efb277d347..87fea161ba 100644
--- a/src/secp256k1/.gitignore
+++ b/src/secp256k1/.gitignore
@@ -6,6 +6,7 @@ bench_schnorr_verify
bench_recover
bench_internal
tests
+exhaustive_tests
gen_context
*.exe
*.so
diff --git a/src/secp256k1/.travis.yml b/src/secp256k1/.travis.yml
index 2c5c63adad..2439529242 100644
--- a/src/secp256k1/.travis.yml
+++ b/src/secp256k1/.travis.yml
@@ -11,7 +11,7 @@ cache:
- src/java/guava/
env:
global:
- - FIELD=auto BIGNUM=auto SCALAR=auto ENDOMORPHISM=no STATICPRECOMPUTATION=yes ASM=no BUILD=check EXTRAFLAGS= HOST= ECDH=no schnorr=no RECOVERY=no EXPERIMENTAL=no
+ - FIELD=auto BIGNUM=auto SCALAR=auto ENDOMORPHISM=no STATICPRECOMPUTATION=yes ASM=no BUILD=check EXTRAFLAGS= HOST= ECDH=no RECOVERY=no EXPERIMENTAL=no
- GUAVA_URL=https://search.maven.org/remotecontent?filepath=com/google/guava/guava/18.0/guava-18.0.jar GUAVA_JAR=src/java/guava/guava-18.0.jar
matrix:
- SCALAR=32bit RECOVERY=yes
@@ -22,15 +22,14 @@ env:
- FIELD=64bit ENDOMORPHISM=yes ECDH=yes EXPERIMENTAL=yes
- FIELD=64bit ASM=x86_64
- FIELD=64bit ENDOMORPHISM=yes ASM=x86_64
- - FIELD=32bit SCHNORR=yes EXPERIMENTAL=yes
- FIELD=32bit ENDOMORPHISM=yes
- BIGNUM=no
- - BIGNUM=no ENDOMORPHISM=yes SCHNORR=yes RECOVERY=yes EXPERIMENTAL=yes
+ - BIGNUM=no ENDOMORPHISM=yes RECOVERY=yes EXPERIMENTAL=yes
- BIGNUM=no STATICPRECOMPUTATION=no
- BUILD=distcheck
- EXTRAFLAGS=CPPFLAGS=-DDETERMINISTIC
- EXTRAFLAGS=CFLAGS=-O0
- - BUILD=check-java ECDH=yes SCHNORR=yes EXPERIMENTAL=yes
+ - BUILD=check-java ECDH=yes EXPERIMENTAL=yes
matrix:
fast_finish: true
include:
@@ -66,5 +65,5 @@ before_script: ./autogen.sh
script:
- if [ -n "$HOST" ]; then export USE_HOST="--host=$HOST"; fi
- if [ "x$HOST" = "xi686-linux-gnu" ]; then export CC="$CC -m32"; fi
- - ./configure --enable-experimental=$EXPERIMENTAL --enable-endomorphism=$ENDOMORPHISM --with-field=$FIELD --with-bignum=$BIGNUM --with-scalar=$SCALAR --enable-ecmult-static-precomputation=$STATICPRECOMPUTATION --enable-module-ecdh=$ECDH --enable-module-schnorr=$SCHNORR --enable-module-recovery=$RECOVERY $EXTRAFLAGS $USE_HOST && make -j2 $BUILD
+ - ./configure --enable-experimental=$EXPERIMENTAL --enable-endomorphism=$ENDOMORPHISM --with-field=$FIELD --with-bignum=$BIGNUM --with-scalar=$SCALAR --enable-ecmult-static-precomputation=$STATICPRECOMPUTATION --enable-module-ecdh=$ECDH --enable-module-recovery=$RECOVERY $EXTRAFLAGS $USE_HOST && make -j2 $BUILD
os: linux
diff --git a/src/secp256k1/Makefile.am b/src/secp256k1/Makefile.am
index 3d130bdcbd..e5657f7f31 100644
--- a/src/secp256k1/Makefile.am
+++ b/src/secp256k1/Makefile.am
@@ -12,9 +12,11 @@ noinst_HEADERS =
noinst_HEADERS += src/scalar.h
noinst_HEADERS += src/scalar_4x64.h
noinst_HEADERS += src/scalar_8x32.h
+noinst_HEADERS += src/scalar_low.h
noinst_HEADERS += src/scalar_impl.h
noinst_HEADERS += src/scalar_4x64_impl.h
noinst_HEADERS += src/scalar_8x32_impl.h
+noinst_HEADERS += src/scalar_low_impl.h
noinst_HEADERS += src/group.h
noinst_HEADERS += src/group_impl.h
noinst_HEADERS += src/num_gmp.h
@@ -87,13 +89,23 @@ bench_internal_LDADD = $(SECP_LIBS) $(COMMON_LIB)
bench_internal_CPPFLAGS = -DSECP256K1_BUILD $(SECP_INCLUDES)
endif
+TESTS =
if USE_TESTS
noinst_PROGRAMS += tests
tests_SOURCES = src/tests.c
tests_CPPFLAGS = -DSECP256K1_BUILD -DVERIFY -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES)
tests_LDADD = $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB)
tests_LDFLAGS = -static
-TESTS = tests
+TESTS += tests
+endif
+
+if USE_EXHAUSTIVE_TESTS
+noinst_PROGRAMS += exhaustive_tests
+exhaustive_tests_SOURCES = src/tests_exhaustive.c
+exhaustive_tests_CPPFLAGS = -DSECP256K1_BUILD -DVERIFY -I$(top_srcdir)/src $(SECP_INCLUDES)
+exhaustive_tests_LDADD = $(SECP_LIBS)
+exhaustive_tests_LDFLAGS = -static
+TESTS += exhaustive_tests
endif
JAVAROOT=src/java
@@ -154,10 +166,6 @@ if ENABLE_MODULE_ECDH
include src/modules/ecdh/Makefile.am.include
endif
-if ENABLE_MODULE_SCHNORR
-include src/modules/schnorr/Makefile.am.include
-endif
-
if ENABLE_MODULE_RECOVERY
include src/modules/recovery/Makefile.am.include
endif
diff --git a/src/secp256k1/build-aux/m4/bitcoin_secp.m4 b/src/secp256k1/build-aux/m4/bitcoin_secp.m4
index b25d8adb92..b74acb8c13 100644
--- a/src/secp256k1/build-aux/m4/bitcoin_secp.m4
+++ b/src/secp256k1/build-aux/m4/bitcoin_secp.m4
@@ -46,6 +46,10 @@ if test x"$has_libcrypto" = x"yes" && test x"$has_openssl_ec" = x; then
ECDSA_sign(0, NULL, 0, NULL, NULL, eckey);
ECDSA_verify(0, NULL, 0, NULL, 0, eckey);
EC_KEY_free(eckey);
+ ECDSA_SIG *sig_openssl;
+ sig_openssl = ECDSA_SIG_new();
+ (void)sig_openssl->r;
+ ECDSA_SIG_free(sig_openssl);
]])],[has_openssl_ec=yes],[has_openssl_ec=no])
AC_MSG_RESULT([$has_openssl_ec])
fi
diff --git a/src/secp256k1/configure.ac b/src/secp256k1/configure.ac
index 0743c36690..ec50ffe3a2 100644
--- a/src/secp256k1/configure.ac
+++ b/src/secp256k1/configure.ac
@@ -104,6 +104,11 @@ AC_ARG_ENABLE(experimental,
[use_experimental=$enableval],
[use_experimental=no])
+AC_ARG_ENABLE(exhaustive_tests,
+ AS_HELP_STRING([--enable-exhaustive-tests],[compile exhaustive tests (default is yes)]),
+ [use_exhaustive_tests=$enableval],
+ [use_exhaustive_tests=yes])
+
AC_ARG_ENABLE(endomorphism,
AS_HELP_STRING([--enable-endomorphism],[enable endomorphism (default is no)]),
[use_endomorphism=$enableval],
@@ -119,11 +124,6 @@ AC_ARG_ENABLE(module_ecdh,
[enable_module_ecdh=$enableval],
[enable_module_ecdh=no])
-AC_ARG_ENABLE(module_schnorr,
- AS_HELP_STRING([--enable-module-schnorr],[enable Schnorr signature module (experimental)]),
- [enable_module_schnorr=$enableval],
- [enable_module_schnorr=no])
-
AC_ARG_ENABLE(module_recovery,
AS_HELP_STRING([--enable-module-recovery],[enable ECDSA pubkey recovery module (default is no)]),
[enable_module_recovery=$enableval],
@@ -381,9 +381,6 @@ fi
if test x"$use_jni" != x"no"; then
AX_JNI_INCLUDE_DIR
have_jni_dependencies=yes
- if test x"$enable_module_schnorr" = x"no"; then
- have_jni_dependencies=no
- fi
if test x"$enable_module_ecdh" = x"no"; then
have_jni_dependencies=no
fi
@@ -392,7 +389,7 @@ if test x"$use_jni" != x"no"; then
fi
if test "x$have_jni_dependencies" = "xno"; then
if test x"$use_jni" = x"yes"; then
- AC_MSG_ERROR([jni support explicitly requested but headers/dependencies were not found. Enable ECDH and Schnorr and try again.])
+ AC_MSG_ERROR([jni support explicitly requested but headers/dependencies were not found. Enable ECDH and try again.])
fi
AC_MSG_WARN([jni headers/dependencies not found. jni support disabled])
use_jni=no
@@ -413,7 +410,7 @@ if test x"$use_endomorphism" = x"yes"; then
AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization])
fi
-if test x"$use_ecmult_static_precomputation" = x"yes"; then
+if test x"$set_precomp" = x"yes"; then
AC_DEFINE(USE_ECMULT_STATIC_PRECOMPUTATION, 1, [Define this symbol to use a statically generated ecmult table])
fi
@@ -421,10 +418,6 @@ if test x"$enable_module_ecdh" = x"yes"; then
AC_DEFINE(ENABLE_MODULE_ECDH, 1, [Define this symbol to enable the ECDH module])
fi
-if test x"$enable_module_schnorr" = x"yes"; then
- AC_DEFINE(ENABLE_MODULE_SCHNORR, 1, [Define this symbol to enable the Schnorr signature module])
-fi
-
if test x"$enable_module_recovery" = x"yes"; then
AC_DEFINE(ENABLE_MODULE_RECOVERY, 1, [Define this symbol to enable the ECDSA pubkey recovery module])
fi
@@ -442,7 +435,6 @@ AC_MSG_NOTICE([Using bignum implementation: $set_bignum])
AC_MSG_NOTICE([Using scalar implementation: $set_scalar])
AC_MSG_NOTICE([Using endomorphism optimizations: $use_endomorphism])
AC_MSG_NOTICE([Building ECDH module: $enable_module_ecdh])
-AC_MSG_NOTICE([Building Schnorr signatures module: $enable_module_schnorr])
AC_MSG_NOTICE([Building ECDSA pubkey recovery module: $enable_module_recovery])
AC_MSG_NOTICE([Using jni: $use_jni])
@@ -451,12 +443,8 @@ if test x"$enable_experimental" = x"yes"; then
AC_MSG_NOTICE([WARNING: experimental build])
AC_MSG_NOTICE([Experimental features do not have stable APIs or properties, and may not be safe for production use.])
AC_MSG_NOTICE([Building ECDH module: $enable_module_ecdh])
- AC_MSG_NOTICE([Building Schnorr signatures module: $enable_module_schnorr])
AC_MSG_NOTICE([******])
else
- if test x"$enable_module_schnorr" = x"yes"; then
- AC_MSG_ERROR([Schnorr signature module is experimental. Use --enable-experimental to allow.])
- fi
if test x"$enable_module_ecdh" = x"yes"; then
AC_MSG_ERROR([ECDH module is experimental. Use --enable-experimental to allow.])
fi
@@ -473,10 +461,10 @@ AC_SUBST(SECP_LIBS)
AC_SUBST(SECP_TEST_LIBS)
AC_SUBST(SECP_TEST_INCLUDES)
AM_CONDITIONAL([USE_TESTS], [test x"$use_tests" != x"no"])
+AM_CONDITIONAL([USE_EXHAUSTIVE_TESTS], [test x"$use_exhaustive_tests" != x"no"])
AM_CONDITIONAL([USE_BENCHMARK], [test x"$use_benchmark" = x"yes"])
-AM_CONDITIONAL([USE_ECMULT_STATIC_PRECOMPUTATION], [test x"$use_ecmult_static_precomputation" = x"yes"])
+AM_CONDITIONAL([USE_ECMULT_STATIC_PRECOMPUTATION], [test x"$set_precomp" = x"yes"])
AM_CONDITIONAL([ENABLE_MODULE_ECDH], [test x"$enable_module_ecdh" = x"yes"])
-AM_CONDITIONAL([ENABLE_MODULE_SCHNORR], [test x"$enable_module_schnorr" = x"yes"])
AM_CONDITIONAL([ENABLE_MODULE_RECOVERY], [test x"$enable_module_recovery" = x"yes"])
AM_CONDITIONAL([USE_JNI], [test x"$use_jni" == x"yes"])
AM_CONDITIONAL([USE_EXTERNAL_ASM], [test x"$use_external_asm" = x"yes"])
diff --git a/src/secp256k1/include/secp256k1.h b/src/secp256k1/include/secp256k1.h
index 7145dbcc54..f268e309d0 100644
--- a/src/secp256k1/include/secp256k1.h
+++ b/src/secp256k1/include/secp256k1.h
@@ -47,11 +47,8 @@ typedef struct secp256k1_context_struct secp256k1_context;
* The exact representation of data inside is implementation defined and not
* guaranteed to be portable between different platforms or versions. It is
* however guaranteed to be 64 bytes in size, and can be safely copied/moved.
- * If you need to convert to a format suitable for storage or transmission, use
- * secp256k1_ec_pubkey_serialize and secp256k1_ec_pubkey_parse.
- *
- * Furthermore, it is guaranteed that identical public keys (ignoring
- * compression) will have identical representation, so they can be memcmp'ed.
+ * If you need to convert to a format suitable for storage, transmission, or
+ * comparison, use secp256k1_ec_pubkey_serialize and secp256k1_ec_pubkey_parse.
*/
typedef struct {
unsigned char data[64];
@@ -62,12 +59,9 @@ typedef struct {
* The exact representation of data inside is implementation defined and not
* guaranteed to be portable between different platforms or versions. It is
* however guaranteed to be 64 bytes in size, and can be safely copied/moved.
- * If you need to convert to a format suitable for storage or transmission, use
- * the secp256k1_ecdsa_signature_serialize_* and
+ * If you need to convert to a format suitable for storage, transmission, or
+ * comparison, use the secp256k1_ecdsa_signature_serialize_* and
* secp256k1_ecdsa_signature_serialize_* functions.
- *
- * Furthermore, it is guaranteed to identical signatures will have identical
- * representation, so they can be memcmp'ed.
*/
typedef struct {
unsigned char data[64];
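
Since the memcmp guarantee is dropped from both opaque types, callers that compared secp256k1_pubkey or secp256k1_ecdsa_signature structs byte-for-byte should compare serializations instead. A sketch for the pubkey case, assuming an installed <secp256k1.h> header and a context created with secp256k1_context_create:

    // Sketch only: compare two parsed public keys via their compressed
    // serializations rather than memcmp on the opaque structs.
    #include <cstring>
    #include <secp256k1.h>

    static bool pubkey_equal(const secp256k1_context* ctx,
                             const secp256k1_pubkey* a, const secp256k1_pubkey* b) {
        unsigned char sa[33], sb[33];
        size_t la = sizeof(sa), lb = sizeof(sb);
        secp256k1_ec_pubkey_serialize(ctx, sa, &la, a, SECP256K1_EC_COMPRESSED);
        secp256k1_ec_pubkey_serialize(ctx, sb, &lb, b, SECP256K1_EC_COMPRESSED);
        return la == lb && std::memcmp(sa, sb, la) == 0;
    }
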
diff --git a/src/secp256k1/include/secp256k1_schnorr.h b/src/secp256k1/include/secp256k1_schnorr.h
deleted file mode 100644
index dc32fec1ea..0000000000
--- a/src/secp256k1/include/secp256k1_schnorr.h
+++ /dev/null
@@ -1,173 +0,0 @@
-#ifndef _SECP256K1_SCHNORR_
-# define _SECP256K1_SCHNORR_
-
-# include "secp256k1.h"
-
-# ifdef __cplusplus
-extern "C" {
-# endif
-
-/** Create a signature using a custom EC-Schnorr-SHA256 construction. It
- * produces non-malleable 64-byte signatures which support public key recovery
- * batch validation, and multiparty signing.
- * Returns: 1: signature created
- * 0: the nonce generation function failed, or the private key was
- * invalid.
- * Args: ctx: pointer to a context object, initialized for signing
- * (cannot be NULL)
- * Out: sig64: pointer to a 64-byte array where the signature will be
- * placed (cannot be NULL)
- * In: msg32: the 32-byte message hash being signed (cannot be NULL)
- * seckey: pointer to a 32-byte secret key (cannot be NULL)
- * noncefp:pointer to a nonce generation function. If NULL,
- * secp256k1_nonce_function_default is used
- * ndata: pointer to arbitrary data used by the nonce generation
- * function (can be NULL)
- */
-SECP256K1_API int secp256k1_schnorr_sign(
- const secp256k1_context* ctx,
- unsigned char *sig64,
- const unsigned char *msg32,
- const unsigned char *seckey,
- secp256k1_nonce_function noncefp,
- const void *ndata
-) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
-
-/** Verify a signature created by secp256k1_schnorr_sign.
- * Returns: 1: correct signature
- * 0: incorrect signature
- * Args: ctx: a secp256k1 context object, initialized for verification.
- * In: sig64: the 64-byte signature being verified (cannot be NULL)
- * msg32: the 32-byte message hash being verified (cannot be NULL)
- * pubkey: the public key to verify with (cannot be NULL)
- */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorr_verify(
- const secp256k1_context* ctx,
- const unsigned char *sig64,
- const unsigned char *msg32,
- const secp256k1_pubkey *pubkey
-) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
-
-/** Recover an EC public key from a Schnorr signature created using
- * secp256k1_schnorr_sign.
- * Returns: 1: public key successfully recovered (which guarantees a correct
- * signature).
- * 0: otherwise.
- * Args: ctx: pointer to a context object, initialized for
- * verification (cannot be NULL)
- * Out: pubkey: pointer to a pubkey to set to the recovered public key
- * (cannot be NULL).
- * In: sig64: signature as 64 byte array (cannot be NULL)
- * msg32: the 32-byte message hash assumed to be signed (cannot
- * be NULL)
- */
-SECP256K1_API int secp256k1_schnorr_recover(
- const secp256k1_context* ctx,
- secp256k1_pubkey *pubkey,
- const unsigned char *sig64,
- const unsigned char *msg32
-) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
-
-/** Generate a nonce pair deterministically for use with
- * secp256k1_schnorr_partial_sign.
- * Returns: 1: valid nonce pair was generated.
- * 0: otherwise (nonce generation function failed)
- * Args: ctx: pointer to a context object, initialized for signing
- * (cannot be NULL)
- * Out: pubnonce: public side of the nonce (cannot be NULL)
- * privnonce32: private side of the nonce (32 byte) (cannot be NULL)
- * In: msg32: the 32-byte message hash assumed to be signed (cannot
- * be NULL)
- * sec32: the 32-byte private key (cannot be NULL)
- * noncefp: pointer to a nonce generation function. If NULL,
- * secp256k1_nonce_function_default is used
- * noncedata: pointer to arbitrary data used by the nonce generation
- * function (can be NULL)
- *
- * Do not use the output as a private/public key pair for signing/validation.
- */
-SECP256K1_API int secp256k1_schnorr_generate_nonce_pair(
- const secp256k1_context* ctx,
- secp256k1_pubkey *pubnonce,
- unsigned char *privnonce32,
- const unsigned char *msg32,
- const unsigned char *sec32,
- secp256k1_nonce_function noncefp,
- const void* noncedata
-) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
-
-/** Produce a partial Schnorr signature, which can be combined using
- * secp256k1_schnorr_partial_combine, to end up with a full signature that is
- * verifiable using secp256k1_schnorr_verify.
- * Returns: 1: signature created successfully.
- * 0: no valid signature exists with this combination of keys, nonces
- * and message (chance around 1 in 2^128)
- * -1: invalid private key, nonce, or public nonces.
- * Args: ctx: pointer to context object, initialized for signing (cannot
- * be NULL)
- * Out: sig64: pointer to 64-byte array to put partial signature in
- * In: msg32: pointer to 32-byte message to sign
- * sec32: pointer to 32-byte private key
- * pubnonce_others: pointer to pubkey containing the sum of the other's
- * nonces (see secp256k1_ec_pubkey_combine)
- * secnonce32: pointer to 32-byte array containing our nonce
- *
- * The intended procedure for creating a multiparty signature is:
- * - Each signer S[i] with private key x[i] and public key Q[i] runs
- * secp256k1_schnorr_generate_nonce_pair to produce a pair (k[i],R[i]) of
- * private/public nonces.
- * - All signers communicate their public nonces to each other (revealing your
- * private nonce can lead to discovery of your private key, so it should be
- * considered secret).
- * - All signers combine all the public nonces they received (excluding their
- * own) using secp256k1_ec_pubkey_combine to obtain an
- * Rall[i] = sum(R[0..i-1,i+1..n]).
- * - All signers produce a partial signature using
- * secp256k1_schnorr_partial_sign, passing in their own private key x[i],
- * their own private nonce k[i], and the sum of the others' public nonces
- * Rall[i].
- * - All signers communicate their partial signatures to each other.
- * - Someone combines all partial signatures using
- * secp256k1_schnorr_partial_combine, to obtain a full signature.
- * - The resulting signature is validatable using secp256k1_schnorr_verify, with
- * public key equal to the result of secp256k1_ec_pubkey_combine of the
- * signers' public keys (sum(Q[0..n])).
- *
- * Note that secp256k1_schnorr_partial_combine and secp256k1_ec_pubkey_combine
- * function take their arguments in any order, and it is possible to
- * pre-combine several inputs already with one call, and add more inputs later
- * by calling the function again (they are commutative and associative).
- */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorr_partial_sign(
- const secp256k1_context* ctx,
- unsigned char *sig64,
- const unsigned char *msg32,
- const unsigned char *sec32,
- const secp256k1_pubkey *pubnonce_others,
- const unsigned char *secnonce32
-) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(6);
-
-/** Combine multiple Schnorr partial signatures.
- * Returns: 1: the passed signatures were successfully combined.
- * 0: the resulting signature is not valid (chance of 1 in 2^256)
- * -1: some inputs were invalid, or the signatures were not created
- * using the same set of nonces
- * Args: ctx: pointer to a context object
- * Out: sig64: pointer to a 64-byte array to place the combined signature
- * (cannot be NULL)
- * In: sig64sin: pointer to an array of n pointers to 64-byte input
- * signatures
- * n: the number of signatures to combine (at least 1)
- */
-SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorr_partial_combine(
- const secp256k1_context* ctx,
- unsigned char *sig64,
- const unsigned char * const * sig64sin,
- size_t n
-) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
-
-# ifdef __cplusplus
-}
-# endif
-
-#endif
diff --git a/src/secp256k1/src/ecdsa_impl.h b/src/secp256k1/src/ecdsa_impl.h
index d110b4bb1d..9a42e519bd 100644
--- a/src/secp256k1/src/ecdsa_impl.h
+++ b/src/secp256k1/src/ecdsa_impl.h
@@ -203,7 +203,9 @@ static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const
static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const secp256k1_scalar *sigr, const secp256k1_scalar *sigs, const secp256k1_ge *pubkey, const secp256k1_scalar *message) {
unsigned char c[32];
secp256k1_scalar sn, u1, u2;
+#if !defined(EXHAUSTIVE_TEST_ORDER)
secp256k1_fe xr;
+#endif
secp256k1_gej pubkeyj;
secp256k1_gej pr;
@@ -219,6 +221,21 @@ static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const
if (secp256k1_gej_is_infinity(&pr)) {
return 0;
}
+
+#if defined(EXHAUSTIVE_TEST_ORDER)
+{
+ secp256k1_scalar computed_r;
+ int overflow = 0;
+ secp256k1_ge pr_ge;
+ secp256k1_ge_set_gej(&pr_ge, &pr);
+ secp256k1_fe_normalize(&pr_ge.x);
+
+ secp256k1_fe_get_b32(c, &pr_ge.x);
+ secp256k1_scalar_set_b32(&computed_r, c, &overflow);
+ /* we fully expect overflow */
+ return secp256k1_scalar_eq(sigr, &computed_r);
+}
+#else
secp256k1_scalar_get_b32(c, sigr);
secp256k1_fe_set_b32(&xr, c);
@@ -252,6 +269,7 @@ static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const
return 1;
}
return 0;
+#endif
}
static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context *ctx, secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *seckey, const secp256k1_scalar *message, const secp256k1_scalar *nonce, int *recid) {
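
In the EXHAUSTIVE_TEST_ORDER branch added above, the group order is tiny (13 or 199) while R.x is still a full-width field element, so reducing it into a scalar is expected to set the overflow flag; that is what the "we fully expect overflow" comment refers to. A toy, stand-alone illustration of that reduction (not libsecp256k1 code):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const std::uint64_t order = 199;           // one of the supported exhaustive-test orders
        const std::uint64_t x_coord = 0x9EA66C18;  // any full-width x coordinate value
        std::printf("r = %llu, overflowed: %s\n",
                    (unsigned long long)(x_coord % order),
                    x_coord >= order ? "yes" : "no");
        return 0;
    }
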
diff --git a/src/secp256k1/src/ecmult_const_impl.h b/src/secp256k1/src/ecmult_const_impl.h
index 7a6a25318c..0db314c48e 100644
--- a/src/secp256k1/src/ecmult_const_impl.h
+++ b/src/secp256k1/src/ecmult_const_impl.h
@@ -78,7 +78,7 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) {
/* Negative numbers will be negated to keep their bit representation below the maximum width */
flip = secp256k1_scalar_is_high(&s);
/* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
- bit = flip ^ (s.d[0] & 1);
+ bit = flip ^ !secp256k1_scalar_is_even(&s);
/* We check for negative one, since adding 2 to it will cause an overflow */
secp256k1_scalar_negate(&neg_s, &s);
not_neg_one = !secp256k1_scalar_is_one(&neg_s);
diff --git a/src/secp256k1/src/ecmult_gen_impl.h b/src/secp256k1/src/ecmult_gen_impl.h
index b63c4d8662..35f2546077 100644
--- a/src/secp256k1/src/ecmult_gen_impl.h
+++ b/src/secp256k1/src/ecmult_gen_impl.h
@@ -77,7 +77,7 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx
secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
}
}
- secp256k1_ge_set_all_gej_var(1024, prec, precj, cb);
+ secp256k1_ge_set_all_gej_var(prec, precj, 1024, cb);
}
for (j = 0; j < 64; j++) {
for (i = 0; i < 16; i++) {
diff --git a/src/secp256k1/src/ecmult_impl.h b/src/secp256k1/src/ecmult_impl.h
index 81ae08e100..4e40104ad4 100644
--- a/src/secp256k1/src/ecmult_impl.h
+++ b/src/secp256k1/src/ecmult_impl.h
@@ -7,15 +7,29 @@
#ifndef _SECP256K1_ECMULT_IMPL_H_
#define _SECP256K1_ECMULT_IMPL_H_
+#include <string.h>
+
#include "group.h"
#include "scalar.h"
#include "ecmult.h"
-#include <string.h>
-
+#if defined(EXHAUSTIVE_TEST_ORDER)
+/* We need to lower these values for exhaustive tests because
+ * the tables cannot have infinities in them (this breaks the
+ * affine-isomorphism stuff which tracks z-ratios) */
+# if EXHAUSTIVE_TEST_ORDER > 128
+# define WINDOW_A 5
+# define WINDOW_G 8
+# elif EXHAUSTIVE_TEST_ORDER > 8
+# define WINDOW_A 4
+# define WINDOW_G 4
+# else
+# define WINDOW_A 2
+# define WINDOW_G 2
+# endif
+#else
/* optimal for 128-bit and 256-bit exponents. */
#define WINDOW_A 5
-
/** larger numbers may result in slightly better performance, at the cost of
exponentially larger precomputed tables. */
#ifdef USE_ENDOMORPHISM
@@ -25,6 +39,7 @@
/** One table for window size 16: 1.375 MiB. */
#define WINDOW_G 16
#endif
+#endif
/** The number of entries a table with precomputed multiples needs to have. */
#define ECMULT_TABLE_SIZE(w) (1 << ((w)-2))
@@ -103,7 +118,7 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(int n, secp256k1_ge
/* Compute the odd multiples in Jacobian form. */
secp256k1_ecmult_odd_multiples_table(n, prej, zr, a);
/* Convert them in batch to affine coordinates. */
- secp256k1_ge_set_table_gej_var(n, prea, prej, zr);
+ secp256k1_ge_set_table_gej_var(prea, prej, zr, n);
/* Convert them to compact storage form. */
for (i = 0; i < n; i++) {
secp256k1_ge_to_storage(&pre[i], &prea[i]);
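
For reference, ECMULT_TABLE_SIZE(w) is 1 << (w - 2), so the smaller windows selected for exhaustive-test builds keep the odd-multiples tables small enough to avoid the infinities mentioned in the comment above. An illustrative check of the entry counts:

    #include <cstdio>

    int main() {
        const int windows[] = {2, 4, 5, 8, 16};  // window sizes appearing in the block above
        for (int w : windows)
            std::printf("window %2d -> %5d precomputed odd multiples\n", w, 1 << (w - 2));
        return 0;
    }
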
diff --git a/src/secp256k1/src/field.h b/src/secp256k1/src/field.h
index c5ba074244..bbb1ee866c 100644
--- a/src/secp256k1/src/field.h
+++ b/src/secp256k1/src/field.h
@@ -30,6 +30,8 @@
#error "Please select field implementation"
#endif
+#include "util.h"
+
/** Normalize a field element. */
static void secp256k1_fe_normalize(secp256k1_fe *r);
@@ -50,6 +52,9 @@ static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r);
/** Set a field element equal to a small integer. Resulting field element is normalized. */
static void secp256k1_fe_set_int(secp256k1_fe *r, int a);
+/** Sets a field element equal to zero, initializing all fields. */
+static void secp256k1_fe_clear(secp256k1_fe *a);
+
/** Verify whether a field element is zero. Requires the input to be normalized. */
static int secp256k1_fe_is_zero(const secp256k1_fe *a);
@@ -110,7 +115,7 @@ static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a);
/** Calculate the (modular) inverses of a batch of field elements. Requires the inputs' magnitudes to be
* at most 8. The output magnitudes are 1 (but not guaranteed to be normalized). The inputs and
* outputs must not overlap in memory. */
-static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe *r, const secp256k1_fe *a);
+static void secp256k1_fe_inv_all_var(secp256k1_fe *r, const secp256k1_fe *a, size_t len);
/** Convert a field element to the storage type. */
static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a);
diff --git a/src/secp256k1/src/field_impl.h b/src/secp256k1/src/field_impl.h
index 52cd902eb3..5127b279bc 100644
--- a/src/secp256k1/src/field_impl.h
+++ b/src/secp256k1/src/field_impl.h
@@ -260,7 +260,7 @@ static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) {
#endif
}
-static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe *r, const secp256k1_fe *a) {
+static void secp256k1_fe_inv_all_var(secp256k1_fe *r, const secp256k1_fe *a, size_t len) {
secp256k1_fe u;
size_t i;
if (len < 1) {
diff --git a/src/secp256k1/src/group.h b/src/secp256k1/src/group.h
index d515716744..4957b248fe 100644
--- a/src/secp256k1/src/group.h
+++ b/src/secp256k1/src/group.h
@@ -65,12 +65,12 @@ static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a);
static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a);
/** Set a batch of group elements equal to the inputs given in jacobian coordinates */
-static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_callback *cb);
+static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len, const secp256k1_callback *cb);
/** Set a batch of group elements equal to the inputs given in jacobian
* coordinates (with known z-ratios). zr must contain the known z-ratios such
* that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. */
-static void secp256k1_ge_set_table_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr);
+static void secp256k1_ge_set_table_gej_var(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr, size_t len);
/** Bring a batch inputs given in jacobian coordinates (with known z-ratios) to
* the same global z "denominator". zr must contain the known z-ratios such
diff --git a/src/secp256k1/src/group_impl.h b/src/secp256k1/src/group_impl.h
index 3e9c4c410d..2e192b62fd 100644
--- a/src/secp256k1/src/group_impl.h
+++ b/src/secp256k1/src/group_impl.h
@@ -11,6 +11,53 @@
#include "field.h"
#include "group.h"
+/* These points can be generated in sage as follows:
+ *
+ * 0. Setup a worksheet with the following parameters.
+ * b = 4 # whatever CURVE_B will be set to
+ * F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
+ * C = EllipticCurve ([F (0), F (b)])
+ *
+ * 1. Determine all the small orders available to you. (If there are
+ * no satisfactory ones, go back and change b.)
+ * print C.order().factor(limit=1000)
+ *
+ * 2. Choose an order as one of the prime factors listed in the above step.
+ * (You can also multiply some to get a composite order, though the
+ * tests will crash trying to invert scalars during signing.) We take a
+ * random point and scale it to drop its order to the desired value.
+ * There is some probability this won't work; just try again.
+ * order = 199
+ * P = C.random_point()
+ * P = (int(P.order()) / int(order)) * P
+ * assert(P.order() == order)
+ *
+ * 3. Print the values. You'll need to use a vim macro or something to
+ * split the hex output into 4-byte chunks.
+ * print "%x %x" % P.xy()
+ */
+#if defined(EXHAUSTIVE_TEST_ORDER)
+# if EXHAUSTIVE_TEST_ORDER == 199
+const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
+ 0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069,
+ 0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18,
+ 0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868,
+ 0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED
+);
+
+const int CURVE_B = 4;
+# elif EXHAUSTIVE_TEST_ORDER == 13
+const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
+ 0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0,
+ 0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15,
+ 0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e,
+ 0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac
+);
+const int CURVE_B = 2;
+# else
+# error No known generator for the specified exhaustive test group order.
+# endif
+#else
/** Generator for secp256k1, value 'g' defined in
* "Standards for Efficient Cryptography" (SEC2) 2.7.1.
*/
@@ -21,8 +68,11 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL
);
+const int CURVE_B = 7;
+#endif
+
static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) {
- secp256k1_fe zi2;
+ secp256k1_fe zi2;
secp256k1_fe zi3;
secp256k1_fe_sqr(&zi2, zi);
secp256k1_fe_mul(&zi3, &zi2, zi);
@@ -76,7 +126,7 @@ static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) {
r->y = a->y;
}
-static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_callback *cb) {
+static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len, const secp256k1_callback *cb) {
secp256k1_fe *az;
secp256k1_fe *azi;
size_t i;
@@ -89,7 +139,7 @@ static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge *r, const secp
}
azi = (secp256k1_fe *)checked_malloc(cb, sizeof(secp256k1_fe) * count);
- secp256k1_fe_inv_all_var(count, azi, az);
+ secp256k1_fe_inv_all_var(azi, az, count);
free(az);
count = 0;
@@ -102,7 +152,7 @@ static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge *r, const secp
free(azi);
}
-static void secp256k1_ge_set_table_gej_var(size_t len, secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr) {
+static void secp256k1_ge_set_table_gej_var(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr, size_t len) {
size_t i = len - 1;
secp256k1_fe zi;
@@ -145,9 +195,15 @@ static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp
static void secp256k1_gej_set_infinity(secp256k1_gej *r) {
r->infinity = 1;
- secp256k1_fe_set_int(&r->x, 0);
- secp256k1_fe_set_int(&r->y, 0);
- secp256k1_fe_set_int(&r->z, 0);
+ secp256k1_fe_clear(&r->x);
+ secp256k1_fe_clear(&r->y);
+ secp256k1_fe_clear(&r->z);
+}
+
+static void secp256k1_ge_set_infinity(secp256k1_ge *r) {
+ r->infinity = 1;
+ secp256k1_fe_clear(&r->x);
+ secp256k1_fe_clear(&r->y);
}
static void secp256k1_gej_clear(secp256k1_gej *r) {
@@ -169,7 +225,7 @@ static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) {
secp256k1_fe_sqr(&x2, x);
secp256k1_fe_mul(&x3, x, &x2);
r->infinity = 0;
- secp256k1_fe_set_int(&c, 7);
+ secp256k1_fe_set_int(&c, CURVE_B);
secp256k1_fe_add(&c, &x3);
return secp256k1_fe_sqrt(&r->y, &c);
}
@@ -228,7 +284,7 @@ static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) {
secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
secp256k1_fe_sqr(&z2, &a->z);
secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2);
- secp256k1_fe_mul_int(&z6, 7);
+ secp256k1_fe_mul_int(&z6, CURVE_B);
secp256k1_fe_add(&x3, &z6);
secp256k1_fe_normalize_weak(&x3);
return secp256k1_fe_equal_var(&y2, &x3);
@@ -242,7 +298,7 @@ static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
/* y^2 = x^3 + 7 */
secp256k1_fe_sqr(&y2, &a->y);
secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
- secp256k1_fe_set_int(&c, 7);
+ secp256k1_fe_set_int(&c, CURVE_B);
secp256k1_fe_add(&x3, &c);
secp256k1_fe_normalize_weak(&x3);
return secp256k1_fe_equal_var(&y2, &x3);
@@ -260,7 +316,7 @@ static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, s
/** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
* Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have
* y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p.
- *
+ *
* Having said this, if this function receives a point on a sextic twist, e.g. by
* a fault attack, it is possible for y to be 0. This happens for y^2 = x^3 + 6,
* since -6 does have a cube root mod p. For this point, this function will not set
diff --git a/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1.java b/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1.java
index be67048fbe..1c67802fba 100644
--- a/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1.java
+++ b/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1.java
@@ -32,7 +32,7 @@ import static org.bitcoin.NativeSecp256k1Util.*;
* <p>You can find an example library that can be used for this at https://github.com/bitcoin/secp256k1</p>
*
* <p>To build secp256k1 for use with bitcoinj, run
- * `./configure --enable-jni --enable-experimental --enable-module-schnorr --enable-module-ecdh`
+ * `./configure --enable-jni --enable-experimental --enable-module-ecdh`
* and `make` then copy `.libs/libsecp256k1.so` to your system library path
* or point the JVM to the folder containing it with -Djava.library.path
* </p>
@@ -417,36 +417,6 @@ public class NativeSecp256k1 {
}
}
- public static byte[] schnorrSign(byte[] data, byte[] sec) throws AssertFailException {
- Preconditions.checkArgument(data.length == 32 && sec.length <= 32);
-
- ByteBuffer byteBuff = nativeECDSABuffer.get();
- if (byteBuff == null) {
- byteBuff = ByteBuffer.allocateDirect(32 + 32);
- byteBuff.order(ByteOrder.nativeOrder());
- nativeECDSABuffer.set(byteBuff);
- }
- byteBuff.rewind();
- byteBuff.put(data);
- byteBuff.put(sec);
-
- byte[][] retByteArray;
-
- r.lock();
- try {
- retByteArray = secp256k1_schnorr_sign(byteBuff, Secp256k1Context.getContext());
- } finally {
- r.unlock();
- }
-
- byte[] sigArr = retByteArray[0];
- int retVal = new BigInteger(new byte[] { retByteArray[1][0] }).intValue();
-
- assertEquals(sigArr.length, 64, "Got bad signature length.");
-
- return retVal == 0 ? new byte[0] : sigArr;
- }
-
private static native long secp256k1_ctx_clone(long context);
private static native int secp256k1_context_randomize(ByteBuffer byteBuff, long context);
@@ -471,8 +441,6 @@ public class NativeSecp256k1 {
private static native byte[][] secp256k1_ec_pubkey_parse(ByteBuffer byteBuff, long context, int inputLen);
- private static native byte[][] secp256k1_schnorr_sign(ByteBuffer byteBuff, long context);
-
private static native byte[][] secp256k1_ecdh(ByteBuffer byteBuff, long context, int inputLen);
}
diff --git a/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java b/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java
index f18ce95810..c00d08899b 100644
--- a/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java
+++ b/src/secp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java
@@ -167,22 +167,6 @@ public class NativeSecp256k1Test {
assertEquals( result, true, "testRandomize");
}
- /**
- * This tests signSchnorr() for a valid secretkey
- */
- public static void testSchnorrSign() throws AssertFailException{
-
- byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A90".toLowerCase()); //sha256hash of "testing"
- byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase());
-
- byte[] resultArr = NativeSecp256k1.schnorrSign(data, sec);
- String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
- assertEquals( sigString, "C5E929AA058B982048760422D3B563749B7D0E50C5EBD8CD2FFC23214BD6A2F1B072C13880997EBA847CF20F2F90FCE07C1CA33A890A4127095A351127F8D95F" , "testSchnorrSign");
- }
-
- /**
- * This tests signSchnorr() for a valid secretkey
- */
public static void testCreateECDHSecret() throws AssertFailException{
byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase());
@@ -216,11 +200,6 @@ public class NativeSecp256k1Test {
testSignPos();
testSignNeg();
- //Test Schnorr (partial support) //TODO
- testSchnorrSign();
- //testSchnorrVerify
- //testSchnorrRecovery
-
//Test privKeyTweakAdd() 1
testPrivKeyTweakAdd_1();
diff --git a/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c b/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c
index dba9524dd4..bcef7b32ce 100644
--- a/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c
+++ b/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c
@@ -5,7 +5,6 @@
#include "include/secp256k1.h"
#include "include/secp256k1_ecdh.h"
#include "include/secp256k1_recovery.h"
-#include "include/secp256k1_schnorr.h"
SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone
@@ -333,39 +332,6 @@ SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1p
return 0;
}
-SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1schnorr_1sign
- (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
-{
- secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
- unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
- unsigned char* secKey = (unsigned char*) (data + 32);
-
- jobjectArray retArray;
- jbyteArray sigArray, intsByteArray;
- unsigned char intsarray[1];
- unsigned char sig[64];
-
- int ret = secp256k1_schnorr_sign(ctx, sig, data, secKey, NULL, NULL);
-
- intsarray[0] = ret;
-
- retArray = (*env)->NewObjectArray(env, 2,
- (*env)->FindClass(env, "[B"),
- (*env)->NewByteArray(env, 1));
-
- sigArray = (*env)->NewByteArray(env, 64);
- (*env)->SetByteArrayRegion(env, sigArray, 0, 64, (jbyte*)sig);
- (*env)->SetObjectArrayElement(env, retArray, 0, sigArray);
-
- intsByteArray = (*env)->NewByteArray(env, 1);
- (*env)->SetByteArrayRegion(env, intsByteArray, 0, 1, (jbyte*)intsarray);
- (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
-
- (void)classObject;
-
- return retArray;
-}
-
SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdh
(JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen)
{
diff --git a/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h b/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h
index 4125a1f523..fe613c9e9e 100644
--- a/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h
+++ b/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h
@@ -106,14 +106,6 @@ SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1e
/*
* Class: org_bitcoin_NativeSecp256k1
- * Method: secp256k1_schnorr_sign
- * Signature: (Ljava/nio/ByteBuffer;JI)[[B
- */
-SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1schnorr_1sign
- (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l);
-
-/*
- * Class: org_bitcoin_NativeSecp256k1
* Method: secp256k1_ecdh
* Signature: (Ljava/nio/ByteBuffer;JI)[[B
*/
diff --git a/src/secp256k1/src/modules/recovery/main_impl.h b/src/secp256k1/src/modules/recovery/main_impl.h
index ec42f4bb6c..86f2f0cb2b 100644..100755
--- a/src/secp256k1/src/modules/recovery/main_impl.h
+++ b/src/secp256k1/src/modules/recovery/main_impl.h
@@ -138,16 +138,15 @@ int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecd
secp256k1_scalar_set_b32(&sec, seckey, &overflow);
/* Fail if the secret key is invalid. */
if (!overflow && !secp256k1_scalar_is_zero(&sec)) {
+ unsigned char nonce32[32];
unsigned int count = 0;
secp256k1_scalar_set_b32(&msg, msg32, NULL);
while (1) {
- unsigned char nonce32[32];
ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count);
if (!ret) {
break;
}
secp256k1_scalar_set_b32(&non, nonce32, &overflow);
- memset(nonce32, 0, 32);
if (!secp256k1_scalar_is_zero(&non) && !overflow) {
if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, &recid)) {
break;
@@ -155,6 +154,7 @@ int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecd
}
count++;
}
+ memset(nonce32, 0, 32);
secp256k1_scalar_clear(&msg);
secp256k1_scalar_clear(&non);
secp256k1_scalar_clear(&sec);
diff --git a/src/secp256k1/src/modules/schnorr/Makefile.am.include b/src/secp256k1/src/modules/schnorr/Makefile.am.include
deleted file mode 100644
index f1af8e8325..0000000000
--- a/src/secp256k1/src/modules/schnorr/Makefile.am.include
+++ /dev/null
@@ -1,10 +0,0 @@
-include_HEADERS += include/secp256k1_schnorr.h
-noinst_HEADERS += src/modules/schnorr/main_impl.h
-noinst_HEADERS += src/modules/schnorr/schnorr.h
-noinst_HEADERS += src/modules/schnorr/schnorr_impl.h
-noinst_HEADERS += src/modules/schnorr/tests_impl.h
-if USE_BENCHMARK
-noinst_PROGRAMS += bench_schnorr_verify
-bench_schnorr_verify_SOURCES = src/bench_schnorr_verify.c
-bench_schnorr_verify_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB)
-endif
diff --git a/src/secp256k1/src/modules/schnorr/main_impl.h b/src/secp256k1/src/modules/schnorr/main_impl.h
deleted file mode 100644
index fa176a1767..0000000000
--- a/src/secp256k1/src/modules/schnorr/main_impl.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/**********************************************************************
- * Copyright (c) 2014-2015 Pieter Wuille *
- * Distributed under the MIT software license, see the accompanying *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
- **********************************************************************/
-
-#ifndef SECP256K1_MODULE_SCHNORR_MAIN
-#define SECP256K1_MODULE_SCHNORR_MAIN
-
-#include "include/secp256k1_schnorr.h"
-#include "modules/schnorr/schnorr_impl.h"
-
-static void secp256k1_schnorr_msghash_sha256(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32) {
- secp256k1_sha256_t sha;
- secp256k1_sha256_initialize(&sha);
- secp256k1_sha256_write(&sha, r32, 32);
- secp256k1_sha256_write(&sha, msg32, 32);
- secp256k1_sha256_finalize(&sha, h32);
-}
-
-static const unsigned char secp256k1_schnorr_algo16[17] = "Schnorr+SHA256 ";
-
-int secp256k1_schnorr_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) {
- secp256k1_scalar sec, non;
- int ret = 0;
- int overflow = 0;
- unsigned int count = 0;
- VERIFY_CHECK(ctx != NULL);
- ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
- ARG_CHECK(msg32 != NULL);
- ARG_CHECK(sig64 != NULL);
- ARG_CHECK(seckey != NULL);
- if (noncefp == NULL) {
- noncefp = secp256k1_nonce_function_default;
- }
-
- secp256k1_scalar_set_b32(&sec, seckey, NULL);
- while (1) {
- unsigned char nonce32[32];
- ret = noncefp(nonce32, msg32, seckey, secp256k1_schnorr_algo16, (void*)noncedata, count);
- if (!ret) {
- break;
- }
- secp256k1_scalar_set_b32(&non, nonce32, &overflow);
- memset(nonce32, 0, 32);
- if (!secp256k1_scalar_is_zero(&non) && !overflow) {
- if (secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, sig64, &sec, &non, NULL, secp256k1_schnorr_msghash_sha256, msg32)) {
- break;
- }
- }
- count++;
- }
- if (!ret) {
- memset(sig64, 0, 64);
- }
- secp256k1_scalar_clear(&non);
- secp256k1_scalar_clear(&sec);
- return ret;
-}
-
-int secp256k1_schnorr_verify(const secp256k1_context* ctx, const unsigned char *sig64, const unsigned char *msg32, const secp256k1_pubkey *pubkey) {
- secp256k1_ge q;
- VERIFY_CHECK(ctx != NULL);
- ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
- ARG_CHECK(msg32 != NULL);
- ARG_CHECK(sig64 != NULL);
- ARG_CHECK(pubkey != NULL);
-
- secp256k1_pubkey_load(ctx, &q, pubkey);
- return secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64, &q, secp256k1_schnorr_msghash_sha256, msg32);
-}
-
-int secp256k1_schnorr_recover(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *sig64, const unsigned char *msg32) {
- secp256k1_ge q;
-
- VERIFY_CHECK(ctx != NULL);
- ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
- ARG_CHECK(msg32 != NULL);
- ARG_CHECK(sig64 != NULL);
- ARG_CHECK(pubkey != NULL);
-
- if (secp256k1_schnorr_sig_recover(&ctx->ecmult_ctx, sig64, &q, secp256k1_schnorr_msghash_sha256, msg32)) {
- secp256k1_pubkey_save(pubkey, &q);
- return 1;
- } else {
- memset(pubkey, 0, sizeof(*pubkey));
- return 0;
- }
-}
-
-int secp256k1_schnorr_generate_nonce_pair(const secp256k1_context* ctx, secp256k1_pubkey *pubnonce, unsigned char *privnonce32, const unsigned char *sec32, const unsigned char *msg32, secp256k1_nonce_function noncefp, const void* noncedata) {
- int count = 0;
- int ret = 1;
- secp256k1_gej Qj;
- secp256k1_ge Q;
- secp256k1_scalar sec;
-
- VERIFY_CHECK(ctx != NULL);
- ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
- ARG_CHECK(msg32 != NULL);
- ARG_CHECK(sec32 != NULL);
- ARG_CHECK(pubnonce != NULL);
- ARG_CHECK(privnonce32 != NULL);
-
- if (noncefp == NULL) {
- noncefp = secp256k1_nonce_function_default;
- }
-
- do {
- int overflow;
- ret = noncefp(privnonce32, sec32, msg32, secp256k1_schnorr_algo16, (void*)noncedata, count++);
- if (!ret) {
- break;
- }
- secp256k1_scalar_set_b32(&sec, privnonce32, &overflow);
- if (overflow || secp256k1_scalar_is_zero(&sec)) {
- continue;
- }
- secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sec);
- secp256k1_ge_set_gej(&Q, &Qj);
-
- secp256k1_pubkey_save(pubnonce, &Q);
- break;
- } while(1);
-
- secp256k1_scalar_clear(&sec);
- if (!ret) {
- memset(pubnonce, 0, sizeof(*pubnonce));
- }
- return ret;
-}
-
-int secp256k1_schnorr_partial_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const unsigned char *sec32, const secp256k1_pubkey *pubnonce_others, const unsigned char *secnonce32) {
- int overflow = 0;
- secp256k1_scalar sec, non;
- secp256k1_ge pubnon;
- VERIFY_CHECK(ctx != NULL);
- ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
- ARG_CHECK(msg32 != NULL);
- ARG_CHECK(sig64 != NULL);
- ARG_CHECK(sec32 != NULL);
- ARG_CHECK(secnonce32 != NULL);
- ARG_CHECK(pubnonce_others != NULL);
-
- secp256k1_scalar_set_b32(&sec, sec32, &overflow);
- if (overflow || secp256k1_scalar_is_zero(&sec)) {
- return -1;
- }
- secp256k1_scalar_set_b32(&non, secnonce32, &overflow);
- if (overflow || secp256k1_scalar_is_zero(&non)) {
- return -1;
- }
- secp256k1_pubkey_load(ctx, &pubnon, pubnonce_others);
- return secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, sig64, &sec, &non, &pubnon, secp256k1_schnorr_msghash_sha256, msg32);
-}
-
-int secp256k1_schnorr_partial_combine(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char * const *sig64sin, size_t n) {
- ARG_CHECK(sig64 != NULL);
- ARG_CHECK(n >= 1);
- ARG_CHECK(sig64sin != NULL);
- return secp256k1_schnorr_sig_combine(sig64, n, sig64sin);
-}
-
-#endif
diff --git a/src/secp256k1/src/modules/schnorr/schnorr.h b/src/secp256k1/src/modules/schnorr/schnorr.h
deleted file mode 100644
index de18147bd5..0000000000
--- a/src/secp256k1/src/modules/schnorr/schnorr.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/***********************************************************************
- * Copyright (c) 2014-2015 Pieter Wuille *
- * Distributed under the MIT software license, see the accompanying *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php. *
- ***********************************************************************/
-
-#ifndef _SECP256K1_MODULE_SCHNORR_H_
-#define _SECP256K1_MODULE_SCHNORR_H_
-
-#include "scalar.h"
-#include "group.h"
-
-typedef void (*secp256k1_schnorr_msghash)(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32);
-
-static int secp256k1_schnorr_sig_sign(const secp256k1_ecmult_gen_context* ctx, unsigned char *sig64, const secp256k1_scalar *key, const secp256k1_scalar *nonce, const secp256k1_ge *pubnonce, secp256k1_schnorr_msghash hash, const unsigned char *msg32);
-static int secp256k1_schnorr_sig_verify(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, const secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32);
-static int secp256k1_schnorr_sig_recover(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32);
-static int secp256k1_schnorr_sig_combine(unsigned char *sig64, size_t n, const unsigned char * const *sig64ins);
-
-#endif
diff --git a/src/secp256k1/src/modules/schnorr/schnorr_impl.h b/src/secp256k1/src/modules/schnorr/schnorr_impl.h
deleted file mode 100644
index e13ab6db7c..0000000000
--- a/src/secp256k1/src/modules/schnorr/schnorr_impl.h
+++ /dev/null
@@ -1,207 +0,0 @@
-/***********************************************************************
- * Copyright (c) 2014-2015 Pieter Wuille *
- * Distributed under the MIT software license, see the accompanying *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php. *
- ***********************************************************************/
-
-#ifndef _SECP256K1_SCHNORR_IMPL_H_
-#define _SECP256K1_SCHNORR_IMPL_H_
-
-#include <string.h>
-
-#include "schnorr.h"
-#include "num.h"
-#include "field.h"
-#include "group.h"
-#include "ecmult.h"
-#include "ecmult_gen.h"
-
-/**
- * Custom Schnorr-based signature scheme. They support multiparty signing, public key
- * recovery and batch validation.
- *
- * Rationale for verifying R's y coordinate:
- * In order to support batch validation and public key recovery, the full R point must
- * be known to verifiers, rather than just its x coordinate. In order to not risk
- * being more strict in batch validation than normal validation, validators must be
- * required to reject signatures with incorrect y coordinate. This is only possible
- * by including a (relatively slow) field inverse, or a field square root. However,
- * batch validation offers potentially much higher benefits than this cost.
- *
- * Rationale for having an implicit y coordinate oddness:
- * If we commit to having the full R point known to verifiers, there are two mechanism.
- * Either include its oddness in the signature, or give it an implicit fixed value.
- * As the R y coordinate can be flipped by a simple negation of the nonce, we choose the
- * latter, as it comes with nearly zero impact on signing or validation performance, and
- * saves a byte in the signature.
- *
- * Signing:
- * Inputs: 32-byte message m, 32-byte scalar key x (!=0), 32-byte scalar nonce k (!=0)
- *
- * Compute point R = k * G. Reject nonce if R's y coordinate is odd (or negate nonce).
- * Compute 32-byte r, the serialization of R's x coordinate.
- * Compute scalar h = Hash(r || m). Reject nonce if h == 0 or h >= order.
- * Compute scalar s = k - h * x.
- * The signature is (r, s).
- *
- *
- * Verification:
- * Inputs: 32-byte message m, public key point Q, signature: (32-byte r, scalar s)
- *
- * Signature is invalid if s >= order.
- * Signature is invalid if r >= p.
- * Compute scalar h = Hash(r || m). Signature is invalid if h == 0 or h >= order.
- * Option 1 (faster for single verification):
- * Compute point R = h * Q + s * G. Signature is invalid if R is infinity or R's y coordinate is odd.
- * Signature is valid if the serialization of R's x coordinate equals r.
- * Option 2 (allows batch validation and pubkey recovery):
- * Decompress x coordinate r into point R, with odd y coordinate. Fail if R is not on the curve.
- * Signature is valid if R + h * Q + s * G == 0.
- */
-
-static int secp256k1_schnorr_sig_sign(const secp256k1_ecmult_gen_context* ctx, unsigned char *sig64, const secp256k1_scalar *key, const secp256k1_scalar *nonce, const secp256k1_ge *pubnonce, secp256k1_schnorr_msghash hash, const unsigned char *msg32) {
- secp256k1_gej Rj;
- secp256k1_ge Ra;
- unsigned char h32[32];
- secp256k1_scalar h, s;
- int overflow;
- secp256k1_scalar n;
-
- if (secp256k1_scalar_is_zero(key) || secp256k1_scalar_is_zero(nonce)) {
- return 0;
- }
- n = *nonce;
-
- secp256k1_ecmult_gen(ctx, &Rj, &n);
- if (pubnonce != NULL) {
- secp256k1_gej_add_ge(&Rj, &Rj, pubnonce);
- }
- secp256k1_ge_set_gej(&Ra, &Rj);
- secp256k1_fe_normalize(&Ra.y);
- if (secp256k1_fe_is_odd(&Ra.y)) {
- /* R's y coordinate is odd, which is not allowed (see rationale above).
- Force it to be even by negating the nonce. Note that this even works
- for multiparty signing, as the R point is known to all participants,
- which can all decide to flip the sign in unison, resulting in the
- overall R point to be negated too. */
- secp256k1_scalar_negate(&n, &n);
- }
- secp256k1_fe_normalize(&Ra.x);
- secp256k1_fe_get_b32(sig64, &Ra.x);
- hash(h32, sig64, msg32);
- overflow = 0;
- secp256k1_scalar_set_b32(&h, h32, &overflow);
- if (overflow || secp256k1_scalar_is_zero(&h)) {
- secp256k1_scalar_clear(&n);
- return 0;
- }
- secp256k1_scalar_mul(&s, &h, key);
- secp256k1_scalar_negate(&s, &s);
- secp256k1_scalar_add(&s, &s, &n);
- secp256k1_scalar_clear(&n);
- secp256k1_scalar_get_b32(sig64 + 32, &s);
- return 1;
-}
-
-static int secp256k1_schnorr_sig_verify(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, const secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32) {
- secp256k1_gej Qj, Rj;
- secp256k1_ge Ra;
- secp256k1_fe Rx;
- secp256k1_scalar h, s;
- unsigned char hh[32];
- int overflow;
-
- if (secp256k1_ge_is_infinity(pubkey)) {
- return 0;
- }
- hash(hh, sig64, msg32);
- overflow = 0;
- secp256k1_scalar_set_b32(&h, hh, &overflow);
- if (overflow || secp256k1_scalar_is_zero(&h)) {
- return 0;
- }
- overflow = 0;
- secp256k1_scalar_set_b32(&s, sig64 + 32, &overflow);
- if (overflow) {
- return 0;
- }
- if (!secp256k1_fe_set_b32(&Rx, sig64)) {
- return 0;
- }
- secp256k1_gej_set_ge(&Qj, pubkey);
- secp256k1_ecmult(ctx, &Rj, &Qj, &h, &s);
- if (secp256k1_gej_is_infinity(&Rj)) {
- return 0;
- }
- secp256k1_ge_set_gej_var(&Ra, &Rj);
- secp256k1_fe_normalize_var(&Ra.y);
- if (secp256k1_fe_is_odd(&Ra.y)) {
- return 0;
- }
- return secp256k1_fe_equal_var(&Rx, &Ra.x);
-}
-
-static int secp256k1_schnorr_sig_recover(const secp256k1_ecmult_context* ctx, const unsigned char *sig64, secp256k1_ge *pubkey, secp256k1_schnorr_msghash hash, const unsigned char *msg32) {
- secp256k1_gej Qj, Rj;
- secp256k1_ge Ra;
- secp256k1_fe Rx;
- secp256k1_scalar h, s;
- unsigned char hh[32];
- int overflow;
-
- hash(hh, sig64, msg32);
- overflow = 0;
- secp256k1_scalar_set_b32(&h, hh, &overflow);
- if (overflow || secp256k1_scalar_is_zero(&h)) {
- return 0;
- }
- overflow = 0;
- secp256k1_scalar_set_b32(&s, sig64 + 32, &overflow);
- if (overflow) {
- return 0;
- }
- if (!secp256k1_fe_set_b32(&Rx, sig64)) {
- return 0;
- }
- if (!secp256k1_ge_set_xo_var(&Ra, &Rx, 0)) {
- return 0;
- }
- secp256k1_gej_set_ge(&Rj, &Ra);
- secp256k1_scalar_inverse_var(&h, &h);
- secp256k1_scalar_negate(&s, &s);
- secp256k1_scalar_mul(&s, &s, &h);
- secp256k1_ecmult(ctx, &Qj, &Rj, &h, &s);
- if (secp256k1_gej_is_infinity(&Qj)) {
- return 0;
- }
- secp256k1_ge_set_gej(pubkey, &Qj);
- return 1;
-}
-
-static int secp256k1_schnorr_sig_combine(unsigned char *sig64, size_t n, const unsigned char * const *sig64ins) {
- secp256k1_scalar s = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
- size_t i;
- for (i = 0; i < n; i++) {
- secp256k1_scalar si;
- int overflow;
- secp256k1_scalar_set_b32(&si, sig64ins[i] + 32, &overflow);
- if (overflow) {
- return -1;
- }
- if (i) {
- if (memcmp(sig64ins[i - 1], sig64ins[i], 32) != 0) {
- return -1;
- }
- }
- secp256k1_scalar_add(&s, &s, &si);
- }
- if (secp256k1_scalar_is_zero(&s)) {
- return 0;
- }
- memcpy(sig64, sig64ins[0], 32);
- secp256k1_scalar_get_b32(sig64 + 32, &s);
- secp256k1_scalar_clear(&s);
- return 1;
-}
-
-#endif
diff --git a/src/secp256k1/src/modules/schnorr/tests_impl.h b/src/secp256k1/src/modules/schnorr/tests_impl.h
deleted file mode 100644
index 5bd14a03e3..0000000000
--- a/src/secp256k1/src/modules/schnorr/tests_impl.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/**********************************************************************
- * Copyright (c) 2014-2015 Pieter Wuille *
- * Distributed under the MIT software license, see the accompanying *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
- **********************************************************************/
-
-#ifndef SECP256K1_MODULE_SCHNORR_TESTS
-#define SECP256K1_MODULE_SCHNORR_TESTS
-
-#include "include/secp256k1_schnorr.h"
-
-void test_schnorr_end_to_end(void) {
- unsigned char privkey[32];
- unsigned char message[32];
- unsigned char schnorr_signature[64];
- secp256k1_pubkey pubkey, recpubkey;
-
- /* Generate a random key and message. */
- {
- secp256k1_scalar key;
- random_scalar_order_test(&key);
- secp256k1_scalar_get_b32(privkey, &key);
- secp256k1_rand256_test(message);
- }
-
- /* Construct and verify corresponding public key. */
- CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1);
- CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
-
- /* Schnorr sign. */
- CHECK(secp256k1_schnorr_sign(ctx, schnorr_signature, message, privkey, NULL, NULL) == 1);
- CHECK(secp256k1_schnorr_verify(ctx, schnorr_signature, message, &pubkey) == 1);
- CHECK(secp256k1_schnorr_recover(ctx, &recpubkey, schnorr_signature, message) == 1);
- CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
- /* Destroy signature and verify again. */
- schnorr_signature[secp256k1_rand_bits(6)] += 1 + secp256k1_rand_int(255);
- CHECK(secp256k1_schnorr_verify(ctx, schnorr_signature, message, &pubkey) == 0);
- CHECK(secp256k1_schnorr_recover(ctx, &recpubkey, schnorr_signature, message) != 1 ||
- memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
-}
-
-/** Horribly broken hash function. Do not use for anything but tests. */
-void test_schnorr_hash(unsigned char *h32, const unsigned char *r32, const unsigned char *msg32) {
- int i;
- for (i = 0; i < 32; i++) {
- h32[i] = r32[i] ^ msg32[i];
- }
-}
-
-void test_schnorr_sign_verify(void) {
- unsigned char msg32[32];
- unsigned char sig64[3][64];
- secp256k1_gej pubkeyj[3];
- secp256k1_ge pubkey[3];
- secp256k1_scalar nonce[3], key[3];
- int i = 0;
- int k;
-
- secp256k1_rand256_test(msg32);
-
- for (k = 0; k < 3; k++) {
- random_scalar_order_test(&key[k]);
-
- do {
- random_scalar_order_test(&nonce[k]);
- if (secp256k1_schnorr_sig_sign(&ctx->ecmult_gen_ctx, sig64[k], &key[k], &nonce[k], NULL, &test_schnorr_hash, msg32)) {
- break;
- }
- } while(1);
-
- secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubkeyj[k], &key[k]);
- secp256k1_ge_set_gej_var(&pubkey[k], &pubkeyj[k]);
- CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64[k], &pubkey[k], &test_schnorr_hash, msg32));
-
- for (i = 0; i < 4; i++) {
- int pos = secp256k1_rand_bits(6);
- int mod = 1 + secp256k1_rand_int(255);
- sig64[k][pos] ^= mod;
- CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64[k], &pubkey[k], &test_schnorr_hash, msg32) == 0);
- sig64[k][pos] ^= mod;
- }
- }
-}
-
-void test_schnorr_threshold(void) {
- unsigned char msg[32];
- unsigned char sec[5][32];
- secp256k1_pubkey pub[5];
- unsigned char nonce[5][32];
- secp256k1_pubkey pubnonce[5];
- unsigned char sig[5][64];
- const unsigned char* sigs[5];
- unsigned char allsig[64];
- const secp256k1_pubkey* pubs[5];
- secp256k1_pubkey allpub;
- int n, i;
- int damage;
- int ret = 0;
-
- damage = secp256k1_rand_bits(1) ? (1 + secp256k1_rand_int(4)) : 0;
- secp256k1_rand256_test(msg);
- n = 2 + secp256k1_rand_int(4);
- for (i = 0; i < n; i++) {
- do {
- secp256k1_rand256_test(sec[i]);
- } while (!secp256k1_ec_seckey_verify(ctx, sec[i]));
- CHECK(secp256k1_ec_pubkey_create(ctx, &pub[i], sec[i]));
- CHECK(secp256k1_schnorr_generate_nonce_pair(ctx, &pubnonce[i], nonce[i], msg, sec[i], NULL, NULL));
- pubs[i] = &pub[i];
- }
- if (damage == 1) {
- nonce[secp256k1_rand_int(n)][secp256k1_rand_int(32)] ^= 1 + secp256k1_rand_int(255);
- } else if (damage == 2) {
- sec[secp256k1_rand_int(n)][secp256k1_rand_int(32)] ^= 1 + secp256k1_rand_int(255);
- }
- for (i = 0; i < n; i++) {
- secp256k1_pubkey allpubnonce;
- const secp256k1_pubkey *pubnonces[4];
- int j;
- for (j = 0; j < i; j++) {
- pubnonces[j] = &pubnonce[j];
- }
- for (j = i + 1; j < n; j++) {
- pubnonces[j - 1] = &pubnonce[j];
- }
- CHECK(secp256k1_ec_pubkey_combine(ctx, &allpubnonce, pubnonces, n - 1));
- ret |= (secp256k1_schnorr_partial_sign(ctx, sig[i], msg, sec[i], &allpubnonce, nonce[i]) != 1) * 1;
- sigs[i] = sig[i];
- }
- if (damage == 3) {
- sig[secp256k1_rand_int(n)][secp256k1_rand_bits(6)] ^= 1 + secp256k1_rand_int(255);
- }
- ret |= (secp256k1_ec_pubkey_combine(ctx, &allpub, pubs, n) != 1) * 2;
- if ((ret & 1) == 0) {
- ret |= (secp256k1_schnorr_partial_combine(ctx, allsig, sigs, n) != 1) * 4;
- }
- if (damage == 4) {
- allsig[secp256k1_rand_int(32)] ^= 1 + secp256k1_rand_int(255);
- }
- if ((ret & 7) == 0) {
- ret |= (secp256k1_schnorr_verify(ctx, allsig, msg, &allpub) != 1) * 8;
- }
- CHECK((ret == 0) == (damage == 0));
-}
-
-void test_schnorr_recovery(void) {
- unsigned char msg32[32];
- unsigned char sig64[64];
- secp256k1_ge Q;
-
- secp256k1_rand256_test(msg32);
- secp256k1_rand256_test(sig64);
- secp256k1_rand256_test(sig64 + 32);
- if (secp256k1_schnorr_sig_recover(&ctx->ecmult_ctx, sig64, &Q, &test_schnorr_hash, msg32) == 1) {
- CHECK(secp256k1_schnorr_sig_verify(&ctx->ecmult_ctx, sig64, &Q, &test_schnorr_hash, msg32) == 1);
- }
-}
-
-void run_schnorr_tests(void) {
- int i;
- for (i = 0; i < 32*count; i++) {
- test_schnorr_end_to_end();
- }
- for (i = 0; i < 32 * count; i++) {
- test_schnorr_sign_verify();
- }
- for (i = 0; i < 16 * count; i++) {
- test_schnorr_recovery();
- }
- for (i = 0; i < 10 * count; i++) {
- test_schnorr_threshold();
- }
-}
-
-#endif
diff --git a/src/secp256k1/src/scalar.h b/src/secp256k1/src/scalar.h
index b590ccd6dd..27e9d8375e 100644
--- a/src/secp256k1/src/scalar.h
+++ b/src/secp256k1/src/scalar.h
@@ -13,7 +13,9 @@
#include "libsecp256k1-config.h"
#endif
-#if defined(USE_SCALAR_4X64)
+#if defined(EXHAUSTIVE_TEST_ORDER)
+#include "scalar_low.h"
+#elif defined(USE_SCALAR_4X64)
#include "scalar_4x64.h"
#elif defined(USE_SCALAR_8X32)
#include "scalar_8x32.h"
diff --git a/src/secp256k1/src/scalar_4x64_impl.h b/src/secp256k1/src/scalar_4x64_impl.h
index aa2703dd23..56e7bd82af 100644
--- a/src/secp256k1/src/scalar_4x64_impl.h
+++ b/src/secp256k1/src/scalar_4x64_impl.h
@@ -282,8 +282,8 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
"movq 56(%%rsi), %%r14\n"
/* Initialize r8,r9,r10 */
"movq 0(%%rsi), %%r8\n"
- "movq $0, %%r9\n"
- "movq $0, %%r10\n"
+ "xorq %%r9, %%r9\n"
+ "xorq %%r10, %%r10\n"
/* (r8,r9) += n0 * c0 */
"movq %8, %%rax\n"
"mulq %%r11\n"
@@ -291,7 +291,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
"adcq %%rdx, %%r9\n"
/* extract m0 */
"movq %%r8, %q0\n"
- "movq $0, %%r8\n"
+ "xorq %%r8, %%r8\n"
/* (r9,r10) += l1 */
"addq 8(%%rsi), %%r9\n"
"adcq $0, %%r10\n"
@@ -309,7 +309,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
"adcq $0, %%r8\n"
/* extract m1 */
"movq %%r9, %q1\n"
- "movq $0, %%r9\n"
+ "xorq %%r9, %%r9\n"
/* (r10,r8,r9) += l2 */
"addq 16(%%rsi), %%r10\n"
"adcq $0, %%r8\n"
@@ -332,7 +332,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
"adcq $0, %%r9\n"
/* extract m2 */
"movq %%r10, %q2\n"
- "movq $0, %%r10\n"
+ "xorq %%r10, %%r10\n"
/* (r8,r9,r10) += l3 */
"addq 24(%%rsi), %%r8\n"
"adcq $0, %%r9\n"
@@ -355,7 +355,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
"adcq $0, %%r10\n"
/* extract m3 */
"movq %%r8, %q3\n"
- "movq $0, %%r8\n"
+ "xorq %%r8, %%r8\n"
/* (r9,r10,r8) += n3 * c1 */
"movq %9, %%rax\n"
"mulq %%r14\n"
@@ -387,8 +387,8 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
"movq %q11, %%r13\n"
/* Initialize (r8,r9,r10) */
"movq %q5, %%r8\n"
- "movq $0, %%r9\n"
- "movq $0, %%r10\n"
+ "xorq %%r9, %%r9\n"
+ "xorq %%r10, %%r10\n"
/* (r8,r9) += m4 * c0 */
"movq %12, %%rax\n"
"mulq %%r11\n"
@@ -396,7 +396,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
"adcq %%rdx, %%r9\n"
/* extract p0 */
"movq %%r8, %q0\n"
- "movq $0, %%r8\n"
+ "xorq %%r8, %%r8\n"
/* (r9,r10) += m1 */
"addq %q6, %%r9\n"
"adcq $0, %%r10\n"
@@ -414,7 +414,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
"adcq $0, %%r8\n"
/* extract p1 */
"movq %%r9, %q1\n"
- "movq $0, %%r9\n"
+ "xorq %%r9, %%r9\n"
/* (r10,r8,r9) += m2 */
"addq %q7, %%r10\n"
"adcq $0, %%r8\n"
@@ -472,7 +472,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
"movq %%rax, 0(%q6)\n"
/* Move to (r8,r9) */
"movq %%rdx, %%r8\n"
- "movq $0, %%r9\n"
+ "xorq %%r9, %%r9\n"
/* (r8,r9) += p1 */
"addq %q2, %%r8\n"
"adcq $0, %%r9\n"
@@ -483,7 +483,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
"adcq %%rdx, %%r9\n"
/* Extract r1 */
"movq %%r8, 8(%q6)\n"
- "movq $0, %%r8\n"
+ "xorq %%r8, %%r8\n"
/* (r9,r8) += p4 */
"addq %%r10, %%r9\n"
"adcq $0, %%r8\n"
@@ -492,7 +492,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
"adcq $0, %%r8\n"
/* Extract r2 */
"movq %%r9, 16(%q6)\n"
- "movq $0, %%r9\n"
+ "xorq %%r9, %%r9\n"
/* (r8,r9) += p3 */
"addq %q4, %%r8\n"
"adcq $0, %%r9\n"
diff --git a/src/secp256k1/src/scalar_impl.h b/src/secp256k1/src/scalar_impl.h
index c5baf4df41..f5b2376407 100644
--- a/src/secp256k1/src/scalar_impl.h
+++ b/src/secp256k1/src/scalar_impl.h
@@ -14,7 +14,9 @@
#include "libsecp256k1-config.h"
#endif
-#if defined(USE_SCALAR_4X64)
+#if defined(EXHAUSTIVE_TEST_ORDER)
+#include "scalar_low_impl.h"
+#elif defined(USE_SCALAR_4X64)
#include "scalar_4x64_impl.h"
#elif defined(USE_SCALAR_8X32)
#include "scalar_8x32_impl.h"
@@ -31,17 +33,37 @@ static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a
/** secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h */
static void secp256k1_scalar_order_get_num(secp256k1_num *r) {
+#if defined(EXHAUSTIVE_TEST_ORDER)
+ static const unsigned char order[32] = {
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER
+ };
+#else
static const unsigned char order[32] = {
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
};
+#endif
secp256k1_num_set_bin(r, order, 32);
}
#endif
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
+#if defined(EXHAUSTIVE_TEST_ORDER)
+ int i;
+ *r = 0;
+ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
+ if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
+ *r = i;
+ /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
+ * have a composite group order; fix it in exhaustive_tests.c). */
+ VERIFY_CHECK(*r != 0);
+}
+#else
secp256k1_scalar *t;
int i;
/* First compute x ^ (2^N - 1) for some values of N. */
@@ -233,9 +255,9 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
}
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
- /* d[0] is present and is the lowest word for all representations */
return !(a->d[0] & 1);
}
+#endif
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
#if defined(USE_SCALAR_INV_BUILTIN)
@@ -259,6 +281,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
}
#ifdef USE_ENDOMORPHISM
+#if defined(EXHAUSTIVE_TEST_ORDER)
+/**
+ * Find k1 and k2 given k, such that k1 + k2 * lambda == k mod n; unlike in the
+ * full case we don't bother making k1 and k2 be small, we just want them to be
+ * nontrivial to get full test coverage for the exhaustive tests. We therefore
+ * (arbitrarily) set k2 = k + 5 and k1 = k - k2 * lambda.
+ */
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ *r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER;
+ *r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
+}
+#else
/**
* The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where
* lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a,
@@ -331,5 +365,6 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar
secp256k1_scalar_add(r1, r1, a);
}
#endif
+#endif
#endif
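Note on the exhaustive-test lambda split above: it picks k2 = k + 5 and k1 = k - k2*lambda mod n, so the only property it has to preserve is k1 + k2*lambda == k (mod n). A minimal standalone sketch (not part of the diff) checking that identity for the toy parameters n = 13, lambda = 9 used by tests_exhaustive.c:

#include <cassert>
#include <cstdint>

int main()
{
    const uint32_t n = 13, lambda = 9;     /* EXHAUSTIVE_TEST_ORDER, EXHAUSTIVE_TEST_LAMBDA */
    for (uint32_t k = 0; k < n; k++) {
        uint32_t k2 = (k + 5) % n;                  /* arbitrary nontrivial split */
        uint32_t k1 = (k + (n - k2) * lambda) % n;  /* k - k2*lambda mod n */
        assert((k1 + k2 * lambda) % n == k);
    }
    return 0;
}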
diff --git a/src/secp256k1/src/scalar_low.h b/src/secp256k1/src/scalar_low.h
new file mode 100644
index 0000000000..5574c44c7a
--- /dev/null
+++ b/src/secp256k1/src/scalar_low.h
@@ -0,0 +1,15 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_REPR_
+#define _SECP256K1_SCALAR_REPR_
+
+#include <stdint.h>
+
+/** A scalar modulo the group order of the secp256k1 curve. */
+typedef uint32_t secp256k1_scalar;
+
+#endif
diff --git a/src/secp256k1/src/scalar_low_impl.h b/src/secp256k1/src/scalar_low_impl.h
new file mode 100644
index 0000000000..4f94441f49
--- /dev/null
+++ b/src/secp256k1/src/scalar_low_impl.h
@@ -0,0 +1,114 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_
+#define _SECP256K1_SCALAR_REPR_IMPL_H_
+
+#include "scalar.h"
+
+#include <string.h>
+
+SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
+ return !(*a & 1);
+}
+
+SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r = 0; }
+SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { *r = v; }
+
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ if (offset < 32)
+ return ((*a >> offset) & ((((uint32_t)1) << count) - 1));
+ else
+ return 0;
+}
+
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ return secp256k1_scalar_get_bits(a, offset, count);
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }
+
+static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER;
+ return *r < *b;
+}
+
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
+ if (flag && bit < 32)
+ *r += (1 << bit);
+#ifdef VERIFY
+ VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
+#endif
+}
+
+static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
+ const int base = 0x100 % EXHAUSTIVE_TEST_ORDER;
+ int i;
+ *r = 0;
+ for (i = 0; i < 32; i++) {
+ *r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER;
+ }
+ /* just deny overflow, it basically always happens */
+ if (overflow) *overflow = 0;
+}
+
+static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
+ memset(bin, 0, 32);
+ bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a;
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+ return *a == 0;
+}
+
+static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ if (*a == 0) {
+ *r = 0;
+ } else {
+ *r = EXHAUSTIVE_TEST_ORDER - *a;
+ }
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+ return *a == 1;
+}
+
+static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
+ return *a > EXHAUSTIVE_TEST_ORDER / 2;
+}
+
+static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
+ if (flag) secp256k1_scalar_negate(r, r);
+ return flag ? -1 : 1;
+}
+
+static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER;
+}
+
+static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
+ int ret;
+ VERIFY_CHECK(n > 0);
+ VERIFY_CHECK(n < 16);
+ ret = *r & ((1 << n) - 1);
+ *r >>= n;
+ return ret;
+}
+
+static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ *r = (*a * *a) % EXHAUSTIVE_TEST_ORDER;
+}
+
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ *r1 = *a;
+ *r2 = 0;
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ return *a == *b;
+}
+
+#endif
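The scalar_low representation above stores a scalar as a single uint32_t reduced mod EXHAUSTIVE_TEST_ORDER; secp256k1_scalar_set_b32 folds the 32 big-endian bytes in with Horner's rule, ((r * 256) + b) mod order. A small self-contained check (an illustration, not code from the diff) that this loop agrees with a direct reduction for order 13:

#include <cassert>
#include <cstdint>

static uint32_t set_b32_low(const unsigned char *b32, uint32_t order)
{
    uint32_t base = 0x100 % order;         /* 256 mod 13 == 9 */
    uint32_t r = 0;
    for (int i = 0; i < 32; i++)
        r = (r * base + b32[i]) % order;   /* Horner's rule over big-endian bytes */
    return r;
}

int main()
{
    unsigned char b[32] = {0};
    b[31] = 27;                            /* encodes 27; 27 mod 13 == 1 */
    assert(set_b32_low(b, 13) == 1);
    b[30] = 1;                             /* encodes 256 + 27 = 283; 283 mod 13 == 10 */
    assert(set_b32_low(b, 13) == 10);
    return 0;
}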
diff --git a/src/secp256k1/src/secp256k1.c b/src/secp256k1/src/secp256k1.c
index 7973d60c36..fb8b882faa 100644..100755
--- a/src/secp256k1/src/secp256k1.c
+++ b/src/secp256k1/src/secp256k1.c
@@ -359,16 +359,15 @@ int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature
secp256k1_scalar_set_b32(&sec, seckey, &overflow);
/* Fail if the secret key is invalid. */
if (!overflow && !secp256k1_scalar_is_zero(&sec)) {
+ unsigned char nonce32[32];
unsigned int count = 0;
secp256k1_scalar_set_b32(&msg, msg32, NULL);
while (1) {
- unsigned char nonce32[32];
ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count);
if (!ret) {
break;
}
secp256k1_scalar_set_b32(&non, nonce32, &overflow);
- memset(nonce32, 0, 32);
if (!overflow && !secp256k1_scalar_is_zero(&non)) {
if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, NULL)) {
break;
@@ -376,6 +375,7 @@ int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature
}
count++;
}
+ memset(nonce32, 0, 32);
secp256k1_scalar_clear(&msg);
secp256k1_scalar_clear(&non);
secp256k1_scalar_clear(&sec);
diff --git a/src/secp256k1/src/tests.c b/src/secp256k1/src/tests.c
index b32cb90813..9ae7d30281 100644
--- a/src/secp256k1/src/tests.c
+++ b/src/secp256k1/src/tests.c
@@ -520,7 +520,7 @@ void test_num_mod(void) {
secp256k1_num order, n;
/* check that 0 mod anything is 0 */
- random_scalar_order_test(&s);
+ random_scalar_order_test(&s);
secp256k1_scalar_get_num(&order, &s);
secp256k1_scalar_set_int(&s, 0);
secp256k1_scalar_get_num(&n, &s);
@@ -535,7 +535,7 @@ void test_num_mod(void) {
CHECK(secp256k1_num_is_zero(&n));
/* check that increasing the number past 2^256 does not break this */
- random_scalar_order_test(&s);
+ random_scalar_order_test(&s);
secp256k1_scalar_get_num(&n, &s);
/* multiply by 2^8, which'll test this case with high probability */
for (i = 0; i < 8; ++i) {
@@ -568,7 +568,7 @@ void test_num_jacobi(void) {
/* we first need a scalar which is not a multiple of 5 */
do {
secp256k1_num fiven;
- random_scalar_order_test(&sqr);
+ random_scalar_order_test(&sqr);
secp256k1_scalar_get_num(&fiven, &five);
secp256k1_scalar_get_num(&n, &sqr);
secp256k1_num_mod(&n, &fiven);
@@ -587,7 +587,7 @@ void test_num_jacobi(void) {
/** test with secp group order as order */
secp256k1_scalar_order_get_num(&order);
- random_scalar_order_test(&sqr);
+ random_scalar_order_test(&sqr);
secp256k1_scalar_sqr(&sqr, &sqr);
/* test residue */
secp256k1_scalar_get_num(&n, &sqr);
@@ -1733,18 +1733,18 @@ void run_field_inv_all_var(void) {
secp256k1_fe x[16], xi[16], xii[16];
int i;
/* Check it's safe to call for 0 elements */
- secp256k1_fe_inv_all_var(0, xi, x);
+ secp256k1_fe_inv_all_var(xi, x, 0);
for (i = 0; i < count; i++) {
size_t j;
size_t len = secp256k1_rand_int(15) + 1;
for (j = 0; j < len; j++) {
random_fe_non_zero(&x[j]);
}
- secp256k1_fe_inv_all_var(len, xi, x);
+ secp256k1_fe_inv_all_var(xi, x, len);
for (j = 0; j < len; j++) {
CHECK(check_fe_inverse(&x[j], &xi[j]));
}
- secp256k1_fe_inv_all_var(len, xii, xi);
+ secp256k1_fe_inv_all_var(xii, xi, len);
for (j = 0; j < len; j++) {
CHECK(check_fe_equal(&x[j], &xii[j]));
}
@@ -1930,7 +1930,7 @@ void test_ge(void) {
zs[i] = gej[i].z;
}
}
- secp256k1_fe_inv_all_var(4 * runs + 1, zinv, zs);
+ secp256k1_fe_inv_all_var(zinv, zs, 4 * runs + 1);
free(zs);
}
@@ -2050,8 +2050,8 @@ void test_ge(void) {
secp256k1_fe_mul(&zr[i + 1], &zinv[i], &gej[i + 1].z);
}
}
- secp256k1_ge_set_table_gej_var(4 * runs + 1, ge_set_table, gej, zr);
- secp256k1_ge_set_all_gej_var(4 * runs + 1, ge_set_all, gej, &ctx->error_callback);
+ secp256k1_ge_set_table_gej_var(ge_set_table, gej, zr, 4 * runs + 1);
+ secp256k1_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1, &ctx->error_callback);
for (i = 0; i < 4 * runs + 1; i++) {
secp256k1_fe s;
random_fe_non_zero(&s);
diff --git a/src/secp256k1/src/tests_exhaustive.c b/src/secp256k1/src/tests_exhaustive.c
new file mode 100644
index 0000000000..bda6ee475c
--- /dev/null
+++ b/src/secp256k1/src/tests_exhaustive.c
@@ -0,0 +1,329 @@
+/***********************************************************************
+ * Copyright (c) 2016 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <time.h>
+
+#undef USE_ECMULT_STATIC_PRECOMPUTATION
+
+#ifndef EXHAUSTIVE_TEST_ORDER
+/* see group_impl.h for allowable values */
+#define EXHAUSTIVE_TEST_ORDER 13
+#define EXHAUSTIVE_TEST_LAMBDA 9 /* cube root of 1 mod 13 */
+#endif
+
+#include "include/secp256k1.h"
+#include "group.h"
+#include "secp256k1.c"
+#include "testrand_impl.h"
+
+/** stolen from tests.c */
+void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) {
+ CHECK(a->infinity == b->infinity);
+ if (a->infinity) {
+ return;
+ }
+ CHECK(secp256k1_fe_equal_var(&a->x, &b->x));
+ CHECK(secp256k1_fe_equal_var(&a->y, &b->y));
+}
+
+void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) {
+ secp256k1_fe z2s;
+ secp256k1_fe u1, u2, s1, s2;
+ CHECK(a->infinity == b->infinity);
+ if (a->infinity) {
+ return;
+ }
+ /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */
+ secp256k1_fe_sqr(&z2s, &b->z);
+ secp256k1_fe_mul(&u1, &a->x, &z2s);
+ u2 = b->x; secp256k1_fe_normalize_weak(&u2);
+ secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z);
+ s2 = b->y; secp256k1_fe_normalize_weak(&s2);
+ CHECK(secp256k1_fe_equal_var(&u1, &u2));
+ CHECK(secp256k1_fe_equal_var(&s1, &s2));
+}
+
+void random_fe(secp256k1_fe *x) {
+ unsigned char bin[32];
+ do {
+ secp256k1_rand256(bin);
+ if (secp256k1_fe_set_b32(x, bin)) {
+ return;
+ }
+ } while(1);
+}
+/** END stolen from tests.c */
+
+int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
+ const unsigned char *key32, const unsigned char *algo16,
+ void *data, unsigned int attempt) {
+ secp256k1_scalar s;
+ int *idata = data;
+ (void)msg32;
+ (void)key32;
+ (void)algo16;
+ /* Some nonces cannot be used because they'd cause s and/or r to be zero.
+ * The signing function has retry logic here that just re-calls the nonce
+ * function with an increased `attempt`. So if attempt > 0 this means we
+ * need to change the nonce to avoid an infinite loop. */
+ if (attempt > 0) {
+ (*idata)++;
+ }
+ secp256k1_scalar_set_int(&s, *idata);
+ secp256k1_scalar_get_b32(nonce32, &s);
+ return 1;
+}
+
+#ifdef USE_ENDOMORPHISM
+void test_exhaustive_endomorphism(const secp256k1_ge *group, int order) {
+ int i;
+ for (i = 0; i < order; i++) {
+ secp256k1_ge res;
+ secp256k1_ge_mul_lambda(&res, &group[i]);
+ ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res);
+ }
+}
+#endif
+
+void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
+ int i, j;
+
+ /* Sanity-check (and check infinity functions) */
+ CHECK(secp256k1_ge_is_infinity(&group[0]));
+ CHECK(secp256k1_gej_is_infinity(&groupj[0]));
+ for (i = 1; i < order; i++) {
+ CHECK(!secp256k1_ge_is_infinity(&group[i]));
+ CHECK(!secp256k1_gej_is_infinity(&groupj[i]));
+ }
+
+ /* Check all addition formulae */
+ for (j = 0; j < order; j++) {
+ secp256k1_fe fe_inv;
+ secp256k1_fe_inv(&fe_inv, &groupj[j].z);
+ for (i = 0; i < order; i++) {
+ secp256k1_ge zless_gej;
+ secp256k1_gej tmp;
+ /* add_var */
+ secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL);
+ ge_equals_gej(&group[(i + j) % order], &tmp);
+ /* add_ge */
+ if (j > 0) {
+ secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]);
+ ge_equals_gej(&group[(i + j) % order], &tmp);
+ }
+ /* add_ge_var */
+ secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL);
+ ge_equals_gej(&group[(i + j) % order], &tmp);
+ /* add_zinv_var */
+ zless_gej.infinity = groupj[j].infinity;
+ zless_gej.x = groupj[j].x;
+ zless_gej.y = groupj[j].y;
+ secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv);
+ ge_equals_gej(&group[(i + j) % order], &tmp);
+ }
+ }
+
+ /* Check doubling */
+ for (i = 0; i < order; i++) {
+ secp256k1_gej tmp;
+ if (i > 0) {
+ secp256k1_gej_double_nonzero(&tmp, &groupj[i], NULL);
+ ge_equals_gej(&group[(2 * i) % order], &tmp);
+ }
+ secp256k1_gej_double_var(&tmp, &groupj[i], NULL);
+ ge_equals_gej(&group[(2 * i) % order], &tmp);
+ }
+
+ /* Check negation */
+ for (i = 1; i < order; i++) {
+ secp256k1_ge tmp;
+ secp256k1_gej tmpj;
+ secp256k1_ge_neg(&tmp, &group[i]);
+ ge_equals_ge(&group[order - i], &tmp);
+ secp256k1_gej_neg(&tmpj, &groupj[i]);
+ ge_equals_gej(&group[order - i], &tmpj);
+ }
+}
+
+void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
+ int i, j, r_log;
+ for (r_log = 1; r_log < order; r_log++) {
+ for (j = 0; j < order; j++) {
+ for (i = 0; i < order; i++) {
+ secp256k1_gej tmp;
+ secp256k1_scalar na, ng;
+ secp256k1_scalar_set_int(&na, i);
+ secp256k1_scalar_set_int(&ng, j);
+
+ secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng);
+ ge_equals_gej(&group[(i * r_log + j) % order], &tmp);
+
+ if (i > 0) {
+ secp256k1_ecmult_const(&tmp, &group[i], &ng);
+ ge_equals_gej(&group[(i * j) % order], &tmp);
+ }
+ }
+ }
+ }
+}
+
+void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) {
+ secp256k1_fe x;
+ unsigned char x_bin[32];
+ k %= EXHAUSTIVE_TEST_ORDER;
+ x = group[k].x;
+ secp256k1_fe_normalize(&x);
+ secp256k1_fe_get_b32(x_bin, &x);
+ secp256k1_scalar_set_b32(r, x_bin, NULL);
+}
+
+void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+ int s, r, msg, key;
+ for (s = 1; s < order; s++) {
+ for (r = 1; r < order; r++) {
+ for (msg = 1; msg < order; msg++) {
+ for (key = 1; key < order; key++) {
+ secp256k1_ge nonconst_ge;
+ secp256k1_ecdsa_signature sig;
+ secp256k1_pubkey pk;
+ secp256k1_scalar sk_s, msg_s, r_s, s_s;
+ secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
+ int k, should_verify;
+ unsigned char msg32[32];
+
+ secp256k1_scalar_set_int(&s_s, s);
+ secp256k1_scalar_set_int(&r_s, r);
+ secp256k1_scalar_set_int(&msg_s, msg);
+ secp256k1_scalar_set_int(&sk_s, key);
+
+ /* Verify by hand */
+ /* Run through every k value that gives us this r and check that *one* works.
+ * Note there could be none, there could be multiple, ECDSA is weird. */
+ should_verify = 0;
+ for (k = 0; k < order; k++) {
+ secp256k1_scalar check_x_s;
+ r_from_k(&check_x_s, group, k);
+ if (r_s == check_x_s) {
+ secp256k1_scalar_set_int(&s_times_k_s, k);
+ secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
+ secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
+ secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
+ should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
+ }
+ }
+ /* nb we have a "high s" rule */
+ should_verify &= !secp256k1_scalar_is_high(&s_s);
+
+ /* Verify by calling verify */
+ secp256k1_ecdsa_signature_save(&sig, &r_s, &s_s);
+ memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
+ secp256k1_pubkey_save(&pk, &nonconst_ge);
+ secp256k1_scalar_get_b32(msg32, &msg_s);
+ CHECK(should_verify ==
+ secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
+ }
+ }
+ }
+ }
+}
+
+void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+ int i, j, k;
+
+ /* Loop */
+ for (i = 1; i < order; i++) { /* message */
+ for (j = 1; j < order; j++) { /* key */
+ for (k = 1; k < order; k++) { /* nonce */
+ secp256k1_ecdsa_signature sig;
+ secp256k1_scalar sk, msg, r, s, expected_r;
+ unsigned char sk32[32], msg32[32];
+ secp256k1_scalar_set_int(&msg, i);
+ secp256k1_scalar_set_int(&sk, j);
+ secp256k1_scalar_get_b32(sk32, &sk);
+ secp256k1_scalar_get_b32(msg32, &msg);
+
+ secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
+
+ secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
+ /* Note that we compute expected_r *after* signing -- this is important
+ * because our nonce-computing function might change k during
+ * signing. */
+ r_from_k(&expected_r, group, k);
+ CHECK(r == expected_r);
+ CHECK((k * s) % order == (i + r * j) % order ||
+ (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
+ }
+ }
+ }
+
+ /* We would like to verify zero-knowledge here by counting how often every
+ * possible (s, r) tuple appears, but because the group order is larger
+ * than the field order, when coercing the x-values to scalar values, some
+ * appear more often than others, so we are actually not zero-knowledge.
+ * (This effect also appears in the real code, but the difference is on the
+ * order of 1/2^128th the field order, so the deviation is not useful to a
+ * computationally bounded attacker.)
+ */
+}
+
+int main(void) {
+ int i;
+ secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER];
+ secp256k1_ge group[EXHAUSTIVE_TEST_ORDER];
+
+ /* Build context */
+ secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+
+ /* TODO set z = 1, then do num_tests runs with random z values */
+
+ /* Generate the entire group */
+ secp256k1_gej_set_infinity(&groupj[0]);
+ secp256k1_ge_set_gej(&group[0], &groupj[0]);
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
+ /* Set a different random z-value for each Jacobian point */
+ secp256k1_fe z;
+ random_fe(&z);
+
+ secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g);
+ secp256k1_ge_set_gej(&group[i], &groupj[i]);
+ secp256k1_gej_rescale(&groupj[i], &z);
+
+ /* Verify against ecmult_gen */
+ {
+ secp256k1_scalar scalar_i;
+ secp256k1_gej generatedj;
+ secp256k1_ge generated;
+
+ secp256k1_scalar_set_int(&scalar_i, i);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
+ secp256k1_ge_set_gej(&generated, &generatedj);
+
+ CHECK(group[i].infinity == 0);
+ CHECK(generated.infinity == 0);
+ CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x));
+ CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y));
+ }
+ }
+
+ /* Run the tests */
+#ifdef USE_ENDOMORPHISM
+ test_exhaustive_endomorphism(group, EXHAUSTIVE_TEST_ORDER);
+#endif
+ test_exhaustive_addition(group, groupj, EXHAUSTIVE_TEST_ORDER);
+ test_exhaustive_ecmult(ctx, group, groupj, EXHAUSTIVE_TEST_ORDER);
+ test_exhaustive_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
+ test_exhaustive_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
+
+ return 0;
+}
+
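test_exhaustive_verify above accepts a signature (r, s) exactly when some nonce k satisfies x(k*G) == r and s*k == msg + r*sk (mod n), subject to the low-s rule. A standalone numeric sketch of that congruence, using the toy order n = 13 and made-up values for r and the inverse of k (hypothetical, not taken from the actual test group):

#include <cassert>
#include <cstdint>

int main()
{
    const uint32_t n = 13;                 /* EXHAUSTIVE_TEST_ORDER */
    uint32_t sk = 4, msg = 7, k = 5;       /* hypothetical key, message, nonce */
    uint32_t r = 11;                       /* pretend x(k*G) reduced to a scalar */
    uint32_t kinv = 8;                     /* 5 * 8 == 40 == 1 (mod 13) */
    uint32_t s = (kinv * ((msg + r * sk) % n)) % n;
    /* the algebraic check behind test_exhaustive_verify's should_verify: */
    assert((s * k) % n == (msg + r * sk) % n);
    return 0;
}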
diff --git a/src/serialize.h b/src/serialize.h
index e28ca548c0..2be6f005e3 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -59,34 +59,6 @@ inline T* NCONST_PTR(const T* val)
return const_cast<T*>(val);
}
-/**
- * Important: Do not use the following functions in new code, but use v.data()
- * and v.data() + v.size() respectively directly. They were once introduced to
- * have a compatible, safe way to get the begin and end pointer of a vector.
- * However with C++11 the language has built-in functionality for this and it's
- * more readable to just use that.
- */
-template <typename V>
-inline typename V::value_type* begin_ptr(V& v)
-{
- return v.data();
-}
-template <typename V>
-inline const typename V::value_type* begin_ptr(const V& v)
-{
- return v.data();
-}
-template <typename V>
-inline typename V::value_type* end_ptr(V& v)
-{
- return v.data() + v.size();
-}
-template <typename V>
-inline const typename V::value_type* end_ptr(const V& v)
-{
- return v.data() + v.size();
-}
-
/*
* Lowest-level serialization and conversion.
* @note Sizes of these types are verified in the tests
@@ -390,14 +362,14 @@ public:
template <class T, class TAl>
explicit CFlatData(std::vector<T,TAl> &v)
{
- pbegin = (char*)begin_ptr(v);
- pend = (char*)end_ptr(v);
+ pbegin = (char*)v.data();
+ pend = (char*)(v.data() + v.size());
}
template <unsigned int N, typename T, typename S, typename D>
explicit CFlatData(prevector<N, T, S, D> &v)
{
- pbegin = (char*)begin_ptr(v);
- pend = (char*)end_ptr(v);
+ pbegin = (char*)v.data();
+ pend = (char*)(v.data() + v.size());
}
char* begin() { return pbegin; }
const char* begin() const { return pbegin; }
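The serialize.h change above replaces the removed begin_ptr/end_ptr helpers with the C++11 vector accessors. A minimal sketch of the equivalence now relied on by CFlatData:

#include <cassert>
#include <vector>

int main()
{
    std::vector<unsigned char> v{1, 2, 3};
    const char* pbegin = (const char*)v.data();                 /* was begin_ptr(v) */
    const char* pend   = (const char*)(v.data() + v.size());    /* was end_ptr(v)   */
    assert(pend - pbegin == 3);
    return 0;
}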
diff --git a/src/test/DoS_tests.cpp b/src/test/DoS_tests.cpp
index 1a818a5756..131d667e0b 100644
--- a/src/test/DoS_tests.cpp
+++ b/src/test/DoS_tests.cpp
@@ -29,9 +29,9 @@ extern unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans);
struct COrphanTx {
CTransaction tx;
NodeId fromPeer;
+ int64_t nTimeExpire;
};
extern std::map<uint256, COrphanTx> mapOrphanTransactions;
-extern std::map<uint256, std::set<uint256> > mapOrphanTransactionsByPrev;
CService ip(uint32_t i)
{
@@ -202,7 +202,6 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
BOOST_CHECK(mapOrphanTransactions.size() <= 10);
LimitOrphanTxSize(0);
BOOST_CHECK(mapOrphanTransactions.empty());
- BOOST_CHECK(mapOrphanTransactionsByPrev.empty());
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/base58_tests.cpp b/src/test/base58_tests.cpp
index ac3ab4c83f..04a6506655 100644
--- a/src/test/base58_tests.cpp
+++ b/src/test/base58_tests.cpp
@@ -39,7 +39,7 @@ BOOST_AUTO_TEST_CASE(base58_EncodeBase58)
std::vector<unsigned char> sourcedata = ParseHex(test[0].get_str());
std::string base58string = test[1].get_str();
BOOST_CHECK_MESSAGE(
- EncodeBase58(begin_ptr(sourcedata), end_ptr(sourcedata)) == base58string,
+ EncodeBase58(sourcedata.data(), sourcedata.data() + sourcedata.size()) == base58string,
strTest);
}
}
diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp
index b013cda6d7..7478758f73 100644
--- a/src/test/blockencodings_tests.cpp
+++ b/src/test/blockencodings_tests.cpp
@@ -80,9 +80,9 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest)
BOOST_CHECK_EQUAL(pool.mapTx.find(block.vtx[2]->GetHash())->GetSharedTx().use_count(), SHARED_TX_OFFSET + 1);
- std::vector<CTransactionRef> removed;
- pool.removeRecursive(*block.vtx[2], &removed);
- BOOST_CHECK_EQUAL(removed.size(), 1);
+ size_t poolSize = pool.size();
+ pool.removeRecursive(*block.vtx[2]);
+ BOOST_CHECK_EQUAL(pool.size(), poolSize - 1);
CBlock block2;
std::vector<CTransactionRef> vtx_missing;
diff --git a/src/test/bswap_tests.cpp b/src/test/bswap_tests.cpp
new file mode 100644
index 0000000000..7b3134d327
--- /dev/null
+++ b/src/test/bswap_tests.cpp
@@ -0,0 +1,26 @@
+// Copyright (c) 2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "compat/byteswap.h"
+#include "test/test_bitcoin.h"
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(bswap_tests, BasicTestingSetup)
+
+BOOST_AUTO_TEST_CASE(bswap_tests)
+{
+ // Sibling in bitcoin/src/qt/test/compattests.cpp
+ uint16_t u1 = 0x1234;
+ uint32_t u2 = 0x56789abc;
+ uint64_t u3 = 0xdef0123456789abc;
+ uint16_t e1 = 0x3412;
+ uint32_t e2 = 0xbc9a7856;
+ uint64_t e3 = 0xbc9a78563412f0de;
+ BOOST_CHECK(bswap_16(u1) == e1);
+ BOOST_CHECK(bswap_32(u2) == e2);
+ BOOST_CHECK(bswap_64(u3) == e3);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
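The bswap test above checks byte-order reversal of 16/32/64-bit integers against hard-coded expectations. As an illustration only (this is not the fallback from compat/byteswap.h), a typical shift-and-mask bswap_32 that reproduces the 32-bit expected value used in the test:

#include <cassert>
#include <cstdint>

/* Hypothetical portable fallback; the real compat/byteswap.h may differ. */
static uint32_t bswap_32_fallback(uint32_t x)
{
    return ((x & 0xff000000u) >> 24) |
           ((x & 0x00ff0000u) >>  8) |
           ((x & 0x0000ff00u) <<  8) |
           ((x & 0x000000ffu) << 24);
}

int main()
{
    assert(bswap_32_fallback(0x56789abcu) == 0xbc9a7856u);   /* matches u2 -> e2 above */
    return 0;
}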
diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp
index 79dea00e46..f0e74a55d2 100644
--- a/src/test/coins_tests.cpp
+++ b/src/test/coins_tests.cpp
@@ -80,6 +80,8 @@ public:
BOOST_CHECK_EQUAL(DynamicMemoryUsage(), ret);
}
+ CCoinsMap& map() { return cacheCoins; }
+ size_t& usage() { return cachedCoinsUsage; }
};
}
@@ -415,4 +417,366 @@ BOOST_AUTO_TEST_CASE(ccoins_serialization)
}
}
+const static uint256 TXID;
+const static CAmount PRUNED = -1;
+const static CAmount ABSENT = -2;
+const static CAmount VALUE1 = 100;
+const static CAmount VALUE2 = 200;
+const static CAmount VALUE3 = 300;
+const static char DIRTY = CCoinsCacheEntry::DIRTY;
+const static char FRESH = CCoinsCacheEntry::FRESH;
+const static char NO_ENTRY = -1;
+
+const static auto FLAGS = {char(0), FRESH, DIRTY, char(DIRTY | FRESH)};
+const static auto CLEAN_FLAGS = {char(0), FRESH};
+const static auto DIRTY_FLAGS = {DIRTY, char(DIRTY | FRESH)};
+const static auto ABSENT_FLAGS = {NO_ENTRY};
+
+void SetCoinsValue(CAmount value, CCoins& coins)
+{
+ assert(value != ABSENT);
+ coins.Clear();
+ assert(coins.IsPruned());
+ if (value != PRUNED) {
+ coins.vout.emplace_back();
+ coins.vout.back().nValue = value;
+ assert(!coins.IsPruned());
+ }
+}
+
+size_t InsertCoinsMapEntry(CCoinsMap& map, CAmount value, char flags)
+{
+ if (value == ABSENT) {
+ assert(flags == NO_ENTRY);
+ return 0;
+ }
+ assert(flags != NO_ENTRY);
+ CCoinsCacheEntry entry;
+ entry.flags = flags;
+ SetCoinsValue(value, entry.coins);
+ auto inserted = map.emplace(TXID, std::move(entry));
+ assert(inserted.second);
+ return inserted.first->second.coins.DynamicMemoryUsage();
+}
+
+void GetCoinsMapEntry(const CCoinsMap& map, CAmount& value, char& flags)
+{
+ auto it = map.find(TXID);
+ if (it == map.end()) {
+ value = ABSENT;
+ flags = NO_ENTRY;
+ } else {
+ if (it->second.coins.IsPruned()) {
+ assert(it->second.coins.vout.size() == 0);
+ value = PRUNED;
+ } else {
+ assert(it->second.coins.vout.size() == 1);
+ value = it->second.coins.vout[0].nValue;
+ }
+ flags = it->second.flags;
+ assert(flags != NO_ENTRY);
+ }
+}
+
+void WriteCoinsViewEntry(CCoinsView& view, CAmount value, char flags)
+{
+ CCoinsMap map;
+ InsertCoinsMapEntry(map, value, flags);
+ view.BatchWrite(map, {});
+}
+
+class SingleEntryCacheTest
+{
+public:
+ SingleEntryCacheTest(CAmount base_value, CAmount cache_value, char cache_flags)
+ {
+ WriteCoinsViewEntry(base, base_value, base_value == ABSENT ? NO_ENTRY : DIRTY);
+ cache.usage() += InsertCoinsMapEntry(cache.map(), cache_value, cache_flags);
+ }
+
+ CCoinsView root;
+ CCoinsViewCacheTest base{&root};
+ CCoinsViewCacheTest cache{&base};
+};
+
+void CheckAccessCoins(CAmount base_value, CAmount cache_value, CAmount expected_value, char cache_flags, char expected_flags)
+{
+ SingleEntryCacheTest test(base_value, cache_value, cache_flags);
+ test.cache.AccessCoins(TXID);
+ test.cache.SelfTest();
+
+ CAmount result_value;
+ char result_flags;
+ GetCoinsMapEntry(test.cache.map(), result_value, result_flags);
+ BOOST_CHECK_EQUAL(result_value, expected_value);
+ BOOST_CHECK_EQUAL(result_flags, expected_flags);
+}
+
+BOOST_AUTO_TEST_CASE(ccoins_access)
+{
+ /* Check AccessCoin behavior, requesting a coin from a cache view layered on
+ * top of a base view, and checking the resulting entry in the cache after
+ * the access.
+ *
+ * Base Cache Result Cache Result
+ * Value Value Value Flags Flags
+ */
+ CheckAccessCoins(ABSENT, ABSENT, ABSENT, NO_ENTRY , NO_ENTRY );
+ CheckAccessCoins(ABSENT, PRUNED, PRUNED, 0 , 0 );
+ CheckAccessCoins(ABSENT, PRUNED, PRUNED, FRESH , FRESH );
+ CheckAccessCoins(ABSENT, PRUNED, PRUNED, DIRTY , DIRTY );
+ CheckAccessCoins(ABSENT, PRUNED, PRUNED, DIRTY|FRESH, DIRTY|FRESH);
+ CheckAccessCoins(ABSENT, VALUE2, VALUE2, 0 , 0 );
+ CheckAccessCoins(ABSENT, VALUE2, VALUE2, FRESH , FRESH );
+ CheckAccessCoins(ABSENT, VALUE2, VALUE2, DIRTY , DIRTY );
+ CheckAccessCoins(ABSENT, VALUE2, VALUE2, DIRTY|FRESH, DIRTY|FRESH);
+ CheckAccessCoins(PRUNED, ABSENT, PRUNED, NO_ENTRY , FRESH );
+ CheckAccessCoins(PRUNED, PRUNED, PRUNED, 0 , 0 );
+ CheckAccessCoins(PRUNED, PRUNED, PRUNED, FRESH , FRESH );
+ CheckAccessCoins(PRUNED, PRUNED, PRUNED, DIRTY , DIRTY );
+ CheckAccessCoins(PRUNED, PRUNED, PRUNED, DIRTY|FRESH, DIRTY|FRESH);
+ CheckAccessCoins(PRUNED, VALUE2, VALUE2, 0 , 0 );
+ CheckAccessCoins(PRUNED, VALUE2, VALUE2, FRESH , FRESH );
+ CheckAccessCoins(PRUNED, VALUE2, VALUE2, DIRTY , DIRTY );
+ CheckAccessCoins(PRUNED, VALUE2, VALUE2, DIRTY|FRESH, DIRTY|FRESH);
+ CheckAccessCoins(VALUE1, ABSENT, VALUE1, NO_ENTRY , 0 );
+ CheckAccessCoins(VALUE1, PRUNED, PRUNED, 0 , 0 );
+ CheckAccessCoins(VALUE1, PRUNED, PRUNED, FRESH , FRESH );
+ CheckAccessCoins(VALUE1, PRUNED, PRUNED, DIRTY , DIRTY );
+ CheckAccessCoins(VALUE1, PRUNED, PRUNED, DIRTY|FRESH, DIRTY|FRESH);
+ CheckAccessCoins(VALUE1, VALUE2, VALUE2, 0 , 0 );
+ CheckAccessCoins(VALUE1, VALUE2, VALUE2, FRESH , FRESH );
+ CheckAccessCoins(VALUE1, VALUE2, VALUE2, DIRTY , DIRTY );
+ CheckAccessCoins(VALUE1, VALUE2, VALUE2, DIRTY|FRESH, DIRTY|FRESH);
+}
+
+void CheckModifyCoins(CAmount base_value, CAmount cache_value, CAmount modify_value, CAmount expected_value, char cache_flags, char expected_flags)
+{
+ SingleEntryCacheTest test(base_value, cache_value, cache_flags);
+ SetCoinsValue(modify_value, *test.cache.ModifyCoins(TXID));
+ test.cache.SelfTest();
+
+ CAmount result_value;
+ char result_flags;
+ GetCoinsMapEntry(test.cache.map(), result_value, result_flags);
+ BOOST_CHECK_EQUAL(result_value, expected_value);
+ BOOST_CHECK_EQUAL(result_flags, expected_flags);
+};
+
+BOOST_AUTO_TEST_CASE(ccoins_modify)
+{
+ /* Check ModifyCoin behavior, requesting a coin from a cache view layered on
+ * top of a base view, writing a modification to the coin, and then checking
+ * the resulting entry in the cache after the modification.
+ *
+ * Base Cache Write Result Cache Result
+ * Value Value Value Value Flags Flags
+ */
+ CheckModifyCoins(ABSENT, ABSENT, PRUNED, ABSENT, NO_ENTRY , NO_ENTRY );
+ CheckModifyCoins(ABSENT, ABSENT, VALUE3, VALUE3, NO_ENTRY , DIRTY|FRESH);
+ CheckModifyCoins(ABSENT, PRUNED, PRUNED, PRUNED, 0 , DIRTY );
+ CheckModifyCoins(ABSENT, PRUNED, PRUNED, ABSENT, FRESH , NO_ENTRY );
+ CheckModifyCoins(ABSENT, PRUNED, PRUNED, PRUNED, DIRTY , DIRTY );
+ CheckModifyCoins(ABSENT, PRUNED, PRUNED, ABSENT, DIRTY|FRESH, NO_ENTRY );
+ CheckModifyCoins(ABSENT, PRUNED, VALUE3, VALUE3, 0 , DIRTY );
+ CheckModifyCoins(ABSENT, PRUNED, VALUE3, VALUE3, FRESH , DIRTY|FRESH);
+ CheckModifyCoins(ABSENT, PRUNED, VALUE3, VALUE3, DIRTY , DIRTY );
+ CheckModifyCoins(ABSENT, PRUNED, VALUE3, VALUE3, DIRTY|FRESH, DIRTY|FRESH);
+ CheckModifyCoins(ABSENT, VALUE2, PRUNED, PRUNED, 0 , DIRTY );
+ CheckModifyCoins(ABSENT, VALUE2, PRUNED, ABSENT, FRESH , NO_ENTRY );
+ CheckModifyCoins(ABSENT, VALUE2, PRUNED, PRUNED, DIRTY , DIRTY );
+ CheckModifyCoins(ABSENT, VALUE2, PRUNED, ABSENT, DIRTY|FRESH, NO_ENTRY );
+ CheckModifyCoins(ABSENT, VALUE2, VALUE3, VALUE3, 0 , DIRTY );
+ CheckModifyCoins(ABSENT, VALUE2, VALUE3, VALUE3, FRESH , DIRTY|FRESH);
+ CheckModifyCoins(ABSENT, VALUE2, VALUE3, VALUE3, DIRTY , DIRTY );
+ CheckModifyCoins(ABSENT, VALUE2, VALUE3, VALUE3, DIRTY|FRESH, DIRTY|FRESH);
+ CheckModifyCoins(PRUNED, ABSENT, PRUNED, ABSENT, NO_ENTRY , NO_ENTRY );
+ CheckModifyCoins(PRUNED, ABSENT, VALUE3, VALUE3, NO_ENTRY , DIRTY|FRESH);
+ CheckModifyCoins(PRUNED, PRUNED, PRUNED, PRUNED, 0 , DIRTY );
+ CheckModifyCoins(PRUNED, PRUNED, PRUNED, ABSENT, FRESH , NO_ENTRY );
+ CheckModifyCoins(PRUNED, PRUNED, PRUNED, PRUNED, DIRTY , DIRTY );
+ CheckModifyCoins(PRUNED, PRUNED, PRUNED, ABSENT, DIRTY|FRESH, NO_ENTRY );
+ CheckModifyCoins(PRUNED, PRUNED, VALUE3, VALUE3, 0 , DIRTY );
+ CheckModifyCoins(PRUNED, PRUNED, VALUE3, VALUE3, FRESH , DIRTY|FRESH);
+ CheckModifyCoins(PRUNED, PRUNED, VALUE3, VALUE3, DIRTY , DIRTY );
+ CheckModifyCoins(PRUNED, PRUNED, VALUE3, VALUE3, DIRTY|FRESH, DIRTY|FRESH);
+ CheckModifyCoins(PRUNED, VALUE2, PRUNED, PRUNED, 0 , DIRTY );
+ CheckModifyCoins(PRUNED, VALUE2, PRUNED, ABSENT, FRESH , NO_ENTRY );
+ CheckModifyCoins(PRUNED, VALUE2, PRUNED, PRUNED, DIRTY , DIRTY );
+ CheckModifyCoins(PRUNED, VALUE2, PRUNED, ABSENT, DIRTY|FRESH, NO_ENTRY );
+ CheckModifyCoins(PRUNED, VALUE2, VALUE3, VALUE3, 0 , DIRTY );
+ CheckModifyCoins(PRUNED, VALUE2, VALUE3, VALUE3, FRESH , DIRTY|FRESH);
+ CheckModifyCoins(PRUNED, VALUE2, VALUE3, VALUE3, DIRTY , DIRTY );
+ CheckModifyCoins(PRUNED, VALUE2, VALUE3, VALUE3, DIRTY|FRESH, DIRTY|FRESH);
+ CheckModifyCoins(VALUE1, ABSENT, PRUNED, PRUNED, NO_ENTRY , DIRTY );
+ CheckModifyCoins(VALUE1, ABSENT, VALUE3, VALUE3, NO_ENTRY , DIRTY );
+ CheckModifyCoins(VALUE1, PRUNED, PRUNED, PRUNED, 0 , DIRTY );
+ CheckModifyCoins(VALUE1, PRUNED, PRUNED, ABSENT, FRESH , NO_ENTRY );
+ CheckModifyCoins(VALUE1, PRUNED, PRUNED, PRUNED, DIRTY , DIRTY );
+ CheckModifyCoins(VALUE1, PRUNED, PRUNED, ABSENT, DIRTY|FRESH, NO_ENTRY );
+ CheckModifyCoins(VALUE1, PRUNED, VALUE3, VALUE3, 0 , DIRTY );
+ CheckModifyCoins(VALUE1, PRUNED, VALUE3, VALUE3, FRESH , DIRTY|FRESH);
+ CheckModifyCoins(VALUE1, PRUNED, VALUE3, VALUE3, DIRTY , DIRTY );
+ CheckModifyCoins(VALUE1, PRUNED, VALUE3, VALUE3, DIRTY|FRESH, DIRTY|FRESH);
+ CheckModifyCoins(VALUE1, VALUE2, PRUNED, PRUNED, 0 , DIRTY );
+ CheckModifyCoins(VALUE1, VALUE2, PRUNED, ABSENT, FRESH , NO_ENTRY );
+ CheckModifyCoins(VALUE1, VALUE2, PRUNED, PRUNED, DIRTY , DIRTY );
+ CheckModifyCoins(VALUE1, VALUE2, PRUNED, ABSENT, DIRTY|FRESH, NO_ENTRY );
+ CheckModifyCoins(VALUE1, VALUE2, VALUE3, VALUE3, 0 , DIRTY );
+ CheckModifyCoins(VALUE1, VALUE2, VALUE3, VALUE3, FRESH , DIRTY|FRESH);
+ CheckModifyCoins(VALUE1, VALUE2, VALUE3, VALUE3, DIRTY , DIRTY );
+ CheckModifyCoins(VALUE1, VALUE2, VALUE3, VALUE3, DIRTY|FRESH, DIRTY|FRESH);
+}
+
+void CheckModifyNewCoinsBase(CAmount base_value, CAmount cache_value, CAmount modify_value, CAmount expected_value, char cache_flags, char expected_flags, bool coinbase)
+{
+ SingleEntryCacheTest test(base_value, cache_value, cache_flags);
+ SetCoinsValue(modify_value, *test.cache.ModifyNewCoins(TXID, coinbase));
+
+ CAmount result_value;
+ char result_flags;
+ GetCoinsMapEntry(test.cache.map(), result_value, result_flags);
+ BOOST_CHECK_EQUAL(result_value, expected_value);
+ BOOST_CHECK_EQUAL(result_flags, expected_flags);
+}
+
+// Simple wrapper for CheckModifyNewCoinsBase function above that loops through
+// different possible base_values, making sure each one gives the same results.
+// This wrapper lets the modify_new test below be shorter and less repetitive,
+// while still verifying that the CoinsViewCache::ModifyNewCoins implementation
+// ignores base values.
+template <typename... Args>
+void CheckModifyNewCoins(Args&&... args)
+{
+ for (CAmount base_value : {ABSENT, PRUNED, VALUE1})
+ CheckModifyNewCoinsBase(base_value, std::forward<Args>(args)...);
+}
+
+BOOST_AUTO_TEST_CASE(ccoins_modify_new)
+{
+ /* Check ModifyNewCoin behavior, requesting a new coin from a cache view,
+ * writing a modification to the coin, and then checking the resulting
+ * entry in the cache after the modification. Verify behavior with the
+ * ModifyNewCoin coinbase argument set to false, and to true.
+ *
+ * Cache Write Result Cache Result Coinbase
+ * Value Value Value Flags Flags
+ */
+ CheckModifyNewCoins(ABSENT, PRUNED, ABSENT, NO_ENTRY , NO_ENTRY , false);
+ CheckModifyNewCoins(ABSENT, PRUNED, PRUNED, NO_ENTRY , DIRTY , true );
+ CheckModifyNewCoins(ABSENT, VALUE3, VALUE3, NO_ENTRY , DIRTY|FRESH, false);
+ CheckModifyNewCoins(ABSENT, VALUE3, VALUE3, NO_ENTRY , DIRTY , true );
+ CheckModifyNewCoins(PRUNED, PRUNED, ABSENT, 0 , NO_ENTRY , false);
+ CheckModifyNewCoins(PRUNED, PRUNED, PRUNED, 0 , DIRTY , true );
+ CheckModifyNewCoins(PRUNED, PRUNED, ABSENT, FRESH , NO_ENTRY , false);
+ CheckModifyNewCoins(PRUNED, PRUNED, ABSENT, FRESH , NO_ENTRY , true );
+ CheckModifyNewCoins(PRUNED, PRUNED, ABSENT, DIRTY , NO_ENTRY , false);
+ CheckModifyNewCoins(PRUNED, PRUNED, PRUNED, DIRTY , DIRTY , true );
+ CheckModifyNewCoins(PRUNED, PRUNED, ABSENT, DIRTY|FRESH, NO_ENTRY , false);
+ CheckModifyNewCoins(PRUNED, PRUNED, ABSENT, DIRTY|FRESH, NO_ENTRY , true );
+ CheckModifyNewCoins(PRUNED, VALUE3, VALUE3, 0 , DIRTY|FRESH, false);
+ CheckModifyNewCoins(PRUNED, VALUE3, VALUE3, 0 , DIRTY , true );
+ CheckModifyNewCoins(PRUNED, VALUE3, VALUE3, FRESH , DIRTY|FRESH, false);
+ CheckModifyNewCoins(PRUNED, VALUE3, VALUE3, FRESH , DIRTY|FRESH, true );
+ CheckModifyNewCoins(PRUNED, VALUE3, VALUE3, DIRTY , DIRTY|FRESH, false);
+ CheckModifyNewCoins(PRUNED, VALUE3, VALUE3, DIRTY , DIRTY , true );
+ CheckModifyNewCoins(PRUNED, VALUE3, VALUE3, DIRTY|FRESH, DIRTY|FRESH, false);
+ CheckModifyNewCoins(PRUNED, VALUE3, VALUE3, DIRTY|FRESH, DIRTY|FRESH, true );
+ CheckModifyNewCoins(VALUE2, PRUNED, ABSENT, 0 , NO_ENTRY , false);
+ CheckModifyNewCoins(VALUE2, PRUNED, PRUNED, 0 , DIRTY , true );
+ CheckModifyNewCoins(VALUE2, PRUNED, ABSENT, FRESH , NO_ENTRY , false);
+ CheckModifyNewCoins(VALUE2, PRUNED, ABSENT, FRESH , NO_ENTRY , true );
+ CheckModifyNewCoins(VALUE2, PRUNED, ABSENT, DIRTY , NO_ENTRY , false);
+ CheckModifyNewCoins(VALUE2, PRUNED, PRUNED, DIRTY , DIRTY , true );
+ CheckModifyNewCoins(VALUE2, PRUNED, ABSENT, DIRTY|FRESH, NO_ENTRY , false);
+ CheckModifyNewCoins(VALUE2, PRUNED, ABSENT, DIRTY|FRESH, NO_ENTRY , true );
+ CheckModifyNewCoins(VALUE2, VALUE3, VALUE3, 0 , DIRTY|FRESH, false);
+ CheckModifyNewCoins(VALUE2, VALUE3, VALUE3, 0 , DIRTY , true );
+ CheckModifyNewCoins(VALUE2, VALUE3, VALUE3, FRESH , DIRTY|FRESH, false);
+ CheckModifyNewCoins(VALUE2, VALUE3, VALUE3, FRESH , DIRTY|FRESH, true );
+ CheckModifyNewCoins(VALUE2, VALUE3, VALUE3, DIRTY , DIRTY|FRESH, false);
+ CheckModifyNewCoins(VALUE2, VALUE3, VALUE3, DIRTY , DIRTY , true );
+ CheckModifyNewCoins(VALUE2, VALUE3, VALUE3, DIRTY|FRESH, DIRTY|FRESH, false);
+ CheckModifyNewCoins(VALUE2, VALUE3, VALUE3, DIRTY|FRESH, DIRTY|FRESH, true );
+}
+
+void CheckWriteCoins(CAmount parent_value, CAmount child_value, CAmount expected_value, char parent_flags, char child_flags, char expected_flags)
+{
+ SingleEntryCacheTest test(ABSENT, parent_value, parent_flags);
+ WriteCoinsViewEntry(test.cache, child_value, child_flags);
+ test.cache.SelfTest();
+
+ CAmount result_value;
+ char result_flags;
+ GetCoinsMapEntry(test.cache.map(), result_value, result_flags);
+ BOOST_CHECK_EQUAL(result_value, expected_value);
+ BOOST_CHECK_EQUAL(result_flags, expected_flags);
+}
+
+BOOST_AUTO_TEST_CASE(ccoins_write)
+{
+ /* Check BatchWrite behavior, flushing one entry from a child cache to a
+ * parent cache, and checking the resulting entry in the parent cache
+ * after the write.
+ *
+ * Parent Child Result Parent Child Result
+ * Value Value Value Flags Flags Flags
+ */
+ CheckWriteCoins(ABSENT, ABSENT, ABSENT, NO_ENTRY , NO_ENTRY , NO_ENTRY );
+ CheckWriteCoins(ABSENT, PRUNED, PRUNED, NO_ENTRY , DIRTY , DIRTY );
+ CheckWriteCoins(ABSENT, PRUNED, ABSENT, NO_ENTRY , DIRTY|FRESH, NO_ENTRY );
+ CheckWriteCoins(ABSENT, VALUE2, VALUE2, NO_ENTRY , DIRTY , DIRTY );
+ CheckWriteCoins(ABSENT, VALUE2, VALUE2, NO_ENTRY , DIRTY|FRESH, DIRTY|FRESH);
+ CheckWriteCoins(PRUNED, ABSENT, PRUNED, 0 , NO_ENTRY , 0 );
+ CheckWriteCoins(PRUNED, ABSENT, PRUNED, FRESH , NO_ENTRY , FRESH );
+ CheckWriteCoins(PRUNED, ABSENT, PRUNED, DIRTY , NO_ENTRY , DIRTY );
+ CheckWriteCoins(PRUNED, ABSENT, PRUNED, DIRTY|FRESH, NO_ENTRY , DIRTY|FRESH);
+ CheckWriteCoins(PRUNED, PRUNED, PRUNED, 0 , DIRTY , DIRTY );
+ CheckWriteCoins(PRUNED, PRUNED, PRUNED, 0 , DIRTY|FRESH, DIRTY );
+ CheckWriteCoins(PRUNED, PRUNED, ABSENT, FRESH , DIRTY , NO_ENTRY );
+ CheckWriteCoins(PRUNED, PRUNED, ABSENT, FRESH , DIRTY|FRESH, NO_ENTRY );
+ CheckWriteCoins(PRUNED, PRUNED, PRUNED, DIRTY , DIRTY , DIRTY );
+ CheckWriteCoins(PRUNED, PRUNED, PRUNED, DIRTY , DIRTY|FRESH, DIRTY );
+ CheckWriteCoins(PRUNED, PRUNED, ABSENT, DIRTY|FRESH, DIRTY , NO_ENTRY );
+ CheckWriteCoins(PRUNED, PRUNED, ABSENT, DIRTY|FRESH, DIRTY|FRESH, NO_ENTRY );
+ CheckWriteCoins(PRUNED, VALUE2, VALUE2, 0 , DIRTY , DIRTY );
+ CheckWriteCoins(PRUNED, VALUE2, VALUE2, 0 , DIRTY|FRESH, DIRTY );
+ CheckWriteCoins(PRUNED, VALUE2, VALUE2, FRESH , DIRTY , DIRTY|FRESH);
+ CheckWriteCoins(PRUNED, VALUE2, VALUE2, FRESH , DIRTY|FRESH, DIRTY|FRESH);
+ CheckWriteCoins(PRUNED, VALUE2, VALUE2, DIRTY , DIRTY , DIRTY );
+ CheckWriteCoins(PRUNED, VALUE2, VALUE2, DIRTY , DIRTY|FRESH, DIRTY );
+ CheckWriteCoins(PRUNED, VALUE2, VALUE2, DIRTY|FRESH, DIRTY , DIRTY|FRESH);
+ CheckWriteCoins(PRUNED, VALUE2, VALUE2, DIRTY|FRESH, DIRTY|FRESH, DIRTY|FRESH);
+ CheckWriteCoins(VALUE1, ABSENT, VALUE1, 0 , NO_ENTRY , 0 );
+ CheckWriteCoins(VALUE1, ABSENT, VALUE1, FRESH , NO_ENTRY , FRESH );
+ CheckWriteCoins(VALUE1, ABSENT, VALUE1, DIRTY , NO_ENTRY , DIRTY );
+ CheckWriteCoins(VALUE1, ABSENT, VALUE1, DIRTY|FRESH, NO_ENTRY , DIRTY|FRESH);
+ CheckWriteCoins(VALUE1, PRUNED, PRUNED, 0 , DIRTY , DIRTY );
+ CheckWriteCoins(VALUE1, PRUNED, PRUNED, 0 , DIRTY|FRESH, DIRTY );
+ CheckWriteCoins(VALUE1, PRUNED, ABSENT, FRESH , DIRTY , NO_ENTRY );
+ CheckWriteCoins(VALUE1, PRUNED, ABSENT, FRESH , DIRTY|FRESH, NO_ENTRY );
+ CheckWriteCoins(VALUE1, PRUNED, PRUNED, DIRTY , DIRTY , DIRTY );
+ CheckWriteCoins(VALUE1, PRUNED, PRUNED, DIRTY , DIRTY|FRESH, DIRTY );
+ CheckWriteCoins(VALUE1, PRUNED, ABSENT, DIRTY|FRESH, DIRTY , NO_ENTRY );
+ CheckWriteCoins(VALUE1, PRUNED, ABSENT, DIRTY|FRESH, DIRTY|FRESH, NO_ENTRY );
+ CheckWriteCoins(VALUE1, VALUE2, VALUE2, 0 , DIRTY , DIRTY );
+ CheckWriteCoins(VALUE1, VALUE2, VALUE2, 0 , DIRTY|FRESH, DIRTY );
+ CheckWriteCoins(VALUE1, VALUE2, VALUE2, FRESH , DIRTY , DIRTY|FRESH);
+ CheckWriteCoins(VALUE1, VALUE2, VALUE2, FRESH , DIRTY|FRESH, DIRTY|FRESH);
+ CheckWriteCoins(VALUE1, VALUE2, VALUE2, DIRTY , DIRTY , DIRTY );
+ CheckWriteCoins(VALUE1, VALUE2, VALUE2, DIRTY , DIRTY|FRESH, DIRTY );
+ CheckWriteCoins(VALUE1, VALUE2, VALUE2, DIRTY|FRESH, DIRTY , DIRTY|FRESH);
+ CheckWriteCoins(VALUE1, VALUE2, VALUE2, DIRTY|FRESH, DIRTY|FRESH, DIRTY|FRESH);
+
+ // The checks above omit cases where the child flags are not DIRTY, since
+ // they would be too repetitive (the parent cache is never updated in these
+ // cases). The loop below covers these cases and makes sure the parent cache
+ // is always left unchanged.
+ for (CAmount parent_value : {ABSENT, PRUNED, VALUE1})
+ for (CAmount child_value : {ABSENT, PRUNED, VALUE2})
+ for (char parent_flags : parent_value == ABSENT ? ABSENT_FLAGS : FLAGS)
+ for (char child_flags : child_value == ABSENT ? ABSENT_FLAGS : CLEAN_FLAGS)
+ CheckWriteCoins(parent_value, child_value, parent_value, parent_flags, child_flags, parent_flags);
+}
+
BOOST_AUTO_TEST_SUITE_END()
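The coins_tests additions above encode the DIRTY/FRESH cache semantics as large truth tables. Below is a toy model (a hedged sketch, not the real CCoinsViewCache logic; write_flags and its constants are invented here) that reduces the BatchWrite rows with a DIRTY child to one rule: a pruned entry that is FRESH from the parent's point of view is dropped, otherwise the result is DIRTY plus the inherited FRESH bit. It asserts a few rows quoted from CheckWriteCoins:

#include <cassert>

static const int DIRTY = 1, FRESH = 2;   /* toy stand-ins for CCoinsCacheEntry flags */
static const int NO_ENTRY = -1;          /* "no entry in the map" */

/* parent_flags == NO_ENTRY means the parent cache had no entry for the txid.
 * The child entry is assumed DIRTY (non-DIRTY children never touch the parent). */
static int write_flags(int parent_flags, int child_flags, bool child_pruned)
{
    bool fresh = (parent_flags == NO_ENTRY) ? (child_flags & FRESH) != 0
                                            : (parent_flags & FRESH) != 0;
    if (fresh && child_pruned)
        return NO_ENTRY;                 /* pruned while still fresh: erase entirely */
    return DIRTY | (fresh ? FRESH : 0);
}

int main()
{
    assert(write_flags(NO_ENTRY, DIRTY, false)        == DIRTY);
    assert(write_flags(NO_ENTRY, DIRTY | FRESH, true) == NO_ENTRY);
    assert(write_flags(FRESH, DIRTY, false)           == (DIRTY | FRESH));
    assert(write_flags(DIRTY | FRESH, DIRTY, true)    == NO_ENTRY);
    assert(write_flags(0, DIRTY | FRESH, false)       == DIRTY);
    return 0;
}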
diff --git a/src/test/cuckoocache_tests.cpp b/src/test/cuckoocache_tests.cpp
new file mode 100644
index 0000000000..1bc50d5ea9
--- /dev/null
+++ b/src/test/cuckoocache_tests.cpp
@@ -0,0 +1,394 @@
+// Copyright (c) 2012-2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <boost/test/unit_test.hpp>
+#include "cuckoocache.h"
+#include "test/test_bitcoin.h"
+#include "random.h"
+#include <thread>
+#include <boost/thread.hpp>
+
+
+/** Test Suite for CuckooCache
+ *
+ * 1) All tests should have a deterministic result (using insecure rand
+ * with deterministic seeds)
+ * 2) Some test methods are templated to allow for easier testing
+ * against new versions / comparing
+ * 3) Results should be treated as a regression test, i.e., did the behavior
+ * change significantly from what was expected. This can be OK, depending on
+ * the nature of the change, but requires updating the tests to reflect the new
+ * expected behavior. For example improving the hit rate may cause some tests
+ * using BOOST_CHECK_CLOSE to fail.
+ *
+ */
+FastRandomContext insecure_rand(true);
+
+BOOST_AUTO_TEST_SUITE(cuckoocache_tests);
+
+
+/** insecure_GetRandHash fills in a uint256 from insecure_rand
+ */
+void insecure_GetRandHash(uint256& t)
+{
+ uint32_t* ptr = (uint32_t*)t.begin();
+ for (uint8_t j = 0; j < 8; ++j)
+ *(ptr++) = insecure_rand.rand32();
+}
+
+/** Definition copied from /src/script/sigcache.cpp
+ */
+class uint256Hasher
+{
+public:
+ template <uint8_t hash_select>
+ uint32_t operator()(const uint256& key) const
+ {
+ static_assert(hash_select <8, "SignatureCacheHasher only has 8 hashes available.");
+ uint32_t u;
+ std::memcpy(&u, key.begin() + 4 * hash_select, 4);
+ return u;
+ }
+};
+
+
+/* Test that no values not inserted into the cache are read out of it.
+ *
+ * There are no repeats in the first 200000 insecure_GetRandHash calls
+ */
+BOOST_AUTO_TEST_CASE(test_cuckoocache_no_fakes)
+{
+ insecure_rand = FastRandomContext(true);
+ CuckooCache::cache<uint256, uint256Hasher> cc{};
+ cc.setup_bytes(32 << 20);
+ uint256 v;
+ for (int x = 0; x < 100000; ++x) {
+ insecure_GetRandHash(v);
+ cc.insert(v);
+ }
+ for (int x = 0; x < 100000; ++x) {
+ insecure_GetRandHash(v);
+ BOOST_CHECK(!cc.contains(v, false));
+ }
+};
+
+/** This helper returns the hit rate when megabytes*load worth of entries are
+ * inserted into a megabytes sized cache
+ */
+template <typename Cache>
+double test_cache(size_t megabytes, double load)
+{
+ insecure_rand = FastRandomContext(true);
+ std::vector<uint256> hashes;
+ Cache set{};
+ size_t bytes = megabytes * (1 << 20);
+ set.setup_bytes(bytes);
+ uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256)));
+ hashes.resize(n_insert);
+ for (uint32_t i = 0; i < n_insert; ++i) {
+ uint32_t* ptr = (uint32_t*)hashes[i].begin();
+ for (uint8_t j = 0; j < 8; ++j)
+ *(ptr++) = insecure_rand.rand32();
+ }
+ /** We make a copy of the hashes because future optimizations of the
+ * cuckoocache may overwrite the inserted element, so the test is
+ * "future proofed".
+ */
+ std::vector<uint256> hashes_insert_copy = hashes;
+ /** Do the insert */
+ for (uint256& h : hashes_insert_copy)
+ set.insert(h);
+ /** Count the hits */
+ uint32_t count = 0;
+ for (uint256& h : hashes)
+ count += set.contains(h, false);
+ double hit_rate = ((double)count) / ((double)n_insert);
+ return hit_rate;
+}
+
+/** The normalized hit rate for a given load.
+ *
+ * The semantics are a little confusing, so please see the below
+ * explanation.
+ *
+ * Examples:
+ *
+ * 1) at load 0.5, we expect a perfect hit rate, so we multiply by
+ * 1.0
+ * 2) at load 2.0, we expect to see half the entries, so a perfect hit rate
+ * would be 0.5. Therefore, if we see a hit rate of 0.4, 0.4*2.0 = 0.8 is the
+ * normalized hit rate.
+ *
+ * This is basically the right semantics, but it has a bit of a glitch
+ * depending on how you measure around load 1.0: above load 1.0 the normalized
+ * hit rate becomes effectively perfect, ignoring freshness.
+ */
+double normalize_hit_rate(double hits, double load)
+{
+ return hits * std::max(load, 1.0);
+}
+
+/** Check the hit rate on loads ranging from 0.1 to 2.0 */
+BOOST_AUTO_TEST_CASE(cuckoocache_hit_rate_ok)
+{
+ /** Arbitrarily selected Hit Rate threshold that happens to work for this test
+ * as a lower bound on performance.
+ */
+ double HitRateThresh = 0.98;
+ size_t megabytes = 32;
+ for (double load = 0.1; load < 2; load *= 2) {
+ double hits = test_cache<CuckooCache::cache<uint256, uint256Hasher>>(megabytes, load);
+ BOOST_CHECK(normalize_hit_rate(hits, load) > HitRateThresh);
+ }
+}
+
+
+/** This helper checks that erased elements are preferentially overwritten by
+ * new inserts, and that the hit rate of "fresher" keys is reasonable. */
+template <typename Cache>
+void test_cache_erase(size_t megabytes)
+{
+ double load = 1;
+ insecure_rand = FastRandomContext(true);
+ std::vector<uint256> hashes;
+ Cache set{};
+ size_t bytes = megabytes * (1 << 20);
+ set.setup_bytes(bytes);
+ uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256)));
+ hashes.resize(n_insert);
+ for (uint32_t i = 0; i < n_insert; ++i) {
+ uint32_t* ptr = (uint32_t*)hashes[i].begin();
+ for (uint8_t j = 0; j < 8; ++j)
+ *(ptr++) = insecure_rand.rand32();
+ }
+ /** We make a copy of the hashes because future optimizations of the
+ * cuckoocache may overwrite the inserted element, so the test is
+ * "future proofed".
+ */
+ std::vector<uint256> hashes_insert_copy = hashes;
+
+ /** Insert the first half */
+ for (uint32_t i = 0; i < (n_insert / 2); ++i)
+ set.insert(hashes_insert_copy[i]);
+ /** Erase the first quarter */
+ for (uint32_t i = 0; i < (n_insert / 4); ++i)
+ set.contains(hashes[i], true);
+ /** Insert the second half */
+ for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
+ set.insert(hashes_insert_copy[i]);
+
+ /** elements that we marked erased but that are still there */
+ size_t count_erased_but_contained = 0;
+ /** elements that we did not erase but are older */
+ size_t count_stale = 0;
+ /** elements that were most recently inserted */
+ size_t count_fresh = 0;
+
+ for (uint32_t i = 0; i < (n_insert / 4); ++i)
+ count_erased_but_contained += set.contains(hashes[i], false);
+ for (uint32_t i = (n_insert / 4); i < (n_insert / 2); ++i)
+ count_stale += set.contains(hashes[i], false);
+ for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
+ count_fresh += set.contains(hashes[i], false);
+
+ double hit_rate_erased_but_contained = double(count_erased_but_contained) / (double(n_insert) / 4.0);
+ double hit_rate_stale = double(count_stale) / (double(n_insert) / 4.0);
+ double hit_rate_fresh = double(count_fresh) / (double(n_insert) / 2.0);
+
+ // Check that our hit_rate_fresh is perfect
+ BOOST_CHECK_EQUAL(hit_rate_fresh, 1.0);
+ // Check that we have a more than 2x better hit rate on stale elements than
+ // erased elements.
+ BOOST_CHECK(hit_rate_stale > 2 * hit_rate_erased_but_contained);
+}
+
+BOOST_AUTO_TEST_CASE(cuckoocache_erase_ok)
+{
+ size_t megabytes = 32;
+ test_cache_erase<CuckooCache::cache<uint256, uint256Hasher>>(megabytes);
+}
+
+template <typename Cache>
+void test_cache_erase_parallel(size_t megabytes)
+{
+ double load = 1;
+ insecure_rand = FastRandomContext(true);
+ std::vector<uint256> hashes;
+ Cache set{};
+ size_t bytes = megabytes * (1 << 20);
+ set.setup_bytes(bytes);
+ uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256)));
+ hashes.resize(n_insert);
+ for (uint32_t i = 0; i < n_insert; ++i) {
+ uint32_t* ptr = (uint32_t*)hashes[i].begin();
+ for (uint8_t j = 0; j < 8; ++j)
+ *(ptr++) = insecure_rand.rand32();
+ }
+ /** We make a copy of the hashes because future optimizations of the
+ * cuckoocache may overwrite the inserted element, so the test is
+ * "future proofed".
+ */
+ std::vector<uint256> hashes_insert_copy = hashes;
+ boost::shared_mutex mtx;
+
+ {
+ /** Hold the lock while inserting so the inserts are published before the reader threads run */
+ boost::unique_lock<boost::shared_mutex> l(mtx);
+ /** Insert the first half */
+ for (uint32_t i = 0; i < (n_insert / 2); ++i)
+ set.insert(hashes_insert_copy[i]);
+ }
+
+ /** Spin up 3 threads to run contains with erase.
+ */
+ std::vector<std::thread> threads;
+ /** Erase the first quarter */
+ for (uint32_t x = 0; x < 3; ++x)
+ /** Each thread captures its index x by value
+ */
+ threads.emplace_back([&, x] {
+ boost::shared_lock<boost::shared_mutex> l(mtx);
+ size_t ntodo = (n_insert/4)/3;
+ size_t start = ntodo*x;
+ size_t end = ntodo*(x+1);
+ for (uint32_t i = start; i < end; ++i)
+ set.contains(hashes[i], true);
+ });
+
+ /** Wait for all threads to finish
+ */
+ for (std::thread& t : threads)
+ t.join();
+ /** Grab lock to make sure we observe erases */
+ boost::unique_lock<boost::shared_mutex> l(mtx);
+ /** Insert the second half */
+ for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
+ set.insert(hashes_insert_copy[i]);
+
+ /** elements that we marked erased but that are still there */
+ size_t count_erased_but_contained = 0;
+ /** elements that we did not erase but are older */
+ size_t count_stale = 0;
+ /** elements that were most recently inserted */
+ size_t count_fresh = 0;
+
+ for (uint32_t i = 0; i < (n_insert / 4); ++i)
+ count_erased_but_contained += set.contains(hashes[i], false);
+ for (uint32_t i = (n_insert / 4); i < (n_insert / 2); ++i)
+ count_stale += set.contains(hashes[i], false);
+ for (uint32_t i = (n_insert / 2); i < n_insert; ++i)
+ count_fresh += set.contains(hashes[i], false);
+
+ double hit_rate_erased_but_contained = double(count_erased_but_contained) / (double(n_insert) / 4.0);
+ double hit_rate_stale = double(count_stale) / (double(n_insert) / 4.0);
+ double hit_rate_fresh = double(count_fresh) / (double(n_insert) / 2.0);
+
+ // Check that our hit_rate_fresh is perfect
+ BOOST_CHECK_EQUAL(hit_rate_fresh, 1.0);
+ // Check that we have a more than 2x better hit rate on stale elements than
+ // erased elements.
+ BOOST_CHECK(hit_rate_stale > 2 * hit_rate_erased_but_contained);
+}
+BOOST_AUTO_TEST_CASE(cuckoocache_erase_parallel_ok)
+{
+ size_t megabytes = 32;
+ test_cache_erase_parallel<CuckooCache::cache<uint256, uint256Hasher>>(megabytes);
+}
+
+
+template <typename Cache>
+void test_cache_generations()
+{
+ // This test checks that, for a simulation of network activity, the fresh hit
+ // rate is never below 99%, and that it drops below 99.9% in less than 1% of
+ // the checks.
+ double min_hit_rate = 0.99;
+ double tight_hit_rate = 0.999;
+ double max_rate_less_than_tight_hit_rate = 0.01;
+ // A cache that meets this specification therefore has a hit rate of at least
+ // tight_hit_rate * (1 - max_rate_less_than_tight_hit_rate) +
+ // min_hit_rate * max_rate_less_than_tight_hit_rate = 0.999*99% + 0.99*1% == 99.89%,
+ // with low variance.
+
+ // We use deterministic values, but this test has also passed on many
+ // iterations with non-deterministic values, so it isn't "overfit" to the
+ // specific entropy in FastRandomContext(true) and implementation of the
+ // cache.
+ insecure_rand = FastRandomContext(true);
+
+ // block_activity models a chunk of network activity. n_insert elements are
+ // added to the cache. The first and last n/4 are stored for removal later
+ // and the middle n/2 are not stored. This models a network which uses half
+ // the signatures of recently (since the last block) added transactions
+ // immediately and never uses the other half.
+ struct block_activity {
+ std::vector<uint256> reads;
+ block_activity(uint32_t n_insert, Cache& c) : reads()
+ {
+ std::vector<uint256> inserts;
+ inserts.resize(n_insert);
+ reads.reserve(n_insert / 2);
+ for (uint32_t i = 0; i < n_insert; ++i) {
+ uint32_t* ptr = (uint32_t*)inserts[i].begin();
+ for (uint8_t j = 0; j < 8; ++j)
+ *(ptr++) = insecure_rand.rand32();
+ }
+ for (uint32_t i = 0; i < n_insert / 4; ++i)
+ reads.push_back(inserts[i]);
+ for (uint32_t i = n_insert - (n_insert / 4); i < n_insert; ++i)
+ reads.push_back(inserts[i]);
+ for (auto h : inserts)
+ c.insert(h);
+ }
+ };
+
+ const uint32_t BLOCK_SIZE = 10000;
+ // We expect window size 60 to perform reasonably given that each epoch
+ // stores 45% of the cache size (~472k).
+ const uint32_t WINDOW_SIZE = 60;
+ const uint32_t POP_AMOUNT = (BLOCK_SIZE / WINDOW_SIZE) / 2;
+ const double load = 10;
+ const size_t megabytes = 32;
+ const size_t bytes = megabytes * (1 << 20);
+ const uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256)));
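
For orientation, the arithmetic implied by these constants (the 45% epoch fraction is taken from the comment above, not derived here):

    32 MiB / 32 bytes per uint256     = 1,048,576 cache slots
    45% of 1,048,576                 ~= 472k entries per epoch
    n_insert = 10 * 1,048,576         = 10,485,760 insertions, i.e. total = 1048 simulated blocks
    POP_AMOUNT = (10000 / 60) / 2     = 83, so a full window reads back
    60 * 83                           = 4980 of the 5000 hashes each block_activity stored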
+
+ std::vector<block_activity> hashes;
+ Cache set{};
+ set.setup_bytes(bytes);
+ hashes.reserve(n_insert / BLOCK_SIZE);
+ std::deque<block_activity> last_few;
+ uint32_t out_of_tight_tolerance = 0;
+ uint32_t total = n_insert / BLOCK_SIZE;
+ // We use the deque last_few to model a sliding window of blocks. At each
+ // step, each of the last WINDOW_SIZE block_activities checks the cache for
+ // POP_AMOUNT of the hashes that it inserted, and marks these erased.
+ for (uint32_t i = 0; i < total; ++i) {
+ if (last_few.size() == WINDOW_SIZE)
+ last_few.pop_front();
+ last_few.emplace_back(BLOCK_SIZE, set);
+ uint32_t count = 0;
+ for (auto& act : last_few)
+ for (uint32_t k = 0; k < POP_AMOUNT; ++k) {
+ count += set.contains(act.reads.back(), true);
+ act.reads.pop_back();
+ }
+ // We use last_few.size() rather than WINDOW_SIZE for the correct
+ // behavior on the first WINDOW_SIZE iterations where the deque is not
+ // full yet.
+ double hit = (double(count)) / (last_few.size() * POP_AMOUNT);
+ // Loose check that the hit rate is above min_hit_rate
+ BOOST_CHECK(hit > min_hit_rate);
+ // Tighter check: count the number of times we are below tight_hit_rate
+ // (and implicitly, above min_hit_rate)
+ out_of_tight_tolerance += hit < tight_hit_rate;
+ }
+ // Check that being out of tolerance happens less than
+ // max_rate_less_than_tight_hit_rate of the time
+ BOOST_CHECK(double(out_of_tight_tolerance) / double(total) < max_rate_less_than_tight_hit_rate);
+}
+BOOST_AUTO_TEST_CASE(cuckoocache_generations)
+{
+ test_cache_generations<CuckooCache::cache<uint256, uint256Hasher>>();
+}
+
+BOOST_AUTO_TEST_SUITE_END();
diff --git a/src/test/data/bitcoin-util-test.json b/src/test/data/bitcoin-util-test.json
index 98be75919b..ab1a1385a3 100644
--- a/src/test/data/bitcoin-util-test.json
+++ b/src/test/data/bitcoin-util-test.json
@@ -1,24 +1,24 @@
[
{ "exec": "./bitcoin-tx",
- "args": ["-create"],
- "output_cmp": "blanktx.hex",
- "description": "Creates a blank transaction"
+ "args": ["-create", "nversion=1"],
+ "output_cmp": "blanktxv1.hex",
+ "description": "Creates a blank v1 transaction"
},
{ "exec": "./bitcoin-tx",
"args": ["-json","-create"],
- "output_cmp": "blanktx.json",
+ "output_cmp": "blanktxv2.json",
"description": "Creates a blank transaction (output in json)"
},
{ "exec": "./bitcoin-tx",
"args": ["-"],
- "input": "blanktx.hex",
- "output_cmp": "blanktx.hex",
+ "input": "blanktxv2.hex",
+ "output_cmp": "blanktxv2.hex",
"description": "Creates a blank transaction when nothing is piped into bitcoin-tx"
},
{ "exec": "./bitcoin-tx",
"args": ["-json","-"],
- "input": "blanktx.hex",
- "output_cmp": "blanktx.json",
+ "input": "blanktxv2.hex",
+ "output_cmp": "blanktxv2.json",
"description": "Creates a blank transaction when nothing is piped into bitcoin-tx (output in json)"
},
{ "exec": "./bitcoin-tx",
@@ -103,37 +103,48 @@
"description": "Creates a new transaction with a single empty output script (output in json)"
},
{ "exec": "./bitcoin-tx",
- "args": ["01000000000100000000000000000000000000"],
+ "args": ["02000000000100000000000000000000000000"],
"output_cmp": "txcreate2.hex",
"description": "Parses a transation with no inputs and a single output script"
},
{ "exec": "./bitcoin-tx",
- "args": ["-json", "01000000000100000000000000000000000000"],
+ "args": ["-json", "02000000000100000000000000000000000000"],
"output_cmp": "txcreate2.json",
"description": "Parses a transation with no inputs and a single output script (output in json)"
},
{ "exec": "./bitcoin-tx",
"args":
- ["-create",
+ ["-create", "nversion=1",
"in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0",
"set=privatekeys:[\"5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreAnchuDf\"]",
"set=prevtxs:[{\"txid\":\"4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485\",\"vout\":0,\"scriptPubKey\":\"76a91491b24bf9f5288532960ac687abb035127b1d28a588ac\"}]",
"sign=ALL",
"outaddr=0.001:193P6LtvS4nCnkDvM9uXn1gsSRqh4aDAz7"],
- "output_cmp": "txcreatesign.hex",
- "description": "Creates a new transaction with a single input and a single output, and then signs the transaction"
+ "output_cmp": "txcreatesignv1.hex",
+ "description": "Creates a new v1 transaction with a single input and a single output, and then signs the transaction"
},
{ "exec": "./bitcoin-tx",
"args":
["-json",
- "-create",
+ "-create", "nversion=1",
"in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0",
"set=privatekeys:[\"5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreAnchuDf\"]",
"set=prevtxs:[{\"txid\":\"4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485\",\"vout\":0,\"scriptPubKey\":\"76a91491b24bf9f5288532960ac687abb035127b1d28a588ac\"}]",
"sign=ALL",
"outaddr=0.001:193P6LtvS4nCnkDvM9uXn1gsSRqh4aDAz7"],
- "output_cmp": "txcreatesign.json",
- "description": "Creates a new transaction with a single input and a single output, and then signs the transaction (output in json)"
+ "output_cmp": "txcreatesignv1.json",
+ "description": "Creates a new v1 transaction with a single input and a single output, and then signs the transaction (output in json)"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
+ "in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0",
+ "set=privatekeys:[\"5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreAnchuDf\"]",
+ "set=prevtxs:[{\"txid\":\"4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485\",\"vout\":0,\"scriptPubKey\":\"76a91491b24bf9f5288532960ac687abb035127b1d28a588ac\"}]",
+ "sign=ALL",
+ "outaddr=0.001:193P6LtvS4nCnkDvM9uXn1gsSRqh4aDAz7"],
+ "output_cmp": "txcreatesignv2.hex",
+ "description": "Creates a new transaction with a single input and a single output, and then signs the transaction"
},
{ "exec": "./bitcoin-tx",
"args":
@@ -163,12 +174,12 @@
{ "exec": "./bitcoin-tx",
"args":
["-json",
- "-create",
+ "-create", "nversion=1",
"in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0",
"outaddr=0.18:13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
"outdata=4:54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e"],
"output_cmp": "txcreatedata1.json",
- "description": "Creates a new transaction with one input, one address output and one data output (output in json)"
+ "description": "Creates a new v1 transaction with one input, one address output and one data output (output in json)"
},
{ "exec": "./bitcoin-tx",
"args":
diff --git a/src/test/data/blanktx.hex b/src/test/data/blanktxv1.hex
index 36b6f00fb6..36b6f00fb6 100644
--- a/src/test/data/blanktx.hex
+++ b/src/test/data/blanktxv1.hex
diff --git a/src/test/data/blanktxv2.hex b/src/test/data/blanktxv2.hex
new file mode 100644
index 0000000000..22d830eda1
--- /dev/null
+++ b/src/test/data/blanktxv2.hex
@@ -0,0 +1 @@
+02000000000000000000
diff --git a/src/test/data/blanktxv2.json b/src/test/data/blanktxv2.json
new file mode 100644
index 0000000000..266919f445
--- /dev/null
+++ b/src/test/data/blanktxv2.json
@@ -0,0 +1,11 @@
+{
+ "txid": "4ebd325a4b394cff8c57e8317ccf5a8d0e2bdf1b8526f8aad6c8e43d8240621a",
+ "hash": "4ebd325a4b394cff8c57e8317ccf5a8d0e2bdf1b8526f8aad6c8e43d8240621a",
+ "version": 2,
+ "locktime": 0,
+ "vin": [
+ ],
+ "vout": [
+ ],
+ "hex": "02000000000000000000"
+}
diff --git a/src/test/data/tx_valid.json b/src/test/data/tx_valid.json
index 27acb2f16a..a3f47fcee2 100644
--- a/src/test/data/tx_valid.json
+++ b/src/test/data/tx_valid.json
@@ -478,7 +478,7 @@
["1b2a9a426ba603ba357ce7773cb5805cb9c7c2b386d100d1fc9263513188e680", 0, "0x00 0x20 0xd9bbfbe56af7c4b7f960a70d7ea107156913d9e5a26b0a71429df5e097ca6537", 16777215]],
"01000000000102e9b542c5176808107ff1df906f46bb1f2583b16112b95ee5380665ba7fcfc0010000000000ffffffff80e68831516392fcd100d186b3c2c7b95c80b53c77e77c35ba03a66b429a2a1b0000000000ffffffff0280969800000000001976a914de4b231626ef508c9a74a8517e6783c0546d6b2888ac80969800000000001976a9146648a8cd4531e1ec47f35916de8e259237294d1e88ac02483045022100f6a10b8604e6dc910194b79ccfc93e1bc0ec7c03453caaa8987f7d6c3413566002206216229ede9b4d6ec2d325be245c5b508ff0339bf1794078e20bfe0babc7ffe683270063ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac024730440220032521802a76ad7bf74d0e2c218b72cf0cbc867066e2e53db905ba37f130397e02207709e2188ed7f08f4c952d9d13986da504502b8c3be59617e043552f506c46ff83275163ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac00000000", "P2SH,WITNESS"],
-["BIP143 example: Same as the previous example with input-output paris swapped"],
+["BIP143 example: Same as the previous example with input-output pairs swapped"],
[[["1b2a9a426ba603ba357ce7773cb5805cb9c7c2b386d100d1fc9263513188e680", 0, "0x00 0x20 0xd9bbfbe56af7c4b7f960a70d7ea107156913d9e5a26b0a71429df5e097ca6537", 16777215],
["01c0cf7fba650638e55eb91261b183251fbb466f90dff17f10086817c542b5e9", 0, "0x00 0x20 0xba468eea561b26301e4cf69fa34bde4ad60c81e70f059f045ca9a79931004a4d", 16777215]],
"0100000000010280e68831516392fcd100d186b3c2c7b95c80b53c77e77c35ba03a66b429a2a1b0000000000ffffffffe9b542c5176808107ff1df906f46bb1f2583b16112b95ee5380665ba7fcfc0010000000000ffffffff0280969800000000001976a9146648a8cd4531e1ec47f35916de8e259237294d1e88ac80969800000000001976a914de4b231626ef508c9a74a8517e6783c0546d6b2888ac024730440220032521802a76ad7bf74d0e2c218b72cf0cbc867066e2e53db905ba37f130397e02207709e2188ed7f08f4c952d9d13986da504502b8c3be59617e043552f506c46ff83275163ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac02483045022100f6a10b8604e6dc910194b79ccfc93e1bc0ec7c03453caaa8987f7d6c3413566002206216229ede9b4d6ec2d325be245c5b508ff0339bf1794078e20bfe0babc7ffe683270063ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac00000000", "P2SH,WITNESS"],
diff --git a/src/test/data/txcreate1.hex b/src/test/data/txcreate1.hex
index e2981a51c9..9ec6ee3531 100644
--- a/src/test/data/txcreate1.hex
+++ b/src/test/data/txcreate1.hex
@@ -1 +1 @@
-01000000031f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff7cca453133921c50d5025878f7f738d1df891fd359763331935784cf6b9c82bf1200000000fffffffffccd319e04a996c96cfc0bf4c07539aa90bd0b1a700ef72fae535d6504f9a6220100000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0084d717000000001976a914f2d4db28cad6502226ee484ae24505c2885cb12d88ac00000000
+02000000031f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff7cca453133921c50d5025878f7f738d1df891fd359763331935784cf6b9c82bf1200000000fffffffffccd319e04a996c96cfc0bf4c07539aa90bd0b1a700ef72fae535d6504f9a6220100000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0084d717000000001976a914f2d4db28cad6502226ee484ae24505c2885cb12d88ac00000000
diff --git a/src/test/data/txcreate1.json b/src/test/data/txcreate1.json
index 567e8026a3..f83e036f33 100644
--- a/src/test/data/txcreate1.json
+++ b/src/test/data/txcreate1.json
@@ -1,7 +1,7 @@
{
- "txid": "f70f0d6c71416ed538e37549f430ab3665fee2437a42f10238c1bd490e782231",
- "hash": "f70f0d6c71416ed538e37549f430ab3665fee2437a42f10238c1bd490e782231",
- "version": 1,
+ "txid": "fe7d174f42dce0cffa7a527e9bc8368956057619ec817648f6138b98f2533e8f",
+ "hash": "fe7d174f42dce0cffa7a527e9bc8368956057619ec817648f6138b98f2533e8f",
+ "version": 2,
"locktime": 0,
"vin": [
{
@@ -60,5 +60,5 @@
}
}
],
- "hex": "01000000031f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff7cca453133921c50d5025878f7f738d1df891fd359763331935784cf6b9c82bf1200000000fffffffffccd319e04a996c96cfc0bf4c07539aa90bd0b1a700ef72fae535d6504f9a6220100000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0084d717000000001976a914f2d4db28cad6502226ee484ae24505c2885cb12d88ac00000000"
+ "hex": "02000000031f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff7cca453133921c50d5025878f7f738d1df891fd359763331935784cf6b9c82bf1200000000fffffffffccd319e04a996c96cfc0bf4c07539aa90bd0b1a700ef72fae535d6504f9a6220100000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0084d717000000001976a914f2d4db28cad6502226ee484ae24505c2885cb12d88ac00000000"
}
diff --git a/src/test/data/txcreate2.hex b/src/test/data/txcreate2.hex
index 5243c2d02e..38bb7b1046 100644
--- a/src/test/data/txcreate2.hex
+++ b/src/test/data/txcreate2.hex
@@ -1 +1 @@
-01000000000100000000000000000000000000
+02000000000100000000000000000000000000
diff --git a/src/test/data/txcreate2.json b/src/test/data/txcreate2.json
index a70c1d302a..fb5e177db7 100644
--- a/src/test/data/txcreate2.json
+++ b/src/test/data/txcreate2.json
@@ -1,7 +1,7 @@
{
- "txid": "cf90229625e9eb10f6be8156bf6aa5ec2eca19a42b1e05c11f3029b560a32e13",
- "hash": "cf90229625e9eb10f6be8156bf6aa5ec2eca19a42b1e05c11f3029b560a32e13",
- "version": 1,
+ "txid": "0481afb29931341d0d7861d8a2f6f26456fa042abf54a23e96440ed7946e0715",
+ "hash": "0481afb29931341d0d7861d8a2f6f26456fa042abf54a23e96440ed7946e0715",
+ "version": 2,
"locktime": 0,
"vin": [
],
@@ -16,5 +16,5 @@
}
}
],
- "hex": "01000000000100000000000000000000000000"
+ "hex": "02000000000100000000000000000000000000"
}
diff --git a/src/test/data/txcreatedata1.hex b/src/test/data/txcreatedata1.hex
index eccc7604e6..cefd1a05a6 100644
--- a/src/test/data/txcreatedata1.hex
+++ b/src/test/data/txcreatedata1.hex
@@ -1 +1 @@
-01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0084d71700000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000
+02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0084d71700000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000
diff --git a/src/test/data/txcreatedata2.hex b/src/test/data/txcreatedata2.hex
index 3c7644c297..d69cf58ba1 100644
--- a/src/test/data/txcreatedata2.hex
+++ b/src/test/data/txcreatedata2.hex
@@ -1 +1 @@
-01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0000000000000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000
+02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0000000000000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000
diff --git a/src/test/data/txcreatedata2.json b/src/test/data/txcreatedata2.json
index 56dfe4a1b0..3c6da40f90 100644
--- a/src/test/data/txcreatedata2.json
+++ b/src/test/data/txcreatedata2.json
@@ -1,7 +1,7 @@
{
- "txid": "4ed17118f5e932ba8c75c461787d171bc02a016d8557cb5bcf34cd416c27bb8b",
- "hash": "4ed17118f5e932ba8c75c461787d171bc02a016d8557cb5bcf34cd416c27bb8b",
- "version": 1,
+ "txid": "c14b007fa3a6c1e7765919c1d14c1cfc2b8642c3a5d3be4b1fa8c4ccfec98bb0",
+ "hash": "c14b007fa3a6c1e7765919c1d14c1cfc2b8642c3a5d3be4b1fa8c4ccfec98bb0",
+ "version": 2,
"locktime": 0,
"vin": [
{
@@ -38,5 +38,5 @@
}
}
],
- "hex": "01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0000000000000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000"
+ "hex": "02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000ffffffff0280a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac0000000000000000526a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e00000000"
}
diff --git a/src/test/data/txcreatedata_seq0.hex b/src/test/data/txcreatedata_seq0.hex
index db02b5e4a4..54b89d2381 100644
--- a/src/test/data/txcreatedata_seq0.hex
+++ b/src/test/data/txcreatedata_seq0.hex
@@ -1 +1 @@
-01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff0180a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac00000000
+02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff0180a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac00000000
diff --git a/src/test/data/txcreatedata_seq0.json b/src/test/data/txcreatedata_seq0.json
index 9bc0ed4593..d272a4c447 100644
--- a/src/test/data/txcreatedata_seq0.json
+++ b/src/test/data/txcreatedata_seq0.json
@@ -1,7 +1,7 @@
{
- "txid": "71603ccb1cd76d73d76eb6cfd5f0b9df6d65d90d76860ee52cb461c4be7032e8",
- "hash": "71603ccb1cd76d73d76eb6cfd5f0b9df6d65d90d76860ee52cb461c4be7032e8",
- "version": 1,
+ "txid": "8df6ed527472542dd5e137c242a7c5a9f337ac34f7b257ae4af886aeaebb51b0",
+ "hash": "8df6ed527472542dd5e137c242a7c5a9f337ac34f7b257ae4af886aeaebb51b0",
+ "version": 2,
"locktime": 0,
"vin": [
{
@@ -29,5 +29,5 @@
}
}
],
- "hex": "01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff0180a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac00000000"
+ "hex": "02000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff0180a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac00000000"
}
diff --git a/src/test/data/txcreatesign.hex b/src/test/data/txcreatesignv1.hex
index a46fcc88cb..a46fcc88cb 100644
--- a/src/test/data/txcreatesign.hex
+++ b/src/test/data/txcreatesignv1.hex
diff --git a/src/test/data/txcreatesign.json b/src/test/data/txcreatesignv1.json
index ff39e71b40..ff39e71b40 100644
--- a/src/test/data/txcreatesign.json
+++ b/src/test/data/txcreatesignv1.json
diff --git a/src/test/data/txcreatesignv2.hex b/src/test/data/txcreatesignv2.hex
new file mode 100644
index 0000000000..ee425cd98c
--- /dev/null
+++ b/src/test/data/txcreatesignv2.hex
@@ -0,0 +1 @@
+02000000018594c5bdcaec8f06b78b596f31cd292a294fd031e24eec716f43dac91ea7494d000000008a473044022079c7aa014177a2e973caf6df7c7b8f15399083b91eba370ea1e19c4caed9181e02205f8f8763505ce8e6cbdd2cd28fab3fd407a75003e7d0dc04e6bebb0a3c89e7cb01410479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8ffffffff01a0860100000000001976a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac00000000
diff --git a/src/test/hash_tests.cpp b/src/test/hash_tests.cpp
index fa9624f13d..8be9c580c4 100644
--- a/src/test/hash_tests.cpp
+++ b/src/test/hash_tests.cpp
@@ -124,7 +124,11 @@ BOOST_AUTO_TEST_CASE(siphash)
}
CHashWriter ss(SER_DISK, CLIENT_VERSION);
- ss << CTransaction();
+ CMutableTransaction tx;
+ // Note: this test was originally written with tx.nVersion=1, and would be
+ // affected by bumps of the default tx version if the version were not pinned here.
+ tx.nVersion = 1;
+ ss << tx;
BOOST_CHECK_EQUAL(SipHashUint256(1, 2, ss.GetHash()), 0x79751e980c2a0a35ULL);
}
diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp
index 1faf8b6aeb..84871600b2 100644
--- a/src/test/mempool_tests.cpp
+++ b/src/test/mempool_tests.cpp
@@ -55,17 +55,17 @@ BOOST_AUTO_TEST_CASE(MempoolRemoveTest)
CTxMemPool testPool(CFeeRate(0));
- std::vector<CTransactionRef> removed;
// Nothing in pool, remove should do nothing:
- testPool.removeRecursive(txParent, &removed);
- BOOST_CHECK_EQUAL(removed.size(), 0);
+ unsigned int poolSize = testPool.size();
+ testPool.removeRecursive(txParent);
+ BOOST_CHECK_EQUAL(testPool.size(), poolSize);
// Just the parent:
testPool.addUnchecked(txParent.GetHash(), entry.FromTx(txParent));
- testPool.removeRecursive(txParent, &removed);
- BOOST_CHECK_EQUAL(removed.size(), 1);
- removed.clear();
+ poolSize = testPool.size();
+ testPool.removeRecursive(txParent);
+ BOOST_CHECK_EQUAL(testPool.size(), poolSize - 1);
// Parent, children, grandchildren:
testPool.addUnchecked(txParent.GetHash(), entry.FromTx(txParent));
@@ -75,19 +75,21 @@ BOOST_AUTO_TEST_CASE(MempoolRemoveTest)
testPool.addUnchecked(txGrandChild[i].GetHash(), entry.FromTx(txGrandChild[i]));
}
// Remove Child[0], GrandChild[0] should be removed:
- testPool.removeRecursive(txChild[0], &removed);
- BOOST_CHECK_EQUAL(removed.size(), 2);
- removed.clear();
+ poolSize = testPool.size();
+ testPool.removeRecursive(txChild[0]);
+ BOOST_CHECK_EQUAL(testPool.size(), poolSize - 2);
// ... make sure grandchild and child are gone:
- testPool.removeRecursive(txGrandChild[0], &removed);
- BOOST_CHECK_EQUAL(removed.size(), 0);
- testPool.removeRecursive(txChild[0], &removed);
- BOOST_CHECK_EQUAL(removed.size(), 0);
+ poolSize = testPool.size();
+ testPool.removeRecursive(txGrandChild[0]);
+ BOOST_CHECK_EQUAL(testPool.size(), poolSize);
+ poolSize = testPool.size();
+ testPool.removeRecursive(txChild[0]);
+ BOOST_CHECK_EQUAL(testPool.size(), poolSize);
// Remove parent, all children/grandchildren should go:
- testPool.removeRecursive(txParent, &removed);
- BOOST_CHECK_EQUAL(removed.size(), 5);
+ poolSize = testPool.size();
+ testPool.removeRecursive(txParent);
+ BOOST_CHECK_EQUAL(testPool.size(), poolSize - 5);
BOOST_CHECK_EQUAL(testPool.size(), 0);
- removed.clear();
// Add children and grandchildren, but NOT the parent (simulate the parent being in a block)
for (int i = 0; i < 3; i++)
@@ -97,10 +99,10 @@ BOOST_AUTO_TEST_CASE(MempoolRemoveTest)
}
// Now remove the parent, as might happen if a block-re-org occurs but the parent cannot be
// put into the mempool (maybe because it is non-standard):
- testPool.removeRecursive(txParent, &removed);
- BOOST_CHECK_EQUAL(removed.size(), 6);
+ poolSize = testPool.size();
+ testPool.removeRecursive(txParent);
+ BOOST_CHECK_EQUAL(testPool.size(), poolSize - 6);
BOOST_CHECK_EQUAL(testPool.size(), 0);
- removed.clear();
}
template<typename name>
@@ -388,7 +390,12 @@ BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest)
pool.addUnchecked(tx6.GetHash(), entry.Fee(0LL).FromTx(tx6));
BOOST_CHECK_EQUAL(pool.size(), 6);
- sortedOrder.push_back(tx6.GetHash().ToString());
+ // Ties are broken by hash
+ if (tx3.GetHash() < tx6.GetHash())
+ sortedOrder.push_back(tx6.GetHash().ToString());
+ else
+ sortedOrder.insert(sortedOrder.end()-1,tx6.GetHash().ToString());
+
CheckSort<ancestor_score>(pool, sortedOrder);
CMutableTransaction tx7 = CMutableTransaction();
@@ -412,10 +419,14 @@ BOOST_AUTO_TEST_CASE(MempoolAncestorIndexingTest)
/* after tx6 is mined, tx7 should move up in the sort */
std::vector<CTransactionRef> vtx;
vtx.push_back(MakeTransactionRef(tx6));
- pool.removeForBlock(vtx, 1, NULL, false);
+ pool.removeForBlock(vtx, 1);
sortedOrder.erase(sortedOrder.begin()+1);
- sortedOrder.pop_back();
+ // Ties are broken by hash
+ if (tx3.GetHash() < tx6.GetHash())
+ sortedOrder.pop_back();
+ else
+ sortedOrder.erase(sortedOrder.end()-2);
sortedOrder.insert(sortedOrder.begin(), tx7.GetHash().ToString());
CheckSort<ancestor_score>(pool, sortedOrder);
}
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index bc1bdd8874..892e731a7a 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -224,7 +224,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
pblock->nNonce = blockinfo[i].nonce;
std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock);
- BOOST_CHECK(ProcessNewBlock(chainparams, shared_pblock, true, NULL, NULL));
+ BOOST_CHECK(ProcessNewBlock(chainparams, shared_pblock, true, NULL));
pblock->hashPrevBlock = pblock->GetHash();
}
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index 84498c5f10..47335ade87 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -175,10 +175,10 @@ void DoTest(const CScript& scriptPubKey, const CScript& scriptSig, const CScript
int libconsensus_flags = flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL;
if (libconsensus_flags == flags) {
if (flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS) {
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(begin_ptr(scriptPubKey), scriptPubKey.size(), txCredit.vout[0].nValue, (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, NULL) == expect, message);
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), txCredit.vout[0].nValue, (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, NULL) == expect, message);
} else {
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(begin_ptr(scriptPubKey), scriptPubKey.size(), 0, (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, NULL) == expect, message);
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script(begin_ptr(scriptPubKey), scriptPubKey.size(), (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, NULL) == expect,message);
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), 0, (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, NULL) == expect, message);
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, NULL) == expect,message);
}
}
#endif
diff --git a/src/test/test_bitcoin.cpp b/src/test/test_bitcoin.cpp
index 139389117a..2a5a78de02 100644
--- a/src/test/test_bitcoin.cpp
+++ b/src/test/test_bitcoin.cpp
@@ -20,6 +20,7 @@
#include "ui_interface.h"
#include "rpc/server.h"
#include "rpc/register.h"
+#include "script/sigcache.h"
#include "test/testutil.h"
@@ -40,6 +41,7 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName)
ECC_Start();
SetupEnvironment();
SetupNetworking();
+ InitSignatureCache();
fPrintToDebugLog = false; // don't want to write to debug.log file
fCheckBlockIndex = true;
SelectParams(chainName);
@@ -128,7 +130,7 @@ TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransaction>&
while (!CheckProofOfWork(block.GetHash(), block.nBits, chainparams.GetConsensus())) ++block.nNonce;
std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(block);
- ProcessNewBlock(chainparams, shared_pblock, true, NULL, NULL);
+ ProcessNewBlock(chainparams, shared_pblock, true, NULL);
CBlock result = block;
return result;
diff --git a/src/test/test_bitcoin_fuzzy.cpp b/src/test/test_bitcoin_fuzzy.cpp
new file mode 100644
index 0000000000..584e6ed008
--- /dev/null
+++ b/src/test/test_bitcoin_fuzzy.cpp
@@ -0,0 +1,256 @@
+// Copyright (c) 2009-2015 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#if defined(HAVE_CONFIG_H)
+#include "config/bitcoin-config.h"
+#endif
+
+#include "consensus/merkle.h"
+#include "primitives/block.h"
+#include "script/script.h"
+#include "addrman.h"
+#include "chain.h"
+#include "coins.h"
+#include "compressor.h"
+#include "net.h"
+#include "protocol.h"
+#include "streams.h"
+#include "undo.h"
+#include "version.h"
+
+#include <stdint.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <vector>
+
+enum TEST_ID {
+ CBLOCK_DESERIALIZE=0,
+ CTRANSACTION_DESERIALIZE,
+ CBLOCKLOCATOR_DESERIALIZE,
+ CBLOCKMERKLEROOT,
+ CADDRMAN_DESERIALIZE,
+ CBLOCKHEADER_DESERIALIZE,
+ CBANENTRY_DESERIALIZE,
+ CTXUNDO_DESERIALIZE,
+ CBLOCKUNDO_DESERIALIZE,
+ CCOINS_DESERIALIZE,
+ CNETADDR_DESERIALIZE,
+ CSERVICE_DESERIALIZE,
+ CMESSAGEHEADER_DESERIALIZE,
+ CADDRESS_DESERIALIZE,
+ CINV_DESERIALIZE,
+ CBLOOMFILTER_DESERIALIZE,
+ CDISKBLOCKINDEX_DESERIALIZE,
+ CTXOUTCOMPRESSOR_DESERIALIZE,
+ TEST_ID_END
+};
+
+bool read_stdin(std::vector<char> &data) {
+ char buffer[1024];
+ ssize_t length=0;
+ while((length = read(STDIN_FILENO, buffer, 1024)) > 0) {
+ data.insert(data.end(), buffer, buffer+length);
+
+ if (data.size() > (1<<20)) return false;
+ }
+ return length==0;
+}
+
+int main(int argc, char **argv)
+{
+ std::vector<char> buffer;
+ if (!read_stdin(buffer)) return 0;
+
+ if (buffer.size() < sizeof(uint32_t)) return 0;
+
+ uint32_t test_id = 0xffffffff;
+ memcpy(&test_id, &buffer[0], sizeof(uint32_t));
+ buffer.erase(buffer.begin(), buffer.begin() + sizeof(uint32_t));
+
+ if (test_id >= TEST_ID_END) return 0;
+
+ CDataStream ds(buffer, SER_NETWORK, INIT_PROTO_VERSION);
+ try {
+ int nVersion;
+ ds >> nVersion;
+ ds.SetVersion(nVersion);
+ } catch (const std::ios_base::failure& e) {
+ return 0;
+ }
+
+ switch(test_id) {
+ case CBLOCK_DESERIALIZE:
+ {
+ try
+ {
+ CBlock block;
+ ds >> block;
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CTRANSACTION_DESERIALIZE:
+ {
+ try
+ {
+ CTransaction tx(deserialize, ds);
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CBLOCKLOCATOR_DESERIALIZE:
+ {
+ try
+ {
+ CBlockLocator bl;
+ ds >> bl;
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CBLOCKMERKLEROOT:
+ {
+ try
+ {
+ CBlock block;
+ ds >> block;
+ bool mutated;
+ BlockMerkleRoot(block, &mutated);
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CADDRMAN_DESERIALIZE:
+ {
+ try
+ {
+ CAddrMan am;
+ ds >> am;
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CBLOCKHEADER_DESERIALIZE:
+ {
+ try
+ {
+ CBlockHeader bh;
+ ds >> bh;
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CBANENTRY_DESERIALIZE:
+ {
+ try
+ {
+ CBanEntry be;
+ ds >> be;
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CTXUNDO_DESERIALIZE:
+ {
+ try
+ {
+ CTxUndo tu;
+ ds >> tu;
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CBLOCKUNDO_DESERIALIZE:
+ {
+ try
+ {
+ CBlockUndo bu;
+ ds >> bu;
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CCOINS_DESERIALIZE:
+ {
+ try
+ {
+ CCoins block;
+ ds >> block;
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CNETADDR_DESERIALIZE:
+ {
+ try
+ {
+ CNetAddr na;
+ ds >> na;
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CSERVICE_DESERIALIZE:
+ {
+ try
+ {
+ CService s;
+ ds >> s;
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CMESSAGEHEADER_DESERIALIZE:
+ {
+ CMessageHeader::MessageStartChars pchMessageStart = {0x00, 0x00, 0x00, 0x00};
+ try
+ {
+ CMessageHeader mh(pchMessageStart);
+ ds >> mh;
+ if (!mh.IsValid(pchMessageStart)) {return 0;}
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CADDRESS_DESERIALIZE:
+ {
+ try
+ {
+ CAddress a;
+ ds >> a;
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CINV_DESERIALIZE:
+ {
+ try
+ {
+ CInv i;
+ ds >> i;
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CBLOOMFILTER_DESERIALIZE:
+ {
+ try
+ {
+ CBloomFilter bf;
+ ds >> bf;
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CDISKBLOCKINDEX_DESERIALIZE:
+ {
+ try
+ {
+ CDiskBlockIndex dbi;
+ ds >> dbi;
+ } catch (const std::ios_base::failure& e) {return 0;}
+ break;
+ }
+ case CTXOUTCOMPRESSOR_DESERIALIZE:
+ {
+ CTxOut to;
+ CTxOutCompressor toc(to);
+ try
+ {
+ ds >> toc;
+ } catch (const std::ios_base::failure& e) {return 0;}
+
+ break;
+ }
+ default:
+ return 0;
+ }
+ return 0;
+}
+
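
A hypothetical way to produce a seed input for this harness, matching the layout main() reads above (a raw 4-byte test id, then a serialized nVersion, then the object bytes); the choice of test id 5 (CBLOCKHEADER_DESERIALIZE) and of an empty header is arbitrary, and this generator is not part of the patch:

    #include "primitives/block.h"
    #include "streams.h"
    #include "version.h"
    #include <stdint.h>
    #include <cstdio>

    int main()
    {
        const uint32_t test_id = 5;                   // CBLOCKHEADER_DESERIALIZE in the enum above
        fwrite(&test_id, sizeof(test_id), 1, stdout); // raw 4 bytes, read back with memcpy by the harness
        CDataStream ds(SER_NETWORK, INIT_PROTO_VERSION);
        ds << INIT_PROTO_VERSION;                     // the nVersion the harness deserializes first
        ds << CBlockHeader();                         // payload: an all-zero block header
        const std::string payload = ds.str();
        fwrite(payload.data(), 1, payload.size(), stdout);
        return 0;
    }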
diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp
index 55b4d28fb2..9f58f004d5 100644
--- a/src/test/txvalidationcache_tests.cpp
+++ b/src/test/txvalidationcache_tests.cpp
@@ -39,6 +39,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup)
spends.resize(2);
for (int i = 0; i < 2; i++)
{
+ spends[i].nVersion = 1;
spends[i].vin.resize(1);
spends[i].vin[0].prevout.hash = coinbaseTxns[0].GetHash();
spends[i].vin[0].prevout.n = 0;
diff --git a/src/timedata.cpp b/src/timedata.cpp
index 25fc494121..b3cbdcf883 100644
--- a/src/timedata.cpp
+++ b/src/timedata.cpp
@@ -13,6 +13,7 @@
#include "ui_interface.h"
#include "util.h"
#include "utilstrencodings.h"
+#include "warnings.h"
#include <boost/foreach.hpp>
@@ -103,8 +104,8 @@ void AddTimeData(const CNetAddr& ip, int64_t nOffsetSample)
if (!fMatch)
{
fDone = true;
- string strMessage = strprintf(_("Please check that your computer's date and time are correct! If your clock is wrong, %s will not work properly."), _(PACKAGE_NAME));
- strMiscWarning = strMessage;
+ std::string strMessage = strprintf(_("Please check that your computer's date and time are correct! If your clock is wrong, %s will not work properly."), _(PACKAGE_NAME));
+ SetMiscWarning(strMessage);
uiInterface.ThreadSafeMessageBox(strMessage, "", CClientUIInterface::MSG_WARNING);
}
}
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index ffb9993f90..67a69363f0 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -501,10 +501,10 @@ static std::vector<uint8_t> ComputeResponse(const std::string &key, const std::v
{
CHMAC_SHA256 computeHash((const uint8_t*)key.data(), key.size());
std::vector<uint8_t> computedHash(CHMAC_SHA256::OUTPUT_SIZE, 0);
- computeHash.Write(begin_ptr(cookie), cookie.size());
- computeHash.Write(begin_ptr(clientNonce), clientNonce.size());
- computeHash.Write(begin_ptr(serverNonce), serverNonce.size());
- computeHash.Finalize(begin_ptr(computedHash));
+ computeHash.Write(cookie.data(), cookie.size());
+ computeHash.Write(clientNonce.data(), clientNonce.size());
+ computeHash.Write(serverNonce.data(), serverNonce.size());
+ computeHash.Finalize(computedHash.data());
return computedHash;
}
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index c035a84db5..c3da69c3f0 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -503,7 +503,7 @@ void CTxMemPool::CalculateDescendants(txiter entryit, setEntries &setDescendants
}
}
-void CTxMemPool::removeRecursive(const CTransaction &origTx, std::vector<CTransactionRef>* removed)
+void CTxMemPool::removeRecursive(const CTransaction &origTx)
{
// Remove transaction from memory pool
{
@@ -530,11 +530,6 @@ void CTxMemPool::removeRecursive(const CTransaction &origTx, std::vector<CTransa
BOOST_FOREACH(txiter it, txToRemove) {
CalculateDescendants(it, setAllRemoves);
}
- if (removed) {
- BOOST_FOREACH(txiter it, setAllRemoves) {
- removed->emplace_back(it->GetSharedTx());
- }
- }
RemoveStaged(setAllRemoves, false);
}
}
@@ -576,7 +571,7 @@ void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMem
RemoveStaged(setAllRemoves, false);
}
-void CTxMemPool::removeConflicts(const CTransaction &tx, std::vector<CTransactionRef>* removed)
+void CTxMemPool::removeConflicts(const CTransaction &tx)
{
// Remove transactions which depend on inputs of tx, recursively
LOCK(cs);
@@ -586,7 +581,7 @@ void CTxMemPool::removeConflicts(const CTransaction &tx, std::vector<CTransactio
const CTransaction &txConflict = *it->second;
if (txConflict != tx)
{
- removeRecursive(txConflict, removed);
+ removeRecursive(txConflict);
ClearPrioritisation(txConflict.GetHash());
}
}
@@ -597,7 +592,7 @@ void CTxMemPool::removeConflicts(const CTransaction &tx, std::vector<CTransactio
* Called when a block is connected. Removes from mempool and updates the miner fee estimator.
*/
void CTxMemPool::removeForBlock(const std::vector<CTransactionRef>& vtx, unsigned int nBlockHeight,
- std::vector<CTransactionRef>* conflicts, bool fCurrentEstimate)
+ bool fCurrentEstimate)
{
LOCK(cs);
std::vector<CTxMemPoolEntry> entries;
@@ -617,7 +612,7 @@ void CTxMemPool::removeForBlock(const std::vector<CTransactionRef>& vtx, unsigne
stage.insert(it);
RemoveStaged(stage, true);
}
- removeConflicts(*tx, conflicts);
+ removeConflicts(*tx);
ClearPrioritisation(tx->GetHash());
}
// After the txs in the new block have been removed from the mempool, update policy estimates
@@ -1142,3 +1137,10 @@ void CTxMemPool::TrimToSize(size_t sizelimit, std::vector<uint256>* pvNoSpendsRe
if (maxFeeRateRemoved > CFeeRate(0))
LogPrint("mempool", "Removed %u txn, rolling minimum fee bumped to %s\n", nTxnRemoved, maxFeeRateRemoved.ToString());
}
+
+bool CTxMemPool::TransactionWithinChainLimit(const uint256& txid, size_t chainLimit) const {
+ LOCK(cs);
+ auto it = mapTx.find(txid);
+ return it == mapTx.end() || (it->GetCountWithAncestors() < chainLimit &&
+ it->GetCountWithDescendants() < chainLimit);
+}
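
A hedged sketch of the intended call pattern for the new accessor (the caller, its limit of 25, and the surrounding loop are hypothetical, not part of this patch):

    // Illustrative fragment: assumes a uint256 txid and the global mempool are in scope.
    // Skip outputs whose in-mempool ancestor or descendant count has already reached
    // the limit, so spending them would not build an overly long unconfirmed chain.
    if (!mempool.TransactionWithinChainLimit(txid, 25))
        continue;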
diff --git a/src/txmempool.h b/src/txmempool.h
index 23fe5a7abe..8a5787a886 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -527,11 +527,11 @@ public:
bool addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry, bool fCurrentEstimate = true);
bool addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry, setEntries &setAncestors, bool fCurrentEstimate = true);
- void removeRecursive(const CTransaction &tx, std::vector<CTransactionRef>* removed = NULL);
+ void removeRecursive(const CTransaction &tx);
void removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags);
- void removeConflicts(const CTransaction &tx, std::vector<CTransactionRef>* removed = NULL);
+ void removeConflicts(const CTransaction &tx);
void removeForBlock(const std::vector<CTransactionRef>& vtx, unsigned int nBlockHeight,
- std::vector<CTransactionRef>* conflicts = NULL, bool fCurrentEstimate = true);
+ bool fCurrentEstimate = true);
void clear();
void _clear(); //lock free
bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb);
@@ -605,6 +605,9 @@ public:
/** Expire all transaction (and their dependencies) in the mempool older than time. Return the number of removed transactions. */
int Expire(int64_t time);
+ /** Returns false if the transaction is in the mempool and not within the chain limit specified. */
+ bool TransactionWithinChainLimit(const uint256& txid, size_t chainLimit) const;
+
unsigned long size()
{
LOCK(cs);
diff --git a/src/util.cpp b/src/util.cpp
index 014013d214..977f8993ed 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -107,7 +107,7 @@ map<string, vector<string> > mapMultiArgs;
bool fDebug = false;
bool fPrintToConsole = false;
bool fPrintToDebugLog = true;
-string strMiscWarning;
+
bool fLogTimestamps = DEFAULT_LOGTIMESTAMPS;
bool fLogTimeMicros = DEFAULT_LOGTIMEMICROS;
bool fLogIPs = DEFAULT_LOGIPS;
@@ -704,13 +704,13 @@ void ShrinkDebugFile()
// Restart the file with some of the end
std::vector <char> vch(200000,0);
fseek(file, -((long)vch.size()), SEEK_END);
- int nBytes = fread(begin_ptr(vch), 1, vch.size(), file);
+ int nBytes = fread(vch.data(), 1, vch.size(), file);
fclose(file);
file = fopen(pathLog.string().c_str(), "w");
if (file)
{
- fwrite(begin_ptr(vch), 1, nBytes, file);
+ fwrite(vch.data(), 1, nBytes, file);
fclose(file);
}
}
diff --git a/src/util.h b/src/util.h
index e8aa266f28..3ec38a7c7d 100644
--- a/src/util.h
+++ b/src/util.h
@@ -46,7 +46,7 @@ extern std::map<std::string, std::vector<std::string> > mapMultiArgs;
extern bool fDebug;
extern bool fPrintToConsole;
extern bool fPrintToDebugLog;
-extern std::string strMiscWarning;
+
extern bool fLogTimestamps;
extern bool fLogTimeMicros;
extern bool fLogIPs;
diff --git a/src/validation.cpp b/src/validation.cpp
index 9a08018113..bd60bbfdcd 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -34,6 +34,7 @@
#include "utilstrencodings.h"
#include "validationinterface.h"
#include "versionbits.h"
+#include "warnings.h"
#include <atomic>
#include <sstream>
@@ -540,14 +541,6 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
if (tx.IsCoinBase())
return state.DoS(100, false, REJECT_INVALID, "coinbase");
- // Don't relay version 2 transactions until CSV is active, and we can be
- // sure that such transactions will be mined (unless we're on
- // -testnet/-regtest).
- const CChainParams& chainparams = Params();
- if (fRequireStandard && tx.nVersion >= 2 && VersionBitsTipState(chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV) != THRESHOLD_ACTIVE) {
- return state.DoS(0, false, REJECT_NONSTANDARD, "premature-version2-tx");
- }
-
// Reject transactions with witness before segregated witness activates (override with -prematurewitness)
bool witnessEnabled = IsWitnessEnabled(chainActive.Tip(), Params().GetConsensus());
if (!GetBoolArg("-prematurewitness",false) && tx.HasWitness() && !witnessEnabled) {
@@ -1149,8 +1142,6 @@ bool IsInitialBlockDownload()
return false;
}
-bool fLargeWorkForkFound = false;
-bool fLargeWorkInvalidChainFound = false;
CBlockIndex *pindexBestForkTip = NULL, *pindexBestForkBase = NULL;
static void AlertNotify(const std::string& strMessage)
@@ -1185,7 +1176,7 @@ void CheckForkWarningConditions()
if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
{
- if (!fLargeWorkForkFound && pindexBestForkBase)
+ if (!GetfLargeWorkForkFound() && pindexBestForkBase)
{
std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
pindexBestForkBase->phashBlock->ToString() + std::string("'");
@@ -1196,18 +1187,18 @@ void CheckForkWarningConditions()
LogPrintf("%s: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
- fLargeWorkForkFound = true;
+ SetfLargeWorkForkFound(true);
}
else
{
LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
- fLargeWorkInvalidChainFound = true;
+ SetfLargeWorkInvalidChainFound(true);
}
}
else
{
- fLargeWorkForkFound = false;
- fLargeWorkInvalidChainFound = false;
+ SetfLargeWorkForkFound(false);
+ SetfLargeWorkInvalidChainFound(false);
}
}
@@ -1483,7 +1474,7 @@ bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uin
/** Abort with a message */
bool AbortNode(const std::string& strMessage, const std::string& userMessage="")
{
- strMiscWarning = strMessage;
+ SetMiscWarning(strMessage);
LogPrintf("*** %s\n", strMessage);
uiInterface.ThreadSafeMessageBox(
userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage,
@@ -2052,9 +2043,10 @@ void static UpdateTip(CBlockIndex *pindexNew, const CChainParams& chainParams) {
ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]);
if (state == THRESHOLD_ACTIVE || state == THRESHOLD_LOCKED_IN) {
if (state == THRESHOLD_ACTIVE) {
- strMiscWarning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit);
+ std::string strWarning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit);
+ SetMiscWarning(strWarning);
if (!fWarned) {
- AlertNotify(strMiscWarning);
+ AlertNotify(strWarning);
fWarned = true;
}
} else {
@@ -2074,10 +2066,11 @@ void static UpdateTip(CBlockIndex *pindexNew, const CChainParams& chainParams) {
warningMessages.push_back(strprintf("%d of last 100 blocks have unexpected version", nUpgraded));
if (nUpgraded > 100/2)
{
- // strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
- strMiscWarning = _("Warning: Unknown block versions being mined! It's possible unknown rules are in effect");
+ std::string strWarning = _("Warning: Unknown block versions being mined! It's possible unknown rules are in effect");
+ // notify GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
+ SetMiscWarning(strWarning);
if (!fWarned) {
- AlertNotify(strMiscWarning);
+ AlertNotify(strWarning);
fWarned = true;
}
}
@@ -2108,7 +2101,8 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara
CCoinsViewCache view(pcoinsTip);
if (!DisconnectBlock(block, state, pindexDelete, view))
return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
- assert(view.Flush());
+ bool flushed = view.Flush();
+ assert(flushed);
}
LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
// Write the chain state to disk, if necessary.
@@ -2153,11 +2147,10 @@ static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
/**
- * Used to track conflicted transactions removed from mempool and transactions
- * applied to the UTXO state as a part of a single ActivateBestChainStep call.
+ * Used to track blocks whose transactions were applied to the UTXO state as a
+ * part of a single ActivateBestChainStep call.
*/
struct ConnectTrace {
- std::vector<CTransactionRef> txConflicted;
std::vector<std::pair<CBlockIndex*, std::shared_ptr<const CBlock> > > blocksConnected;
};
@@ -2198,7 +2191,8 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams,
}
nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
LogPrint("bench", " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
- assert(view.Flush());
+ bool flushed = view.Flush();
+ assert(flushed);
}
int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
LogPrint("bench", " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
@@ -2208,7 +2202,7 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams,
int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
// Remove conflicting transactions from the mempool.;
- mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight, &connectTrace.txConflicted, !IsInitialBlockDownload());
+ mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight, !IsInitialBlockDownload());
// Update chainActive & related variables.
UpdateTip(pindexNew, chainparams);
@@ -2433,11 +2427,6 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams,
// throw all transactions though the signal-interface
// while _not_ holding the cs_main lock
- for (const auto& tx : connectTrace.txConflicted)
- {
- GetMainSignals().SyncTransaction(*tx, pindexNewTip, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK);
- }
- // ... and about transactions that got confirmed:
for (const auto& pair : connectTrace.blocksConnected) {
assert(pair.second);
const CBlock& block = *(pair.second);
@@ -3127,7 +3116,7 @@ static bool AcceptBlock(const CBlock& block, CValidationState& state, const CCha
return true;
}
-bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, const CDiskBlockPos* dbp, bool *fNewBlock)
+bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool *fNewBlock)
{
{
LOCK(cs_main);
@@ -3136,7 +3125,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
CBlockIndex *pindex = NULL;
if (fNewBlock) *fNewBlock = false;
CValidationState state;
- bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fForceProcessing, dbp, fNewBlock);
+ bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fForceProcessing, NULL, fNewBlock);
CheckBlockIndex(chainparams.GetConsensus());
if (!ret) {
GetMainSignals().BlockChecked(*pblock, state);
@@ -4021,51 +4010,10 @@ void static CheckBlockIndex(const Consensus::Params& consensusParams)
assert(nNodes == forward.size());
}
-std::string GetWarnings(const std::string& strFor)
+std::string CBlockFileInfo::ToString() const
{
- string strStatusBar;
- string strRPC;
- string strGUI;
- const string uiAlertSeperator = "<hr />";
-
- if (!CLIENT_VERSION_IS_RELEASE) {
- strStatusBar = "This is a pre-release test build - use at your own risk - do not use for mining or merchant applications";
- strGUI = _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications");
- }
-
- if (GetBoolArg("-testsafemode", DEFAULT_TESTSAFEMODE))
- strStatusBar = strRPC = strGUI = "testsafemode enabled";
-
- // Misc warnings like out of disk space and clock is wrong
- if (strMiscWarning != "")
- {
- strStatusBar = strMiscWarning;
- strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + strMiscWarning;
- }
-
- if (fLargeWorkForkFound)
- {
- strStatusBar = strRPC = "Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.";
- strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.");
- }
- else if (fLargeWorkInvalidChainFound)
- {
- strStatusBar = strRPC = "Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.";
- strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.");
- }
-
- if (strFor == "gui")
- return strGUI;
- else if (strFor == "statusbar")
- return strStatusBar;
- else if (strFor == "rpc")
- return strRPC;
- assert(!"GetWarnings(): invalid parameter");
- return "error";
+ return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast));
}
- std::string CBlockFileInfo::ToString() const {
- return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast));
- }
ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
diff --git a/src/validation.h b/src/validation.h
index 28e81d21c6..f0f54786c7 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -137,7 +137,6 @@ static const bool DEFAULT_CHECKPOINTS_ENABLED = true;
static const bool DEFAULT_TXINDEX = false;
static const unsigned int DEFAULT_BANSCORE_THRESHOLD = 100;
-static const bool DEFAULT_TESTSAFEMODE = false;
/** Default for -mempoolreplacement */
static const bool DEFAULT_ENABLE_REPLACEMENT = true;
/** Default for using fee filter */
@@ -229,11 +228,10 @@ static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES = 550 * 1024 * 1024;
*
* @param[in] pblock The block we want to process.
* @param[in] fForceProcessing Process this block even if unrequested; used for non-network block sources and whitelisted peers.
- * @param[out] dbp The already known disk position of pblock, or NULL if not yet stored.
* @param[out] fNewBlock A boolean which is set to indicate if the block was first received via this call
* @return True if state.IsValid()
*/
-bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, const CDiskBlockPos* dbp, bool* fNewBlock);
+bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool* fNewBlock);
/**
* Process incoming block headers.
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 9e32b6751d..5a4fcc743c 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -362,7 +362,7 @@ static void SendMoney(const CTxDestination &address, CAmount nValue, bool fSubtr
CRecipient recipient = {scriptPubKey, nValue, fSubtractFeeFromAmount};
vecSend.push_back(recipient);
if (!pwalletMain->CreateTransaction(vecSend, wtxNew, reservekey, nFeeRequired, nChangePosRet, strError)) {
- if (!fSubtractFeeFromAmount && nValue + nFeeRequired > pwalletMain->GetBalance())
+ if (!fSubtractFeeFromAmount && nValue + nFeeRequired > curBalance)
strError = strprintf("Error: This transaction requires a transaction fee of at least %s", FormatMoney(nFeeRequired));
throw JSONRPCError(RPC_WALLET_ERROR, strError);
}
@@ -1792,7 +1792,7 @@ UniValue gettransaction(const JSONRPCRequest& request)
ListTransactions(wtx, "*", 0, false, details, filter);
entry.push_back(Pair("details", details));
- string strHex = EncodeHexTx(static_cast<CTransaction>(wtx));
+ string strHex = EncodeHexTx(static_cast<CTransaction>(wtx), RPCSerializationFlags());
entry.push_back(Pair("hex", strHex));
return entry;
diff --git a/src/wallet/test/crypto_tests.cpp b/src/wallet/test/crypto_tests.cpp
index ce35c53c48..c64c76244e 100644
--- a/src/wallet/test/crypto_tests.cpp
+++ b/src/wallet/test/crypto_tests.cpp
@@ -42,15 +42,19 @@ bool OldEncrypt(const CKeyingMaterial& vchPlaintext, std::vector<unsigned char>
int nCLen = nLen + AES_BLOCK_SIZE, nFLen = 0;
vchCiphertext = std::vector<unsigned char> (nCLen);
- EVP_CIPHER_CTX ctx;
+ EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
+
+ if (!ctx) return false;
bool fOk = true;
- EVP_CIPHER_CTX_init(&ctx);
- if (fOk) fOk = EVP_EncryptInit_ex(&ctx, EVP_aes_256_cbc(), NULL, chKey, chIV) != 0;
- if (fOk) fOk = EVP_EncryptUpdate(&ctx, &vchCiphertext[0], &nCLen, &vchPlaintext[0], nLen) != 0;
- if (fOk) fOk = EVP_EncryptFinal_ex(&ctx, (&vchCiphertext[0]) + nCLen, &nFLen) != 0;
- EVP_CIPHER_CTX_cleanup(&ctx);
+ EVP_CIPHER_CTX_init(ctx);
+ if (fOk) fOk = EVP_EncryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, chKey, chIV) != 0;
+ if (fOk) fOk = EVP_EncryptUpdate(ctx, &vchCiphertext[0], &nCLen, &vchPlaintext[0], nLen) != 0;
+ if (fOk) fOk = EVP_EncryptFinal_ex(ctx, (&vchCiphertext[0]) + nCLen, &nFLen) != 0;
+ EVP_CIPHER_CTX_cleanup(ctx);
+
+ EVP_CIPHER_CTX_free(ctx);
if (!fOk) return false;
@@ -66,15 +70,19 @@ bool OldDecrypt(const std::vector<unsigned char>& vchCiphertext, CKeyingMaterial
vchPlaintext = CKeyingMaterial(nPLen);
- EVP_CIPHER_CTX ctx;
+ EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
+
+ if (!ctx) return false;
bool fOk = true;
- EVP_CIPHER_CTX_init(&ctx);
- if (fOk) fOk = EVP_DecryptInit_ex(&ctx, EVP_aes_256_cbc(), NULL, chKey, chIV) != 0;
- if (fOk) fOk = EVP_DecryptUpdate(&ctx, &vchPlaintext[0], &nPLen, &vchCiphertext[0], nLen) != 0;
- if (fOk) fOk = EVP_DecryptFinal_ex(&ctx, (&vchPlaintext[0]) + nPLen, &nFLen) != 0;
- EVP_CIPHER_CTX_cleanup(&ctx);
+ EVP_CIPHER_CTX_init(ctx);
+ if (fOk) fOk = EVP_DecryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, chKey, chIV) != 0;
+ if (fOk) fOk = EVP_DecryptUpdate(ctx, &vchPlaintext[0], &nPLen, &vchCiphertext[0], nLen) != 0;
+ if (fOk) fOk = EVP_DecryptFinal_ex(ctx, (&vchPlaintext[0]) + nPLen, &nFLen) != 0;
+ EVP_CIPHER_CTX_cleanup(ctx);
+
+ EVP_CIPHER_CTX_free(ctx);
if (!fOk) return false;
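
The crypto_tests.cpp hunks above switch the legacy OpenSSL helpers from a stack-allocated EVP_CIPHER_CTX to the heap-allocated EVP_CIPHER_CTX_new()/EVP_CIPHER_CTX_free() pair, which OpenSSL 1.1 requires now that the context is an opaque type. A minimal standalone sketch of that pattern follows; it is illustrative only (reduced error handling, and it drops the explicit EVP_CIPHER_CTX_init/_cleanup calls the patch keeps for older OpenSSL versions), not the test code itself.

// Illustrative only: heap-allocated EVP_CIPHER_CTX pattern (OpenSSL 1.1+ compatible).
#include <openssl/evp.h>
#include <vector>

bool AES256CBCEncrypt(const unsigned char key[32], const unsigned char iv[16],
                      const std::vector<unsigned char>& plaintext,
                      std::vector<unsigned char>& ciphertext)
{
    EVP_CIPHER_CTX* ctx = EVP_CIPHER_CTX_new();   // opaque struct, allocated by the library
    if (!ctx) return false;

    int nLen = 0, nFinal = 0;
    ciphertext.resize(plaintext.size() + EVP_MAX_BLOCK_LENGTH);

    bool fOk = EVP_EncryptInit_ex(ctx, EVP_aes_256_cbc(), nullptr, key, iv) == 1
            && EVP_EncryptUpdate(ctx, ciphertext.data(), &nLen,
                                 plaintext.data(), (int)plaintext.size()) == 1
            && EVP_EncryptFinal_ex(ctx, ciphertext.data() + nLen, &nFinal) == 1;

    EVP_CIPHER_CTX_free(ctx);                     // replaces EVP_CIPHER_CTX_cleanup on a stack object
    if (!fOk) return false;

    ciphertext.resize(nLen + nFinal);
    return true;
}
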
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 8085706a99..9465d88067 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -78,24 +78,24 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
empty_wallet();
// with an empty wallet we can't even pay one cent
- BOOST_CHECK(!wallet.SelectCoinsMinConf( 1 * CENT, 1, 6, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK(!wallet.SelectCoinsMinConf( 1 * CENT, 1, 6, 0, vCoins, setCoinsRet, nValueRet));
add_coin(1*CENT, 4); // add a new 1 cent coin
// with a new 1 cent coin, we still can't find a mature 1 cent
- BOOST_CHECK(!wallet.SelectCoinsMinConf( 1 * CENT, 1, 6, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK(!wallet.SelectCoinsMinConf( 1 * CENT, 1, 6, 0, vCoins, setCoinsRet, nValueRet));
// but we can find a new 1 cent
- BOOST_CHECK( wallet.SelectCoinsMinConf( 1 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf( 1 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 1 * CENT);
add_coin(2*CENT); // add a mature 2 cent coin
// we can't make 3 cents of mature coins
- BOOST_CHECK(!wallet.SelectCoinsMinConf( 3 * CENT, 1, 6, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK(!wallet.SelectCoinsMinConf( 3 * CENT, 1, 6, 0, vCoins, setCoinsRet, nValueRet));
// we can make 3 cents of new coins
- BOOST_CHECK( wallet.SelectCoinsMinConf( 3 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf( 3 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 3 * CENT);
add_coin(5*CENT); // add a mature 5 cent coin,
@@ -105,33 +105,33 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
// now we have new: 1+10=11 (of which 10 was self-sent), and mature: 2+5+20=27. total = 38
// we can't make 38 cents only if we disallow new coins:
- BOOST_CHECK(!wallet.SelectCoinsMinConf(38 * CENT, 1, 6, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK(!wallet.SelectCoinsMinConf(38 * CENT, 1, 6, 0, vCoins, setCoinsRet, nValueRet));
// we can't even make 37 cents if we don't allow new coins even if they're from us
- BOOST_CHECK(!wallet.SelectCoinsMinConf(38 * CENT, 6, 6, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK(!wallet.SelectCoinsMinConf(38 * CENT, 6, 6, 0, vCoins, setCoinsRet, nValueRet));
// but we can make 37 cents if we accept new coins from ourself
- BOOST_CHECK( wallet.SelectCoinsMinConf(37 * CENT, 1, 6, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(37 * CENT, 1, 6, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 37 * CENT);
// and we can make 38 cents if we accept all new coins
- BOOST_CHECK( wallet.SelectCoinsMinConf(38 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(38 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 38 * CENT);
// try making 34 cents from 1,2,5,10,20 - we can't do it exactly
- BOOST_CHECK( wallet.SelectCoinsMinConf(34 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(34 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 35 * CENT); // but 35 cents is closest
BOOST_CHECK_EQUAL(setCoinsRet.size(), 3U); // the best should be 20+10+5. it's incredibly unlikely the 1 or 2 got included (but possible)
// when we try making 7 cents, the smaller coins (1,2,5) are enough. We should see just 2+5
- BOOST_CHECK( wallet.SelectCoinsMinConf( 7 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf( 7 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 7 * CENT);
BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U);
// when we try making 8 cents, the smaller coins (1,2,5) are exactly enough.
- BOOST_CHECK( wallet.SelectCoinsMinConf( 8 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf( 8 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK(nValueRet == 8 * CENT);
BOOST_CHECK_EQUAL(setCoinsRet.size(), 3U);
// when we try making 9 cents, no subset of smaller coins is enough, and we get the next bigger coin (10)
- BOOST_CHECK( wallet.SelectCoinsMinConf( 9 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf( 9 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 10 * CENT);
BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U);
@@ -145,30 +145,30 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
add_coin(30*CENT); // now we have 6+7+8+20+30 = 71 cents total
// check that we have 71 and not 72
- BOOST_CHECK( wallet.SelectCoinsMinConf(71 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
- BOOST_CHECK(!wallet.SelectCoinsMinConf(72 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(71 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK(!wallet.SelectCoinsMinConf(72 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
// now try making 16 cents. the best smaller coins can do is 6+7+8 = 21; not as good as the next biggest coin, 20
- BOOST_CHECK( wallet.SelectCoinsMinConf(16 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(16 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 20 * CENT); // we should get 20 in one coin
BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U);
add_coin( 5*CENT); // now we have 5+6+7+8+20+30 = 75 cents total
// now if we try making 16 cents again, the smaller coins can make 5+6+7 = 18 cents, better than the next biggest coin, 20
- BOOST_CHECK( wallet.SelectCoinsMinConf(16 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(16 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 18 * CENT); // we should get 18 in 3 coins
BOOST_CHECK_EQUAL(setCoinsRet.size(), 3U);
add_coin( 18*CENT); // now we have 5+6+7+8+18+20+30
// and now if we try making 16 cents again, the smaller coins can make 5+6+7 = 18 cents, the same as the next biggest coin, 18
- BOOST_CHECK( wallet.SelectCoinsMinConf(16 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(16 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 18 * CENT); // we should get 18 in 1 coin
BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U); // because in the event of a tie, the biggest coin wins
// now try making 11 cents. we should get 5+6
- BOOST_CHECK( wallet.SelectCoinsMinConf(11 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(11 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 11 * CENT);
BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U);
@@ -177,11 +177,11 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
add_coin( 2*COIN);
add_coin( 3*COIN);
add_coin( 4*COIN); // now we have 5+6+7+8+18+20+30+100+200+300+400 = 1094 cents
- BOOST_CHECK( wallet.SelectCoinsMinConf(95 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(95 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 1 * COIN); // we should get 1 BTC in 1 coin
BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U);
- BOOST_CHECK( wallet.SelectCoinsMinConf(195 * CENT, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(195 * CENT, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 2 * COIN); // we should get 2 BTC in 1 coin
BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U);
@@ -196,14 +196,14 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
// try making 1 * MIN_CHANGE from the 1.5 * MIN_CHANGE
// we'll get change smaller than MIN_CHANGE whatever happens, so can expect MIN_CHANGE exactly
- BOOST_CHECK( wallet.SelectCoinsMinConf(MIN_CHANGE, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(MIN_CHANGE, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, MIN_CHANGE);
// but if we add a bigger coin, small change is avoided
add_coin(1111*MIN_CHANGE);
// try making 1 from 0.1 + 0.2 + 0.3 + 0.4 + 0.5 + 1111 = 1112.5
- BOOST_CHECK( wallet.SelectCoinsMinConf(1 * MIN_CHANGE, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(1 * MIN_CHANGE, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 1 * MIN_CHANGE); // we should get the exact amount
// if we add more small coins:
@@ -211,7 +211,7 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
add_coin(MIN_CHANGE * 7 / 10);
// and try again to make 1.0 * MIN_CHANGE
- BOOST_CHECK( wallet.SelectCoinsMinConf(1 * MIN_CHANGE, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(1 * MIN_CHANGE, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 1 * MIN_CHANGE); // we should get the exact amount
// run the 'mtgox' test (see http://blockexplorer.com/tx/29a3efd3ef04f9153d47a990bd7b048a4b2d213daaa5fb8ed670fb85f13bdbcf)
@@ -220,7 +220,7 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
for (int j = 0; j < 20; j++)
add_coin(50000 * COIN);
- BOOST_CHECK( wallet.SelectCoinsMinConf(500000 * COIN, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(500000 * COIN, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 500000 * COIN); // we should get the exact amount
BOOST_CHECK_EQUAL(setCoinsRet.size(), 10U); // in ten coins
@@ -233,7 +233,7 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
add_coin(MIN_CHANGE * 6 / 10);
add_coin(MIN_CHANGE * 7 / 10);
add_coin(1111 * MIN_CHANGE);
- BOOST_CHECK( wallet.SelectCoinsMinConf(1 * MIN_CHANGE, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(1 * MIN_CHANGE, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 1111 * MIN_CHANGE); // we get the bigger coin
BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U);
@@ -243,7 +243,7 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
add_coin(MIN_CHANGE * 6 / 10);
add_coin(MIN_CHANGE * 8 / 10);
add_coin(1111 * MIN_CHANGE);
- BOOST_CHECK( wallet.SelectCoinsMinConf(MIN_CHANGE, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK( wallet.SelectCoinsMinConf(MIN_CHANGE, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, MIN_CHANGE); // we should get the exact amount
BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U); // in two coins 0.4+0.6
@@ -254,12 +254,12 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
add_coin(MIN_CHANGE * 100);
// trying to make 100.01 from these three coins
- BOOST_CHECK(wallet.SelectCoinsMinConf(MIN_CHANGE * 10001 / 100, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK(wallet.SelectCoinsMinConf(MIN_CHANGE * 10001 / 100, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, MIN_CHANGE * 10105 / 100); // we should get all coins
BOOST_CHECK_EQUAL(setCoinsRet.size(), 3U);
// but if we try to make 99.9, we should take the bigger of the two small coins to avoid small change
- BOOST_CHECK(wallet.SelectCoinsMinConf(MIN_CHANGE * 9990 / 100, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK(wallet.SelectCoinsMinConf(MIN_CHANGE * 9990 / 100, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 101 * MIN_CHANGE);
BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U);
@@ -269,7 +269,7 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
// Create 676 inputs (= (old MAX_STANDARD_TX_SIZE == 100000) / 148 bytes per input)
for (uint16_t j = 0; j < 676; j++)
add_coin(amt);
- BOOST_CHECK(wallet.SelectCoinsMinConf(2000, 1, 1, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK(wallet.SelectCoinsMinConf(2000, 1, 1, 0, vCoins, setCoinsRet, nValueRet));
if (amt - 2000 < MIN_CHANGE) {
// needs more than one input:
uint16_t returnSize = std::ceil((2000.0 + MIN_CHANGE)/amt);
@@ -291,8 +291,8 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
// picking 50 from 100 coins doesn't depend on the shuffle,
// but does depend on randomness in the stochastic approximation code
- BOOST_CHECK(wallet.SelectCoinsMinConf(50 * COIN, 1, 6, vCoins, setCoinsRet , nValueRet));
- BOOST_CHECK(wallet.SelectCoinsMinConf(50 * COIN, 1, 6, vCoins, setCoinsRet2, nValueRet));
+ BOOST_CHECK(wallet.SelectCoinsMinConf(50 * COIN, 1, 6, 0, vCoins, setCoinsRet , nValueRet));
+ BOOST_CHECK(wallet.SelectCoinsMinConf(50 * COIN, 1, 6, 0, vCoins, setCoinsRet2, nValueRet));
BOOST_CHECK(!equal_sets(setCoinsRet, setCoinsRet2));
int fails = 0;
@@ -300,8 +300,8 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
{
// selecting 1 from 100 identical coins depends on the shuffle; this test will fail 1% of the time
// run the test RANDOM_REPEATS times and only complain if all of them fail
- BOOST_CHECK(wallet.SelectCoinsMinConf(COIN, 1, 6, vCoins, setCoinsRet , nValueRet));
- BOOST_CHECK(wallet.SelectCoinsMinConf(COIN, 1, 6, vCoins, setCoinsRet2, nValueRet));
+ BOOST_CHECK(wallet.SelectCoinsMinConf(COIN, 1, 6, 0, vCoins, setCoinsRet , nValueRet));
+ BOOST_CHECK(wallet.SelectCoinsMinConf(COIN, 1, 6, 0, vCoins, setCoinsRet2, nValueRet));
if (equal_sets(setCoinsRet, setCoinsRet2))
fails++;
}
@@ -321,8 +321,8 @@ BOOST_AUTO_TEST_CASE(coin_selection_tests)
{
// selecting 1 from 100 identical coins depends on the shuffle; this test will fail 1% of the time
// run the test RANDOM_REPEATS times and only complain if all of them fail
- BOOST_CHECK(wallet.SelectCoinsMinConf(90*CENT, 1, 6, vCoins, setCoinsRet , nValueRet));
- BOOST_CHECK(wallet.SelectCoinsMinConf(90*CENT, 1, 6, vCoins, setCoinsRet2, nValueRet));
+ BOOST_CHECK(wallet.SelectCoinsMinConf(90*CENT, 1, 6, 0, vCoins, setCoinsRet , nValueRet));
+ BOOST_CHECK(wallet.SelectCoinsMinConf(90*CENT, 1, 6, 0, vCoins, setCoinsRet2, nValueRet));
if (equal_sets(setCoinsRet, setCoinsRet2))
fails++;
}
@@ -346,7 +346,7 @@ BOOST_AUTO_TEST_CASE(ApproximateBestSubset)
add_coin(1000 * COIN);
add_coin(3 * COIN);
- BOOST_CHECK(wallet.SelectCoinsMinConf(1003 * COIN, 1, 6, vCoins, setCoinsRet, nValueRet));
+ BOOST_CHECK(wallet.SelectCoinsMinConf(1003 * COIN, 1, 6, 0, vCoins, setCoinsRet, nValueRet));
BOOST_CHECK_EQUAL(nValueRet, 1003 * COIN);
BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U);
}
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 2ef4802a10..1261aad3f2 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -1009,8 +1009,7 @@ bool CWallet::AbandonTransaction(const uint256& hashTx)
{
LOCK2(cs_main, cs_wallet);
- // Do not flush the wallet here for performance reasons
- CWalletDB walletdb(strWalletFile, "r+", false);
+ CWalletDB walletdb(strWalletFile, "r+");
std::set<uint256> todo;
std::set<uint256> done;
@@ -1542,9 +1541,11 @@ void CWallet::ReacceptWalletTransactions()
bool CWalletTx::RelayWalletTransaction(CConnman* connman)
{
assert(pwallet->GetBroadcastTransactions());
- if (!IsCoinBase())
+ if (!IsCoinBase() && !isAbandoned() && GetDepthInMainChain() == 0)
{
- if (GetDepthInMainChain() == 0 && !isAbandoned() && InMempool()) {
+ CValidationState state;
+ /* GetDepthInMainChain already catches known conflicts. */
+ if (InMempool() || AcceptToMemoryPool(maxTxFee, state)) {
LogPrintf("Relaying wtx %s\n", GetHash().ToString());
if (connman) {
CInv inv(MSG_TX, GetHash());
@@ -2017,7 +2018,7 @@ static void ApproximateBestSubset(vector<pair<CAmount, pair<const CWalletTx*,uns
}
}
-bool CWallet::SelectCoinsMinConf(const CAmount& nTargetValue, int nConfMine, int nConfTheirs, vector<COutput> vCoins,
+bool CWallet::SelectCoinsMinConf(const CAmount& nTargetValue, const int nConfMine, const int nConfTheirs, const uint64_t nMaxAncestors, vector<COutput> vCoins,
set<pair<const CWalletTx*,unsigned int> >& setCoinsRet, CAmount& nValueRet) const
{
setCoinsRet.clear();
@@ -2042,6 +2043,9 @@ bool CWallet::SelectCoinsMinConf(const CAmount& nTargetValue, int nConfMine, int
if (output.nDepth < (pcoin->IsFromMe(ISMINE_ALL) ? nConfMine : nConfTheirs))
continue;
+ if (!mempool.TransactionWithinChainLimit(pcoin->GetHash(), nMaxAncestors))
+ continue;
+
int i = output.i;
CAmount n = pcoin->tx->vout[i].nValue;
@@ -2167,10 +2171,17 @@ bool CWallet::SelectCoins(const vector<COutput>& vAvailableCoins, const CAmount&
++it;
}
+ size_t nMaxChainLength = std::min(GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT), GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT));
+ bool fRejectLongChains = GetBoolArg("-walletrejectlongchains", DEFAULT_WALLET_REJECT_LONG_CHAINS);
+
bool res = nTargetValue <= nValueFromPresetInputs ||
- SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 1, 6, vCoins, setCoinsRet, nValueRet) ||
- SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 1, 1, vCoins, setCoinsRet, nValueRet) ||
- (bSpendZeroConfChange && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 0, 1, vCoins, setCoinsRet, nValueRet));
+ SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 1, 6, 0, vCoins, setCoinsRet, nValueRet) ||
+ SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 1, 1, 0, vCoins, setCoinsRet, nValueRet) ||
+ (bSpendZeroConfChange && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 0, 1, 2, vCoins, setCoinsRet, nValueRet)) ||
+ (bSpendZeroConfChange && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 0, 1, std::min((size_t)4, nMaxChainLength/3), vCoins, setCoinsRet, nValueRet)) ||
+ (bSpendZeroConfChange && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 0, 1, nMaxChainLength/2, vCoins, setCoinsRet, nValueRet)) ||
+ (bSpendZeroConfChange && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 0, 1, nMaxChainLength, vCoins, setCoinsRet, nValueRet)) ||
+ (bSpendZeroConfChange && !fRejectLongChains && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 0, 1, std::numeric_limits<uint64_t>::max(), vCoins, setCoinsRet, nValueRet));
// because SelectCoinsMinConf clears the setCoinsRet, we now add the possible inputs to the coinset
setCoinsRet.insert(setPresetCoins.begin(), setPresetCoins.end());
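
The hunk above is the behavioural core of the wallet change: when the target cannot be funded from confirmed coins and spending zero-confirmation change is allowed, SelectCoinsMinConf is retried with a progressively larger cap on unconfirmed mempool ancestors (2, then min(4, limit/3), limit/2, the full limit, and finally no cap at all unless -walletrejectlongchains is set), where the limit is the smaller of -limitancestorcount and -limitdescendantcount. A simplified standalone sketch of that retry ladder follows; trySelect and the function name are hypothetical stand-ins for the wallet internals, and preset inputs are omitted.

// Illustrative sketch of the retry ladder, not the wallet code itself.
// trySelect(target, minConfMine, minConfTheirs, maxAncestors) stands in for SelectCoinsMinConf().
#include <algorithm>
#include <cstdint>
#include <functional>
#include <limits>

bool SelectWithAncestorFallback(
    uint64_t target, uint64_t maxChainLength, bool spendZeroConfChange, bool rejectLongChains,
    const std::function<bool(uint64_t, int, int, uint64_t)>& trySelect)
{
    // Prefer well-confirmed coins, then any confirmed coin (no unconfirmed ancestors allowed).
    if (trySelect(target, 1, 6, 0) || trySelect(target, 1, 1, 0)) return true;
    if (!spendZeroConfChange) return false;

    // Unconfirmed change: retry while allowing progressively longer mempool chains.
    const uint64_t caps[] = {2, std::min<uint64_t>(4, maxChainLength / 3),
                             maxChainLength / 2, maxChainLength};
    for (uint64_t cap : caps)
        if (trySelect(target, 0, 1, cap)) return true;

    // Last resort: no ancestor limit at all, unless -walletrejectlongchains is set.
    return !rejectLongChains && trySelect(target, 0, 1, std::numeric_limits<uint64_t>::max());
}
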
@@ -2388,7 +2399,11 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt
CPubKey vchPubKey;
bool ret;
ret = reservekey.GetReservedKey(vchPubKey);
- assert(ret); // should never fail, as we just unlocked
+ if (!ret)
+ {
+ strFailReason = _("Keypool ran out, please call keypoolrefill first");
+ return false;
+ }
scriptChange = GetScriptForDestination(vchPubKey.GetID());
}
@@ -2545,6 +2560,21 @@ bool CWallet::CreateTransaction(const vector<CRecipient>& vecSend, CWalletTx& wt
}
}
+ if (GetBoolArg("-walletrejectlongchains", DEFAULT_WALLET_REJECT_LONG_CHAINS)) {
+ // Lastly, ensure this tx will pass the mempool's chain limits
+ LockPoints lp;
+ CTxMemPoolEntry entry(txNew, 0, 0, 0, 0, false, 0, false, 0, lp);
+ CTxMemPool::setEntries setAncestors;
+ size_t nLimitAncestors = GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
+ size_t nLimitAncestorSize = GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000;
+ size_t nLimitDescendants = GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
+ size_t nLimitDescendantSize = GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
+ std::string errString;
+ if (!mempool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
+ strFailReason = _("Transaction has too long of a mempool chain");
+ return false;
+ }
+ }
return true;
}
@@ -2580,11 +2610,11 @@ bool CWallet::CommitTransaction(CWalletTx& wtxNew, CReserveKey& reservekey, CCon
{
// Broadcast
if (!wtxNew.AcceptToMemoryPool(maxTxFee, state)) {
- // This must not fail. The transaction has already been signed and recorded.
- LogPrintf("CommitTransaction(): Error: Transaction not valid, %s\n", state.GetRejectReason());
- return false;
+ LogPrintf("CommitTransaction(): Transaction cannot be broadcast immediately, %s\n", state.GetRejectReason());
+ // TODO: if we expect the failure to be long term or permanent, instead delete wtx from the wallet and return failure.
+ } else {
+ wtxNew.RelayWalletTransaction(connman);
}
- wtxNew.RelayWalletTransaction(connman);
}
}
return true;
@@ -3369,6 +3399,7 @@ std::string CWallet::GetWalletHelpString(bool showDebug)
strUsage += HelpMessageOpt("-dblogsize=<n>", strprintf("Flush wallet database activity from memory to disk log every <n> megabytes (default: %u)", DEFAULT_WALLET_DBLOGSIZE));
strUsage += HelpMessageOpt("-flushwallet", strprintf("Run a thread to flush wallet periodically (default: %u)", DEFAULT_FLUSHWALLET));
strUsage += HelpMessageOpt("-privdb", strprintf("Sets the DB_PRIVATE flag in the wallet db environment (default: %u)", DEFAULT_WALLET_PRIVDB));
+ strUsage += HelpMessageOpt("-walletrejectlongchains", strprintf(_("Wallet will not create transactions that violate mempool chain limits (default: %u)"), DEFAULT_WALLET_REJECT_LONG_CHAINS));
}
return strUsage;
@@ -3642,6 +3673,9 @@ bool CWallet::ParameterInteraction()
fSendFreeTransactions = GetBoolArg("-sendfreetransactions", DEFAULT_SEND_FREE_TRANSACTIONS);
fWalletRbf = GetBoolArg("-walletrbf", DEFAULT_WALLET_RBF);
+ if (fSendFreeTransactions && GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) <= 0)
+ return InitError("Creation of free transactions with their relay disabled is not supported.");
+
return true;
}
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index f7103b6a80..72c1239cc4 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -53,6 +53,8 @@ static const CAmount MIN_CHANGE = CENT;
static const bool DEFAULT_SPEND_ZEROCONF_CHANGE = true;
//! Default for -sendfreetransactions
static const bool DEFAULT_SEND_FREE_TRANSACTIONS = false;
+//! Default for -walletrejectlongchains
+static const bool DEFAULT_WALLET_REJECT_LONG_CHAINS = false;
//! -txconfirmtarget default
static const unsigned int DEFAULT_TX_CONFIRM_TARGET = 6;
//! -walletrbf default
@@ -367,6 +369,7 @@ public:
{
fCreditCached = false;
fAvailableCreditCached = false;
+ fImmatureCreditCached = false;
fWatchDebitCached = false;
fWatchCreditCached = false;
fAvailableWatchCreditCached = false;
@@ -690,7 +693,7 @@ public:
* completion the coin set and corresponding actual target value is
* assembled
*/
- bool SelectCoinsMinConf(const CAmount& nTargetValue, int nConfMine, int nConfTheirs, std::vector<COutput> vCoins, std::set<std::pair<const CWalletTx*,unsigned int> >& setCoinsRet, CAmount& nValueRet) const;
+ bool SelectCoinsMinConf(const CAmount& nTargetValue, int nConfMine, int nConfTheirs, uint64_t nMaxAncestors, std::vector<COutput> vCoins, std::set<std::pair<const CWalletTx*,unsigned int> >& setCoinsRet, CAmount& nValueRet) const;
bool IsSpent(const uint256& hash, unsigned int n) const;
diff --git a/src/warnings.cpp b/src/warnings.cpp
new file mode 100644
index 0000000000..2c1b1b0e12
--- /dev/null
+++ b/src/warnings.cpp
@@ -0,0 +1,89 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include "sync.h"
+#include "clientversion.h"
+#include "util.h"
+#include "warnings.h"
+
+CCriticalSection cs_warnings;
+std::string strMiscWarning;
+bool fLargeWorkForkFound = false;
+bool fLargeWorkInvalidChainFound = false;
+
+void SetMiscWarning(const std::string& strWarning)
+{
+ LOCK(cs_warnings);
+ strMiscWarning = strWarning;
+}
+
+void SetfLargeWorkForkFound(bool flag)
+{
+ LOCK(cs_warnings);
+ fLargeWorkForkFound = flag;
+}
+
+bool GetfLargeWorkForkFound()
+{
+ LOCK(cs_warnings);
+ return fLargeWorkForkFound;
+}
+
+void SetfLargeWorkInvalidChainFound(bool flag)
+{
+ LOCK(cs_warnings);
+ fLargeWorkInvalidChainFound = flag;
+}
+
+bool GetfLargeWorkInvalidChainFound()
+{
+ LOCK(cs_warnings);
+ return fLargeWorkInvalidChainFound;
+}
+
+std::string GetWarnings(const std::string& strFor)
+{
+ std::string strStatusBar;
+ std::string strRPC;
+ std::string strGUI;
+ const std::string uiAlertSeperator = "<hr />";
+
+ LOCK(cs_warnings);
+
+ if (!CLIENT_VERSION_IS_RELEASE) {
+ strStatusBar = "This is a pre-release test build - use at your own risk - do not use for mining or merchant applications";
+ strGUI = _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications");
+ }
+
+ if (GetBoolArg("-testsafemode", DEFAULT_TESTSAFEMODE))
+ strStatusBar = strRPC = strGUI = "testsafemode enabled";
+
+ // Misc warnings like out of disk space and clock is wrong
+ if (strMiscWarning != "")
+ {
+ strStatusBar = strMiscWarning;
+ strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + strMiscWarning;
+ }
+
+ if (fLargeWorkForkFound)
+ {
+ strStatusBar = strRPC = "Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.";
+ strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.");
+ }
+ else if (fLargeWorkInvalidChainFound)
+ {
+ strStatusBar = strRPC = "Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.";
+ strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.");
+ }
+
+ if (strFor == "gui")
+ return strGUI;
+ else if (strFor == "statusbar")
+ return strStatusBar;
+ else if (strFor == "rpc")
+ return strRPC;
+ assert(!"GetWarnings(): invalid parameter");
+ return "error";
+}
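
The new warnings.cpp moves GetWarnings and the warning globals out of validation.cpp and puts every read and write behind cs_warnings. Outside the project's CCriticalSection/LOCK machinery, the same guarded-accessor pattern looks roughly like the sketch below; the names are simplified and this is an illustration, not the file above.

// Minimal illustration of the guarded-global accessor pattern used by warnings.cpp,
// using std::mutex instead of the project's CCriticalSection/LOCK macros.
#include <mutex>
#include <string>

namespace {
std::mutex g_warnings_mutex;
std::string g_misc_warning;
bool g_large_work_fork_found = false;
} // namespace

void SetMiscWarning(const std::string& warning)
{
    std::lock_guard<std::mutex> lock(g_warnings_mutex);
    g_misc_warning = warning;
}

void SetLargeWorkForkFound(bool flag)
{
    std::lock_guard<std::mutex> lock(g_warnings_mutex);
    g_large_work_fork_found = flag;
}

std::string GetStatusBarWarning()
{
    std::lock_guard<std::mutex> lock(g_warnings_mutex);
    if (g_large_work_fork_found)
        return "Warning: The network does not appear to fully agree!";
    return g_misc_warning; // empty string means "no warning"
}
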
diff --git a/src/warnings.h b/src/warnings.h
new file mode 100644
index 0000000000..a7aa657426
--- /dev/null
+++ b/src/warnings.h
@@ -0,0 +1,21 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2016 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_WARNINGS_H
+#define BITCOIN_WARNINGS_H
+
+#include <stdlib.h>
+#include <string>
+
+void SetMiscWarning(const std::string& strWarning);
+void SetfLargeWorkForkFound(bool flag);
+bool GetfLargeWorkForkFound();
+void SetfLargeWorkInvalidChainFound(bool flag);
+bool GetfLargeWorkInvalidChainFound();
+std::string GetWarnings(const std::string& strFor);
+
+static const bool DEFAULT_TESTSAFEMODE = false;
+
+#endif // BITCOIN_WARNINGS_H
diff --git a/src/zmq/zmqpublishnotifier.cpp b/src/zmq/zmqpublishnotifier.cpp
index 99d42ab526..a11256dfd5 100644
--- a/src/zmq/zmqpublishnotifier.cpp
+++ b/src/zmq/zmqpublishnotifier.cpp
@@ -7,6 +7,7 @@
#include "zmqpublishnotifier.h"
#include "validation.h"
#include "util.h"
+#include "rpc/server.h"
static std::multimap<std::string, CZMQAbstractPublishNotifier*> mapPublishNotifiers;
@@ -166,7 +167,7 @@ bool CZMQPublishRawBlockNotifier::NotifyBlock(const CBlockIndex *pindex)
LogPrint("zmq", "zmq: Publish rawblock %s\n", pindex->GetBlockHash().GetHex());
const Consensus::Params& consensusParams = Params().GetConsensus();
- CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
+ CDataStream ss(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags());
{
LOCK(cs_main);
CBlock block;
@@ -186,7 +187,7 @@ bool CZMQPublishRawTransactionNotifier::NotifyTransaction(const CTransaction &tr
{
uint256 hash = transaction.GetHash();
LogPrint("zmq", "zmq: Publish rawtx %s\n", hash.GetHex());
- CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
+ CDataStream ss(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags());
ss << transaction;
return SendMessage(MSG_RAWTX, &(*ss.begin()), ss.size());
}
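
Both the gettransaction change and the two ZMQ hunks above OR RPCSerializationFlags() into the stream version; in this release that helper (defined in rpc/server.cpp) yields the no-witness serialization flag when the node runs with -rpcserialversion=0, so RPC and ZMQ consumers can keep receiving the pre-segwit transaction encoding. The general mechanism, a flag bit carried in the serialization version that the encoder checks before writing an optional section, can be sketched generically as follows; the record layout and flag value are made up for the example and are not Bitcoin Core's actual transaction format.

// Generic illustration of a serialization flag carried in the stream "version" word.
// Only the flag-masking idea mirrors how PROTOCOL_VERSION | RPCSerializationFlags() is used above.
#include <cstdint>
#include <vector>

static const int SERIALIZE_NO_WITNESS = 0x40000000;  // hypothetical flag bit

struct Record {
    std::vector<uint8_t> core;     // always serialized
    std::vector<uint8_t> witness;  // optional section
};

std::vector<uint8_t> Serialize(const Record& rec, int nVersion)
{
    std::vector<uint8_t> out(rec.core);
    if (!(nVersion & SERIALIZE_NO_WITNESS))           // flag set => skip the optional part
        out.insert(out.end(), rec.witness.begin(), rec.witness.end());
    return out;
}

// Caller side, mirroring "PROTOCOL_VERSION | RPCSerializationFlags()":
//   int nStreamVersion = baseVersion | (wantLegacyFormat ? SERIALIZE_NO_WITNESS : 0);
//   std::vector<uint8_t> bytes = Serialize(rec, nStreamVersion);
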