-rw-r--r--  .appveyor.yml | 4
-rw-r--r--  .github/workflows/ci.yml | 81
-rw-r--r--  .gitignore | 8
-rw-r--r--  .travis.yml | 33
-rw-r--r--  Makefile.am | 1
-rw-r--r--  build-aux/m4/ax_boost_chrono.m4 | 118
-rw-r--r--  build_msvc/.gitignore | 14
-rw-r--r--  build_msvc/README.md | 14
-rw-r--r--  build_msvc/bitcoin-qt/bitcoin-qt.vcxproj | 4
-rw-r--r--  build_msvc/bitcoin_config.h | 85
-rw-r--r--  build_msvc/common.init.vcxproj | 3
-rw-r--r--  build_msvc/libleveldb/libleveldb.vcxproj | 14
-rw-r--r--  build_msvc/msvc-autogen.py | 3
-rw-r--r--  build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj | 6
-rw-r--r--  build_msvc/test_bitcoin/test_bitcoin.vcxproj | 2
-rw-r--r--  build_msvc/testconsensus/testconsensus.cpp | 4
-rw-r--r--  ci/README.md | 2
-rwxr-xr-x  ci/lint/06_script.sh | 1
-rwxr-xr-x  ci/test/00_setup_env.sh | 5
-rw-r--r--  ci/test/00_setup_env_arm.sh | 15
-rw-r--r--  ci/test/00_setup_env_i686.sh | 13
-rw-r--r--  ci/test/00_setup_env_i686_centos.sh | 15
-rw-r--r--  ci/test/00_setup_env_mac.sh | 3
-rw-r--r--  ci/test/00_setup_env_native_asan.sh | 3
-rw-r--r--  ci/test/00_setup_env_native_centos.sh | 14
-rw-r--r--  ci/test/00_setup_env_native_fuzz.sh | 3
-rw-r--r--  ci/test/00_setup_env_native_fuzz_with_valgrind.sh | 18
-rw-r--r--  ci/test/00_setup_env_native_nowallet.sh | 1
-rw-r--r--  ci/test/00_setup_env_native_qt5.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_tsan.sh | 3
-rw-r--r--  ci/test/00_setup_env_native_valgrind.sh | 5
-rw-r--r--  ci/test/00_setup_env_s390x.sh | 12
-rw-r--r--  ci/test/00_setup_env_win64.sh | 1
-rwxr-xr-x  ci/test/04_install.sh | 30
-rwxr-xr-x  ci/test/05_before_script.sh | 15
-rwxr-xr-x  ci/test/06_script_b.sh | 13
-rwxr-xr-x  ci/test/wrap-qemu.sh | 18
-rw-r--r--  configure.ac | 249
-rw-r--r--  contrib/devtools/README.md | 14
-rwxr-xr-x  contrib/devtools/circular-dependencies.py | 3
-rwxr-xr-x  contrib/devtools/copyright_header.py | 22
-rwxr-xr-x  contrib/devtools/gen-manpages.sh | 3
-rwxr-xr-x  contrib/devtools/previous_release.sh | 149
-rwxr-xr-x  contrib/devtools/security-check.py | 2
-rwxr-xr-x  contrib/devtools/symbol-check.py | 136
-rwxr-xr-x  contrib/devtools/test_deterministic_coverage.sh | 4
-rwxr-xr-x  contrib/filter-lcov.py | 3
-rwxr-xr-x  contrib/gitian-build.py | 5
-rw-r--r--  contrib/gitian-descriptors/gitian-linux.yml | 4
-rw-r--r--  contrib/gitian-descriptors/gitian-osx-signer.yml | 2
-rw-r--r--  contrib/gitian-descriptors/gitian-osx.yml | 7
-rw-r--r--  contrib/gitian-descriptors/gitian-win-signer.yml | 2
-rw-r--r--  contrib/gitian-descriptors/gitian-win.yml | 6
-rw-r--r--  contrib/guix/README.md | 19
-rwxr-xr-x  contrib/guix/guix-build.sh | 34
-rwxr-xr-x  contrib/install_db4.sh | 3
-rwxr-xr-x  contrib/linearize/linearize-data.py | 29
-rw-r--r--  contrib/macdeploy/README.md | 136
-rwxr-xr-x  contrib/macdeploy/extract-osx-sdk.sh | 2
-rwxr-xr-x  contrib/seeds/makeseeds.py | 10
-rw-r--r--  contrib/seeds/suspicious_hosts.txt | 16
-rw-r--r--  contrib/valgrind.supp | 13
-rwxr-xr-x  contrib/verify-commits/pre-push-hook.sh | 2
-rw-r--r--  depends/README.md | 2
-rw-r--r--  depends/config.site.in | 2
-rw-r--r--  depends/hosts/darwin.mk | 7
-rw-r--r--  depends/packages/boost.mk | 6
-rw-r--r--  depends/packages/native_cctools.mk | 47
-rw-r--r--  depends/packages/native_libdmg-hfsplus.mk | 2
-rw-r--r--  depends/packages/qt.mk | 1
-rw-r--r--  depends/patches/qt/mac-qmake.conf | 2
-rw-r--r--  doc/Doxyfile.in | 3
-rw-r--r--  doc/build-osx.md | 94
-rw-r--r--  doc/build-unix.md | 2
-rw-r--r--  doc/descriptors.md | 8
-rw-r--r--  doc/developer-notes.md | 84
-rw-r--r--  doc/fuzzing.md | 75
-rw-r--r--  doc/release-notes-15437.md | 53
-rw-r--r--  doc/release-notes-15954.md | 4
-rw-r--r--  doc/release-notes-17056.md | 4
-rw-r--r--  doc/release-notes-17410.md | 5
-rw-r--r--  doc/release-notes-17437.md | 5
-rw-r--r--  doc/release-notes.md | 94
-rw-r--r--  doc/release-notes/release-notes-0.19.1.md | 115
-rw-r--r--  doc/release-process.md | 5
-rw-r--r--  share/qt/Info.plist.in | 2
-rw-r--r--  share/setup.nsi.in | 4
-rw-r--r--  src/Makefile.am | 18
-rw-r--r--  src/Makefile.crc32c.include | 75
-rw-r--r--  src/Makefile.leveldb.include | 44
-rw-r--r--  src/Makefile.qt_locale.include | 1
-rw-r--r--  src/Makefile.test.include | 765
-rw-r--r--  src/addrman.cpp | 58
-rw-r--r--  src/addrman.h | 104
-rw-r--r--  src/banman.h | 4
-rw-r--r--  src/bench/bench.cpp | 2
-rw-r--r--  src/bench/bench.h | 2
-rw-r--r--  src/bench/ccoins_caching.cpp | 42
-rw-r--r--  src/bench/coin_selection.cpp | 6
-rw-r--r--  src/bench/examples.cpp | 2
-rw-r--r--  src/bench/wallet_balance.cpp | 1
-rw-r--r--  src/bitcoin-cli.cpp | 2
-rw-r--r--  src/bitcoin-wallet.cpp | 2
-rw-r--r--  src/bitcoind.cpp | 2
-rw-r--r--  src/blockencodings.h | 140
-rw-r--r--  src/chain.h | 58
-rw-r--r--  src/chainparams.cpp | 4
-rw-r--r--  src/chainparams.h | 3
-rw-r--r--  src/checkqueue.h | 3
-rw-r--r--  src/coins.h | 4
-rw-r--r--  src/compressor.h | 63
-rw-r--r--  src/consensus/validation.h | 61
-rw-r--r--  src/crc32c/.appveyor.yml | 37
-rw-r--r--  src/crc32c/.clang-format | 3
-rw-r--r--  src/crc32c/.clang_complete | 8
-rw-r--r--  src/crc32c/.gitignore | 8
-rw-r--r--  src/crc32c/.gitmodules | 0
-rw-r--r--  src/crc32c/.travis.yml | 76
-rw-r--r--  src/crc32c/.ycm_extra_conf.py | 142
-rw-r--r--  src/crc32c/AUTHORS | 9
-rw-r--r--  src/crc32c/CMakeLists.txt | 423
-rw-r--r--  src/crc32c/CONTRIBUTING.md | 23
-rw-r--r--  src/crc32c/Crc32cConfig.cmake | 5
-rw-r--r--  src/crc32c/LICENSE | 28
-rw-r--r--  src/crc32c/README.md | 125
-rw-r--r--  src/crc32c/include/crc32c/crc32c.h | 89
-rw-r--r--  src/crc32c/src/crc32c.cc | 39
-rw-r--r--  src/crc32c/src/crc32c_arm64.cc | 126
-rw-r--r--  src/crc32c/src/crc32c_arm64.h | 27
-rw-r--r--  src/crc32c/src/crc32c_arm64_linux_check.h | 50
-rw-r--r--  src/crc32c/src/crc32c_arm64_unittest.cc | 24
-rw-r--r--  src/crc32c/src/crc32c_benchmark.cc | 106
-rw-r--r--  src/crc32c/src/crc32c_capi_unittest.c | 66
-rw-r--r--  src/crc32c/src/crc32c_config.h.in | 36
-rw-r--r--  src/crc32c/src/crc32c_extend_unittests.h | 112
-rw-r--r--  src/crc32c/src/crc32c_internal.h | 23
-rw-r--r--  src/crc32c/src/crc32c_portable.cc | 351
-rw-r--r--  src/crc32c/src/crc32c_portable_unittest.cc | 20
-rw-r--r--  src/crc32c/src/crc32c_prefetch.h | 46
-rw-r--r--  src/crc32c/src/crc32c_prefetch_unittest.cc | 9
-rw-r--r--  src/crc32c/src/crc32c_read_le.h | 53
-rw-r--r--  src/crc32c/src/crc32c_read_le_unittest.cc | 32
-rw-r--r--  src/crc32c/src/crc32c_round_up.h | 34
-rw-r--r--  src/crc32c/src/crc32c_round_up_unittest.cc | 84
-rw-r--r--  src/crc32c/src/crc32c_sse42.cc | 258
-rw-r--r--  src/crc32c/src/crc32c_sse42.h | 33
-rw-r--r--  src/crc32c/src/crc32c_sse42_check.h | 50
-rw-r--r--  src/crc32c/src/crc32c_sse42_unittest.cc | 24
-rw-r--r--  src/crc32c/src/crc32c_test_main.cc | 22
-rw-r--r--  src/crc32c/src/crc32c_unittest.cc | 129
-rw-r--r--  src/crypto/sha256_avx2.cpp | 4
-rw-r--r--  src/crypto/sha256_sse41.cpp | 4
-rw-r--r--  src/dummywallet.cpp | 12
-rw-r--r--  src/flatfile.h | 2
-rw-r--r--  src/fs.cpp | 4
-rw-r--r--  src/httprpc.cpp | 2
-rw-r--r--  src/httpserver.cpp | 12
-rw-r--r--  src/index/base.cpp | 2
-rw-r--r--  src/index/base.h | 2
-rw-r--r--  src/indirectmap.h | 2
-rw-r--r--  src/init.cpp | 102
-rw-r--r--  src/interfaces/chain.cpp | 5
-rw-r--r--  src/interfaces/chain.h | 7
-rw-r--r--  src/interfaces/handler.cpp | 16
-rw-r--r--  src/interfaces/handler.h | 6
-rw-r--r--  src/interfaces/node.cpp | 7
-rw-r--r--  src/interfaces/wallet.cpp | 18
-rw-r--r--  src/interfaces/wallet.h | 13
-rw-r--r--  src/leveldb/.appveyor.yml | 35
-rw-r--r--  src/leveldb/.clang-format | 18
-rw-r--r--  src/leveldb/.gitignore | 21
-rw-r--r--  src/leveldb/.travis.yml | 81
-rw-r--r--  src/leveldb/CMakeLists.txt | 465
-rw-r--r--  src/leveldb/CONTRIBUTING.md | 4
-rw-r--r--  src/leveldb/Makefile | 424
-rw-r--r--  src/leveldb/README.md | 87
-rw-r--r--  src/leveldb/WINDOWS.md | 39
-rw-r--r--  src/leveldb/benchmarks/db_bench.cc (renamed from src/leveldb/db/db_bench.cc) | 203
-rw-r--r--  src/leveldb/benchmarks/db_bench_sqlite3.cc (renamed from src/leveldb/doc/bench/db_bench_sqlite3.cc) | 184
-rw-r--r--  src/leveldb/benchmarks/db_bench_tree_db.cc (renamed from src/leveldb/doc/bench/db_bench_tree_db.cc) | 130
-rwxr-xr-x  src/leveldb/build_detect_platform | 259
-rw-r--r--  src/leveldb/cmake/leveldbConfig.cmake | 1
-rw-r--r--  src/leveldb/db/autocompact_test.cc | 36
-rw-r--r--  src/leveldb/db/builder.cc | 25
-rw-r--r--  src/leveldb/db/builder.h | 8
-rw-r--r--  src/leveldb/db/c.cc | 391
-rw-r--r--  src/leveldb/db/c_test.c | 36
-rw-r--r--  src/leveldb/db/corruption_test.cc | 100
-rw-r--r--  src/leveldb/db/db_impl.cc | 547
-rw-r--r--  src/leveldb/db/db_impl.h | 146
-rw-r--r--  src/leveldb/db/db_iter.cc | 93
-rw-r--r--  src/leveldb/db/db_iter.h | 12
-rw-r--r--  src/leveldb/db/db_test.cc | 722
-rw-r--r--  src/leveldb/db/dbformat.cc | 43
-rw-r--r--  src/leveldb/db/dbformat.h | 80
-rw-r--r--  src/leveldb/db/dbformat_test.cc | 95
-rw-r--r--  src/leveldb/db/dumpfile.cc | 29
-rw-r--r--  src/leveldb/db/fault_injection_test.cc | 132
-rw-r--r--  src/leveldb/db/filename.cc | 37
-rw-r--r--  src/leveldb/db/filename.h | 31
-rw-r--r--  src/leveldb/db/filename_test.cc | 86
-rw-r--r--  src/leveldb/db/leveldbutil.cc | 21
-rw-r--r--  src/leveldb/db/log_reader.cc | 28
-rw-r--r--  src/leveldb/db/log_reader.h | 47
-rw-r--r--  src/leveldb/db/log_test.cc | 291
-rw-r--r--  src/leveldb/db/log_writer.cc | 29
-rw-r--r--  src/leveldb/db/log_writer.h | 14
-rw-r--r--  src/leveldb/db/memtable.cc | 68
-rw-r--r--  src/leveldb/db/memtable.h | 23
-rw-r--r--  src/leveldb/db/recovery_test.cc | 72
-rw-r--r--  src/leveldb/db/repair.cc | 95
-rw-r--r--  src/leveldb/db/skiplist.h | 182
-rw-r--r--  src/leveldb/db/skiplist_test.cc | 65
-rw-r--r--  src/leveldb/db/snapshot.h | 78
-rw-r--r--  src/leveldb/db/table_cache.cc | 47
-rw-r--r--  src/leveldb/db/table_cache.h | 33
-rw-r--r--  src/leveldb/db/version_edit.cc | 55
-rw-r--r--  src/leveldb/db/version_edit.h | 27
-rw-r--r--  src/leveldb/db/version_edit_test.cc | 6
-rw-r--r--  src/leveldb/db/version_set.cc | 517
-rw-r--r--  src/leveldb/db/version_set.h | 123
-rw-r--r--  src/leveldb/db/version_set_test.cc | 243
-rw-r--r--  src/leveldb/db/write_batch.cc | 23
-rw-r--r--  src/leveldb/db/write_batch_internal.h | 9
-rw-r--r--  src/leveldb/db/write_batch_test.cc | 73
-rw-r--r--  src/leveldb/doc/benchmark.html | 6
-rw-r--r--  src/leveldb/doc/impl.md | 14
-rw-r--r--  src/leveldb/doc/index.md | 10
-rw-r--r--  src/leveldb/helpers/memenv/memenv.cc | 203
-rw-r--r--  src/leveldb/helpers/memenv/memenv.h | 4
-rw-r--r--  src/leveldb/helpers/memenv/memenv_test.cc | 60
-rw-r--r--  src/leveldb/include/leveldb/c.h | 320
-rw-r--r--  src/leveldb/include/leveldb/cache.h | 21
-rw-r--r--  src/leveldb/include/leveldb/comparator.h | 11
-rw-r--r--  src/leveldb/include/leveldb/db.h | 58
-rw-r--r--  src/leveldb/include/leveldb/dumpfile.h | 5
-rw-r--r--  src/leveldb/include/leveldb/env.h | 220
-rw-r--r--  src/leveldb/include/leveldb/export.h | 33
-rw-r--r--  src/leveldb/include/leveldb/filter_policy.h | 12
-rw-r--r--  src/leveldb/include/leveldb/iterator.h | 34
-rw-r--r--  src/leveldb/include/leveldb/options.h | 100
-rw-r--r--  src/leveldb/include/leveldb/slice.h | 38
-rw-r--r--  src/leveldb/include/leveldb/status.h | 52
-rw-r--r--  src/leveldb/include/leveldb/table.h | 33
-rw-r--r--  src/leveldb/include/leveldb/table_builder.h | 11
-rw-r--r--  src/leveldb/include/leveldb/write_batch.h | 37
-rw-r--r--  src/leveldb/issues/issue178_test.cc | 12
-rw-r--r--  src/leveldb/issues/issue200_test.cc | 10
-rw-r--r--  src/leveldb/issues/issue320_test.cc | 128
-rw-r--r--  src/leveldb/port/README.md (renamed from src/leveldb/port/README) | 2
-rw-r--r--  src/leveldb/port/atomic_pointer.h | 245
-rw-r--r--  src/leveldb/port/port.h | 8
-rw-r--r--  src/leveldb/port/port_config.h.in | 39
-rw-r--r--  src/leveldb/port/port_example.h | 67
-rw-r--r--  src/leveldb/port/port_posix.cc | 67
-rw-r--r--  src/leveldb/port/port_posix.h | 161
-rw-r--r--  src/leveldb/port/port_posix_sse.cc | 110
-rw-r--r--  src/leveldb/port/port_stdcxx.h | 153
-rw-r--r--  src/leveldb/port/port_win.cc | 158
-rw-r--r--  src/leveldb/port/port_win.h | 184
-rw-r--r--  src/leveldb/port/thread_annotations.h | 78
-rw-r--r--  src/leveldb/port/win/stdint.h | 24
-rw-r--r--  src/leveldb/table/block.cc | 69
-rw-r--r--  src/leveldb/table/block.h | 16
-rw-r--r--  src/leveldb/table/block_builder.cc | 23
-rw-r--r--  src/leveldb/table/block_builder.h | 26
-rw-r--r--  src/leveldb/table/filter_block.cc | 19
-rw-r--r--  src/leveldb/table/filter_block.h | 21
-rw-r--r--  src/leveldb/table/filter_block_test.cc | 38
-rw-r--r--  src/leveldb/table/format.cc | 11
-rw-r--r--  src/leveldb/table/format.h | 42
-rw-r--r--  src/leveldb/table/iterator.cc | 75
-rw-r--r--  src/leveldb/table/iterator_wrapper.h | 58
-rw-r--r--  src/leveldb/table/merger.cc | 52
-rw-r--r--  src/leveldb/table/merger.h | 4
-rw-r--r--  src/leveldb/table/table.cc | 68
-rw-r--r--  src/leveldb/table/table_builder.cc | 61
-rw-r--r--  src/leveldb/table/table_test.cc | 315
-rw-r--r--  src/leveldb/table/two_level_iterator.cc | 91
-rw-r--r--  src/leveldb/table/two_level_iterator.h | 11
-rw-r--r--  src/leveldb/util/arena.cc | 18
-rw-r--r--  src/leveldb/util/arena.h | 25
-rw-r--r--  src/leveldb/util/arena_test.cc | 19
-rw-r--r--  src/leveldb/util/bloom.cc | 27
-rw-r--r--  src/leveldb/util/bloom_test.cc | 56
-rw-r--r--  src/leveldb/util/cache.cc | 123
-rw-r--r--  src/leveldb/util/cache_test.cc | 60
-rw-r--r--  src/leveldb/util/coding.cc | 84
-rw-r--r--  src/leveldb/util/coding.h | 136
-rw-r--r--  src/leveldb/util/coding_test.cc | 42
-rw-r--r--  src/leveldb/util/comparator.cc | 40
-rw-r--r--  src/leveldb/util/crc32c.cc | 634
-rw-r--r--  src/leveldb/util/crc32c.h | 6
-rw-r--r--  src/leveldb/util/crc32c_test.cc | 31
-rw-r--r--  src/leveldb/util/env.cc | 26
-rw-r--r--  src/leveldb/util/env_posix.cc | 1122
-rw-r--r--  src/leveldb/util/env_posix_test.cc | 300
-rw-r--r--  src/leveldb/util/env_test.cc | 235
-rw-r--r--  src/leveldb/util/env_win.cc | 902
-rw-r--r--  src/leveldb/util/env_windows.cc | 849
-rw-r--r--  src/leveldb/util/env_windows_test.cc | 64
-rw-r--r--  src/leveldb/util/env_windows_test_helper.h | 25
-rw-r--r--  src/leveldb/util/filter_policy.cc | 2
-rw-r--r--  src/leveldb/util/hash.cc | 15
-rw-r--r--  src/leveldb/util/hash.h | 4
-rw-r--r--  src/leveldb/util/hash_test.cc | 32
-rw-r--r--  src/leveldb/util/histogram.cc | 207
-rw-r--r--  src/leveldb/util/histogram.h | 20
-rw-r--r--  src/leveldb/util/logging.cc | 52
-rw-r--r--  src/leveldb/util/logging.h | 14
-rw-r--r--  src/leveldb/util/logging_test.cc | 143
-rw-r--r--  src/leveldb/util/mutexlock.h | 12
-rw-r--r--  src/leveldb/util/no_destructor.h | 46
-rw-r--r--  src/leveldb/util/no_destructor_test.cc | 47
-rw-r--r--  src/leveldb/util/options.cc | 18
-rw-r--r--  src/leveldb/util/posix_logger.h | 168
-rw-r--r--  src/leveldb/util/random.h | 9
-rw-r--r--  src/leveldb/util/status.cc | 14
-rw-r--r--  src/leveldb/util/status_test.cc | 40
-rw-r--r--  src/leveldb/util/testharness.cc | 18
-rw-r--r--  src/leveldb/util/testharness.h | 85
-rw-r--r--  src/leveldb/util/testutil.cc | 12
-rw-r--r--  src/leveldb/util/testutil.h | 29
-rw-r--r--  src/leveldb/util/windows_logger.h | 124
-rw-r--r--  src/logging.cpp | 3
-rw-r--r--  src/logging.h | 3
-rw-r--r--  src/logging/timer.h | 2
-rw-r--r--  src/memusage.h | 2
-rw-r--r--  src/miner.cpp | 3
-rw-r--r--  src/net.cpp | 54
-rw-r--r--  src/net.h | 67
-rw-r--r--  src/net_permissions.cpp | 4
-rw-r--r--  src/net_permissions.h | 4
-rw-r--r--  src/net_processing.cpp | 140
-rw-r--r--  src/net_processing.h | 5
-rw-r--r--  src/netaddress.cpp | 151
-rw-r--r--  src/netaddress.h | 17
-rw-r--r--  src/netbase.cpp | 83
-rw-r--r--  src/netbase.h | 14
-rw-r--r--  src/node/coinstats.cpp | 2
-rw-r--r--  src/node/context.cpp | 1
-rw-r--r--  src/node/context.h | 2
-rw-r--r--  src/node/psbt.cpp | 47
-rw-r--r--  src/node/transaction.cpp | 3
-rw-r--r--  src/node/transaction.h | 4
-rw-r--r--  src/outputtype.cpp | 2
-rw-r--r--  src/outputtype.h | 4
-rw-r--r--  src/policy/fees.h | 4
-rw-r--r--  src/prevector.h | 23
-rw-r--r--  src/psbt.cpp | 8
-rw-r--r--  src/psbt.h | 6
-rw-r--r--  src/qt/bantablemodel.cpp | 10
-rw-r--r--  src/qt/bantablemodel.h | 4
-rw-r--r--  src/qt/bitcoin.cpp | 12
-rw-r--r--  src/qt/bitcoin.h | 6
-rw-r--r--  src/qt/bitcoin_locale.qrc | 1
-rw-r--r--  src/qt/bitcoingui.cpp | 14
-rw-r--r--  src/qt/bitcoinunits.h | 2
-rw-r--r--  src/qt/clientmodel.cpp | 5
-rw-r--r--  src/qt/coincontroltreewidget.h | 2
-rw-r--r--  src/qt/csvmodelwriter.h | 2
-rw-r--r--  src/qt/forms/debugwindow.ui | 2
-rw-r--r--  src/qt/forms/modaloverlay.ui | 3
-rw-r--r--  src/qt/guiconstants.h | 5
-rw-r--r--  src/qt/guiutil.cpp | 29
-rw-r--r--  src/qt/guiutil.h | 11
-rw-r--r--  src/qt/intro.cpp | 104
-rw-r--r--  src/qt/intro.h | 12
-rw-r--r--  src/qt/macnotificationhandler.mm | 2
-rw-r--r--  src/qt/modaloverlay.cpp | 2
-rw-r--r--  src/qt/optionsdialog.cpp | 2
-rw-r--r--  src/qt/optionsmodel.cpp | 23
-rw-r--r--  src/qt/optionsmodel.h | 16
-rw-r--r--  src/qt/peertablemodel.cpp | 8
-rw-r--r--  src/qt/peertablemodel.h | 4
-rw-r--r--  src/qt/qvaluecombobox.h | 2
-rw-r--r--  src/qt/rpcconsole.cpp | 14
-rw-r--r--  src/qt/sendcoinsdialog.cpp | 1
-rw-r--r--  src/qt/signverifymessagedialog.cpp | 112
-rw-r--r--  src/qt/splashscreen.cpp | 2
-rw-r--r--  src/qt/test/addressbooktests.cpp | 5
-rw-r--r--  src/qt/test/addressbooktests.h | 4
-rw-r--r--  src/qt/test/apptests.h | 2
-rw-r--r--  src/qt/test/test_main.cpp | 2
-rw-r--r--  src/qt/test/util.cpp | 4
-rw-r--r--  src/qt/test/util.h | 4
-rw-r--r--  src/qt/test/wallettests.cpp | 9
-rw-r--r--  src/qt/test/wallettests.h | 4
-rw-r--r--  src/qt/trafficgraphwidget.h | 2
-rw-r--r--  src/qt/transactiondescdialog.h | 2
-rw-r--r--  src/qt/utilitydialog.cpp | 10
-rw-r--r--  src/qt/utilitydialog.h | 8
-rw-r--r--  src/qt/walletframe.cpp | 23
-rw-r--r--  src/qt/walletframe.h | 2
-rw-r--r--  src/qt/walletmodel.cpp | 26
-rw-r--r--  src/qt/walletview.cpp | 29
-rw-r--r--  src/qt/walletview.h | 6
-rw-r--r--  src/randomenv.cpp | 19
-rw-r--r--  src/reverselock.h | 34
-rw-r--r--  src/rpc/blockchain.cpp | 627
-rw-r--r--  src/rpc/client.cpp | 1
-rw-r--r--  src/rpc/mining.cpp | 218
-rw-r--r--  src/rpc/misc.cpp | 173
-rw-r--r--  src/rpc/net.cpp | 278
-rw-r--r--  src/rpc/rawtransaction.cpp | 577
-rw-r--r--  src/rpc/rawtransaction_util.cpp | 56
-rw-r--r--  src/rpc/rawtransaction_util.h | 4
-rw-r--r--  src/rpc/server.cpp | 32
-rw-r--r--  src/rpc/util.cpp | 115
-rw-r--r--  src/rpc/util.h | 149
-rw-r--r--  src/scheduler.cpp | 72
-rw-r--r--  src/scheduler.h | 42
-rw-r--r--  src/script/descriptor.cpp | 49
-rw-r--r--  src/script/descriptor.h | 29
-rw-r--r--  src/script/interpreter.cpp | 4
-rw-r--r--  src/script/sign.cpp | 60
-rw-r--r--  src/script/sign.h | 4
-rw-r--r--  src/script/signingprovider.h | 50
-rw-r--r--  src/script/standard.h | 7
-rw-r--r--  src/serialize.h | 267
-rw-r--r--  src/sync.cpp | 23
-rw-r--r--  src/sync.h | 42
-rw-r--r--  src/test/addrman_tests.cpp | 312
-rw-r--r--  src/test/blockchain_tests.cpp | 4
-rw-r--r--  src/test/blockfilter_index_tests.cpp | 2
-rw-r--r--  src/test/bswap_tests.cpp | 20
-rw-r--r--  src/test/checkqueue_tests.cpp | 2
-rw-r--r--  src/test/cuckoocache_tests.cpp | 1
-rw-r--r--  src/test/data/asmap.raw | bin 0 -> 59 bytes
-rw-r--r--  src/test/denialofservice_tests.cpp | 14
-rw-r--r--  src/test/descriptor_tests.cpp | 74
-rw-r--r--  src/test/fuzz/FuzzedDataProvider.h | 84
-rw-r--r--  src/test/fuzz/asmap.cpp | 28
-rw-r--r--  src/test/fuzz/bloom_filter.cpp | 80
-rw-r--r--  src/test/fuzz/decode_tx.cpp | 31
-rw-r--r--  src/test/fuzz/deserialize.cpp | 2
-rw-r--r--  src/test/fuzz/float.cpp | 42
-rw-r--r--  src/test/fuzz/fuzz.cpp | 13
-rw-r--r--  src/test/fuzz/hex.cpp | 16
-rw-r--r--  src/test/fuzz/integer.cpp | 94
-rw-r--r--  src/test/fuzz/key.cpp | 309
-rw-r--r--  src/test/fuzz/key_io.cpp | 48
-rw-r--r--  src/test/fuzz/locale.cpp | 96
-rw-r--r--  src/test/fuzz/netaddress.cpp | 123
-rw-r--r--  src/test/fuzz/p2p_transport_deserializer.cpp | 47
-rw-r--r--  src/test/fuzz/process_message.cpp | 98
-rw-r--r--  src/test/fuzz/rolling_bloom_filter.cpp | 50
-rw-r--r--  src/test/fuzz/script.cpp | 33
-rw-r--r--  src/test/fuzz/script_ops.cpp | 67
-rw-r--r--  src/test/fuzz/scriptnum_ops.cpp | 137
-rw-r--r--  src/test/fuzz/strprintf.cpp | 166
-rw-r--r--  src/test/fuzz/transaction.cpp | 24
-rw-r--r--  src/test/fuzz/util.h | 57
-rw-r--r--  src/test/getarg_tests.cpp | 28
-rw-r--r--  src/test/main.cpp | 15
-rw-r--r--  src/test/net_tests.cpp | 2
-rw-r--r--  src/test/netbase_tests.cpp | 55
-rw-r--r--  src/test/reverselock_tests.cpp | 45
-rw-r--r--  src/test/scheduler_tests.cpp | 78
-rw-r--r--  src/test/serialize_tests.cpp | 26
-rw-r--r--  src/test/sighash_tests.cpp | 5
-rw-r--r--  src/test/sync_tests.cpp | 4
-rw-r--r--  src/test/transaction_tests.cpp | 80
-rw-r--r--  src/test/txindex_tests.cpp | 4
-rw-r--r--  src/test/txvalidationcache_tests.cpp | 4
-rw-r--r--  src/test/util/setup_common.cpp | 16
-rw-r--r--  src/test/util/setup_common.h | 4
-rw-r--r--  src/test/util/transaction_utils.cpp | 32
-rw-r--r--  src/test/util/transaction_utils.h | 10
-rw-r--r--  src/test/util/wallet.cpp | 3
-rw-r--r--  src/test/util_tests.cpp | 150
-rw-r--r--  src/test/validation_block_tests.cpp | 2
-rw-r--r--  src/test/validation_flush_tests.cpp | 166
-rw-r--r--  src/timedata.cpp | 4
-rw-r--r--  src/torcontrol.cpp | 2
-rw-r--r--  src/txdb.cpp | 4
-rw-r--r--  src/txdb.h | 4
-rw-r--r--  src/txmempool.cpp | 49
-rw-r--r--  src/txmempool.h | 56
-rw-r--r--  src/ui_interface.cpp | 5
-rw-r--r--  src/ui_interface.h | 9
-rw-r--r--  src/uint256.cpp | 5
-rw-r--r--  src/uint256.h | 2
-rw-r--r--  src/undo.h | 69
-rw-r--r--  src/univalue/Makefile.am | 2
-rw-r--r--  src/univalue/lib/univalue_read.cpp | 11
-rw-r--r--  src/univalue/test/fail45.json | 1
-rw-r--r--  src/univalue/test/pass4.json | 1
-rw-r--r--  src/univalue/test/unitester.cpp | 2
-rw-r--r--  src/util/asmap.cpp | 103
-rw-r--r--  src/util/asmap.h | 10
-rw-r--r--  src/util/bip32.cpp | 2
-rw-r--r--  src/util/bip32.h | 2
-rw-r--r--  src/util/message.cpp | 92
-rw-r--r--  src/util/message.h | 76
-rw-r--r--  src/util/moneystr.cpp | 10
-rw-r--r--  src/util/moneystr.h | 2
-rw-r--r--  src/util/settings.h | 12
-rw-r--r--  src/util/system.cpp | 67
-rw-r--r--  src/util/system.h | 22
-rw-r--r--  src/util/time.cpp | 37
-rw-r--r--  src/util/time.h | 4
-rw-r--r--  src/util/validation.cpp | 19
-rw-r--r--  src/util/validation.h | 18
-rw-r--r--  src/validation.cpp | 96
-rw-r--r--  src/validation.h | 28
-rw-r--r--  src/validationinterface.cpp | 67
-rw-r--r--  src/validationinterface.h | 4
-rw-r--r--  src/wallet/db.cpp | 12
-rw-r--r--  src/wallet/db.h | 4
-rw-r--r--  src/wallet/feebumper.cpp | 13
-rw-r--r--  src/wallet/init.cpp | 4
-rw-r--r--  src/wallet/psbtwallet.cpp | 74
-rw-r--r--  src/wallet/psbtwallet.h | 32
-rw-r--r--  src/wallet/rpcdump.cpp | 49
-rw-r--r--  src/wallet/rpcwallet.cpp | 936
-rw-r--r--  src/wallet/rpcwallet.h | 2
-rw-r--r--  src/wallet/scriptpubkeyman.cpp | 195
-rw-r--r--  src/wallet/scriptpubkeyman.h | 146
-rw-r--r--  src/wallet/test/coinselector_tests.cpp | 20
-rw-r--r--  src/wallet/test/ismine_tests.cpp | 60
-rw-r--r--  src/wallet/test/psbt_wallet_tests.cpp | 14
-rw-r--r--  src/wallet/test/scriptpubkeyman_tests.cpp | 43
-rw-r--r--  src/wallet/test/wallet_tests.cpp | 25
-rw-r--r--  src/wallet/wallet.cpp | 528
-rw-r--r--  src/wallet/wallet.h | 128
-rw-r--r--  src/wallet/walletdb.cpp | 23
-rw-r--r--  src/wallet/walletdb.h | 2
-rw-r--r--  src/wallet/wallettool.cpp | 3
-rw-r--r--  src/zmq/zmqpublishnotifier.cpp | 5
-rw-r--r--  src/zmq/zmqrpc.cpp | 17
-rw-r--r--  test/README.md | 2
-rw-r--r--  test/functional/README.md | 5
-rwxr-xr-x  test/functional/combine_logs.py | 3
-rwxr-xr-x  test/functional/feature_abortnode.py | 4
-rwxr-xr-x  test/functional/feature_asmap.py | 106
-rwxr-xr-x  test/functional/feature_backwards_compatibility.py | 347
-rwxr-xr-x  test/functional/feature_block.py | 8
-rwxr-xr-x  test/functional/feature_cltv.py | 4
-rwxr-xr-x  test/functional/feature_config_args.py | 48
-rwxr-xr-x  test/functional/feature_csv_activation.py | 79
-rwxr-xr-x  test/functional/feature_dersig.py | 5
-rwxr-xr-x  test/functional/feature_fee_estimation.py | 8
-rwxr-xr-x  test/functional/feature_filelock.py | 2
-rwxr-xr-x  test/functional/feature_help.py | 2
-rwxr-xr-x  test/functional/feature_loadblock.py | 2
-rwxr-xr-x  test/functional/feature_logging.py | 2
-rwxr-xr-x  test/functional/feature_maxuploadtarget.py | 5
-rwxr-xr-x  test/functional/feature_notifications.py | 18
-rwxr-xr-x  test/functional/feature_nulldummy.py | 5
-rwxr-xr-x  test/functional/feature_pruning.py | 4
-rwxr-xr-x  test/functional/feature_segwit.py | 1
-rwxr-xr-x  test/functional/interface_rpc.py | 2
-rwxr-xr-x  test/functional/interface_zmq.py | 4
-rwxr-xr-x  test/functional/mempool_accept.py | 15
-rwxr-xr-x  test/functional/mempool_expiry.py | 100
-rwxr-xr-x  test/functional/mempool_packages.py | 32
-rwxr-xr-x  test/functional/mempool_persist.py | 4
-rwxr-xr-x  test/functional/mempool_reorg.py | 4
-rwxr-xr-x  test/functional/mining_basic.py | 2
-rwxr-xr-x  test/functional/p2p_invalid_block.py | 2
-rwxr-xr-x  test/functional/p2p_invalid_messages.py | 6
-rwxr-xr-x  test/functional/p2p_permissions.py | 68
-rwxr-xr-x  test/functional/p2p_segwit.py | 15
-rwxr-xr-x  test/functional/rpc_createmultisig.py | 14
-rwxr-xr-x  test/functional/rpc_dumptxoutset.py | 2
-rwxr-xr-x  test/functional/rpc_fundrawtransaction.py | 2
-rwxr-xr-x  test/functional/rpc_getaddressinfo_label_deprecation.py | 43
-rwxr-xr-x  test/functional/rpc_getaddressinfo_labels_purpose_deprecation.py | 48
-rwxr-xr-x  test/functional/rpc_getdescriptorinfo.py | 65
-rwxr-xr-x  test/functional/rpc_psbt.py | 37
-rwxr-xr-x  test/functional/rpc_scantxoutset.py | 2
-rwxr-xr-x  test/functional/rpc_setban.py | 2
-rw-r--r--  test/functional/test_framework/address.py | 2
-rw-r--r--  test/functional/test_framework/descriptors.py | 9
-rwxr-xr-x  test/functional/test_framework/messages.py | 6
-rwxr-xr-x  test/functional/test_framework/test_framework.py | 11
-rwxr-xr-x  test/functional/test_framework/test_node.py | 20
-rwxr-xr-x  test/functional/test_framework/wallet_util.py | 7
-rwxr-xr-x  test/functional/test_runner.py | 6
-rwxr-xr-x  test/functional/tool_wallet.py | 4
-rwxr-xr-x  test/functional/wallet_address_types.py | 2
-rwxr-xr-x  test/functional/wallet_avoidreuse.py | 60
-rwxr-xr-x  test/functional/wallet_backup.py | 36
-rwxr-xr-x  test/functional/wallet_basic.py | 7
-rwxr-xr-x  test/functional/wallet_bumpfee.py | 188
-rwxr-xr-x  test/functional/wallet_createwallet.py | 2
-rwxr-xr-x  test/functional/wallet_dump.py | 2
-rwxr-xr-x  test/functional/wallet_groups.py | 2
-rwxr-xr-x  test/functional/wallet_hd.py | 14
-rwxr-xr-x  test/functional/wallet_import_with_label.py | 45
-rwxr-xr-x  test/functional/wallet_importmulti.py | 7
-rwxr-xr-x  test/functional/wallet_keypool_topup.py | 2
-rwxr-xr-x  test/functional/wallet_labels.py | 15
-rwxr-xr-x  test/functional/wallet_listreceivedby.py | 7
-rwxr-xr-x  test/functional/wallet_listsinceblock.py | 10
-rwxr-xr-x  test/functional/wallet_multiwallet.py | 4
-rwxr-xr-x  test/functional/wallet_reorgsrestore.py | 2
-rwxr-xr-x  test/fuzz/test_runner.py | 88
-rw-r--r--  test/lint/README.md | 1
-rwxr-xr-x  test/lint/extended-lint-all.sh | 2
-rwxr-xr-x  test/lint/extended-lint-cppcheck.sh | 6
-rwxr-xr-x  test/lint/lint-circular-dependencies.sh | 5
-rwxr-xr-x  test/lint/lint-format-strings.py | 5
-rwxr-xr-x  test/lint/lint-format-strings.sh | 2
-rwxr-xr-x  test/lint/lint-include-guards.sh | 2
-rwxr-xr-x  test/lint/lint-includes.sh | 3
-rwxr-xr-x  test/lint/lint-locale-dependence.sh | 4
-rwxr-xr-x  test/lint/lint-python-utf8-encoding.sh | 4
-rwxr-xr-x  test/lint/lint-shebang.sh | 4
-rwxr-xr-x  test/lint/lint-shell.sh | 2
-rw-r--r--  test/lint/lint-spelling.ignore-words.txt | 2
-rwxr-xr-x  test/lint/lint-spelling.sh | 2
-rwxr-xr-x  test/lint/lint-submodule.sh | 20
-rwxr-xr-x  test/lint/lint-whitespace.sh | 4
-rw-r--r--  test/sanitizer_suppressions/tsan | 8
-rw-r--r--  test/sanitizer_suppressions/ubsan | 3
617 files changed, 22179 insertions, 12701 deletions
diff --git a/.appveyor.yml b/.appveyor.yml
index dacfba658b..777eebd2c3 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -13,6 +13,7 @@ environment:
QT_DOWNLOAD_HASH: '9a8c6eb20967873785057fdcd329a657c7f922b0af08c5fde105cc597dd37e21'
QT_LOCAL_PATH: 'C:\Qt5.9.8_x64_static_vs2019'
VCPKG_INSTALL_PATH: 'C:\tools\vcpkg\installed'
+ VCPKG_COMMIT_ID: 'ed0df8ecc4ed7e755ea03e18aaf285fd9b4b4a74'
cache:
- C:\tools\vcpkg\installed -> build_msvc\vcpkg-packages.txt
- C:\Users\appveyor\clcache -> .appveyor.yml, build_msvc\**, **\Makefile.am, **\*.vcxproj.in
@@ -25,7 +26,7 @@ install:
# 1. Check whether the vcpkg install directory exists (note that updating the vcpkg-packages.txt file
# will cause the appveyor cache rules to invalidate the directory)
# 2. If the directory is missing:
-# a. Update the vcpkg source (including port files) and build the vcpkg binary,
+# a. Checkout the vcpkg source (including port files) for the specific checkout and build the vcpkg binary,
# b. Install the missing packages.
- ps: |
$env:PACKAGES = Get-Content -Path build_msvc\vcpkg-packages.txt
@@ -34,6 +35,7 @@ install:
cd c:\tools\vcpkg
$env:GIT_REDIRECT_STDERR = '2>&1' # git is writing non-errors to STDERR when doing git pull. Send to STDOUT instead.
git pull origin master
+ git checkout $env:VCPKG_COMMIT_ID
.\bootstrap-vcpkg.bat
Add-Content "C:\tools\vcpkg\triplets\$env:PLATFORM-windows-static.cmake" "set(VCPKG_BUILD_TYPE release)"
.\vcpkg install --triplet $env:PLATFORM-windows-static $env:PACKAGES.split() > $null
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
deleted file mode 100644
index 1e15ec5f4b..0000000000
--- a/.github/workflows/ci.yml
+++ /dev/null
@@ -1,81 +0,0 @@
-name: bitcoin-core-ci
-
-on:
- push:
-jobs:
- build:
- runs-on: windows-latest
- env:
- PYTHONUTF8: 1
- QT_DOWNLOAD_URL: 'https://github.com/sipsorcery/qt_win_binary/releases/download/v1.6/Qt5.9.8_x64_static_vs2019.zip'
- QT_DOWNLOAD_HASH: '9a8c6eb20967873785057fdcd329a657c7f922b0af08c5fde105cc597dd37e21'
- QT_LOCAL_PATH: 'C:\Qt5.9.8_x64_static_vs2019'
- VCPKG_INSTALL_PATH: "$env:VCPKG_INSTALLATION_ROOT/installed"
- PLATFORM: x64
- steps:
- - uses: actions/checkout@v1
-
- - uses: actions/setup-python@v1
- with:
- python-version: '3.7' # Needed for PEP 540
-
- - name: Setup MSBuild.exe
- uses: warrenbuckley/Setup-MSBuild@v1
-
- - name: Check MSBuild.exe
- run: MSBuild.exe -version | Out-File -FilePath $env:GITHUB_WORKSPACE\MSBuild_version
-
- - uses: actions/cache@v1
- id: vcpkgcache
- with:
- path: C:/vcpkg/installed
- key: ${{ runner.os }}-vcpkg-${{ hashFiles('MSBuild_version') }}
-
- - name: Update vcpkg and install packages
- if: steps.vcpkgcache.outputs.cache-hit != 'true'
- run: |
- $env:PACKAGES = Get-Content -Path "$env:GITHUB_WORKSPACE/build_msvc/vcpkg-packages.txt"
- Write-Host "vcpkg list: $env:PACKAGES"
- cd $env:VCPKG_INSTALLATION_ROOT
- git pull origin master
- .\bootstrap-vcpkg.bat
- .\vcpkg install --triplet $env:PLATFORM-windows-static $env:PACKAGES.split() > $null
- - name: Install prebuilt Qt libraries
- run: |
- if(!(Test-Path -Path ($env:QT_LOCAL_PATH))) {
- Write-Host "Downloading Qt binaries.";
- Invoke-WebRequest -Uri $env:QT_DOWNLOAD_URL -Out qtdownload.zip;
- Write-Host "Qt binaries successfully downloaded, checking hash against $env:QT_DOWNLOAD_HASH...";
- if((Get-FileHash qtdownload.zip).Hash -eq $env:QT_DOWNLOAD_HASH) {
- Expand-Archive qtdownload.zip -DestinationPath $env:QT_LOCAL_PATH;
- Write-Host "Qt binary download matched the expected hash.";
- }
- else {
- Write-Host "ERROR: Qt binary download did not match the expected hash.";
- exit 1
- }
- }
- else {
- Write-Host "Qt binaries already present.";
- }
- - name: Generate project files
- run: python build_msvc\msvc-autogen.py
- - name: vcpkg integration
- run: C:/vcpkg/vcpkg.exe integrate install
- - name: Build
- run: msbuild build_msvc\bitcoin.sln /m /v:n /p:Configuration=Release
- - name: Run test_bitcoin
- shell: cmd
- run: src\test_bitcoin.exe -k stdout -e stdout 2> NUL
- - name: Run bench_bitcoin
- shell: cmd
- run: src\bench_bitcoin.exe -evals=1 -scaling=0 > NUL
- - name: bitcoin-util-test
- run: python test\util\bitcoin-util-test.py
- - name: rpcauth-test
- shell: cmd
- run: python test\util\rpcauth-test.py
- - name: test_runner
- shell: cmd
- run: |
- python test\functional\test_runner.py --ansi --ci --quiet --combinedlogslen=4000 --failfast --exclude feature_fee_estimation
diff --git a/.gitignore b/.gitignore
index 078657ae85..1c487f43a7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,8 +7,9 @@ src/bitcoind
src/bitcoin-cli
src/bitcoin-tx
src/bitcoin-wallet
+src/test/fuzz
+!src/test/fuzz/*.*
src/test/test_bitcoin
-src/test/test_bitcoin_fuzzy
src/qt/test/test_bitcoin-qt
# autoreconf
@@ -89,7 +90,7 @@ src/qt/bitcoin-qt.includes
*.qm
Makefile
!depends/Makefile
-bitcoin-qt
+src/qt/bitcoin-qt
Bitcoin-Qt.app
background.tiff*
@@ -107,6 +108,9 @@ qrc_*.cpp
.DS_Store
build
+# Previous releases
+releases
+
#lcov
*.gcno
*.gcda
diff --git a/.travis.yml b/.travis.yml
index 38ad79af29..9a111c03ad 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -35,6 +35,7 @@ cache:
- $TRAVIS_BUILD_DIR/depends/built
- $TRAVIS_BUILD_DIR/depends/sdk-sources
- $TRAVIS_BUILD_DIR/ci/scratch/.ccache
+ - $TRAVIS_BUILD_DIR/releases/$HOST
# macOS
- $HOME/Library/Caches/Homebrew
- /usr/local/Homebrew
@@ -82,18 +83,20 @@ jobs:
- set -o errexit; source ./ci/lint/06_script.sh
- stage: test
- name: 'ARM [GOAL: install] [bionic] [unit tests, functional tests]'
+ name: 'ARM [GOAL: install] [buster] [unit tests, functional tests]'
arch: arm64
env: >-
FILE_ENV="./ci/test/00_setup_env_arm.sh"
QEMU_USER_CMD="" # Can run the tests natively without qemu
- - stage: test
- name: 'S390x [GOAL: install] [bionic] [unit tests, functional tests]'
- arch: s390x
- env: >-
- FILE_ENV="./ci/test/00_setup_env_s390x.sh"
- QEMU_USER_CMD="" # Can run the tests natively without qemu
+# s390 build was disabled temporarily because of disk space issues on the Travis VM
+#
+# - stage: test
+# name: 'S390x [GOAL: install] [buster] [unit tests, functional tests]'
+# arch: s390x
+# env: >-
+# FILE_ENV="./ci/test/00_setup_env_s390x.sh"
+# QEMU_USER_CMD="" # Can run the tests natively without qemu
- stage: test
name: 'Win64 [GOAL: deploy] [unit tests, no gui, no functional tests]'
@@ -101,17 +104,12 @@ jobs:
FILE_ENV="./ci/test/00_setup_env_win64.sh"
- stage: test
- name: '32-bit + dash [GOAL: install] [gui]'
- env: >-
- FILE_ENV="./ci/test/00_setup_env_i686.sh"
-
- - stage: test
- name: 'x86_64 Linux [GOAL: install] [CentOS 7] [no depends, only system libs]'
+ name: '32-bit + dash [GOAL: install] [CentOS 7] [gui]'
env: >-
- FILE_ENV="./ci/test/00_setup_env_native_centos.sh"
+ FILE_ENV="./ci/test/00_setup_env_i686_centos.sh"
- stage: test
- name: 'x86_64 Linux [GOAL: install] [bionic] [uses qt5 dev package and some depends packages] [unsigned char]'
+ name: 'x86_64 Linux [GOAL: install] [bionic] [previous releases, uses qt5 dev package and some depends packages] [unsigned char]'
env: >-
FILE_ENV="./ci/test/00_setup_env_native_qt5.sh"
@@ -137,6 +135,11 @@ jobs:
FILE_ENV="./ci/test/00_setup_env_native_fuzz.sh"
- stage: test
+ name: 'x86_64 Linux [GOAL: install] [bionic] [no depends, only system libs, fuzzers under valgrind]'
+ env: >-
+ FILE_ENV="./ci/test/00_setup_env_native_fuzz_with_valgrind.sh"
+
+ - stage: test
name: 'x86_64 Linux [GOAL: install] [bionic] [no wallet]'
env: >-
FILE_ENV="./ci/test/00_setup_env_native_nowallet.sh"
diff --git a/Makefile.am b/Makefile.am
index 22b83e80dd..bd41c5ae27 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -194,6 +194,7 @@ LCOV_FILTER_PATTERN = \
-p "/usr/lib/" \
-p "/usr/lib64/" \
-p "src/leveldb/" \
+ -p "src/crc32c/" \
-p "src/bench/" \
-p "src/univalue" \
-p "src/crypto/ctaes" \
diff --git a/build-aux/m4/ax_boost_chrono.m4 b/build-aux/m4/ax_boost_chrono.m4
deleted file mode 100644
index 4cd3b86041..0000000000
--- a/build-aux/m4/ax_boost_chrono.m4
+++ /dev/null
@@ -1,118 +0,0 @@
-# ===========================================================================
-# https://www.gnu.org/software/autoconf-archive/ax_boost_chrono.html
-# ===========================================================================
-#
-# SYNOPSIS
-#
-# AX_BOOST_CHRONO
-#
-# DESCRIPTION
-#
-# Test for Chrono library from the Boost C++ libraries. The macro requires
-# a preceding call to AX_BOOST_BASE. Further documentation is available at
-# <http://randspringer.de/boost/index.html>.
-#
-# This macro calls:
-#
-# AC_SUBST(BOOST_CHRONO_LIB)
-#
-# And sets:
-#
-# HAVE_BOOST_CHRONO
-#
-# LICENSE
-#
-# Copyright (c) 2012 Xiyue Deng <manphiz@gmail.com>
-#
-# Copying and distribution of this file, with or without modification, are
-# permitted in any medium without royalty provided the copyright notice
-# and this notice are preserved. This file is offered as-is, without any
-# warranty.
-
-#serial 5
-
-AC_DEFUN([AX_BOOST_CHRONO],
-[
- AC_ARG_WITH([boost-chrono],
- AS_HELP_STRING([--with-boost-chrono@<:@=special-lib@:>@],
- [use the Chrono library from boost - it is possible to specify a certain library for the linker
- e.g. --with-boost-chrono=boost_chrono-gcc-mt ]),
- [
- if test "$withval" = "no"; then
- want_boost="no"
- elif test "$withval" = "yes"; then
- want_boost="yes"
- ax_boost_user_chrono_lib=""
- else
- want_boost="yes"
- ax_boost_user_chrono_lib="$withval"
- fi
- ],
- [want_boost="yes"]
- )
-
- if test "x$want_boost" = "xyes"; then
- AC_REQUIRE([AC_PROG_CC])
- AC_REQUIRE([AC_CANONICAL_BUILD])
- CPPFLAGS_SAVED="$CPPFLAGS"
- CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
- export CPPFLAGS
-
- LDFLAGS_SAVED="$LDFLAGS"
- LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
- export LDFLAGS
-
- AC_CACHE_CHECK(whether the Boost::Chrono library is available,
- ax_cv_boost_chrono,
- [AC_LANG_PUSH([C++])
- CXXFLAGS_SAVE=$CXXFLAGS
-
- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include <boost/chrono.hpp>]],
- [[boost::chrono::system_clock::time_point* time = new boost::chrono::system_clock::time_point; delete time;]])],
- ax_cv_boost_chrono=yes, ax_cv_boost_chrono=no)
- CXXFLAGS=$CXXFLAGS_SAVE
- AC_LANG_POP([C++])
- ])
- if test "x$ax_cv_boost_chrono" = "xyes"; then
- AC_SUBST(BOOST_CPPFLAGS)
-
- AC_DEFINE(HAVE_BOOST_CHRONO,,[define if the Boost::Chrono library is available])
- BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'`
-
- LDFLAGS_SAVE=$LDFLAGS
- if test "x$ax_boost_user_chrono_lib" = "x"; then
- for libextension in `ls $BOOSTLIBDIR/libboost_chrono*.so* $BOOSTLIBDIR/libboost_chrono*.dylib* $BOOSTLIBDIR/libboost_chrono*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_chrono.*\)\.so.*$;\1;' -e 's;^lib\(boost_chrono.*\)\.dylib.*$;\1;' -e 's;^lib\(boost_chrono.*\)\.a.*$;\1;'` ; do
- ax_lib=${libextension}
- AC_CHECK_LIB($ax_lib, exit,
- [BOOST_CHRONO_LIB="-l$ax_lib"; AC_SUBST(BOOST_CHRONO_LIB) link_chrono="yes"; break],
- [link_chrono="no"])
- done
- if test "x$link_chrono" != "xyes"; then
- for libextension in `ls $BOOSTLIBDIR/boost_chrono*.dll* $BOOSTLIBDIR/boost_chrono*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^\(boost_chrono.*\)\.dll.*$;\1;' -e 's;^\(boost_chrono.*\)\.a.*$;\1;'` ; do
- ax_lib=${libextension}
- AC_CHECK_LIB($ax_lib, exit,
- [BOOST_CHRONO_LIB="-l$ax_lib"; AC_SUBST(BOOST_CHRONO_LIB) link_chrono="yes"; break],
- [link_chrono="no"])
- done
- fi
-
- else
- for ax_lib in $ax_boost_user_chrono_lib boost_chrono-$ax_boost_user_chrono_lib; do
- AC_CHECK_LIB($ax_lib, exit,
- [BOOST_CHRONO_LIB="-l$ax_lib"; AC_SUBST(BOOST_CHRONO_LIB) link_chrono="yes"; break],
- [link_chrono="no"])
- done
-
- fi
- if test "x$ax_lib" = "x"; then
- AC_MSG_ERROR(Could not find a version of the Boost::Chrono library!)
- fi
- if test "x$link_chrono" = "xno"; then
- AC_MSG_ERROR(Could not link against $ax_lib !)
- fi
- fi
-
- CPPFLAGS="$CPPFLAGS_SAVED"
- LDFLAGS="$LDFLAGS_SAVED"
- fi
-])
diff --git a/build_msvc/.gitignore b/build_msvc/.gitignore
index 4d4aef7e35..3e71c7b8d3 100644
--- a/build_msvc/.gitignore
+++ b/build_msvc/.gitignore
@@ -8,7 +8,19 @@ packages/*
*/Release
*/x64
*.vcxproj.user
-*.vcxproj
+
+# .vcxproj files that are auto-generated by the msvc-autogen.py script.
+libbitcoin_cli/libbitcoin_cli.vcxproj
+libbitcoin_common/libbitcoin_common.vcxproj
+libbitcoin_crypto/libbitcoin_crypto.vcxproj
+libbitcoin_server/libbitcoin_server.vcxproj
+libbitcoin_util/libbitcoin_util.vcxproj
+libbitcoin_wallet_tool/libbitcoin_wallet_tool.vcxproj
+libbitcoin_wallet/libbitcoin_wallet.vcxproj
+libbitcoin_zmq/libbitcoin_zmq.vcxproj
+bench_bitcoin/bench_bitcoin.vcxproj
+libtest_util/libtest_util.vcxproj
+
*/Win32
libbitcoin_qt/QtGeneratedFiles/*
test_bitcoin-qt/QtGeneratedFiles/*
diff --git a/build_msvc/README.md b/build_msvc/README.md
index 704470cac8..d4e710d55b 100644
--- a/build_msvc/README.md
+++ b/build_msvc/README.md
@@ -12,7 +12,8 @@ Quick Start
The minimal steps required to build Bitcoin Core with the msbuild toolchain are below. More detailed instructions are contained in the following sections.
```
-vcpkg install --triplet x64-windows-static boost-filesystem boost-multi-index boost-signals2 boost-test boost-thread libevent zeromq berkeleydb rapidcheck double-conversion
+vcpkg install --triplet x64-windows-static berkeleydb boost-filesystem boost-multi-index boost-signals2 boost-test boost-thread libevent[thread] rapidcheck zeromq double-conversion
+vcpkg integrate install
py -3 build_msvc\msvc-autogen.py
msbuild /m build_msvc\bitcoin.sln /p:Platform=x64 /p:Configuration=Release /t:build
```
@@ -41,7 +42,9 @@ Qt
---------------------
In order to build the Bitcoin Core a static build of Qt is required. The runtime library version (e.g. v141, v142) and platform type (x86 or x64) must also match.
-A prebuilt version of Qt can be downloaded from [here](https://github.com/sipsorcery/qt_win_binary/releases). Please be aware this download is NOT an officially sanctioned Bitcoin Core distribution and is provided for developer convenience. It should NOT be used for builds that will be used in a production environment or with real funds.
+Some prebuilt x64 versions of Qt can be downloaded from [here](https://github.com/sipsorcery/qt_win_binary/releases). Please be aware these downloads are NOT officially sanctioned by Bitcoin Core and are provided for developer convenience only. They should NOT be used for builds that will be used in a production environment or with real funds.
+
+To determine which Qt prebuilt version to download open the `.appveyor.yml` file and note the `QT_DOWNLOAD_URL`. When extracting the zip file the destination path must be set to `C:\`. This is due to the way that Qt includes, libraries and tools use internal paths.
To build Bitcoin Core without Qt unload or disable the `bitcoin-qt`, `libbitcoin_qt` and `test_bitcoin-qt` projects.
@@ -54,6 +57,7 @@ The instructions below use `vcpkg` to install the dependencies.
```
PS >.\vcpkg install --triplet x64-windows-static $(Get-Content -Path build_msvc\vcpkg-packages.txt).split()
+PS >.\vcpkg integrate install
```
- Use Python to generate `*.vcxproj` from Makefile
@@ -64,18 +68,20 @@ PS >py -3 msvc-autogen.py
- An optional step is to adjust the settings in the build_msvc directory and the common.init.vcxproj file. This project file contains settings that are common to all projects such as the runtime library version and target Windows SDK version. The Qt directories can also be set.
-- Build with Visual Studio 2017 or msbuild.
+- To build from the command line with the Visual Studio 2017 toolchain use:
```
msbuild /m bitcoin.sln /p:Platform=x64 /p:Configuration=Release /p:PlatformToolset=v141 /t:build
```
-- Build with Visual Studio 2019 or msbuild.
+- To build from the command line with the Visual Studio 2019 toolchain use:
```
msbuild /m bitcoin.sln /p:Platform=x64 /p:Configuration=Release /t:build
```
+- Alternatively open the `build_msvc\bitcoin.sln` file in Visual Studio.
+
AppVeyor
---------------------
The .appveyor.yml in the root directory is suitable to perform builds on [AppVeyor](https://www.appveyor.com/) Continuous Integration servers. The simplest way to perform an AppVeyor build is to fork Bitcoin Core and then configure a new AppVeyor Project pointing to the forked repository.
diff --git a/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj b/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj
index dcfc1e1b33..17cd31a52e 100644
--- a/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj
+++ b/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj
@@ -50,7 +50,7 @@
</ProjectReference>
</ItemGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ItemDefinitionGroup Condition="'$(Configuration)'=='Release'">
<ClCompile>
<AdditionalIncludeDirectories>$(QtIncludes);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
@@ -64,7 +64,7 @@
</ResourceCompile>
</ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ItemDefinitionGroup Condition="'$(Configuration)'=='Debug'">
<ClCompile>
<AdditionalIncludeDirectories>$(QtIncludes);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
diff --git a/build_msvc/bitcoin_config.h b/build_msvc/bitcoin_config.h
index e892765fde..ea304dc9fc 100644
--- a/build_msvc/bitcoin_config.h
+++ b/build_msvc/bitcoin_config.h
@@ -1,3 +1,7 @@
+// Copyright (c) 2018-2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
#ifndef BITCOIN_BITCOIN_CONFIG_H
#define BITCOIN_BITCOIN_CONFIG_H
@@ -43,15 +47,9 @@
/* define if the Boost library is available */
#define HAVE_BOOST /**/
-/* define if the Boost::Chrono library is available */
-#define HAVE_BOOST_CHRONO /**/
-
/* define if the Boost::Filesystem library is available */
#define HAVE_BOOST_FILESYSTEM /**/
-/* define if the Boost::PROGRAM_OPTIONS library is available */
-#define HAVE_BOOST_PROGRAM_OPTIONS /**/
-
/* define if the Boost::System library is available */
#define HAVE_BOOST_SYSTEM /**/
@@ -179,75 +177,6 @@
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
-/* Define to 1 if you have the `advapi32' library (-ladvapi32). */
-#define HAVE_LIBADVAPI32 1
-
-/* Define to 1 if you have the `comctl32' library (-lcomctl32). */
-#define HAVE_LIBCOMCTL32 1
-
-/* Define to 1 if you have the `comdlg32' library (-lcomdlg32). */
-#define HAVE_LIBCOMDLG32 1
-
-/* Define to 1 if you have the `crypt32' library (-lcrypt32). */
-#define HAVE_LIBCRYPT32 1
-
-/* Define to 1 if you have the `gdi32' library (-lgdi32). */
-#define HAVE_LIBGDI32 1
-
-/* Define to 1 if you have the `imm32' library (-limm32). */
-#define HAVE_LIBIMM32 1
-
-/* Define to 1 if you have the `iphlpapi' library (-liphlpapi). */
-#define HAVE_LIBIPHLPAPI 1
-
-/* Define to 1 if you have the `kernel32' library (-lkernel32). */
-#define HAVE_LIBKERNEL32 1
-
-/* Define to 1 if you have the `mingwthrd' library (-lmingwthrd). */
-#define HAVE_LIBMINGWTHRD 1
-
-/* Define to 1 if you have the `mswsock' library (-lmswsock). */
-#define HAVE_LIBMSWSOCK 1
-
-/* Define to 1 if you have the `ole32' library (-lole32). */
-#define HAVE_LIBOLE32 1
-
-/* Define to 1 if you have the `oleaut32' library (-loleaut32). */
-#define HAVE_LIBOLEAUT32 1
-
-/* Define to 1 if you have the `rpcrt4' library (-lrpcrt4). */
-#define HAVE_LIBRPCRT4 1
-
-/* Define to 1 if you have the `rt' library (-lrt). */
-/* #undef HAVE_LIBRT */
-
-/* Define to 1 if you have the `shell32' library (-lshell32). */
-#define HAVE_LIBSHELL32 1
-
-/* Define to 1 if you have the `shlwapi' library (-lshlwapi). */
-#define HAVE_LIBSHLWAPI 1
-
-/* Define to 1 if you have the `ssp' library (-lssp). */
-#define HAVE_LIBSSP 1
-
-/* Define to 1 if you have the `user32' library (-luser32). */
-#define HAVE_LIBUSER32 1
-
-/* Define to 1 if you have the `uuid' library (-luuid). */
-#define HAVE_LIBUUID 1
-
-/* Define to 1 if you have the `winmm' library (-lwinmm). */
-#define HAVE_LIBWINMM 1
-
-/* Define to 1 if you have the `winspool' library (-lwinspool). */
-#define HAVE_LIBWINSPOOL 1
-
-/* Define to 1 if you have the `ws2_32' library (-lws2_32). */
-#define HAVE_LIBWS2_32 1
-
-/* Define to 1 if you have the `z ' library (-lz ). */
-#define HAVE_LIBZ_ 1
-
/* Define this symbol if you have malloc_info */
/* #undef HAVE_MALLOC_INFO */
@@ -326,12 +255,6 @@
/* Define if the visibility attribute is supported. */
#define HAVE_VISIBILITY_ATTRIBUTE 1
-/* Define this symbol if boost sleep works */
-/* #undef HAVE_WORKING_BOOST_SLEEP */
-
-/* Define this symbol if boost sleep_for works */
-#define HAVE_WORKING_BOOST_SLEEP_FOR 1
-
/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"
diff --git a/build_msvc/common.init.vcxproj b/build_msvc/common.init.vcxproj
index 583cd9f88c..c09997d39d 100644
--- a/build_msvc/common.init.vcxproj
+++ b/build_msvc/common.init.vcxproj
@@ -6,6 +6,7 @@
<VCProjectVersion>16.0</VCProjectVersion>
<VcpkgTriplet Condition="'$(Platform)'=='Win32'">x86-windows-static</VcpkgTriplet>
<VcpkgTriplet Condition="'$(Platform)'=='x64'">x64-windows-static</VcpkgTriplet>
+ <UseNativeEnvironment>true</UseNativeEnvironment>
</PropertyGroup>
<PropertyGroup Condition="'$(WindowsTargetPlatformVersion)'=='' and !Exists('$(WindowsSdkDir)\DesignTime\CommonConfiguration\Neutral\Windows.props')">
@@ -115,7 +116,7 @@
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
- <AdditionalDependencies>crypt32.lib;Iphlpapi.lib;ws2_32.lib;Shlwapi.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalDependencies>Iphlpapi.lib;ws2_32.lib;Shlwapi.lib;kernel32.lib;user32.lib;gdi32.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
<Lib>
<AdditionalOptions>/ignore:4221</AdditionalOptions>
diff --git a/build_msvc/libleveldb/libleveldb.vcxproj b/build_msvc/libleveldb/libleveldb.vcxproj
index f855923c62..1610ae7d86 100644
--- a/build_msvc/libleveldb/libleveldb.vcxproj
+++ b/build_msvc/libleveldb/libleveldb.vcxproj
@@ -24,8 +24,6 @@
<ClCompile Include="..\..\src\leveldb\db\version_set.cc" />
<ClCompile Include="..\..\src\leveldb\db\write_batch.cc" />
<ClCompile Include="..\..\src\leveldb\helpers\memenv\memenv.cc" />
- <ClCompile Include="..\..\src\leveldb\port\port_posix_sse.cc" />
- <ClCompile Include="..\..\src\leveldb\port\port_win.cc" />
<ClCompile Include="..\..\src\leveldb\table\block.cc" />
<ClCompile Include="..\..\src\leveldb\table\block_builder.cc" />
<ClCompile Include="..\..\src\leveldb\table\filter_block.cc" />
@@ -42,7 +40,7 @@
<ClCompile Include="..\..\src\leveldb\util\comparator.cc" />
<ClCompile Include="..\..\src\leveldb\util\crc32c.cc" />
<ClCompile Include="..\..\src\leveldb\util\env.cc" />
- <ClCompile Include="..\..\src\leveldb\util\env_win.cc" />
+ <ClCompile Include="..\..\src\leveldb\util\env_windows.cc" />
<ClCompile Include="..\..\src\leveldb\util\filter_policy.cc" />
<ClCompile Include="..\..\src\leveldb\util\hash.cc" />
<ClCompile Include="..\..\src\leveldb\util\histogram.cc" />
@@ -51,11 +49,11 @@
<ClCompile Include="..\..\src\leveldb\util\status.cc" />
</ItemGroup>
<ItemDefinitionGroup>
- <ClCompile>
- <PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;LEVELDB_PLATFORM_WINDOWS;LEVELDB_ATOMIC_PRESENT;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <DisableSpecificWarnings>4244;4267;4312;</DisableSpecificWarnings>
- <AdditionalIncludeDirectories>..\..\src\leveldb;..\..\src\leveldb\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- </ClCompile>
+ <ClCompile>
+ <PreprocessorDefinitions>HAVE_CRC32C=0;HAVE_SNAPPY=0;__STDC_LIMIT_MACROS;LEVELDB_IS_BIG_ENDIAN=0;_UNICODE;UNICODE;_CRT_NONSTDC_NO_DEPRECATE;LEVELDB_PLATFORM_WINDOWS;LEVELDB_ATOMIC_PRESENT;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <DisableSpecificWarnings>4244;4267;4312;4722;</DisableSpecificWarnings>
+ <AdditionalIncludeDirectories>..\..\src\leveldb;..\..\src\leveldb\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ </ClCompile>
</ItemDefinitionGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
diff --git a/build_msvc/msvc-autogen.py b/build_msvc/msvc-autogen.py
index 3951438408..d99b17d381 100644
--- a/build_msvc/msvc-autogen.py
+++ b/build_msvc/msvc-autogen.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python3
+# Copyright (c) 2016-2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
import re
diff --git a/build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj b/build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj
index 0979eee8dd..2095c0c321 100644
--- a/build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj
+++ b/build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj
@@ -67,21 +67,23 @@
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ItemDefinitionGroup Condition="'$(Configuration)'=='Release'">
<ClCompile>
<AdditionalIncludeDirectories>..\libbitcoin_qt\$(GeneratedFilesOutDir)\..\;$(QtIncludeDir)\QtTest;$(QtIncludes);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalDependencies>$(QtLibraryDir)\Qt5Test.lib;$(QtReleaseLibraries);%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalOptions>/ignore:4206</AdditionalOptions>
</Link>
</ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ItemDefinitionGroup Condition="'$(Configuration)'=='Debug'">
<ClCompile>
<AdditionalIncludeDirectories>..\libbitcoin_qt\$(GeneratedFilesOutDir)\..\;$(QtIncludeDir)\QtTest;$(QtIncludes);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<AdditionalDependencies>$(QtDebugLibraries);%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalOptions>/ignore:4206</AdditionalOptions>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
diff --git a/build_msvc/test_bitcoin/test_bitcoin.vcxproj b/build_msvc/test_bitcoin/test_bitcoin.vcxproj
index 96d20a2720..5c4b777d51 100644
--- a/build_msvc/test_bitcoin/test_bitcoin.vcxproj
+++ b/build_msvc/test_bitcoin/test_bitcoin.vcxproj
@@ -61,7 +61,9 @@
</PropertyGroup>
<ItemGroup>
<JsonTestFile Include="..\..\src\test\data\*.json" />
+ <RawTestFile Include="..\..\src\test\data\*.raw" />
</ItemGroup>
+ <HeaderFromHexdump RawFilePath="%(RawTestFile.FullPath)" HeaderFilePath="%(RawTestFile.FullPath).h" SourceHeader="static unsigned const char %(RawTestFile.Filename)_raw[] = {" SourceFooter="};" />
<HeaderFromHexdump RawFilePath="%(JsonTestFile.FullPath)" HeaderFilePath="%(JsonTestFile.FullPath).h" SourceHeader="namespace json_tests{ static unsigned const char %(JsonTestFile.Filename)[] = {" SourceFooter="};}" />
</Target>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
diff --git a/build_msvc/testconsensus/testconsensus.cpp b/build_msvc/testconsensus/testconsensus.cpp
index 0068f588cc..571d19957f 100644
--- a/build_msvc/testconsensus/testconsensus.cpp
+++ b/build_msvc/testconsensus/testconsensus.cpp
@@ -1,3 +1,7 @@
+// Copyright (c) 2018 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
#include <iostream>
// bitcoin includes.
diff --git a/ci/README.md b/ci/README.md
index 880e49b459..d2ea255b4b 100644
--- a/ci/README.md
+++ b/ci/README.md
@@ -13,7 +13,7 @@ If the repository is not a fresh git clone, you might have to clean files from p
The ci needs to perform various sysadmin tasks such as installing packages or writing to the user's home directory.
While most of the actions are done inside a docker container, this is not possible for all. Thus, cache directories,
-such as the depends cache or ccache, are mounted as read-write into the docker container. While it should be fine to run
+such as the depends cache, previous release binaries, or ccache, are mounted as read-write into the docker container. While it should be fine to run
the ci system locally on you development box, the ci scripts can generally be assumed to have received less review and
testing compared to other parts of the codebase. If you want to keep the work tree clean, you might want to run the ci
system in a virtual machine with a Linux operating system of your choice.
diff --git a/ci/lint/06_script.sh b/ci/lint/06_script.sh
index ae8122f9af..003bdf3c29 100755
--- a/ci/lint/06_script.sh
+++ b/ci/lint/06_script.sh
@@ -14,6 +14,7 @@ test/lint/git-subtree-check.sh src/crypto/ctaes
test/lint/git-subtree-check.sh src/secp256k1
test/lint/git-subtree-check.sh src/univalue
test/lint/git-subtree-check.sh src/leveldb
+test/lint/git-subtree-check.sh src/crc32c
test/lint/check-doc.py
test/lint/check-rpc-mappings.py .
test/lint/lint-all.sh
diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh
index 1f485fbec4..4c22e4e6c5 100755
--- a/ci/test/00_setup_env.sh
+++ b/ci/test/00_setup_env.sh
@@ -33,7 +33,9 @@ export HOST=${HOST:-$("$BASE_ROOT_DIR/depends/config.guess")}
export USE_BUSY_BOX=${USE_BUSY_BOX:-false}
export RUN_UNIT_TESTS=${RUN_UNIT_TESTS:-true}
export RUN_FUNCTIONAL_TESTS=${RUN_FUNCTIONAL_TESTS:-true}
+export TEST_PREVIOUS_RELEASES=${TEST_PREVIOUS_RELEASES:-false}
export RUN_FUZZ_TESTS=${RUN_FUZZ_TESTS:-false}
+export CONTAINER_NAME=${CONTAINER_NAME:-ci_unnamed}
export DOCKER_NAME_TAG=${DOCKER_NAME_TAG:-ubuntu:18.04}
# Randomize test order.
# See https://www.boost.org/doc/libs/1_71_0/libs/test/doc/html/boost_test/utf_reference/rt_param_reference/random.html
@@ -49,9 +51,10 @@ export CCACHE_DIR=${CCACHE_DIR:-$BASE_SCRATCH_DIR/.ccache}
export DEPENDS_DIR=${DEPENDS_DIR:-$BASE_ROOT_DIR/depends}
# Folder where the build is done (bin and lib).
export BASE_OUTDIR=${BASE_OUTDIR:-$BASE_SCRATCH_DIR/out/$HOST}
+export PREVIOUS_RELEASES_DIR=${PREVIOUS_RELEASES_DIR:-$BASE_ROOT_DIR/releases/$HOST}
export SDK_URL=${SDK_URL:-https://bitcoincore.org/depends-sources/sdks}
export WINEDEBUG=${WINEDEBUG:-fixme-all}
-export DOCKER_PACKAGES=${DOCKER_PACKAGES:-build-essential libtool autotools-dev automake pkg-config bsdmainutils curl ca-certificates ccache python3 rsync git}
+export DOCKER_PACKAGES=${DOCKER_PACKAGES:-build-essential libtool autotools-dev automake pkg-config bsdmainutils curl ca-certificates ccache python3 rsync git procps}
export GOAL=${GOAL:-install}
export DIR_QA_ASSETS=${DIR_QA_ASSETS:-${BASE_SCRATCH_DIR}/qa-assets}
export PATH=${BASE_ROOT_DIR}/ci/retry:$PATH
diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh
index 6e2542584c..2b30b4a5e3 100644
--- a/ci/test/00_setup_env_arm.sh
+++ b/ci/test/00_setup_env_arm.sh
@@ -9,15 +9,20 @@ export LC_ALL=C.UTF-8
export HOST=arm-linux-gnueabihf
# The host arch is unknown, so we run the tests through qemu.
# If the host is arm and wants to run the tests natively, it can set QEMU_USER_CMD to the empty string.
-export QEMU_USER_CMD="${QEMU_USER_CMD:"qemu-arm -L /usr/arm-linux-gnueabihf/"}"
-# We don't know whether the host can run the cross compiled binaries. To run them, either qemu-user or libc6:armhf for
-# the target is required, so install both.
+if [ -z ${QEMU_USER_CMD+x} ]; then export QEMU_USER_CMD="${QEMU_USER_CMD:-"qemu-arm -L /usr/arm-linux-gnueabihf/"}"; fi
export DPKG_ADD_ARCH="armhf"
-export PACKAGES="python3 g++-arm-linux-gnueabihf busybox qemu-user libc6:armhf libstdc++6:armhf libfontconfig1:armhf libxcb1:armhf"
+export PACKAGES="python3-zmq g++-arm-linux-gnueabihf busybox libc6:armhf libstdc++6:armhf libfontconfig1:armhf libxcb1:armhf"
+if [ -n "$QEMU_USER_CMD" ]; then
+ # Likely cross-compiling, so install the needed gcc and qemu-user
+ export PACKAGES="$PACKAGES qemu-user"
+fi
+export CONTAINER_NAME=ci_arm_linux
+# Use debian to avoid 404 apt errors when cross compiling
+export DOCKER_NAME_TAG="debian:buster"
export USE_BUSY_BOX=true
export RUN_UNIT_TESTS=true
export RUN_FUNCTIONAL_TESTS=true
export GOAL="install"
# -Wno-psabi is to disable ABI warnings: "note: parameter passing for argument of type ... changed in GCC 7.1"
# This could be removed once the ABI change warning does not show up by default
-export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi"
+export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi --enable-werror"
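The `${QEMU_USER_CMD+x}` test above is what lets a native arm host disable the qemu wrapper by exporting `QEMU_USER_CMD=""`: unlike `${VAR:-default}`, it distinguishes "unset" from "set to the empty string". A minimal sketch of the difference (illustrative, not part of the patch):

```sh
#!/usr/bin/env bash
# ${VAR+x} expands to "x" whenever VAR is set, even to ""; ${VAR:-default}
# substitutes the default when VAR is unset *or* empty.
unset QEMU_USER_CMD
[ -z ${QEMU_USER_CMD+x} ] && echo "unset: the default qemu command would be applied"
export QEMU_USER_CMD=""
[ -z ${QEMU_USER_CMD+x} ] || echo "set but empty: run binaries natively, no qemu wrapper"
```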
diff --git a/ci/test/00_setup_env_i686.sh b/ci/test/00_setup_env_i686.sh
deleted file mode 100644
index 6df65dd4a0..0000000000
--- a/ci/test/00_setup_env_i686.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright (c) 2019 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-export LC_ALL=C.UTF-8
-
-export HOST=i686-pc-linux-gnu
-export PACKAGES="g++-multilib python3-zmq"
-export GOAL="install"
-export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports LDFLAGS=-static-libstdc++"
-export CONFIG_SHELL="/bin/dash"
diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh
new file mode 100644
index 0000000000..5688799f9e
--- /dev/null
+++ b/ci/test/00_setup_env_i686_centos.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+export LC_ALL=C.UTF-8
+
+export HOST=i686-pc-linux-gnu
+export CONTAINER_NAME=ci_i686_centos_7
+export DOCKER_NAME_TAG=centos:7
+export DOCKER_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python3 python36-zmq which patch lbzip2 dash"
+export GOAL="install"
+export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports"
+export CONFIG_SHELL="/bin/dash"
diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh
index af166b6ca7..6ed6e40bd6 100644
--- a/ci/test/00_setup_env_mac.sh
+++ b/ci/test/00_setup_env_mac.sh
@@ -6,9 +6,10 @@
export LC_ALL=C.UTF-8
+export CONTAINER_NAME=ci_macos_cross
export HOST=x86_64-apple-darwin16
export PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python3-dev python3-setuptools"
-export OSX_SDK=10.11
+export OSX_SDK=10.14
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export GOAL="deploy"
diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh
index 2ffd3c5107..d5f39daaf5 100644
--- a/ci/test/00_setup_env_native_asan.sh
+++ b/ci/test/00_setup_env_native_asan.sh
@@ -6,7 +6,8 @@
export LC_ALL=C.UTF-8
-export PACKAGES="clang-8 llvm-8 python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev"
+export CONTAINER_NAME=ci_native_asan
+export PACKAGES="clang-8 llvm-8 python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev"
# Use clang-8 instead of default clang (which is clang-6 on Bionic) to avoid spurious segfaults when running on ppc64le
export NO_DEPENDS=1
export GOAL="install"
diff --git a/ci/test/00_setup_env_native_centos.sh b/ci/test/00_setup_env_native_centos.sh
deleted file mode 100644
index 56b915b6c7..0000000000
--- a/ci/test/00_setup_env_native_centos.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright (c) 2019 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-export LC_ALL=C.UTF-8
-
-export DOCKER_NAME_TAG=centos:7
-export DOCKER_PACKAGES="gcc-c++ libtool make git python3 python36-zmq"
-export PACKAGES="boost-devel libevent-devel libdb4-devel libdb4-cxx-devel miniupnpc-devel zeromq-devel qt5-qtbase-devel qt5-qttools-devel qrencode-devel"
-export NO_DEPENDS=1
-export GOAL="install"
-export BITCOIN_CONFIG="--enable-reduce-exports"
diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh
index b0405bb762..a739ad50d2 100644
--- a/ci/test/00_setup_env_native_fuzz.sh
+++ b/ci/test/00_setup_env_native_fuzz.sh
@@ -6,7 +6,8 @@
export LC_ALL=C.UTF-8
-export PACKAGES="clang-8 llvm-8 python3 libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-test-dev libboost-thread-dev"
+export CONTAINER_NAME=ci_native_fuzz
+export PACKAGES="clang-8 llvm-8 python3 libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev"
export NO_DEPENDS=1
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
diff --git a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh
new file mode 100644
index 0000000000..fabb3affa4
--- /dev/null
+++ b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+export LC_ALL=C.UTF-8
+
+export CONTAINER_NAME=ci_native_fuzz_valgrind
+export PACKAGES="clang-8 llvm-8 python3 libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev valgrind"
+export NO_DEPENDS=1
+export RUN_UNIT_TESTS=false
+export RUN_FUNCTIONAL_TESTS=false
+export RUN_FUZZ_TESTS=true
+export FUZZ_TESTS_CONFIG="--valgrind"
+export GOAL="install"
+export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer CC=clang-8 CXX=clang++-8"
+# Use clang-8, instead of default clang on bionic, which is clang-6 and does not come with libfuzzer on aarch64
diff --git a/ci/test/00_setup_env_native_nowallet.sh b/ci/test/00_setup_env_native_nowallet.sh
index 53348559be..6bb371920c 100644
--- a/ci/test/00_setup_env_native_nowallet.sh
+++ b/ci/test/00_setup_env_native_nowallet.sh
@@ -6,6 +6,7 @@
export LC_ALL=C.UTF-8
+export CONTAINER_NAME=ci_native_nowallet
export PACKAGES="python3-zmq"
export DEP_OPTS="NO_WALLET=1"
export GOAL="install"
diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh
index 622ec3cfe1..21c15236d2 100644
--- a/ci/test/00_setup_env_native_qt5.sh
+++ b/ci/test/00_setup_env_native_qt5.sh
@@ -6,8 +6,10 @@
export LC_ALL=C.UTF-8
+export CONTAINER_NAME=ci_native_qt5
export PACKAGES="python3-zmq qtbase5-dev qttools5-dev-tools libdbus-1-dev libharfbuzz-dev"
export DEP_OPTS="NO_QT=1 NO_UPNP=1 DEBUG=1 ALLOW_HOST_PACKAGES=1"
export TEST_RUNNER_EXTRA="--coverage --extended --exclude feature_dbcrash" # Run extended tests so that coverage does not fail, but exclude the very slow dbcrash
export GOAL="install"
+export TEST_PREVIOUS_RELEASES=true
export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\""
diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh
index e9b7a7bba1..4d3f345ca6 100644
--- a/ci/test/00_setup_env_native_tsan.sh
+++ b/ci/test/00_setup_env_native_tsan.sh
@@ -6,8 +6,9 @@
export LC_ALL=C.UTF-8
+export CONTAINER_NAME=ci_native_tsan
export DOCKER_NAME_TAG=ubuntu:16.04
-export PACKAGES="clang llvm python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev"
+export PACKAGES="clang llvm python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev"
export NO_DEPENDS=1
export GOAL="install"
export BITCOIN_CONFIG="--enable-zmq --disable-wallet --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=thread --disable-hardening --disable-asm CC=clang CXX=clang++"
diff --git a/ci/test/00_setup_env_native_valgrind.sh b/ci/test/00_setup_env_native_valgrind.sh
index 906ffd7d79..f112aa87f9 100644
--- a/ci/test/00_setup_env_native_valgrind.sh
+++ b/ci/test/00_setup_env_native_valgrind.sh
@@ -6,10 +6,11 @@
export LC_ALL=C.UTF-8
-export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev"
+export CONTAINER_NAME=ci_native_valgrind
+export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev"
export USE_VALGRIND=1
export NO_DEPENDS=1
-export TEST_RUNNER_EXTRA="p2p_segwit.py" # Only run one test for now. TODO enable all and bump timeouts
+export TEST_RUNNER_EXTRA="--exclude feature_abortnode,feature_block,rpc_bind" # Excluded for now
export RUN_FUNCTIONAL_TESTS=true
export GOAL="install"
export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=no CC=clang CXX=clang++" # TODO enable GUI
diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh
index 637d549553..72ad99fe5d 100644
--- a/ci/test/00_setup_env_s390x.sh
+++ b/ci/test/00_setup_env_s390x.sh
@@ -9,8 +9,16 @@ export LC_ALL=C.UTF-8
export HOST=s390x-linux-gnu
# The host arch is unknown, so we run the tests through qemu.
# If the host is s390x and wants to run the tests natively, it can set QEMU_USER_CMD to the empty string.
-export QEMU_USER_CMD="${QEMU_USER_CMD:"qemu-s390x"}"
-export PACKAGES="python3-zmq bsdmainutils qemu-user"
+if [ -z ${QEMU_USER_CMD+x} ]; then export QEMU_USER_CMD="${QEMU_USER_CMD:-"qemu-s390x"}"; fi
+export PACKAGES="python3-zmq"
+if [ -n "$QEMU_USER_CMD" ]; then
+ # Likely cross-compiling, so install the needed gcc and qemu-user
+ export DPKG_ADD_ARCH="s390x"
+ export PACKAGES="$PACKAGES g++-s390x-linux-gnu qemu-user libc6:s390x libstdc++6:s390x libfontconfig1:s390x libxcb1:s390x"
+fi
+# Use debian to avoid 404 apt errors
+export CONTAINER_NAME=ci_s390x
+export DOCKER_NAME_TAG="debian:buster"
export RUN_UNIT_TESTS=true
export RUN_FUNCTIONAL_TESTS=true
export GOAL="install"
diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh
index 1e04c4287a..a34933731c 100644
--- a/ci/test/00_setup_env_win64.sh
+++ b/ci/test/00_setup_env_win64.sh
@@ -6,6 +6,7 @@
export LC_ALL=C.UTF-8
+export CONTAINER_NAME=ci_win64
export HOST=x86_64-w64-mingw32
export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64"
export RUN_FUNCTIONAL_TESTS=false
diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh
index 56f4ece1ad..c6e8331a71 100755
--- a/ci/test/04_install.sh
+++ b/ci/test/04_install.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
#
-# Copyright (c) 2018-2019 The Bitcoin Core developers
+# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -9,6 +9,9 @@ export LC_ALL=C.UTF-8
if [[ $DOCKER_NAME_TAG == centos* ]]; then
export LC_ALL=en_US.utf8
fi
+if [[ $QEMU_USER_CMD == qemu-s390* ]]; then
+ export LC_ALL=C
+fi
if [ "$TRAVIS_OS_NAME" == "osx" ]; then
set +o errexit
@@ -37,12 +40,13 @@ fi
mkdir -p "${BASE_SCRATCH_DIR}"
mkdir -p "${CCACHE_DIR}"
+mkdir -p "${PREVIOUS_RELEASES_DIR}"
export ASAN_OPTIONS="detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1"
export LSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/lsan"
export TSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/tsan:log_path=${BASE_SCRATCH_DIR}/sanitizer-output/tsan"
export UBSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1"
-env | grep -E '^(BITCOIN_CONFIG|BASE_|CCACHE_|WINEDEBUG|LC_ALL|BOOST_TEST_RANDOM|CONFIG_SHELL|(ASAN|LSAN|TSAN|UBSAN)_OPTIONS)' | tee /tmp/env
+env | grep -E '^(BITCOIN_CONFIG|BASE_|QEMU_|CCACHE_|WINEDEBUG|LC_ALL|BOOST_TEST_RANDOM|CONFIG_SHELL|(ASAN|LSAN|TSAN|UBSAN)_OPTIONS|TEST_PREVIOUS_RELEASES|PREVIOUS_RELEASES_DIR)' | tee /tmp/env
if [[ $HOST = *-mingw32 ]]; then
DOCKER_ADMIN="--cap-add SYS_ADMIN"
elif [[ $BITCOIN_CONFIG = *--with-sanitizers=*address* ]]; then # If ran with (ASan + LSan), Docker needs access to ptrace (https://github.com/google/sanitizers/issues/764)
@@ -59,8 +63,10 @@ if [ -z "$RUN_CI_ON_HOST" ]; then
--mount type=bind,src=$BASE_ROOT_DIR,dst=/ro_base,readonly \
--mount type=bind,src=$CCACHE_DIR,dst=$CCACHE_DIR \
--mount type=bind,src=$DEPENDS_DIR,dst=$DEPENDS_DIR \
+ --mount type=bind,src=$PREVIOUS_RELEASES_DIR,dst=$PREVIOUS_RELEASES_DIR \
-w $BASE_ROOT_DIR \
--env-file /tmp/env \
+ --name $CONTAINER_NAME \
$DOCKER_NAME_TAG)
DOCKER_EXEC () {
@@ -73,16 +79,6 @@ else
}
fi
-if [ "$TRAVIS_OS_NAME" == "osx" ]; then
- top -l 1 -s 0 | awk ' /PhysMem/ {print}'
- echo "Number of CPUs: $(sysctl -n hw.logicalcpu)"
-else
- DOCKER_EXEC free -m -h
- DOCKER_EXEC echo "Number of CPUs \(nproc\):" \$\(nproc\)
- DOCKER_EXEC echo "Free disk space:"
- DOCKER_EXEC df -h
-fi
-
if [ -n "$DPKG_ADD_ARCH" ]; then
DOCKER_EXEC dpkg --add-architecture "$DPKG_ADD_ARCH"
fi
@@ -95,6 +91,16 @@ elif [ "$TRAVIS_OS_NAME" != "osx" ]; then
${CI_RETRY_EXE} DOCKER_EXEC apt-get install --no-install-recommends --no-upgrade -y $PACKAGES $DOCKER_PACKAGES
fi
+if [ "$TRAVIS_OS_NAME" == "osx" ]; then
+ top -l 1 -s 0 | awk ' /PhysMem/ {print}'
+ echo "Number of CPUs: $(sysctl -n hw.logicalcpu)"
+else
+ DOCKER_EXEC free -m -h
+ DOCKER_EXEC echo "Number of CPUs \(nproc\):" \$\(nproc\)
+ DOCKER_EXEC echo "Free disk space:"
+ DOCKER_EXEC df -h
+fi
+
if [ ! -d ${DIR_QA_ASSETS} ]; then
DOCKER_EXEC git clone https://github.com/bitcoin-core/qa-assets ${DIR_QA_ASSETS}
fi
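The `DOCKER_EXEC` helper used throughout these hunks is defined in this file, mostly outside the visible context: one variant runs the command inside the started container, the other runs it directly on the host when `RUN_CI_ON_HOST` is set. A simplified sketch of the assumed shape (not the exact upstream definition):

```sh
# Assumed, simplified shape of the DOCKER_EXEC wrapper; see ci/test/04_install.sh
# for the real definition.
DOCKER_EXEC () {
  if [ -z "$RUN_CI_ON_HOST" ]; then
    docker exec "$DOCKER_ID" bash -c "cd \"$BASE_ROOT_DIR\" && $*"
  else
    bash -c "cd \"$BASE_ROOT_DIR\" && $*"
  fi
}
```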
diff --git a/ci/test/05_before_script.sh b/ci/test/05_before_script.sh
index 1222432c96..933f4cea91 100755
--- a/ci/test/05_before_script.sh
+++ b/ci/test/05_before_script.sh
@@ -25,5 +25,18 @@ if [[ $HOST = *-mingw32 ]]; then
DOCKER_EXEC update-alternatives --set $HOST-g++ \$\(which $HOST-g++-posix\)
fi
if [ -z "$NO_DEPENDS" ]; then
- DOCKER_EXEC CONFIG_SHELL= make $MAKEJOBS -C depends HOST=$HOST $DEP_OPTS
+ if [[ $DOCKER_NAME_TAG == centos* ]]; then
+ # CentOS has problems building the depends if the config shell is not explicitly set
+ # (i.e. for libevent a Makefile with an empty SHELL variable is generated, leading to
+ # an error as the first command is executed)
+ SHELL_OPTS="CONFIG_SHELL=/bin/bash"
+ else
+ SHELL_OPTS="CONFIG_SHELL="
+ fi
+ DOCKER_EXEC $SHELL_OPTS make $MAKEJOBS -C depends HOST=$HOST $DEP_OPTS
+fi
+if [ "$TEST_PREVIOUS_RELEASES" = "true" ]; then
+ BEGIN_FOLD previous-versions
+ DOCKER_EXEC contrib/devtools/previous_release.sh -b -t "$PREVIOUS_RELEASES_DIR" v0.17.1 v0.18.1 v0.19.0.1
+ END_FOLD
fi
diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh
index ac0b36d14c..3b32513353 100755
--- a/ci/test/06_script_b.sh
+++ b/ci/test/06_script_b.sh
@@ -11,16 +11,7 @@ if [ -n "$QEMU_USER_CMD" ]; then
# Generate all binaries, so that they can be wrapped
DOCKER_EXEC make $MAKEJOBS -C src/secp256k1 VERBOSE=1
DOCKER_EXEC make $MAKEJOBS -C src/univalue VERBOSE=1
- for b_name in {"${BASE_OUTDIR}/bin"/*,src/secp256k1/*tests,src/univalue/{no_nul,test_json,unitester,object}}; do
- # shellcheck disable=SC2044
- for b in $(find "${BASE_ROOT_DIR}" -executable -type f -name $(basename $b_name)); do
- echo "Wrap $b ..."
- DOCKER_EXEC mv "$b" "${b}_orig"
- DOCKER_EXEC echo "\#\!/usr/bin/env bash" \> "$b"
- DOCKER_EXEC echo "$QEMU_USER_CMD \\\"${b}_orig\\\" \\\"\\\$@\\\"" \>\> "$b"
- DOCKER_EXEC chmod +x "$b"
- done
- done
+ DOCKER_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-qemu.sh"
END_FOLD
fi
@@ -45,6 +36,6 @@ fi
if [ "$RUN_FUZZ_TESTS" = "true" ]; then
BEGIN_FOLD fuzz-tests
- DOCKER_EXEC test/fuzz/test_runner.py -l DEBUG ${DIR_FUZZ_IN}
+ DOCKER_EXEC test/fuzz/test_runner.py ${FUZZ_TESTS_CONFIG} -l DEBUG ${DIR_FUZZ_IN}
END_FOLD
fi
diff --git a/ci/test/wrap-qemu.sh b/ci/test/wrap-qemu.sh
new file mode 100755
index 0000000000..f1d3088748
--- /dev/null
+++ b/ci/test/wrap-qemu.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2018 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+export LC_ALL=C.UTF-8
+
+for b_name in {"${BASE_OUTDIR}/bin"/*,src/secp256k1/*tests,src/univalue/{no_nul,test_json,unitester,object}}; do
+ # shellcheck disable=SC2044
+ for b in $(find "${BASE_ROOT_DIR}" -executable -type f -name $(basename $b_name)); do
+ echo "Wrap $b ..."
+ mv "$b" "${b}_orig"
+ echo '#!/usr/bin/env bash' > "$b"
+ echo "$QEMU_USER_CMD \"${b}_orig\" \"\$@\"" >> "$b"
+ chmod +x "$b"
+ done
+done
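For clarity, after this script runs each cross-compiled executable has been replaced by a two-line stub that forwards to the renamed original through qemu. With the arm `QEMU_USER_CMD` from the setup file above, a wrapped binary would look roughly like this (the path is a hypothetical placeholder):

```sh
#!/usr/bin/env bash
qemu-arm -L /usr/arm-linux-gnueabihf/ "/path/to/src/bitcoind_orig" "$@"
```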
diff --git a/configure.ac b/configure.ac
index 875491fb99..1c0f39d7f7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -262,6 +262,13 @@ AC_ARG_ENABLE([gprof],
[enable_gprof=$enableval],
[enable_gprof=no])
+dnl Pass compiler & linker flags that make builds deterministic
+AC_ARG_ENABLE([determinism],
+ [AS_HELP_STRING([--enable-determinism],
+ [Enable compilation flags that make builds deterministic (default is no)])],
+ [enable_determinism=$enableval],
+ [enable_determinism=no])
+
dnl Turn warnings into errors
AC_ARG_ENABLE([werror],
[AS_HELP_STRING([--enable-werror],
@@ -329,6 +336,8 @@ if test "x$enable_werror" = "xyes"; then
AX_CHECK_COMPILE_FLAG([-Werror=switch],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=switch"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Werror=thread-safety-analysis],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=thread-safety-analysis"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Werror=unused-variable],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=unused-variable"],,[[$CXXFLAG_WERROR]])
+ AX_CHECK_COMPILE_FLAG([-Werror=date-time],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=date-time"],,[[$CXXFLAG_WERROR]])
+ AX_CHECK_COMPILE_FLAG([-Werror=return-type],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=return-type"],,[[$CXXFLAG_WERROR]])
fi
if test "x$CXXFLAGS_overridden" = "xno"; then
@@ -342,6 +351,7 @@ if test "x$CXXFLAGS_overridden" = "xno"; then
AX_CHECK_COMPILE_FLAG([-Wrange-loop-analysis],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wrange-loop-analysis"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wredundant-decls],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wredundant-decls"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wunused-variable],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunused-variable"],,[[$CXXFLAG_WERROR]])
+ AX_CHECK_COMPILE_FLAG([-Wdate-time],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wdate-time"],,[[$CXXFLAG_WERROR]])
dnl Some compilers (gcc) ignore unknown -Wno-* options, but warn about all
dnl unknown options if any other warning is produced. Test the -Wfoo case, and
@@ -353,7 +363,7 @@ if test "x$CXXFLAGS_overridden" = "xno"; then
AX_CHECK_COMPILE_FLAG([-Wimplicit-fallthrough],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-implicit-fallthrough"],,[[$CXXFLAG_WERROR]])
fi
-enable_hwcrc32=no
+enable_sse42=no
enable_sse41=no
enable_avx2=no
enable_shani=no
@@ -363,6 +373,8 @@ if test "x$use_asm" = "xyes"; then
dnl Check for optional instruction set support. Enabling these does _not_ imply that all code will
dnl be compiled with them, rather that specific objects/libs may use them after checking for runtime
dnl compatibility.
+
+dnl x86
AX_CHECK_COMPILE_FLAG([-msse4.2],[[SSE42_CXXFLAGS="-msse4.2"]],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-msse4.1],[[SSE41_CXXFLAGS="-msse4.1"]],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-mavx -mavx2],[[AVX2_CXXFLAGS="-mavx -mavx2"]],,[[$CXXFLAG_WERROR]])
@@ -370,7 +382,7 @@ AX_CHECK_COMPILE_FLAG([-msse4 -msha],[[SHANI_CXXFLAGS="-msse4 -msha"]],,[[$CXXFL
TEMP_CXXFLAGS="$CXXFLAGS"
CXXFLAGS="$CXXFLAGS $SSE42_CXXFLAGS"
-AC_MSG_CHECKING(for assembler crc32 support)
+AC_MSG_CHECKING(for SSE4.2 intrinsics)
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <stdint.h>
#if defined(_MSC_VER)
@@ -385,7 +397,7 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
l = _mm_crc32_u64(l, 0);
return l;
]])],
- [ AC_MSG_RESULT(yes); enable_hwcrc32=yes],
+ [ AC_MSG_RESULT(yes); enable_sse42=yes],
[ AC_MSG_RESULT(no)]
)
CXXFLAGS="$TEMP_CXXFLAGS"
@@ -437,6 +449,24 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
)
CXXFLAGS="$TEMP_CXXFLAGS"
+# ARM
+AX_CHECK_COMPILE_FLAG([-march=armv8-a+crc+crypto],[[ARM_CRC_CXXFLAGS="-march=armv8-a+crc+crypto"]],,[[$CXXFLAG_WERROR]])
+
+TEMP_CXXFLAGS="$CXXFLAGS"
+CXXFLAGS="$CXXFLAGS $ARM_CRC_CXXFLAGS"
+AC_MSG_CHECKING(for ARM CRC32 intrinsics)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+ #include <arm_acle.h>
+ #include <arm_neon.h>
+ ]],[[
+ __crc32cb(0, 0); __crc32ch(0, 0); __crc32cw(0, 0); __crc32cd(0, 0);
+ vmull_p64(0, 0);
+ ]])],
+ [ AC_MSG_RESULT(yes); enable_arm_crc=yes; ],
+ [ AC_MSG_RESULT(no)]
+)
+CXXFLAGS="$TEMP_CXXFLAGS"
+
fi
CPPFLAGS="$CPPFLAGS -DHAVE_BUILD_INFO -D__STDC_FORMAT_MACROS"
@@ -481,29 +511,24 @@ use_pkgconfig=yes
case $host in
*mingw*)
- #pkgconfig does more harm than good with MinGW
+ dnl pkgconfig does more harm than good with MinGW
use_pkgconfig=no
TARGET_OS=windows
- AC_CHECK_LIB([mingwthrd], [main],, AC_MSG_ERROR(libmingwthrd missing))
- AC_CHECK_LIB([kernel32], [main],, AC_MSG_ERROR(libkernel32 missing))
- AC_CHECK_LIB([user32], [main],, AC_MSG_ERROR(libuser32 missing))
- AC_CHECK_LIB([gdi32], [main],, AC_MSG_ERROR(libgdi32 missing))
- AC_CHECK_LIB([comdlg32], [main],, AC_MSG_ERROR(libcomdlg32 missing))
- AC_CHECK_LIB([winspool], [main],, AC_MSG_ERROR(libwinspool missing))
- AC_CHECK_LIB([winmm], [main],, AC_MSG_ERROR(libwinmm missing))
- AC_CHECK_LIB([shell32], [main],, AC_MSG_ERROR(libshell32 missing))
- AC_CHECK_LIB([comctl32], [main],, AC_MSG_ERROR(libcomctl32 missing))
- AC_CHECK_LIB([ole32], [main],, AC_MSG_ERROR(libole32 missing))
- AC_CHECK_LIB([oleaut32], [main],, AC_MSG_ERROR(liboleaut32 missing))
- AC_CHECK_LIB([uuid], [main],, AC_MSG_ERROR(libuuid missing))
- AC_CHECK_LIB([rpcrt4], [main],, AC_MSG_ERROR(librpcrt4 missing))
- AC_CHECK_LIB([advapi32], [main],, AC_MSG_ERROR(libadvapi32 missing))
- AC_CHECK_LIB([ws2_32], [main],, AC_MSG_ERROR(libws2_32 missing))
- AC_CHECK_LIB([mswsock], [main],, AC_MSG_ERROR(libmswsock missing))
- AC_CHECK_LIB([shlwapi], [main],, AC_MSG_ERROR(libshlwapi missing))
- AC_CHECK_LIB([iphlpapi], [main],, AC_MSG_ERROR(libiphlpapi missing))
- AC_CHECK_LIB([crypt32], [main],, AC_MSG_ERROR(libcrypt32 missing))
+ AC_CHECK_LIB([kernel32], [GetModuleFileNameA],, AC_MSG_ERROR(libkernel32 missing))
+ AC_CHECK_LIB([user32], [main],, AC_MSG_ERROR(libuser32 missing))
+ AC_CHECK_LIB([gdi32], [main],, AC_MSG_ERROR(libgdi32 missing))
+ AC_CHECK_LIB([comdlg32], [main],, AC_MSG_ERROR(libcomdlg32 missing))
+ AC_CHECK_LIB([winmm], [main],, AC_MSG_ERROR(libwinmm missing))
+ AC_CHECK_LIB([shell32], [SHGetSpecialFolderPathW],, AC_MSG_ERROR(libshell32 missing))
+ AC_CHECK_LIB([comctl32], [main],, AC_MSG_ERROR(libcomctl32 missing))
+ AC_CHECK_LIB([ole32], [CoCreateInstance],, AC_MSG_ERROR(libole32 missing))
+ AC_CHECK_LIB([oleaut32], [main],, AC_MSG_ERROR(liboleaut32 missing))
+ AC_CHECK_LIB([uuid], [main],, AC_MSG_ERROR(libuuid missing))
+ AC_CHECK_LIB([advapi32], [CryptAcquireContextW],, AC_MSG_ERROR(libadvapi32 missing))
+ AC_CHECK_LIB([ws2_32], [WSAStartup],, AC_MSG_ERROR(libws2_32 missing))
+ AC_CHECK_LIB([shlwapi], [PathRemoveFileSpecW],, AC_MSG_ERROR(libshlwapi missing))
+ AC_CHECK_LIB([iphlpapi], [GetAdaptersAddresses],, AC_MSG_ERROR(libiphlpapi missing))
dnl -static is interpreted by libtool, where it has a different meaning.
dnl In libtool-speak, it's -all-static.
@@ -520,7 +545,6 @@ case $host in
fi
CPPFLAGS="$CPPFLAGS -D_MT -DWIN32 -D_WINDOWS -DBOOST_THREAD_USE_LIB -D_WIN32_WINNT=0x0601"
- LEVELDB_TARGET_FLAGS="-DOS_WINDOWS"
if test "x$CXXFLAGS_overridden" = "xno"; then
CXXFLAGS="$CXXFLAGS -w"
fi
@@ -536,7 +560,6 @@ case $host in
;;
*darwin*)
TARGET_OS=darwin
- LEVELDB_TARGET_FLAGS="-DOS_MACOSX"
if test x$cross_compiling != xyes; then
BUILD_OS=darwin
AC_PATH_PROGS([RSVG_CONVERT], [rsvg-convert rsvg],rsvg-convert)
@@ -588,35 +611,9 @@ case $host in
*android*)
dnl make sure android stays above linux for hosts like *linux-android*
TARGET_OS=android
- LEVELDB_TARGET_FLAGS="-DOS_ANDROID"
;;
*linux*)
TARGET_OS=linux
- LEVELDB_TARGET_FLAGS="-DOS_LINUX"
- ;;
- *kfreebsd*)
- LEVELDB_TARGET_FLAGS="-DOS_KFREEBSD"
- ;;
- *freebsd*)
- LEVELDB_TARGET_FLAGS="-DOS_FREEBSD"
- ;;
- *openbsd*)
- LEVELDB_TARGET_FLAGS="-DOS_OPENBSD"
- ;;
- *netbsd*)
- LEVELDB_TARGET_FLAGS="-DOS_NETBSD"
- ;;
- *dragonfly*)
- LEVELDB_TARGET_FLAGS="-DOS_DRAGONFLYBSD"
- ;;
- *solaris*)
- LEVELDB_TARGET_FLAGS="-DOS_SOLARIS"
- ;;
- *hpux*)
- LEVELDB_TARGET_FLAGS="-DOS_HPUX"
- ;;
- *)
- AC_MSG_ERROR(Cannot build leveldb for $host. Please file a bug report.)
;;
esac
@@ -685,18 +682,12 @@ if test x$ac_cv_sys_large_files != x &&
CPPFLAGS="$CPPFLAGS -D_LARGE_FILES=$ac_cv_sys_large_files"
fi
-AX_CHECK_LINK_FLAG([[-Wl,--large-address-aware]], [LDFLAGS="$LDFLAGS -Wl,--large-address-aware"])
-
AX_GCC_FUNC_ATTRIBUTE([visibility])
AX_GCC_FUNC_ATTRIBUTE([dllexport])
AX_GCC_FUNC_ATTRIBUTE([dllimport])
if test x$use_glibc_compat != xno; then
- dnl glibc absorbed clock_gettime in 2.17. librt (its previous location) is safe to link
- dnl in anyway for back-compat.
- AC_CHECK_LIB([rt],[clock_gettime],, AC_MSG_ERROR(librt missing))
-
dnl __fdelt_chk's params and return type have changed from long unsigned int to long int.
dnl See which one is present here.
AC_MSG_CHECKING(__fdelt_chk type)
@@ -780,6 +771,12 @@ if test x$TARGET_OS = xdarwin; then
AX_CHECK_LINK_FLAG([[-Wl,-bind_at_load]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-bind_at_load"])
fi
+if test x$enable_determinism = xyes; then
+ if test x$TARGET_OS = xwindows; then
+ AX_CHECK_LINK_FLAG([[-Wl,--no-insert-timestamp]], [LDFLAGS="$LDFLAGS -Wl,--no-insert-timestamp"])
+ fi
+fi
+
AC_CHECK_HEADERS([endian.h sys/endian.h byteswap.h stdio.h stdlib.h unistd.h strings.h sys/types.h sys/stat.h sys/select.h sys/prctl.h sys/sysctl.h vm/vm_param.h sys/vmmeter.h sys/resources.h])
dnl FD_ZERO may be dependent on a declaration of memcpy, e.g. in SmartOS
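Usage note (not part of the patch): the new `--enable-determinism` switch currently only adds a linker flag for Windows targets, where `-Wl,--no-insert-timestamp` drops the PE header timestamp. A hypothetical cross-configure invocation, reusing the depends `config.site` pattern seen in the gitian descriptors below:

```sh
# Hypothetical example; only --enable-determinism is new here.
./autogen.sh
CONFIG_SITE=$PWD/depends/x86_64-w64-mingw32/share/config.site ./configure --enable-determinism
make
```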
@@ -892,11 +889,6 @@ if test "x$use_thread_local" = xyes || { test "x$use_thread_local" = xauto && te
dnl https://gist.github.com/jamesob/fe9a872051a88b2025b1aa37bfa98605
AC_MSG_RESULT(no)
;;
- *darwin*)
- dnl TODO enable thread_local on later versions of Darwin where it is
- dnl supported (per https://stackoverflow.com/a/29929949)
- AC_MSG_RESULT(no)
- ;;
*freebsd*)
dnl FreeBSD's implementation of thread_local is also buggy (per
dnl https://groups.google.com/d/msg/bsdmailinglist/22ncTZAbDp4/Dii_pII5AwAJ)
@@ -969,6 +961,72 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <stdint.h>
[ AC_MSG_RESULT(no)]
)
+dnl LevelDB platform checks
+AC_MSG_CHECKING(for fdatasync)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]],
+ [[ fdatasync(0); ]])],
+ [ AC_MSG_RESULT(yes); HAVE_FDATASYNC=1 ],
+ [ AC_MSG_RESULT(no); HAVE_FDATASYNC=0 ]
+)
+
+AC_MSG_CHECKING(for F_FULLFSYNC)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <fcntl.h>]],
+ [[ fcntl(0, F_FULLFSYNC, 0); ]])],
+ [ AC_MSG_RESULT(yes); HAVE_FULLFSYNC=1 ],
+ [ AC_MSG_RESULT(no); HAVE_FULLFSYNC=0 ]
+)
+
+AC_MSG_CHECKING(for O_CLOEXEC)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <fcntl.h>]],
+ [[ open("", O_CLOEXEC); ]])],
+ [ AC_MSG_RESULT(yes); HAVE_O_CLOEXEC=1 ],
+ [ AC_MSG_RESULT(no); HAVE_O_CLOEXEC=0 ]
+)
+
+dnl crc32c platform checks
+AC_MSG_CHECKING(for __builtin_prefetch)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[
+ char data = 0;
+ const char* address = &data;
+ __builtin_prefetch(address, 0, 0);
+ ]])],
+ [ AC_MSG_RESULT(yes); HAVE_BUILTIN_PREFETCH=1 ],
+ [ AC_MSG_RESULT(no); HAVE_BUILTIN_PREFETCH=0 ]
+)
+
+AC_MSG_CHECKING(for _mm_prefetch)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <xmmintrin.h>]], [[
+ char data = 0;
+ const char* address = &data;
+ _mm_prefetch(address, _MM_HINT_NTA);
+ ]])],
+ [ AC_MSG_RESULT(yes); HAVE_MM_PREFETCH=1 ],
+ [ AC_MSG_RESULT(no); HAVE_MM_PREFETCH=0 ]
+)
+
+AC_MSG_CHECKING(for strong getauxval support in the system headers)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+ #include <arm_acle.h>
+ #include <arm_neon.h>
+ #include <sys/auxv.h>
+ ]], [[
+ getauxval(AT_HWCAP);
+ ]])],
+ [ AC_MSG_RESULT(yes); HAVE_STRONG_GETAUXVAL=1 ],
+ [ AC_MSG_RESULT(no); HAVE_STRONG_GETAUXVAL=0 ]
+)
+
+AC_MSG_CHECKING(for weak getauxval support in the compiler)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+ unsigned long getauxval(unsigned long type) __attribute__((weak));
+ #define AT_HWCAP 16
+ ]], [[
+ getauxval(AT_HWCAP);
+ ]])],
+ [ AC_MSG_RESULT(yes); HAVE_WEAK_GETAUXVAL=1 ],
+ [ AC_MSG_RESULT(no); HAVE_WEAK_GETAUXVAL=0 ]
+)
+
dnl Check for reduced exports
if test x$use_reduce_exports = xyes; then
AX_CHECK_COMPILE_FLAG([-fvisibility=hidden],[RE_CXXFLAGS="-fvisibility=hidden"],
@@ -1081,7 +1139,6 @@ fi
AX_BOOST_SYSTEM
AX_BOOST_FILESYSTEM
AX_BOOST_THREAD
-AX_BOOST_CHRONO
dnl Boost 1.56 through 1.62 allow using std::atomic instead of its own atomic
dnl counter implementations. In 1.63 and later the std::atomic approach is default.
@@ -1148,7 +1205,7 @@ fi
if test x$use_boost = xyes; then
-BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB $BOOST_CHRONO_LIB"
+BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB"
dnl If boost (prior to 1.57) was built without c++11, it emulated scoped enums
@@ -1186,57 +1243,6 @@ AC_LINK_IFELSE([AC_LANG_PROGRAM([[
LIBS="$TEMP_LIBS"
CPPFLAGS="$TEMP_CPPFLAGS"
-dnl Boost >= 1.50 uses sleep_for rather than the now-deprecated sleep, however
-dnl it was broken from 1.50 to 1.52 when backed by nanosleep. Use sleep_for if
-dnl a working version is available, else fall back to sleep. sleep was removed
-dnl after 1.56.
-dnl If neither is available, abort.
-TEMP_LIBS="$LIBS"
-LIBS="$BOOST_LIBS $LIBS"
-TEMP_CPPFLAGS="$CPPFLAGS"
-CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
-AC_LINK_IFELSE([AC_LANG_PROGRAM([[
- #include <boost/thread/thread.hpp>
- #include <boost/version.hpp>
- ]],[[
- #if BOOST_VERSION >= 105000 && (!defined(BOOST_HAS_NANOSLEEP) || BOOST_VERSION >= 105200)
- boost::this_thread::sleep_for(boost::chrono::milliseconds(0));
- #else
- choke me
- #endif
- ]])],
- [boost_sleep=yes;
- AC_DEFINE(HAVE_WORKING_BOOST_SLEEP_FOR, 1, [Define this symbol if boost sleep_for works])],
- [boost_sleep=no])
-LIBS="$TEMP_LIBS"
-CPPFLAGS="$TEMP_CPPFLAGS"
-
-if test x$boost_sleep != xyes; then
-TEMP_LIBS="$LIBS"
-LIBS="$BOOST_LIBS $LIBS"
-TEMP_CPPFLAGS="$CPPFLAGS"
-CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
-AC_LINK_IFELSE([AC_LANG_PROGRAM([[
- #include <boost/version.hpp>
- #include <boost/thread.hpp>
- #include <boost/date_time/posix_time/posix_time_types.hpp>
- ]],[[
- #if BOOST_VERSION <= 105600
- boost::this_thread::sleep(boost::posix_time::milliseconds(0));
- #else
- choke me
- #endif
- ]])],
- [boost_sleep=yes; AC_DEFINE(HAVE_WORKING_BOOST_SLEEP, 1, [Define this symbol if boost sleep works])],
- [boost_sleep=no])
-LIBS="$TEMP_LIBS"
-CPPFLAGS="$TEMP_CPPFLAGS"
-fi
-
-if test x$boost_sleep != xyes; then
- AC_MSG_ERROR(No working boost sleep implementation found.)
-fi
-
fi
if test x$use_pkgconfig = xyes; then
@@ -1524,11 +1530,13 @@ AM_CONDITIONAL([USE_QRCODE], [test x$use_qr = xyes])
AM_CONDITIONAL([USE_LCOV],[test x$use_lcov = xyes])
AM_CONDITIONAL([GLIBC_BACK_COMPAT],[test x$use_glibc_compat = xyes])
AM_CONDITIONAL([HARDEN],[test x$use_hardening = xyes])
-AM_CONDITIONAL([ENABLE_HWCRC32],[test x$enable_hwcrc32 = xyes])
+AM_CONDITIONAL([ENABLE_SSE42],[test x$enable_sse42 = xyes])
AM_CONDITIONAL([ENABLE_SSE41],[test x$enable_sse41 = xyes])
AM_CONDITIONAL([ENABLE_AVX2],[test x$enable_avx2 = xyes])
AM_CONDITIONAL([ENABLE_SHANI],[test x$enable_shani = xyes])
+AM_CONDITIONAL([ENABLE_ARM_CRC],[test x$enable_arm_crc = xyes])
AM_CONDITIONAL([USE_ASM],[test x$use_asm = xyes])
+AM_CONDITIONAL([WORDS_BIGENDIAN],[test x$ac_cv_c_bigendian = xyes])
AC_DEFINE(CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MAJOR, [Major version])
AC_DEFINE(CLIENT_VERSION_MINOR, _CLIENT_VERSION_MINOR, [Minor version])
@@ -1575,18 +1583,25 @@ AC_SUBST(SSE42_CXXFLAGS)
AC_SUBST(SSE41_CXXFLAGS)
AC_SUBST(AVX2_CXXFLAGS)
AC_SUBST(SHANI_CXXFLAGS)
+AC_SUBST(ARM_CRC_CXXFLAGS)
AC_SUBST(LIBTOOL_APP_LDFLAGS)
AC_SUBST(USE_UPNP)
AC_SUBST(USE_QRCODE)
AC_SUBST(BOOST_LIBS)
AC_SUBST(TESTDEFS)
-AC_SUBST(LEVELDB_TARGET_FLAGS)
AC_SUBST(MINIUPNPC_CPPFLAGS)
AC_SUBST(MINIUPNPC_LIBS)
AC_SUBST(EVENT_LIBS)
AC_SUBST(EVENT_PTHREADS_LIBS)
AC_SUBST(ZMQ_LIBS)
AC_SUBST(QR_LIBS)
+AC_SUBST(HAVE_FDATASYNC)
+AC_SUBST(HAVE_FULLFSYNC)
+AC_SUBST(HAVE_O_CLOEXEC)
+AC_SUBST(HAVE_BUILTIN_PREFETCH)
+AC_SUBST(HAVE_MM_PREFETCH)
+AC_SUBST(HAVE_STRONG_GETAUXVAL)
+AC_SUBST(HAVE_WEAK_GETAUXVAL)
AC_CONFIG_FILES([Makefile src/Makefile doc/man/Makefile share/setup.nsi share/qt/Info.plist test/config.ini])
AC_CONFIG_FILES([contrib/devtools/split-debug.sh],[chmod +x contrib/devtools/split-debug.sh])
AM_COND_IF([HAVE_DOXYGEN], [AC_CONFIG_FILES([doc/Doxyfile])])
diff --git a/contrib/devtools/README.md b/contrib/devtools/README.md
index c35affac59..515a0d8fc6 100644
--- a/contrib/devtools/README.md
+++ b/contrib/devtools/README.md
@@ -103,17 +103,21 @@ Perform basic security checks on a series of executables.
symbol-check.py
===============
-A script to check that the (Linux) executables produced by gitian only contain
-allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
-still compatible with the minimum supported Linux distribution versions.
+A script to check that the executables produced by gitian only contain
+certain symbols and are only linked against allowed libraries.
+
+For Linux this means checking for allowed gcc, glibc and libstdc++ version symbols.
+This makes sure they are still compatible with the minimum supported distribution versions.
+
+For macOS we check that the executables are only linked against libraries we allow.
Example usage after a gitian build:
find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py
-If only supported symbols are used the return value will be 0 and the output will be empty.
+If no errors occur the return value will be 0 and the output will be empty.
-If there are 'unsupported' symbols, the return value will be 1 a list like this will be printed:
+If there are any errors the return value will be 1 and output like this will be printed:
.../64/test_bitcoin: symbol memcpy from unsupported version GLIBC_2.14
.../64/test_bitcoin: symbol __fdelt_chk from unsupported version GLIBC_2.15
diff --git a/contrib/devtools/circular-dependencies.py b/contrib/devtools/circular-dependencies.py
index 2e4657f1dd..6afa4351e7 100755
--- a/contrib/devtools/circular-dependencies.py
+++ b/contrib/devtools/circular-dependencies.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python3
+# Copyright (c) 2018 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import sys
import re
diff --git a/contrib/devtools/copyright_header.py b/contrib/devtools/copyright_header.py
index 5307b04197..92120eaff7 100755
--- a/contrib/devtools/copyright_header.py
+++ b/contrib/devtools/copyright_header.py
@@ -19,6 +19,8 @@ EXCLUDE = [
'src/qt/bitcoinstrings.cpp',
'src/chainparamsseeds.h',
# other external copyrights:
+ 'src/reverse_iterator.h',
+ 'src/test/fuzz/FuzzedDataProvider.h',
'src/tinyformat.h',
'test/functional/test_framework/bignum.py',
# python init:
@@ -32,6 +34,7 @@ EXCLUDE_DIRS = [
"src/leveldb/",
"src/secp256k1/",
"src/univalue/",
+ "src/crc32c/",
]
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.mm', '*.py', '*.sh', '*.bash-completion']
@@ -455,14 +458,14 @@ CPP_HEADER = '''
def get_cpp_header_lines_to_insert(start_year, end_year):
return reversed(get_header_lines(CPP_HEADER, start_year, end_year))
-PYTHON_HEADER = '''
+SCRIPT_HEADER = '''
# Copyright (c) %s The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
-def get_python_header_lines_to_insert(start_year, end_year):
- return reversed(get_header_lines(PYTHON_HEADER, start_year, end_year))
+def get_script_header_lines_to_insert(start_year, end_year):
+ return reversed(get_header_lines(SCRIPT_HEADER, start_year, end_year))
################################################################################
# query git for year of last change
@@ -491,17 +494,18 @@ def file_has_hashbang(file_lines):
return False
return file_lines[0][:2] == '#!'
-def insert_python_header(filename, file_lines, start_year, end_year):
+def insert_script_header(filename, file_lines, start_year, end_year):
if file_has_hashbang(file_lines):
insert_idx = 1
else:
insert_idx = 0
- header_lines = get_python_header_lines_to_insert(start_year, end_year)
+ header_lines = get_script_header_lines_to_insert(start_year, end_year)
for line in header_lines:
file_lines.insert(insert_idx, line)
write_file_lines(filename, file_lines)
def insert_cpp_header(filename, file_lines, start_year, end_year):
+ file_lines.insert(0, '\n')
header_lines = get_cpp_header_lines_to_insert(start_year, end_year)
for line in header_lines:
file_lines.insert(0, line)
@@ -513,8 +517,8 @@ def exec_insert_header(filename, style):
sys.exit('*** %s already has a copyright by The Bitcoin Core developers'
% (filename))
start_year, end_year = get_git_change_year_range(filename)
- if style == 'python':
- insert_python_header(filename, file_lines, start_year, end_year)
+ if style in ['python', 'shell']:
+ insert_script_header(filename, file_lines, start_year, end_year)
else:
insert_cpp_header(filename, file_lines, start_year, end_year)
@@ -555,11 +559,13 @@ def insert_cmd(argv):
if not os.path.isfile(filename):
sys.exit("*** bad filename: %s" % filename)
_, extension = os.path.splitext(filename)
- if extension not in ['.h', '.cpp', '.cc', '.c', '.py']:
+ if extension not in ['.h', '.cpp', '.cc', '.c', '.py', '.sh']:
sys.exit("*** cannot insert for file extension %s" % extension)
if extension == '.py':
style = 'python'
+ elif extension == '.sh':
+ style = 'shell'
else:
style = 'cpp'
exec_insert_header(filename, style)
diff --git a/contrib/devtools/gen-manpages.sh b/contrib/devtools/gen-manpages.sh
index dbdb622877..aa65953d83 100755
--- a/contrib/devtools/gen-manpages.sh
+++ b/contrib/devtools/gen-manpages.sh
@@ -1,4 +1,7 @@
#!/usr/bin/env bash
+# Copyright (c) 2016-2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C
TOPDIR=${TOPDIR:-$(git rev-parse --show-toplevel)}
diff --git a/contrib/devtools/previous_release.sh b/contrib/devtools/previous_release.sh
new file mode 100755
index 0000000000..efd035f778
--- /dev/null
+++ b/contrib/devtools/previous_release.sh
@@ -0,0 +1,149 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2018-2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+# Build previous releases.
+
+export LC_ALL=C
+
+CONFIG_FLAGS=""
+FUNCTIONAL_TESTS=0
+DELETE_EXISTING=0
+USE_DEPENDS=0
+DOWNLOAD_BINARY=0
+CONFIG_FLAGS=""
+TARGET="releases"
+
+while getopts ":hfrdbt:" opt; do
+ case $opt in
+ h)
+ echo "Usage: .previous_release.sh [options] tag1 tag2"
+ echo " options:"
+ echo " -h Print this message"
+ echo " -f Configure for functional tests"
+ echo " -r Remove existing directory"
+ echo " -d Use depends"
+ echo " -b Download release binary"
+ echo " -t Target directory (default: releases)"
+ exit 0
+ ;;
+ f)
+ FUNCTIONAL_TESTS=1
+ CONFIG_FLAGS="$CONFIG_FLAGS --without-gui --disable-tests --disable-bench"
+ ;;
+ r)
+ DELETE_EXISTING=1
+ ;;
+ d)
+ USE_DEPENDS=1
+ ;;
+ b)
+ DOWNLOAD_BINARY=1
+ ;;
+ t)
+ TARGET=$OPTARG
+ ;;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ exit 1
+ ;;
+ esac
+done
+
+shift $((OPTIND-1))
+
+if [ -z "$1" ]; then
+ echo "Specify release tag(s), e.g.: .previous_release v0.15.1"
+ exit 1
+fi
+
+if [ ! -d "$TARGET" ]; then
+ mkdir -p $TARGET
+fi
+
+if [ "$DOWNLOAD_BINARY" -eq "1" ]; then
+ HOST="${HOST:-$(./depends/config.guess)}"
+ case "$HOST" in
+ x86_64-*-linux*)
+ PLATFORM=x86_64-linux-gnu
+ ;;
+ x86_64-apple-darwin*)
+ PLATFORM=osx64
+ ;;
+ *)
+ echo "Not sure which binary to download for $HOST."
+ exit 1
+ ;;
+ esac
+fi
+
+echo "Releases directory: $TARGET"
+pushd "$TARGET" || exit 1
+{
+ for tag in "$@"
+ do
+ if [ "$DELETE_EXISTING" -eq "1" ]; then
+ if [ -d "$tag" ]; then
+ rm -r "$tag"
+ fi
+ fi
+
+ if [ "$DOWNLOAD_BINARY" -eq "0" ]; then
+
+ if [ ! -d "$tag" ]; then
+ if [ -z $(git tag -l "$tag") ]; then
+ echo "Tag $tag not found"
+ exit 1
+ fi
+
+ git clone https://github.com/bitcoin/bitcoin "$tag"
+ pushd "$tag" || exit 1
+ {
+ git checkout "$tag"
+ if [ "$USE_DEPENDS" -eq "1" ]; then
+ pushd depends || exit 1
+ {
+ if [ "$FUNCTIONAL_TESTS" -eq "1" ]; then
+ make NO_QT=1
+ else
+ make
+ fi
+ HOST="${HOST:-$(./config.guess)}"
+ }
+ popd || exit 1
+ CONFIG_FLAGS="--prefix=$PWD/depends/$HOST $CONFIG_FLAGS"
+ fi
+ ./autogen.sh
+ ./configure $CONFIG_FLAGS
+ make
+ # Move binaries, so they're in the same place as in the release download:
+ mkdir bin
+ mv src/bitcoind src/bitcoin-cli src/bitcoin-tx bin
+ if [ "$FUNCTIONAL_TESTS" -eq "0" ]; then
+ mv src/qt/bitcoin-qt bin
+ fi
+ }
+ popd || exit 1
+ fi
+ else
+ if [ -d "$tag" ]; then
+ echo "Using cached $tag"
+ else
+ mkdir "$tag"
+ if [[ "$tag" =~ v(.*)(rc[0-9]+)$ ]]; then
+ BIN_PATH="bin/bitcoin-core-${BASH_REMATCH[1]}/test.${BASH_REMATCH[2]}"
+ else
+ BIN_PATH="bin/bitcoin-core-${tag:1}"
+ fi
+ URL="https://bitcoin.org/$BIN_PATH/bitcoin-${tag:1}-$PLATFORM.tar.gz"
+ echo "Fetching: $URL"
+ curl -O $URL
+ tar -zxf "bitcoin-${tag:1}-$PLATFORM.tar.gz" -C "$tag" --strip-components=1 "bitcoin-${tag:1}"
+ rm "bitcoin-${tag:1}-$PLATFORM.tar.gz"
+ fi
+ fi
+ done
+}
+popd || exit 1
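Two illustrative invocations of the script above (tags and target directory are examples only): one downloads release binaries the way the CI change in `ci/test/05_before_script.sh` does, the other builds a tag from source against depends, configured for functional tests:

```sh
# Download official binaries for two tags into ./releases.
contrib/devtools/previous_release.sh -b -t releases v0.18.1 v0.19.0.1

# Rebuild v0.17.1 from source with depends; GUI, tests and bench disabled for functional-test use.
contrib/devtools/previous_release.sh -r -d -f v0.17.1
```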
diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py
index 9941c57479..21d64e893d 100755
--- a/contrib/devtools/security-check.py
+++ b/contrib/devtools/security-check.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2015-2018 The Bitcoin Core developers
+# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index 0c59ab6239..f92d997621 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -15,6 +15,7 @@ import subprocess
import re
import sys
import os
+from typing import List, Optional, Tuple
# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases
#
@@ -52,8 +53,10 @@ IGNORE_EXPORTS = {
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
+OTOOL_CMD = os.getenv('OTOOL', '/usr/bin/otool')
+
# Allowed NEEDED libraries
-ALLOWED_LIBRARIES = {
+ELF_ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
'libgcc_s.so.1', # GCC base support
'libc.so.6', # C library
@@ -79,6 +82,25 @@ ARCH_MIN_GLIBC_VER = {
'AArch64':(2,17),
'RISC-V': (2,27)
}
+
+MACHO_ALLOWED_LIBRARIES = {
+# bitcoind and bitcoin-qt
+'libc++.1.dylib', # C++ Standard Library
+'libSystem.B.dylib', # libc, libm, libpthread, libinfo
+# bitcoin-qt only
+'AppKit', # user interface
+'ApplicationServices', # common application tasks.
+'Carbon', # deprecated c back-compat API
+'CoreFoundation', # low level func, data types
+'CoreGraphics', # 2D rendering
+'CoreServices', # operating system services
+'CoreText', # interface for laying out text and handling fonts.
+'Foundation', # base layer functionality for apps/frameworks
+'ImageIO', # read and write image file formats.
+'IOKit', # user-space access to hardware devices and drivers.
+'libobjc.A.dylib', # Objective-C runtime library
+}
+
class CPPFilt(object):
'''
Demangle C++ symbol names.
@@ -98,15 +120,15 @@ class CPPFilt(object):
self.proc.stdout.close()
self.proc.wait()
-def read_symbols(executable, imports=True):
+def read_symbols(executable, imports=True) -> List[Tuple[str, str, str]]:
'''
- Parse an ELF executable and return a list of (symbol,version) tuples
+ Parse an ELF executable and return a list of (symbol,version, arch) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', '-h', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
- raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
+ raise IOError('Could not read symbols for {}: {}'.format(executable, stderr.strip()))
syms = []
for line in stdout.splitlines():
line = line.split()
@@ -121,7 +143,7 @@ def read_symbols(executable, imports=True):
syms.append((sym, version, arch))
return syms
-def check_version(max_versions, version, arch):
+def check_version(max_versions, version, arch) -> bool:
if '_' in version:
(lib, _, ver) = version.rpartition('_')
else:
@@ -132,7 +154,7 @@ def check_version(max_versions, version, arch):
return False
return ver <= max_versions[lib] or lib == 'GLIBC' and ver <= ARCH_MIN_GLIBC_VER[arch]
-def read_libraries(filename):
+def elf_read_libraries(filename) -> List[str]:
p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
@@ -148,26 +170,94 @@ def read_libraries(filename):
raise ValueError('Unparseable (NEEDED) specification')
return libraries
-if __name__ == '__main__':
+def check_imported_symbols(filename) -> bool:
cppfilt = CPPFilt()
+ ok = True
+ for sym, version, arch in read_symbols(filename, True):
+ if version and not check_version(MAX_VERSIONS, version, arch):
+ print('{}: symbol {} from unsupported version {}'.format(filename, cppfilt(sym), version))
+ ok = False
+ return ok
+
+def check_exported_symbols(filename) -> bool:
+ cppfilt = CPPFilt()
+ ok = True
+ for sym,version,arch in read_symbols(filename, False):
+ if arch == 'RISC-V' or sym in IGNORE_EXPORTS:
+ continue
+ print('{}: export of symbol {} not allowed'.format(filename, cppfilt(sym)))
+ ok = False
+ return ok
+
+def check_ELF_libraries(filename) -> bool:
+ ok = True
+ for library_name in elf_read_libraries(filename):
+ if library_name not in ELF_ALLOWED_LIBRARIES:
+ print('{}: NEEDED library {} is not allowed'.format(filename, library_name))
+ ok = False
+ return ok
+
+def macho_read_libraries(filename) -> List[str]:
+ p = subprocess.Popen([OTOOL_CMD, '-L', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
+ (stdout, stderr) = p.communicate()
+ if p.returncode:
+ raise IOError('Error opening file')
+ libraries = []
+ for line in stdout.splitlines():
+ tokens = line.split()
+ if len(tokens) == 1: # skip executable name
+ continue
+ libraries.append(tokens[0].split('/')[-1])
+ return libraries
+
+def check_MACHO_libraries(filename) -> bool:
+ ok = True
+ for dylib in macho_read_libraries(filename):
+ if dylib not in MACHO_ALLOWED_LIBRARIES:
+ print('{} is not in ALLOWED_LIBRARIES!'.format(dylib))
+ ok = False
+ return ok
+
+CHECKS = {
+'ELF': [
+ ('IMPORTED_SYMBOLS', check_imported_symbols),
+ ('EXPORTED_SYMBOLS', check_exported_symbols),
+ ('LIBRARY_DEPENDENCIES', check_ELF_libraries)
+],
+'MACHO': [
+ ('DYNAMIC_LIBRARIES', check_MACHO_libraries)
+]
+}
+
+def identify_executable(executable) -> Optional[str]:
+    with open(executable, 'rb') as f:
+ magic = f.read(4)
+ if magic.startswith(b'MZ'):
+ return 'PE'
+ elif magic.startswith(b'\x7fELF'):
+ return 'ELF'
+ elif magic.startswith(b'\xcf\xfa'):
+ return 'MACHO'
+ return None
+
+if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
- # Check imported symbols
- for sym,version,arch in read_symbols(filename, True):
- if version and not check_version(MAX_VERSIONS, version, arch):
- print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym), version))
- retval = 1
- # Check exported symbols
- if arch != 'RISC-V':
- for sym,version,arch in read_symbols(filename, False):
- if sym in IGNORE_EXPORTS:
- continue
- print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym)))
- retval = 1
- # Check dependency libraries
- for library_name in read_libraries(filename):
- if library_name not in ALLOWED_LIBRARIES:
- print('%s: NEEDED library %s is not allowed' % (filename, library_name))
+ try:
+ etype = identify_executable(filename)
+ if etype is None:
+ print('{}: unknown format'.format(filename))
retval = 1
+ continue
+ failed = []
+ for (name, func) in CHECKS[etype]:
+ if not func(filename):
+ failed.append(name)
+ if failed:
+ print('{}: failed {}'.format(filename, ' '.join(failed)))
+ retval = 1
+ except IOError:
+ print('{}: cannot open'.format(filename))
+ retval = 1
sys.exit(retval)
diff --git a/contrib/devtools/test_deterministic_coverage.sh b/contrib/devtools/test_deterministic_coverage.sh
index 88ac850021..f5cd05a2c3 100755
--- a/contrib/devtools/test_deterministic_coverage.sh
+++ b/contrib/devtools/test_deterministic_coverage.sh
@@ -21,8 +21,8 @@ NON_DETERMINISTIC_TESTS=(
"miner_tests/CreateNewBlock_validity" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"scheduler_tests/manythreads" # scheduler.cpp: CScheduler::serviceQueue()
"scheduler_tests/singlethreadedscheduler_ordered" # scheduler.cpp: CScheduler::serviceQueue()
- "tx_validationcache_tests/checkinputs_test" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
- "tx_validationcache_tests/tx_mempool_block_doublespend" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
+ "txvalidationcache_tests/checkinputs_test" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
+ "txvalidationcache_tests/tx_mempool_block_doublespend" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"txindex_tests/txindex_initial_sync" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"txvalidation_tests/tx_mempool_reject_coinbase" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"validation_block_tests/processnewblock_signals_ordering" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
diff --git a/contrib/filter-lcov.py b/contrib/filter-lcov.py
index df1db76e92..75034616f7 100755
--- a/contrib/filter-lcov.py
+++ b/contrib/filter-lcov.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python3
+# Copyright (c) 2017-2018 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
diff --git a/contrib/gitian-build.py b/contrib/gitian-build.py
index e38fa6fcb0..4a3df93cea 100755
--- a/contrib/gitian-build.py
+++ b/contrib/gitian-build.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python3
+# Copyright (c) 2018-2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
import os
@@ -206,7 +209,7 @@ def main():
args.macos = 'm' in args.os
# Disable for MacOS if no SDK found
- if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
+ if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.14.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml
index 2b86602a82..4a8b125ae7 100644
--- a/contrib/gitian-descriptors/gitian-linux.yml
+++ b/contrib/gitian-descriptors/gitian-linux.yml
@@ -5,7 +5,7 @@ distro: "ubuntu"
suites:
- "bionic"
architectures:
-- "linux64"
+- "amd64"
packages:
- "curl"
- "g++-aarch64-linux-gnu"
@@ -40,7 +40,7 @@ script: |
set -e -o pipefail
WRAP_DIR=$HOME/wrapped
- HOSTS="i686-pc-linux-gnu x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu"
+ HOSTS="x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu"
CONFIGFLAGS="--enable-glibc-back-compat --enable-reduce-exports --disable-bench --disable-gui-tests"
FAKETIME_HOST_PROGS="gcc g++"
FAKETIME_PROGS="date ar ranlib nm"
diff --git a/contrib/gitian-descriptors/gitian-osx-signer.yml b/contrib/gitian-descriptors/gitian-osx-signer.yml
index 2d49493641..a4f3219c22 100644
--- a/contrib/gitian-descriptors/gitian-osx-signer.yml
+++ b/contrib/gitian-descriptors/gitian-osx-signer.yml
@@ -4,7 +4,7 @@ distro: "ubuntu"
suites:
- "bionic"
architectures:
-- "linux64"
+- "amd64"
packages:
- "faketime"
remotes:
diff --git a/contrib/gitian-descriptors/gitian-osx.yml b/contrib/gitian-descriptors/gitian-osx.yml
index 75040c137f..2b6aa599e0 100644
--- a/contrib/gitian-descriptors/gitian-osx.yml
+++ b/contrib/gitian-descriptors/gitian-osx.yml
@@ -5,7 +5,7 @@ distro: "ubuntu"
suites:
- "bionic"
architectures:
-- "linux64"
+- "amd64"
packages:
- "ca-certificates"
- "curl"
@@ -32,7 +32,7 @@ remotes:
- "url": "https://github.com/bitcoin/bitcoin.git"
"dir": "bitcoin"
files:
-- "MacOSX10.11.sdk.tar.gz"
+- "MacOSX10.14.sdk.tar.gz"
script: |
set -e -o pipefail
@@ -90,7 +90,7 @@ script: |
BASEPREFIX="${PWD}/depends"
mkdir -p ${BASEPREFIX}/SDKs
- tar -C ${BASEPREFIX}/SDKs -xf ${BUILD_DIR}/MacOSX10.11.sdk.tar.gz
+ tar -C ${BASEPREFIX}/SDKs -xf ${BUILD_DIR}/MacOSX10.14.sdk.tar.gz
# Build dependencies for each host
for i in $HOSTS; do
@@ -138,6 +138,7 @@ script: |
CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS}
make ${MAKEOPTS}
make ${MAKEOPTS} -C src check-security
+ make ${MAKEOPTS} -C src check-symbols
make install-strip DESTDIR=${INSTALLPATH}
make osx_volname
diff --git a/contrib/gitian-descriptors/gitian-win-signer.yml b/contrib/gitian-descriptors/gitian-win-signer.yml
index 70b7bb111d..9d96465742 100644
--- a/contrib/gitian-descriptors/gitian-win-signer.yml
+++ b/contrib/gitian-descriptors/gitian-win-signer.yml
@@ -4,7 +4,7 @@ distro: "ubuntu"
suites:
- "bionic"
architectures:
-- "linux64"
+- "amd64"
packages:
- "libssl-dev"
- "autoconf"
diff --git a/contrib/gitian-descriptors/gitian-win.yml b/contrib/gitian-descriptors/gitian-win.yml
index b772404ae5..a69439369e 100644
--- a/contrib/gitian-descriptors/gitian-win.yml
+++ b/contrib/gitian-descriptors/gitian-win.yml
@@ -5,7 +5,7 @@ distro: "ubuntu"
suites:
- "bionic"
architectures:
-- "linux64"
+- "amd64"
packages:
- "curl"
- "g++"
@@ -34,8 +34,8 @@ script: |
CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests"
FAKETIME_HOST_PROGS="ar ranlib nm windres strip objcopy"
FAKETIME_PROGS="date makensis zip"
- HOST_CFLAGS="-O2 -g"
- HOST_CXXFLAGS="-O2 -g"
+ HOST_CFLAGS="-O2 -g -fno-ident"
+ HOST_CXXFLAGS="-O2 -g -fno-ident"
export QT_RCC_TEST=1
export QT_RCC_SOURCE_DATE_OVERRIDE=1
diff --git a/contrib/guix/README.md b/contrib/guix/README.md
index 4dfa1729a5..8500379025 100644
--- a/contrib/guix/README.md
+++ b/contrib/guix/README.md
@@ -62,15 +62,16 @@ Likewise, to perform a bootstrapped build (takes even longer):
export ADDITIONAL_GUIX_ENVIRONMENT_FLAGS='--bootstrap --no-substitutes'
```
-### Using the right Guix
+### Using a version of Guix with `guix time-machine` capabilities
-Once Guix is installed, deploy our patched version into your current Guix
-profile. The changes there are slowly being upstreamed.
+> Note: This entire section can be skipped if you are already using a version of
+> Guix that has [the `guix time-machine` command][guix/time-machine].
+
+Once Guix is installed, if it doesn't have the `guix time-machine` command, pull
+the latest `guix`.
```sh
-guix pull --url=https://github.com/dongcarl/guix.git \
- --commit=82c77e52b8b46e0a3aad2cb12307c2e30547deec \
- --max-jobs=4 # change accordingly
+guix pull --max-jobs=4 # change number of jobs accordingly
```
Make sure that you are using your current profile. (You are prompted to do this
@@ -80,9 +81,6 @@ at the end of the `guix pull`)
export PATH="${HOME}/.config/guix/current/bin${PATH:+:}$PATH"
```
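+
+To double-check that the command is now available (assuming the updated profile
+is first in your `PATH`):
+
+```sh
+guix time-machine --help
+```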
-> Note: There is ongoing work to eliminate this entire section using Guix
-> [inferiors][guix/inferiors] and [channels][guix/channels].
-
## Usage
### As a Development Environment
@@ -116,7 +114,7 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
* _**HOSTS**_
Override the space-separated list of platform triples for which to perform a
- bootstrappable build. _(defaults to "i686-linux-gnu x86\_64-linux-gnu
+ bootstrappable build. _(defaults to "x86\_64-linux-gnu
arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu")_
> Windows and OS X platform triplet support are WIP.
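+
+  For instance, to restrict the build to a single host (a hypothetical
+  invocation; any subset of the default triples can be given the same way):
+
+  ```sh
+  HOSTS='x86_64-linux-gnu' ./contrib/guix/guix-build.sh
+  ```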
@@ -224,6 +222,7 @@ repository and will likely put one up soon.
[guix/substitute-server-auth]: https://www.gnu.org/software/guix/manual/en/html_node/Substitute-Server-Authorization.html
[guix/inferiors]: https://www.gnu.org/software/guix/manual/en/html_node/Inferiors.html
[guix/channels]: https://www.gnu.org/software/guix/manual/en/html_node/Channels.html
+[guix/time-machine]: https://guix.gnu.org/manual/en/html_node/Invoking-guix-time_002dmachine.html
[debian/guix-package]: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=850644
[fanquake/guix-docker]: https://github.com/fanquake/core-review/tree/master/guix
diff --git a/contrib/guix/guix-build.sh b/contrib/guix/guix-build.sh
index f8ba8c7ed2..2daa8aba5e 100755
--- a/contrib/guix/guix-build.sh
+++ b/contrib/guix/guix-build.sh
@@ -13,8 +13,14 @@ make -C "${PWD}/depends" -j"$MAX_JOBS" download ${V:+V=1} ${SOURCES_PATH:+SOURCE
# Determine the reference time used for determinism (overridable by environment)
SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git log --format=%at -1)}"
+time-machine() {
+ guix time-machine --url=https://github.com/dongcarl/guix.git \
+ --commit=b3a7c72c8b2425f8ddb0fc6e3b1caeed40f86dee \
+ -- "$@"
+}
+
# Deterministically build Bitcoin Core for HOSTs (overridable by environment)
-for host in ${HOSTS=i686-linux-gnu x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu}; do
+for host in ${HOSTS=x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu}; do
# Display proper warning when the user interrupts the build
trap 'echo "** INT received while building ${host}, you may want to clean up the relevant output and distsrc-* directories before rebuilding"' INT
@@ -22,18 +28,18 @@ for host in ${HOSTS=i686-linux-gnu x86_64-linux-gnu arm-linux-gnueabihf aarch64-
# Run the build script 'contrib/guix/libexec/build.sh' in the build
# container specified by 'contrib/guix/manifest.scm'
# shellcheck disable=SC2086
- guix environment --manifest="${PWD}/contrib/guix/manifest.scm" \
- --container \
- --pure \
- --no-cwd \
- --share="$PWD"=/bitcoin \
- ${SOURCES_PATH:+--share="$SOURCES_PATH"} \
- ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \
- -- env HOST="$host" \
- MAX_JOBS="$MAX_JOBS" \
- SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \
- ${V:+V=1} \
- ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \
- bash -c "cd /bitcoin && bash contrib/guix/libexec/build.sh"
+ time-machine environment --manifest="${PWD}/contrib/guix/manifest.scm" \
+ --container \
+ --pure \
+ --no-cwd \
+ --share="$PWD"=/bitcoin \
+ ${SOURCES_PATH:+--share="$SOURCES_PATH"} \
+ ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \
+ -- env HOST="$host" \
+ MAX_JOBS="$MAX_JOBS" \
+ SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \
+ ${V:+V=1} \
+ ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \
+ bash -c "cd /bitcoin && bash contrib/guix/libexec/build.sh"
done
diff --git a/contrib/install_db4.sh b/contrib/install_db4.sh
index 4b848dda99..e9130a21de 100755
--- a/contrib/install_db4.sh
+++ b/contrib/install_db4.sh
@@ -1,4 +1,7 @@
#!/bin/sh
+# Copyright (c) 2017-2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Install libdb4.8 (Berkeley DB).
diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py
index 1b7d77f7b4..bcca3b7cea 100755
--- a/contrib/linearize/linearize-data.py
+++ b/contrib/linearize/linearize-data.py
@@ -15,6 +15,7 @@ import sys
import hashlib
import datetime
import time
+import glob
from collections import namedtuple
from binascii import unhexlify
@@ -92,6 +93,30 @@ def mkblockmap(blkindex):
blkmap[hash] = height
return blkmap
+# This gets the first block file ID that exists in the input block
+# file directory.
+def getFirstBlockFileId(block_dir_path):
+ # First, this sets up a pattern to search for block files, for
+ # example 'blkNNNNN.dat'.
+ blkFilePattern = os.path.join(block_dir_path, "blk[0-9][0-9][0-9][0-9][0-9].dat")
+
+ # This search is done with glob
+ blkFnList = glob.glob(blkFilePattern)
+
+ if len(blkFnList) == 0:
+ print("blocks not pruned - starting at 0")
+ return 0
+ # We then get the lexicographic minimum, which should be the first
+ # block file name.
+ firstBlkFilePath = min(blkFnList)
+ firstBlkFn = os.path.basename(firstBlkFilePath)
+
+ # now, the string should be ['b','l','k','N','N','N','N','N','.','d','a','t']
+ # So get the ID by choosing: 3 4 5 6 7
+ # The ID is not necessarily 0 if this is a pruned node.
+ blkId = int(firstBlkFn[3:8])
+ return blkId
+
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
@@ -101,7 +126,9 @@ class BlockDataCopier:
self.blkindex = blkindex
self.blkmap = blkmap
- self.inFn = 0
+ # Get first occurring block file id - for pruned nodes this
+ # will not necessarily be 0
+ self.inFn = getFirstBlockFileId(self.settings['input'])
self.inF = None
self.outFn = 0
self.outsz = 0
diff --git a/contrib/macdeploy/README.md b/contrib/macdeploy/README.md
index 29b49ebff4..f78bebf114 100644
--- a/contrib/macdeploy/README.md
+++ b/contrib/macdeploy/README.md
@@ -1,15 +1,135 @@
-### MacDeploy ###
+# macOS Deployment
-For Snow Leopard (which uses [Python 2.6](http://www.python.org/download/releases/2.6/)), you will need the param_parser package:
+The `macdeployqtplus` script should not be run manually. Instead, after building as usual:
- sudo easy_install argparse
+```bash
+make deploy
+```
-This script should not be run manually, instead, after building as usual:
+During the deployment process, the disk image window will pop up briefly while
+the fancy settings are applied. This is normal; please do not interfere. The
+process will unmount the DMG and clean up before finishing.
- make deploy
+When complete, it will have produced `Bitcoin-Qt.dmg`.
-During the process, the disk image window will pop up briefly where the fancy
-settings are applied. This is normal, please do not interfere.
+## SDK Extraction
-When finished, it will produce `Bitcoin-Qt.dmg`.
+`Xcode.app` is packaged in a `.xip` archive.
+This makes the SDK less trivial to extract on non-macOS machines.
+One approach (tested on Debian Buster) is outlined below:
+```bash
+
+apt install clang cpio git liblzma-dev libxml2-dev libssl-dev make
+
+git clone https://github.com/tpoechtrager/xar
+pushd xar/xar
+./configure
+make
+make install
+popd
+
+git clone https://github.com/NiklasRosenstein/pbzx
+pushd pbzx
+clang -llzma -lxar pbzx.c -o pbzx -Wl,-rpath=/usr/local/lib
+popd
+
+xar -xf Xcode_10.2.1.xip -C .
+
+./pbzx/pbzx -n Content | cpio -i
+
+find Xcode.app -type d -name MacOSX.sdk -execdir sh -c 'tar -c MacOSX.sdk/ | gzip -9n > /MacOSX10.14.sdk.tar.gz' \;
+```
+
+On macOS, the process is more straightforward:
+
+```bash
+xip -x Xcode_10.2.1.xip
+tar -C Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/ -czf MacOSX10.14.sdk.tar.gz MacOSX.sdk
+```
+
+Our previously used macOS SDK (`MacOSX10.11.sdk`) can be extracted from
+[Xcode 7.3.1 dmg](https://developer.apple.com/devcenter/download.action?path=/Developer_Tools/Xcode_7.3.1/Xcode_7.3.1.dmg).
+The script [`extract-osx-sdk.sh`](./extract-osx-sdk.sh) automates this. First
+ensure the DMG file is in the current directory, and then run the script. You
+may wish to delete the intermediate `5.hfs` file and `MacOSX10.11.sdk` (the
+directory) when you've confirmed the extraction succeeded.
+
+```bash
+apt-get install p7zip-full sleuthkit
+contrib/macdeploy/extract-osx-sdk.sh
+rm -rf 5.hfs MacOSX10.11.sdk
+```
+
+## Deterministic macOS DMG Notes
+Working macOS DMGs are created on Linux by combining a recent `clang`, the Apple
+`binutils` (`ld`, `ar`, etc.) and DMG authoring tools.
+
+Apple uses `clang` extensively for development and has upstreamed the necessary
+functionality so that a vanilla clang can take advantage. It supports the use of `-F`,
+`-target`, `-mmacosx-version-min`, and `--sysroot`, which are all necessary when
+building for macOS.
+
+Apple's version of `binutils` (called `cctools`) contains lots of functionality missing in the
+FSF's `binutils`. In addition to extra linker options for frameworks and sysroots, several
+other tools are needed as well, such as `install_name_tool`, `lipo`, and `nmedit`. These
+do not build under Linux, so they have been patched to do so. The work here was used as
+a starting point: [mingwandroid/toolchain4](https://github.com/mingwandroid/toolchain4).
+
+In order to build a working toolchain, the following source packages are needed from
+Apple: `cctools`, `dyld`, and `ld64`.
+
+These tools inject timestamps by default, which produce non-deterministic binaries. The
+`ZERO_AR_DATE` environment variable is used to disable that.
+
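+For example, when driving the patched `ar`/`ld64` by hand, it is enough to export
+the variable first (a minimal sketch):
+
+```bash
+export ZERO_AR_DATE=1
+```
+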
+This version of `cctools` has been patched to use the current version of `clang`'s headers
+and its `libLTO.so` rather than those from `llvmgcc`, as it was originally done in `toolchain4`.
+
+To complicate things further, all builds must target an Apple SDK. These SDKs are free to
+download, but not redistributable. To obtain the SDK, register for an Apple Developer Account,
+then download [Xcode 10.2.1](https://download.developer.apple.com/Developer_Tools/Xcode_10.2.1/Xcode_10.2.1.xip).
+
+This file is many gigabytes in size, but most (though not all) of what we need
+is contained in a single directory:
+
+```bash
+Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk
+```
+
+See the SDK Extraction notes above for how to obtain it.
+
+The Gitian descriptors build two sets of files: Linux tools, then Apple binaries, which are
+created using these tools. The build process has been designed to avoid including the
+SDK's files in Gitian's outputs. All interim tarballs are fully deterministic and may be freely
+redistributed.
+
+`genisoimage` is used to create the initial DMG. It is not deterministic as-is, so it has been
+patched. A system `genisoimage` will work fine, but it will not be deterministic because
+the file order will change between invocations. The patch can be seen here: [cdrkit-deterministic.patch](https://github.com/bitcoin/bitcoin/blob/master/depends/patches/native_cdrkit/cdrkit-deterministic.patch).
+No effort was made to fix this cleanly, so it likely leaks memory badly; however, it's only used
+for a single invocation, so that's no real concern.
+
+`genisoimage` cannot compress DMGs, so afterwards, the DMG tool from the
+`libdmg-hfsplus` project is used to compress it. There are several bugs in this tool, and its
+maintainer has seemingly abandoned the project.
+
+The DMG tool has the ability to create DMGs from scratch as well, but this functionality is
+broken. Only the compression feature is currently used. Ideally, the creation could be fixed
+and `genisoimage` would no longer be necessary.
+
+Background images and other features can be added to DMG files by inserting a
+`.DS_Store` before creation. This is generated by the script `contrib/macdeploy/custom_dsstore.py`.
+
+As of OS X 10.9 Mavericks, signing binaries with an Apple-blessed key is required in
+order to satisfy the new Gatekeeper requirements. Because this private key cannot be
+shared, we'll have to be a bit creative in order for the build process to remain somewhat
+deterministic. Here's how it works:
+
+- Builders use Gitian to create an unsigned release. This outputs an unsigned DMG which
+ users may choose to bless and run. It also outputs an unsigned app structure in the form
+ of a tarball, which also contains all of the tools that have been previously (deterministically)
+ built in order to create a final DMG.
+- The Apple keyholder uses this unsigned app to create a detached signature, using the
+ script that is also included there. Detached signatures are available from this [repository](https://github.com/bitcoin-core/bitcoin-detached-sigs).
+- Builders feed the unsigned app + detached signature back into Gitian. It uses the
+ pre-built tools to recombine the pieces into a deterministic DMG.
diff --git a/contrib/macdeploy/extract-osx-sdk.sh b/contrib/macdeploy/extract-osx-sdk.sh
index 3fa05cafac..21243ada04 100755
--- a/contrib/macdeploy/extract-osx-sdk.sh
+++ b/contrib/macdeploy/extract-osx-sdk.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Copyright (c) 2016-2018 The Bitcoin Core developers
+# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/contrib/seeds/makeseeds.py b/contrib/seeds/makeseeds.py
index b90a628781..d516ca10c1 100755
--- a/contrib/seeds/makeseeds.py
+++ b/contrib/seeds/makeseeds.py
@@ -19,13 +19,9 @@ MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
-SUSPICIOUS_HOSTS = {
- "130.211.129.106", "178.63.107.226",
- "83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
- "54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
- "54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
- "54.94.195.96", "54.94.200.247"
-}
+with open("suspicious_hosts.txt", mode="r", encoding="utf-8") as f:
+ SUSPICIOUS_HOSTS = {s.strip() for s in f if s.strip()}
+
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
diff --git a/contrib/seeds/suspicious_hosts.txt b/contrib/seeds/suspicious_hosts.txt
new file mode 100644
index 0000000000..13385cc816
--- /dev/null
+++ b/contrib/seeds/suspicious_hosts.txt
@@ -0,0 +1,16 @@
+130.211.129.106
+148.251.238.178
+176.9.46.6
+178.63.107.226
+54.173.72.127
+54.174.10.182
+54.183.64.54
+54.194.231.211
+54.66.214.167
+54.66.220.137
+54.67.33.14
+54.77.251.214
+54.94.195.96
+54.94.200.247
+83.81.130.26
+88.198.17.7 \ No newline at end of file
diff --git a/contrib/valgrind.supp b/contrib/valgrind.supp
index f232bb62c2..744b8ee70f 100644
--- a/contrib/valgrind.supp
+++ b/contrib/valgrind.supp
@@ -184,3 +184,16 @@
...
fun:_ZN5BCLog6Logger12StartLoggingEv
}
+{
+ Suppress BCLog::Logger::StartLogging() still reachable memory warning
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:_ZN5BCLog6Logger12StartLoggingEv
+}
+{
+ Suppress rest_blockhash_by_height Conditional jump or move depends on uninitialised value(s)
+ Memcheck:Cond
+ fun:_ZL24rest_blockhash_by_heightP11HTTPRequestRKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
+}
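+# A typical invocation that picks up the suppressions in this file (substitute
+# whichever binary you are testing):
+#   valgrind --suppressions=contrib/valgrind.supp src/test/test_bitcoin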
diff --git a/contrib/verify-commits/pre-push-hook.sh b/contrib/verify-commits/pre-push-hook.sh
index 2e15a6326d..a26791f0d1 100755
--- a/contrib/verify-commits/pre-push-hook.sh
+++ b/contrib/verify-commits/pre-push-hook.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Copyright (c) 2014-2018 The Bitcoin Core developers
+# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/depends/README.md b/depends/README.md
index 93f619983f..9461887a33 100644
--- a/depends/README.md
+++ b/depends/README.md
@@ -46,7 +46,7 @@ The paths are automatically configured and no other options are needed unless ta
sudo apt-get install curl librsvg2-bin libtiff-tools bsdmainutils cmake imagemagick libcap-dev libz-dev libbz2-dev python3-setuptools
-#### For Win32/Win64 cross compilation
+#### For Win64 cross compilation
- see [build-windows.md](../doc/build-windows.md#cross-compilation-for-ubuntu-and-windows-subsystem-for-linux)
diff --git a/depends/config.site.in b/depends/config.site.in
index c5731e5269..fb9bf713cc 100644
--- a/depends/config.site.in
+++ b/depends/config.site.in
@@ -59,7 +59,7 @@ PKG_CONFIG="`which pkg-config` --static"
# avoid ruining the cache. Sigh.
export PKG_CONFIG_PATH=$depends_prefix/share/pkgconfig:$depends_prefix/lib/pkgconfig
if test -z "@allow_host_packages@"; then
- export PKGCONFIG_LIBDIR=
+ export PKG_CONFIG_LIBDIR=$depends_prefix/lib/pkgconfig
fi
CPPFLAGS="-I$depends_prefix/include/ $CPPFLAGS"
diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk
index 1f88c209cf..1bc4fb8189 100644
--- a/depends/hosts/darwin.mk
+++ b/depends/hosts/darwin.mk
@@ -1,9 +1,8 @@
OSX_MIN_VERSION=10.12
-OSX_SDK_VERSION=10.11
+OSX_SDK_VERSION=10.14
OSX_SDK=$(SDK_PATH)/MacOSX$(OSX_SDK_VERSION).sdk
-LD64_VERSION=253.9
-darwin_CC=clang -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK) -mlinker-version=$(LD64_VERSION)
-darwin_CXX=clang++ -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK) -mlinker-version=$(LD64_VERSION) -stdlib=libc++
+darwin_CC=clang -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK)
+darwin_CXX=clang++ -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(OSX_SDK) -stdlib=libc++
darwin_CFLAGS=-pipe
darwin_CXXFLAGS=$(darwin_CFLAGS)
diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk
index d360bb5ba6..cbe4fe4d97 100644
--- a/depends/packages/boost.mk
+++ b/depends/packages/boost.mk
@@ -22,18 +22,18 @@ $(package)_config_opts_armv7a_android=address-model=32
$(package)_toolset_$(host_os)=gcc
$(package)_archiver_$(host_os)=$($(package)_ar)
$(package)_toolset_darwin=clang-darwin
-$(package)_config_libraries=chrono,filesystem,system,thread,test
+$(package)_config_libraries=filesystem,system,thread,test
$(package)_cxxflags=-std=c++11 -fvisibility=hidden
$(package)_cxxflags_linux=-fPIC
$(package)_cxxflags_android=-fPIC
endef
define $(package)_preprocess_cmds
- echo "using $(boost_toolset_$(host_os)) : : $($(package)_cxx) : <cxxflags>\"$($(package)_cxxflags) $($(package)_cppflags)\" <linkflags>\"$($(package)_ldflags)\" <archiver>\"$(boost_archiver_$(host_os))\" <striper>\"$(host_STRIP)\" <ranlib>\"$(host_RANLIB)\" <rc>\"$(host_WINDRES)\" : ;" > user-config.jam
+ echo "using $($(package)_toolset_$(host_os)) : : $($(package)_cxx) : <cxxflags>\"$($(package)_cxxflags) $($(package)_cppflags)\" <linkflags>\"$($(package)_ldflags)\" <archiver>\"$($(package)_archiver_$(host_os))\" <striper>\"$(host_STRIP)\" <ranlib>\"$(host_RANLIB)\" <rc>\"$(host_WINDRES)\" : ;" > user-config.jam
endef
define $(package)_config_cmds
- ./bootstrap.sh --without-icu --with-libraries=$(boost_config_libraries)
+ ./bootstrap.sh --without-icu --with-libraries=$($(package)_config_libraries)
endef
define $(package)_build_cmds
diff --git a/depends/packages/native_cctools.mk b/depends/packages/native_cctools.mk
index a065256c1c..4195230b40 100644
--- a/depends/packages/native_cctools.mk
+++ b/depends/packages/native_cctools.mk
@@ -1,45 +1,55 @@
package=native_cctools
-$(package)_version=807d6fd1be5d2224872e381870c0a75387fe05e6
-$(package)_download_path=https://github.com/theuni/cctools-port/archive
+$(package)_version=3764b223c011574971ee3ae09ce968ba5dc2f00f
+$(package)_download_path=https://github.com/tpoechtrager/cctools-port/archive
$(package)_file_name=$($(package)_version).tar.gz
-$(package)_sha256_hash=a09c9ba4684670a0375e42d9d67e7f12c1f62581a27f28f7c825d6d7032ccc6a
+$(package)_sha256_hash=3e35907bf376269a844df08e03cbb43e345c88125374f2228e03724b5f9a2a04
$(package)_build_subdir=cctools
-$(package)_clang_version=3.7.1
-$(package)_clang_download_path=https://llvm.org/releases/$($(package)_clang_version)
+$(package)_clang_version=6.0.1
+$(package)_clang_download_path=https://releases.llvm.org/$($(package)_clang_version)
$(package)_clang_download_file=clang+llvm-$($(package)_clang_version)-x86_64-linux-gnu-ubuntu-14.04.tar.xz
$(package)_clang_file_name=clang-llvm-$($(package)_clang_version)-x86_64-linux-gnu-ubuntu-14.04.tar.xz
-$(package)_clang_sha256_hash=99b28a6b48e793705228a390471991386daa33a9717cd9ca007fcdde69608fd9
+$(package)_clang_sha256_hash=fa5416553ca94a8c071a27134c094a5fb736fe1bd0ecc5ef2d9bc02754e1bef0
+
+$(package)_libtapi_version=3efb201881e7a76a21e0554906cf306432539cef
+$(package)_libtapi_download_path=https://github.com/tpoechtrager/apple-libtapi/archive
+$(package)_libtapi_download_file=$($(package)_libtapi_version).tar.gz
+$(package)_libtapi_file_name=$($(package)_libtapi_version).tar.gz
+$(package)_libtapi_sha256_hash=380c1ca37cfa04a8699d0887a8d3ee1ad27f3d08baba78887c73b09485c0fbd3
+
$(package)_extra_sources=$($(package)_clang_file_name)
+$(package)_extra_sources += $($(package)_libtapi_file_name)
define $(package)_fetch_cmds
$(call fetch_file,$(package),$($(package)_download_path),$($(package)_download_file),$($(package)_file_name),$($(package)_sha256_hash)) && \
-$(call fetch_file,$(package),$($(package)_clang_download_path),$($(package)_clang_download_file),$($(package)_clang_file_name),$($(package)_clang_sha256_hash))
+$(call fetch_file,$(package),$($(package)_clang_download_path),$($(package)_clang_download_file),$($(package)_clang_file_name),$($(package)_clang_sha256_hash)) && \
+$(call fetch_file,$(package),$($(package)_libtapi_download_path),$($(package)_libtapi_download_file),$($(package)_libtapi_file_name),$($(package)_libtapi_sha256_hash))
endef
define $(package)_extract_cmds
mkdir -p $($(package)_extract_dir) && \
echo "$($(package)_sha256_hash) $($(package)_source)" > $($(package)_extract_dir)/.$($(package)_file_name).hash && \
echo "$($(package)_clang_sha256_hash) $($(package)_source_dir)/$($(package)_clang_file_name)" >> $($(package)_extract_dir)/.$($(package)_file_name).hash && \
+ echo "$($(package)_libtapi_sha256_hash) $($(package)_source_dir)/$($(package)_libtapi_file_name)" >> $($(package)_extract_dir)/.$($(package)_file_name).hash && \
$(build_SHA256SUM) -c $($(package)_extract_dir)/.$($(package)_file_name).hash && \
- mkdir -p toolchain/bin toolchain/lib/clang/3.5/include && \
+ mkdir -p toolchain/bin toolchain/lib/clang/$($(package)_clang_version)/include && \
+ mkdir -p libtapi && \
+ tar --no-same-owner --strip-components=1 -C libtapi -xf $($(package)_source_dir)/$($(package)_libtapi_file_name) && \
tar --no-same-owner --strip-components=1 -C toolchain -xf $($(package)_source_dir)/$($(package)_clang_file_name) && \
rm -f toolchain/lib/libc++abi.so* && \
- echo "#!/bin/sh" > toolchain/bin/$(host)-dsymutil && \
- echo "exit 0" >> toolchain/bin/$(host)-dsymutil && \
- chmod +x toolchain/bin/$(host)-dsymutil && \
tar --no-same-owner --strip-components=1 -xf $($(package)_source)
endef
define $(package)_set_vars
-$(package)_config_opts=--target=$(host) --disable-lto-support
-$(package)_ldflags+=-Wl,-rpath=\\$$$$$$$$\$$$$$$$$ORIGIN/../lib
-$(package)_cc=$($(package)_extract_dir)/toolchain/bin/clang
-$(package)_cxx=$($(package)_extract_dir)/toolchain/bin/clang++
+ $(package)_config_opts=--target=$(host) --disable-lto-support --with-libtapi=$($(package)_extract_dir)
+ $(package)_ldflags+=-Wl,-rpath=\\$$$$$$$$\$$$$$$$$ORIGIN/../lib
+ $(package)_cc=$($(package)_extract_dir)/toolchain/bin/clang
+ $(package)_cxx=$($(package)_extract_dir)/toolchain/bin/clang++
endef
define $(package)_preprocess_cmds
- cd $($(package)_build_subdir); ./autogen.sh && \
- sed -i.old "/define HAVE_PTHREADS/d" ld64/src/ld/InputFiles.h
+ CC=$($(package)_cc) CXX=$($(package)_cxx) INSTALLPREFIX=$($(package)_extract_dir) ./libtapi/build.sh && \
+ CC=$($(package)_cc) CXX=$($(package)_cxx) INSTALLPREFIX=$($(package)_extract_dir) ./libtapi/install.sh && \
+ sed -i.old "/define HAVE_PTHREADS/d" $($(package)_build_subdir)/ld64/src/ld/InputFiles.h
endef
define $(package)_config_cmds
@@ -52,6 +62,9 @@ endef
define $(package)_stage_cmds
$(MAKE) DESTDIR=$($(package)_staging_dir) install && \
+ mkdir -p $($(package)_staging_prefix_dir)/lib/ && \
+ cd $($(package)_extract_dir) && \
+ cp lib/libtapi.so.6 $($(package)_staging_prefix_dir)/lib/ && \
cd $($(package)_extract_dir)/toolchain && \
mkdir -p $($(package)_staging_prefix_dir)/lib/clang/$($(package)_clang_version)/include && \
mkdir -p $($(package)_staging_prefix_dir)/bin $($(package)_staging_prefix_dir)/include && \
diff --git a/depends/packages/native_libdmg-hfsplus.mk b/depends/packages/native_libdmg-hfsplus.mk
index 8493f1d979..c0f0ce74de 100644
--- a/depends/packages/native_libdmg-hfsplus.mk
+++ b/depends/packages/native_libdmg-hfsplus.mk
@@ -12,7 +12,7 @@ define $(package)_preprocess_cmds
endef
define $(package)_config_cmds
- cmake -DCMAKE_INSTALL_PREFIX:PATH=$(build_prefix) ..
+ cmake -DCMAKE_INSTALL_PREFIX:PATH=$(build_prefix) -DCMAKE_C_FLAGS="-Wl,--build-id=none" ..
endef
define $(package)_build_cmds
diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk
index efa76965d5..366b1d0c42 100644
--- a/depends/packages/qt.mk
+++ b/depends/packages/qt.mk
@@ -121,7 +121,6 @@ $(package)_config_opts_darwin += -device-option MAC_SDK_VERSION=$(OSX_SDK_VERSIO
$(package)_config_opts_darwin += -device-option CROSS_COMPILE="$(host)-"
$(package)_config_opts_darwin += -device-option MAC_MIN_VERSION=$(OSX_MIN_VERSION)
$(package)_config_opts_darwin += -device-option MAC_TARGET=$(host)
-$(package)_config_opts_darwin += -device-option MAC_LD64_VERSION=$(LD64_VERSION)
endif
$(package)_config_opts_linux = -qt-xkbcommon-x11
diff --git a/depends/patches/qt/mac-qmake.conf b/depends/patches/qt/mac-qmake.conf
index 337d0eb9ca..4cd96df29f 100644
--- a/depends/patches/qt/mac-qmake.conf
+++ b/depends/patches/qt/mac-qmake.conf
@@ -18,7 +18,7 @@ QMAKE_APPLE_DEVICE_ARCHS=x86_64
!host_build: QMAKE_CFLAGS += -target $${MAC_TARGET}
!host_build: QMAKE_OBJECTIVE_CFLAGS += $$QMAKE_CFLAGS
!host_build: QMAKE_CXXFLAGS += $$QMAKE_CFLAGS
-!host_build: QMAKE_LFLAGS += -target $${MAC_TARGET} -mlinker-version=$${MAC_LD64_VERSION}
+!host_build: QMAKE_LFLAGS += -target $${MAC_TARGET}
QMAKE_AR = $${CROSS_COMPILE}ar cq
QMAKE_RANLIB=$${CROSS_COMPILE}ranlib
QMAKE_LIBTOOL=$${CROSS_COMPILE}libtool
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index cd7ccf80ab..7e307ab7c8 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -861,7 +861,8 @@ RECURSIVE = YES
# Note that relative paths are relative to the directory from which doxygen is
# run.
-EXCLUDE = src/leveldb \
+EXCLUDE = src/crc32c \
+ src/leveldb \
src/json \
src/test \
src/qt/test
diff --git a/doc/build-osx.md b/doc/build-osx.md
index bf655538c7..7b76117c8b 100644
--- a/doc/build-osx.md
+++ b/doc/build-osx.md
@@ -22,6 +22,7 @@ Then install [Homebrew](https://brew.sh).
brew install automake berkeley-db4 libtool boost miniupnpc pkg-config python qt libevent qrencode
```
+If you run into issues, check [Homebrew's troubleshooting page](https://docs.brew.sh/Troubleshooting).
See [dependencies.md](dependencies.md) for a complete overview.
If you want to build the disk image with `make deploy` (.dmg / optional), you need RSVG:
@@ -113,96 +114,3 @@ tail -f $HOME/Library/Application\ Support/Bitcoin/debug.log
* Tested on OS X 10.12 Sierra through macOS 10.15 Catalina on 64-bit Intel
processors only.
* Building with downloaded Qt binaries is not officially supported. See the notes in [#7714](https://github.com/bitcoin/bitcoin/issues/7714).
-
-## Deterministic macOS DMG Notes
-Working macOS DMGs are created in Linux by combining a recent `clang`, the Apple
-`binutils` (`ld`, `ar`, etc) and DMG authoring tools.
-
-Apple uses `clang` extensively for development and has upstreamed the necessary
-functionality so that a vanilla clang can take advantage. It supports the use of `-F`,
-`-target`, `-mmacosx-version-min`, and `--sysroot`, which are all necessary when
-building for macOS.
-
-Apple's version of `binutils` (called `cctools`) contains lots of functionality missing in the
-FSF's `binutils`. In addition to extra linker options for frameworks and sysroots, several
-other tools are needed as well such as `install_name_tool`, `lipo`, and `nmedit`. These
-do not build under Linux, so they have been patched to do so. The work here was used as
-a starting point: [mingwandroid/toolchain4](https://github.com/mingwandroid/toolchain4).
-
-In order to build a working toolchain, the following source packages are needed from
-Apple: `cctools`, `dyld`, and `ld64`.
-
-These tools inject timestamps by default, which produce non-deterministic binaries. The
-`ZERO_AR_DATE` environment variable is used to disable that.
-
-This version of `cctools` has been patched to use the current version of `clang`'s headers
-and its `libLTO.so` rather than those from `llvmgcc`, as it was originally done in `toolchain4`.
-
-To complicate things further, all builds must target an Apple SDK. These SDKs are free to
-download, but not redistributable. To obtain it, register for an Apple Developer Account,
-then download the [Xcode 7.3.1 dmg](https://developer.apple.com/devcenter/download.action?path=/Developer_Tools/Xcode_7.3.1/Xcode_7.3.1.dmg).
-
-This file is several gigabytes in size, but only a single directory inside is needed:
-```
-Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk
-```
-
-Unfortunately, the usual Linux tools (7zip, hpmount, loopback mount) are incapable of
-opening this file. To create a tarball suitable for Gitian input, there are two options:
-
-Using macOS, you can mount the DMG, and then create it with:
-```shell
-hdiutil attach Xcode_7.3.1.dmg
-tar -C /Volumes/Xcode/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/ -czf MacOSX10.11.sdk.tar.gz MacOSX10.11.sdk
-```
-
-Alternatively, you can use 7zip and SleuthKit to extract the files one by one. The script
-[`extract-osx-sdk.sh`](./../contrib/macdeploy/extract-osx-sdk.sh) automates this. First
-ensure the DMG file is in the current directory, and then run the script. You may wish to
-delete the `intermediate 5.hfs` file and `MacOSX10.11.sdk` (the directory) when you've
-confirmed the extraction succeeded.
-
-```shell
-apt-get install p7zip-full sleuthkit
-contrib/macdeploy/extract-osx-sdk.sh
-rm -rf 5.hfs MacOSX10.11.sdk
-```
-
-The Gitian descriptors build 2 sets of files: Linux tools, then Apple binaries which are
-created using these tools. The build process has been designed to avoid including the
-SDK's files in Gitian's outputs. All interim tarballs are fully deterministic and may be freely
-redistributed.
-
-`genisoimage` is used to create the initial DMG. It is not deterministic as-is, so it has been
-patched. A system `genisoimage` will work fine, but it will not be deterministic because
-the file-order will change between invocations. The patch can be seen here: [theuni/osx-cross-depends](https://raw.githubusercontent.com/theuni/osx-cross-depends/master/patches/cdrtools/genisoimage.diff).
-No effort was made to fix this cleanly, so it likely leaks memory badly. But it's only used for
-a single invocation, so that's no real concern.
-
-`genisoimage` cannot compress DMGs, so afterwards, the DMG tool from the
-`libdmg-hfsplus` project is used to compress it. There are several bugs in this tool and its
-maintainer has seemingly abandoned the project. It has been forked and is available
-(with fixes) here: [theuni/libdmg-hfsplus](https://github.com/theuni/libdmg-hfsplus).
-
-The DMG tool has the ability to create DMGs from scratch as well, but this functionality is
-broken. Only the compression feature is currently used. Ideally, the creation could be fixed
-and `genisoimage` would no longer be necessary.
-
-Background images and other features can be added to DMG files by inserting a
-`.DS_Store` before creation. This is generated by the script
-`contrib/macdeploy/custom_dsstore.py`.
-
-As of OS X 10.9 Mavericks, using an Apple-blessed key to sign binaries is a requirement in
-order to satisfy the new Gatekeeper requirements. Because this private key cannot be
-shared, we'll have to be a bit creative in order for the build process to remain somewhat
-deterministic. Here's how it works:
-
-- Builders use Gitian to create an unsigned release. This outputs an unsigned DMG which
- users may choose to bless and run. It also outputs an unsigned app structure in the form
- of a tarball, which also contains all of the tools that have been previously (deterministically)
- built in order to create a final DMG.
-- The Apple keyholder uses this unsigned app to create a detached signature, using the
- script that is also included there. Detached signatures are available from this [repository](https://github.com/bitcoin-core/bitcoin-detached-sigs).
-- Builders feed the unsigned app + detached signature back into Gitian. It uses the
- pre-built tools to recombine the pieces into a deterministic DMG.
-
diff --git a/doc/build-unix.md b/doc/build-unix.md
index e799e709fa..6b51db5f55 100644
--- a/doc/build-unix.md
+++ b/doc/build-unix.md
@@ -80,7 +80,7 @@ Build requirements:
Now, you can either build from self-compiled [depends](/depends/README.md) or install the required dependencies:
- sudo apt-get install libevent-dev libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-test-dev libboost-thread-dev
+ sudo apt-get install libevent-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev
BerkeleyDB is required for the wallet.
diff --git a/doc/descriptors.md b/doc/descriptors.md
index a98f43737e..181ff77e50 100644
--- a/doc/descriptors.md
+++ b/doc/descriptors.md
@@ -10,6 +10,14 @@ Supporting RPCs are:
- `deriveaddresses` takes as input a descriptor and computes the corresponding
addresses.
- `listunspent` outputs a specialized descriptor for the reported unspent outputs.
+- `getaddressinfo` outputs a descriptor for solvable addresses (since v0.18).
+- `importmulti` takes as input descriptors to import into the wallet
+ (since v0.18).
+- `generatetodescriptor` takes as input a descriptor and generates coins to it
+ (`regtest` only, since v0.19).
+- `utxoupdatepsbt` takes as input descriptors to add information to the psbt
+ (since v0.19).
+- `createmultisig` and `addmultisigaddress` return descriptors as well (since v0.20).
This document describes the language. For the specifics on usage, see the RPC
documentation for the functions mentioned above.
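+
+As a quick illustration (a hypothetical session; `<pubkey>` and `<checksum>` are
+placeholders, and `getdescriptorinfo` reports the checksum to append):
+
+```
+bitcoin-cli getdescriptorinfo "wpkh(<pubkey>)"
+bitcoin-cli deriveaddresses "wpkh(<pubkey>)#<checksum>"
+```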
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index b50f552e92..d106aab3e4 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -9,6 +9,7 @@ Developer Notes
- [Coding Style (C++)](#coding-style-c)
- [Coding Style (Python)](#coding-style-python)
- [Coding Style (Doxygen-compatible comments)](#coding-style-doxygen-compatible-comments)
+ - [Generating Documentation](#generating-documentation)
- [Development tips and tricks](#development-tips-and-tricks)
- [Compiling for debugging](#compiling-for-debugging)
- [Compiling for gprof profiling](#compiling-for-gprof-profiling)
@@ -35,6 +36,9 @@ Developer Notes
- [Source code organization](#source-code-organization)
- [GUI](#gui)
- [Subtrees](#subtrees)
+ - [Upgrading LevelDB](#upgrading-leveldb)
+ - [File Descriptor Counts](#file-descriptor-counts)
+ - [Consensus Compatibility](#consensus-compatibility)
- [Scripted diffs](#scripted-diffs)
- [Suggestions and examples](#suggestions-and-examples)
- [Release notes](#release-notes)
@@ -138,12 +142,17 @@ For example, to describe a function use:
```c++
/**
- * ... text ...
- * @param[in] arg1 A description
- * @param[in] arg2 Another argument description
- * @pre Precondition for function...
+ * ... Description ...
+ *
+ * @param[in] arg1 input description...
+ * @param[in] arg2 input description...
+ * @param[out] arg3 output description...
+ * @return Return cases...
+ * @throws Error type and cases...
+ * @pre Pre-condition for function...
+ * @post Post-condition for function...
*/
-bool function(int arg1, const char *arg2)
+bool function(int arg1, const char *arg2, std::string& arg3)
```
A complete list of `@xxx` commands can be found at http://www.doxygen.nl/manual/commands.html.
@@ -158,44 +167,73 @@ To describe a class, use the same construct above the class definition:
* @see GetWarnings()
*/
class CAlert
-{
```
To describe a member or variable use:
```c++
-int var; //!< Detailed description after the member
+//! Description before the member
+int var;
```
or
```c++
-//! Description before the member
-int var;
+int var; //!< Description after the member
```
Also OK:
```c++
///
-/// ... text ...
+/// ... Description ...
///
bool function2(int arg1, const char *arg2)
```
-Not OK (used plenty in the current source, but not picked up):
+Not picked up by Doxygen:
```c++
//
-// ... text ...
+// ... Description ...
//
```
+Also not picked up by Doxygen:
+```c++
+/*
+ * ... Description ...
+ */
+```
+
A full list of comment syntaxes picked up by Doxygen can be found at http://www.doxygen.nl/manual/docblocks.html,
but the above styles are favored.
-Documentation can be generated with `make docs` and cleaned up with `make clean-docs`. The resulting files are located in `doc/doxygen/html`; open `index.html` to view the homepage.
+Recommendations:
-Before running `make docs`, you will need to install dependencies `doxygen` and `dot`. For example, on macOS via Homebrew:
-```
-brew install graphviz doxygen
-```
+- Avoid duplicating type and input/output information in function
+ descriptions.
+
+- Use backticks (&#96;&#96;) to refer to `argument` names in function and
+ parameter descriptions.
+
+- Backticks aren't required when referring to functions Doxygen already knows
+ about; it will build hyperlinks for these automatically. See
+ http://www.doxygen.nl/manual/autolink.html for complete info.
+
+- Avoid linking to external documentation; links can break.
+
+- Javadoc and all valid Doxygen comments are stripped from Doxygen source code
+ previews (`STRIP_CODE_COMMENTS = YES` in [Doxyfile.in](doc/Doxyfile.in)). If
+ you want a comment to be preserved, it must instead use `//` or `/* */`.
+
+### Generating Documentation
+
+The documentation can be generated with `make docs` and cleaned up with `make
+clean-docs`. The resulting files are located in `doc/doxygen/html`; open
+`index.html` in that directory to view the homepage.
+
+Before running `make docs`, you'll need to install these dependencies:
+
+Linux: `sudo apt install doxygen graphviz`
+
+macOS: `brew install doxygen graphviz`
Development tips and tricks
---------------------------
@@ -820,6 +858,10 @@ Current subtrees include:
- **Note**: Follow the instructions in [Upgrading LevelDB](#upgrading-leveldb) when
merging upstream changes to the LevelDB subtree.
+- src/crc32c
+ - Used by leveldb for hardware acceleration of CRC32C checksums for data integrity.
+ - Upstream at https://github.com/google/crc32c ; Maintained by Google.
+
- src/secp256k1
- Upstream at https://github.com/bitcoin-core/secp256k1/ ; actively maintained by Core contributors.
@@ -919,7 +961,7 @@ introduce accidental changes.
Some good examples of scripted-diff:
- [scripted-diff: Rename InitInterfaces to NodeContext](https://github.com/bitcoin/bitcoin/commit/301bd41a2e6765b185bd55f4c541f9e27aeea29d)
-uses an elegant script to replace occurences of multiple terms in all source files.
+uses an elegant script to replace occurrences of multiple terms in all source files.
- [scripted-diff: Remove g_connman, g_banman globals](https://github.com/bitcoin/bitcoin/commit/301bd41a2e6765b185bd55f4c541f9e27aeea29d)
replaces specific terms in a list of specific source files.
@@ -1047,6 +1089,12 @@ A few guidelines for introducing and reviewing new RPC interfaces:
new RPC is replacing a deprecated RPC, to avoid both RPCs confusingly
showing up in the command list.
+- Use *invalid* bech32 addresses (e.g. the constant `EXAMPLE_ADDRESS`) for
+ `RPCExamples` help documentation.
+
+ - *Rationale*: Prevent accidental transactions by users and encourage the use
+ of bech32 addresses by default.
+
- Use the `UNIX_EPOCH_TIME` constant when describing UNIX epoch time or
timestamps in the documentation.
diff --git a/doc/fuzzing.md b/doc/fuzzing.md
index 50e9251b8d..c34ca4cb59 100644
--- a/doc/fuzzing.md
+++ b/doc/fuzzing.md
@@ -7,11 +7,8 @@ describe how to use it with AFL and libFuzzer.
## Preparing fuzzing
-AFL needs an input directory with examples, and an output directory where it
-will place examples that it found. These can be anywhere in the file system,
-we'll define environment variables to make it easy to reference them.
-
-libFuzzer will use the input directory as output directory.
+The fuzzer needs some inputs to work on, but the inputs or seeds can be used
+interchangeably between libFuzzer and AFL.
Extract the example seeds (or other starting inputs) into the inputs
directory before starting fuzzing.
@@ -21,13 +18,19 @@ git clone https://github.com/bitcoin-core/qa-assets
export DIR_FUZZ_IN=$PWD/qa-assets/fuzz_seed_corpus
```
-Only for AFL:
+AFL needs an input directory with examples, and an output directory where it
+will place examples that it found. These can be anywhere in the file system;
+we'll define environment variables to make it easy to reference them.
+
+So, for AFL only, you also need to configure the output directory:
```
mkdir outputs
export AFLOUT=$PWD/outputs
```
+libFuzzer will use the input directory as its output directory.
+
## AFL
### Building AFL
@@ -41,6 +44,9 @@ make
export AFLPATH=$PWD
```
+On macOS you may need to ignore x86 compilation checks when running `make`:
+`AFL_NO_X86=1 make`.
+
### Instrumentation
To build Bitcoin Core using AFL instrumentation (this assumes that the
@@ -48,9 +54,15 @@ To build Bitcoin Core using AFL instrumentation (this assumes that the
```
./configure --disable-ccache --disable-shared --enable-tests --enable-fuzz CC=${AFLPATH}/afl-gcc CXX=${AFLPATH}/afl-g++
export AFL_HARDEN=1
-cd src/
make
```
+
+If you are using clang, you will need to replace `afl-gcc` with `afl-clang`
+and `afl-g++` with `afl-clang++`, so the first line above becomes:
+```
+./configure --disable-ccache --disable-shared --enable-tests --enable-fuzz CC=${AFLPATH}/afl-clang CXX=${AFLPATH}/afl-clang++
+```
+
We disable ccache because we don't want to pollute the ccache with instrumented
objects, and similarly don't want to use non-instrumented cached objects linked
in.
@@ -60,25 +72,32 @@ The fuzzing can be sped up significantly (~200x) by using `afl-clang-fast` and
compiling using `afl-clang-fast`/`afl-clang-fast++` the resulting
binary will be instrumented in such a way that the AFL
features "persistent mode" and "deferred forkserver" can be used. See
-https://github.com/mcarpenter/afl/tree/master/llvm_mode for details.
+https://github.com/google/AFL/tree/master/llvm_mode for details.
### Fuzzing
To start the actual fuzzing use:
```
-export FUZZ_TARGET=fuzz_target_foo # Pick a fuzz_target
+export FUZZ_TARGET=bech32 # Pick a fuzz_target
mkdir ${AFLOUT}/${FUZZ_TARGET}
-$AFLPATH/afl-fuzz -i ${DIR_FUZZ_IN}/${FUZZ_TARGET} -o ${AFLOUT}/${FUZZ_TARGET} -m52 -- test/fuzz/${FUZZ_TARGET}
+$AFLPATH/afl-fuzz -i ${DIR_FUZZ_IN}/${FUZZ_TARGET} -o ${AFLOUT}/${FUZZ_TARGET} -m52 -- src/test/fuzz/${FUZZ_TARGET}
```
You may have to change a few kernel parameters to test optimally - `afl-fuzz`
will print an error and suggestion if so.
+On macOS you may need to set `AFL_NO_FORKSRV=1` to get the target to run.
+```
+export FUZZ_TARGET=bech32 # Pick a fuzz_target
+mkdir ${AFLOUT}/${FUZZ_TARGET}
+AFL_NO_FORKSRV=1 $AFLPATH/afl-fuzz -i ${DIR_FUZZ_IN}/${FUZZ_TARGET} -o ${AFLOUT}/${FUZZ_TARGET} -m52 -- src/test/fuzz/${FUZZ_TARGET}
+```
+
## libFuzzer
-A recent version of `clang`, the address/undefined sanitizers (ASan/UBSan) and libFuzzer is needed (all
-found in the `compiler-rt` runtime libraries package).
+A recent version of `clang`, the address/undefined sanitizers (ASan/UBSan) and
+libFuzzer are needed (all found in the `compiler-rt` runtime libraries package).
To build all fuzz targets with libFuzzer, run
@@ -87,11 +106,33 @@ To build all fuzz targets with libFuzzer, run
make
```
-The fuzzer needs some inputs to work on, but the inputs or seeds can be used
-interchangeably between libFuzzer and AFL.
-
See https://llvm.org/docs/LibFuzzer.html#running on how to run the libFuzzer
instrumented executable.
-Alternatively run the script in `./test/fuzz/test_runner.py` and provide it
-with the `${DIR_FUZZ_IN}` created earlier.
+Alternatively, you can run the fuzzing test harness script (only libFuzzer is
+supported so far), passing it the inputs directory and the specific test target
+you want to run.
+
+```
+./test/fuzz/test_runner.py ${DIR_FUZZ_IN} bech32
+```
+
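+For instance, a single instrumented binary can also be run directly against its
+seed corpus (a minimal sketch using the `bech32` target from above):
+
+```
+src/test/fuzz/bech32 ${DIR_FUZZ_IN}/bech32
+```
+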
+### macOS hints for libFuzzer
+
+The default clang/llvm version supplied by Apple on macOS does not include
+fuzzing libraries, so macOS users will need to install a full version, for
+example using `brew install llvm`.
+
+Should you run into problems with the address sanitizer, you may need to run
+`./configure` with `--disable-asm` to avoid errors with certain assembly code in
+Bitcoin Core. See [developer notes on sanitizers](https://github.com/bitcoin/bitcoin/blob/master/doc/developer-notes.md#sanitizers)
+for more information.
+
+You may also need to specify the correct paths for clang and clang++, for
+example `CC=/path/to/clang CXX=/path/to/clang++`, if the non-system clang does
+not come first in your path.
+
+A full configure invocation that was tested on macOS Catalina with `brew`-installed `llvm`:
+```
+./configure --disable-ccache --enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=/usr/local/opt/llvm/bin/clang CXX=/usr/local/opt/llvm/bin/clang++ --disable-asm
+```
diff --git a/doc/release-notes-15437.md b/doc/release-notes-15437.md
deleted file mode 100644
index 6614207757..0000000000
--- a/doc/release-notes-15437.md
+++ /dev/null
@@ -1,53 +0,0 @@
-P2P and network changes
------------------------
-
-#### Removal of reject network messages from Bitcoin Core (BIP61)
-
-The command line option to enable BIP61 (`-enablebip61`) has been removed.
-
-This feature has been disabled by default since Bitcoin Core version 0.18.0.
-Nodes on the network can not generally be trusted to send valid ("reject")
-messages, so this should only ever be used when connected to a trusted node.
-Please use the recommended alternatives if you rely on this deprecated feature:
-
-* Testing or debugging of implementations of the Bitcoin P2P network protocol
- should be done by inspecting the log messages that are produced by a recent
- version of Bitcoin Core. Bitcoin Core logs debug messages
- (`-debug=<category>`) to a stream (`-printtoconsole`) or to a file
- (`-debuglogfile=<debug.log>`).
-
-* Testing the validity of a block can be achieved by specific RPCs:
- - `submitblock`
- - `getblocktemplate` with `'mode'` set to `'proposal'` for blocks with
- potentially invalid POW
-
-* Testing the validity of a transaction can be achieved by specific RPCs:
- - `sendrawtransaction`
- - `testmempoolaccept`
-
-* Wallets should not use the absence of "reject" messages to indicate a
- transaction has propagated the network, nor should wallets use "reject"
- messages to set transaction fees. Wallets should rather use fee estimation
- to determine transaction fees and set replace-by-fee if desired. Thus, they
- could wait until the transaction has confirmed (taking into account the fee
- target they set (compare the RPC `estimatesmartfee`)) or listen for the
- transaction announcement by other network peers to check for propagation.
-
-The removal of BIP61 REJECT message support also has the following minor RPC
-and logging implications:
-
-* `testmempoolaccept` and `sendrawtransaction` no longer return the P2P REJECT
- code when a transaction is not accepted to the mempool. They still return the
- verbal reject reason.
-
-* Log messages that previously reported the REJECT code when a transaction was
- not accepted to the mempool now no longer report the REJECT code. The reason
- for rejection is still reported.
-
-Updated RPCs
-------------
-
-- `testmempoolaccept` and `sendrawtransaction` no longer return the P2P REJECT
- code when a transaction is not accepted to the mempool. See the Section
- _Removal of reject network messages from Bitcoin Core (BIP61)_ for details on
- the removal of BIP61 REJECT message support.
diff --git a/doc/release-notes-15954.md b/doc/release-notes-15954.md
deleted file mode 100644
index f4d2c5688c..0000000000
--- a/doc/release-notes-15954.md
+++ /dev/null
@@ -1,4 +0,0 @@
-Configuration option changes
------------------------------
-
-Importing blocks upon startup via the `bootstrap.dat` file no longer occurs by default. The file must now be specified with `-loadblock=<file>`.
diff --git a/doc/release-notes-17056.md b/doc/release-notes-17056.md
deleted file mode 100644
index 23d5a8c8cd..0000000000
--- a/doc/release-notes-17056.md
+++ /dev/null
@@ -1,4 +0,0 @@
-Low-level RPC Changes
-===
-
-- A new descriptor type `sortedmulti(...)` has been added to support multisig scripts where the public keys are sorted lexicographically in the resulting script.
diff --git a/doc/release-notes-17410.md b/doc/release-notes-17410.md
deleted file mode 100644
index 08ed353889..0000000000
--- a/doc/release-notes-17410.md
+++ /dev/null
@@ -1,5 +0,0 @@
-Command-line options
---------------------
-
-- The `-debug=db` logging category has been renamed to `-debug=walletdb`, to distinguish it from `coindb`.
- `-debug=db` has been deprecated and will be removed in the next major release.
diff --git a/doc/release-notes-17437.md b/doc/release-notes-17437.md
deleted file mode 100644
index 3edfd00a38..0000000000
--- a/doc/release-notes-17437.md
+++ /dev/null
@@ -1,5 +0,0 @@
-Low-level RPC Changes
-===
-
-- The RPC gettransaction, listtransactions and listsinceblock responses now also
-includes the height of the block that contains the wallet transaction, if any.
diff --git a/doc/release-notes.md b/doc/release-notes.md
index eec1ef9c46..22d5767b7b 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -62,6 +62,64 @@ distribution provides binaries for the RISC-V platform.
Notable changes
===============
+P2P and network changes
+-----------------------
+
+#### Removal of reject network messages from Bitcoin Core (BIP61)
+
+The command line option to enable BIP61 (`-enablebip61`) has been removed.
+
+This feature has been disabled by default since Bitcoin Core version 0.18.0.
+Nodes on the network can not generally be trusted to send valid ("reject")
+messages, so this should only ever be used when connected to a trusted node.
+Please use the recommended alternatives if you rely on this deprecated feature:
+
+* Testing or debugging of implementations of the Bitcoin P2P network protocol
+ should be done by inspecting the log messages that are produced by a recent
+ version of Bitcoin Core. Bitcoin Core logs debug messages
+ (`-debug=<category>`) to a stream (`-printtoconsole`) or to a file
+ (`-debuglogfile=<debug.log>`).
+
+* Testing the validity of a block can be achieved by specific RPCs:
+ - `submitblock`
+ - `getblocktemplate` with `'mode'` set to `'proposal'` for blocks with
+ potentially invalid POW
+
+* Testing the validity of a transaction can be achieved by specific RPCs:
+ - `sendrawtransaction`
+ - `testmempoolaccept`
+
+* Wallets should not use the absence of "reject" messages to indicate a
+ transaction has propagated the network, nor should wallets use "reject"
+ messages to set transaction fees. Wallets should rather use fee estimation
+ to determine transaction fees and set replace-by-fee if desired. Thus, they
+ could wait until the transaction has confirmed (taking into account the fee
+ target they set (compare the RPC `estimatesmartfee`)) or listen for the
+ transaction announcement by other network peers to check for propagation.
+
+The removal of BIP61 REJECT message support also has the following minor RPC
+and logging implications:
+
+* `testmempoolaccept` and `sendrawtransaction` no longer return the P2P REJECT
+ code when a transaction is not accepted to the mempool. They still return the
+ verbal reject reason.
+
+* Log messages that previously reported the REJECT code when a transaction was
+ not accepted to the mempool now no longer report the REJECT code. The reason
+ for rejection is still reported.
+
+Updated RPCs
+------------
+
+- `testmempoolaccept` and `sendrawtransaction` no longer return the P2P REJECT
+ code when a transaction is not accepted to the mempool. See the Section
+ _Removal of reject network messages from Bitcoin Core (BIP61)_ for details on
+ the removal of BIP61 REJECT message support.
+
+- A new descriptor type `sortedmulti(...)` has been added to support multisig scripts where the public keys are sorted lexicographically in the resulting script.
+
+- `walletprocesspsbt` and `walletcreatefundedpsbt` now include BIP 32 derivation paths by default for public keys if we know them. This can be disabled by setting `bip32derivs` to `false`.
+
Build System
------------
@@ -81,14 +139,21 @@ New settings
- RPC Whitelist system. It can give certain RPC users permissions to only some RPC calls.
It can be set with two command line arguments (`rpcwhitelist` and `rpcwhitelistdefault`). (#12763)
+- A new `-asmap` configuration option has been added to enable IP-to-ASN mapping
+ for bucketing of the network peers to diversify the network connections. The
+ legacy /16 prefix mapping remains the default. See [issue
+ #16599](https://github.com/bitcoin/bitcoin/issues/16599), [PR
+ #16702](https://github.com/bitcoin/bitcoin/pull/16702), and the `bitcoind
+ help` for more information. This option is experimental and subject to changes
+ or removal in future releases.
+
Updated settings
----------------
-Updated RPCs
-------------
+- Importing blocks upon startup via the `bootstrap.dat` file no longer occurs by default. The file must now be specified with `-loadblock=<file>`.
-Note: some low-level RPC changes mainly useful for testing are described in the
-Low-level Changes section below.
+- The `-debug=db` logging category has been renamed to `-debug=walletdb`, to distinguish it from `coindb`.
+ `-debug=db` has been deprecated and will be removed in the next major release.
GUI changes
-----------
@@ -101,9 +166,30 @@ Wallet
- The wallet now by default uses bech32 addresses when using RPC, and creates native segwit change outputs.
- The way that output trust was computed has been fixed in #16766, which impacts confirmed/unconfirmed balance status and coin selection.
+- The RPC `gettransaction`, `listtransactions` and `listsinceblock` responses now also
+include the height of the block that contains the wallet transaction, if any.
+
+- RPC `getaddressinfo` changes:
+
+ - the `label` field has been deprecated in favor of the `labels` field and
+ will be removed in 0.21. It can be re-enabled in the interim by launching
+ with `-deprecatedrpc=label`.
+
+ - the `labels` behavior of returning an array of JSON objects containing name
+ and purpose key/value pairs has been deprecated in favor of an array of
+ label names and will be removed in 0.21. The previous behavior can be
+ re-enabled in the interim by launching with `-deprecatedrpc=labelspurpose`.
+
Low-level changes
=================
+Command line
+------------
+
+Command line options prefixed with main/test/regtest network names like
+`-main.port=8333` or `-test.server=1` previously were allowed but ignored. Now
+they trigger "Invalid parameter" errors on startup.
+
Tests
-----
diff --git a/doc/release-notes/release-notes-0.19.1.md b/doc/release-notes/release-notes-0.19.1.md
new file mode 100644
index 0000000000..5746bebb0d
--- /dev/null
+++ b/doc/release-notes/release-notes-0.19.1.md
@@ -0,0 +1,115 @@
+0.19.1 Release Notes
+===============================
+
+Bitcoin Core version 0.19.1 is now available from:
+
+ <https://bitcoincore.org/bin/bitcoin-core-0.19.1/>
+
+This minor release includes various bug fixes and performance
+improvements, as well as updated translations.
+
+Please report bugs using the issue tracker at GitHub:
+
+ <https://github.com/bitcoin/bitcoin/issues>
+
+To receive security and update notifications, please subscribe to:
+
+ <https://bitcoincore.org/en/list/announcements/join/>
+
+How to Upgrade
+==============
+
+If you are running an older version, shut it down. Wait until it has completely
+shut down (which might take a few minutes for older versions), then run the
+installer (on Windows) or just copy over `/Applications/Bitcoin-Qt` (on Mac)
+or `bitcoind`/`bitcoin-qt` (on Linux).
+
+Upgrading directly from a version of Bitcoin Core that has reached its EOL is
+possible, but it might take some time if the datadir needs to be migrated. Old
+wallet versions of Bitcoin Core are generally supported.
+
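A minimal sketch of the Linux upgrade path described above (the install prefix is an assumption):

```
# Stop the running node and wait for it to shut down cleanly.
bitcoin-cli stop

# Once it has exited, replace the binaries with the 0.19.1 ones.
sudo cp bitcoin-0.19.1/bin/bitcoind bitcoin-0.19.1/bin/bitcoin-cli /usr/local/bin/
```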
+Compatibility
+==============
+
+Bitcoin Core is supported and extensively tested on operating systems using
+the Linux kernel, macOS 10.10+, and Windows 7 and newer. It is not recommended
+to use Bitcoin Core on unsupported systems.
+
+Bitcoin Core should also work on most other Unix-like systems but is not
+as frequently tested on them.
+
+From Bitcoin Core 0.17.0 onwards, macOS versions earlier than 10.10 are no
+longer supported, as Bitcoin Core is now built using Qt 5.9.x which requires
+macOS 10.10+. Additionally, Bitcoin Core does not yet change appearance when
+macOS "dark mode" is activated.
+
+In addition to previously supported CPU platforms, this release's pre-compiled
+distribution provides binaries for the RISC-V platform.
+
+0.19.1 change log
+=================
+
+### Wallet
+- #17643 Fix origfee return for bumpfee with feerate arg (instagibbs)
+- #16963 Fix `unique_ptr` usage in boost::signals2 (promag)
+- #17258 Fix issue with conflicted mempool tx in listsinceblock (adamjonas, mchrostowski)
+- #17924 Bug: IsUsedDestination shouldn't use key id as script id for ScriptHash (instagibbs)
+- #17621 IsUsedDestination should count any known single-key address (instagibbs)
+- #17843 Reset reused transactions cache (fjahr)
+
+### RPC and other APIs
+- #17687 cli: Fix fatal leveldb error when specifying -blockfilterindex=basic twice (brakmic)
+- #17728 require second argument only for scantxoutset start action (achow101)
+- #17445 zmq: Fix due to invalid argument and multiple notifiers (promag)
+- #17524 psbt: handle unspendable psbts (achow101)
+- #17156 psbt: check that various indexes and amounts are within bounds (achow101)
+
+### GUI
+- #17427 Fix missing qRegisterMetaType for `size_t` (hebasto)
+- #17695 disable File-\>CreateWallet during startup (fanquake)
+- #17634 Fix comparison function signature (hebasto)
+- #18062 Fix uninitialized WalletView::progressDialog (promag)
+
+### Tests and QA
+- #17416 Appveyor improvement - text file for vcpkg package list (sipsorcery)
+- #17488 fix "bitcoind already running" warnings on macOS (fanquake)
+- #17980 add missing #include to fix compiler errors (kallewoof)
+
+### Platform support
+- #17736 Update msvc build for Visual Studio 2019 v16.4 (sipsorcery)
+- #17364 Updates to appveyor config for VS2019 and Qt5.9.8 + msvc project fixes (sipsorcery)
+- #17887 bug-fix macos: give free bytes to `F_PREALLOCATE` (kallewoof)
+
+### Miscellaneous
+- #17897 init: Stop indexes on shutdown after ChainStateFlushed callback (jimpo)
+- #17450 util: Add missing headers to util/fees.cpp (hebasto)
+- #17654 Unbreak build with Boost 1.72.0 (jbeich)
+- #17857 scripts: Fix symbol-check & security-check argument passing (fanquake)
+- #17762 Log to net category for exceptions in ProcessMessages (laanwj)
+- #18100 Update univalue subtree (MarcoFalke)
+
+Credits
+=======
+
+Thanks to everyone who directly contributed to this release:
+
+- Aaron Clauson
+- Adam Jonas
+- Andrew Chow
+- Fabian Jahr
+- fanquake
+- Gregory Sanders
+- Harris
+- Hennadii Stepanov
+- Jan Beich
+- Jim Posen
+- João Barbosa
+- Karl-Johan Alm
+- Luke Dashjr
+- MarcoFalke
+- Michael Chrostowski
+- Russell Yanofsky
+- Wladimir J. van der Laan
+
+As well as to everyone that helped with translations on
+[Transifex](https://www.transifex.com/bitcoin/bitcoin/).
diff --git a/doc/release-process.md b/doc/release-process.md
index 1ffef3e106..e0f29f6ad7 100644
--- a/doc/release-process.md
+++ b/doc/release-process.md
@@ -121,7 +121,7 @@ Ensure gitian-builder is up-to-date:
echo '5a60e0a4b3e0b4d655317b2f12a810211c50242138322b16e7e01c6fbb89d92f inputs/osslsigncode-2.0.tar.gz' | sha256sum -c
popd
-Create the macOS SDK tarball, see the [macOS build instructions](build-osx.md#deterministic-macos-dmg-notes) for details, and copy it into the inputs directory.
+Create the macOS SDK tarball, see the [macdeploy instructions](/contrib/macdeploy/README.md#deterministic-macos-dmg-notes) for details, and copy it into the inputs directory.
### Optional: Seed the Gitian sources cache and offline git repositories
@@ -268,7 +268,6 @@ The list of files should be:
```
bitcoin-${VERSION}-aarch64-linux-gnu.tar.gz
bitcoin-${VERSION}-arm-linux-gnueabihf.tar.gz
-bitcoin-${VERSION}-i686-pc-linux-gnu.tar.gz
bitcoin-${VERSION}-riscv64-linux-gnu.tar.gz
bitcoin-${VERSION}-x86_64-linux-gnu.tar.gz
bitcoin-${VERSION}-osx64.tar.gz
@@ -329,8 +328,6 @@ bitcoin.org (see below for bitcoin.org update instructions).
- Update packaging repo
- - Notify BlueMatt so that he can start building [the PPAs](https://launchpad.net/~bitcoin/+archive/ubuntu/bitcoin)
-
- Push the flatpak to flathub, e.g. https://github.com/flathub/org.bitcoincore.bitcoin-qt/pull/2
- Push the latest version to master (if applicable), e.g. https://github.com/bitcoin-core/packaging/pull/32
diff --git a/share/qt/Info.plist.in b/share/qt/Info.plist.in
index 275078c88d..3befba3425 100644
--- a/share/qt/Info.plist.in
+++ b/share/qt/Info.plist.in
@@ -16,7 +16,7 @@
<key>CFBundlePackageType</key>
<string>APPL</string>
- <key>CFBundleGetInfoString</key>
+ <key>NSHumanReadableCopyright</key>
<string>@CLIENT_VERSION_MAJOR@.@CLIENT_VERSION_MINOR@.@CLIENT_VERSION_REVISION@.@CLIENT_VERSION_BUILD@, Copyright © 2009-@COPYRIGHT_YEAR@ @COPYRIGHT_HOLDERS_FINAL@</string>
<key>CFBundleShortVersionString</key>
diff --git a/share/setup.nsi.in b/share/setup.nsi.in
index be482ae741..dd9ee54d6d 100644
--- a/share/setup.nsi.in
+++ b/share/setup.nsi.in
@@ -58,8 +58,8 @@ VIAddVersionKey ProductVersion "@PACKAGE_VERSION@"
VIAddVersionKey CompanyName "${COMPANY}"
VIAddVersionKey CompanyWebsite "${URL}"
VIAddVersionKey FileVersion "@PACKAGE_VERSION@"
-VIAddVersionKey FileDescription ""
-VIAddVersionKey LegalCopyright ""
+VIAddVersionKey FileDescription "Installer for @PACKAGE_NAME@"
+VIAddVersionKey LegalCopyright "Copyright (C) 2009-@COPYRIGHT_YEAR@ @COPYRIGHT_HOLDERS_FINAL@"
InstallDirRegKey HKCU "${REGKEY}" Path
ShowUninstDetails show
diff --git a/src/Makefile.am b/src/Makefile.am
index 5ae945ac69..6fc6d5b5a3 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -178,7 +178,6 @@ BITCOIN_CORE_H = \
random.h \
randomenv.h \
reverse_iterator.h \
- reverselock.h \
rpc/blockchain.h \
rpc/client.h \
rpc/protocol.h \
@@ -210,6 +209,7 @@ BITCOIN_CORE_H = \
txmempool.h \
ui_interface.h \
undo.h \
+ util/asmap.h \
util/bip32.h \
util/bytevectorhash.h \
util/check.h \
@@ -219,6 +219,7 @@ BITCOIN_CORE_H = \
util/system.h \
util/macros.h \
util/memory.h \
+ util/message.h \
util/moneystr.h \
util/rbf.h \
util/settings.h \
@@ -227,7 +228,6 @@ BITCOIN_CORE_H = \
util/time.h \
util/translation.h \
util/url.h \
- util/validation.h \
util/vector.h \
validation.h \
validationinterface.h \
@@ -241,7 +241,6 @@ BITCOIN_CORE_H = \
wallet/fees.h \
wallet/ismine.h \
wallet/load.h \
- wallet/psbtwallet.h \
wallet/rpcwallet.h \
wallet/scriptpubkeyman.h \
wallet/wallet.h \
@@ -349,7 +348,6 @@ libbitcoin_wallet_a_SOURCES = \
wallet/feebumper.cpp \
wallet/fees.cpp \
wallet/load.cpp \
- wallet/psbtwallet.cpp \
wallet/rpcdump.cpp \
wallet/rpcwallet.cpp \
wallet/scriptpubkeyman.cpp \
@@ -510,11 +508,13 @@ libbitcoin_util_a_SOURCES = \
support/cleanse.cpp \
sync.cpp \
threadinterrupt.cpp \
+ util/asmap.cpp \
util/bip32.cpp \
util/bytevectorhash.cpp \
util/error.cpp \
util/fees.cpp \
util/system.cpp \
+ util/message.cpp \
util/moneystr.cpp \
util/rbf.cpp \
util/settings.cpp \
@@ -524,7 +524,6 @@ libbitcoin_util_a_SOURCES = \
util/string.cpp \
util/time.cpp \
util/url.cpp \
- util/validation.cpp \
$(BITCOIN_CORE_H)
if GLIBC_BACK_COMPAT
@@ -554,12 +553,9 @@ if TARGET_WINDOWS
bitcoind_SOURCES += bitcoind-res.rc
endif
-# Libraries below may be listed more than once to resolve circular dependencies (see
-# https://eli.thegreenplace.net/2013/07/09/library-order-in-static-linking#circular-dependency)
bitcoind_LDADD = \
$(LIBBITCOIN_SERVER) \
$(LIBBITCOIN_WALLET) \
- $(LIBBITCOIN_SERVER) \
$(LIBBITCOIN_COMMON) \
$(LIBUNIVALUE) \
$(LIBBITCOIN_UTIL) \
@@ -702,6 +698,11 @@ clean-local:
$(AM_V_GEN) $(WINDRES) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(CPPFLAGS) -DWINDRES_PREPROC -i $< -o $@
check-symbols: $(bin_PROGRAMS)
+if TARGET_DARWIN
+ @echo "Checking macOS dynamic libraries..."
+ $(AM_V_at) OTOOL=$(OTOOL) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
+endif
+
if GLIBC_BACK_COMPAT
@echo "Checking glibc back compat..."
$(AM_V_at) READELF=$(READELF) CPPFILT=$(CPPFILT) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
@@ -714,6 +715,7 @@ if HARDEN
endif
if EMBEDDED_LEVELDB
+include Makefile.crc32c.include
include Makefile.leveldb.include
endif
diff --git a/src/Makefile.crc32c.include b/src/Makefile.crc32c.include
new file mode 100644
index 0000000000..802b3a2e4b
--- /dev/null
+++ b/src/Makefile.crc32c.include
@@ -0,0 +1,75 @@
+# Copyright (c) 2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+LIBCRC32C_INT = crc32c/libcrc32c.a
+LIBLEVELDB_SSE42_INT = leveldb/libleveldb_sse42.a
+
+EXTRA_LIBRARIES += $(LIBCRC32C_INT)
+
+LIBCRC32C = $(LIBCRC32C_INT)
+
+CRC32C_CPPFLAGS_INT =
+CRC32C_CPPFLAGS_INT += -I$(srcdir)/crc32c/include
+CRC32C_CPPFLAGS_INT += -DHAVE_BUILTIN_PREFETCH=@HAVE_BUILTIN_PREFETCH@
+CRC32C_CPPFLAGS_INT += -DHAVE_MM_PREFETCH=@HAVE_MM_PREFETCH@
+CRC32C_CPPFLAGS_INT += -DHAVE_STRONG_GETAUXVAL=@HAVE_STRONG_GETAUXVAL@
+CRC32C_CPPFLAGS_INT += -DHAVE_WEAK_GETAUXVAL=@HAVE_WEAK_GETAUXVAL@
+CRC32C_CPPFLAGS_INT += -DCRC32C_TESTS_BUILT_WITH_GLOG=0
+
+if ENABLE_SSE42
+CRC32C_CPPFLAGS_INT += -DHAVE_SSE42=1
+else
+CRC32C_CPPFLAGS_INT += -DHAVE_SSE42=0
+endif
+
+if ENABLE_ARM_CRC
+CRC32C_CPPFLAGS_INT += -DHAVE_ARM64_CRC32C=1
+else
+CRC32C_CPPFLAGS_INT += -DHAVE_ARM64_CRC32C=0
+endif
+
+if WORDS_BIGENDIAN
+CRC32C_CPPFLAGS_INT += -DBYTE_ORDER_BIG_ENDIAN=1
+else
+CRC32C_CPPFLAGS_INT += -DBYTE_ORDER_BIG_ENDIAN=0
+endif
+
+crc32c_libcrc32c_a_CPPFLAGS = $(AM_CPPFLAGS) $(CRC32C_CPPFLAGS_INT) $(CRC32C_CPPFLAGS)
+crc32c_libcrc32c_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+
+crc32c_libcrc32c_a_SOURCES =
+crc32c_libcrc32c_a_SOURCES += crc32c/include/crc32c/crc32c.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_arm64.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_arm64_linux_check.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_internal.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_prefetch.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_read_le.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_round_up.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_sse42_check.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_sse42.h
+
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c.cc
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_portable.cc
+
+if ENABLE_SSE42
+LIBCRC32C_SSE42_INT = crc32c/libcrc32c_sse42.a
+EXTRA_LIBRARIES += $(LIBCRC32C_SSE42_INT)
+LIBCRC32C += $(LIBCRC32C_SSE42_INT)
+
+crc32c_libcrc32c_sse42_a_CPPFLAGS = $(crc32c_libcrc32c_a_CPPFLAGS)
+crc32c_libcrc32c_sse42_a_CXXFLAGS = $(crc32c_libcrc32c_a_CXXFLAGS) $(SSE42_CXXFLAGS)
+
+crc32c_libcrc32c_sse42_a_SOURCES = crc32c/src/crc32c_sse42.cc
+endif
+
+if ENABLE_ARM_CRC
+LIBCRC32C_ARM_CRC_INT = crc32c/libcrc32c_arm_crc.a
+EXTRA_LIBRARIES += $(LIBCRC32C_ARM_CRC_INT)
+LIBCRC32C += $(LIBCRC32C_ARM_CRC_INT)
+
+crc32c_libcrc32c_arm_crc_a_CPPFLAGS = $(crc32c_libcrc32c_a_CPPFLAGS)
+crc32c_libcrc32c_arm_crc_a_CXXFLAGS = $(crc32c_libcrc32c_a_CXXFLAGS) $(ARM_CRC_CXXFLAGS)
+
+crc32c_libcrc32c_arm_crc_a_SOURCES = crc32c/src/crc32c_arm64.cc
+endif
diff --git a/src/Makefile.leveldb.include b/src/Makefile.leveldb.include
index bd08bcb4ed..04b53471e4 100644
--- a/src/Makefile.leveldb.include
+++ b/src/Makefile.leveldb.include
@@ -4,27 +4,33 @@
LIBLEVELDB_INT = leveldb/libleveldb.a
LIBMEMENV_INT = leveldb/libmemenv.a
-LIBLEVELDB_SSE42_INT = leveldb/libleveldb_sse42.a
EXTRA_LIBRARIES += $(LIBLEVELDB_INT)
EXTRA_LIBRARIES += $(LIBMEMENV_INT)
-EXTRA_LIBRARIES += $(LIBLEVELDB_SSE42_INT)
-LIBLEVELDB += $(LIBLEVELDB_INT)
+LIBLEVELDB += $(LIBLEVELDB_INT) $(LIBCRC32C)
LIBMEMENV += $(LIBMEMENV_INT)
-LIBLEVELDB_SSE42 = $(LIBLEVELDB_SSE42_INT)
LEVELDB_CPPFLAGS += -I$(srcdir)/leveldb/include
LEVELDB_CPPFLAGS += -I$(srcdir)/leveldb/helpers/memenv
LEVELDB_CPPFLAGS_INT =
LEVELDB_CPPFLAGS_INT += -I$(srcdir)/leveldb
-LEVELDB_CPPFLAGS_INT += $(LEVELDB_TARGET_FLAGS)
-LEVELDB_CPPFLAGS_INT += -DLEVELDB_ATOMIC_PRESENT
+LEVELDB_CPPFLAGS_INT += -I$(srcdir)/crc32c/include
LEVELDB_CPPFLAGS_INT += -D__STDC_LIMIT_MACROS
+LEVELDB_CPPFLAGS_INT += -DHAVE_SNAPPY=0 -DHAVE_CRC32C=1
+LEVELDB_CPPFLAGS_INT += -DHAVE_FDATASYNC=@HAVE_FDATASYNC@
+LEVELDB_CPPFLAGS_INT += -DHAVE_FULLFSYNC=@HAVE_FULLFSYNC@
+LEVELDB_CPPFLAGS_INT += -DHAVE_O_CLOEXEC=@HAVE_O_CLOEXEC@
+
+if WORDS_BIGENDIAN
+LEVELDB_CPPFLAGS_INT += -DLEVELDB_IS_BIG_ENDIAN=1
+else
+LEVELDB_CPPFLAGS_INT += -DLEVELDB_IS_BIG_ENDIAN=0
+endif
if TARGET_WINDOWS
-LEVELDB_CPPFLAGS_INT += -DLEVELDB_PLATFORM_WINDOWS -D__USE_MINGW_ANSI_STDIO=1
+LEVELDB_CPPFLAGS_INT += -DLEVELDB_PLATFORM_WINDOWS -D_UNICODE -DUNICODE -D__USE_MINGW_ANSI_STDIO=1
else
LEVELDB_CPPFLAGS_INT += -DLEVELDB_PLATFORM_POSIX
endif
@@ -33,12 +39,8 @@ leveldb_libleveldb_a_CPPFLAGS = $(AM_CPPFLAGS) $(LEVELDB_CPPFLAGS_INT) $(LEVELDB
leveldb_libleveldb_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
leveldb_libleveldb_a_SOURCES=
-leveldb_libleveldb_a_SOURCES += leveldb/port/atomic_pointer.h
-leveldb_libleveldb_a_SOURCES += leveldb/port/port_example.h
-leveldb_libleveldb_a_SOURCES += leveldb/port/port_posix.h
-leveldb_libleveldb_a_SOURCES += leveldb/port/win/stdint.h
+leveldb_libleveldb_a_SOURCES += leveldb/port/port_stdcxx.h
leveldb_libleveldb_a_SOURCES += leveldb/port/port.h
-leveldb_libleveldb_a_SOURCES += leveldb/port/port_win.h
leveldb_libleveldb_a_SOURCES += leveldb/port/thread_annotations.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/db.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/options.h
@@ -47,6 +49,7 @@ leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/filter_policy.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/slice.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/table_builder.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/env.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/export.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/c.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/iterator.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/cache.h
@@ -78,6 +81,7 @@ leveldb_libleveldb_a_SOURCES += leveldb/table/format.h
leveldb_libleveldb_a_SOURCES += leveldb/table/iterator_wrapper.h
leveldb_libleveldb_a_SOURCES += leveldb/util/crc32c.h
leveldb_libleveldb_a_SOURCES += leveldb/util/env_posix_test_helper.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/env_windows_test_helper.h
leveldb_libleveldb_a_SOURCES += leveldb/util/arena.h
leveldb_libleveldb_a_SOURCES += leveldb/util/random.h
leveldb_libleveldb_a_SOURCES += leveldb/util/posix_logger.h
@@ -87,7 +91,9 @@ leveldb_libleveldb_a_SOURCES += leveldb/util/coding.h
leveldb_libleveldb_a_SOURCES += leveldb/util/testutil.h
leveldb_libleveldb_a_SOURCES += leveldb/util/mutexlock.h
leveldb_libleveldb_a_SOURCES += leveldb/util/logging.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/no_destructor.h
leveldb_libleveldb_a_SOURCES += leveldb/util/testharness.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/windows_logger.h
leveldb_libleveldb_a_SOURCES += leveldb/db/builder.cc
leveldb_libleveldb_a_SOURCES += leveldb/db/c.cc
@@ -120,7 +126,6 @@ leveldb_libleveldb_a_SOURCES += leveldb/util/coding.cc
leveldb_libleveldb_a_SOURCES += leveldb/util/comparator.cc
leveldb_libleveldb_a_SOURCES += leveldb/util/crc32c.cc
leveldb_libleveldb_a_SOURCES += leveldb/util/env.cc
-leveldb_libleveldb_a_SOURCES += leveldb/util/env_posix.cc
leveldb_libleveldb_a_SOURCES += leveldb/util/filter_policy.cc
leveldb_libleveldb_a_SOURCES += leveldb/util/hash.cc
leveldb_libleveldb_a_SOURCES += leveldb/util/histogram.cc
@@ -129,21 +134,12 @@ leveldb_libleveldb_a_SOURCES += leveldb/util/options.cc
leveldb_libleveldb_a_SOURCES += leveldb/util/status.cc
if TARGET_WINDOWS
-leveldb_libleveldb_a_SOURCES += leveldb/util/env_win.cc
-leveldb_libleveldb_a_SOURCES += leveldb/port/port_win.cc
+leveldb_libleveldb_a_SOURCES += leveldb/util/env_windows.cc
else
-leveldb_libleveldb_a_SOURCES += leveldb/port/port_posix.cc
+leveldb_libleveldb_a_SOURCES += leveldb/util/env_posix.cc
endif
leveldb_libmemenv_a_CPPFLAGS = $(leveldb_libleveldb_a_CPPFLAGS)
leveldb_libmemenv_a_CXXFLAGS = $(leveldb_libleveldb_a_CXXFLAGS)
leveldb_libmemenv_a_SOURCES = leveldb/helpers/memenv/memenv.cc
leveldb_libmemenv_a_SOURCES += leveldb/helpers/memenv/memenv.h
-
-leveldb_libleveldb_sse42_a_CPPFLAGS = $(leveldb_libleveldb_a_CPPFLAGS)
-leveldb_libleveldb_sse42_a_CXXFLAGS = $(leveldb_libleveldb_a_CXXFLAGS)
-if ENABLE_HWCRC32
-leveldb_libleveldb_sse42_a_CPPFLAGS += -DLEVELDB_PLATFORM_POSIX_SSE
-leveldb_libleveldb_sse42_a_CXXFLAGS += $(SSE42_CXXFLAGS)
-endif
-leveldb_libleveldb_sse42_a_SOURCES = leveldb/port/port_posix_sse.cc
diff --git a/src/Makefile.qt_locale.include b/src/Makefile.qt_locale.include
index fad4873545..79db5cd7b4 100644
--- a/src/Makefile.qt_locale.include
+++ b/src/Makefile.qt_locale.include
@@ -10,6 +10,7 @@ QT_TS = \
qt/locale/bitcoin_de_DE.ts \
qt/locale/bitcoin_el.ts \
qt/locale/bitcoin_el_GR.ts \
+ qt/locale/bitcoin_en.ts \
qt/locale/bitcoin_en_AU.ts \
qt/locale/bitcoin_en_GB.ts \
qt/locale/bitcoin_eo.ts \
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 091ef50349..5a2ecff651 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -2,11 +2,11 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
FUZZ_TARGETS = \
test/fuzz/addr_info_deserialize \
test/fuzz/address_deserialize \
test/fuzz/addrman_deserialize \
+ test/fuzz/asmap \
test/fuzz/banentry_deserialize \
test/fuzz/base_encode_decode \
test/fuzz/bech32 \
@@ -21,38 +21,75 @@ FUZZ_TARGETS = \
test/fuzz/blocktransactions_deserialize \
test/fuzz/blocktransactionsrequest_deserialize \
test/fuzz/blockundo_deserialize \
+ test/fuzz/bloom_filter \
test/fuzz/bloomfilter_deserialize \
test/fuzz/coins_deserialize \
+ test/fuzz/decode_tx \
test/fuzz/descriptor_parse \
test/fuzz/diskblockindex_deserialize \
test/fuzz/eval_script \
test/fuzz/fee_rate_deserialize \
test/fuzz/flat_file_pos_deserialize \
+ test/fuzz/float \
test/fuzz/hex \
test/fuzz/integer \
test/fuzz/inv_deserialize \
+ test/fuzz/key \
+ test/fuzz/key_io \
test/fuzz/key_origin_info_deserialize \
+ test/fuzz/locale \
test/fuzz/merkle_block_deserialize \
test/fuzz/messageheader_deserialize \
test/fuzz/netaddr_deserialize \
+ test/fuzz/netaddress \
test/fuzz/out_point_deserialize \
+ test/fuzz/p2p_transport_deserializer \
test/fuzz/parse_hd_keypath \
test/fuzz/parse_iso8601 \
- test/fuzz/partial_merkle_tree_deserialize \
- test/fuzz/partially_signed_transaction_deserialize \
- test/fuzz/prefilled_transaction_deserialize \
test/fuzz/parse_numbers \
test/fuzz/parse_script \
test/fuzz/parse_univalue \
+ test/fuzz/partial_merkle_tree_deserialize \
+ test/fuzz/partially_signed_transaction_deserialize \
+ test/fuzz/prefilled_transaction_deserialize \
+ test/fuzz/process_message \
+ test/fuzz/process_message_addr \
+ test/fuzz/process_message_block \
+ test/fuzz/process_message_blocktxn \
+ test/fuzz/process_message_cmpctblock \
+ test/fuzz/process_message_feefilter \
+ test/fuzz/process_message_filteradd \
+ test/fuzz/process_message_filterclear \
+ test/fuzz/process_message_filterload \
+ test/fuzz/process_message_getaddr \
+ test/fuzz/process_message_getblocks \
+ test/fuzz/process_message_getblocktxn \
+ test/fuzz/process_message_getdata \
+ test/fuzz/process_message_getheaders \
+ test/fuzz/process_message_headers \
+ test/fuzz/process_message_inv \
+ test/fuzz/process_message_mempool \
+ test/fuzz/process_message_notfound \
+ test/fuzz/process_message_ping \
+ test/fuzz/process_message_pong \
+ test/fuzz/process_message_sendcmpct \
+ test/fuzz/process_message_sendheaders \
+ test/fuzz/process_message_tx \
+ test/fuzz/process_message_verack \
+ test/fuzz/process_message_version \
test/fuzz/psbt \
test/fuzz/psbt_input_deserialize \
test/fuzz/psbt_output_deserialize \
test/fuzz/pub_key_deserialize \
+ test/fuzz/rolling_bloom_filter \
test/fuzz/script \
test/fuzz/script_deserialize \
test/fuzz/script_flags \
+ test/fuzz/script_ops \
+ test/fuzz/scriptnum_ops \
test/fuzz/service_deserialize \
test/fuzz/spanparsing \
+ test/fuzz/strprintf \
test/fuzz/sub_net_deserialize \
test/fuzz/transaction \
test/fuzz/tx_in \
@@ -81,7 +118,8 @@ JSON_TEST_FILES = \
test/data/tx_invalid.json \
test/data/tx_valid.json
-RAW_TEST_FILES =
+RAW_TEST_FILES = \
+ test/data/asmap.raw
GENERATED_TEST_FILES = $(JSON_TEST_FILES:.json=.json.h) $(RAW_TEST_FILES:.raw=.raw.h)
@@ -92,7 +130,8 @@ BITCOIN_TEST_SUITE = \
FUZZ_SUITE = \
test/fuzz/fuzz.cpp \
test/fuzz/fuzz.h \
- test/fuzz/FuzzedDataProvider.h
+ test/fuzz/FuzzedDataProvider.h \
+ test/fuzz/util.h
FUZZ_SUITE_LD_COMMON = \
$(LIBBITCOIN_SERVER) \
@@ -185,6 +224,7 @@ BITCOIN_TESTS =\
test/uint256_tests.cpp \
test/util_tests.cpp \
test/validation_block_tests.cpp \
+ test/validation_flush_tests.cpp \
test/versionbits_tests.cpp
if ENABLE_PROPERTY_TESTS
@@ -204,7 +244,8 @@ BITCOIN_TESTS += \
wallet/test/wallet_crypto_tests.cpp \
wallet/test/coinselector_tests.cpp \
wallet/test/init_tests.cpp \
- wallet/test/ismine_tests.cpp
+ wallet/test/ismine_tests.cpp \
+ wallet/test/scriptpubkeyman_tests.cpp
BITCOIN_TEST_SUITE += \
wallet/test/wallet_test_fixture.cpp \
@@ -232,341 +273,570 @@ test_test_bitcoin_LDADD += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS)
endif
if ENABLE_FUZZ
-test_fuzz_block_SOURCES = $(FUZZ_SUITE) test/fuzz/block.cpp
-test_fuzz_block_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_block_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_block_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_block_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_block_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_block_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCK_DESERIALIZE=1
-test_fuzz_block_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_block_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_block_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
-test_fuzz_blocklocator_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_blocklocator_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCKLOCATOR_DESERIALIZE=1
-test_fuzz_blocklocator_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_blocklocator_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_blocklocator_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_addr_info_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DADDR_INFO_DESERIALIZE=1
+test_fuzz_addr_info_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_addr_info_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_addr_info_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_addr_info_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_blockmerkleroot_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_blockmerkleroot_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCKMERKLEROOT=1
-test_fuzz_blockmerkleroot_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_blockmerkleroot_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_blockmerkleroot_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_address_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DADDRESS_DESERIALIZE=1
+test_fuzz_address_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_address_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_address_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_address_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_addrman_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_addrman_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DADDRMAN_DESERIALIZE=1
test_fuzz_addrman_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_addrman_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_addrman_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_addrman_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_addrman_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_blockheader_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_blockheader_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCKHEADER_DESERIALIZE=1
-test_fuzz_blockheader_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_blockheader_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_blockheader_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_asmap_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_asmap_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_asmap_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_asmap_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_asmap_SOURCES = $(FUZZ_SUITE) test/fuzz/asmap.cpp
-test_fuzz_banentry_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_banentry_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBANENTRY_DESERIALIZE=1
test_fuzz_banentry_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_banentry_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_banentry_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_banentry_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_banentry_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_bech32_SOURCES = $(FUZZ_SUITE) test/fuzz/bech32.cpp
-test_fuzz_bech32_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_bech32_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_bech32_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_bech32_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_base_encode_decode_SOURCES = $(FUZZ_SUITE) test/fuzz/base_encode_decode.cpp
test_fuzz_base_encode_decode_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
test_fuzz_base_encode_decode_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_base_encode_decode_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_base_encode_decode_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_base_encode_decode_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_base_encode_decode_SOURCES = $(FUZZ_SUITE) test/fuzz/base_encode_decode.cpp
-test_fuzz_txundo_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_txundo_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DTXUNDO_DESERIALIZE=1
-test_fuzz_txundo_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_txundo_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_txundo_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_blockundo_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_blockundo_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCKUNDO_DESERIALIZE=1
-test_fuzz_blockundo_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_blockundo_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_blockundo_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_coins_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_coins_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DCOINS_DESERIALIZE=1
-test_fuzz_coins_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_coins_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_coins_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_bech32_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_bech32_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_bech32_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_bech32_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_bech32_SOURCES = $(FUZZ_SUITE) test/fuzz/bech32.cpp
-test_fuzz_descriptor_parse_SOURCES = $(FUZZ_SUITE) test/fuzz/descriptor_parse.cpp
-test_fuzz_descriptor_parse_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_descriptor_parse_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_descriptor_parse_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_descriptor_parse_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_block_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_block_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_block_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_block_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_block_SOURCES = $(FUZZ_SUITE) test/fuzz/block.cpp
-test_fuzz_netaddr_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_netaddr_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DNETADDR_DESERIALIZE=1
-test_fuzz_netaddr_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_netaddr_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_netaddr_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_block_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCK_DESERIALIZE=1
+test_fuzz_block_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_block_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_block_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_block_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_parse_iso8601_SOURCES = $(FUZZ_SUITE) test/fuzz/parse_iso8601.cpp
-test_fuzz_parse_iso8601_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_parse_iso8601_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_parse_iso8601_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_parse_iso8601_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_block_file_info_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCK_FILE_INFO_DESERIALIZE=1
+test_fuzz_block_file_info_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_block_file_info_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_block_file_info_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_block_file_info_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_psbt_SOURCES = $(FUZZ_SUITE) test/fuzz/psbt.cpp
-test_fuzz_psbt_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_psbt_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_psbt_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_psbt_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_block_filter_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCK_FILTER_DESERIALIZE=1
+test_fuzz_block_filter_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_block_filter_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_block_filter_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_block_filter_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_script_SOURCES = $(FUZZ_SUITE) test/fuzz/script.cpp
-test_fuzz_script_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_script_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_script_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_script_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_block_header_and_short_txids_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCK_HEADER_AND_SHORT_TXIDS_DESERIALIZE=1
+test_fuzz_block_header_and_short_txids_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_block_header_and_short_txids_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_block_header_and_short_txids_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_block_header_and_short_txids_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_script_flags_SOURCES = $(FUZZ_SUITE) test/fuzz/script_flags.cpp
-test_fuzz_script_flags_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_script_flags_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_script_flags_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_script_flags_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_blockheader_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCKHEADER_DESERIALIZE=1
+test_fuzz_blockheader_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_blockheader_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_blockheader_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_blockheader_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_service_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_service_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DSERVICE_DESERIALIZE=1
-test_fuzz_service_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_service_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_service_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_blocklocator_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCKLOCATOR_DESERIALIZE=1
+test_fuzz_blocklocator_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_blocklocator_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_blocklocator_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_blocklocator_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_spanparsing_SOURCES = $(FUZZ_SUITE) test/fuzz/spanparsing.cpp
-test_fuzz_spanparsing_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_spanparsing_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_spanparsing_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_spanparsing_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_blockmerkleroot_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCKMERKLEROOT=1
+test_fuzz_blockmerkleroot_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_blockmerkleroot_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_blockmerkleroot_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_blockmerkleroot_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_messageheader_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_messageheader_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGEHEADER_DESERIALIZE=1
-test_fuzz_messageheader_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_messageheader_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_messageheader_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_blocktransactions_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCKTRANSACTIONS_DESERIALIZE=1
+test_fuzz_blocktransactions_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_blocktransactions_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_blocktransactions_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_blocktransactions_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_address_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_address_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DADDRESS_DESERIALIZE=1
-test_fuzz_address_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_address_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_address_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_blocktransactionsrequest_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCKTRANSACTIONSREQUEST_DESERIALIZE=1
+test_fuzz_blocktransactionsrequest_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_blocktransactionsrequest_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_blocktransactionsrequest_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_blocktransactionsrequest_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_hex_SOURCES = $(FUZZ_SUITE) test/fuzz/hex.cpp
-test_fuzz_hex_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_hex_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_hex_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_hex_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_blockundo_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCKUNDO_DESERIALIZE=1
+test_fuzz_blockundo_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_blockundo_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_blockundo_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_blockundo_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_inv_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_inv_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DINV_DESERIALIZE=1
-test_fuzz_inv_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_inv_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_inv_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_bloom_filter_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_bloom_filter_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_bloom_filter_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_bloom_filter_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_bloom_filter_SOURCES = $(FUZZ_SUITE) test/fuzz/bloom_filter.cpp
-test_fuzz_bloomfilter_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_bloomfilter_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOOMFILTER_DESERIALIZE=1
test_fuzz_bloomfilter_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_bloomfilter_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_bloomfilter_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_bloomfilter_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_bloomfilter_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
+test_fuzz_coins_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DCOINS_DESERIALIZE=1
+test_fuzz_coins_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_coins_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_coins_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_coins_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
+test_fuzz_decode_tx_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_decode_tx_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_decode_tx_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_decode_tx_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_decode_tx_SOURCES = $(FUZZ_SUITE) test/fuzz/decode_tx.cpp
+
+test_fuzz_descriptor_parse_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_descriptor_parse_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_descriptor_parse_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_descriptor_parse_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_descriptor_parse_SOURCES = $(FUZZ_SUITE) test/fuzz/descriptor_parse.cpp
-test_fuzz_diskblockindex_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_diskblockindex_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DDISKBLOCKINDEX_DESERIALIZE=1
test_fuzz_diskblockindex_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_diskblockindex_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_diskblockindex_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_diskblockindex_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_diskblockindex_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_eval_script_SOURCES = $(FUZZ_SUITE) test/fuzz/eval_script.cpp
test_fuzz_eval_script_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
test_fuzz_eval_script_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_eval_script_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_eval_script_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_eval_script_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_eval_script_SOURCES = $(FUZZ_SUITE) test/fuzz/eval_script.cpp
-test_fuzz_integer_SOURCES = $(FUZZ_SUITE) test/fuzz/integer.cpp
-test_fuzz_integer_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_integer_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_integer_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_integer_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_txoutcompressor_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_txoutcompressor_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DTXOUTCOMPRESSOR_DESERIALIZE=1
-test_fuzz_txoutcompressor_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_txoutcompressor_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_txoutcompressor_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_blocktransactions_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_blocktransactions_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCKTRANSACTIONS_DESERIALIZE=1
-test_fuzz_blocktransactions_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_blocktransactions_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_blocktransactions_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_blocktransactionsrequest_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_blocktransactionsrequest_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCKTRANSACTIONSREQUEST_DESERIALIZE=1
-test_fuzz_blocktransactionsrequest_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_blocktransactionsrequest_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_blocktransactionsrequest_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_transaction_SOURCES = $(FUZZ_SUITE) test/fuzz/transaction.cpp
-test_fuzz_transaction_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_transaction_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_transaction_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_transaction_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_addr_info_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_addr_info_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DADDR_INFO_DESERIALIZE=1
-test_fuzz_addr_info_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_addr_info_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_addr_info_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_block_file_info_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_block_file_info_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCK_FILE_INFO_DESERIALIZE=1
-test_fuzz_block_file_info_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_block_file_info_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_block_file_info_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_block_filter_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_block_filter_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCK_FILTER_DESERIALIZE=1
-test_fuzz_block_filter_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_block_filter_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_block_filter_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_block_header_and_short_txids_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_block_header_and_short_txids_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCK_HEADER_AND_SHORT_TXIDS_DESERIALIZE=1
-test_fuzz_block_header_and_short_txids_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_block_header_and_short_txids_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_block_header_and_short_txids_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_fee_rate_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_fee_rate_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DFEE_RATE_DESERIALIZE=1
test_fuzz_fee_rate_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_fee_rate_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_fee_rate_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_fee_rate_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_fee_rate_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_flat_file_pos_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_flat_file_pos_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DFLAT_FILE_POS_DESERIALIZE=1
test_fuzz_flat_file_pos_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_flat_file_pos_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_flat_file_pos_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_flat_file_pos_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_flat_file_pos_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
+test_fuzz_float_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_float_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_float_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_float_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_float_SOURCES = $(FUZZ_SUITE) test/fuzz/float.cpp
+
+test_fuzz_hex_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_hex_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_hex_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_hex_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_hex_SOURCES = $(FUZZ_SUITE) test/fuzz/hex.cpp
+
+test_fuzz_integer_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_integer_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_integer_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_integer_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_integer_SOURCES = $(FUZZ_SUITE) test/fuzz/integer.cpp
+
+test_fuzz_inv_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DINV_DESERIALIZE=1
+test_fuzz_inv_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_inv_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_inv_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_inv_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
+test_fuzz_key_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_key_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_key_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_key_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_key_SOURCES = $(FUZZ_SUITE) test/fuzz/key.cpp
+
+test_fuzz_key_io_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_key_io_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_key_io_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_key_io_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_key_io_SOURCES = $(FUZZ_SUITE) test/fuzz/key_io.cpp
-test_fuzz_key_origin_info_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_key_origin_info_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DKEY_ORIGIN_INFO_DESERIALIZE=1
test_fuzz_key_origin_info_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_key_origin_info_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_key_origin_info_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_key_origin_info_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_key_origin_info_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
+test_fuzz_locale_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_locale_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_locale_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_locale_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_locale_SOURCES = $(FUZZ_SUITE) test/fuzz/locale.cpp
-test_fuzz_merkle_block_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_merkle_block_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMERKLE_BLOCK_DESERIALIZE=1
test_fuzz_merkle_block_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_merkle_block_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_merkle_block_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_merkle_block_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_merkle_block_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
+test_fuzz_messageheader_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGEHEADER_DESERIALIZE=1
+test_fuzz_messageheader_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_messageheader_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_messageheader_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_messageheader_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
+test_fuzz_netaddr_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DNETADDR_DESERIALIZE=1
+test_fuzz_netaddr_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_netaddr_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_netaddr_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_netaddr_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
+test_fuzz_netaddress_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_netaddress_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_netaddress_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_netaddress_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_netaddress_SOURCES = $(FUZZ_SUITE) test/fuzz/netaddress.cpp
-test_fuzz_out_point_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_out_point_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DOUT_POINT_DESERIALIZE=1
test_fuzz_out_point_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_out_point_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_out_point_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_out_point_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_out_point_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_partially_signed_transaction_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_partially_signed_transaction_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DPARTIALLY_SIGNED_TRANSACTION_DESERIALIZE=1
-test_fuzz_partially_signed_transaction_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_partially_signed_transaction_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_partially_signed_transaction_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_p2p_transport_deserializer_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_p2p_transport_deserializer_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_p2p_transport_deserializer_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_p2p_transport_deserializer_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_p2p_transport_deserializer_SOURCES = $(FUZZ_SUITE) test/fuzz/p2p_transport_deserializer.cpp
+
+test_fuzz_parse_hd_keypath_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_parse_hd_keypath_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_parse_hd_keypath_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_parse_hd_keypath_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_parse_hd_keypath_SOURCES = $(FUZZ_SUITE) test/fuzz/parse_hd_keypath.cpp
+
+test_fuzz_parse_iso8601_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_parse_iso8601_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_parse_iso8601_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_parse_iso8601_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_parse_iso8601_SOURCES = $(FUZZ_SUITE) test/fuzz/parse_iso8601.cpp
+
+test_fuzz_parse_numbers_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_parse_numbers_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_parse_numbers_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_parse_numbers_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_parse_numbers_SOURCES = $(FUZZ_SUITE) test/fuzz/parse_numbers.cpp
+
+test_fuzz_parse_script_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_parse_script_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_parse_script_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_parse_script_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_parse_script_SOURCES = $(FUZZ_SUITE) test/fuzz/parse_script.cpp
+
+test_fuzz_parse_univalue_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_parse_univalue_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_parse_univalue_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_parse_univalue_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_parse_univalue_SOURCES = $(FUZZ_SUITE) test/fuzz/parse_univalue.cpp
-test_fuzz_partial_merkle_tree_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_partial_merkle_tree_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DPARTIAL_MERKLE_TREE_DESERIALIZE=1
test_fuzz_partial_merkle_tree_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_partial_merkle_tree_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_partial_merkle_tree_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_partial_merkle_tree_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_partial_merkle_tree_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
+test_fuzz_partially_signed_transaction_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DPARTIALLY_SIGNED_TRANSACTION_DESERIALIZE=1
+test_fuzz_partially_signed_transaction_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_partially_signed_transaction_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_partially_signed_transaction_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_partially_signed_transaction_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_prefilled_transaction_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_prefilled_transaction_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DPREFILLED_TRANSACTION_DESERIALIZE=1
test_fuzz_prefilled_transaction_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_prefilled_transaction_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_prefilled_transaction_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_prefilled_transaction_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_prefilled_transaction_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
+test_fuzz_process_message_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_process_message_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_addr_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=addr
+test_fuzz_process_message_addr_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_addr_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_addr_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_addr_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_block_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=block
+test_fuzz_process_message_block_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_block_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_block_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_block_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_blocktxn_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=blocktxn
+test_fuzz_process_message_blocktxn_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_blocktxn_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_blocktxn_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_blocktxn_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_cmpctblock_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=cmpctblock
+test_fuzz_process_message_cmpctblock_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_cmpctblock_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_cmpctblock_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_cmpctblock_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_feefilter_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=feefilter
+test_fuzz_process_message_feefilter_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_feefilter_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_feefilter_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_feefilter_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_filteradd_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=filteradd
+test_fuzz_process_message_filteradd_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_filteradd_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_filteradd_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_filteradd_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_filterclear_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=filterclear
+test_fuzz_process_message_filterclear_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_filterclear_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_filterclear_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_filterclear_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_filterload_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=filterload
+test_fuzz_process_message_filterload_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_filterload_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_filterload_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_filterload_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_getaddr_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=getaddr
+test_fuzz_process_message_getaddr_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_getaddr_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_getaddr_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_getaddr_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_getblocks_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=getblocks
+test_fuzz_process_message_getblocks_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_getblocks_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_getblocks_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_getblocks_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_getblocktxn_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=getblocktxn
+test_fuzz_process_message_getblocktxn_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_getblocktxn_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_getblocktxn_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_getblocktxn_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_getdata_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=getdata
+test_fuzz_process_message_getdata_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_getdata_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_getdata_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_getdata_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_getheaders_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=getheaders
+test_fuzz_process_message_getheaders_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_getheaders_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_getheaders_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_getheaders_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_headers_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=headers
+test_fuzz_process_message_headers_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_headers_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_headers_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_headers_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_inv_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=inv
+test_fuzz_process_message_inv_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_inv_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_inv_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_inv_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_mempool_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=mempool
+test_fuzz_process_message_mempool_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_mempool_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_mempool_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_mempool_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_notfound_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=notfound
+test_fuzz_process_message_notfound_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_notfound_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_notfound_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_notfound_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_ping_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=ping
+test_fuzz_process_message_ping_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_ping_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_ping_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_ping_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_pong_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=pong
+test_fuzz_process_message_pong_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_pong_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_pong_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_pong_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_sendcmpct_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=sendcmpct
+test_fuzz_process_message_sendcmpct_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_sendcmpct_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_sendcmpct_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_sendcmpct_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_sendheaders_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=sendheaders
+test_fuzz_process_message_sendheaders_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_sendheaders_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_sendheaders_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_sendheaders_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_tx_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=tx
+test_fuzz_process_message_tx_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_tx_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_tx_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_tx_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_verack_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=verack
+test_fuzz_process_message_verack_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_verack_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_verack_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_verack_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_version_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=version
+test_fuzz_process_message_version_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_version_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_version_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_version_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_psbt_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_psbt_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_psbt_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_psbt_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_psbt_SOURCES = $(FUZZ_SUITE) test/fuzz/psbt.cpp
-test_fuzz_psbt_input_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_psbt_input_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DPSBT_INPUT_DESERIALIZE=1
test_fuzz_psbt_input_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_psbt_input_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_psbt_input_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_psbt_input_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_psbt_input_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_psbt_output_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_psbt_output_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DPSBT_OUTPUT_DESERIALIZE=1
test_fuzz_psbt_output_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_psbt_output_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_psbt_output_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_psbt_output_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_psbt_output_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_pub_key_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_pub_key_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DPUB_KEY_DESERIALIZE=1
test_fuzz_pub_key_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_pub_key_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_pub_key_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_pub_key_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_pub_key_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
+test_fuzz_rolling_bloom_filter_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_rolling_bloom_filter_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_rolling_bloom_filter_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_rolling_bloom_filter_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_rolling_bloom_filter_SOURCES = $(FUZZ_SUITE) test/fuzz/rolling_bloom_filter.cpp
+
+test_fuzz_script_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_script_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_script_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_script_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_script_SOURCES = $(FUZZ_SUITE) test/fuzz/script.cpp
-test_fuzz_script_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_script_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DSCRIPT_DESERIALIZE=1
test_fuzz_script_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_script_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_script_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_script_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_script_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
+test_fuzz_script_flags_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_script_flags_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_script_flags_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_script_flags_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_script_flags_SOURCES = $(FUZZ_SUITE) test/fuzz/script_flags.cpp
+
+test_fuzz_script_ops_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_script_ops_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_script_ops_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_script_ops_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_script_ops_SOURCES = $(FUZZ_SUITE) test/fuzz/script_ops.cpp
+
+test_fuzz_scriptnum_ops_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_scriptnum_ops_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_scriptnum_ops_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_scriptnum_ops_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_scriptnum_ops_SOURCES = $(FUZZ_SUITE) test/fuzz/scriptnum_ops.cpp
+
+test_fuzz_service_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DSERVICE_DESERIALIZE=1
+test_fuzz_service_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_service_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_service_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_service_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
+test_fuzz_spanparsing_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_spanparsing_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_spanparsing_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_spanparsing_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_spanparsing_SOURCES = $(FUZZ_SUITE) test/fuzz/spanparsing.cpp
+
+test_fuzz_strprintf_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_strprintf_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_strprintf_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_strprintf_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_strprintf_SOURCES = $(FUZZ_SUITE) test/fuzz/strprintf.cpp
-test_fuzz_sub_net_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
test_fuzz_sub_net_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DSUB_NET_DESERIALIZE=1
test_fuzz_sub_net_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_sub_net_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_sub_net_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_sub_net_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_sub_net_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_tx_in_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_tx_in_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DTX_IN_DESERIALIZE=1
-test_fuzz_tx_in_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_tx_in_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_tx_in_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_transaction_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_transaction_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_transaction_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_transaction_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_transaction_SOURCES = $(FUZZ_SUITE) test/fuzz/transaction.cpp
-test_fuzz_tx_in_SOURCES = $(FUZZ_SUITE) test/fuzz/tx_in.cpp
test_fuzz_tx_in_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
test_fuzz_tx_in_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_tx_in_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_tx_in_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_tx_in_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_tx_in_SOURCES = $(FUZZ_SUITE) test/fuzz/tx_in.cpp
+
+test_fuzz_tx_in_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DTX_IN_DESERIALIZE=1
+test_fuzz_tx_in_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_tx_in_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_tx_in_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_tx_in_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_tx_out_SOURCES = $(FUZZ_SUITE) test/fuzz/tx_out.cpp
test_fuzz_tx_out_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
test_fuzz_tx_out_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_tx_out_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_tx_out_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_tx_out_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_tx_out_SOURCES = $(FUZZ_SUITE) test/fuzz/tx_out.cpp
-test_fuzz_parse_hd_keypath_SOURCES = $(FUZZ_SUITE) test/fuzz/parse_hd_keypath.cpp
-test_fuzz_parse_hd_keypath_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_parse_hd_keypath_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_parse_hd_keypath_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_parse_hd_keypath_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_parse_script_SOURCES = $(FUZZ_SUITE) test/fuzz/parse_script.cpp
-test_fuzz_parse_script_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_parse_script_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_parse_script_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_parse_script_LDADD = $(FUZZ_SUITE_LD_COMMON)
-
-test_fuzz_parse_numbers_SOURCES = $(FUZZ_SUITE) test/fuzz/parse_numbers.cpp
-test_fuzz_parse_numbers_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_parse_numbers_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_parse_numbers_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_parse_numbers_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_txoutcompressor_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DTXOUTCOMPRESSOR_DESERIALIZE=1
+test_fuzz_txoutcompressor_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_txoutcompressor_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_txoutcompressor_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_txoutcompressor_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
-test_fuzz_parse_univalue_SOURCES = $(FUZZ_SUITE) test/fuzz/parse_univalue.cpp
-test_fuzz_parse_univalue_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
-test_fuzz_parse_univalue_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-test_fuzz_parse_univalue_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
-test_fuzz_parse_univalue_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_txundo_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DTXUNDO_DESERIALIZE=1
+test_fuzz_txundo_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_txundo_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_txundo_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_txundo_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
endif # ENABLE_FUZZ
@@ -615,7 +885,7 @@ endif
%.cpp.test: %.cpp
@echo Running tests: `cat $< | grep -E "(BOOST_FIXTURE_TEST_SUITE\\(|BOOST_AUTO_TEST_SUITE\\()" | cut -d '(' -f 2 | cut -d ',' -f 1 | cut -d ')' -f 1` from $<
- $(AM_V_at)$(TEST_BINARY) -l test_suite -t "`cat $< | grep -E "(BOOST_FIXTURE_TEST_SUITE\\(|BOOST_AUTO_TEST_SUITE\\()" | cut -d '(' -f 2 | cut -d ',' -f 1 | cut -d ')' -f 1`" > $<.log 2>&1 || (cat $<.log && false)
+ $(AM_V_at)$(TEST_BINARY) --catch_system_errors=no -l test_suite -t "`cat $< | grep -E "(BOOST_FIXTURE_TEST_SUITE\\(|BOOST_AUTO_TEST_SUITE\\()" | cut -d '(' -f 2 | cut -d ',' -f 1 | cut -d ')' -f 1`" > $<.log 2>&1 || (cat $<.log && false)
%.json.h: %.json
@$(MKDIR_P) $(@D)
@@ -626,3 +896,12 @@ endif
echo "};};"; \
} > "$@.new" && mv -f "$@.new" "$@"
@echo "Generated $@"
+
+%.raw.h: %.raw
+ @$(MKDIR_P) $(@D)
+ @{ \
+ echo "static unsigned const char $(*F)_raw[] = {" && \
+ $(HEXDUMP) -v -e '8/1 "0x%02x, "' -e '"\n"' $< | $(SED) -e 's/0x ,//g' && \
+ echo "};"; \
+ } > "$@.new" && mv -f "$@.new" "$@"
+ @echo "Generated $@"
diff --git a/src/addrman.cpp b/src/addrman.cpp
index 065fdbe4c6..2f8a3a0bd5 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -6,21 +6,28 @@
#include <addrman.h>
#include <hash.h>
+#include <logging.h>
#include <serialize.h>
-int CAddrInfo::GetTriedBucket(const uint256& nKey) const
+int CAddrInfo::GetTriedBucket(const uint256& nKey, const std::vector<bool> &asmap) const
{
uint64_t hash1 = (CHashWriter(SER_GETHASH, 0) << nKey << GetKey()).GetCheapHash();
- uint64_t hash2 = (CHashWriter(SER_GETHASH, 0) << nKey << GetGroup() << (hash1 % ADDRMAN_TRIED_BUCKETS_PER_GROUP)).GetCheapHash();
- return hash2 % ADDRMAN_TRIED_BUCKET_COUNT;
+ uint64_t hash2 = (CHashWriter(SER_GETHASH, 0) << nKey << GetGroup(asmap) << (hash1 % ADDRMAN_TRIED_BUCKETS_PER_GROUP)).GetCheapHash();
+ int tried_bucket = hash2 % ADDRMAN_TRIED_BUCKET_COUNT;
+ uint32_t mapped_as = GetMappedAS(asmap);
+ LogPrint(BCLog::NET, "IP %s mapped to AS%i belongs to tried bucket %i\n", ToStringIP(), mapped_as, tried_bucket);
+ return tried_bucket;
}
-int CAddrInfo::GetNewBucket(const uint256& nKey, const CNetAddr& src) const
+int CAddrInfo::GetNewBucket(const uint256& nKey, const CNetAddr& src, const std::vector<bool> &asmap) const
{
- std::vector<unsigned char> vchSourceGroupKey = src.GetGroup();
- uint64_t hash1 = (CHashWriter(SER_GETHASH, 0) << nKey << GetGroup() << vchSourceGroupKey).GetCheapHash();
+ std::vector<unsigned char> vchSourceGroupKey = src.GetGroup(asmap);
+ uint64_t hash1 = (CHashWriter(SER_GETHASH, 0) << nKey << GetGroup(asmap) << vchSourceGroupKey).GetCheapHash();
uint64_t hash2 = (CHashWriter(SER_GETHASH, 0) << nKey << vchSourceGroupKey << (hash1 % ADDRMAN_NEW_BUCKETS_PER_SOURCE_GROUP)).GetCheapHash();
- return hash2 % ADDRMAN_NEW_BUCKET_COUNT;
+ int new_bucket = hash2 % ADDRMAN_NEW_BUCKET_COUNT;
+ uint32_t mapped_as = GetMappedAS(asmap);
+ LogPrint(BCLog::NET, "IP %s mapped to AS%i belongs to new bucket %i\n", ToStringIP(), mapped_as, new_bucket);
+ return new_bucket;
}
int CAddrInfo::GetBucketPosition(const uint256 &nKey, bool fNew, int nBucket) const
@@ -153,7 +160,7 @@ void CAddrMan::MakeTried(CAddrInfo& info, int nId)
assert(info.nRefCount == 0);
// which tried bucket to move the entry to
- int nKBucket = info.GetTriedBucket(nKey);
+ int nKBucket = info.GetTriedBucket(nKey, m_asmap);
int nKBucketPos = info.GetBucketPosition(nKey, false, nKBucket);
// first make space to add it (the existing tried entry there is moved to new, deleting whatever is there).
@@ -169,7 +176,7 @@ void CAddrMan::MakeTried(CAddrInfo& info, int nId)
nTried--;
// find which new bucket it belongs to
- int nUBucket = infoOld.GetNewBucket(nKey);
+ int nUBucket = infoOld.GetNewBucket(nKey, m_asmap);
int nUBucketPos = infoOld.GetBucketPosition(nKey, true, nUBucket);
ClearNew(nUBucket, nUBucketPos);
assert(vvNew[nUBucket][nUBucketPos] == -1);
@@ -233,7 +240,7 @@ void CAddrMan::Good_(const CService& addr, bool test_before_evict, int64_t nTime
return;
// which tried bucket to move the entry to
- int tried_bucket = info.GetTriedBucket(nKey);
+ int tried_bucket = info.GetTriedBucket(nKey, m_asmap);
int tried_bucket_pos = info.GetBucketPosition(nKey, false, tried_bucket);
// Will moving this address into tried evict another entry?
@@ -301,7 +308,7 @@ bool CAddrMan::Add_(const CAddress& addr, const CNetAddr& source, int64_t nTimeP
fNew = true;
}
- int nUBucket = pinfo->GetNewBucket(nKey, source);
+ int nUBucket = pinfo->GetNewBucket(nKey, source, m_asmap);
int nUBucketPos = pinfo->GetBucketPosition(nKey, true, nUBucket);
if (vvNew[nUBucket][nUBucketPos] != nId) {
bool fInsert = vvNew[nUBucket][nUBucketPos] == -1;
@@ -439,7 +446,7 @@ int CAddrMan::Check_()
if (vvTried[n][i] != -1) {
if (!setTried.count(vvTried[n][i]))
return -11;
- if (mapInfo[vvTried[n][i]].GetTriedBucket(nKey) != n)
+ if (mapInfo[vvTried[n][i]].GetTriedBucket(nKey, m_asmap) != n)
return -17;
if (mapInfo[vvTried[n][i]].GetBucketPosition(nKey, false, n) != i)
return -18;
@@ -545,7 +552,7 @@ void CAddrMan::ResolveCollisions_()
CAddrInfo& info_new = mapInfo[id_new];
// Which tried bucket to move the entry to.
- int tried_bucket = info_new.GetTriedBucket(nKey);
+ int tried_bucket = info_new.GetTriedBucket(nKey, m_asmap);
int tried_bucket_pos = info_new.GetBucketPosition(nKey, false, tried_bucket);
if (!info_new.IsValid()) { // id_new may no longer map to a valid address
erase_collision = true;
@@ -609,10 +616,33 @@ CAddrInfo CAddrMan::SelectTriedCollision_()
CAddrInfo& newInfo = mapInfo[id_new];
// which tried bucket to move the entry to
- int tried_bucket = newInfo.GetTriedBucket(nKey);
+ int tried_bucket = newInfo.GetTriedBucket(nKey, m_asmap);
int tried_bucket_pos = newInfo.GetBucketPosition(nKey, false, tried_bucket);
int id_old = vvTried[tried_bucket][tried_bucket_pos];
return mapInfo[id_old];
}
+
+std::vector<bool> CAddrMan::DecodeAsmap(fs::path path)
+{
+ std::vector<bool> bits;
+ FILE *filestr = fsbridge::fopen(path, "rb");
+ CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
+ if (file.IsNull()) {
+ LogPrintf("Failed to open asmap file from disk\n");
+ return bits;
+ }
+ fseek(filestr, 0, SEEK_END);
+ int length = ftell(filestr);
+ LogPrintf("Opened asmap file %s (%d bytes) from disk\n", path, length);
+ fseek(filestr, 0, SEEK_SET);
+ char cur_byte;
+ for (int i = 0; i < length; ++i) {
+ file >> cur_byte;
+ for (int bit = 0; bit < 8; ++bit) {
+ bits.push_back((cur_byte >> bit) & 1);
+ }
+ }
+ return bits;
+}
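DecodeAsmap above unpacks the file into one boolean per input bit, least-significant bit first within each byte. A minimal standalone sketch of the same unpacking, assuming only the standard library (ReadBits and its path argument are illustrative names, not part of the patch):

    #include <cstdio>
    #include <vector>

    // Unpack a binary file into one bool per bit, LSB first within each byte,
    // mirroring the loop in CAddrMan::DecodeAsmap above.
    std::vector<bool> ReadBits(const char* path)
    {
        std::vector<bool> bits;
        if (std::FILE* f = std::fopen(path, "rb")) {
            int c;
            while ((c = std::fgetc(f)) != EOF) {
                for (int bit = 0; bit < 8; ++bit) {
                    bits.push_back((c >> bit) & 1);
                }
            }
            std::fclose(f);
        }
        return bits;
    }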
diff --git a/src/addrman.h b/src/addrman.h
index c001d59da5..8e82020df0 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -1,11 +1,12 @@
// Copyright (c) 2012 Pieter Wuille
-// Copyright (c) 2012-2019 The Bitcoin Core developers
+// Copyright (c) 2012-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_ADDRMAN_H
#define BITCOIN_ADDRMAN_H
+#include <clientversion.h>
#include <netaddress.h>
#include <protocol.h>
#include <random.h>
@@ -13,9 +14,13 @@
#include <timedata.h>
#include <util/system.h>
+#include <fs.h>
+#include <hash.h>
+#include <iostream>
#include <map>
#include <set>
#include <stdint.h>
+#include <streams.h>
#include <vector>
/**
@@ -68,15 +73,15 @@ public:
}
//! Calculate in which "tried" bucket this entry belongs
- int GetTriedBucket(const uint256 &nKey) const;
+ int GetTriedBucket(const uint256 &nKey, const std::vector<bool> &asmap) const;
//! Calculate in which "new" bucket this entry belongs, given a certain source
- int GetNewBucket(const uint256 &nKey, const CNetAddr& src) const;
+ int GetNewBucket(const uint256 &nKey, const CNetAddr& src, const std::vector<bool> &asmap) const;
//! Calculate in which "new" bucket this entry belongs, using its default source
- int GetNewBucket(const uint256 &nKey) const
+ int GetNewBucket(const uint256 &nKey, const std::vector<bool> &asmap) const
{
- return GetNewBucket(nKey, source);
+ return GetNewBucket(nKey, source, asmap);
}
//! Calculate in which position of a bucket to store this entry.
@@ -170,9 +175,10 @@ static const int64_t ADDRMAN_TEST_WINDOW = 40*60; // 40 minutes
*/
class CAddrMan
{
+friend class CAddrManTest;
protected:
//! critical section to protect the inner data structures
- mutable CCriticalSection cs;
+ mutable RecursiveMutex cs;
private:
//! last used nId
@@ -264,9 +270,29 @@ protected:
void SetServices_(const CService &addr, ServiceFlags nServices) EXCLUSIVE_LOCKS_REQUIRED(cs);
public:
+ // Compressed IP->ASN mapping, loaded from a file when a node starts.
+ // Should always be empty if no file was provided.
+ // This mapping is then used for bucketing nodes in Addrman.
+ //
+ // If asmap is provided, nodes will be bucketed by
+ // the AS they belong to, in order to make it impossible for a node
+ // to connect to several nodes hosted in a single AS.
+ // This is done in response to the Erebus attack, and also to generally
+ // diversify the connections every node creates,
+ // which is especially useful when a large fraction of nodes
+ // operate under a few cloud providers.
+ //
+ // If a new asmap is provided, the existing records
+ // will be re-bucketed accordingly.
+ std::vector<bool> m_asmap;
+
+ // Read the asmap from the provided binary file
+ static std::vector<bool> DecodeAsmap(fs::path path);
+
+
/**
* serialized format:
- * * version byte (currently 1)
+ * * version byte (1 for pre-asmap files, 2 for files including asmap version)
* * 0x20 + nKey (serialized as if it were a vector, for backward compatibility)
* * nNew
* * nTried
@@ -298,7 +324,7 @@ public:
{
LOCK(cs);
- unsigned char nVersion = 1;
+ unsigned char nVersion = 2;
s << nVersion;
s << ((unsigned char)32);
s << nKey;
@@ -341,6 +367,13 @@ public:
}
}
}
+ // Store the asmap version after the bucket entries so that it
+ // can be ignored by older clients for backward compatibility.
+ uint256 asmap_version;
+ if (m_asmap.size() != 0) {
+ asmap_version = SerializeHash(m_asmap);
+ }
+ s << asmap_version;
}
template<typename Stream>
@@ -349,7 +382,6 @@ public:
LOCK(cs);
Clear();
-
unsigned char nVersion;
s >> nVersion;
unsigned char nKeySize;
@@ -379,16 +411,6 @@ public:
mapAddr[info] = n;
info.nRandomPos = vRandom.size();
vRandom.push_back(n);
- if (nVersion != 1 || nUBuckets != ADDRMAN_NEW_BUCKET_COUNT) {
- // In case the new table data cannot be used (nVersion unknown, or bucket count wrong),
- // immediately try to give them a reference based on their primary source address.
- int nUBucket = info.GetNewBucket(nKey);
- int nUBucketPos = info.GetBucketPosition(nKey, true, nUBucket);
- if (vvNew[nUBucket][nUBucketPos] == -1) {
- vvNew[nUBucket][nUBucketPos] = n;
- info.nRefCount++;
- }
- }
}
nIdCount = nNew;
@@ -397,7 +419,7 @@ public:
for (int n = 0; n < nTried; n++) {
CAddrInfo info;
s >> info;
- int nKBucket = info.GetTriedBucket(nKey);
+ int nKBucket = info.GetTriedBucket(nKey, m_asmap);
int nKBucketPos = info.GetBucketPosition(nKey, false, nKBucket);
if (vvTried[nKBucket][nKBucketPos] == -1) {
info.nRandomPos = vRandom.size();
@@ -413,7 +435,9 @@ public:
}
nTried -= nLost;
- // Deserialize positions in the new table (if possible).
+ // Store positions in the new table buckets to apply later (if possible).
+ std::map<int, int> entryToBucket; // Represents which entry belonged to which bucket when serializing
+
for (int bucket = 0; bucket < nUBuckets; bucket++) {
int nSize = 0;
s >> nSize;
@@ -421,12 +445,38 @@ public:
int nIndex = 0;
s >> nIndex;
if (nIndex >= 0 && nIndex < nNew) {
- CAddrInfo &info = mapInfo[nIndex];
- int nUBucketPos = info.GetBucketPosition(nKey, true, bucket);
- if (nVersion == 1 && nUBuckets == ADDRMAN_NEW_BUCKET_COUNT && vvNew[bucket][nUBucketPos] == -1 && info.nRefCount < ADDRMAN_NEW_BUCKETS_PER_ADDRESS) {
- info.nRefCount++;
- vvNew[bucket][nUBucketPos] = nIndex;
- }
+ entryToBucket[nIndex] = bucket;
+ }
+ }
+ }
+
+ uint256 supplied_asmap_version;
+ if (m_asmap.size() != 0) {
+ supplied_asmap_version = SerializeHash(m_asmap);
+ }
+ uint256 serialized_asmap_version;
+ if (nVersion > 1) {
+ s >> serialized_asmap_version;
+ }
+
+ for (int n = 0; n < nNew; n++) {
+ CAddrInfo &info = mapInfo[n];
+ int bucket = entryToBucket[n];
+ int nUBucketPos = info.GetBucketPosition(nKey, true, bucket);
+ if (nVersion == 2 && nUBuckets == ADDRMAN_NEW_BUCKET_COUNT && vvNew[bucket][nUBucketPos] == -1 &&
+ info.nRefCount < ADDRMAN_NEW_BUCKETS_PER_ADDRESS && serialized_asmap_version == supplied_asmap_version) {
+ // Bucketing has not changed, so use the existing bucket positions for the new table
+ vvNew[bucket][nUBucketPos] = n;
+ info.nRefCount++;
+ } else {
+ // In case the new table data cannot be used (nVersion unknown, bucket count wrong or new asmap),
+ // try to give them a reference based on their primary source address.
+ LogPrint(BCLog::ADDRMAN, "Bucketing method was updated, re-bucketing addrman entries from disk\n");
+ bucket = info.GetNewBucket(nKey, m_asmap);
+ nUBucketPos = info.GetBucketPosition(nKey, true, bucket);
+ if (vvNew[bucket][nUBucketPos] == -1) {
+ vvNew[bucket][nUBucketPos] = n;
+ info.nRefCount++;
}
}
}
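With the new signatures above, callers thread the (possibly empty) asmap through every bucket calculation; an empty vector effectively keeps the pre-asmap netgroup bucketing. A hedged usage sketch, where path, info and nKey stand for an asmap file path, a CAddrInfo entry and the addrman secret key already in scope (all illustrative, not code from the patch):

    // Load the asmap once at startup; an empty result means no asmap was provided.
    std::vector<bool> asmap = CAddrMan::DecodeAsmap(path);

    // Ask where the entry would live in the tried table under this asmap.
    int tried_bucket = info.GetTriedBucket(nKey, asmap);
    int bucket_pos = info.GetBucketPosition(nKey, /*fNew=*/false, tried_bucket);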
diff --git a/src/banman.h b/src/banman.h
index 3997ca8096..8984874914 100644
--- a/src/banman.h
+++ b/src/banman.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_BANMAN_H
@@ -59,7 +59,7 @@ private:
//!clean unused entries (if bantime has expired)
void SweepBanned();
- CCriticalSection m_cs_banned;
+ RecursiveMutex m_cs_banned;
banmap_t m_banned GUARDED_BY(m_cs_banned);
bool m_is_dirty GUARDED_BY(m_cs_banned);
CClientUIInterface* m_client_interface = nullptr;
diff --git a/src/bench/bench.cpp b/src/bench/bench.cpp
index 745fd0966d..5cf7e43f4b 100644
--- a/src/bench/bench.cpp
+++ b/src/bench/bench.cpp
@@ -16,6 +16,7 @@
#include <regex>
const RegTestingSetup* g_testing_setup = nullptr;
+const std::function<void(const std::string&)> G_TEST_LOG_FUN{};
void benchmark::ConsolePrinter::header()
{
@@ -125,6 +126,7 @@ void benchmark::BenchRunner::RunAll(Printer& printer, uint64_t num_evals, double
}
if (!std::regex_match(p.first, baseMatch, reFilter)) {
+ g_testing_setup = nullptr;
continue;
}
diff --git a/src/bench/bench.h b/src/bench/bench.h
index e2af2a8856..6b7a0f76d1 100644
--- a/src/bench/bench.h
+++ b/src/bench/bench.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2015-2019 The Bitcoin Core developers
+// Copyright (c) 2015-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/bench/ccoins_caching.cpp b/src/bench/ccoins_caching.cpp
index c313029ea8..e9dd40293f 100644
--- a/src/bench/ccoins_caching.cpp
+++ b/src/bench/ccoins_caching.cpp
@@ -6,47 +6,10 @@
#include <coins.h>
#include <policy/policy.h>
#include <script/signingprovider.h>
+#include <test/util/transaction_utils.h>
#include <vector>
-// FIXME: Dedup with SetupDummyInputs in test/transaction_tests.cpp.
-//
-// Helper: create two dummy transactions, each with
-// two outputs. The first has 11 and 50 COIN outputs
-// paid to a TX_PUBKEY, the second 21 and 22 COIN outputs
-// paid to a TX_PUBKEYHASH.
-//
-static std::vector<CMutableTransaction>
-SetupDummyInputs(FillableSigningProvider& keystoreRet, CCoinsViewCache& coinsRet)
-{
- std::vector<CMutableTransaction> dummyTransactions;
- dummyTransactions.resize(2);
-
- // Add some keys to the keystore:
- CKey key[4];
- for (int i = 0; i < 4; i++) {
- key[i].MakeNewKey(i % 2);
- keystoreRet.AddKey(key[i]);
- }
-
- // Create some dummy input transactions
- dummyTransactions[0].vout.resize(2);
- dummyTransactions[0].vout[0].nValue = 11 * COIN;
- dummyTransactions[0].vout[0].scriptPubKey << ToByteVector(key[0].GetPubKey()) << OP_CHECKSIG;
- dummyTransactions[0].vout[1].nValue = 50 * COIN;
- dummyTransactions[0].vout[1].scriptPubKey << ToByteVector(key[1].GetPubKey()) << OP_CHECKSIG;
- AddCoins(coinsRet, CTransaction(dummyTransactions[0]), 0);
-
- dummyTransactions[1].vout.resize(2);
- dummyTransactions[1].vout[0].nValue = 21 * COIN;
- dummyTransactions[1].vout[0].scriptPubKey = GetScriptForDestination(PKHash(key[2].GetPubKey()));
- dummyTransactions[1].vout[1].nValue = 22 * COIN;
- dummyTransactions[1].vout[1].scriptPubKey = GetScriptForDestination(PKHash(key[3].GetPubKey()));
- AddCoins(coinsRet, CTransaction(dummyTransactions[1]), 0);
-
- return dummyTransactions;
-}
-
// Microbenchmark for simple accesses to a CCoinsViewCache database. Note from
// laanwj, "replicating the actual usage patterns of the client is hard though,
// many times micro-benchmarks of the database showed completely different
@@ -58,7 +21,8 @@ static void CCoinsCaching(benchmark::State& state)
FillableSigningProvider keystore;
CCoinsView coinsDummy;
CCoinsViewCache coins(&coinsDummy);
- std::vector<CMutableTransaction> dummyTransactions = SetupDummyInputs(keystore, coins);
+ std::vector<CMutableTransaction> dummyTransactions =
+ SetupDummyInputs(keystore, coins, {11 * COIN, 50 * COIN, 21 * COIN, 22 * COIN});
CMutableTransaction t1;
t1.vin.resize(3);
diff --git a/src/bench/coin_selection.cpp b/src/bench/coin_selection.cpp
index de8e2e5e8f..d6d5e67c5b 100644
--- a/src/bench/coin_selection.cpp
+++ b/src/bench/coin_selection.cpp
@@ -31,7 +31,8 @@ static void CoinSelection(benchmark::State& state)
{
NodeContext node;
auto chain = interfaces::MakeChain(node);
- const CWallet wallet(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
+ CWallet wallet(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
+ wallet.SetupLegacyScriptPubKeyMan();
std::vector<std::unique_ptr<CWalletTx>> wtxs;
LOCK(wallet.cs_wallet);
@@ -64,7 +65,7 @@ static void CoinSelection(benchmark::State& state)
typedef std::set<CInputCoin> CoinSet;
static NodeContext testNode;
static auto testChain = interfaces::MakeChain(testNode);
-static const CWallet testWallet(testChain.get(), WalletLocation(), WalletDatabase::CreateDummy());
+static CWallet testWallet(testChain.get(), WalletLocation(), WalletDatabase::CreateDummy());
std::vector<std::unique_ptr<CWalletTx>> wtxn;
// Copied from src/wallet/test/coinselector_tests.cpp
@@ -93,6 +94,7 @@ static CAmount make_hard_case(int utxos, std::vector<OutputGroup>& utxo_pool)
static void BnBExhaustion(benchmark::State& state)
{
// Setup
+ testWallet.SetupLegacyScriptPubKeyMan();
std::vector<OutputGroup> utxo_pool;
CoinSet selection;
CAmount value_ret = 0;
diff --git a/src/bench/examples.cpp b/src/bench/examples.cpp
index 60a4fbf0ba..a2fdab5609 100644
--- a/src/bench/examples.cpp
+++ b/src/bench/examples.cpp
@@ -10,7 +10,7 @@
static void Sleep100ms(benchmark::State& state)
{
while (state.KeepRunning()) {
- MilliSleep(100);
+ UninterruptibleSleep(std::chrono::milliseconds{100});
}
}
diff --git a/src/bench/wallet_balance.cpp b/src/bench/wallet_balance.cpp
index da94afd62b..62568a9da5 100644
--- a/src/bench/wallet_balance.cpp
+++ b/src/bench/wallet_balance.cpp
@@ -20,6 +20,7 @@ static void WalletBalance(benchmark::State& state, const bool set_dirty, const b
std::unique_ptr<interfaces::Chain> chain = interfaces::MakeChain(node);
CWallet wallet{chain.get(), WalletLocation(), WalletDatabase::CreateMock()};
{
+ wallet.SetupLegacyScriptPubKeyMan();
bool first_run;
if (wallet.LoadWallet(first_run) != DBErrors::LOAD_OK) assert(false);
wallet.handleNotifications();
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index c085095a2b..6982eaab61 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -524,7 +524,7 @@ static int CommandLineRPC(int argc, char *argv[])
}
catch (const CConnectionFailed&) {
if (fWait)
- MilliSleep(1000);
+ UninterruptibleSleep(std::chrono::milliseconds{1000});
else
throw;
}
diff --git a/src/bitcoin-wallet.cpp b/src/bitcoin-wallet.cpp
index 68abcbfdf1..7f1a4a114b 100644
--- a/src/bitcoin-wallet.cpp
+++ b/src/bitcoin-wallet.cpp
@@ -41,7 +41,7 @@ static bool WalletAppInit(int argc, char* argv[])
}
if (argc < 2 || HelpRequested(gArgs)) {
std::string usage = strprintf("%s bitcoin-wallet version", PACKAGE_NAME) + " " + FormatFullVersion() + "\n\n" +
- "bitcoin-wallet is an offline tool for creating and interacting with Bitcoin Core wallet files.\n" +
+ "bitcoin-wallet is an offline tool for creating and interacting with " PACKAGE_NAME " wallet files.\n" +
"By default bitcoin-wallet will act on wallets in the default mainnet wallet directory in the datadir.\n" +
"To change the target wallet, use the -datadir, -wallet and -testnet/-regtest arguments.\n\n" +
"Usage:\n" +
diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp
index 4b5cea4849..e284dce0d5 100644
--- a/src/bitcoind.cpp
+++ b/src/bitcoind.cpp
@@ -29,7 +29,7 @@ static void WaitForShutdown(NodeContext& node)
{
while (!ShutdownRequested())
{
- MilliSleep(200);
+ UninterruptibleSleep(std::chrono::milliseconds{200});
}
Interrupt(node);
}
diff --git a/src/blockencodings.h b/src/blockencodings.h
index 55ed8989bb..be50166cfc 100644
--- a/src/blockencodings.h
+++ b/src/blockencodings.h
@@ -10,18 +10,29 @@
class CTxMemPool;
-// Dumb helper to handle CTransaction compression at serialize-time
-struct TransactionCompressor {
-private:
- CTransactionRef& tx;
-public:
- explicit TransactionCompressor(CTransactionRef& txIn) : tx(txIn) {}
+// Transaction compression schemes for compact block relay can be introduced by writing
+// an actual formatter here.
+using TransactionCompression = DefaultFormatter;
- ADD_SERIALIZE_METHODS;
+class DifferenceFormatter
+{
+ uint64_t m_shift = 0;
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(tx); //TODO: Compress tx encoding
+public:
+ template<typename Stream, typename I>
+ void Ser(Stream& s, I v)
+ {
+ if (v < m_shift || v >= std::numeric_limits<uint64_t>::max()) throw std::ios_base::failure("differential value overflow");
+ WriteCompactSize(s, v - m_shift);
+ m_shift = uint64_t(v) + 1;
+ }
+ template<typename Stream, typename I>
+ void Unser(Stream& s, I& v)
+ {
+ uint64_t n = ReadCompactSize(s);
+ m_shift += n;
+ if (m_shift < n || m_shift >= std::numeric_limits<uint64_t>::max() || m_shift < std::numeric_limits<I>::min() || m_shift > std::numeric_limits<I>::max()) throw std::ios_base::failure("differential value overflow");
+ v = I(m_shift++);
}
};
@@ -31,39 +42,9 @@ public:
uint256 blockhash;
std::vector<uint16_t> indexes;
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(blockhash);
- uint64_t indexes_size = (uint64_t)indexes.size();
- READWRITE(COMPACTSIZE(indexes_size));
- if (ser_action.ForRead()) {
- size_t i = 0;
- while (indexes.size() < indexes_size) {
- indexes.resize(std::min((uint64_t)(1000 + indexes.size()), indexes_size));
- for (; i < indexes.size(); i++) {
- uint64_t index = 0;
- READWRITE(COMPACTSIZE(index));
- if (index > std::numeric_limits<uint16_t>::max())
- throw std::ios_base::failure("index overflowed 16 bits");
- indexes[i] = index;
- }
- }
-
- int32_t offset = 0;
- for (size_t j = 0; j < indexes.size(); j++) {
- if (int32_t(indexes[j]) + offset > std::numeric_limits<uint16_t>::max())
- throw std::ios_base::failure("indexes overflowed 16 bits");
- indexes[j] = indexes[j] + offset;
- offset = int32_t(indexes[j]) + 1;
- }
- } else {
- for (size_t i = 0; i < indexes.size(); i++) {
- uint64_t index = indexes[i] - (i == 0 ? 0 : (indexes[i - 1] + 1));
- READWRITE(COMPACTSIZE(index));
- }
- }
+ SERIALIZE_METHODS(BlockTransactionsRequest, obj)
+ {
+ READWRITE(obj.blockhash, Using<VectorFormatter<DifferenceFormatter>>(obj.indexes));
}
};
@@ -77,24 +58,9 @@ public:
explicit BlockTransactions(const BlockTransactionsRequest& req) :
blockhash(req.blockhash), txn(req.indexes.size()) {}
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(blockhash);
- uint64_t txn_size = (uint64_t)txn.size();
- READWRITE(COMPACTSIZE(txn_size));
- if (ser_action.ForRead()) {
- size_t i = 0;
- while (txn.size() < txn_size) {
- txn.resize(std::min((uint64_t)(1000 + txn.size()), txn_size));
- for (; i < txn.size(); i++)
- READWRITE(TransactionCompressor(txn[i]));
- }
- } else {
- for (size_t i = 0; i < txn.size(); i++)
- READWRITE(TransactionCompressor(txn[i]));
- }
+ SERIALIZE_METHODS(BlockTransactions, obj)
+ {
+ READWRITE(obj.blockhash, Using<VectorFormatter<TransactionCompression>>(obj.txn));
}
};
@@ -105,17 +71,7 @@ struct PrefilledTransaction {
uint16_t index;
CTransactionRef tx;
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- uint64_t idx = index;
- READWRITE(COMPACTSIZE(idx));
- if (idx > std::numeric_limits<uint16_t>::max())
- throw std::ios_base::failure("index overflowed 16-bits");
- index = idx;
- READWRITE(TransactionCompressor(tx));
- }
+ SERIALIZE_METHODS(PrefilledTransaction, obj) { READWRITE(COMPACTSIZE(obj.index), Using<TransactionCompression>(obj.tx)); }
};
typedef enum ReadStatus_t
@@ -153,43 +109,15 @@ public:
size_t BlockTxCount() const { return shorttxids.size() + prefilledtxn.size(); }
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(header);
- READWRITE(nonce);
-
- uint64_t shorttxids_size = (uint64_t)shorttxids.size();
- READWRITE(COMPACTSIZE(shorttxids_size));
+ SERIALIZE_METHODS(CBlockHeaderAndShortTxIDs, obj)
+ {
+ READWRITE(obj.header, obj.nonce, Using<VectorFormatter<CustomUintFormatter<SHORTTXIDS_LENGTH>>>(obj.shorttxids), obj.prefilledtxn);
if (ser_action.ForRead()) {
- size_t i = 0;
- while (shorttxids.size() < shorttxids_size) {
- shorttxids.resize(std::min((uint64_t)(1000 + shorttxids.size()), shorttxids_size));
- for (; i < shorttxids.size(); i++) {
- uint32_t lsb = 0; uint16_t msb = 0;
- READWRITE(lsb);
- READWRITE(msb);
- shorttxids[i] = (uint64_t(msb) << 32) | uint64_t(lsb);
- static_assert(SHORTTXIDS_LENGTH == 6, "shorttxids serialization assumes 6-byte shorttxids");
- }
- }
- } else {
- for (size_t i = 0; i < shorttxids.size(); i++) {
- uint32_t lsb = shorttxids[i] & 0xffffffff;
- uint16_t msb = (shorttxids[i] >> 32) & 0xffff;
- READWRITE(lsb);
- READWRITE(msb);
+ if (obj.BlockTxCount() > std::numeric_limits<uint16_t>::max()) {
+ throw std::ios_base::failure("indexes overflowed 16 bits");
}
+ obj.FillShortTxIDSelector();
}
-
- READWRITE(prefilledtxn);
-
- if (BlockTxCount() > std::numeric_limits<uint16_t>::max())
- throw std::ios_base::failure("indexes overflowed 16 bits");
-
- if (ser_action.ForRead())
- FillShortTxIDSelector();
}
};
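The DifferenceFormatter introduced above stores each strictly increasing index as its gap from (previous index + 1), written as a CompactSize; the Unser side rebuilds the running value and rejects overflows. A standalone sketch of the same transform on plain vectors, without the stream machinery (function names are illustrative):

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Encode strictly increasing indexes as gaps from (previous + 1),
    // mirroring DifferenceFormatter::Ser minus the CompactSize framing.
    std::vector<uint64_t> EncodeDiffs(const std::vector<uint16_t>& indexes)
    {
        std::vector<uint64_t> out;
        uint64_t shift = 0;
        for (uint16_t v : indexes) {
            if (v < shift) throw std::runtime_error("indexes not strictly increasing");
            out.push_back(v - shift);
            shift = uint64_t(v) + 1;
        }
        return out;
    }

    // Invert the transform, mirroring DifferenceFormatter::Unser.
    std::vector<uint16_t> DecodeDiffs(const std::vector<uint64_t>& diffs)
    {
        std::vector<uint16_t> out;
        uint64_t shift = 0;
        for (uint64_t n : diffs) {
            shift += n;
            if (shift > 0xffff) throw std::runtime_error("differential value overflow");
            out.push_back(static_cast<uint16_t>(shift++));
        }
        return out;
    }

    // Example: {2, 5, 6, 10} encodes to {2, 2, 0, 3} and decodes back.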
diff --git a/src/chain.h b/src/chain.h
index f28071ff2e..64c016a1d6 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -48,17 +48,15 @@ public:
uint64_t nTimeFirst; //!< earliest time of block in file
uint64_t nTimeLast; //!< latest time of block in file
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(VARINT(nBlocks));
- READWRITE(VARINT(nSize));
- READWRITE(VARINT(nUndoSize));
- READWRITE(VARINT(nHeightFirst));
- READWRITE(VARINT(nHeightLast));
- READWRITE(VARINT(nTimeFirst));
- READWRITE(VARINT(nTimeLast));
+ SERIALIZE_METHODS(CBlockFileInfo, obj)
+ {
+ READWRITE(VARINT(obj.nBlocks));
+ READWRITE(VARINT(obj.nSize));
+ READWRITE(VARINT(obj.nUndoSize));
+ READWRITE(VARINT(obj.nHeightFirst));
+ READWRITE(VARINT(obj.nHeightLast));
+ READWRITE(VARINT(obj.nTimeFirst));
+ READWRITE(VARINT(obj.nTimeLast));
}
void SetNull() {
@@ -332,31 +330,25 @@ public:
hashPrev = (pprev ? pprev->GetBlockHash() : uint256());
}
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
+ SERIALIZE_METHODS(CDiskBlockIndex, obj)
+ {
int _nVersion = s.GetVersion();
- if (!(s.GetType() & SER_GETHASH))
- READWRITE(VARINT(_nVersion, VarIntMode::NONNEGATIVE_SIGNED));
-
- READWRITE(VARINT(nHeight, VarIntMode::NONNEGATIVE_SIGNED));
- READWRITE(VARINT(nStatus));
- READWRITE(VARINT(nTx));
- if (nStatus & (BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO))
- READWRITE(VARINT(nFile, VarIntMode::NONNEGATIVE_SIGNED));
- if (nStatus & BLOCK_HAVE_DATA)
- READWRITE(VARINT(nDataPos));
- if (nStatus & BLOCK_HAVE_UNDO)
- READWRITE(VARINT(nUndoPos));
+ if (!(s.GetType() & SER_GETHASH)) READWRITE(VARINT_MODE(_nVersion, VarIntMode::NONNEGATIVE_SIGNED));
+
+ READWRITE(VARINT_MODE(obj.nHeight, VarIntMode::NONNEGATIVE_SIGNED));
+ READWRITE(VARINT(obj.nStatus));
+ READWRITE(VARINT(obj.nTx));
+ if (obj.nStatus & (BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO)) READWRITE(VARINT_MODE(obj.nFile, VarIntMode::NONNEGATIVE_SIGNED));
+ if (obj.nStatus & BLOCK_HAVE_DATA) READWRITE(VARINT(obj.nDataPos));
+ if (obj.nStatus & BLOCK_HAVE_UNDO) READWRITE(VARINT(obj.nUndoPos));
// block header
- READWRITE(this->nVersion);
- READWRITE(hashPrev);
- READWRITE(hashMerkleRoot);
- READWRITE(nTime);
- READWRITE(nBits);
- READWRITE(nNonce);
+ READWRITE(obj.nVersion);
+ READWRITE(obj.hashPrev);
+ READWRITE(obj.hashMerkleRoot);
+ READWRITE(obj.nTime);
+ READWRITE(obj.nBits);
+ READWRITE(obj.nNonce);
}
uint256 GetBlockHash() const
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 31592b0f0a..a9183ac970 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -135,6 +135,7 @@ public:
fDefaultConsistencyChecks = false;
fRequireStandard = true;
m_is_test_chain = false;
+ m_is_mockable_chain = false;
checkpointData = {
{
@@ -231,7 +232,7 @@ public:
fDefaultConsistencyChecks = false;
fRequireStandard = false;
m_is_test_chain = true;
-
+ m_is_mockable_chain = false;
checkpointData = {
{
@@ -303,6 +304,7 @@ public:
fDefaultConsistencyChecks = true;
fRequireStandard = true;
m_is_test_chain = true;
+ m_is_mockable_chain = true;
checkpointData = {
{
diff --git a/src/chainparams.h b/src/chainparams.h
index 63398e587e..379c75e4be 100644
--- a/src/chainparams.h
+++ b/src/chainparams.h
@@ -68,6 +68,8 @@ public:
bool RequireStandard() const { return fRequireStandard; }
/** If this chain is exclusively used for testing */
bool IsTestChain() const { return m_is_test_chain; }
+ /** If this chain allows time to be mocked */
+ bool IsMockableChain() const { return m_is_mockable_chain; }
uint64_t PruneAfterHeight() const { return nPruneAfterHeight; }
/** Minimum free space (in GB) needed for data directory */
uint64_t AssumedBlockchainSize() const { return m_assumed_blockchain_size; }
@@ -102,6 +104,7 @@ protected:
bool fDefaultConsistencyChecks;
bool fRequireStandard;
bool m_is_test_chain;
+ bool m_is_mockable_chain;
CCheckpointData checkpointData;
ChainTxData chainTxData;
};
diff --git a/src/checkqueue.h b/src/checkqueue.h
index 978e23a7c4..9dab5a09ac 100644
--- a/src/checkqueue.h
+++ b/src/checkqueue.h
@@ -90,8 +90,7 @@ private:
nTotal--;
bool fRet = fAllOk;
// reset the status for new work later
- if (fMaster)
- fAllOk = true;
+ fAllOk = true;
// return the current status
return fRet;
}
diff --git a/src/coins.h b/src/coins.h
index 68f7596745..e71c8a47bc 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -61,7 +61,7 @@ public:
assert(!IsSpent());
uint32_t code = nHeight * 2 + fCoinBase;
::Serialize(s, VARINT(code));
- ::Serialize(s, CTxOutCompressor(REF(out)));
+ ::Serialize(s, Using<TxOutCompression>(out));
}
template<typename Stream>
@@ -70,7 +70,7 @@ public:
::Unserialize(s, VARINT(code));
nHeight = code >> 1;
fCoinBase = code & 1;
- ::Unserialize(s, CTxOutCompressor(out));
+ ::Unserialize(s, Using<TxOutCompression>(out));
}
bool IsSpent() const {
diff --git a/src/compressor.h b/src/compressor.h
index c1eda503c8..223603e7e9 100644
--- a/src/compressor.h
+++ b/src/compressor.h
@@ -11,15 +11,21 @@
#include <serialize.h>
#include <span.h>
-class CKeyID;
-class CPubKey;
-class CScriptID;
-
bool CompressScript(const CScript& script, std::vector<unsigned char> &out);
unsigned int GetSpecialScriptSize(unsigned int nSize);
bool DecompressScript(CScript& script, unsigned int nSize, const std::vector<unsigned char> &out);
+/**
+ * Compress amount.
+ *
+ * nAmount is of type uint64_t and thus cannot be negative. If you're passing in
+ * a CAmount (int64_t), make sure to properly handle the case where the amount
+ * is negative before calling CompressAmount(...).
+ *
+ * @pre Function defined only for 0 <= nAmount <= MAX_MONEY.
+ */
uint64_t CompressAmount(uint64_t nAmount);
+
uint64_t DecompressAmount(uint64_t nAmount);
/** Compact serializer for scripts.
@@ -33,9 +39,8 @@ uint64_t DecompressAmount(uint64_t nAmount);
* Other scripts up to 121 bytes require 1 byte + script length. Above
* that, scripts up to 16505 bytes require 2 bytes + script length.
*/
-class CScriptCompressor
+struct ScriptCompression
{
-private:
/**
* make this static for now (there are only 6 special scripts defined)
* this can potentially be extended together with a new nVersion for
@@ -44,12 +49,8 @@ private:
*/
static const unsigned int nSpecialScripts = 6;
- CScript &script;
-public:
- explicit CScriptCompressor(CScript &scriptIn) : script(scriptIn) { }
-
template<typename Stream>
- void Serialize(Stream &s) const {
+ void Ser(Stream &s, const CScript& script) {
std::vector<unsigned char> compr;
if (CompressScript(script, compr)) {
s << MakeSpan(compr);
@@ -61,7 +62,7 @@ public:
}
template<typename Stream>
- void Unserialize(Stream &s) {
+ void Unser(Stream &s, CScript& script) {
unsigned int nSize = 0;
s >> VARINT(nSize);
if (nSize < nSpecialScripts) {
@@ -82,30 +83,24 @@ public:
}
};
-/** wrapper for CTxOut that provides a more compact serialization */
-class CTxOutCompressor
+struct AmountCompression
{
-private:
- CTxOut &txout;
-
-public:
- explicit CTxOutCompressor(CTxOut &txoutIn) : txout(txoutIn) { }
-
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- if (!ser_action.ForRead()) {
- uint64_t nVal = CompressAmount(txout.nValue);
- READWRITE(VARINT(nVal));
- } else {
- uint64_t nVal = 0;
- READWRITE(VARINT(nVal));
- txout.nValue = DecompressAmount(nVal);
- }
- CScriptCompressor cscript(REF(txout.scriptPubKey));
- READWRITE(cscript);
+ template<typename Stream, typename I> void Ser(Stream& s, I val)
+ {
+ s << VARINT(CompressAmount(val));
+ }
+ template<typename Stream, typename I> void Unser(Stream& s, I& val)
+ {
+ uint64_t v;
+ s >> VARINT(v);
+ val = DecompressAmount(v);
}
};
+/** wrapper for CTxOut that provides a more compact serialization */
+struct TxOutCompression
+{
+ FORMATTER_METHODS(CTxOut, obj) { READWRITE(Using<AmountCompression>(obj.nValue), Using<ScriptCompression>(obj.scriptPubKey)); }
+};
+
#endif // BITCOIN_COMPRESSOR_H
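AmountCompression above delegates to the CompressAmount/DecompressAmount pair, which is documented as defined for 0 <= nAmount <= MAX_MONEY; within that range decompression is expected to invert compression exactly, which is what the deserializing side of the formatter relies on. A minimal check one might write against that round-trip property (the helper name is illustrative):

    #include <cassert>
    #include <cstdint>

    // Sanity-check the round trip the AmountCompression formatter depends on.
    // Precondition (from the header comment above): 0 <= amount <= MAX_MONEY.
    void CheckAmountRoundTrip(uint64_t amount)
    {
        assert(DecompressAmount(CompressAmount(amount)) == amount);
    }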
diff --git a/src/consensus/validation.h b/src/consensus/validation.h
index 8a3abb31f4..a79e7b9d12 100644
--- a/src/consensus/validation.h
+++ b/src/consensus/validation.h
@@ -16,7 +16,7 @@
* provider of the transaction should be banned/ignored/disconnected/etc.
*/
enum class TxValidationResult {
- TX_RESULT_UNSET, //!< initial value. Tx has not yet been rejected
+ TX_RESULT_UNSET = 0, //!< initial value. Tx has not yet been rejected
TX_CONSENSUS, //!< invalid by consensus rules
/**
* Invalid by a change to consensus rules more recent than SegWit.
@@ -50,7 +50,7 @@ enum class TxValidationResult {
* useful for some other use-cases.
*/
enum class BlockValidationResult {
- BLOCK_RESULT_UNSET, //!< initial value. Block has not yet been rejected
+ BLOCK_RESULT_UNSET = 0, //!< initial value. Block has not yet been rejected
BLOCK_CONSENSUS, //!< invalid by consensus rules (excluding any below reasons)
/**
* Invalid by a change to consensus rules more recent than SegWit.
@@ -71,31 +71,31 @@ enum class BlockValidationResult {
-/** Base class for capturing information about block/transaction validation. This is subclassed
+/** Template for capturing information about block/transaction validation. This is instantiated
* by TxValidationState and BlockValidationState for validation information on transactions
* and blocks respectively. */
+template <typename Result>
class ValidationState {
private:
enum mode_state {
MODE_VALID, //!< everything ok
MODE_INVALID, //!< network rule violation (DoS value may be set)
MODE_ERROR, //!< run-time error
- } m_mode;
+ } m_mode{MODE_VALID};
+ Result m_result{};
std::string m_reject_reason;
std::string m_debug_message;
-protected:
- void Invalid(const std::string &reject_reason="",
+public:
+ bool Invalid(Result result,
+ const std::string &reject_reason="",
const std::string &debug_message="")
{
+ m_result = result;
m_reject_reason = reject_reason;
m_debug_message = debug_message;
if (m_mode != MODE_ERROR) m_mode = MODE_INVALID;
+ return false;
}
-public:
- // ValidationState is abstract. Have a pure virtual destructor.
- virtual ~ValidationState() = 0;
-
- ValidationState() : m_mode(MODE_VALID) {}
bool Error(const std::string& reject_reason)
{
if (m_mode == MODE_VALID)
@@ -106,40 +106,25 @@ public:
bool IsValid() const { return m_mode == MODE_VALID; }
bool IsInvalid() const { return m_mode == MODE_INVALID; }
bool IsError() const { return m_mode == MODE_ERROR; }
+ Result GetResult() const { return m_result; }
std::string GetRejectReason() const { return m_reject_reason; }
std::string GetDebugMessage() const { return m_debug_message; }
-};
+ std::string ToString() const
+ {
+ if (IsValid()) {
+ return "Valid";
+ }
-inline ValidationState::~ValidationState() {};
+ if (!m_debug_message.empty()) {
+ return m_reject_reason + ", " + m_debug_message;
+ }
-class TxValidationState : public ValidationState {
-private:
- TxValidationResult m_result = TxValidationResult::TX_RESULT_UNSET;
-public:
- bool Invalid(TxValidationResult result,
- const std::string &reject_reason="",
- const std::string &debug_message="")
- {
- m_result = result;
- ValidationState::Invalid(reject_reason, debug_message);
- return false;
+ return m_reject_reason;
}
- TxValidationResult GetResult() const { return m_result; }
};
-class BlockValidationState : public ValidationState {
-private:
- BlockValidationResult m_result = BlockValidationResult::BLOCK_RESULT_UNSET;
-public:
- bool Invalid(BlockValidationResult result,
- const std::string &reject_reason="",
- const std::string &debug_message="") {
- m_result = result;
- ValidationState::Invalid(reject_reason, debug_message);
- return false;
- }
- BlockValidationResult GetResult() const { return m_result; }
-};
+class TxValidationState : public ValidationState<TxValidationResult> {};
+class BlockValidationState : public ValidationState<BlockValidationResult> {};
// These implement the weight = (stripped_size * 4) + witness_size formula,
// using only serialization with and without witness data. As witness_size
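
In the validation.h hunk, the abstract ValidationState base plus its two concrete subclasses collapse into a single ValidationState<Result> template; Invalid() now takes the result enum and returns false so validation code can `return state.Invalid(...)` directly. The following is a simplified, self-contained sketch of that pattern with hypothetical enum and class names, not the real definitions.

```cpp
// One template parameterized on the result enum replaces two near-identical
// subclasses; the per-domain states become thin instantiations.
#include <iostream>
#include <string>

enum class TxResult { UNSET = 0, CONSENSUS, POLICY };  // hypothetical subset of results

template <typename Result>
class ValidationState {
    enum { VALID, INVALID, ERROR_ } m_mode{VALID};
    Result m_result{};
    std::string m_reject_reason;
public:
    bool Invalid(Result result, const std::string& reason = "") {
        m_result = result;
        m_reject_reason = reason;
        if (m_mode != ERROR_) m_mode = INVALID;
        return false;  // lets callers write `return state.Invalid(...)`
    }
    bool IsValid() const { return m_mode == VALID; }
    Result GetResult() const { return m_result; }
    std::string GetRejectReason() const { return m_reject_reason; }
};

class TxState : public ValidationState<TxResult> {};  // mirrors TxValidationState

int main() {
    TxState state;
    state.Invalid(TxResult::CONSENSUS, "bad-txns-inputs-missingorspent");
    std::cout << state.IsValid() << " " << state.GetRejectReason() << '\n';  // 0 bad-txns-...
}
```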
diff --git a/src/crc32c/.appveyor.yml b/src/crc32c/.appveyor.yml
new file mode 100644
index 0000000000..7345746750
--- /dev/null
+++ b/src/crc32c/.appveyor.yml
@@ -0,0 +1,37 @@
+# Build matrix / environment variables are explained on:
+# https://www.appveyor.com/docs/appveyor-yml/
+# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml
+
+version: "{build}"
+
+environment:
+ matrix:
+ # AppVeyor currently has no custom job name feature.
+ # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs
+ - JOB: Visual Studio 2017
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+ CMAKE_GENERATOR: Visual Studio 15 2017
+
+platform:
+ - x86
+ - x64
+
+configuration:
+ - RelWithDebInfo
+ - Debug
+
+build_script:
+ - git submodule update --init --recursive
+ - mkdir build
+ - cd build
+ - if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64
+ - cmake --version
+ - cmake .. -G "%CMAKE_GENERATOR%" -DCRC32C_USE_GLOG=0
+ -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%"
+ - cmake --build . --config "%CONFIGURATION%"
+ - cd ..
+
+test_script:
+ - build\%CONFIGURATION%\crc32c_tests.exe
+ - build\%CONFIGURATION%\crc32c_capi_tests.exe
+ - build\%CONFIGURATION%\crc32c_bench.exe
diff --git a/src/crc32c/.clang-format b/src/crc32c/.clang-format
new file mode 100644
index 0000000000..be9b80799f
--- /dev/null
+++ b/src/crc32c/.clang-format
@@ -0,0 +1,3 @@
+---
+Language: Cpp
+BasedOnStyle: Google
diff --git a/src/crc32c/.clang_complete b/src/crc32c/.clang_complete
new file mode 100644
index 0000000000..fa6757c6f3
--- /dev/null
+++ b/src/crc32c/.clang_complete
@@ -0,0 +1,8 @@
+-Ibuild/include/
+-Ibuild/third_party/glog/
+-Iinclude/
+-Ithird_party/benchmark/include/
+-Ithird_party/googletest/googletest/include/
+-Ithird_party/googletest/googlemock/include/
+-Ithird_party/glog/src/
+-std=c++11
diff --git a/src/crc32c/.gitignore b/src/crc32c/.gitignore
new file mode 100644
index 0000000000..61769727e3
--- /dev/null
+++ b/src/crc32c/.gitignore
@@ -0,0 +1,8 @@
+# Editors.
+*.sw*
+.DS_Store
+/.vscode
+
+# Build directory.
+build/
+out/
diff --git a/src/crc32c/.gitmodules b/src/crc32c/.gitmodules
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/src/crc32c/.gitmodules
diff --git a/src/crc32c/.travis.yml b/src/crc32c/.travis.yml
new file mode 100644
index 0000000000..d990a89f07
--- /dev/null
+++ b/src/crc32c/.travis.yml
@@ -0,0 +1,76 @@
+# Build matrix / environment variables are explained on:
+# http://about.travis-ci.org/docs/user/build-configuration/
+# This file can be validated on: http://lint.travis-ci.org/
+
+language: cpp
+dist: bionic
+osx_image: xcode10.3
+
+compiler:
+- gcc
+- clang
+os:
+- linux
+- osx
+
+env:
+- GLOG=1 SHARED_LIB=0 BUILD_TYPE=Debug
+- GLOG=1 SHARED_LIB=0 BUILD_TYPE=RelWithDebInfo
+- GLOG=0 SHARED_LIB=0 BUILD_TYPE=Debug
+- GLOG=0 SHARED_LIB=0 BUILD_TYPE=RelWithDebInfo
+- GLOG=0 SHARED_LIB=1 BUILD_TYPE=Debug
+- GLOG=0 SHARED_LIB=1 BUILD_TYPE=RelWithDebInfo
+
+addons:
+ apt:
+ sources:
+ - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main'
+ key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
+ - sourceline: 'ppa:ubuntu-toolchain-r/test'
+ packages:
+ - clang-9
+ - cmake
+ - gcc-9
+ - g++-9
+ - ninja-build
+ homebrew:
+ packages:
+ - cmake
+ - gcc@9
+ - llvm@9
+ - ninja
+ update: true
+
+install:
+# The following Homebrew packages aren't linked by default, and need to be
+# prepended to the path explicitly.
+- if [ "$TRAVIS_OS_NAME" = "osx" ]; then
+ export PATH="$(brew --prefix llvm)/bin:$PATH";
+ fi
+# /usr/bin/gcc points to an older compiler on both Linux and macOS.
+- if [ "$CXX" = "g++" ]; then export CXX="g++-9" CC="gcc-9"; fi
+# /usr/bin/clang points to an older compiler on both Linux and macOS.
+#
+# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values
+# below don't work on macOS. Fortunately, the path change above makes the
+# default values (clang and clang++) resolve to the correct compiler on macOS.
+- if [ "$TRAVIS_OS_NAME" = "linux" ]; then
+ if [ "$CXX" = "clang++" ]; then export CXX="clang++-9" CC="clang-9"; fi;
+ fi
+- echo ${CC}
+- echo ${CXX}
+- ${CXX} --version
+- cmake --version
+
+before_script:
+- mkdir -p build && cd build
+- cmake .. -G Ninja -DCRC32C_USE_GLOG=$GLOG -DCMAKE_BUILD_TYPE=$BUILD_TYPE
+ -DBUILD_SHARED_LIBS=$SHARED_LIB -DCMAKE_INSTALL_PREFIX=$HOME/.local
+- cmake --build .
+- cd ..
+
+script:
+- build/crc32c_tests
+- build/crc32c_capi_tests
+- build/crc32c_bench
+- cd build && cmake --build . --target install
diff --git a/src/crc32c/.ycm_extra_conf.py b/src/crc32c/.ycm_extra_conf.py
new file mode 100644
index 0000000000..536aadcec8
--- /dev/null
+++ b/src/crc32c/.ycm_extra_conf.py
@@ -0,0 +1,142 @@
+# Copyright 2017 The CRC32C Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""YouCompleteMe configuration that interprets a .clang_complete file.
+
+This module implementes the YouCompleteMe configuration API documented at:
+https://github.com/Valloric/ycmd#ycm_extra_confpy-specification
+
+The implementation loads and processes a .clang_complete file, documented at:
+https://github.com/Rip-Rip/clang_complete/blob/master/README.md
+"""
+
+import os
+
+# Flags added to the list in .clang_complete.
+BASE_FLAGS = [
+ '-Werror', # Unlike clang_complete, YCM can also be used as a linter.
+ '-DUSE_CLANG_COMPLETER', # YCM needs this.
+ '-xc++', # YCM needs this to avoid compiling headers as C code.
+]
+
+# Clang flags that take in paths.
+# See https://clang.llvm.org/docs/ClangCommandLineReference.html
+PATH_FLAGS = [
+ '-isystem',
+ '-I',
+ '-iquote',
+ '--sysroot='
+]
+
+
+def DirectoryOfThisScript():
+ """Returns the absolute path to the directory containing this script."""
+ return os.path.dirname(os.path.abspath(__file__))
+
+
+def MakeRelativePathsInFlagsAbsolute(flags, build_root):
+ """Expands relative paths in a list of Clang command-line flags.
+
+ Args:
+ flags: The list of flags passed to Clang.
+ build_root: The current directory when running the Clang compiler. Should be
+ an absolute path.
+
+ Returns:
+ A list of flags with relative paths replaced by absolute paths.
+ """
+ new_flags = []
+ make_next_absolute = False
+ for flag in flags:
+ new_flag = flag
+
+ if make_next_absolute:
+ make_next_absolute = False
+ if not flag.startswith('/'):
+ new_flag = os.path.join(build_root, flag)
+
+ for path_flag in PATH_FLAGS:
+ if flag == path_flag:
+ make_next_absolute = True
+ break
+
+ if flag.startswith(path_flag):
+ path = flag[len(path_flag):]
+ new_flag = path_flag + os.path.join(build_root, path)
+ break
+
+ if new_flag:
+ new_flags.append(new_flag)
+ return new_flags
+
+
+def FindNearest(target, path, build_root):
+ """Looks for a file with a specific name closest to a project path.
+
+ This is similar to the logic used by a version-control system (like git) to
+ find its configuration directory (.git) based on the current directory when a
+ command is invoked.
+
+ Args:
+ target: The file name to search for.
+ path: The directory where the search starts. The search will explore the
+ given directory's ascendants using the parent relationship. Should be an
+ absolute path.
+ build_root: A directory that acts as a fence for the search. If the search
+ reaches this directory, it will not advance to its parent. Should be an
+ absolute path.
+
+ Returns:
+ The path to a file with the desired name. None if the search failed.
+ """
+ candidate = os.path.join(path, target)
+ if os.path.isfile(candidate):
+ return candidate
+
+ if path == build_root:
+ return None
+
+ parent = os.path.dirname(path)
+ if parent == path:
+ return None
+
+ return FindNearest(target, parent, build_root)
+
+
+def FlagsForClangComplete(file_path, build_root):
+ """Reads the .clang_complete flags for a source file.
+
+ Args:
+ file_path: The path to the source file. Should be inside the project. Used
+ to locate the relevant .clang_complete file.
+ build_root: The current directory when running the Clang compiler for this
+ file. Should be an absolute path.
+
+ Returns:
+ A list of strings, where each element is a Clang command-line flag.
+ """
+ clang_complete_path = FindNearest('.clang_complete', file_path, build_root)
+ if clang_complete_path is None:
+ return None
+ clang_complete_flags = open(clang_complete_path, 'r').read().splitlines()
+ return clang_complete_flags
+
+
+def FlagsForFile(filename, **kwargs):
+ """Implements the YouCompleteMe API."""
+
+ # kwargs can be used to pass 'client_data' to the YCM configuration. This
+ # configuration script does not need any extra information, so
+ # pylint: disable=unused-argument
+
+ build_root = DirectoryOfThisScript()
+ file_path = os.path.realpath(filename)
+
+ flags = BASE_FLAGS
+ clang_flags = FlagsForClangComplete(file_path, build_root)
+ if clang_flags:
+ flags += clang_flags
+
+ final_flags = MakeRelativePathsInFlagsAbsolute(flags, build_root)
+
+ return {'flags': final_flags}
diff --git a/src/crc32c/AUTHORS b/src/crc32c/AUTHORS
new file mode 100644
index 0000000000..6f1f6871a6
--- /dev/null
+++ b/src/crc32c/AUTHORS
@@ -0,0 +1,9 @@
+# This is the list of CRC32C authors for copyright purposes.
+#
+# This does not necessarily list everyone who has contributed code, since in
+# some cases, their employer may be the copyright holder. To see the full list
+# of contributors, see the revision history in source control.
+Google Inc.
+
+Fangming Fang <Fangming.Fang@arm.com>
+Vadim Skipin <vadim.skipin@gmail.com>
diff --git a/src/crc32c/CMakeLists.txt b/src/crc32c/CMakeLists.txt
new file mode 100644
index 0000000000..111a3e3614
--- /dev/null
+++ b/src/crc32c/CMakeLists.txt
@@ -0,0 +1,423 @@
+# Copyright 2017 The CRC32C Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+cmake_minimum_required(VERSION 3.1)
+project(Crc32c VERSION 1.1.0 LANGUAGES C CXX)
+
+# This project can use C11, but will gracefully decay down to C89.
+set(CMAKE_C_STANDARD 11)
+set(CMAKE_C_STANDARD_REQUIRED OFF)
+set(CMAKE_C_EXTENSIONS OFF)
+
+# This project requires C++11.
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+# https://github.com/izenecloud/cmake/blob/master/SetCompilerWarningAll.cmake
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # Use the highest warning level for Visual Studio.
+ set(CMAKE_CXX_WARNING_LEVEL 4)
+ if(CMAKE_CXX_FLAGS MATCHES "/W[0-4]")
+ string(REGEX REPLACE "/W[0-4]" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ else(CMAKE_CXX_FLAGS MATCHES "/W[0-4]")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
+ endif(CMAKE_CXX_FLAGS MATCHES "/W[0-4]")
+
+ # Disable C++ exceptions.
+ string(REGEX REPLACE "/EH[a-z]+" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHs-c-")
+ add_definitions(-D_HAS_EXCEPTIONS=0)
+
+ # Disable RTTI.
+ string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-")
+else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # Use -Wall for clang and gcc.
+ if(NOT CMAKE_CXX_FLAGS MATCHES "-Wall")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
+ endif(NOT CMAKE_CXX_FLAGS MATCHES "-Wall")
+
+ # Use -Wextra for clang and gcc.
+ if(NOT CMAKE_CXX_FLAGS MATCHES "-Wextra")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra")
+ endif(NOT CMAKE_CXX_FLAGS MATCHES "-Wextra")
+
+ # Use -Werror for clang and gcc.
+ if(NOT CMAKE_CXX_FLAGS MATCHES "-Werror")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror")
+ endif(NOT CMAKE_CXX_FLAGS MATCHES "-Werror")
+
+ # Disable C++ exceptions.
+ string(REGEX REPLACE "-fexceptions" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
+
+ # Disable RTTI.
+ string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti")
+endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+
+option(CRC32C_BUILD_TESTS "Build CRC32C's unit tests" ON)
+option(CRC32C_BUILD_BENCHMARKS "Build CRC32C's benchmarks" ON)
+option(CRC32C_USE_GLOG "Build CRC32C's tests with Google Logging" ON)
+option(CRC32C_INSTALL "Install CRC32C's header and library" ON)
+
+include(TestBigEndian)
+test_big_endian(BYTE_ORDER_BIG_ENDIAN)
+
+include(CheckCXXCompilerFlag)
+# Used by glog.
+check_cxx_compiler_flag(-Wno-deprecated CRC32C_HAVE_NO_DEPRECATED)
+# Used by glog.
+check_cxx_compiler_flag(-Wno-sign-compare CRC32C_HAVE_NO_SIGN_COMPARE)
+# Used by glog.
+check_cxx_compiler_flag(-Wno-unused-parameter CRC32C_HAVE_NO_UNUSED_PARAMETER)
+# Used by googletest.
+check_cxx_compiler_flag(-Wno-missing-field-initializers
+ CRC32C_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+
+# Check for __builtin_prefetch support in the compiler.
+include(CheckCXXSourceCompiles)
+check_cxx_source_compiles("
+int main() {
+ char data = 0;
+ const char* address = &data;
+ __builtin_prefetch(address, 0, 0);
+ return 0;
+}
+" HAVE_BUILTIN_PREFETCH)
+
+# Check for _mm_prefetch support in the compiler.
+include(CheckCXXSourceCompiles)
+check_cxx_source_compiles("
+#if defined(_MSC_VER)
+#include <intrin.h>
+#else // !defined(_MSC_VER)
+#include <xmmintrin.h>
+#endif // defined(_MSC_VER)
+
+int main() {
+ char data = 0;
+ const char* address = &data;
+ _mm_prefetch(address, _MM_HINT_NTA);
+ return 0;
+}
+" HAVE_MM_PREFETCH)
+
+# Check for SSE4.2 support in the compiler.
+set(OLD_CMAKE_REQURED_FLAGS ${CMAKE_REQUIRED_FLAGS})
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} /arch:AVX")
+else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -msse4.2")
+endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+check_cxx_source_compiles("
+#if defined(_MSC_VER)
+#include <intrin.h>
+#else // !defined(_MSC_VER)
+#include <cpuid.h>
+#include <nmmintrin.h>
+#endif // defined(_MSC_VER)
+
+int main() {
+ _mm_crc32_u8(0, 0); _mm_crc32_u32(0, 0);
+#if defined(_M_X64) || defined(__x86_64__)
+ _mm_crc32_u64(0, 0);
+#endif // defined(_M_X64) || defined(__x86_64__)
+ return 0;
+}
+" HAVE_SSE42)
+set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQURED_FLAGS})
+
+# Check for ARMv8 w/ CRC and CRYPTO extensions support in the compiler.
+set(OLD_CMAKE_REQURED_FLAGS ${CMAKE_REQUIRED_FLAGS})
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # TODO(pwnall): Insert correct flag when VS gets ARM CRC32C support.
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} /arch:NOTYET")
+else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -march=armv8-a+crc+crypto")
+endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+check_cxx_source_compiles("
+#include <arm_acle.h>
+#include <arm_neon.h>
+
+int main() {
+ __crc32cb(0, 0); __crc32ch(0, 0); __crc32cw(0, 0); __crc32cd(0, 0);
+ vmull_p64(0, 0);
+ return 0;
+}
+" HAVE_ARM64_CRC32C)
+set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQURED_FLAGS})
+
+# Check for strong getauxval() support in the system headers.
+check_cxx_source_compiles("
+#include <arm_acle.h>
+#include <arm_neon.h>
+#include <sys/auxv.h>
+
+int main() {
+ getauxval(AT_HWCAP);
+ return 0;
+}
+" HAVE_STRONG_GETAUXVAL)
+
+# Check for weak getauxval() support in the compiler.
+check_cxx_source_compiles("
+unsigned long getauxval(unsigned long type) __attribute__((weak));
+#define AT_HWCAP 16
+
+int main() {
+ getauxval(AT_HWCAP);
+ return 0;
+}
+" HAVE_WEAK_GETAUXVAL)
+
+if(CRC32C_USE_GLOG)
+ # glog requires this setting to avoid using dynamic_cast.
+ set(DISABLE_RTTI ON CACHE BOOL "" FORCE)
+
+ # glog's test targets trigger deprecation warnings, and compiling them burns
+ # CPU cycles on the CI.
+ set(BUILD_TESTING_SAVED "${BUILD_TESTING}")
+ set(BUILD_TESTING OFF CACHE BOOL "" FORCE)
+ add_subdirectory("third_party/glog" EXCLUDE_FROM_ALL)
+ set(BUILD_TESTING "${BUILD_TESTING_SAVED}" CACHE BOOL "" FORCE)
+
+ # glog triggers deprecation warnings on OSX.
+ # https://github.com/google/glog/issues/185
+ if(CRC32C_HAVE_NO_DEPRECATED)
+ set_property(TARGET glog APPEND PROPERTY COMPILE_OPTIONS -Wno-deprecated)
+ endif(CRC32C_HAVE_NO_DEPRECATED)
+
+ # glog triggers sign comparison warnings on gcc.
+ if(CRC32C_HAVE_NO_SIGN_COMPARE)
+ set_property(TARGET glog APPEND PROPERTY COMPILE_OPTIONS -Wno-sign-compare)
+ endif(CRC32C_HAVE_NO_SIGN_COMPARE)
+
+ # glog triggers unused parameter warnings on clang.
+ if(CRC32C_HAVE_NO_UNUSED_PARAMETER)
+ set_property(TARGET glog
+ APPEND PROPERTY COMPILE_OPTIONS -Wno-unused-parameter)
+ endif(CRC32C_HAVE_NO_UNUSED_PARAMETER)
+
+ set(CRC32C_TESTS_BUILT_WITH_GLOG 1)
+endif(CRC32C_USE_GLOG)
+
+configure_file(
+ "src/crc32c_config.h.in"
+ "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h"
+)
+
+include_directories("${PROJECT_BINARY_DIR}/include")
+
+# ARM64 CRC32C code is built separately, so we don't accidentally compile
+# unsupported instructions into code that gets run without ARM64 support.
+add_library(crc32c_arm64 OBJECT "")
+target_sources(crc32c_arm64
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h"
+ "src/crc32c_arm64.cc"
+ "src/crc32c_arm64.h"
+)
+if(HAVE_ARM64_CRC32C)
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # TODO(pwnall): Insert correct flag when VS gets ARM64 CRC32C support.
+ target_compile_options(crc32c_arm64 PRIVATE "/arch:NOTYET")
+ else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ target_compile_options(crc32c_arm64 PRIVATE "-march=armv8-a+crc+crypto")
+ endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+endif(HAVE_ARM64_CRC32C)
+
+# CMake only enables PIC by default in SHARED and MODULE targets.
+if(BUILD_SHARED_LIBS)
+ set_property(TARGET crc32c_arm64 PROPERTY POSITION_INDEPENDENT_CODE TRUE)
+endif(BUILD_SHARED_LIBS)
+
+# SSE4.2 code is built separately, so we don't accidentally compile unsupported
+# instructions into code that gets run without SSE4.2 support.
+add_library(crc32c_sse42 OBJECT "")
+target_sources(crc32c_sse42
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h"
+ "src/crc32c_sse42.cc"
+ "src/crc32c_sse42.h"
+)
+if(HAVE_SSE42)
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ target_compile_options(crc32c_sse42 PRIVATE "/arch:AVX")
+ else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ target_compile_options(crc32c_sse42 PRIVATE "-msse4.2")
+ endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+endif(HAVE_SSE42)
+
+# CMake only enables PIC by default in SHARED and MODULE targets.
+if(BUILD_SHARED_LIBS)
+ set_property(TARGET crc32c_sse42 PROPERTY POSITION_INDEPENDENT_CODE TRUE)
+endif(BUILD_SHARED_LIBS)
+
+# Must be included before CMAKE_INSTALL_INCLUDEDIR is used.
+include(GNUInstallDirs)
+
+add_library(crc32c ""
+ # TODO(pwnall): Move the TARGET_OBJECTS generator expressions to the PRIVATE
+ # section of target_sources when cmake_minimum_required becomes 3.9 or above.
+ $<TARGET_OBJECTS:crc32c_arm64>
+ $<TARGET_OBJECTS:crc32c_sse42>
+)
+target_sources(crc32c
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h"
+ "src/crc32c_arm64.h"
+ "src/crc32c_arm64_linux_check.h"
+ "src/crc32c_internal.h"
+ "src/crc32c_portable.cc"
+ "src/crc32c_prefetch.h"
+ "src/crc32c_read_le.h"
+ "src/crc32c_round_up.h"
+ "src/crc32c_sse42.h"
+ "src/crc32c_sse42_check.h"
+ "src/crc32c.cc"
+
+ # Only CMake 3.3+ supports PUBLIC sources in targets exported by "install".
+ $<$<VERSION_GREATER:CMAKE_VERSION,3.2>:PUBLIC>
+ "include/crc32c/crc32c.h"
+)
+
+target_include_directories(crc32c
+ PUBLIC
+ $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
+ $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+)
+
+target_compile_definitions(crc32c
+PRIVATE
+ CRC32C_HAVE_CONFIG_H=1
+)
+
+set_target_properties(crc32c
+ PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR})
+
+# Warnings as errors in Visual Studio for this project's targets.
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ set_property(TARGET crc32c APPEND PROPERTY COMPILE_OPTIONS "/WX")
+ set_property(TARGET crc32c_arm64 APPEND PROPERTY COMPILE_OPTIONS "/WX")
+ set_property(TARGET crc32c_sse42 APPEND PROPERTY COMPILE_OPTIONS "/WX")
+endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+
+if(CRC32C_BUILD_TESTS)
+ enable_testing()
+
+ # Prevent overriding the parent project's compiler/linker settings on Windows.
+ set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
+ set(install_gtest OFF)
+ set(install_gmock OFF)
+
+ # This project is tested using GoogleTest.
+ add_subdirectory("third_party/googletest")
+
+ # GoogleTest triggers a missing field initializers warning.
+ if(CRC32C_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+ set_property(TARGET gtest
+ APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
+ set_property(TARGET gmock
+ APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
+ endif(CRC32C_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+
+ add_executable(crc32c_tests "")
+ target_sources(crc32c_tests
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h"
+ "src/crc32c_arm64_unittest.cc"
+ "src/crc32c_extend_unittests.h"
+ "src/crc32c_portable_unittest.cc"
+ "src/crc32c_prefetch_unittest.cc"
+ "src/crc32c_read_le_unittest.cc"
+ "src/crc32c_round_up_unittest.cc"
+ "src/crc32c_sse42_unittest.cc"
+ "src/crc32c_unittest.cc"
+ "src/crc32c_test_main.cc"
+ )
+ target_link_libraries(crc32c_tests crc32c gtest)
+
+ # Warnings as errors in Visual Studio for this project's targets.
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ set_property(TARGET crc32c_tests APPEND PROPERTY COMPILE_OPTIONS "/WX")
+ endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+
+ if(CRC32C_USE_GLOG)
+ target_link_libraries(crc32c_tests glog)
+ endif(CRC32C_USE_GLOG)
+
+ add_test(NAME crc32c_tests COMMAND crc32c_tests)
+
+ add_executable(crc32c_capi_tests "")
+ target_sources(crc32c_capi_tests
+ PRIVATE
+ "src/crc32c_capi_unittest.c"
+ )
+ target_link_libraries(crc32c_capi_tests crc32c)
+
+ # Warnings as errors in Visual Studio for this project's targets.
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ set_property(TARGET crc32c_capi_tests APPEND PROPERTY COMPILE_OPTIONS "/WX")
+ endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+
+ add_test(NAME crc32c_capi_tests COMMAND crc32c_capi_tests)
+endif(CRC32C_BUILD_TESTS)
+
+if(CRC32C_BUILD_BENCHMARKS)
+ add_executable(crc32c_bench "")
+ target_sources(crc32c_bench
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h"
+ "src/crc32c_benchmark.cc"
+ )
+ target_link_libraries(crc32c_bench crc32c)
+
+ # This project uses Google benchmark for benchmarking.
+ set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE)
+ set(BENCHMARK_ENABLE_EXCEPTIONS OFF CACHE BOOL "" FORCE)
+ add_subdirectory("third_party/benchmark")
+ target_link_libraries(crc32c_bench benchmark)
+
+ if(CRC32C_USE_GLOG)
+ target_link_libraries(crc32c_bench glog)
+ endif(CRC32C_USE_GLOG)
+
+ # Warnings as errors in Visual Studio for this project's targets.
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ set_property(TARGET crc32c_bench APPEND PROPERTY COMPILE_OPTIONS "/WX")
+ endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+endif(CRC32C_BUILD_BENCHMARKS)
+
+if(CRC32C_INSTALL)
+ install(TARGETS crc32c
+ EXPORT Crc32cTargets
+ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ )
+ install(
+ FILES
+ "include/crc32c/crc32c.h"
+ DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/crc32c"
+ )
+
+ include(CMakePackageConfigHelpers)
+ write_basic_package_version_file(
+ "${PROJECT_BINARY_DIR}/Crc32cConfigVersion.cmake"
+ COMPATIBILITY SameMajorVersion
+ )
+ install(
+ EXPORT Crc32cTargets
+ NAMESPACE Crc32c::
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/Crc32c"
+ )
+ install(
+ FILES
+ "Crc32cConfig.cmake"
+ "${PROJECT_BINARY_DIR}/Crc32cConfigVersion.cmake"
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/Crc32c"
+ )
+endif(CRC32C_INSTALL)
diff --git a/src/crc32c/CONTRIBUTING.md b/src/crc32c/CONTRIBUTING.md
new file mode 100644
index 0000000000..ae319c70ac
--- /dev/null
+++ b/src/crc32c/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution,
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
diff --git a/src/crc32c/Crc32cConfig.cmake b/src/crc32c/Crc32cConfig.cmake
new file mode 100644
index 0000000000..4d6057ec26
--- /dev/null
+++ b/src/crc32c/Crc32cConfig.cmake
@@ -0,0 +1,5 @@
+# Copyright 2017 The CRC32C Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+include("${CMAKE_CURRENT_LIST_DIR}/Crc32cTargets.cmake")
diff --git a/src/crc32c/LICENSE b/src/crc32c/LICENSE
new file mode 100644
index 0000000000..8c8735cf12
--- /dev/null
+++ b/src/crc32c/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2017, The CRC32C Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/crc32c/README.md b/src/crc32c/README.md
new file mode 100644
index 0000000000..0bd69f7f09
--- /dev/null
+++ b/src/crc32c/README.md
@@ -0,0 +1,125 @@
+# CRC32C
+
+[![Build Status](https://travis-ci.org/google/crc32c.svg?branch=master)](https://travis-ci.org/google/crc32c)
+[![Build Status](https://ci.appveyor.com/api/projects/status/moiq7331pett4xuj/branch/master?svg=true)](https://ci.appveyor.com/project/pwnall/crc32c)
+
+New file format authors should consider
+[HighwayHash](https://github.com/google/highwayhash). The initial version of
+this code was extracted from [LevelDB](https://github.com/google/leveldb), which
+is a stable key-value store that is widely used at Google.
+
+This project collects a few CRC32C implementations under an umbrella that
+dispatches to a suitable implementation based on the host computer's hardware
+capabilities.
+
+CRC32C is specified as the CRC that uses the iSCSI polynomial in
+[RFC 3720](https://tools.ietf.org/html/rfc3720#section-12.1). The polynomial was
+introduced by G. Castagnoli, S. Braeuer and M. Herrmann. CRC32C is used in
+software such as Btrfs, ext4, Ceph and leveldb.
+
+
+## Usage
+
+```cpp
+#include "crc32c/crc32c.h"
+
+int main() {
+ const std::uint8_t buffer[] = {0, 0, 0, 0};
+ std::uint32_t result;
+
+ // Process a raw buffer.
+ result = crc32c::Crc32c(buffer, 4);
+
+ // Process a std::string.
+ std::string string;
+ string.resize(4);
+ result = crc32c::Crc32c(string);
+
+ // If you have C++17 support, process a std::string_view.
+ std::string_view string_view(string);
+ result = crc32c::Crc32c(string_view);
+
+ return 0;
+}
+```
+
+
+## Prerequisites
+
+This project uses [CMake](https://cmake.org/) for building and testing. CMake is
+available in all popular Linux distributions, as well as in
+[Homebrew](https://brew.sh/).
+
+This project uses submodules for dependency management.
+
+```bash
+git submodule update --init --recursive
+```
+
+If you're using [Atom](https://atom.io/), the following packages can help.
+
+```bash
+apm install autocomplete-clang build build-cmake clang-format language-cmake \
+ linter linter-clang
+```
+
+If you don't mind more setup in return for more speed, replace
+`autocomplete-clang` and `linter-clang` with `you-complete-me`. This requires
+[setting up ycmd](https://github.com/Valloric/ycmd#building).
+
+```bash
+apm install autocomplete-plus build build-cmake clang-format language-cmake \
+ linter you-complete-me
+```
+
+## Building
+
+The following commands build and install the project.
+
+```bash
+mkdir build
+cd build
+cmake -DCRC32C_BUILD_TESTS=0 -DCRC32C_BUILD_BENCHMARKS=0 .. && make all install
+```
+
+
+## Development
+
+The following command (when executed from `build/`) (re)builds the project and
+runs the tests.
+
+```bash
+cmake .. && cmake --build . && ctest --output-on-failure
+```
+
+
+### Android testing
+
+The following command builds the project against the Android NDK, which is
+useful for benchmarking against ARM processors.
+
+```bash
+cmake .. -DCMAKE_SYSTEM_NAME=Android -DCMAKE_ANDROID_ARCH_ABI=arm64-v8a \
+ -DCMAKE_ANDROID_NDK=$HOME/Library/Android/sdk/ndk-bundle \
+ -DCMAKE_ANDROID_NDK_TOOLCHAIN_VERSION=clang \
+ -DCMAKE_ANDROID_STL_TYPE=c++_static -DCRC32C_USE_GLOG=0 \
+ -DCMAKE_BUILD_TYPE=Release && cmake --build .
+```
+
+The following commands install and run the benchmarks.
+
+```bash
+adb push crc32c_bench /data/local/tmp
+adb shell chmod +x /data/local/tmp/crc32c_bench
+adb shell 'cd /data/local/tmp && ./crc32c_bench'
+adb shell rm /data/local/tmp/crc32c_bench
+```
+
+The following commands install and run the tests.
+
+```bash
+adb push crc32c_tests /data/local/tmp
+adb shell chmod +x /data/local/tmp/crc32c_tests
+adb shell 'cd /data/local/tmp && ./crc32c_tests'
+adb shell rm /data/local/tmp/crc32c_tests
+```
diff --git a/src/crc32c/include/crc32c/crc32c.h b/src/crc32c/include/crc32c/crc32c.h
new file mode 100644
index 0000000000..e8a78170a9
--- /dev/null
+++ b/src/crc32c/include/crc32c/crc32c.h
@@ -0,0 +1,89 @@
+/* Copyright 2017 The CRC32C Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style license that can be
+ found in the LICENSE file. See the AUTHORS file for names of contributors. */
+
+#ifndef CRC32C_CRC32C_H_
+#define CRC32C_CRC32C_H_
+
+/* The API exported by the CRC32C project. */
+
+#if defined(__cplusplus)
+
+#include <cstddef>
+#include <cstdint>
+#include <string>
+
+#else /* !defined(__cplusplus) */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#endif /* !defined(__cplusplus) */
+
+
+/* The C API. */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif /* defined(__cplusplus) */
+
+/* Extends "crc" with the CRC32C of "count" bytes in the buffer pointed by
+ "data" */
+uint32_t crc32c_extend(uint32_t crc, const uint8_t* data, size_t count);
+
+/* Computes the CRC32C of "count" bytes in the buffer pointed by "data". */
+uint32_t crc32c_value(const uint8_t* data, size_t count);
+
+#ifdef __cplusplus
+} /* end extern "C" */
+#endif /* defined(__cplusplus) */
+
+
+/* The C++ API. */
+
+#if defined(__cplusplus)
+
+namespace crc32c {
+
+// Extends "crc" with the CRC32C of "count" bytes in the buffer pointed by
+// "data".
+uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count);
+
+// Computes the CRC32C of "count" bytes in the buffer pointed by "data".
+inline uint32_t Crc32c(const uint8_t* data, size_t count) {
+ return Extend(0, data, count);
+}
+
+// Computes the CRC32C of "count" bytes in the buffer pointed by "data".
+inline uint32_t Crc32c(const char* data, size_t count) {
+ return Extend(0, reinterpret_cast<const uint8_t*>(data), count);
+}
+
+// Computes the CRC32C of the string's content.
+inline uint32_t Crc32c(const std::string& string) {
+ return Crc32c(reinterpret_cast<const uint8_t*>(string.data()),
+ string.size());
+}
+
+} // namespace crc32c
+
+#if __cplusplus > 201402L
+#if __has_include(<string_view>)
+#include <string_view>
+
+namespace crc32c {
+
+// Computes the CRC32C of the bytes in the string_view.
+inline uint32_t Crc32c(const std::string_view& string_view) {
+ return Crc32c(reinterpret_cast<const uint8_t*>(string_view.data()),
+ string_view.size());
+}
+
+} // namespace crc32c
+
+#endif // __has_include(<string_view>)
+#endif // __cplusplus > 201402L
+
+#endif /* defined(__cplusplus) */
+
+#endif // CRC32C_CRC32C_H_
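
The header above exposes a one-shot Crc32c() and an incremental Extend() that hashes data in chunks; the HelloWorld unit test further down relies on chunked and one-shot hashing agreeing. A short usage sketch of the incremental API, assuming the program is compiled and linked against the crc32c library built in this tree:

```cpp
// Incremental CRC32C: start from 0 and Extend() with each chunk; the result
// equals hashing the whole buffer at once.
#include <cstdint>
#include <cstdio>
#include <string>

#include "crc32c/crc32c.h"

int main() {
    const std::string whole = "hello world";
    const std::string first = "hello ", second = "world";

    const uint32_t one_shot = crc32c::Crc32c(whole);

    const uint32_t incremental = crc32c::Extend(
        crc32c::Extend(0, reinterpret_cast<const uint8_t*>(first.data()), first.size()),
        reinterpret_cast<const uint8_t*>(second.data()), second.size());

    std::printf("%08x %08x\n", one_shot, incremental);  // both values match
    return one_shot == incremental ? 0 : 1;
}
```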
diff --git a/src/crc32c/src/crc32c.cc b/src/crc32c/src/crc32c.cc
new file mode 100644
index 0000000000..4d3018af47
--- /dev/null
+++ b/src/crc32c/src/crc32c.cc
@@ -0,0 +1,39 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "crc32c/crc32c.h"
+
+#include <cstddef>
+#include <cstdint>
+
+#include "./crc32c_arm64.h"
+#include "./crc32c_arm64_linux_check.h"
+#include "./crc32c_internal.h"
+#include "./crc32c_sse42.h"
+#include "./crc32c_sse42_check.h"
+
+namespace crc32c {
+
+uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
+#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+ static bool can_use_sse42 = CanUseSse42();
+ if (can_use_sse42) return ExtendSse42(crc, data, count);
+#elif HAVE_ARM64_CRC32C
+ static bool can_use_arm_linux = CanUseArm64Linux();
+ if (can_use_arm_linux) return ExtendArm64(crc, data, count);
+#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+ return ExtendPortable(crc, data, count);
+}
+
+extern "C" uint32_t crc32c_extend(uint32_t crc, const uint8_t* data,
+ size_t count) {
+ return crc32c::Extend(crc, data, count);
+}
+
+extern "C" uint32_t crc32c_value(const uint8_t* data, size_t count) {
+ return crc32c::Crc32c(data, count);
+}
+
+} // namespace crc32c
diff --git a/src/crc32c/src/crc32c_arm64.cc b/src/crc32c/src/crc32c_arm64.cc
new file mode 100644
index 0000000000..b872245f95
--- /dev/null
+++ b/src/crc32c/src/crc32c_arm64.cc
@@ -0,0 +1,126 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "./crc32c_arm64.h"
+
+// In a separate source file to allow this accelerated CRC32C function to be
+// compiled with the appropriate compiler flags to enable ARM NEON CRC32C
+// instructions.
+
+// This implementation is based on https://github.com/google/leveldb/pull/490.
+
+#include <cstddef>
+#include <cstdint>
+
+#include "./crc32c_internal.h"
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#if HAVE_ARM64_CRC32C
+
+#include <arm_acle.h>
+#include <arm_neon.h>
+
+#define KBYTES 1032
+#define SEGMENTBYTES 256
+
+// Compute 8 bytes for each segment in parallel.
+#define CRC32C32BYTES(P, IND) \
+ do { \
+ crc1 = __crc32cd( \
+ crc1, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 1 + (IND))); \
+ crc2 = __crc32cd( \
+ crc2, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 2 + (IND))); \
+ crc3 = __crc32cd( \
+ crc3, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 3 + (IND))); \
+ crc0 = __crc32cd( \
+ crc0, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 0 + (IND))); \
+ } while (0);
+
+// Compute 8*8 bytes for each segment in parallel.
+#define CRC32C256BYTES(P, IND) \
+ do { \
+ CRC32C32BYTES((P), (IND)*8 + 0) \
+ CRC32C32BYTES((P), (IND)*8 + 1) \
+ CRC32C32BYTES((P), (IND)*8 + 2) \
+ CRC32C32BYTES((P), (IND)*8 + 3) \
+ CRC32C32BYTES((P), (IND)*8 + 4) \
+ CRC32C32BYTES((P), (IND)*8 + 5) \
+ CRC32C32BYTES((P), (IND)*8 + 6) \
+ CRC32C32BYTES((P), (IND)*8 + 7) \
+ } while (0);
+
+// Compute 4*8*8 bytes for each segment in parallel.
+#define CRC32C1024BYTES(P) \
+ do { \
+ CRC32C256BYTES((P), 0) \
+ CRC32C256BYTES((P), 1) \
+ CRC32C256BYTES((P), 2) \
+ CRC32C256BYTES((P), 3) \
+ (P) += 4 * SEGMENTBYTES; \
+ } while (0)
+
+namespace crc32c {
+
+uint32_t ExtendArm64(uint32_t crc, const uint8_t *buf, size_t size) {
+ int64_t length = size;
+ uint32_t crc0, crc1, crc2, crc3;
+ uint64_t t0, t1, t2;
+
+ // k0=CRC(x^(3*SEGMENTBYTES*8)), k1=CRC(x^(2*SEGMENTBYTES*8)),
+ // k2=CRC(x^(SEGMENTBYTES*8))
+ const poly64_t k0 = 0x8d96551c, k1 = 0xbd6f81f8, k2 = 0xdcb17aa4;
+
+ crc = crc ^ kCRC32Xor;
+ const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
+
+ while (length >= KBYTES) {
+ crc0 = crc;
+ crc1 = 0;
+ crc2 = 0;
+ crc3 = 0;
+
+ // Process 1024 bytes in parallel.
+ CRC32C1024BYTES(p);
+
+ // Merge the 4 partial CRC32C values.
+ t2 = (uint64_t)vmull_p64(crc2, k2);
+ t1 = (uint64_t)vmull_p64(crc1, k1);
+ t0 = (uint64_t)vmull_p64(crc0, k0);
+ crc = __crc32cd(crc3, *(uint64_t *)p);
+ p += sizeof(uint64_t);
+ crc ^= __crc32cd(0, t2);
+ crc ^= __crc32cd(0, t1);
+ crc ^= __crc32cd(0, t0);
+
+ length -= KBYTES;
+ }
+
+ while (length >= 8) {
+ crc = __crc32cd(crc, *(uint64_t *)p);
+ p += 8;
+ length -= 8;
+ }
+
+ if (length & 4) {
+ crc = __crc32cw(crc, *(uint32_t *)p);
+ p += 4;
+ }
+
+ if (length & 2) {
+ crc = __crc32ch(crc, *(uint16_t *)p);
+ p += 2;
+ }
+
+ if (length & 1) {
+ crc = __crc32cb(crc, *p);
+ }
+
+ return crc ^ kCRC32Xor;
+}
+
+} // namespace crc32c
+
+#endif // HAVE_ARM64_CRC32C
diff --git a/src/crc32c/src/crc32c_arm64.h b/src/crc32c/src/crc32c_arm64.h
new file mode 100644
index 0000000000..100cd56ec8
--- /dev/null
+++ b/src/crc32c/src/crc32c_arm64.h
@@ -0,0 +1,27 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+// ARM64-specific code providing a hardware-accelerated CRC32C implementation.
+
+#ifndef CRC32C_CRC32C_ARM_LINUX_H_
+#define CRC32C_CRC32C_ARM_LINUX_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#if HAVE_ARM64_CRC32C
+
+namespace crc32c {
+
+uint32_t ExtendArm64(uint32_t crc, const uint8_t* data, size_t count);
+
+} // namespace crc32c
+
+#endif // HAVE_ARM64_CRC32C
+
+#endif // CRC32C_CRC32C_ARM_LINUX_H_
diff --git a/src/crc32c/src/crc32c_arm64_linux_check.h b/src/crc32c/src/crc32c_arm64_linux_check.h
new file mode 100644
index 0000000000..1a20a757bb
--- /dev/null
+++ b/src/crc32c/src/crc32c_arm64_linux_check.h
@@ -0,0 +1,50 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+// ARM Linux-specific code checking for the availability of CRC32C instructions.
+
+#ifndef CRC32C_CRC32C_ARM_LINUX_CHECK_H_
+#define CRC32C_CRC32C_ARM_LINUX_CHECK_H_
+
+
+#include <cstddef>
+#include <cstdint>
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#if HAVE_ARM64_CRC32C
+
+#if HAVE_STRONG_GETAUXVAL
+#include <sys/auxv.h>
+#elif HAVE_WEAK_GETAUXVAL
+// getauxval() is not available on Android until API level 20. Link it as a weak
+// symbol.
+extern "C" unsigned long getauxval(unsigned long type) __attribute__((weak));
+
+#define AT_HWCAP 16
+#endif // HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL
+
+namespace crc32c {
+
+inline bool CanUseArm64Linux() {
+#if HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL
+ // From 'arch/arm64/include/uapi/asm/hwcap.h' in Linux kernel source code.
+ constexpr unsigned long kHWCAP_PMULL = 1 << 4;
+ constexpr unsigned long kHWCAP_CRC32 = 1 << 7;
+ unsigned long hwcap = (&getauxval != nullptr) ? getauxval(AT_HWCAP) : 0;
+ return (hwcap & (kHWCAP_PMULL | kHWCAP_CRC32)) ==
+ (kHWCAP_PMULL | kHWCAP_CRC32);
+#else
+ return false;
+#endif // HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL
+}
+
+} // namespace crc32c
+
+#endif // HAVE_ARM64_CRC32C
+
+#endif // CRC32C_CRC32C_ARM_LINUX_CHECK_H_
diff --git a/src/crc32c/src/crc32c_arm64_unittest.cc b/src/crc32c/src/crc32c_arm64_unittest.cc
new file mode 100644
index 0000000000..6f917d9c0c
--- /dev/null
+++ b/src/crc32c/src/crc32c_arm64_unittest.cc
@@ -0,0 +1,24 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "gtest/gtest.h"
+
+#include "./crc32c_arm64.h"
+#include "./crc32c_extend_unittests.h"
+
+namespace crc32c {
+
+#if HAVE_ARM64_CRC32C
+
+struct Arm64TestTraits {
+ static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
+ return ExtendArm64(crc, data, count);
+ }
+};
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Arm64, ExtendTest, Arm64TestTraits);
+
+#endif // HAVE_ARM64_CRC32C
+
+} // namespace crc32c
diff --git a/src/crc32c/src/crc32c_benchmark.cc b/src/crc32c/src/crc32c_benchmark.cc
new file mode 100644
index 0000000000..c464304b3f
--- /dev/null
+++ b/src/crc32c/src/crc32c_benchmark.cc
@@ -0,0 +1,106 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <cstddef>
+#include <cstdint>
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#include "benchmark/benchmark.h"
+
+#if CRC32C_TESTS_BUILT_WITH_GLOG
+#include "glog/logging.h"
+#endif // CRC32C_TESTS_BUILT_WITH_GLOG
+
+#include "./crc32c_arm64.h"
+#include "./crc32c_arm64_linux_check.h"
+#include "./crc32c_internal.h"
+#include "./crc32c_sse42.h"
+#include "./crc32c_sse42_check.h"
+#include "crc32c/crc32c.h"
+
+class CRC32CBenchmark : public benchmark::Fixture {
+ public:
+ void SetUp(const benchmark::State& state) override {
+ block_size_ = static_cast<size_t>(state.range(0));
+ block_data_ = std::string(block_size_, 'x');
+ block_buffer_ = reinterpret_cast<const uint8_t*>(block_data_.data());
+ }
+
+ protected:
+ std::string block_data_;
+ const uint8_t* block_buffer_;
+ size_t block_size_;
+};
+
+BENCHMARK_DEFINE_F(CRC32CBenchmark, Public)(benchmark::State& state) {
+ uint32_t crc = 0;
+ for (auto _ : state)
+ crc = crc32c::Extend(crc, block_buffer_, block_size_);
+ state.SetBytesProcessed(state.iterations() * block_size_);
+}
+BENCHMARK_REGISTER_F(CRC32CBenchmark, Public)
+ ->RangeMultiplier(16)
+ ->Range(256, 16777216); // Block size.
+
+BENCHMARK_DEFINE_F(CRC32CBenchmark, Portable)(benchmark::State& state) {
+ uint32_t crc = 0;
+ for (auto _ : state)
+ crc = crc32c::ExtendPortable(crc, block_buffer_, block_size_);
+ state.SetBytesProcessed(state.iterations() * block_size_);
+}
+BENCHMARK_REGISTER_F(CRC32CBenchmark, Portable)
+ ->RangeMultiplier(16)
+ ->Range(256, 16777216); // Block size.
+
+#if HAVE_ARM64_CRC32C
+
+BENCHMARK_DEFINE_F(CRC32CBenchmark, ArmLinux)(benchmark::State& state) {
+ if (!crc32c::CanUseArm64Linux()) {
+ state.SkipWithError("ARM CRC32C instructions not available or not enabled");
+ return;
+ }
+
+ uint32_t crc = 0;
+ for (auto _ : state)
+ crc = crc32c::ExtendArm64(crc, block_buffer_, block_size_);
+ state.SetBytesProcessed(state.iterations() * block_size_);
+}
+BENCHMARK_REGISTER_F(CRC32CBenchmark, ArmLinux)
+ ->RangeMultiplier(16)
+ ->Range(256, 16777216); // Block size.
+
+#endif // HAVE_ARM64_CRC32C
+
+#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+BENCHMARK_DEFINE_F(CRC32CBenchmark, Sse42)(benchmark::State& state) {
+ if (!crc32c::CanUseSse42()) {
+ state.SkipWithError("SSE4.2 instructions not available or not enabled");
+ return;
+ }
+
+ uint32_t crc = 0;
+ for (auto _ : state)
+ crc = crc32c::ExtendSse42(crc, block_buffer_, block_size_);
+ state.SetBytesProcessed(state.iterations() * block_size_);
+}
+BENCHMARK_REGISTER_F(CRC32CBenchmark, Sse42)
+ ->RangeMultiplier(16)
+ ->Range(256, 16777216); // Block size.
+
+#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+int main(int argc, char** argv) {
+#if CRC32C_TESTS_BUILT_WITH_GLOG
+ google::InitGoogleLogging(argv[0]);
+ google::InstallFailureSignalHandler();
+#endif // CRC32C_TESTS_BUILT_WITH_GLOG
+
+ benchmark::Initialize(&argc, argv);
+ benchmark::RunSpecifiedBenchmarks();
+ return 0;
+}
diff --git a/src/crc32c/src/crc32c_capi_unittest.c b/src/crc32c/src/crc32c_capi_unittest.c
new file mode 100644
index 0000000000..c8993a0959
--- /dev/null
+++ b/src/crc32c/src/crc32c_capi_unittest.c
@@ -0,0 +1,66 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "crc32c/crc32c.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int main() {
+ /* From rfc3720 section B.4. */
+ uint8_t buf[32];
+
+ memset(buf, 0, sizeof(buf));
+ if ((uint32_t)0x8a9136aa != crc32c_value(buf, sizeof(buf))) {
+ printf("crc32c_value(zeros) test failed\n");
+ return 1;
+ }
+
+ memset(buf, 0xff, sizeof(buf));
+ if ((uint32_t)0x62a8ab43 != crc32c_value(buf, sizeof(buf))) {
+ printf("crc32c_value(0xff) test failed\n");
+ return 1;
+ }
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = (uint8_t)i;
+ if ((uint32_t)0x46dd794e != crc32c_value(buf, sizeof(buf))) {
+ printf("crc32c_value(0..31) test failed\n");
+ return 1;
+ }
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = (uint8_t)(31 - i);
+ if ((uint32_t)0x113fdb5c != crc32c_value(buf, sizeof(buf))) {
+ printf("crc32c_value(31..0) test failed\n");
+ return 1;
+ }
+
+ uint8_t data[48] = {
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ };
+ if ((uint32_t)0xd9963a56 != crc32c_value(data, sizeof(data))) {
+    printf("crc32c_value(iscsi command pdu) test failed\n");
+ return 1;
+ }
+
+ const uint8_t* hello_space_world = (const uint8_t*)"hello world";
+ const uint8_t* hello_space = (const uint8_t*)"hello ";
+ const uint8_t* world = (const uint8_t*)"world";
+
+ if (crc32c_value(hello_space_world, 11) !=
+ crc32c_extend(crc32c_value(hello_space, 6), world, 5)) {
+ printf("crc32c_extend test failed\n");
+ return 1;
+ }
+
+ printf("All tests passed\n");
+ return 0;
+}
diff --git a/src/crc32c/src/crc32c_config.h.in b/src/crc32c/src/crc32c_config.h.in
new file mode 100644
index 0000000000..4034fa5644
--- /dev/null
+++ b/src/crc32c/src/crc32c_config.h.in
@@ -0,0 +1,36 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_CONFIG_H_
+#define CRC32C_CRC32C_CONFIG_H_
+
+// Define to 1 if building for a big-endian platform.
+#cmakedefine01 BYTE_ORDER_BIG_ENDIAN
+
+// Define to 1 if the compiler has the __builtin_prefetch intrinsic.
+#cmakedefine01 HAVE_BUILTIN_PREFETCH
+
+// Define to 1 if targeting X86 and the compiler has the _mm_prefetch intrinsic.
+#cmakedefine01 HAVE_MM_PREFETCH
+
+// Define to 1 if targeting X86 and the compiler has the _mm_crc32_u{8,32,64}
+// intrinsics.
+#cmakedefine01 HAVE_SSE42
+
+// Define to 1 if targeting ARM and the compiler has the __crc32c{b,h,w,d} and
+// the vmull_p64 intrinsics.
+#cmakedefine01 HAVE_ARM64_CRC32C
+
+// Define to 1 if the system libraries have the getauxval function in the
+// <sys/auxv.h> header. Should be true on Linux and Android API level 20+.
+#cmakedefine01 HAVE_STRONG_GETAUXVAL
+
+// Define to 1 if the compiler supports defining getauxval as a weak symbol.
+// Should be true for any compiler that supports __attribute__((weak)).
+#cmakedefine01 HAVE_WEAK_GETAUXVAL
+
+// Define to 1 if CRC32C tests have been built with Google Logging.
+#cmakedefine01 CRC32C_TESTS_BUILT_WITH_GLOG
+
+#endif // CRC32C_CRC32C_CONFIG_H_
diff --git a/src/crc32c/src/crc32c_extend_unittests.h b/src/crc32c/src/crc32c_extend_unittests.h
new file mode 100644
index 0000000000..0732973737
--- /dev/null
+++ b/src/crc32c/src/crc32c_extend_unittests.h
@@ -0,0 +1,112 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_EXTEND_UNITTESTS_H_
+#define CRC32C_CRC32C_EXTEND_UNITTESTS_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#include "gtest/gtest.h"
+
+// Common test cases for all implementations of CRC32C_Extend().
+
+namespace crc32c {
+
+template<typename TestTraits>
+class ExtendTest : public testing::Test {};
+
+TYPED_TEST_SUITE_P(ExtendTest);
+
+TYPED_TEST_P(ExtendTest, StandardResults) {
+ // From rfc3720 section B.4.
+ uint8_t buf[32];
+
+ std::memset(buf, 0, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa),
+ TypeParam::Extend(0, buf, sizeof(buf)));
+
+ std::memset(buf, 0xff, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43),
+ TypeParam::Extend(0, buf, sizeof(buf)));
+
+ for (int i = 0; i < 32; ++i)
+ buf[i] = static_cast<uint8_t>(i);
+ EXPECT_EQ(static_cast<uint32_t>(0x46dd794e),
+ TypeParam::Extend(0, buf, sizeof(buf)));
+
+ for (int i = 0; i < 32; ++i)
+ buf[i] = static_cast<uint8_t>(31 - i);
+ EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c),
+ TypeParam::Extend(0, buf, sizeof(buf)));
+
+ uint8_t data[48] = {
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ };
+ EXPECT_EQ(static_cast<uint32_t>(0xd9963a56),
+ TypeParam::Extend(0, data, sizeof(data)));
+}
+
+TYPED_TEST_P(ExtendTest, HelloWorld) {
+ const uint8_t* hello_space_world =
+ reinterpret_cast<const uint8_t*>("hello world");
+ const uint8_t* hello_space = reinterpret_cast<const uint8_t*>("hello ");
+ const uint8_t* world = reinterpret_cast<const uint8_t*>("world");
+
+ EXPECT_EQ(TypeParam::Extend(0, hello_space_world, 11),
+ TypeParam::Extend(TypeParam::Extend(0, hello_space, 6), world, 5));
+}
+
+TYPED_TEST_P(ExtendTest, BufferSlicing) {
+ uint8_t buffer[48] = {
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ };
+
+ for (size_t i = 0; i < 48; ++i) {
+ for (size_t j = i + 1; j <= 48; ++j) {
+ uint32_t crc = 0;
+
+ if (i > 0) crc = TypeParam::Extend(crc, buffer, i);
+ crc = TypeParam::Extend(crc, buffer + i, j - i);
+ if (j < 48) crc = TypeParam::Extend(crc, buffer + j, 48 - j);
+
+ EXPECT_EQ(static_cast<uint32_t>(0xd9963a56), crc);
+ }
+ }
+}
+
+TYPED_TEST_P(ExtendTest, LargeBufferSlicing) {
+ uint8_t buffer[2048];
+ for (size_t i = 0; i < 2048; i++)
+ buffer[i] = static_cast<uint8_t>(3 * i * i + 7 * i + 11);
+
+ for (size_t i = 0; i < 2048; ++i) {
+ for (size_t j = i + 1; j <= 2048; ++j) {
+ uint32_t crc = 0;
+
+ if (i > 0) crc = TypeParam::Extend(crc, buffer, i);
+ crc = TypeParam::Extend(crc, buffer + i, j - i);
+ if (j < 2048) crc = TypeParam::Extend(crc, buffer + j, 2048 - j);
+
+ EXPECT_EQ(static_cast<uint32_t>(0x36dcc753), crc);
+ }
+ }
+}
+
+REGISTER_TYPED_TEST_SUITE_P(ExtendTest,
+ StandardResults,
+ HelloWorld,
+ BufferSlicing,
+ LargeBufferSlicing);
+
+} // namespace crc32c
+
+#endif // CRC32C_CRC32C_EXTEND_UNITTESTS_H_
diff --git a/src/crc32c/src/crc32c_internal.h b/src/crc32c/src/crc32c_internal.h
new file mode 100644
index 0000000000..2bd23dea43
--- /dev/null
+++ b/src/crc32c/src/crc32c_internal.h
@@ -0,0 +1,23 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_INTERNAL_H_
+#define CRC32C_CRC32C_INTERNAL_H_
+
+// Internal functions that may change between releases.
+
+#include <cstddef>
+#include <cstdint>
+
+namespace crc32c {
+
+// Un-accelerated implementation that works on all CPUs.
+uint32_t ExtendPortable(uint32_t crc, const uint8_t* data, size_t count);
+
+// CRCs are pre- and post-conditioned by XORing with all ones.
+static constexpr const uint32_t kCRC32Xor = static_cast<uint32_t>(0xffffffffU);
+
+} // namespace crc32c
+
+#endif // CRC32C_CRC32C_INTERNAL_H_
diff --git a/src/crc32c/src/crc32c_portable.cc b/src/crc32c/src/crc32c_portable.cc
new file mode 100644
index 0000000000..31ec6eac53
--- /dev/null
+++ b/src/crc32c/src/crc32c_portable.cc
@@ -0,0 +1,351 @@
+// Copyright 2008 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "./crc32c_internal.h"
+
+#include <cstddef>
+#include <cstdint>
+
+#include "./crc32c_prefetch.h"
+#include "./crc32c_read_le.h"
+#include "./crc32c_round_up.h"
+
+namespace {
+
+const uint32_t kByteExtensionTable[256] = {
+ 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c,
+ 0x26a1e7e8, 0xd4ca64eb, 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
+ 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, 0x105ec76f, 0xe235446c,
+ 0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
+ 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc,
+ 0xbc267848, 0x4e4dfb4b, 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
+ 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, 0xaa64d611, 0x580f5512,
+ 0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
+ 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad,
+ 0x1642ae59, 0xe4292d5a, 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
+ 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, 0x417b1dbc, 0xb3109ebf,
+ 0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
+ 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f,
+ 0xed03a29b, 0x1f682198, 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
+ 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, 0xdbfc821c, 0x2997011f,
+ 0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
+ 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e,
+ 0x4767748a, 0xb50cf789, 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
+ 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, 0x7198540d, 0x83f3d70e,
+ 0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
+ 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de,
+ 0xdde0eb2a, 0x2f8b6829, 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
+ 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, 0x082f63b7, 0xfa44e0b4,
+ 0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
+ 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b,
+ 0xb4091bff, 0x466298fc, 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
+ 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, 0xa24bb5a6, 0x502036a5,
+ 0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
+ 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975,
+ 0x0e330a81, 0xfc588982, 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
+ 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, 0x38cc2a06, 0xcaa7a905,
+ 0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
+ 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8,
+ 0xe52cc12c, 0x1747422f, 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
+ 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, 0xd3d3e1ab, 0x21b862a8,
+ 0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
+ 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78,
+ 0x7fab5e8c, 0x8dc0dd8f, 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
+ 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, 0x69e9f0d5, 0x9b8273d6,
+ 0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
+ 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69,
+ 0xd5cf889d, 0x27a40b9e, 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
+ 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351};
+
+const uint32_t kStrideExtensionTable0[256] = {
+ 0x00000000, 0x30d23865, 0x61a470ca, 0x517648af, 0xc348e194, 0xf39ad9f1,
+ 0xa2ec915e, 0x923ea93b, 0x837db5d9, 0xb3af8dbc, 0xe2d9c513, 0xd20bfd76,
+ 0x4035544d, 0x70e76c28, 0x21912487, 0x11431ce2, 0x03171d43, 0x33c52526,
+ 0x62b36d89, 0x526155ec, 0xc05ffcd7, 0xf08dc4b2, 0xa1fb8c1d, 0x9129b478,
+ 0x806aa89a, 0xb0b890ff, 0xe1ced850, 0xd11ce035, 0x4322490e, 0x73f0716b,
+ 0x228639c4, 0x125401a1, 0x062e3a86, 0x36fc02e3, 0x678a4a4c, 0x57587229,
+ 0xc566db12, 0xf5b4e377, 0xa4c2abd8, 0x941093bd, 0x85538f5f, 0xb581b73a,
+ 0xe4f7ff95, 0xd425c7f0, 0x461b6ecb, 0x76c956ae, 0x27bf1e01, 0x176d2664,
+ 0x053927c5, 0x35eb1fa0, 0x649d570f, 0x544f6f6a, 0xc671c651, 0xf6a3fe34,
+ 0xa7d5b69b, 0x97078efe, 0x8644921c, 0xb696aa79, 0xe7e0e2d6, 0xd732dab3,
+ 0x450c7388, 0x75de4bed, 0x24a80342, 0x147a3b27, 0x0c5c750c, 0x3c8e4d69,
+ 0x6df805c6, 0x5d2a3da3, 0xcf149498, 0xffc6acfd, 0xaeb0e452, 0x9e62dc37,
+ 0x8f21c0d5, 0xbff3f8b0, 0xee85b01f, 0xde57887a, 0x4c692141, 0x7cbb1924,
+ 0x2dcd518b, 0x1d1f69ee, 0x0f4b684f, 0x3f99502a, 0x6eef1885, 0x5e3d20e0,
+ 0xcc0389db, 0xfcd1b1be, 0xada7f911, 0x9d75c174, 0x8c36dd96, 0xbce4e5f3,
+ 0xed92ad5c, 0xdd409539, 0x4f7e3c02, 0x7fac0467, 0x2eda4cc8, 0x1e0874ad,
+ 0x0a724f8a, 0x3aa077ef, 0x6bd63f40, 0x5b040725, 0xc93aae1e, 0xf9e8967b,
+ 0xa89eded4, 0x984ce6b1, 0x890ffa53, 0xb9ddc236, 0xe8ab8a99, 0xd879b2fc,
+ 0x4a471bc7, 0x7a9523a2, 0x2be36b0d, 0x1b315368, 0x096552c9, 0x39b76aac,
+ 0x68c12203, 0x58131a66, 0xca2db35d, 0xfaff8b38, 0xab89c397, 0x9b5bfbf2,
+ 0x8a18e710, 0xbacadf75, 0xebbc97da, 0xdb6eafbf, 0x49500684, 0x79823ee1,
+ 0x28f4764e, 0x18264e2b, 0x18b8ea18, 0x286ad27d, 0x791c9ad2, 0x49cea2b7,
+ 0xdbf00b8c, 0xeb2233e9, 0xba547b46, 0x8a864323, 0x9bc55fc1, 0xab1767a4,
+ 0xfa612f0b, 0xcab3176e, 0x588dbe55, 0x685f8630, 0x3929ce9f, 0x09fbf6fa,
+ 0x1baff75b, 0x2b7dcf3e, 0x7a0b8791, 0x4ad9bff4, 0xd8e716cf, 0xe8352eaa,
+ 0xb9436605, 0x89915e60, 0x98d24282, 0xa8007ae7, 0xf9763248, 0xc9a40a2d,
+ 0x5b9aa316, 0x6b489b73, 0x3a3ed3dc, 0x0aecebb9, 0x1e96d09e, 0x2e44e8fb,
+ 0x7f32a054, 0x4fe09831, 0xddde310a, 0xed0c096f, 0xbc7a41c0, 0x8ca879a5,
+ 0x9deb6547, 0xad395d22, 0xfc4f158d, 0xcc9d2de8, 0x5ea384d3, 0x6e71bcb6,
+ 0x3f07f419, 0x0fd5cc7c, 0x1d81cddd, 0x2d53f5b8, 0x7c25bd17, 0x4cf78572,
+ 0xdec92c49, 0xee1b142c, 0xbf6d5c83, 0x8fbf64e6, 0x9efc7804, 0xae2e4061,
+ 0xff5808ce, 0xcf8a30ab, 0x5db49990, 0x6d66a1f5, 0x3c10e95a, 0x0cc2d13f,
+ 0x14e49f14, 0x2436a771, 0x7540efde, 0x4592d7bb, 0xd7ac7e80, 0xe77e46e5,
+ 0xb6080e4a, 0x86da362f, 0x97992acd, 0xa74b12a8, 0xf63d5a07, 0xc6ef6262,
+ 0x54d1cb59, 0x6403f33c, 0x3575bb93, 0x05a783f6, 0x17f38257, 0x2721ba32,
+ 0x7657f29d, 0x4685caf8, 0xd4bb63c3, 0xe4695ba6, 0xb51f1309, 0x85cd2b6c,
+ 0x948e378e, 0xa45c0feb, 0xf52a4744, 0xc5f87f21, 0x57c6d61a, 0x6714ee7f,
+ 0x3662a6d0, 0x06b09eb5, 0x12caa592, 0x22189df7, 0x736ed558, 0x43bced3d,
+ 0xd1824406, 0xe1507c63, 0xb02634cc, 0x80f40ca9, 0x91b7104b, 0xa165282e,
+ 0xf0136081, 0xc0c158e4, 0x52fff1df, 0x622dc9ba, 0x335b8115, 0x0389b970,
+ 0x11ddb8d1, 0x210f80b4, 0x7079c81b, 0x40abf07e, 0xd2955945, 0xe2476120,
+ 0xb331298f, 0x83e311ea, 0x92a00d08, 0xa272356d, 0xf3047dc2, 0xc3d645a7,
+ 0x51e8ec9c, 0x613ad4f9, 0x304c9c56, 0x009ea433};
+
+const uint32_t kStrideExtensionTable1[256] = {
+ 0x00000000, 0x54075546, 0xa80eaa8c, 0xfc09ffca, 0x55f123e9, 0x01f676af,
+ 0xfdff8965, 0xa9f8dc23, 0xabe247d2, 0xffe51294, 0x03eced5e, 0x57ebb818,
+ 0xfe13643b, 0xaa14317d, 0x561dceb7, 0x021a9bf1, 0x5228f955, 0x062fac13,
+ 0xfa2653d9, 0xae21069f, 0x07d9dabc, 0x53de8ffa, 0xafd77030, 0xfbd02576,
+ 0xf9cabe87, 0xadcdebc1, 0x51c4140b, 0x05c3414d, 0xac3b9d6e, 0xf83cc828,
+ 0x043537e2, 0x503262a4, 0xa451f2aa, 0xf056a7ec, 0x0c5f5826, 0x58580d60,
+ 0xf1a0d143, 0xa5a78405, 0x59ae7bcf, 0x0da92e89, 0x0fb3b578, 0x5bb4e03e,
+ 0xa7bd1ff4, 0xf3ba4ab2, 0x5a429691, 0x0e45c3d7, 0xf24c3c1d, 0xa64b695b,
+ 0xf6790bff, 0xa27e5eb9, 0x5e77a173, 0x0a70f435, 0xa3882816, 0xf78f7d50,
+ 0x0b86829a, 0x5f81d7dc, 0x5d9b4c2d, 0x099c196b, 0xf595e6a1, 0xa192b3e7,
+ 0x086a6fc4, 0x5c6d3a82, 0xa064c548, 0xf463900e, 0x4d4f93a5, 0x1948c6e3,
+ 0xe5413929, 0xb1466c6f, 0x18beb04c, 0x4cb9e50a, 0xb0b01ac0, 0xe4b74f86,
+ 0xe6add477, 0xb2aa8131, 0x4ea37efb, 0x1aa42bbd, 0xb35cf79e, 0xe75ba2d8,
+ 0x1b525d12, 0x4f550854, 0x1f676af0, 0x4b603fb6, 0xb769c07c, 0xe36e953a,
+ 0x4a964919, 0x1e911c5f, 0xe298e395, 0xb69fb6d3, 0xb4852d22, 0xe0827864,
+ 0x1c8b87ae, 0x488cd2e8, 0xe1740ecb, 0xb5735b8d, 0x497aa447, 0x1d7df101,
+ 0xe91e610f, 0xbd193449, 0x4110cb83, 0x15179ec5, 0xbcef42e6, 0xe8e817a0,
+ 0x14e1e86a, 0x40e6bd2c, 0x42fc26dd, 0x16fb739b, 0xeaf28c51, 0xbef5d917,
+ 0x170d0534, 0x430a5072, 0xbf03afb8, 0xeb04fafe, 0xbb36985a, 0xef31cd1c,
+ 0x133832d6, 0x473f6790, 0xeec7bbb3, 0xbac0eef5, 0x46c9113f, 0x12ce4479,
+ 0x10d4df88, 0x44d38ace, 0xb8da7504, 0xecdd2042, 0x4525fc61, 0x1122a927,
+ 0xed2b56ed, 0xb92c03ab, 0x9a9f274a, 0xce98720c, 0x32918dc6, 0x6696d880,
+ 0xcf6e04a3, 0x9b6951e5, 0x6760ae2f, 0x3367fb69, 0x317d6098, 0x657a35de,
+ 0x9973ca14, 0xcd749f52, 0x648c4371, 0x308b1637, 0xcc82e9fd, 0x9885bcbb,
+ 0xc8b7de1f, 0x9cb08b59, 0x60b97493, 0x34be21d5, 0x9d46fdf6, 0xc941a8b0,
+ 0x3548577a, 0x614f023c, 0x635599cd, 0x3752cc8b, 0xcb5b3341, 0x9f5c6607,
+ 0x36a4ba24, 0x62a3ef62, 0x9eaa10a8, 0xcaad45ee, 0x3eced5e0, 0x6ac980a6,
+ 0x96c07f6c, 0xc2c72a2a, 0x6b3ff609, 0x3f38a34f, 0xc3315c85, 0x973609c3,
+ 0x952c9232, 0xc12bc774, 0x3d2238be, 0x69256df8, 0xc0ddb1db, 0x94dae49d,
+ 0x68d31b57, 0x3cd44e11, 0x6ce62cb5, 0x38e179f3, 0xc4e88639, 0x90efd37f,
+ 0x39170f5c, 0x6d105a1a, 0x9119a5d0, 0xc51ef096, 0xc7046b67, 0x93033e21,
+ 0x6f0ac1eb, 0x3b0d94ad, 0x92f5488e, 0xc6f21dc8, 0x3afbe202, 0x6efcb744,
+ 0xd7d0b4ef, 0x83d7e1a9, 0x7fde1e63, 0x2bd94b25, 0x82219706, 0xd626c240,
+ 0x2a2f3d8a, 0x7e2868cc, 0x7c32f33d, 0x2835a67b, 0xd43c59b1, 0x803b0cf7,
+ 0x29c3d0d4, 0x7dc48592, 0x81cd7a58, 0xd5ca2f1e, 0x85f84dba, 0xd1ff18fc,
+ 0x2df6e736, 0x79f1b270, 0xd0096e53, 0x840e3b15, 0x7807c4df, 0x2c009199,
+ 0x2e1a0a68, 0x7a1d5f2e, 0x8614a0e4, 0xd213f5a2, 0x7beb2981, 0x2fec7cc7,
+ 0xd3e5830d, 0x87e2d64b, 0x73814645, 0x27861303, 0xdb8fecc9, 0x8f88b98f,
+ 0x267065ac, 0x727730ea, 0x8e7ecf20, 0xda799a66, 0xd8630197, 0x8c6454d1,
+ 0x706dab1b, 0x246afe5d, 0x8d92227e, 0xd9957738, 0x259c88f2, 0x719bddb4,
+ 0x21a9bf10, 0x75aeea56, 0x89a7159c, 0xdda040da, 0x74589cf9, 0x205fc9bf,
+ 0xdc563675, 0x88516333, 0x8a4bf8c2, 0xde4cad84, 0x2245524e, 0x76420708,
+ 0xdfbadb2b, 0x8bbd8e6d, 0x77b471a7, 0x23b324e1};
+
+const uint32_t kStrideExtensionTable2[256] = {
+ 0x00000000, 0x678efd01, 0xcf1dfa02, 0xa8930703, 0x9bd782f5, 0xfc597ff4,
+ 0x54ca78f7, 0x334485f6, 0x3243731b, 0x55cd8e1a, 0xfd5e8919, 0x9ad07418,
+ 0xa994f1ee, 0xce1a0cef, 0x66890bec, 0x0107f6ed, 0x6486e636, 0x03081b37,
+ 0xab9b1c34, 0xcc15e135, 0xff5164c3, 0x98df99c2, 0x304c9ec1, 0x57c263c0,
+ 0x56c5952d, 0x314b682c, 0x99d86f2f, 0xfe56922e, 0xcd1217d8, 0xaa9cead9,
+ 0x020fedda, 0x658110db, 0xc90dcc6c, 0xae83316d, 0x0610366e, 0x619ecb6f,
+ 0x52da4e99, 0x3554b398, 0x9dc7b49b, 0xfa49499a, 0xfb4ebf77, 0x9cc04276,
+ 0x34534575, 0x53ddb874, 0x60993d82, 0x0717c083, 0xaf84c780, 0xc80a3a81,
+ 0xad8b2a5a, 0xca05d75b, 0x6296d058, 0x05182d59, 0x365ca8af, 0x51d255ae,
+ 0xf94152ad, 0x9ecfafac, 0x9fc85941, 0xf846a440, 0x50d5a343, 0x375b5e42,
+ 0x041fdbb4, 0x639126b5, 0xcb0221b6, 0xac8cdcb7, 0x97f7ee29, 0xf0791328,
+ 0x58ea142b, 0x3f64e92a, 0x0c206cdc, 0x6bae91dd, 0xc33d96de, 0xa4b36bdf,
+ 0xa5b49d32, 0xc23a6033, 0x6aa96730, 0x0d279a31, 0x3e631fc7, 0x59ede2c6,
+ 0xf17ee5c5, 0x96f018c4, 0xf371081f, 0x94fff51e, 0x3c6cf21d, 0x5be20f1c,
+ 0x68a68aea, 0x0f2877eb, 0xa7bb70e8, 0xc0358de9, 0xc1327b04, 0xa6bc8605,
+ 0x0e2f8106, 0x69a17c07, 0x5ae5f9f1, 0x3d6b04f0, 0x95f803f3, 0xf276fef2,
+ 0x5efa2245, 0x3974df44, 0x91e7d847, 0xf6692546, 0xc52da0b0, 0xa2a35db1,
+ 0x0a305ab2, 0x6dbea7b3, 0x6cb9515e, 0x0b37ac5f, 0xa3a4ab5c, 0xc42a565d,
+ 0xf76ed3ab, 0x90e02eaa, 0x387329a9, 0x5ffdd4a8, 0x3a7cc473, 0x5df23972,
+ 0xf5613e71, 0x92efc370, 0xa1ab4686, 0xc625bb87, 0x6eb6bc84, 0x09384185,
+ 0x083fb768, 0x6fb14a69, 0xc7224d6a, 0xa0acb06b, 0x93e8359d, 0xf466c89c,
+ 0x5cf5cf9f, 0x3b7b329e, 0x2a03aaa3, 0x4d8d57a2, 0xe51e50a1, 0x8290ada0,
+ 0xb1d42856, 0xd65ad557, 0x7ec9d254, 0x19472f55, 0x1840d9b8, 0x7fce24b9,
+ 0xd75d23ba, 0xb0d3debb, 0x83975b4d, 0xe419a64c, 0x4c8aa14f, 0x2b045c4e,
+ 0x4e854c95, 0x290bb194, 0x8198b697, 0xe6164b96, 0xd552ce60, 0xb2dc3361,
+ 0x1a4f3462, 0x7dc1c963, 0x7cc63f8e, 0x1b48c28f, 0xb3dbc58c, 0xd455388d,
+ 0xe711bd7b, 0x809f407a, 0x280c4779, 0x4f82ba78, 0xe30e66cf, 0x84809bce,
+ 0x2c139ccd, 0x4b9d61cc, 0x78d9e43a, 0x1f57193b, 0xb7c41e38, 0xd04ae339,
+ 0xd14d15d4, 0xb6c3e8d5, 0x1e50efd6, 0x79de12d7, 0x4a9a9721, 0x2d146a20,
+ 0x85876d23, 0xe2099022, 0x878880f9, 0xe0067df8, 0x48957afb, 0x2f1b87fa,
+ 0x1c5f020c, 0x7bd1ff0d, 0xd342f80e, 0xb4cc050f, 0xb5cbf3e2, 0xd2450ee3,
+ 0x7ad609e0, 0x1d58f4e1, 0x2e1c7117, 0x49928c16, 0xe1018b15, 0x868f7614,
+ 0xbdf4448a, 0xda7ab98b, 0x72e9be88, 0x15674389, 0x2623c67f, 0x41ad3b7e,
+ 0xe93e3c7d, 0x8eb0c17c, 0x8fb73791, 0xe839ca90, 0x40aacd93, 0x27243092,
+ 0x1460b564, 0x73ee4865, 0xdb7d4f66, 0xbcf3b267, 0xd972a2bc, 0xbefc5fbd,
+ 0x166f58be, 0x71e1a5bf, 0x42a52049, 0x252bdd48, 0x8db8da4b, 0xea36274a,
+ 0xeb31d1a7, 0x8cbf2ca6, 0x242c2ba5, 0x43a2d6a4, 0x70e65352, 0x1768ae53,
+ 0xbffba950, 0xd8755451, 0x74f988e6, 0x137775e7, 0xbbe472e4, 0xdc6a8fe5,
+ 0xef2e0a13, 0x88a0f712, 0x2033f011, 0x47bd0d10, 0x46bafbfd, 0x213406fc,
+ 0x89a701ff, 0xee29fcfe, 0xdd6d7908, 0xbae38409, 0x1270830a, 0x75fe7e0b,
+ 0x107f6ed0, 0x77f193d1, 0xdf6294d2, 0xb8ec69d3, 0x8ba8ec25, 0xec261124,
+ 0x44b51627, 0x233beb26, 0x223c1dcb, 0x45b2e0ca, 0xed21e7c9, 0x8aaf1ac8,
+ 0xb9eb9f3e, 0xde65623f, 0x76f6653c, 0x1178983d};
+
+const uint32_t kStrideExtensionTable3[256] = {
+ 0x00000000, 0xf20c0dfe, 0xe1f46d0d, 0x13f860f3, 0xc604aceb, 0x3408a115,
+ 0x27f0c1e6, 0xd5fccc18, 0x89e52f27, 0x7be922d9, 0x6811422a, 0x9a1d4fd4,
+ 0x4fe183cc, 0xbded8e32, 0xae15eec1, 0x5c19e33f, 0x162628bf, 0xe42a2541,
+ 0xf7d245b2, 0x05de484c, 0xd0228454, 0x222e89aa, 0x31d6e959, 0xc3dae4a7,
+ 0x9fc30798, 0x6dcf0a66, 0x7e376a95, 0x8c3b676b, 0x59c7ab73, 0xabcba68d,
+ 0xb833c67e, 0x4a3fcb80, 0x2c4c517e, 0xde405c80, 0xcdb83c73, 0x3fb4318d,
+ 0xea48fd95, 0x1844f06b, 0x0bbc9098, 0xf9b09d66, 0xa5a97e59, 0x57a573a7,
+ 0x445d1354, 0xb6511eaa, 0x63add2b2, 0x91a1df4c, 0x8259bfbf, 0x7055b241,
+ 0x3a6a79c1, 0xc866743f, 0xdb9e14cc, 0x29921932, 0xfc6ed52a, 0x0e62d8d4,
+ 0x1d9ab827, 0xef96b5d9, 0xb38f56e6, 0x41835b18, 0x527b3beb, 0xa0773615,
+ 0x758bfa0d, 0x8787f7f3, 0x947f9700, 0x66739afe, 0x5898a2fc, 0xaa94af02,
+ 0xb96ccff1, 0x4b60c20f, 0x9e9c0e17, 0x6c9003e9, 0x7f68631a, 0x8d646ee4,
+ 0xd17d8ddb, 0x23718025, 0x3089e0d6, 0xc285ed28, 0x17792130, 0xe5752cce,
+ 0xf68d4c3d, 0x048141c3, 0x4ebe8a43, 0xbcb287bd, 0xaf4ae74e, 0x5d46eab0,
+ 0x88ba26a8, 0x7ab62b56, 0x694e4ba5, 0x9b42465b, 0xc75ba564, 0x3557a89a,
+ 0x26afc869, 0xd4a3c597, 0x015f098f, 0xf3530471, 0xe0ab6482, 0x12a7697c,
+ 0x74d4f382, 0x86d8fe7c, 0x95209e8f, 0x672c9371, 0xb2d05f69, 0x40dc5297,
+ 0x53243264, 0xa1283f9a, 0xfd31dca5, 0x0f3dd15b, 0x1cc5b1a8, 0xeec9bc56,
+ 0x3b35704e, 0xc9397db0, 0xdac11d43, 0x28cd10bd, 0x62f2db3d, 0x90fed6c3,
+ 0x8306b630, 0x710abbce, 0xa4f677d6, 0x56fa7a28, 0x45021adb, 0xb70e1725,
+ 0xeb17f41a, 0x191bf9e4, 0x0ae39917, 0xf8ef94e9, 0x2d1358f1, 0xdf1f550f,
+ 0xcce735fc, 0x3eeb3802, 0xb13145f8, 0x433d4806, 0x50c528f5, 0xa2c9250b,
+ 0x7735e913, 0x8539e4ed, 0x96c1841e, 0x64cd89e0, 0x38d46adf, 0xcad86721,
+ 0xd92007d2, 0x2b2c0a2c, 0xfed0c634, 0x0cdccbca, 0x1f24ab39, 0xed28a6c7,
+ 0xa7176d47, 0x551b60b9, 0x46e3004a, 0xb4ef0db4, 0x6113c1ac, 0x931fcc52,
+ 0x80e7aca1, 0x72eba15f, 0x2ef24260, 0xdcfe4f9e, 0xcf062f6d, 0x3d0a2293,
+ 0xe8f6ee8b, 0x1afae375, 0x09028386, 0xfb0e8e78, 0x9d7d1486, 0x6f711978,
+ 0x7c89798b, 0x8e857475, 0x5b79b86d, 0xa975b593, 0xba8dd560, 0x4881d89e,
+ 0x14983ba1, 0xe694365f, 0xf56c56ac, 0x07605b52, 0xd29c974a, 0x20909ab4,
+ 0x3368fa47, 0xc164f7b9, 0x8b5b3c39, 0x795731c7, 0x6aaf5134, 0x98a35cca,
+ 0x4d5f90d2, 0xbf539d2c, 0xacabfddf, 0x5ea7f021, 0x02be131e, 0xf0b21ee0,
+ 0xe34a7e13, 0x114673ed, 0xc4babff5, 0x36b6b20b, 0x254ed2f8, 0xd742df06,
+ 0xe9a9e704, 0x1ba5eafa, 0x085d8a09, 0xfa5187f7, 0x2fad4bef, 0xdda14611,
+ 0xce5926e2, 0x3c552b1c, 0x604cc823, 0x9240c5dd, 0x81b8a52e, 0x73b4a8d0,
+ 0xa64864c8, 0x54446936, 0x47bc09c5, 0xb5b0043b, 0xff8fcfbb, 0x0d83c245,
+ 0x1e7ba2b6, 0xec77af48, 0x398b6350, 0xcb876eae, 0xd87f0e5d, 0x2a7303a3,
+ 0x766ae09c, 0x8466ed62, 0x979e8d91, 0x6592806f, 0xb06e4c77, 0x42624189,
+ 0x519a217a, 0xa3962c84, 0xc5e5b67a, 0x37e9bb84, 0x2411db77, 0xd61dd689,
+ 0x03e11a91, 0xf1ed176f, 0xe215779c, 0x10197a62, 0x4c00995d, 0xbe0c94a3,
+ 0xadf4f450, 0x5ff8f9ae, 0x8a0435b6, 0x78083848, 0x6bf058bb, 0x99fc5545,
+ 0xd3c39ec5, 0x21cf933b, 0x3237f3c8, 0xc03bfe36, 0x15c7322e, 0xe7cb3fd0,
+ 0xf4335f23, 0x063f52dd, 0x5a26b1e2, 0xa82abc1c, 0xbbd2dcef, 0x49ded111,
+ 0x9c221d09, 0x6e2e10f7, 0x7dd67004, 0x8fda7dfa};
+
+constexpr const ptrdiff_t kPrefetchHorizon = 256;
+
+} // namespace
+
+namespace crc32c {
+
+uint32_t ExtendPortable(uint32_t crc, const uint8_t* data, size_t size) {
+ const uint8_t* p = data;
+ const uint8_t* e = p + size;
+ uint32_t l = crc ^ kCRC32Xor;
+
+// Process one byte at a time.
+#define STEP1 \
+ do { \
+ int c = (l & 0xff) ^ *p++; \
+ l = kByteExtensionTable[c] ^ (l >> 8); \
+ } while (0)
+
+// Process one of the 4 strides of 4-byte data.
+#define STEP4(s) \
+ do { \
+ crc##s = ReadUint32LE(p + s * 4) ^ kStrideExtensionTable3[crc##s & 0xff] ^ \
+ kStrideExtensionTable2[(crc##s >> 8) & 0xff] ^ \
+ kStrideExtensionTable1[(crc##s >> 16) & 0xff] ^ \
+ kStrideExtensionTable0[crc##s >> 24]; \
+ } while (0)
+
+// Process a 16-byte swath of 4 strides, each of which has 4 bytes of data.
+#define STEP16 \
+ do { \
+ STEP4(0); \
+ STEP4(1); \
+ STEP4(2); \
+ STEP4(3); \
+ p += 16; \
+ } while (0)
+
+// Process 4 bytes that were already loaded into a word.
+#define STEP4W(w) \
+ do { \
+ w ^= l; \
+ for (size_t i = 0; i < 4; ++i) { \
+ w = (w >> 8) ^ kByteExtensionTable[w & 0xff]; \
+ } \
+ l = w; \
+ } while (0)
+
+ // Point x at first 4-byte aligned byte in the buffer. This might be past the
+ // end of the buffer.
+ const uint8_t* x = RoundUp<4>(p);
+ if (x <= e) {
+ // Process bytes until p is 4-byte aligned.
+ while (p != x) {
+ STEP1;
+ }
+ }
+
+ if ((e - p) >= 16) {
+ // Load a 16-byte swath into the stride partial results.
+ uint32_t crc0 = ReadUint32LE(p + 0 * 4) ^ l;
+ uint32_t crc1 = ReadUint32LE(p + 1 * 4);
+ uint32_t crc2 = ReadUint32LE(p + 2 * 4);
+ uint32_t crc3 = ReadUint32LE(p + 3 * 4);
+ p += 16;
+
+ while ((e - p) > kPrefetchHorizon) {
+ RequestPrefetch(p + kPrefetchHorizon);
+
+ // Process 64 bytes at a time.
+ STEP16;
+ STEP16;
+ STEP16;
+ STEP16;
+ }
+
+ // Process one 16-byte swath at a time.
+ while ((e - p) >= 16) {
+ STEP16;
+ }
+
+ // Advance one word at a time as far as possible.
+ while ((e - p) >= 4) {
+ STEP4(0);
+ uint32_t tmp = crc0;
+ crc0 = crc1;
+ crc1 = crc2;
+ crc2 = crc3;
+ crc3 = tmp;
+ p += 4;
+ }
+
+ // Combine the 4 partial stride results.
+ l = 0;
+ STEP4W(crc0);
+ STEP4W(crc1);
+ STEP4W(crc2);
+ STEP4W(crc3);
+ }
+
+ // Process the last few bytes.
+ while (p != e) {
+ STEP1;
+ }
+#undef STEP4W
+#undef STEP16
+#undef STEP4
+#undef STEP1
+ return l ^ kCRC32Xor;
+}
+
+} // namespace crc32c
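
The STEP1 macro above is the classic byte-at-a-time, table-driven CRC32C recurrence. A self-contained sketch of the same idea, deriving the 256-entry table from the reflected Castagnoli polynomial 0x82f63b78 instead of using the precomputed kByteExtensionTable (the function name is illustrative, not part of the library):

#include <array>
#include <cstddef>
#include <cstdint>

// Byte-at-a-time CRC32C: fold one input byte into the low byte of the running
// CRC, then shift and apply the 8-bit remainder update from the table.
uint32_t SimpleExtendPortable(uint32_t crc, const uint8_t* data, size_t size) {
  static const std::array<uint32_t, 256> table = [] {
    std::array<uint32_t, 256> t{};
    for (uint32_t i = 0; i < 256; ++i) {
      uint32_t entry = i;
      for (int bit = 0; bit < 8; ++bit)
        entry = (entry >> 1) ^ ((entry & 1) ? 0x82f63b78u : 0u);
      t[i] = entry;
    }
    return t;
  }();
  uint32_t l = crc ^ 0xffffffffu;  // pre-condition, same role as kCRC32Xor
  for (size_t i = 0; i < size; ++i)
    l = table[(l ^ data[i]) & 0xff] ^ (l >> 8);
  return l ^ 0xffffffffu;  // post-condition
}

The portable implementation above gets its speed from processing four interleaved 4-byte strides per iteration; the sketch trades that throughput for brevity.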
diff --git a/src/crc32c/src/crc32c_portable_unittest.cc b/src/crc32c/src/crc32c_portable_unittest.cc
new file mode 100644
index 0000000000..5098e2c373
--- /dev/null
+++ b/src/crc32c/src/crc32c_portable_unittest.cc
@@ -0,0 +1,20 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "gtest/gtest.h"
+
+#include "./crc32c_extend_unittests.h"
+#include "./crc32c_internal.h"
+
+namespace crc32c {
+
+struct PortableTestTraits {
+ static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
+ return ExtendPortable(crc, data, count);
+ }
+};
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Portable, ExtendTest, PortableTestTraits);
+
+} // namespace crc32c
diff --git a/src/crc32c/src/crc32c_prefetch.h b/src/crc32c/src/crc32c_prefetch.h
new file mode 100644
index 0000000000..aec7d54e84
--- /dev/null
+++ b/src/crc32c/src/crc32c_prefetch.h
@@ -0,0 +1,46 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_PREFETCH_H_
+#define CRC32C_CRC32C_PREFETCH_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#if HAVE_MM_PREFETCH
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#else // !defined(_MSC_VER)
+#include <xmmintrin.h>
+#endif // defined(_MSC_VER)
+
+#endif // HAVE_MM_PREFETCH
+
+namespace crc32c {
+
+// Ask the hardware to prefetch the data at the given address into the L1 cache.
+inline void RequestPrefetch(const uint8_t* address) {
+#if HAVE_BUILTIN_PREFETCH
+ // Clang and GCC implement the __builtin_prefetch non-standard extension,
+ // which maps to the best instruction on the target architecture.
+ __builtin_prefetch(reinterpret_cast<const char*>(address), 0 /* Read only. */,
+ 0 /* No temporal locality. */);
+#elif HAVE_MM_PREFETCH
+ // Visual Studio doesn't implement __builtin_prefetch, but exposes the
+ // PREFETCHNTA instruction via the _mm_prefetch intrinsic.
+ _mm_prefetch(reinterpret_cast<const char*>(address), _MM_HINT_NTA);
+#else
+ // No prefetch support. Silence compiler warnings.
+ (void)address;
+#endif // HAVE_BUILTIN_PREFETCH
+}
+
+} // namespace crc32c
+
+#endif // CRC32C_CRC32C_PREFETCH_H_
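
The prefetch-ahead pattern that the Extend loops build on can be shown in isolation: hint the address one horizon ahead of the current scan position so the data is already in cache when the loop reaches it. A sketch assuming only this header; the function and constant names are illustrative:

#include <cstddef>
#include <cstdint>

#include "./crc32c_prefetch.h"

namespace crc32c {

inline uint64_t SumWithPrefetch(const uint8_t* data, size_t size) {
  constexpr ptrdiff_t kHorizon = 256;  // mirrors kPrefetchHorizon in the .cc files
  uint64_t sum = 0;
  const uint8_t* p = data;
  const uint8_t* e = data + size;
  while (e - p > kHorizon) {
    RequestPrefetch(p + kHorizon);  // a hint only; compiles to nothing if unsupported
    for (int i = 0; i < 64; ++i) sum += *p++;  // process one 64-byte chunk
  }
  while (p != e) sum += *p++;  // tail without prefetching
  return sum;
}

}  // namespace crc32c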
diff --git a/src/crc32c/src/crc32c_prefetch_unittest.cc b/src/crc32c/src/crc32c_prefetch_unittest.cc
new file mode 100644
index 0000000000..b34ed2d5fe
--- /dev/null
+++ b/src/crc32c/src/crc32c_prefetch_unittest.cc
@@ -0,0 +1,9 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "./crc32c_prefetch.h"
+
+// There is no easy way to test cache prefetching. The most this file can check
+// is that the crc32c_prefetch.h header compiles on its own, which confirms that
+// it has no unstated dependencies.
diff --git a/src/crc32c/src/crc32c_read_le.h b/src/crc32c/src/crc32c_read_le.h
new file mode 100644
index 0000000000..3bd45fe3aa
--- /dev/null
+++ b/src/crc32c/src/crc32c_read_le.h
@@ -0,0 +1,53 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_READ_LE_H_
+#define CRC32C_CRC32C_READ_LE_H_
+
+#include <cstdint>
+#include <cstring>
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+namespace crc32c {
+
+// Reads a little-endian 32-bit integer from a 32-bit-aligned buffer.
+inline uint32_t ReadUint32LE(const uint8_t* buffer) {
+#if BYTE_ORDER_BIG_ENDIAN
+ return ((static_cast<uint32_t>(static_cast<uint8_t>(buffer[0]))) |
+ (static_cast<uint32_t>(static_cast<uint8_t>(buffer[1])) << 8) |
+ (static_cast<uint32_t>(static_cast<uint8_t>(buffer[2])) << 16) |
+ (static_cast<uint32_t>(static_cast<uint8_t>(buffer[3])) << 24));
+#else // !BYTE_ORDER_BIG_ENDIAN
+ uint32_t result;
+ // This should be optimized to a single instruction.
+ std::memcpy(&result, buffer, sizeof(result));
+ return result;
+#endif // BYTE_ORDER_BIG_ENDIAN
+}
+
+// Reads a little-endian 64-bit integer from a 64-bit-aligned buffer.
+inline uint64_t ReadUint64LE(const uint8_t* buffer) {
+#if BYTE_ORDER_BIG_ENDIAN
+ return ((static_cast<uint64_t>(static_cast<uint8_t>(buffer[0]))) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[1])) << 8) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[2])) << 16) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[3])) << 24) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[4])) << 32) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[5])) << 40) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[6])) << 48) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[7])) << 56));
+#else // !BYTE_ORDER_BIG_ENDIAN
+ uint64_t result;
+ // This should be optimized to a single instruction.
+ std::memcpy(&result, buffer, sizeof(result));
+ return result;
+#endif // BYTE_ORDER_BIG_ENDIAN
+}
+
+} // namespace crc32c
+
+#endif // CRC32C_CRC32C_READ_LE_H_
diff --git a/src/crc32c/src/crc32c_read_le_unittest.cc b/src/crc32c/src/crc32c_read_le_unittest.cc
new file mode 100644
index 0000000000..2a30302adf
--- /dev/null
+++ b/src/crc32c/src/crc32c_read_le_unittest.cc
@@ -0,0 +1,32 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "./crc32c_read_le.h"
+
+#include <cstddef>
+#include <cstdint>
+
+#include "gtest/gtest.h"
+
+#include "./crc32c_round_up.h"
+
+namespace crc32c {
+
+TEST(Crc32CReadLETest, ReadUint32LE) {
+ // little-endian 0x12345678
+ alignas(4) uint8_t bytes[] = {0x78, 0x56, 0x34, 0x12};
+
+ ASSERT_EQ(RoundUp<4>(bytes), bytes) << "Stack array is not aligned";
+ EXPECT_EQ(static_cast<uint32_t>(0x12345678), ReadUint32LE(bytes));
+}
+
+TEST(Crc32CReadLETest, ReadUint64LE) {
+ // little-endian 0x123456789ABCDEF0
+ alignas(8) uint8_t bytes[] = {0xF0, 0xDE, 0xBC, 0x9A, 0x78, 0x56, 0x34, 0x12};
+
+ ASSERT_EQ(RoundUp<8>(bytes), bytes) << "Stack array is not aligned";
+ EXPECT_EQ(static_cast<uint64_t>(0x123456789ABCDEF0), ReadUint64LE(bytes));
+}
+
+} // namespace crc32c
diff --git a/src/crc32c/src/crc32c_round_up.h b/src/crc32c/src/crc32c_round_up.h
new file mode 100644
index 0000000000..d3b922beb9
--- /dev/null
+++ b/src/crc32c/src/crc32c_round_up.h
@@ -0,0 +1,34 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_ROUND_UP_H_
+#define CRC32C_CRC32C_ROUND_UP_H_
+
+#include <cstddef>
+#include <cstdint>
+
+namespace crc32c {
+
+// Returns the smallest number >= the given number that is evenly divisible by N.
+//
+// N must be a power of two.
+template <int N>
+constexpr inline uintptr_t RoundUp(uintptr_t pointer) {
+ static_assert((N & (N - 1)) == 0, "N must be a power of two");
+ return (pointer + (N - 1)) & ~(N - 1);
+}
+
+// Returns the smallest address >= the given address that is aligned to N bytes.
+//
+// N must be a power of two.
+template <int N>
+constexpr inline const uint8_t* RoundUp(const uint8_t* pointer) {
+ static_assert((N & (N - 1)) == 0, "N must be a power of two");
+ return reinterpret_cast<uint8_t*>(
+ RoundUp<N>(reinterpret_cast<uintptr_t>(pointer)));
+}
+
+} // namespace crc32c
+
+#endif // CRC32C_CRC32C_ROUND_UP_H_
diff --git a/src/crc32c/src/crc32c_round_up_unittest.cc b/src/crc32c/src/crc32c_round_up_unittest.cc
new file mode 100644
index 0000000000..5ff657bb5c
--- /dev/null
+++ b/src/crc32c/src/crc32c_round_up_unittest.cc
@@ -0,0 +1,84 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "./crc32c_round_up.h"
+
+#include <cstddef>
+#include <cstdint>
+
+#include "gtest/gtest.h"
+
+namespace crc32c {
+
+TEST(CRC32CRoundUpTest, RoundUpUintptr) {
+ uintptr_t zero = 0;
+
+ ASSERT_EQ(zero, RoundUp<1>(zero));
+ ASSERT_EQ(1U, RoundUp<1>(1U));
+ ASSERT_EQ(2U, RoundUp<1>(2U));
+ ASSERT_EQ(3U, RoundUp<1>(3U));
+ ASSERT_EQ(~static_cast<uintptr_t>(0), RoundUp<1>(~static_cast<uintptr_t>(0)));
+ ASSERT_EQ(~static_cast<uintptr_t>(1), RoundUp<1>(~static_cast<uintptr_t>(1)));
+ ASSERT_EQ(~static_cast<uintptr_t>(2), RoundUp<1>(~static_cast<uintptr_t>(2)));
+ ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<1>(~static_cast<uintptr_t>(3)));
+
+ ASSERT_EQ(zero, RoundUp<2>(zero));
+ ASSERT_EQ(2U, RoundUp<2>(1U));
+ ASSERT_EQ(2U, RoundUp<2>(2U));
+ ASSERT_EQ(4U, RoundUp<2>(3U));
+ ASSERT_EQ(4U, RoundUp<2>(4U));
+ ASSERT_EQ(6U, RoundUp<2>(5U));
+ ASSERT_EQ(6U, RoundUp<2>(6U));
+ ASSERT_EQ(8U, RoundUp<2>(7U));
+ ASSERT_EQ(8U, RoundUp<2>(8U));
+ ASSERT_EQ(~static_cast<uintptr_t>(1), RoundUp<2>(~static_cast<uintptr_t>(1)));
+ ASSERT_EQ(~static_cast<uintptr_t>(1), RoundUp<2>(~static_cast<uintptr_t>(2)));
+ ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<2>(~static_cast<uintptr_t>(3)));
+ ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<2>(~static_cast<uintptr_t>(4)));
+
+ ASSERT_EQ(zero, RoundUp<4>(zero));
+ ASSERT_EQ(4U, RoundUp<4>(1U));
+ ASSERT_EQ(4U, RoundUp<4>(2U));
+ ASSERT_EQ(4U, RoundUp<4>(3U));
+ ASSERT_EQ(4U, RoundUp<4>(4U));
+ ASSERT_EQ(8U, RoundUp<4>(5U));
+ ASSERT_EQ(8U, RoundUp<4>(6U));
+ ASSERT_EQ(8U, RoundUp<4>(7U));
+ ASSERT_EQ(8U, RoundUp<4>(8U));
+ ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(3)));
+ ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(4)));
+ ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(5)));
+ ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(6)));
+ ASSERT_EQ(~static_cast<uintptr_t>(7), RoundUp<4>(~static_cast<uintptr_t>(7)));
+ ASSERT_EQ(~static_cast<uintptr_t>(7), RoundUp<4>(~static_cast<uintptr_t>(8)));
+ ASSERT_EQ(~static_cast<uintptr_t>(7), RoundUp<4>(~static_cast<uintptr_t>(9)));
+}
+
+TEST(CRC32CRoundUpTest, RoundUpPointer) {
+ uintptr_t zero = 0, three = 3, four = 4, seven = 7, eight = 8;
+
+ const uint8_t* zero_ptr = reinterpret_cast<const uint8_t*>(zero);
+ const uint8_t* three_ptr = reinterpret_cast<const uint8_t*>(three);
+ const uint8_t* four_ptr = reinterpret_cast<const uint8_t*>(four);
+ const uint8_t* seven_ptr = reinterpret_cast<const uint8_t*>(seven);
+ const uint8_t* eight_ptr = reinterpret_cast<const uint8_t*>(eight);
+
+ ASSERT_EQ(zero_ptr, RoundUp<1>(zero_ptr));
+ ASSERT_EQ(zero_ptr, RoundUp<4>(zero_ptr));
+ ASSERT_EQ(zero_ptr, RoundUp<8>(zero_ptr));
+
+ ASSERT_EQ(three_ptr, RoundUp<1>(three_ptr));
+ ASSERT_EQ(four_ptr, RoundUp<4>(three_ptr));
+ ASSERT_EQ(eight_ptr, RoundUp<8>(three_ptr));
+
+ ASSERT_EQ(four_ptr, RoundUp<1>(four_ptr));
+ ASSERT_EQ(four_ptr, RoundUp<4>(four_ptr));
+ ASSERT_EQ(eight_ptr, RoundUp<8>(four_ptr));
+
+ ASSERT_EQ(seven_ptr, RoundUp<1>(seven_ptr));
+ ASSERT_EQ(eight_ptr, RoundUp<4>(seven_ptr));
+ ASSERT_EQ(eight_ptr, RoundUp<8>(seven_ptr));
+}
+
+} // namespace crc32c
diff --git a/src/crc32c/src/crc32c_sse42.cc b/src/crc32c/src/crc32c_sse42.cc
new file mode 100644
index 0000000000..139520428e
--- /dev/null
+++ b/src/crc32c/src/crc32c_sse42.cc
@@ -0,0 +1,258 @@
+// Copyright 2008 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "./crc32c_sse42.h"
+
+// In a separate source file to allow this accelerated CRC32C function to be
+// compiled with the appropriate compiler flags to enable SSE4.2 instructions.
+
+// This implementation is loosely based on Intel Pub 323405 from April 2011,
+// "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction".
+
+#include <cstddef>
+#include <cstdint>
+
+#include "./crc32c_internal.h"
+#include "./crc32c_prefetch.h"
+#include "./crc32c_read_le.h"
+#include "./crc32c_round_up.h"
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#else // !defined(_MSC_VER)
+#include <nmmintrin.h>
+#endif // defined(_MSC_VER)
+
+namespace crc32c {
+
+namespace {
+
+constexpr const ptrdiff_t kGroups = 3;
+constexpr const ptrdiff_t kBlock0Size = 16 * 1024 / kGroups / 64 * 64;
+constexpr const ptrdiff_t kBlock1Size = 4 * 1024 / kGroups / 8 * 8;
+constexpr const ptrdiff_t kBlock2Size = 1024 / kGroups / 8 * 8;
+
+const uint32_t kBlock0SkipTable[8][16] = {
+ {0x00000000, 0xff770459, 0xfb027e43, 0x04757a1a, 0xf3e88a77, 0x0c9f8e2e,
+ 0x08eaf434, 0xf79df06d, 0xe23d621f, 0x1d4a6646, 0x193f1c5c, 0xe6481805,
+ 0x11d5e868, 0xeea2ec31, 0xead7962b, 0x15a09272},
+ {0x00000000, 0xc196b2cf, 0x86c1136f, 0x4757a1a0, 0x086e502f, 0xc9f8e2e0,
+ 0x8eaf4340, 0x4f39f18f, 0x10dca05e, 0xd14a1291, 0x961db331, 0x578b01fe,
+ 0x18b2f071, 0xd92442be, 0x9e73e31e, 0x5fe551d1},
+ {0x00000000, 0x21b940bc, 0x43728178, 0x62cbc1c4, 0x86e502f0, 0xa75c424c,
+ 0xc5978388, 0xe42ec334, 0x08267311, 0x299f33ad, 0x4b54f269, 0x6aedb2d5,
+ 0x8ec371e1, 0xaf7a315d, 0xcdb1f099, 0xec08b025},
+ {0x00000000, 0x104ce622, 0x2099cc44, 0x30d52a66, 0x41339888, 0x517f7eaa,
+ 0x61aa54cc, 0x71e6b2ee, 0x82673110, 0x922bd732, 0xa2fefd54, 0xb2b21b76,
+ 0xc354a998, 0xd3184fba, 0xe3cd65dc, 0xf38183fe},
+ {0x00000000, 0x012214d1, 0x024429a2, 0x03663d73, 0x04885344, 0x05aa4795,
+ 0x06cc7ae6, 0x07ee6e37, 0x0910a688, 0x0832b259, 0x0b548f2a, 0x0a769bfb,
+ 0x0d98f5cc, 0x0cbae11d, 0x0fdcdc6e, 0x0efec8bf},
+ {0x00000000, 0x12214d10, 0x24429a20, 0x3663d730, 0x48853440, 0x5aa47950,
+ 0x6cc7ae60, 0x7ee6e370, 0x910a6880, 0x832b2590, 0xb548f2a0, 0xa769bfb0,
+ 0xd98f5cc0, 0xcbae11d0, 0xfdcdc6e0, 0xefec8bf0},
+ {0x00000000, 0x27f8a7f1, 0x4ff14fe2, 0x6809e813, 0x9fe29fc4, 0xb81a3835,
+ 0xd013d026, 0xf7eb77d7, 0x3a294979, 0x1dd1ee88, 0x75d8069b, 0x5220a16a,
+ 0xa5cbd6bd, 0x8233714c, 0xea3a995f, 0xcdc23eae},
+ {0x00000000, 0x745292f2, 0xe8a525e4, 0x9cf7b716, 0xd4a63d39, 0xa0f4afcb,
+ 0x3c0318dd, 0x48518a2f, 0xaca00c83, 0xd8f29e71, 0x44052967, 0x3057bb95,
+ 0x780631ba, 0x0c54a348, 0x90a3145e, 0xe4f186ac},
+};
+const uint32_t kBlock1SkipTable[8][16] = {
+ {0x00000000, 0x79113270, 0xf22264e0, 0x8b335690, 0xe1a8bf31, 0x98b98d41,
+ 0x138adbd1, 0x6a9be9a1, 0xc6bd0893, 0xbfac3ae3, 0x349f6c73, 0x4d8e5e03,
+ 0x2715b7a2, 0x5e0485d2, 0xd537d342, 0xac26e132},
+ {0x00000000, 0x889667d7, 0x14c0b95f, 0x9c56de88, 0x298172be, 0xa1171569,
+ 0x3d41cbe1, 0xb5d7ac36, 0x5302e57c, 0xdb9482ab, 0x47c25c23, 0xcf543bf4,
+ 0x7a8397c2, 0xf215f015, 0x6e432e9d, 0xe6d5494a},
+ {0x00000000, 0xa605caf8, 0x49e7e301, 0xefe229f9, 0x93cfc602, 0x35ca0cfa,
+ 0xda282503, 0x7c2deffb, 0x2273faf5, 0x8476300d, 0x6b9419f4, 0xcd91d30c,
+ 0xb1bc3cf7, 0x17b9f60f, 0xf85bdff6, 0x5e5e150e},
+ {0x00000000, 0x44e7f5ea, 0x89cfebd4, 0xcd281e3e, 0x1673a159, 0x529454b3,
+ 0x9fbc4a8d, 0xdb5bbf67, 0x2ce742b2, 0x6800b758, 0xa528a966, 0xe1cf5c8c,
+ 0x3a94e3eb, 0x7e731601, 0xb35b083f, 0xf7bcfdd5},
+ {0x00000000, 0x59ce8564, 0xb39d0ac8, 0xea538fac, 0x62d66361, 0x3b18e605,
+ 0xd14b69a9, 0x8885eccd, 0xc5acc6c2, 0x9c6243a6, 0x7631cc0a, 0x2fff496e,
+ 0xa77aa5a3, 0xfeb420c7, 0x14e7af6b, 0x4d292a0f},
+ {0x00000000, 0x8eb5fb75, 0x1887801b, 0x96327b6e, 0x310f0036, 0xbfbafb43,
+ 0x2988802d, 0xa73d7b58, 0x621e006c, 0xecabfb19, 0x7a998077, 0xf42c7b02,
+ 0x5311005a, 0xdda4fb2f, 0x4b968041, 0xc5237b34},
+ {0x00000000, 0xc43c00d8, 0x8d947741, 0x49a87799, 0x1ec49873, 0xdaf898ab,
+ 0x9350ef32, 0x576cefea, 0x3d8930e6, 0xf9b5303e, 0xb01d47a7, 0x7421477f,
+ 0x234da895, 0xe771a84d, 0xaed9dfd4, 0x6ae5df0c},
+ {0x00000000, 0x7b1261cc, 0xf624c398, 0x8d36a254, 0xe9a5f1c1, 0x92b7900d,
+ 0x1f813259, 0x64935395, 0xd6a79573, 0xadb5f4bf, 0x208356eb, 0x5b913727,
+ 0x3f0264b2, 0x4410057e, 0xc926a72a, 0xb234c6e6},
+};
+const uint32_t kBlock2SkipTable[8][16] = {
+ {0x00000000, 0x8f158014, 0x1bc776d9, 0x94d2f6cd, 0x378eedb2, 0xb89b6da6,
+ 0x2c499b6b, 0xa35c1b7f, 0x6f1ddb64, 0xe0085b70, 0x74daadbd, 0xfbcf2da9,
+ 0x589336d6, 0xd786b6c2, 0x4354400f, 0xcc41c01b},
+ {0x00000000, 0xde3bb6c8, 0xb99b1b61, 0x67a0ada9, 0x76da4033, 0xa8e1f6fb,
+ 0xcf415b52, 0x117aed9a, 0xedb48066, 0x338f36ae, 0x542f9b07, 0x8a142dcf,
+ 0x9b6ec055, 0x4555769d, 0x22f5db34, 0xfcce6dfc},
+ {0x00000000, 0xde85763d, 0xb8e69a8b, 0x6663ecb6, 0x742143e7, 0xaaa435da,
+ 0xccc7d96c, 0x1242af51, 0xe84287ce, 0x36c7f1f3, 0x50a41d45, 0x8e216b78,
+ 0x9c63c429, 0x42e6b214, 0x24855ea2, 0xfa00289f},
+ {0x00000000, 0xd569796d, 0xaf3e842b, 0x7a57fd46, 0x5b917ea7, 0x8ef807ca,
+ 0xf4affa8c, 0x21c683e1, 0xb722fd4e, 0x624b8423, 0x181c7965, 0xcd750008,
+ 0xecb383e9, 0x39dafa84, 0x438d07c2, 0x96e47eaf},
+ {0x00000000, 0x6ba98c6d, 0xd75318da, 0xbcfa94b7, 0xab4a4745, 0xc0e3cb28,
+ 0x7c195f9f, 0x17b0d3f2, 0x5378f87b, 0x38d17416, 0x842be0a1, 0xef826ccc,
+ 0xf832bf3e, 0x939b3353, 0x2f61a7e4, 0x44c82b89},
+ {0x00000000, 0xa6f1f0f6, 0x480f971d, 0xeefe67eb, 0x901f2e3a, 0x36eedecc,
+ 0xd810b927, 0x7ee149d1, 0x25d22a85, 0x8323da73, 0x6dddbd98, 0xcb2c4d6e,
+ 0xb5cd04bf, 0x133cf449, 0xfdc293a2, 0x5b336354},
+ {0x00000000, 0x4ba4550a, 0x9748aa14, 0xdcecff1e, 0x2b7d22d9, 0x60d977d3,
+ 0xbc3588cd, 0xf791ddc7, 0x56fa45b2, 0x1d5e10b8, 0xc1b2efa6, 0x8a16baac,
+ 0x7d87676b, 0x36233261, 0xeacfcd7f, 0xa16b9875},
+ {0x00000000, 0xadf48b64, 0x5e056039, 0xf3f1eb5d, 0xbc0ac072, 0x11fe4b16,
+ 0xe20fa04b, 0x4ffb2b2f, 0x7df9f615, 0xd00d7d71, 0x23fc962c, 0x8e081d48,
+ 0xc1f33667, 0x6c07bd03, 0x9ff6565e, 0x3202dd3a},
+};
+
+constexpr const ptrdiff_t kPrefetchHorizon = 256;
+
+} // namespace
+
+uint32_t ExtendSse42(uint32_t crc, const uint8_t* data, size_t size) {
+ const uint8_t* p = data;
+ const uint8_t* e = data + size;
+ uint32_t l = crc ^ kCRC32Xor;
+
+#define STEP1 \
+ do { \
+ l = _mm_crc32_u8(l, *p++); \
+ } while (0)
+
+#define STEP4(crc) \
+ do { \
+ crc = _mm_crc32_u32(crc, ReadUint32LE(p)); \
+ p += 4; \
+ } while (0)
+
+#define STEP8(crc, data) \
+ do { \
+ crc = _mm_crc32_u64(crc, ReadUint64LE(data)); \
+ data += 8; \
+ } while (0)
+
+#define STEP8BY3(crc0, crc1, crc2, p0, p1, p2) \
+ do { \
+ STEP8(crc0, p0); \
+ STEP8(crc1, p1); \
+ STEP8(crc2, p2); \
+ } while (0)
+
+#define STEP8X3(crc0, crc1, crc2, bs) \
+ do { \
+ crc0 = _mm_crc32_u64(crc0, ReadUint64LE(p)); \
+ crc1 = _mm_crc32_u64(crc1, ReadUint64LE(p + bs)); \
+ crc2 = _mm_crc32_u64(crc2, ReadUint64LE(p + 2 * bs)); \
+ p += 8; \
+ } while (0)
+
+#define SKIP_BLOCK(crc, tab) \
+ do { \
+ crc = tab[0][crc & 0xf] ^ tab[1][(crc >> 4) & 0xf] ^ \
+ tab[2][(crc >> 8) & 0xf] ^ tab[3][(crc >> 12) & 0xf] ^ \
+ tab[4][(crc >> 16) & 0xf] ^ tab[5][(crc >> 20) & 0xf] ^ \
+ tab[6][(crc >> 24) & 0xf] ^ tab[7][(crc >> 28) & 0xf]; \
+ } while (0)
+
+ // Point x at first 8-byte aligned byte in the buffer. This might be past the
+ // end of the buffer.
+ const uint8_t* x = RoundUp<8>(p);
+ if (x <= e) {
+ // Process bytes until p is 8-byte aligned.
+ while (p != x) {
+ STEP1;
+ }
+ }
+
+ // Process the data in predetermined block sizes with tables for quickly
+ // combining the checksum. Experimentally it's better to use larger block
+ // sizes where possible so use a hierarchy of decreasing block sizes.
+ uint64_t l64 = l;
+ while ((e - p) >= kGroups * kBlock0Size) {
+ uint64_t l641 = 0;
+ uint64_t l642 = 0;
+ for (int i = 0; i < kBlock0Size; i += 8 * 8) {
+ // Prefetch ahead to hide latency.
+ RequestPrefetch(p + kPrefetchHorizon);
+ RequestPrefetch(p + kBlock0Size + kPrefetchHorizon);
+ RequestPrefetch(p + 2 * kBlock0Size + kPrefetchHorizon);
+
+ // Process 64 bytes at a time.
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ }
+
+ // Combine results.
+ SKIP_BLOCK(l64, kBlock0SkipTable);
+ l64 ^= l641;
+ SKIP_BLOCK(l64, kBlock0SkipTable);
+ l64 ^= l642;
+ p += (kGroups - 1) * kBlock0Size;
+ }
+ while ((e - p) >= kGroups * kBlock1Size) {
+ uint64_t l641 = 0;
+ uint64_t l642 = 0;
+ for (int i = 0; i < kBlock1Size; i += 8) {
+ STEP8X3(l64, l641, l642, kBlock1Size);
+ }
+ SKIP_BLOCK(l64, kBlock1SkipTable);
+ l64 ^= l641;
+ SKIP_BLOCK(l64, kBlock1SkipTable);
+ l64 ^= l642;
+ p += (kGroups - 1) * kBlock1Size;
+ }
+ while ((e - p) >= kGroups * kBlock2Size) {
+ uint64_t l641 = 0;
+ uint64_t l642 = 0;
+ for (int i = 0; i < kBlock2Size; i += 8) {
+ STEP8X3(l64, l641, l642, kBlock2Size);
+ }
+ SKIP_BLOCK(l64, kBlock2SkipTable);
+ l64 ^= l641;
+ SKIP_BLOCK(l64, kBlock2SkipTable);
+ l64 ^= l642;
+ p += (kGroups - 1) * kBlock2Size;
+ }
+
+ // Process bytes 16 at a time
+ while ((e - p) >= 16) {
+ STEP8(l64, p);
+ STEP8(l64, p);
+ }
+
+ l = static_cast<uint32_t>(l64);
+ // Process the last few bytes.
+ while (p != e) {
+ STEP1;
+ }
+#undef SKIP_BLOCK
+#undef STEP8X3
+#undef STEP8BY3
+#undef STEP8
+#undef STEP4
+#undef STEP1
+
+ return l ^ kCRC32Xor;
+}
+
+} // namespace crc32c
+
+#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
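
For contrast with the three-way interleaved loop above, the single-stream form of the hardware path is much shorter. A sketch assuming an x86-64 build with SSE4.2 enabled (GCC/Clang <nmmintrin.h>; the function name is illustrative); it skips the skip-table recombination, so it is simpler but does not keep the CRC32 unit fully busy:

#include <cstddef>
#include <cstdint>
#include <cstring>

#include <nmmintrin.h>  // _mm_crc32_u8 / _mm_crc32_u64

uint32_t SimpleExtendSse42(uint32_t crc, const uint8_t* data, size_t size) {
  uint64_t l = crc ^ 0xffffffffu;  // pre-condition, same role as kCRC32Xor
  while (size >= 8) {
    uint64_t word;
    std::memcpy(&word, data, sizeof(word));  // safe unaligned little-endian load
    l = _mm_crc32_u64(l, word);
    data += 8;
    size -= 8;
  }
  while (size > 0) {
    l = _mm_crc32_u8(static_cast<uint32_t>(l), *data++);  // byte tail
    --size;
  }
  return static_cast<uint32_t>(l) ^ 0xffffffffu;  // post-condition
}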
diff --git a/src/crc32c/src/crc32c_sse42.h b/src/crc32c/src/crc32c_sse42.h
new file mode 100644
index 0000000000..95da926632
--- /dev/null
+++ b/src/crc32c/src/crc32c_sse42.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_SSE42_H_
+#define CRC32C_CRC32C_SSE42_H_
+
+// X86-specific code.
+
+#include <cstddef>
+#include <cstdint>
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+// The hardware-accelerated implementation is only enabled for 64-bit builds,
+// because a straightforward 32-bit implementation actually runs slower than the
+// portable version. Most X86 machines are 64-bit nowadays, so it doesn't make
+// much sense to spend time optimizing a hardware-accelerated implementation
+// for 32-bit builds.
+#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+namespace crc32c {
+
+// SSE4.2-accelerated implementation in crc32c_sse42.cc
+uint32_t ExtendSse42(uint32_t crc, const uint8_t* data, size_t count);
+
+} // namespace crc32c
+
+#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+#endif // CRC32C_CRC32C_SSE42_H_
diff --git a/src/crc32c/src/crc32c_sse42_check.h b/src/crc32c/src/crc32c_sse42_check.h
new file mode 100644
index 0000000000..e7528912a6
--- /dev/null
+++ b/src/crc32c/src/crc32c_sse42_check.h
@@ -0,0 +1,50 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_SSE42_CHECK_H_
+#define CRC32C_CRC32C_SSE42_CHECK_H_
+
+// X86-specific code checking the availability of SSE4.2 instructions.
+
+#include <cstddef>
+#include <cstdint>
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+// If the compiler supports SSE4.2, it definitely supports X86.
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+
+namespace crc32c {
+
+inline bool CanUseSse42() {
+ int cpu_info[4];
+ __cpuid(cpu_info, 1);
+ return (cpu_info[2] & (1 << 20)) != 0;
+}
+
+} // namespace crc32c
+
+#else // !defined(_MSC_VER)
+#include <cpuid.h>
+
+namespace crc32c {
+
+inline bool CanUseSse42() {
+ unsigned int eax, ebx, ecx, edx;
+ return __get_cpuid(1, &eax, &ebx, &ecx, &edx) && ((ecx & (1 << 20)) != 0);
+}
+
+} // namespace crc32c
+
+#endif // defined(_MSC_VER)
+
+#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+#endif // CRC32C_CRC32C_SSE42_CHECK_H_
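
A caller could tie CanUseSse42() to the portable fallback roughly as below. This is a sketch only: the library's real dispatcher lives in crc32c.cc, which is not part of this hunk, and the ExtendAuto name is illustrative.

#include <cstddef>
#include <cstdint>

#include "./crc32c_internal.h"     // ExtendPortable
#include "./crc32c_sse42.h"        // ExtendSse42 (x86-64 builds only)
#include "./crc32c_sse42_check.h"  // CanUseSse42

namespace crc32c {

inline uint32_t ExtendAuto(uint32_t crc, const uint8_t* data, size_t count) {
#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
  // CPUID is cheap, but cache the answer so it runs once per process.
  static const bool use_sse42 = CanUseSse42();
  if (use_sse42) return ExtendSse42(crc, data, count);
#endif
  return ExtendPortable(crc, data, count);
}

}  // namespace crc32c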
diff --git a/src/crc32c/src/crc32c_sse42_unittest.cc b/src/crc32c/src/crc32c_sse42_unittest.cc
new file mode 100644
index 0000000000..c73ad8ddd1
--- /dev/null
+++ b/src/crc32c/src/crc32c_sse42_unittest.cc
@@ -0,0 +1,24 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "gtest/gtest.h"
+
+#include "./crc32c_extend_unittests.h"
+#include "./crc32c_sse42.h"
+
+namespace crc32c {
+
+#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+struct Sse42TestTraits {
+ static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
+ return ExtendSse42(crc, data, count);
+ }
+};
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Sse42, ExtendTest, Sse42TestTraits);
+
+#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+} // namespace crc32c
diff --git a/src/crc32c/src/crc32c_test_main.cc b/src/crc32c/src/crc32c_test_main.cc
new file mode 100644
index 0000000000..275ee380c6
--- /dev/null
+++ b/src/crc32c/src/crc32c_test_main.cc
@@ -0,0 +1,22 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#include "gtest/gtest.h"
+
+#if CRC32C_TESTS_BUILT_WITH_GLOG
+#include "glog/logging.h"
+#endif // CRC32C_TESTS_BUILT_WITH_GLOG
+
+int main(int argc, char** argv) {
+#if CRC32C_TESTS_BUILT_WITH_GLOG
+ google::InitGoogleLogging(argv[0]);
+ google::InstallFailureSignalHandler();
+#endif // CRC32C_TESTS_BUILT_WITH_GLOG
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/src/crc32c/src/crc32c_unittest.cc b/src/crc32c/src/crc32c_unittest.cc
new file mode 100644
index 0000000000..d6c6af680c
--- /dev/null
+++ b/src/crc32c/src/crc32c_unittest.cc
@@ -0,0 +1,129 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "crc32c/crc32c.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#include "gtest/gtest.h"
+
+#include "./crc32c_extend_unittests.h"
+
+TEST(Crc32CTest, Crc32c) {
+ // From rfc3720 section B.4.
+ uint8_t buf[32];
+
+ std::memset(buf, 0, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa),
+ crc32c::Crc32c(buf, sizeof(buf)));
+
+ std::memset(buf, 0xff, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43),
+ crc32c::Crc32c(buf, sizeof(buf)));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<uint8_t>(i);
+ EXPECT_EQ(static_cast<uint32_t>(0x46dd794e),
+ crc32c::Crc32c(buf, sizeof(buf)));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<uint8_t>(31 - i);
+ EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c),
+ crc32c::Crc32c(buf, sizeof(buf)));
+
+ uint8_t data[48] = {
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ };
+ EXPECT_EQ(static_cast<uint32_t>(0xd9963a56),
+ crc32c::Crc32c(data, sizeof(data)));
+}
+
+namespace crc32c {
+
+struct ApiTestTraits {
+ static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
+ return ::crc32c::Extend(crc, data, count);
+ }
+};
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Api, ExtendTest, ApiTestTraits);
+
+} // namespace crc32c
+
+TEST(CRC32CTest, Crc32cCharPointer) {
+ char buf[32];
+
+ std::memset(buf, 0, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa),
+ crc32c::Crc32c(buf, sizeof(buf)));
+
+ std::memset(buf, 0xff, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43),
+ crc32c::Crc32c(buf, sizeof(buf)));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<char>(i);
+ EXPECT_EQ(static_cast<uint32_t>(0x46dd794e),
+ crc32c::Crc32c(buf, sizeof(buf)));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<char>(31 - i);
+ EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c),
+ crc32c::Crc32c(buf, sizeof(buf)));
+}
+
+TEST(CRC32CTest, Crc32cStdString) {
+ std::string buf;
+ buf.resize(32);
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<char>(0x00);
+ EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa), crc32c::Crc32c(buf));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = '\xff';
+ EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43), crc32c::Crc32c(buf));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<char>(i);
+ EXPECT_EQ(static_cast<uint32_t>(0x46dd794e), crc32c::Crc32c(buf));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<char>(31 - i);
+ EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c), crc32c::Crc32c(buf));
+}
+
+#if __cplusplus > 201402L
+#if __has_include(<string_view>)
+
+TEST(CRC32CTest, Crc32cStdStringView) {
+ uint8_t buf[32];
+ std::string_view view(reinterpret_cast<const char*>(buf), sizeof(buf));
+
+ std::memset(buf, 0, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa), crc32c::Crc32c(view));
+
+ std::memset(buf, 0xff, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43), crc32c::Crc32c(view));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<uint8_t>(i);
+ EXPECT_EQ(static_cast<uint32_t>(0x46dd794e), crc32c::Crc32c(view));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<uint8_t>(31 - i);
+ EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c), crc32c::Crc32c(view));
+}
+
+#endif // __has_include(<string_view>)
+#endif // __cplusplus > 201402L
+
+#define TESTED_EXTEND Extend
+#include "./crc32c_extend_unittests.h"
+#undef TESTED_EXTEND
diff --git a/src/crypto/sha256_avx2.cpp b/src/crypto/sha256_avx2.cpp
index 90a72516a4..624bdb42e4 100644
--- a/src/crypto/sha256_avx2.cpp
+++ b/src/crypto/sha256_avx2.cpp
@@ -1,3 +1,7 @@
+// Copyright (c) 2017-2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
#ifdef ENABLE_AVX2
#include <stdint.h>
diff --git a/src/crypto/sha256_sse41.cpp b/src/crypto/sha256_sse41.cpp
index fc79f46f7f..4eaf7d7b18 100644
--- a/src/crypto/sha256_sse41.cpp
+++ b/src/crypto/sha256_sse41.cpp
@@ -1,3 +1,7 @@
+// Copyright (c) 2018-2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
#ifdef ENABLE_SSE41
#include <stdint.h>
diff --git a/src/dummywallet.cpp b/src/dummywallet.cpp
index 2fa1ad59da..a5582e3b2c 100644
--- a/src/dummywallet.cpp
+++ b/src/dummywallet.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018-2019 The Bitcoin Core developers
+// Copyright (c) 2018-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -11,6 +11,8 @@ enum class WalletCreationStatus;
namespace interfaces {
class Chain;
+class Handler;
+class Wallet;
}
class DummyWalletInit : public WalletInitInterface {
@@ -80,9 +82,13 @@ WalletCreationStatus CreateWallet(interfaces::Chain& chain, const SecureString&
throw std::logic_error("Wallet function called in non-wallet build.");
}
-namespace interfaces {
+using LoadWalletFn = std::function<void(std::unique_ptr<interfaces::Wallet> wallet)>;
+std::unique_ptr<interfaces::Handler> HandleLoadWallet(LoadWalletFn load_wallet)
+{
+ throw std::logic_error("Wallet function called in non-wallet build.");
+}
-class Wallet;
+namespace interfaces {
std::unique_ptr<Wallet> MakeWallet(const std::shared_ptr<CWallet>& wallet)
{
diff --git a/src/flatfile.h b/src/flatfile.h
index 374ceff411..d80682d383 100644
--- a/src/flatfile.h
+++ b/src/flatfile.h
@@ -20,7 +20,7 @@ struct FlatFilePos
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(VARINT(nFile, VarIntMode::NONNEGATIVE_SIGNED));
+ READWRITE(VARINT_MODE(nFile, VarIntMode::NONNEGATIVE_SIGNED));
READWRITE(VARINT(nPos));
}
diff --git a/src/fs.cpp b/src/fs.cpp
index 73fb3b606e..066c6c10d3 100644
--- a/src/fs.cpp
+++ b/src/fs.cpp
@@ -1,3 +1,7 @@
+// Copyright (c) 2017-2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
#include <fs.h>
#ifndef WIN32
diff --git a/src/httprpc.cpp b/src/httprpc.cpp
index ff75789223..4d49736140 100644
--- a/src/httprpc.cpp
+++ b/src/httprpc.cpp
@@ -174,7 +174,7 @@ static bool HTTPReq_JSONRPC(HTTPRequest* req, const std::string &)
/* Deter brute-forcing
If this results in a DoS the user really
shouldn't have their RPC port exposed. */
- MilliSleep(250);
+ UninterruptibleSleep(std::chrono::milliseconds{250});
req->WriteHeader("WWW-Authenticate", WWW_AUTH_HEADER_DATA);
req->WriteReply(HTTP_UNAUTHORIZED);
diff --git a/src/httpserver.cpp b/src/httpserver.cpp
index 7179949eaf..11d73b7c9a 100644
--- a/src/httpserver.cpp
+++ b/src/httpserver.cpp
@@ -172,7 +172,7 @@ static bool InitHTTPAllowList()
rpc_allow_subnets.push_back(CSubNet(localv6)); // always allow IPv6 localhost
for (const std::string& strAllow : gArgs.GetArgs("-rpcallowip")) {
CSubNet subnet;
- LookupSubNet(strAllow.c_str(), subnet);
+ LookupSubNet(strAllow, subnet);
if (!subnet.IsValid()) {
uiInterface.ThreadSafeMessageBox(
strprintf("Invalid -rpcallowip subnet specification: %s. Valid are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24).", strAllow),
@@ -236,7 +236,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
if (hreq->GetRequestMethod() == HTTPRequest::UNKNOWN) {
LogPrint(BCLog::HTTP, "HTTP request from %s rejected: Unknown HTTP request method\n",
hreq->GetPeer().ToString());
- hreq->WriteReply(HTTP_BADMETHOD);
+ hreq->WriteReply(HTTP_BAD_METHOD);
return;
}
@@ -268,10 +268,10 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
item.release(); /* if true, queue took ownership */
else {
LogPrintf("WARNING: request rejected because http work queue depth exceeded, it can be increased with the -rpcworkqueue= setting\n");
- item->req->WriteReply(HTTP_INTERNAL, "Work queue depth exceeded");
+ item->req->WriteReply(HTTP_INTERNAL_SERVER_ERROR, "Work queue depth exceeded");
}
} else {
- hreq->WriteReply(HTTP_NOTFOUND);
+ hreq->WriteReply(HTTP_NOT_FOUND);
}
}
@@ -324,7 +324,7 @@ static bool HTTPBindAddresses(struct evhttp* http)
evhttp_bound_socket *bind_handle = evhttp_bind_socket_with_handle(http, i->first.empty() ? nullptr : i->first.c_str(), i->second);
if (bind_handle) {
CNetAddr addr;
- if (i->first.empty() || (LookupHost(i->first.c_str(), addr, false) && addr.IsBindAny())) {
+ if (i->first.empty() || (LookupHost(i->first, addr, false) && addr.IsBindAny())) {
LogPrintf("WARNING: the RPC server is not safe to expose to untrusted networks such as the public internet\n");
}
boundSockets.push_back(bind_handle);
@@ -519,7 +519,7 @@ HTTPRequest::~HTTPRequest()
if (!replySent) {
// Keep track of whether reply was sent to avoid request leaks
LogPrintf("%s: Unhandled request\n", __func__);
- WriteReply(HTTP_INTERNAL, "Unhandled request");
+ WriteReply(HTTP_INTERNAL_SERVER_ERROR, "Unhandled request");
}
// evhttpd cleans up the request, as long as a reply was sent.
}
diff --git a/src/index/base.cpp b/src/index/base.cpp
index dcb8e99fc1..3f3b301246 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -270,7 +270,7 @@ void BaseIndex::ChainStateFlushed(const CBlockLocator& locator)
Commit();
}
-bool BaseIndex::BlockUntilSyncedToCurrentChain()
+bool BaseIndex::BlockUntilSyncedToCurrentChain() const
{
AssertLockNotHeld(cs_main);
diff --git a/src/index/base.h b/src/index/base.h
index d0088d9c9a..fb30aa7a0e 100644
--- a/src/index/base.h
+++ b/src/index/base.h
@@ -97,7 +97,7 @@ public:
/// sync once and only needs to process blocks in the ValidationInterface
/// queue. If the index is catching up from far behind, this method does
/// not block and immediately returns false.
- bool BlockUntilSyncedToCurrentChain();
+ bool BlockUntilSyncedToCurrentChain() const;
void Interrupt();
diff --git a/src/indirectmap.h b/src/indirectmap.h
index 76da4a6bd5..417d500bd4 100644
--- a/src/indirectmap.h
+++ b/src/indirectmap.h
@@ -5,6 +5,8 @@
#ifndef BITCOIN_INDIRECTMAP_H
#define BITCOIN_INDIRECTMAP_H
+#include <map>
+
template <class T>
struct DereferencingComparator { bool operator()(const T a, const T b) const { return *a < *b; } };
diff --git a/src/init.cpp b/src/init.cpp
index dc0f2ce05c..a637aac4d2 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -47,12 +47,15 @@
#include <txdb.h>
#include <txmempool.h>
#include <ui_interface.h>
+#include <util/asmap.h>
#include <util/moneystr.h>
#include <util/system.h>
#include <util/threadnames.h>
#include <util/translation.h>
-#include <util/validation.h>
#include <validation.h>
+#include <hash.h>
+
+
#include <validationinterface.h>
#include <walletinitinterface.h>
@@ -98,6 +101,8 @@ static constexpr int DUMP_BANS_INTERVAL = 60 * 15;
static const char* FEE_ESTIMATES_FILENAME="fee_estimates.dat";
+static const char* DEFAULT_ASMAP_FILENAME="ip_asn.map";
+
/**
* The PID file facilities.
*/
@@ -151,7 +156,6 @@ NODISCARD static bool CreatePidFile()
static std::unique_ptr<ECCVerifyHandle> globalVerifyHandle;
static boost::thread_group threadGroup;
-static CScheduler scheduler;
void Interrupt(NodeContext& node)
{
@@ -172,7 +176,7 @@ void Interrupt(NodeContext& node)
void Shutdown(NodeContext& node)
{
LogPrintf("%s: In progress...\n", __func__);
- static CCriticalSection cs_Shutdown;
+ static RecursiveMutex cs_Shutdown;
TRY_LOCK(cs_Shutdown, lockShutdown);
if (!lockShutdown)
return;
@@ -197,13 +201,12 @@ void Shutdown(NodeContext& node)
// using the other before destroying them.
if (node.peer_logic) UnregisterValidationInterface(node.peer_logic.get());
if (node.connman) node.connman->Stop();
- if (g_txindex) g_txindex->Stop();
- ForEachBlockFilterIndex([](BlockFilterIndex& index) { index.Stop(); });
StopTorControl();
// After everything has been shut down, but before things get flushed, stop the
// CScheduler/checkqueue threadGroup
+ if (node.scheduler) node.scheduler->stop();
threadGroup.interrupt_all();
threadGroup.join_all();
@@ -212,8 +215,6 @@ void Shutdown(NodeContext& node)
node.peer_logic.reset();
node.connman.reset();
node.banman.reset();
- g_txindex.reset();
- DestroyAllBlockFilterIndexes();
if (::mempool.IsLoaded() && gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
DumpMempool(::mempool);
@@ -246,6 +247,14 @@ void Shutdown(NodeContext& node)
// CValidationInterface callbacks, flush them...
GetMainSignals().FlushBackgroundCallbacks();
+ // Stop and delete all indexes only after flushing background callbacks.
+ if (g_txindex) {
+ g_txindex->Stop();
+ g_txindex.reset();
+ }
+ ForEachBlockFilterIndex([](BlockFilterIndex& index) { index.Stop(); });
+ DestroyAllBlockFilterIndexes();
+
// Any future callbacks will be dropped. This should absolutely be safe - if
// missing a callback results in an unrecoverable situation, unclean shutdown
// would too. The only reason to do the above flushes is to let the wallet catch
@@ -285,6 +294,7 @@ void Shutdown(NodeContext& node)
globalVerifyHandle.reset();
ECC_Stop();
if (node.mempool) node.mempool = nullptr;
+ node.scheduler.reset();
LogPrintf("%s: done\n", __func__);
}
@@ -399,6 +409,7 @@ void SetupServerArgs()
ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
gArgs.AddArg("-addnode=<ip>", "Add a node to connect to and attempt to keep the connection open (see the `addnode` RPC command help for more info). This option can be specified multiple times to add multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
+ gArgs.AddArg("-asmap=<file>", strprintf("Specify asn mapping used for bucketing of the peers (default: %s). Relative paths will be prefixed by the net-specific datadir location.", DEFAULT_ASMAP_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
gArgs.AddArg("-banscore=<n>", strprintf("Threshold for disconnecting misbehaving peers (default: %u)", DEFAULT_BANSCORE_THRESHOLD), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
gArgs.AddArg("-bantime=<n>", strprintf("Number of seconds to keep misbehaving peers from reconnecting (default: %u)", DEFAULT_MISBEHAVING_BANTIME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
gArgs.AddArg("-bind=<addr>", "Bind to given address and always listen on it. Use [host]:port notation for IPv6", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
@@ -426,7 +437,7 @@ void SetupServerArgs()
gArgs.AddArg("-timeout=<n>", strprintf("Specify connection timeout in milliseconds (minimum: 1, default: %d)", DEFAULT_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
gArgs.AddArg("-peertimeout=<n>", strprintf("Specify p2p connection timeout in seconds. This option determines the amount of time a peer may be inactive before the connection to it is dropped. (minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION);
gArgs.AddArg("-torcontrol=<ip>:<port>", strprintf("Tor control port to use if onion listening enabled (default: %s)", DEFAULT_TOR_CONTROL), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- gArgs.AddArg("-torpassword=<pass>", "Tor control port password (default: empty)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ gArgs.AddArg("-torpassword=<pass>", "Tor control port password (default: empty)", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::CONNECTION);
#ifdef USE_UPNP
#if USE_UPNP
gArgs.AddArg("-upnp", "Use UPnP to map the listening port (default: 1 when listening and no -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
@@ -439,7 +450,7 @@ void SetupServerArgs()
gArgs.AddArg("-whitebind=<[permissions@]addr>", "Bind to given address and whitelist peers connecting to it. "
"Use [host]:port notation for IPv6. Allowed permissions are bloomfilter (allow requesting BIP37 filtered blocks and transactions), "
"noban (do not ban for misbehavior), "
- "forcerelay (relay even non-standard transactions), "
+ "forcerelay (relay transactions that are already in the mempool; implies relay), "
"relay (relay even in -blocksonly mode), "
"and mempool (allow requesting BIP35 mempool contents). "
"Specify multiple permissions separated by commas (default: noban,mempool,relay). Can be specified multiple times.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
@@ -516,8 +527,8 @@ void SetupServerArgs()
gArgs.AddArg("-datacarriersize", strprintf("Maximum size of data in data carrier transactions we relay and mine (default: %u)", MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
gArgs.AddArg("-minrelaytxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)",
CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted inbound peers with default permissions. This will relay transactions even if the transactions were already in the mempool or violate local relay policy. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-whitelistrelay", strprintf("Add 'relay' permission to whitelisted inbound peers with default permissions. The will accept relayed transactions even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+ gArgs.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted inbound peers with default permissions. This will relay transactions even if the transactions were already in the mempool. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+ gArgs.AddArg("-whitelistrelay", strprintf("Add 'relay' permission to whitelisted inbound peers with default permissions. This will accept relayed transactions even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
gArgs.AddArg("-blockmaxweight=<n>", strprintf("Set maximum BIP141 block weight (default: %d)", DEFAULT_BLOCK_MAX_WEIGHT), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
@@ -526,15 +537,15 @@ void SetupServerArgs()
gArgs.AddArg("-rest", strprintf("Accept public REST requests (default: %u)", DEFAULT_REST_ENABLE), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
gArgs.AddArg("-rpcallowip=<ip>", "Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcauth=<userpw>", "Username and HMAC-SHA-256 hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcauth. The client then connects normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of arguments. This option can be specified multiple times", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcbind=<addr>[:port]", "Bind to given address to listen for JSON-RPC connections. Do not expose the RPC server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC);
+ gArgs.AddArg("-rpcauth=<userpw>", "Username and HMAC-SHA-256 hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcauth. The client then connects normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of arguments. This option can be specified multiple times", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
+ gArgs.AddArg("-rpcbind=<addr>[:port]", "Bind to given address to listen for JSON-RPC connections. Do not expose the RPC server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
gArgs.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ gArgs.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
gArgs.AddArg("-rpcport=<port>", strprintf("Listen for JSON-RPC connections on <port> (default: %u, testnet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC);
gArgs.AddArg("-rpcserialversion", strprintf("Sets the serialization of raw transaction or block hex returned in non-verbose mode, non-segwit(0) or segwit(1) (default: %d)", DEFAULT_RPC_SERIALIZE_VERSION), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
gArgs.AddArg("-rpcservertimeout=<n>", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
gArgs.AddArg("-rpcthreads=<n>", strprintf("Set the number of threads to service RPC calls (default: %d)", DEFAULT_HTTP_THREADS), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
- gArgs.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ gArgs.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
gArgs.AddArg("-rpcwhitelist=<whitelist>", "Set a whitelist to filter incoming RPC calls for a specific user. The field <whitelist> comes in the format: <USERNAME>:<rpc 1>,<rpc 2>,...,<rpc n>. If multiple whitelists are set for a given user, they are set-intersected. See -rpcwhitelistdefault documentation for information on default whitelist behavior.", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
gArgs.AddArg("-rpcwhitelistdefault", "Sets default behavior for rpc whitelisting. Unless rpcwhitelistdefault is set to 0, if any -rpcwhitelist is set, the rpc server acts as if all rpc users are subject to empty-unless-otherwise-specified whitelists. If rpcwhitelistdefault is set to 1 and no -rpcwhitelist is set, rpc server acts as if all rpc users are subject to empty whitelists.", ArgsManager::ALLOW_BOOL, OptionsCategory::RPC);
gArgs.AddArg("-rpcworkqueue=<n>", strprintf("Set the depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
@@ -699,7 +710,7 @@ static void ThreadImport(std::vector<fs::path> vImportFiles)
// scan for better chains in the block chain database, that are not yet connected in the active best chain
BlockValidationState state;
if (!ActivateBestChain(state, chainparams)) {
- LogPrintf("Failed to connect best block (%s)\n", FormatStateMessage(state));
+ LogPrintf("Failed to connect best block (%s)\n", state.ToString());
StartShutdown();
return;
}
@@ -877,8 +888,8 @@ bool AppInitBasicSetup()
_set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT);
#endif
#ifdef WIN32
- // Enable Data Execution Prevention (DEP)
- SetProcessDEPPolicy(PROCESS_DEP_ENABLE);
+ // Enable heap terminate-on-corruption
+ HeapSetInformation(nullptr, HeapEnableTerminationOnCorruption, nullptr, 0);
#endif
if (!SetupNetworking())
@@ -1219,6 +1230,9 @@ bool AppInitMain(NodeContext& node)
LogPrintf("Config file: %s (not found, skipping)\n", config_file_path.string());
}
+ // Log the config arguments to debug.log
+ gArgs.LogArgs();
+
LogPrintf("Using at most %i automatic connections (%i file descriptors available)\n", nMaxConnections, nFD);
// Warn about relative -datadir path.
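
Logging the parsed arguments at startup pairs with the ArgsManager::SENSITIVE flag added to -torpassword, -rpcauth, -rpcbind, -rpcpassword and -rpcuser further down, presumably so secret values are masked rather than written to debug.log. A hedged sketch of that masking idea only; the real LogArgs() lives in util/system.cpp and may differ:

```cpp
// Sketch of the redaction idea (hypothetical helper, not the actual LogArgs()).
#include <cstdio>
#include <map>
#include <set>
#include <string>

void LogArgsSketch(const std::map<std::string, std::string>& args,
                   const std::set<std::string>& sensitive)
{
    for (const auto& [name, value] : args) {
        const bool hide = sensitive.count(name) > 0;
        std::printf("Command-line arg: %s=\"%s\"\n", name.c_str(),
                    hide ? "****" : value.c_str());
    }
}
```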
@@ -1254,16 +1268,19 @@ bool AppInitMain(NodeContext& node)
}
}
+ assert(!node.scheduler);
+ node.scheduler = MakeUnique<CScheduler>();
+
// Start the lightweight task scheduler thread
- CScheduler::Function serviceLoop = std::bind(&CScheduler::serviceQueue, &scheduler);
+ CScheduler::Function serviceLoop = [&node]{ node.scheduler->serviceQueue(); };
threadGroup.create_thread(std::bind(&TraceThread<CScheduler::Function>, "scheduler", serviceLoop));
// Gather some entropy once per minute.
- scheduler.scheduleEvery([]{
+ node.scheduler->scheduleEvery([]{
RandAddPeriodic();
}, 60000);
- GetMainSignals().RegisterBackgroundSignalScheduler(scheduler);
+ GetMainSignals().RegisterBackgroundSignalScheduler(*node.scheduler);
// Create client interfaces for wallets that are supposed to be loaded
// according to -wallet and -disablewallet options. This only constructs
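
The scheduler moves from a file-scope static into NodeContext, so the node object owns its lifetime and Shutdown() can stop and destroy it explicitly (node.scheduler->stop() and node.scheduler.reset() above). A standalone sketch of the ownership pattern using a hypothetical Scheduler stand-in rather than the real CScheduler:

```cpp
// Ownership sketch only; `Scheduler` is a stand-in, not CScheduler.
#include <functional>
#include <memory>
#include <thread>

struct Scheduler {
    void serviceQueue() {}                                 // would run queued tasks until stop()
    void scheduleEvery(std::function<void()>, int /*ms*/) {}
    void stop() {}
};

struct NodeContext {
    std::unique_ptr<Scheduler> scheduler;
};

int main()
{
    NodeContext node;
    node.scheduler = std::make_unique<Scheduler>();
    std::thread service([&node] { node.scheduler->serviceQueue(); });
    node.scheduler->scheduleEvery([] { /* periodic work */ }, 60000);

    // Shutdown order mirrors init.cpp: stop, join, then destroy.
    node.scheduler->stop();
    service.join();
    node.scheduler.reset();
    return 0;
}
```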
@@ -1313,7 +1330,7 @@ bool AppInitMain(NodeContext& node)
assert(!node.connman);
node.connman = std::unique_ptr<CConnman>(new CConnman(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max())));
- node.peer_logic.reset(new PeerLogicValidation(node.connman.get(), node.banman.get(), scheduler));
+ node.peer_logic.reset(new PeerLogicValidation(node.connman.get(), node.banman.get(), *node.scheduler));
RegisterValidationInterface(node.peer_logic.get());
// sanitize comments per BIP-0014, format user agent and check total size
@@ -1354,7 +1371,7 @@ bool AppInitMain(NodeContext& node)
SetReachable(NET_ONION, false);
if (proxyArg != "" && proxyArg != "0") {
CService proxyAddr;
- if (!Lookup(proxyArg.c_str(), proxyAddr, 9050, fNameLookup)) {
+ if (!Lookup(proxyArg, proxyAddr, 9050, fNameLookup)) {
return InitError(strprintf(_("Invalid -proxy address or hostname: '%s'").translated, proxyArg));
}
@@ -1378,7 +1395,7 @@ bool AppInitMain(NodeContext& node)
SetReachable(NET_ONION, false);
} else {
CService onionProxy;
- if (!Lookup(onionArg.c_str(), onionProxy, 9050, fNameLookup)) {
+ if (!Lookup(onionArg, onionProxy, 9050, fNameLookup)) {
return InitError(strprintf(_("Invalid -onion address or hostname: '%s'").translated, onionArg));
}
proxyType addrOnion = proxyType(onionProxy, proxyRandomize);
@@ -1396,12 +1413,37 @@ bool AppInitMain(NodeContext& node)
for (const std::string& strAddr : gArgs.GetArgs("-externalip")) {
CService addrLocal;
- if (Lookup(strAddr.c_str(), addrLocal, GetListenPort(), fNameLookup) && addrLocal.IsValid())
+ if (Lookup(strAddr, addrLocal, GetListenPort(), fNameLookup) && addrLocal.IsValid())
AddLocal(addrLocal, LOCAL_MANUAL);
else
return InitError(ResolveErrMsg("externalip", strAddr));
}
+ // Read asmap file if configured
+ if (gArgs.IsArgSet("-asmap")) {
+ fs::path asmap_path = fs::path(gArgs.GetArg("-asmap", ""));
+ if (asmap_path.empty()) {
+ asmap_path = DEFAULT_ASMAP_FILENAME;
+ }
+ if (!asmap_path.is_absolute()) {
+ asmap_path = GetDataDir() / asmap_path;
+ }
+ if (!fs::exists(asmap_path)) {
+ InitError(strprintf(_("Could not find asmap file %s").translated, asmap_path));
+ return false;
+ }
+ std::vector<bool> asmap = CAddrMan::DecodeAsmap(asmap_path);
+ if (asmap.size() == 0) {
+ InitError(strprintf(_("Could not parse asmap file %s").translated, asmap_path));
+ return false;
+ }
+ const uint256 asmap_version = SerializeHash(asmap);
+ node.connman->SetAsmap(std::move(asmap));
+ LogPrintf("Using asmap version %s for IP bucketing\n", asmap_version.ToString());
+ } else {
+ LogPrintf("Using /16 prefix for IP bucketing\n");
+ }
+
#if ENABLE_ZMQ
g_zmq_notification_interface = CZMQNotificationInterface::Create();
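
The -asmap handling above falls back to the default filename when the option has no value, makes relative paths datadir-relative, then decodes the file and hands the map to CConnman. A condensed sketch of just the path-resolution rule, using std::filesystem instead of the project's fs wrapper:

```cpp
// Sketch of the -asmap path resolution rule (std::filesystem stand-in).
#include <filesystem>
#include <string>

namespace fs = std::filesystem;

fs::path ResolveAsmapPath(const std::string& arg, const fs::path& datadir)
{
    fs::path path = arg.empty() ? fs::path{"ip_asn.map"} : fs::path{arg};
    if (!path.is_absolute()) path = datadir / path; // relative paths live in the datadir
    return path;
}
```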
@@ -1776,7 +1818,7 @@ bool AppInitMain(NodeContext& node)
for (const std::string& strBind : gArgs.GetArgs("-bind")) {
CService addrBind;
- if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false)) {
+ if (!Lookup(strBind, addrBind, GetListenPort(), false)) {
return InitError(ResolveErrMsg("bind", strBind));
}
connOptions.vBinds.push_back(addrBind);
@@ -1805,7 +1847,7 @@ bool AppInitMain(NodeContext& node)
connOptions.m_specified_outgoing = connect;
}
}
- if (!node.connman->Start(scheduler, connOptions)) {
+ if (!node.connman->Start(*node.scheduler, connOptions)) {
return false;
}
@@ -1815,11 +1857,11 @@ bool AppInitMain(NodeContext& node)
uiInterface.InitMessage(_("Done loading").translated);
for (const auto& client : node.chain_clients) {
- client->start(scheduler);
+ client->start(*node.scheduler);
}
BanMan* banman = node.banman.get();
- scheduler.scheduleEvery([banman]{
+ node.scheduler->scheduleEvery([banman]{
banman->DumpBanlist();
}, DUMP_BANS_INTERVAL * 1000);
diff --git a/src/interfaces/chain.cpp b/src/interfaces/chain.cpp
index ac640aa35a..643bb58d56 100644
--- a/src/interfaces/chain.cpp
+++ b/src/interfaces/chain.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018-2019 The Bitcoin Core developers
+// Copyright (c) 2018-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -38,7 +38,7 @@
namespace interfaces {
namespace {
-class LockImpl : public Chain::Lock, public UniqueLock<CCriticalSection>
+class LockImpl : public Chain::Lock, public UniqueLock<RecursiveMutex>
{
Optional<int> getHeight() override
{
@@ -338,7 +338,6 @@ public:
void initMessage(const std::string& message) override { ::uiInterface.InitMessage(message); }
void initWarning(const std::string& message) override { InitWarning(message); }
void initError(const std::string& message) override { InitError(message); }
- void loadWallet(std::unique_ptr<Wallet> wallet) override { ::uiInterface.LoadWallet(wallet); }
void showProgress(const std::string& title, int progress, bool resume_possible) override
{
::uiInterface.ShowProgress(title, progress, resume_possible);
diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h
index 349af152d5..7304f82749 100644
--- a/src/interfaces/chain.h
+++ b/src/interfaces/chain.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018-2019 The Bitcoin Core developers
+// Copyright (c) 2018-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -43,7 +43,7 @@ class Wallet;
//! asynchronously
//! (https://github.com/bitcoin/bitcoin/pull/10973#issuecomment-380101269).
//!
-//! * The initMessages() and loadWallet() methods which the wallet uses to send
+//! * The initMessage() and showProgress() methods which the wallet uses to send
//! notifications to the GUI should go away when GUI and wallet can directly
//! communicate with each other without going through the node
//! (https://github.com/bitcoin/bitcoin/pull/15288#discussion_r253321096).
@@ -209,9 +209,6 @@ public:
//! Send init error.
virtual void initError(const std::string& message) = 0;
- //! Send wallet load notification to the GUI.
- virtual void loadWallet(std::unique_ptr<Wallet> wallet) = 0;
-
//! Send progress indicator.
virtual void showProgress(const std::string& title, int progress, bool resume_possible) = 0;
diff --git a/src/interfaces/handler.cpp b/src/interfaces/handler.cpp
index 92601fc4e9..95035c1b54 100644
--- a/src/interfaces/handler.cpp
+++ b/src/interfaces/handler.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 The Bitcoin Core developers
+// Copyright (c) 2018-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -22,6 +22,15 @@ public:
boost::signals2::scoped_connection m_connection;
};
+class CleanupHandler : public Handler
+{
+public:
+ explicit CleanupHandler(std::function<void()> cleanup) : m_cleanup(std::move(cleanup)) {}
+ ~CleanupHandler() override { if (!m_cleanup) return; m_cleanup(); m_cleanup = nullptr; }
+ void disconnect() override { if (!m_cleanup) return; m_cleanup(); m_cleanup = nullptr; }
+ std::function<void()> m_cleanup;
+};
+
} // namespace
std::unique_ptr<Handler> MakeHandler(boost::signals2::connection connection)
@@ -29,4 +38,9 @@ std::unique_ptr<Handler> MakeHandler(boost::signals2::connection connection)
return MakeUnique<HandlerImpl>(std::move(connection));
}
+std::unique_ptr<Handler> MakeHandler(std::function<void()> cleanup)
+{
+ return MakeUnique<CleanupHandler>(std::move(cleanup));
+}
+
} // namespace interfaces
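
CleanupHandler runs an arbitrary cleanup function exactly once, whether disconnect() is called first or the handler is simply destroyed; the null check plus reset makes any second invocation a no-op. A short usage sketch against the new MakeHandler(std::function<void()>) overload declared in handler.h:

```cpp
// Usage sketch for the cleanup-function handler (hypothetical Demo() wrapper).
#include <interfaces/handler.h>

#include <cstdio>

void Demo()
{
    auto handler = interfaces::MakeHandler([] { std::puts("cleanup ran"); });
    handler->disconnect(); // runs the cleanup now...
    handler.reset();       // ...and the destructor does not run it a second time
}
```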
diff --git a/src/interfaces/handler.h b/src/interfaces/handler.h
index c4c674cac5..fbac3c6b71 100644
--- a/src/interfaces/handler.h
+++ b/src/interfaces/handler.h
@@ -1,10 +1,11 @@
-// Copyright (c) 2018 The Bitcoin Core developers
+// Copyright (c) 2018-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_INTERFACES_HANDLER_H
#define BITCOIN_INTERFACES_HANDLER_H
+#include <functional>
#include <memory>
namespace boost {
@@ -30,6 +31,9 @@ public:
//! Return handler wrapping a boost signal connection.
std::unique_ptr<Handler> MakeHandler(boost::signals2::connection connection);
+//! Return handler wrapping a cleanup function.
+std::unique_ptr<Handler> MakeHandler(std::function<void()> cleanup);
+
} // namespace interfaces
#endif // BITCOIN_INTERFACES_HANDLER_H
diff --git a/src/interfaces/node.cpp b/src/interfaces/node.cpp
index f2b00377be..8a64a9d26a 100644
--- a/src/interfaces/node.cpp
+++ b/src/interfaces/node.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2018-2019 The Bitcoin Core developers
+// Copyright (c) 2018-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -43,11 +43,10 @@ std::vector<fs::path> ListWalletDir();
std::vector<std::shared_ptr<CWallet>> GetWallets();
std::shared_ptr<CWallet> LoadWallet(interfaces::Chain& chain, const std::string& name, std::string& error, std::vector<std::string>& warnings);
WalletCreationStatus CreateWallet(interfaces::Chain& chain, const SecureString& passphrase, uint64_t wallet_creation_flags, const std::string& name, std::string& error, std::vector<std::string>& warnings, std::shared_ptr<CWallet>& result);
+std::unique_ptr<interfaces::Handler> HandleLoadWallet(interfaces::Node::LoadWalletFn load_wallet);
namespace interfaces {
-class Wallet;
-
namespace {
class NodeImpl : public Node
@@ -286,7 +285,7 @@ public:
}
std::unique_ptr<Handler> handleLoadWallet(LoadWalletFn fn) override
{
- return MakeHandler(::uiInterface.LoadWallet_connect([fn](std::unique_ptr<Wallet>& wallet) { fn(std::move(wallet)); }));
+ return HandleLoadWallet(std::move(fn));
}
std::unique_ptr<Handler> handleNotifyNumConnectionsChanged(NotifyNumConnectionsChangedFn fn) override
{
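
handleLoadWallet no longer routes through the uiInterface boost signal; it forwards to the wallet-side HandleLoadWallet() declared above, which returns a Handler that deregisters the callback when released. A hedged registration sketch; the callback signature is taken from interfaces/node.h of this era and should be treated as an assumption:

```cpp
// Registration sketch (hypothetical helper, not from the patch).
#include <interfaces/handler.h>
#include <interfaces/node.h>
#include <interfaces/wallet.h>

#include <memory>

std::unique_ptr<interfaces::Handler> RegisterLoadWalletCallback(interfaces::Node& node)
{
    return node.handleLoadWallet([](std::unique_ptr<interfaces::Wallet> wallet) {
        // React to the newly loaded wallet, e.g. build a GUI model around it.
    });
}
```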
diff --git a/src/interfaces/wallet.cpp b/src/interfaces/wallet.cpp
index 568ab43ac0..01ade56b2a 100644
--- a/src/interfaces/wallet.cpp
+++ b/src/interfaces/wallet.cpp
@@ -19,7 +19,6 @@
#include <wallet/fees.h>
#include <wallet/ismine.h>
#include <wallet/load.h>
-#include <wallet/psbtwallet.h>
#include <wallet/rpcwallet.h>
#include <wallet/wallet.h>
@@ -119,19 +118,15 @@ public:
}
bool getPubKey(const CScript& script, const CKeyID& address, CPubKey& pub_key) override
{
- const SigningProvider* provider = m_wallet->GetSigningProvider(script);
+ std::unique_ptr<SigningProvider> provider = m_wallet->GetSolvingProvider(script);
if (provider) {
return provider->GetPubKey(address, pub_key);
}
return false;
}
- bool getPrivKey(const CScript& script, const CKeyID& address, CKey& key) override
+ SigningResult signMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) override
{
- const SigningProvider* provider = m_wallet->GetSigningProvider(script);
- if (provider) {
- return provider->GetKey(address, key);
- }
- return false;
+ return m_wallet->SignMessage(message, pkhash, str_sig);
}
bool isSpendable(const CTxDestination& dest) override { return m_wallet->IsMine(dest) & ISMINE_SPENDABLE; }
bool haveWatchOnly() override
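
Replacing the raw private-key accessor with a signing entry point keeps key material inside the wallet: callers pass the message and key hash and get back a SigningResult plus the encoded signature. A hedged calling sketch; SigningResult::OK as the success value comes from util/message.h at this time and is an assumption here:

```cpp
// Caller sketch for the new signMessage() interface method (hypothetical helper).
#include <interfaces/wallet.h>
#include <util/message.h>

#include <string>

bool TrySignMessage(interfaces::Wallet& wallet, const PKHash& pkhash,
                    const std::string& message, std::string& signature)
{
    return wallet.signMessage(message, pkhash, signature) == SigningResult::OK;
}
```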
@@ -180,7 +175,6 @@ public:
}
return result;
}
- void learnRelatedScripts(const CPubKey& key, OutputType type) override { m_wallet->GetLegacyScriptPubKeyMan()->LearnRelatedScripts(key, type); }
bool addDestData(const CTxDestination& dest, const std::string& key, const std::string& value) override
{
LOCK(m_wallet->cs_wallet);
@@ -362,9 +356,9 @@ public:
bool& complete,
int sighash_type = 1 /* SIGHASH_ALL */,
bool sign = true,
- bool bip32derivs = false) override
+ bool bip32derivs = false) const override
{
- return FillPSBT(m_wallet.get(), psbtx, complete, sighash_type, sign, bip32derivs);
+ return m_wallet->FillPSBT(psbtx, complete, sighash_type, sign, bip32derivs);
}
WalletBalances getBalances() override
{
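
fillPSBT is now const and delegates to CWallet::FillPSBT instead of the freestanding helper from the removed wallet/psbtwallet.h. A hedged caller sketch; the method name and its leading PartiallySignedTransaction& parameter are not visible in this hunk and are assumed from interfaces/wallet.h, and the return value is ignored since its type is not shown either:

```cpp
// Caller sketch for the const fillPSBT() (hypothetical helper; see caveats above).
#include <interfaces/wallet.h>
#include <psbt.h>

bool IsCompleteAfterFill(const interfaces::Wallet& wallet, PartiallySignedTransaction& psbtx)
{
    bool complete = false;
    // Defaults from the declaration: sighash_type = 1 (SIGHASH_ALL),
    // sign = true, bip32derivs = false.
    wallet.fillPSBT(psbtx, complete);
    return complete;
}
```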
@@ -469,7 +463,7 @@ public:
}
unsigned int getConfirmTarget() override { return m_wallet->m_confirm_target; }
bool hdEnabled() override { return m_wallet->IsHDEnabled(); }
- bool canGetAddresses() override { return m_wallet->CanGetAddresses(); }
+ bool canGetAddresses() const override { return m_wallet->CanGetAddresses(); }
bool IsWalletFlagSet(uint64_t flag) override { return m_wallet->IsWalletFlagSet(flag); }
OutputType getDefaultAddressType() override { return m_wallet->m_default_address_type; }
OutputType getDefaultChangeType() override { return m_wallet->m_default_change_type; }
diff --git a/src/interfaces/wallet.h b/src/interfaces/wallet.h
index de53b16c0c..9476c9f77f 100644
--- a/src/interfaces/wallet.h
+++ b/src/interfaces/wallet.h
@@ -10,6 +10,7 @@
#include <script/standard.h> // For CTxDestination
#include <support/allocators/secure.h> // For SecureString
#include <ui_interface.h> // For ChangeType
+#include <util/message.h>
#include <functional>
#include <map>
@@ -84,8 +85,8 @@ public:
//! Get public key.
virtual bool getPubKey(const CScript& script, const CKeyID& address, CPubKey& pub_key) = 0;
- //! Get private key.
- virtual bool getPrivKey(const CScript& script, const CKeyID& address, CKey& key) = 0;
+ //! Sign message
+ virtual SigningResult signMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) = 0;
//! Return whether wallet has private key.
virtual bool isSpendable(const CTxDestination& dest) = 0;
@@ -108,10 +109,6 @@ public:
//! Get wallet address list.
virtual std::vector<WalletAddress> getAddresses() = 0;
- //! Add scripts to key store so old so software versions opening the wallet
- //! database can detect payments to newer address types.
- virtual void learnRelatedScripts(const CPubKey& key, OutputType type) = 0;
-
//! Add dest data.
virtual bool addDestData(const CTxDestination& dest, const std::string& key, const std::string& value) = 0;
@@ -200,7 +197,7 @@ public:
bool& complete,
int sighash_type = 1 /* SIGHASH_ALL */,
bool sign = true,
- bool bip32derivs = false) = 0;
+ bool bip32derivs = false) const = 0;
//! Get balances.
virtual WalletBalances getBalances() = 0;
@@ -250,7 +247,7 @@ public:
virtual bool hdEnabled() = 0;
// Return whether the wallet is blank.
- virtual bool canGetAddresses() = 0;
+ virtual bool canGetAddresses() const = 0;
// check if a certain wallet flag is set.
virtual bool IsWalletFlagSet(uint64_t flag) = 0;
diff --git a/src/leveldb/.appveyor.yml b/src/leveldb/.appveyor.yml
new file mode 100644
index 0000000000..c24b17e805
--- /dev/null
+++ b/src/leveldb/.appveyor.yml
@@ -0,0 +1,35 @@
+# Build matrix / environment variables are explained on:
+# https://www.appveyor.com/docs/appveyor-yml/
+# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml
+
+version: "{build}"
+
+environment:
+ matrix:
+ # AppVeyor currently has no custom job name feature.
+ # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs
+ - JOB: Visual Studio 2017
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+ CMAKE_GENERATOR: Visual Studio 15 2017
+
+platform:
+ - x86
+ - x64
+
+configuration:
+ - RelWithDebInfo
+ - Debug
+
+build_script:
+ - git submodule update --init --recursive
+ - mkdir build
+ - cd build
+ - if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64
+ - cmake --version
+ - cmake .. -G "%CMAKE_GENERATOR%"
+ -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%"
+ - cmake --build . --config "%CONFIGURATION%"
+ - cd ..
+
+test_script:
+ - cd build && ctest --verbose --build-config "%CONFIGURATION%" && cd ..
diff --git a/src/leveldb/.clang-format b/src/leveldb/.clang-format
new file mode 100644
index 0000000000..f493f75382
--- /dev/null
+++ b/src/leveldb/.clang-format
@@ -0,0 +1,18 @@
+# Run manually to reformat a file:
+# clang-format -i --style=file <file>
+# find . -iname '*.cc' -o -iname '*.h' -o -iname '*.h.in' | xargs clang-format -i --style=file
+BasedOnStyle: Google
+DerivePointerAlignment: false
+
+# Public headers are in a different location in the internal Google repository.
+# Order them so that when imported to the authoritative repository they will be
+# in correct alphabetical order.
+IncludeCategories:
+ - Regex: '^(<|"(benchmarks|db|helpers)/)'
+ Priority: 1
+ - Regex: '^"(leveldb)/'
+ Priority: 2
+ - Regex: '^(<|"(issues|port|table|third_party|util)/)'
+ Priority: 3
+ - Regex: '.*'
+ Priority: 4
diff --git a/src/leveldb/.gitignore b/src/leveldb/.gitignore
index 71d87a4eeb..c4b242534f 100644
--- a/src/leveldb/.gitignore
+++ b/src/leveldb/.gitignore
@@ -1,13 +1,8 @@
-build_config.mk
-*.a
-*.o
-*.dylib*
-*.so
-*.so.*
-*_test
-db_bench
-leveldbutil
-Release
-Debug
-Benchmark
-vs2010.*
+# Editors.
+*.sw*
+.vscode
+.DS_Store
+
+# Build directory.
+build/
+out/
diff --git a/src/leveldb/.travis.yml b/src/leveldb/.travis.yml
index f5bd74c454..42cbe64fd0 100644
--- a/src/leveldb/.travis.yml
+++ b/src/leveldb/.travis.yml
@@ -1,13 +1,82 @@
+# Build matrix / environment variables are explained on:
+# http://about.travis-ci.org/docs/user/build-configuration/
+# This file can be validated on: http://lint.travis-ci.org/
+
language: cpp
+dist: bionic
+osx_image: xcode10.3
+
compiler:
-- clang
- gcc
+- clang
os:
- linux
- osx
-sudo: false
-before_install:
-- echo $LANG
-- echo $LC_ALL
+
+env:
+- BUILD_TYPE=Debug
+- BUILD_TYPE=RelWithDebInfo
+
+addons:
+ apt:
+ sources:
+ - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main'
+ key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
+ - sourceline: 'ppa:ubuntu-toolchain-r/test'
+ packages:
+ - clang-9
+ - cmake
+ - gcc-9
+ - g++-9
+ - libgoogle-perftools-dev
+ - libkyotocabinet-dev
+ - libsnappy-dev
+ - libsqlite3-dev
+ - ninja-build
+ homebrew:
+ packages:
+ - cmake
+ - crc32c
+ - gcc@9
+ - gperftools
+ - kyoto-cabinet
+ - llvm@9
+ - ninja
+ - snappy
+ - sqlite3
+ update: true
+
+install:
+# The following Homebrew packages aren't linked by default, and need to be
+# prepended to the path explicitly.
+- if [ "$TRAVIS_OS_NAME" = "osx" ]; then
+ export PATH="$(brew --prefix llvm)/bin:$PATH";
+ fi
+# /usr/bin/gcc points to an older compiler on both Linux and macOS.
+- if [ "$CXX" = "g++" ]; then export CXX="g++-9" CC="gcc-9"; fi
+# /usr/bin/clang points to an older compiler on both Linux and macOS.
+#
+# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values
+# below don't work on macOS. Fortunately, the path change above makes the
+# default values (clang and clang++) resolve to the correct compiler on macOS.
+- if [ "$TRAVIS_OS_NAME" = "linux" ]; then
+ if [ "$CXX" = "clang++" ]; then export CXX="clang++-9" CC="clang-9"; fi;
+ fi
+- echo ${CC}
+- echo ${CXX}
+- ${CXX} --version
+- cmake --version
+
+before_script:
+- mkdir -p build && cd build
+- cmake .. -G Ninja -DCMAKE_BUILD_TYPE=$BUILD_TYPE
+ -DCMAKE_INSTALL_PREFIX=$HOME/.local
+- cmake --build .
+- cd ..
+
script:
-- make -j 4 check
+- cd build && ctest --verbose && cd ..
+- "if [ -f build/db_bench ] ; then build/db_bench ; fi"
+- "if [ -f build/db_bench_sqlite3 ] ; then build/db_bench_sqlite3 ; fi"
+- "if [ -f build/db_bench_tree_db ] ; then build/db_bench_tree_db ; fi"
+- cd build && cmake --build . --target install
diff --git a/src/leveldb/CMakeLists.txt b/src/leveldb/CMakeLists.txt
new file mode 100644
index 0000000000..1cb46256c2
--- /dev/null
+++ b/src/leveldb/CMakeLists.txt
@@ -0,0 +1,465 @@
+# Copyright 2017 The LevelDB Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+cmake_minimum_required(VERSION 3.9)
+# Keep the version below in sync with the one in db.h
+project(leveldb VERSION 1.22.0 LANGUAGES C CXX)
+
+# This project can use C11, but will gracefully decay down to C89.
+set(CMAKE_C_STANDARD 11)
+set(CMAKE_C_STANDARD_REQUIRED OFF)
+set(CMAKE_C_EXTENSIONS OFF)
+
+# This project requires C++11.
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+if (WIN32)
+ set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_WINDOWS)
+ # TODO(cmumford): Make UNICODE configurable for Windows.
+ add_definitions(-D_UNICODE -DUNICODE)
+else (WIN32)
+ set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_POSIX)
+endif (WIN32)
+
+option(LEVELDB_BUILD_TESTS "Build LevelDB's unit tests" ON)
+option(LEVELDB_BUILD_BENCHMARKS "Build LevelDB's benchmarks" ON)
+option(LEVELDB_INSTALL "Install LevelDB's header and library" ON)
+
+include(TestBigEndian)
+test_big_endian(LEVELDB_IS_BIG_ENDIAN)
+
+include(CheckIncludeFile)
+check_include_file("unistd.h" HAVE_UNISTD_H)
+
+include(CheckLibraryExists)
+check_library_exists(crc32c crc32c_value "" HAVE_CRC32C)
+check_library_exists(snappy snappy_compress "" HAVE_SNAPPY)
+check_library_exists(tcmalloc malloc "" HAVE_TCMALLOC)
+
+include(CheckCXXSymbolExists)
+# Using check_cxx_symbol_exists() instead of check_c_symbol_exists() because
+# we're including the header from C++, and feature detection should use the same
+# compiler language that the project will use later. Principles aside, some
+# C library versions do not expose fdatasync() in <unistd.h> in standard C mode
+# (-std=c11), but do expose the function in standard C++ mode (-std=c++11).
+check_cxx_symbol_exists(fdatasync "unistd.h" HAVE_FDATASYNC)
+check_cxx_symbol_exists(F_FULLFSYNC "fcntl.h" HAVE_FULLFSYNC)
+check_cxx_symbol_exists(O_CLOEXEC "fcntl.h" HAVE_O_CLOEXEC)
+
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # Disable C++ exceptions.
+ string(REGEX REPLACE "/EH[a-z]+" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHs-c-")
+ add_definitions(-D_HAS_EXCEPTIONS=0)
+
+ # Disable RTTI.
+ string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-")
+else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # Enable strict prototype warnings for C code in clang and gcc.
+ if(NOT CMAKE_C_FLAGS MATCHES "-Wstrict-prototypes")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wstrict-prototypes")
+ endif(NOT CMAKE_C_FLAGS MATCHES "-Wstrict-prototypes")
+
+ # Disable C++ exceptions.
+ string(REGEX REPLACE "-fexceptions" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
+
+ # Disable RTTI.
+ string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti")
+endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+
+# Test whether -Wthread-safety is available. See
+# https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+include(CheckCXXCompilerFlag)
+check_cxx_compiler_flag(-Wthread-safety HAVE_CLANG_THREAD_SAFETY)
+
+include(CheckCXXSourceCompiles)
+
+# Test whether C++17 __has_include is available.
+check_cxx_source_compiles("
+#if defined(__has_include) && __has_include(<string>)
+#include <string>
+#endif
+int main() { std::string str; return 0; }
+" HAVE_CXX17_HAS_INCLUDE)
+
+set(LEVELDB_PUBLIC_INCLUDE_DIR "include/leveldb")
+set(LEVELDB_PORT_CONFIG_DIR "include/port")
+
+configure_file(
+ "port/port_config.h.in"
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+)
+
+include_directories(
+ "${PROJECT_BINARY_DIR}/include"
+ "."
+)
+
+if(BUILD_SHARED_LIBS)
+ # Only export LEVELDB_EXPORT symbols from the shared library.
+ add_compile_options(-fvisibility=hidden)
+endif(BUILD_SHARED_LIBS)
+
+# Must be included before CMAKE_INSTALL_INCLUDEDIR is used.
+include(GNUInstallDirs)
+
+add_library(leveldb "")
+target_sources(leveldb
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+ "db/builder.cc"
+ "db/builder.h"
+ "db/c.cc"
+ "db/db_impl.cc"
+ "db/db_impl.h"
+ "db/db_iter.cc"
+ "db/db_iter.h"
+ "db/dbformat.cc"
+ "db/dbformat.h"
+ "db/dumpfile.cc"
+ "db/filename.cc"
+ "db/filename.h"
+ "db/log_format.h"
+ "db/log_reader.cc"
+ "db/log_reader.h"
+ "db/log_writer.cc"
+ "db/log_writer.h"
+ "db/memtable.cc"
+ "db/memtable.h"
+ "db/repair.cc"
+ "db/skiplist.h"
+ "db/snapshot.h"
+ "db/table_cache.cc"
+ "db/table_cache.h"
+ "db/version_edit.cc"
+ "db/version_edit.h"
+ "db/version_set.cc"
+ "db/version_set.h"
+ "db/write_batch_internal.h"
+ "db/write_batch.cc"
+ "port/port_stdcxx.h"
+ "port/port.h"
+ "port/thread_annotations.h"
+ "table/block_builder.cc"
+ "table/block_builder.h"
+ "table/block.cc"
+ "table/block.h"
+ "table/filter_block.cc"
+ "table/filter_block.h"
+ "table/format.cc"
+ "table/format.h"
+ "table/iterator_wrapper.h"
+ "table/iterator.cc"
+ "table/merger.cc"
+ "table/merger.h"
+ "table/table_builder.cc"
+ "table/table.cc"
+ "table/two_level_iterator.cc"
+ "table/two_level_iterator.h"
+ "util/arena.cc"
+ "util/arena.h"
+ "util/bloom.cc"
+ "util/cache.cc"
+ "util/coding.cc"
+ "util/coding.h"
+ "util/comparator.cc"
+ "util/crc32c.cc"
+ "util/crc32c.h"
+ "util/env.cc"
+ "util/filter_policy.cc"
+ "util/hash.cc"
+ "util/hash.h"
+ "util/logging.cc"
+ "util/logging.h"
+ "util/mutexlock.h"
+ "util/no_destructor.h"
+ "util/options.cc"
+ "util/random.h"
+ "util/status.cc"
+
+ # Only CMake 3.3+ supports PUBLIC sources in targets exported by "install".
+ $<$<VERSION_GREATER:CMAKE_VERSION,3.2>:PUBLIC>
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
+)
+
+if (WIN32)
+ target_sources(leveldb
+ PRIVATE
+ "util/env_windows.cc"
+ "util/windows_logger.h"
+ )
+else (WIN32)
+ target_sources(leveldb
+ PRIVATE
+ "util/env_posix.cc"
+ "util/posix_logger.h"
+ )
+endif (WIN32)
+
+# MemEnv is not part of the interface and could be pulled to a separate library.
+target_sources(leveldb
+ PRIVATE
+ "helpers/memenv/memenv.cc"
+ "helpers/memenv/memenv.h"
+)
+
+target_include_directories(leveldb
+ PUBLIC
+ $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
+ $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+)
+
+set_target_properties(leveldb
+ PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR})
+
+target_compile_definitions(leveldb
+ PRIVATE
+ # Used by include/export.h when building shared libraries.
+ LEVELDB_COMPILE_LIBRARY
+ # Used by port/port.h.
+ ${LEVELDB_PLATFORM_NAME}=1
+)
+if (NOT HAVE_CXX17_HAS_INCLUDE)
+ target_compile_definitions(leveldb
+ PRIVATE
+ LEVELDB_HAS_PORT_CONFIG_H=1
+ )
+endif(NOT HAVE_CXX17_HAS_INCLUDE)
+
+if(BUILD_SHARED_LIBS)
+ target_compile_definitions(leveldb
+ PUBLIC
+ # Used by include/export.h.
+ LEVELDB_SHARED_LIBRARY
+ )
+endif(BUILD_SHARED_LIBS)
+
+if(HAVE_CLANG_THREAD_SAFETY)
+ target_compile_options(leveldb
+ PUBLIC
+ -Werror -Wthread-safety)
+endif(HAVE_CLANG_THREAD_SAFETY)
+
+if(HAVE_CRC32C)
+ target_link_libraries(leveldb crc32c)
+endif(HAVE_CRC32C)
+if(HAVE_SNAPPY)
+ target_link_libraries(leveldb snappy)
+endif(HAVE_SNAPPY)
+if(HAVE_TCMALLOC)
+ target_link_libraries(leveldb tcmalloc)
+endif(HAVE_TCMALLOC)
+
+# Needed by port_stdcxx.h
+find_package(Threads REQUIRED)
+target_link_libraries(leveldb Threads::Threads)
+
+add_executable(leveldbutil
+ "db/leveldbutil.cc"
+)
+target_link_libraries(leveldbutil leveldb)
+
+if(LEVELDB_BUILD_TESTS)
+ enable_testing()
+
+ function(leveldb_test test_file)
+ get_filename_component(test_target_name "${test_file}" NAME_WE)
+
+ add_executable("${test_target_name}" "")
+ target_sources("${test_target_name}"
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+ "util/testharness.cc"
+ "util/testharness.h"
+ "util/testutil.cc"
+ "util/testutil.h"
+
+ "${test_file}"
+ )
+ target_link_libraries("${test_target_name}" leveldb)
+ target_compile_definitions("${test_target_name}"
+ PRIVATE
+ ${LEVELDB_PLATFORM_NAME}=1
+ )
+ if (NOT HAVE_CXX17_HAS_INCLUDE)
+ target_compile_definitions("${test_target_name}"
+ PRIVATE
+ LEVELDB_HAS_PORT_CONFIG_H=1
+ )
+ endif(NOT HAVE_CXX17_HAS_INCLUDE)
+
+ add_test(NAME "${test_target_name}" COMMAND "${test_target_name}")
+ endfunction(leveldb_test)
+
+ leveldb_test("db/c_test.c")
+ leveldb_test("db/fault_injection_test.cc")
+
+ leveldb_test("issues/issue178_test.cc")
+ leveldb_test("issues/issue200_test.cc")
+ leveldb_test("issues/issue320_test.cc")
+
+ leveldb_test("util/env_test.cc")
+ leveldb_test("util/status_test.cc")
+ leveldb_test("util/no_destructor_test.cc")
+
+ if(NOT BUILD_SHARED_LIBS)
+ leveldb_test("db/autocompact_test.cc")
+ leveldb_test("db/corruption_test.cc")
+ leveldb_test("db/db_test.cc")
+ leveldb_test("db/dbformat_test.cc")
+ leveldb_test("db/filename_test.cc")
+ leveldb_test("db/log_test.cc")
+ leveldb_test("db/recovery_test.cc")
+ leveldb_test("db/skiplist_test.cc")
+ leveldb_test("db/version_edit_test.cc")
+ leveldb_test("db/version_set_test.cc")
+ leveldb_test("db/write_batch_test.cc")
+
+ leveldb_test("helpers/memenv/memenv_test.cc")
+
+ leveldb_test("table/filter_block_test.cc")
+ leveldb_test("table/table_test.cc")
+
+ leveldb_test("util/arena_test.cc")
+ leveldb_test("util/bloom_test.cc")
+ leveldb_test("util/cache_test.cc")
+ leveldb_test("util/coding_test.cc")
+ leveldb_test("util/crc32c_test.cc")
+ leveldb_test("util/hash_test.cc")
+ leveldb_test("util/logging_test.cc")
+
+ # TODO(costan): This test also uses
+ # "util/env_{posix|windows}_test_helper.h"
+ if (WIN32)
+ leveldb_test("util/env_windows_test.cc")
+ else (WIN32)
+ leveldb_test("util/env_posix_test.cc")
+ endif (WIN32)
+ endif(NOT BUILD_SHARED_LIBS)
+endif(LEVELDB_BUILD_TESTS)
+
+if(LEVELDB_BUILD_BENCHMARKS)
+ function(leveldb_benchmark bench_file)
+ get_filename_component(bench_target_name "${bench_file}" NAME_WE)
+
+ add_executable("${bench_target_name}" "")
+ target_sources("${bench_target_name}"
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+ "util/histogram.cc"
+ "util/histogram.h"
+ "util/testharness.cc"
+ "util/testharness.h"
+ "util/testutil.cc"
+ "util/testutil.h"
+
+ "${bench_file}"
+ )
+ target_link_libraries("${bench_target_name}" leveldb)
+ target_compile_definitions("${bench_target_name}"
+ PRIVATE
+ ${LEVELDB_PLATFORM_NAME}=1
+ )
+ if (NOT HAVE_CXX17_HAS_INCLUDE)
+ target_compile_definitions("${bench_target_name}"
+ PRIVATE
+ LEVELDB_HAS_PORT_CONFIG_H=1
+ )
+ endif(NOT HAVE_CXX17_HAS_INCLUDE)
+ endfunction(leveldb_benchmark)
+
+ if(NOT BUILD_SHARED_LIBS)
+ leveldb_benchmark("benchmarks/db_bench.cc")
+ endif(NOT BUILD_SHARED_LIBS)
+
+ check_library_exists(sqlite3 sqlite3_open "" HAVE_SQLITE3)
+ if(HAVE_SQLITE3)
+ leveldb_benchmark("benchmarks/db_bench_sqlite3.cc")
+ target_link_libraries(db_bench_sqlite3 sqlite3)
+ endif(HAVE_SQLITE3)
+
+ # check_library_exists is insufficient here because the library names have
+ # different manglings when compiled with clang or gcc, at least when installed
+ # with Homebrew on Mac.
+ set(OLD_CMAKE_REQURED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES})
+ list(APPEND CMAKE_REQUIRED_LIBRARIES kyotocabinet)
+ check_cxx_source_compiles("
+#include <kcpolydb.h>
+
+int main() {
+ kyotocabinet::TreeDB* db = new kyotocabinet::TreeDB();
+ delete db;
+ return 0;
+}
+ " HAVE_KYOTOCABINET)
+ set(CMAKE_REQUIRED_LIBRARIES ${OLD_CMAKE_REQURED_LIBRARIES})
+ if(HAVE_KYOTOCABINET)
+ leveldb_benchmark("benchmarks/db_bench_tree_db.cc")
+ target_link_libraries(db_bench_tree_db kyotocabinet)
+ endif(HAVE_KYOTOCABINET)
+endif(LEVELDB_BUILD_BENCHMARKS)
+
+if(LEVELDB_INSTALL)
+ install(TARGETS leveldb
+ EXPORT leveldbTargets
+ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ )
+ install(
+ FILES
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
+ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/leveldb
+ )
+
+ include(CMakePackageConfigHelpers)
+ write_basic_package_version_file(
+ "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
+ COMPATIBILITY SameMajorVersion
+ )
+ install(
+ EXPORT leveldbTargets
+ NAMESPACE leveldb::
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
+ )
+ install(
+ FILES
+ "cmake/leveldbConfig.cmake"
+ "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
+ )
+endif(LEVELDB_INSTALL)
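
The HAVE_CXX17_HAS_INCLUDE probe near the top of this new CMakeLists.txt decides whether targets get LEVELDB_HAS_PORT_CONFIG_H=1. A hedged C++ sketch of the consumer-side pattern that enables, loosely modeled on leveldb's port header rather than copied from it:

```cpp
// Consumer-side sketch: prefer __has_include when the compiler supports it,
// otherwise rely on the generated port_config.h that CMake points us at.
#if defined(LEVELDB_HAS_PORT_CONFIG_H)
#include "port/port_config.h"
#elif defined(__has_include)
#if __has_include("port/port_config.h")
#include "port/port_config.h"
#endif
#endif
```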
diff --git a/src/leveldb/CONTRIBUTING.md b/src/leveldb/CONTRIBUTING.md
index cd600ff46b..a74572a596 100644
--- a/src/leveldb/CONTRIBUTING.md
+++ b/src/leveldb/CONTRIBUTING.md
@@ -31,6 +31,6 @@ the CLA.
## Writing Code ##
-If your contribution contains code, please make sure that it follows
-[the style guide](http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml).
+If your contribution contains code, please make sure that it follows
+[the style guide](http://google.github.io/styleguide/cppguide.html).
Otherwise we will have to ask you to make changes, and that's no fun for anyone.
diff --git a/src/leveldb/Makefile b/src/leveldb/Makefile
deleted file mode 100644
index f7cc7d736c..0000000000
--- a/src/leveldb/Makefile
+++ /dev/null
@@ -1,424 +0,0 @@
-# Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#-----------------------------------------------
-# Uncomment exactly one of the lines labelled (A), (B), and (C) below
-# to switch between compilation modes.
-
-# (A) Production use (optimized mode)
-OPT ?= -O2 -DNDEBUG
-# (B) Debug mode, w/ full line-level debugging symbols
-# OPT ?= -g2
-# (C) Profiling mode: opt, but w/debugging symbols
-# OPT ?= -O2 -g2 -DNDEBUG
-#-----------------------------------------------
-
-# detect what platform we're building on
-$(shell CC="$(CC)" CXX="$(CXX)" TARGET_OS="$(TARGET_OS)" \
- ./build_detect_platform build_config.mk ./)
-# this file is generated by the previous line to set build flags and sources
-include build_config.mk
-
-TESTS = \
- db/autocompact_test \
- db/c_test \
- db/corruption_test \
- db/db_test \
- db/dbformat_test \
- db/fault_injection_test \
- db/filename_test \
- db/log_test \
- db/recovery_test \
- db/skiplist_test \
- db/version_edit_test \
- db/version_set_test \
- db/write_batch_test \
- helpers/memenv/memenv_test \
- issues/issue178_test \
- issues/issue200_test \
- table/filter_block_test \
- table/table_test \
- util/arena_test \
- util/bloom_test \
- util/cache_test \
- util/coding_test \
- util/crc32c_test \
- util/env_posix_test \
- util/env_test \
- util/hash_test
-
-UTILS = \
- db/db_bench \
- db/leveldbutil
-
-# Put the object files in a subdirectory, but the application at the top of the object dir.
-PROGNAMES := $(notdir $(TESTS) $(UTILS))
-
-# On Linux may need libkyotocabinet-dev for dependency.
-BENCHMARKS = \
- doc/bench/db_bench_sqlite3 \
- doc/bench/db_bench_tree_db
-
-CFLAGS += -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
-CXXFLAGS += -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT)
-
-LDFLAGS += $(PLATFORM_LDFLAGS)
-LIBS += $(PLATFORM_LIBS)
-
-SIMULATOR_OUTDIR=out-ios-x86
-DEVICE_OUTDIR=out-ios-arm
-
-ifeq ($(PLATFORM), IOS)
-# Note: iOS should probably be using libtool, not ar.
-AR=xcrun ar
-SIMULATORSDK=$(shell xcrun -sdk iphonesimulator --show-sdk-path)
-DEVICESDK=$(shell xcrun -sdk iphoneos --show-sdk-path)
-DEVICE_CFLAGS = -isysroot "$(DEVICESDK)" -arch armv6 -arch armv7 -arch armv7s -arch arm64
-SIMULATOR_CFLAGS = -isysroot "$(SIMULATORSDK)" -arch i686 -arch x86_64
-STATIC_OUTDIR=out-ios-universal
-else
-STATIC_OUTDIR=out-static
-SHARED_OUTDIR=out-shared
-STATIC_PROGRAMS := $(addprefix $(STATIC_OUTDIR)/, $(PROGNAMES))
-SHARED_PROGRAMS := $(addprefix $(SHARED_OUTDIR)/, db_bench)
-endif
-
-STATIC_LIBOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(SOURCES:.cc=.o))
-STATIC_MEMENVOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
-
-DEVICE_LIBOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(SOURCES:.cc=.o))
-DEVICE_MEMENVOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
-
-SIMULATOR_LIBOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(SOURCES:.cc=.o))
-SIMULATOR_MEMENVOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
-
-SHARED_LIBOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(SOURCES:.cc=.o))
-SHARED_MEMENVOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
-
-TESTUTIL := $(STATIC_OUTDIR)/util/testutil.o
-TESTHARNESS := $(STATIC_OUTDIR)/util/testharness.o $(TESTUTIL)
-
-STATIC_TESTOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(TESTS)))
-STATIC_UTILOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(UTILS)))
-STATIC_ALLOBJS := $(STATIC_LIBOBJECTS) $(STATIC_MEMENVOBJECTS) $(STATIC_TESTOBJS) $(STATIC_UTILOBJS) $(TESTHARNESS)
-DEVICE_ALLOBJS := $(DEVICE_LIBOBJECTS) $(DEVICE_MEMENVOBJECTS)
-SIMULATOR_ALLOBJS := $(SIMULATOR_LIBOBJECTS) $(SIMULATOR_MEMENVOBJECTS)
-
-default: all
-
-# Should we build shared libraries?
-ifneq ($(PLATFORM_SHARED_EXT),)
-
-# Many leveldb test apps use non-exported API's. Only build a subset for testing.
-SHARED_ALLOBJS := $(SHARED_LIBOBJECTS) $(SHARED_MEMENVOBJECTS) $(TESTHARNESS)
-
-ifneq ($(PLATFORM_SHARED_VERSIONED),true)
-SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT)
-SHARED_LIB2 = $(SHARED_LIB1)
-SHARED_LIB3 = $(SHARED_LIB1)
-SHARED_LIBS = $(SHARED_LIB1)
-SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a
-else
-# Update db.h if you change these.
-SHARED_VERSION_MAJOR = 1
-SHARED_VERSION_MINOR = 20
-SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT)
-SHARED_LIB2 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR)
-SHARED_LIB3 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR).$(SHARED_VERSION_MINOR)
-SHARED_LIBS = $(SHARED_OUTDIR)/$(SHARED_LIB1) $(SHARED_OUTDIR)/$(SHARED_LIB2) $(SHARED_OUTDIR)/$(SHARED_LIB3)
-$(SHARED_OUTDIR)/$(SHARED_LIB1): $(SHARED_OUTDIR)/$(SHARED_LIB3)
- ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB1)
-$(SHARED_OUTDIR)/$(SHARED_LIB2): $(SHARED_OUTDIR)/$(SHARED_LIB3)
- ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB2)
-SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a
-endif
-
-$(SHARED_OUTDIR)/$(SHARED_LIB3): $(SHARED_LIBOBJECTS)
- $(CXX) $(LDFLAGS) $(PLATFORM_SHARED_LDFLAGS)$(SHARED_LIB2) $(SHARED_LIBOBJECTS) -o $(SHARED_OUTDIR)/$(SHARED_LIB3) $(LIBS)
-
-endif # PLATFORM_SHARED_EXT
-
-all: $(SHARED_LIBS) $(SHARED_PROGRAMS) $(STATIC_OUTDIR)/libleveldb.a $(STATIC_OUTDIR)/libmemenv.a $(STATIC_PROGRAMS)
-
-check: $(STATIC_PROGRAMS)
- for t in $(notdir $(TESTS)); do echo "***** Running $$t"; $(STATIC_OUTDIR)/$$t || exit 1; done
-
-clean:
- -rm -rf out-static out-shared out-ios-x86 out-ios-arm out-ios-universal
- -rm -f build_config.mk
- -rm -rf ios-x86 ios-arm
-
-$(STATIC_OUTDIR):
- mkdir $@
-
-$(STATIC_OUTDIR)/db: | $(STATIC_OUTDIR)
- mkdir $@
-
-$(STATIC_OUTDIR)/helpers/memenv: | $(STATIC_OUTDIR)
- mkdir -p $@
-
-$(STATIC_OUTDIR)/port: | $(STATIC_OUTDIR)
- mkdir $@
-
-$(STATIC_OUTDIR)/table: | $(STATIC_OUTDIR)
- mkdir $@
-
-$(STATIC_OUTDIR)/util: | $(STATIC_OUTDIR)
- mkdir $@
-
-.PHONY: STATIC_OBJDIRS
-STATIC_OBJDIRS: \
- $(STATIC_OUTDIR)/db \
- $(STATIC_OUTDIR)/port \
- $(STATIC_OUTDIR)/table \
- $(STATIC_OUTDIR)/util \
- $(STATIC_OUTDIR)/helpers/memenv
-
-$(SHARED_OUTDIR):
- mkdir $@
-
-$(SHARED_OUTDIR)/db: | $(SHARED_OUTDIR)
- mkdir $@
-
-$(SHARED_OUTDIR)/helpers/memenv: | $(SHARED_OUTDIR)
- mkdir -p $@
-
-$(SHARED_OUTDIR)/port: | $(SHARED_OUTDIR)
- mkdir $@
-
-$(SHARED_OUTDIR)/table: | $(SHARED_OUTDIR)
- mkdir $@
-
-$(SHARED_OUTDIR)/util: | $(SHARED_OUTDIR)
- mkdir $@
-
-.PHONY: SHARED_OBJDIRS
-SHARED_OBJDIRS: \
- $(SHARED_OUTDIR)/db \
- $(SHARED_OUTDIR)/port \
- $(SHARED_OUTDIR)/table \
- $(SHARED_OUTDIR)/util \
- $(SHARED_OUTDIR)/helpers/memenv
-
-$(DEVICE_OUTDIR):
- mkdir $@
-
-$(DEVICE_OUTDIR)/db: | $(DEVICE_OUTDIR)
- mkdir $@
-
-$(DEVICE_OUTDIR)/helpers/memenv: | $(DEVICE_OUTDIR)
- mkdir -p $@
-
-$(DEVICE_OUTDIR)/port: | $(DEVICE_OUTDIR)
- mkdir $@
-
-$(DEVICE_OUTDIR)/table: | $(DEVICE_OUTDIR)
- mkdir $@
-
-$(DEVICE_OUTDIR)/util: | $(DEVICE_OUTDIR)
- mkdir $@
-
-.PHONY: DEVICE_OBJDIRS
-DEVICE_OBJDIRS: \
- $(DEVICE_OUTDIR)/db \
- $(DEVICE_OUTDIR)/port \
- $(DEVICE_OUTDIR)/table \
- $(DEVICE_OUTDIR)/util \
- $(DEVICE_OUTDIR)/helpers/memenv
-
-$(SIMULATOR_OUTDIR):
- mkdir $@
-
-$(SIMULATOR_OUTDIR)/db: | $(SIMULATOR_OUTDIR)
- mkdir $@
-
-$(SIMULATOR_OUTDIR)/helpers/memenv: | $(SIMULATOR_OUTDIR)
- mkdir -p $@
-
-$(SIMULATOR_OUTDIR)/port: | $(SIMULATOR_OUTDIR)
- mkdir $@
-
-$(SIMULATOR_OUTDIR)/table: | $(SIMULATOR_OUTDIR)
- mkdir $@
-
-$(SIMULATOR_OUTDIR)/util: | $(SIMULATOR_OUTDIR)
- mkdir $@
-
-.PHONY: SIMULATOR_OBJDIRS
-SIMULATOR_OBJDIRS: \
- $(SIMULATOR_OUTDIR)/db \
- $(SIMULATOR_OUTDIR)/port \
- $(SIMULATOR_OUTDIR)/table \
- $(SIMULATOR_OUTDIR)/util \
- $(SIMULATOR_OUTDIR)/helpers/memenv
-
-$(STATIC_ALLOBJS): | STATIC_OBJDIRS
-$(DEVICE_ALLOBJS): | DEVICE_OBJDIRS
-$(SIMULATOR_ALLOBJS): | SIMULATOR_OBJDIRS
-$(SHARED_ALLOBJS): | SHARED_OBJDIRS
-
-ifeq ($(PLATFORM), IOS)
-$(DEVICE_OUTDIR)/libleveldb.a: $(DEVICE_LIBOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(DEVICE_LIBOBJECTS)
-
-$(SIMULATOR_OUTDIR)/libleveldb.a: $(SIMULATOR_LIBOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(SIMULATOR_LIBOBJECTS)
-
-$(DEVICE_OUTDIR)/libmemenv.a: $(DEVICE_MEMENVOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(DEVICE_MEMENVOBJECTS)
-
-$(SIMULATOR_OUTDIR)/libmemenv.a: $(SIMULATOR_MEMENVOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(SIMULATOR_MEMENVOBJECTS)
-
-# For iOS, create universal object libraries to be used on both the simulator and
-# a device.
-$(STATIC_OUTDIR)/libleveldb.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a
- lipo -create $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a -output $@
-
-$(STATIC_OUTDIR)/libmemenv.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a
- lipo -create $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a -output $@
-else
-$(STATIC_OUTDIR)/libleveldb.a:$(STATIC_LIBOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(STATIC_LIBOBJECTS)
-
-$(STATIC_OUTDIR)/libmemenv.a:$(STATIC_MEMENVOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(STATIC_MEMENVOBJECTS)
-endif
-
-$(SHARED_MEMENVLIB):$(SHARED_MEMENVOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(SHARED_MEMENVOBJECTS)
-
-$(STATIC_OUTDIR)/db_bench:db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/db_bench_sqlite3:doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lsqlite3 $(LIBS)
-
-$(STATIC_OUTDIR)/db_bench_tree_db:doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lkyotocabinet $(LIBS)
-
-$(STATIC_OUTDIR)/leveldbutil:db/leveldbutil.cc $(STATIC_LIBOBJECTS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/leveldbutil.cc $(STATIC_LIBOBJECTS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/arena_test:util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/autocompact_test:db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/bloom_test:util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/c_test:$(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/cache_test:util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/coding_test:util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/corruption_test:db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/crc32c_test:util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/db_test:db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/dbformat_test:db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/env_posix_test:util/env_posix_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_posix_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/env_test:util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/fault_injection_test:db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/filename_test:db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/filter_block_test:table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/hash_test:util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/issue178_test:issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/issue200_test:issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/log_test:db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/recovery_test:db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/table_test:table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/skiplist_test:db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/version_edit_test:db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/version_set_test:db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/write_batch_test:db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/memenv_test:$(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS)
- $(XCRUN) $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS) -o $@ $(LIBS)
-
-$(SHARED_OUTDIR)/db_bench:$(SHARED_OUTDIR)/db/db_bench.o $(SHARED_LIBS) $(TESTUTIL)
- $(XCRUN) $(CXX) $(LDFLAGS) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(SHARED_OUTDIR)/db/db_bench.o $(TESTUTIL) $(SHARED_OUTDIR)/$(SHARED_LIB3) -o $@ $(LIBS)
-
-.PHONY: run-shared
-run-shared: $(SHARED_OUTDIR)/db_bench
- LD_LIBRARY_PATH=$(SHARED_OUTDIR) $(SHARED_OUTDIR)/db_bench
-
-$(SIMULATOR_OUTDIR)/%.o: %.cc
- xcrun -sdk iphonesimulator $(CXX) $(CXXFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@
-
-$(DEVICE_OUTDIR)/%.o: %.cc
- xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) $(DEVICE_CFLAGS) -c $< -o $@
-
-$(SIMULATOR_OUTDIR)/%.o: %.c
- xcrun -sdk iphonesimulator $(CC) $(CFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@
-
-$(DEVICE_OUTDIR)/%.o: %.c
- xcrun -sdk iphoneos $(CC) $(CFLAGS) $(DEVICE_CFLAGS) -c $< -o $@
-
-$(STATIC_OUTDIR)/%.o: %.cc
- $(CXX) $(CXXFLAGS) -c $< -o $@
-
-$(STATIC_OUTDIR)/%.o: %.c
- $(CC) $(CFLAGS) -c $< -o $@
-
-$(SHARED_OUTDIR)/%.o: %.cc
- $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@
-
-$(SHARED_OUTDIR)/%.o: %.c
- $(CC) $(CFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@
-
-$(STATIC_OUTDIR)/port/port_posix_sse.o: port/port_posix_sse.cc
- $(CXX) $(CXXFLAGS) $(PLATFORM_SSEFLAGS) -c $< -o $@
-
-$(SHARED_OUTDIR)/port/port_posix_sse.o: port/port_posix_sse.cc
- $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(PLATFORM_SSEFLAGS) -c $< -o $@
diff --git a/src/leveldb/README.md b/src/leveldb/README.md
index a010c50858..dadfd5693e 100644
--- a/src/leveldb/README.md
+++ b/src/leveldb/README.md
@@ -1,10 +1,12 @@
**LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.**
[![Build Status](https://travis-ci.org/google/leveldb.svg?branch=master)](https://travis-ci.org/google/leveldb)
+[![Build status](https://ci.appveyor.com/api/projects/status/g2j5j4rfkda6eyw5/branch/master?svg=true)](https://ci.appveyor.com/project/pwnall/leveldb)
Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
# Features
+
* Keys and values are arbitrary byte arrays.
* Data is stored sorted by key.
* Callers can provide a custom comparison function to override the sort order.
@@ -16,15 +18,55 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
* External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions.
# Documentation
- [LevelDB library documentation](https://github.com/google/leveldb/blob/master/doc/index.md) is online and bundled with the source code.
+ [LevelDB library documentation](https://github.com/google/leveldb/blob/master/doc/index.md) is online and bundled with the source code.
# Limitations
+
* This is not a SQL database. It does not have a relational data model, it does not support SQL queries, and it has no support for indexes.
* Only a single process (possibly multi-threaded) can access a particular database at a time.
* There is no client-server support built in to the library. An application that needs such support will have to wrap its own server around the library.
+# Building
+
+This project supports [CMake](https://cmake.org/) out of the box.
+
+### Building for POSIX
+
+Quick start:
+
+```bash
+mkdir -p build && cd build
+cmake -DCMAKE_BUILD_TYPE=Release .. && cmake --build .
+```
+
+### Building for Windows
+
+First generate the Visual Studio 2017 project/solution files:
+
+```cmd
+mkdir build
+cd build
+cmake -G "Visual Studio 15" ..
+```
+The default build targets x86. For a 64-bit build, run:
+
+```cmd
+cmake -G "Visual Studio 15 Win64" ..
+```
+
+To compile the Windows solution from the command-line:
+
+```cmd
+devenv /build Debug leveldb.sln
+```
+
+or open leveldb.sln in Visual Studio and build from within.
+
+Please see the CMake documentation and `CMakeLists.txt` for more advanced usage.
+
# Contributing to the leveldb Project
+
The leveldb project welcomes contributions. leveldb's primary goal is to be
a reliable and fast key/value store. Changes that are in line with the
features/limitations outlined above, and meet the requirements below,
@@ -32,10 +74,10 @@ will be considered.
Contribution requirements:
-1. **POSIX only**. We _generally_ will only accept changes that are both
- compiled, and tested on a POSIX platform - usually Linux. Very small
- changes will sometimes be accepted, but consider that more of an
- exception than the rule.
+1. **Tested platforms only**. We _generally_ will only accept changes for
+ platforms that are compiled and tested. This means POSIX (for Linux and
+ macOS) or Windows. Very small changes will sometimes be accepted, but
+ consider that more of an exception than the rule.
2. **Stable API**. We strive very hard to maintain a stable API. Changes that
require changes for projects using leveldb _might_ be rejected without
@@ -44,7 +86,16 @@ Contribution requirements:
3. **Tests**: All changes must be accompanied by a new (or changed) test, or
a sufficient explanation as to why a new (or changed) test is not required.
+4. **Consistent Style**: This project conforms to the
+ [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
+ To ensure your changes are properly formatted please run:
+
+ ```
+ clang-format -i --style=file <file>
+ ```
+
## Submitting a Pull Request
+
Before any pull request will be accepted the author must first sign a
Contributor License Agreement (CLA) at https://cla.developers.google.com/.
@@ -138,37 +189,37 @@ uncompressed blocks in memory, the read performance improves again:
See [doc/index.md](doc/index.md) for more explanation. See
[doc/impl.md](doc/impl.md) for a brief overview of the implementation.
-The public interface is in include/*.h. Callers should not include or
+The public interface is in include/leveldb/*.h. Callers should not include or
rely on the details of any other header files in this package. Those
internal APIs may be changed without warning.
Guide to header files:
-* **include/db.h**: Main interface to the DB: Start here
+* **include/leveldb/db.h**: Main interface to the DB: Start here.
-* **include/options.h**: Control over the behavior of an entire database,
+* **include/leveldb/options.h**: Control over the behavior of an entire database,
and also control over the behavior of individual reads and writes.
-* **include/comparator.h**: Abstraction for user-specified comparison function.
+* **include/leveldb/comparator.h**: Abstraction for user-specified comparison function.
If you want just bytewise comparison of keys, you can use the default
comparator, but clients can write their own comparator implementations if they
-want custom ordering (e.g. to handle different character encodings, etc.)
+want custom ordering (e.g. to handle different character encodings, etc.).
-* **include/iterator.h**: Interface for iterating over data. You can get
+* **include/leveldb/iterator.h**: Interface for iterating over data. You can get
an iterator from a DB object.
-* **include/write_batch.h**: Interface for atomically applying multiple
+* **include/leveldb/write_batch.h**: Interface for atomically applying multiple
updates to a database.
-* **include/slice.h**: A simple module for maintaining a pointer and a
+* **include/leveldb/slice.h**: A simple module for maintaining a pointer and a
length into some other byte array.
-* **include/status.h**: Status is returned from many of the public interfaces
+* **include/leveldb/status.h**: Status is returned from many of the public interfaces
and is used to report success and various kinds of errors.
-* **include/env.h**:
+* **include/leveldb/env.h**:
Abstraction of the OS environment. A posix implementation of this interface is
-in util/env_posix.cc
+in util/env_posix.cc.
-* **include/table.h, include/table_builder.h**: Lower-level modules that most
-clients probably won't use directly
+* **include/leveldb/table.h, include/leveldb/table_builder.h**: Lower-level modules that most
+clients probably won't use directly.
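To make the header guide above concrete, here is a minimal usage sketch built only from the public `include/leveldb/*.h` interfaces it describes (`db.h`, `options.h`, `status.h`, `slice.h`, `write_batch.h`). This is an illustrative sketch, not part of the diff: the database path is hypothetical and error handling is abbreviated to assertions.

```cpp
#include <cassert>
#include <iostream>
#include <string>

#include "leveldb/db.h"
#include "leveldb/options.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"
#include "leveldb/write_batch.h"

int main() {
  // Options control database-wide behavior (options.h).
  leveldb::Options options;
  options.create_if_missing = true;

  // Open a database; Status (status.h) reports success or failure.
  // The path is illustrative only; use any writable location.
  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/leveldb_example", &db);
  assert(s.ok());

  // Apply several updates atomically with a WriteBatch (write_batch.h).
  leveldb::WriteBatch batch;
  batch.Put("key1", "value1");
  batch.Put("key2", "value2");
  s = db->Write(leveldb::WriteOptions(), &batch);
  assert(s.ok());

  // Read a value back; Slice (slice.h) wraps the key without copying it.
  std::string value;
  s = db->Get(leveldb::ReadOptions(), leveldb::Slice("key1"), &value);
  if (s.ok()) std::cout << "key1 => " << value << std::endl;

  delete db;
  return 0;
}
```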
diff --git a/src/leveldb/WINDOWS.md b/src/leveldb/WINDOWS.md
deleted file mode 100644
index 5b76c2448f..0000000000
--- a/src/leveldb/WINDOWS.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Building LevelDB On Windows
-
-## Prereqs
-
-Install the [Windows Software Development Kit version 7.1](http://www.microsoft.com/downloads/dlx/en-us/listdetailsview.aspx?FamilyID=6b6c21d2-2006-4afa-9702-529fa782d63b).
-
-Download and extract the [Snappy source distribution](http://snappy.googlecode.com/files/snappy-1.0.5.tar.gz)
-
-1. Open the "Windows SDK 7.1 Command Prompt" :
- Start Menu -> "Microsoft Windows SDK v7.1" > "Windows SDK 7.1 Command Prompt"
-2. Change the directory to the leveldb project
-
-## Building the Static lib
-
-* 32 bit Version
-
- setenv /x86
- msbuild.exe /p:Configuration=Release /p:Platform=Win32 /p:Snappy=..\snappy-1.0.5
-
-* 64 bit Version
-
- setenv /x64
- msbuild.exe /p:Configuration=Release /p:Platform=x64 /p:Snappy=..\snappy-1.0.5
-
-
-## Building and Running the Benchmark app
-
-* 32 bit Version
-
- setenv /x86
- msbuild.exe /p:Configuration=Benchmark /p:Platform=Win32 /p:Snappy=..\snappy-1.0.5
- Benchmark\leveldb.exe
-
-* 64 bit Version
-
- setenv /x64
- msbuild.exe /p:Configuration=Benchmark /p:Platform=x64 /p:Snappy=..\snappy-1.0.5
- x64\Benchmark\leveldb.exe
-
diff --git a/src/leveldb/db/db_bench.cc b/src/leveldb/benchmarks/db_bench.cc
index 3ad19a512b..3696023b70 100644
--- a/src/leveldb/db/db_bench.cc
+++ b/src/leveldb/benchmarks/db_bench.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
-#include "db/db_impl.h"
-#include "db/version_set.h"
+#include <sys/types.h>
+
#include "leveldb/cache.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
+#include "leveldb/filter_policy.h"
#include "leveldb/write_batch.h"
#include "port/port.h"
#include "util/crc32c.h"
@@ -35,7 +35,6 @@
// seekrandom -- N random seeks
// open -- cost of opening a DB
// crc32c -- repeated crc32c of 4K of data
-// acquireload -- load N*1000 times
// Meta operations:
// compact -- Compact the entire DB
// stats -- Print DB stats
@@ -57,9 +56,7 @@ static const char* FLAGS_benchmarks =
"fill100K,"
"crc32c,"
"snappycomp,"
- "snappyuncomp,"
- "acquireload,"
- ;
+ "snappyuncomp,";
// Number of key/values to place in database
static int FLAGS_num = 1000000;
@@ -112,12 +109,12 @@ static bool FLAGS_use_existing_db = false;
static bool FLAGS_reuse_logs = false;
// Use the db with the following name.
-static const char* FLAGS_db = NULL;
+static const char* FLAGS_db = nullptr;
namespace leveldb {
namespace {
-leveldb::Env* g_env = NULL;
+leveldb::Env* g_env = nullptr;
// Helper for quickly generating random data.
class RandomGenerator {
@@ -158,7 +155,7 @@ static Slice TrimSpace(Slice s) {
start++;
}
size_t limit = s.size();
- while (limit > start && isspace(s[limit-1])) {
+ while (limit > start && isspace(s[limit - 1])) {
limit--;
}
return Slice(s.data() + start, limit - start);
@@ -190,14 +187,12 @@ class Stats {
void Start() {
next_report_ = 100;
- last_op_finish_ = start_;
hist_.Clear();
done_ = 0;
bytes_ = 0;
seconds_ = 0;
- start_ = g_env->NowMicros();
- finish_ = start_;
message_.clear();
+ start_ = finish_ = last_op_finish_ = g_env->NowMicros();
}
void Merge(const Stats& other) {
@@ -217,9 +212,7 @@ class Stats {
seconds_ = (finish_ - start_) * 1e-6;
}
- void AddMessage(Slice msg) {
- AppendWithSpace(&message_, msg);
- }
+ void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); }
void FinishedSingleOp() {
if (FLAGS_histogram) {
@@ -235,21 +228,26 @@ class Stats {
done_++;
if (done_ >= next_report_) {
- if (next_report_ < 1000) next_report_ += 100;
- else if (next_report_ < 5000) next_report_ += 500;
- else if (next_report_ < 10000) next_report_ += 1000;
- else if (next_report_ < 50000) next_report_ += 5000;
- else if (next_report_ < 100000) next_report_ += 10000;
- else if (next_report_ < 500000) next_report_ += 50000;
- else next_report_ += 100000;
+ if (next_report_ < 1000)
+ next_report_ += 100;
+ else if (next_report_ < 5000)
+ next_report_ += 500;
+ else if (next_report_ < 10000)
+ next_report_ += 1000;
+ else if (next_report_ < 50000)
+ next_report_ += 5000;
+ else if (next_report_ < 100000)
+ next_report_ += 10000;
+ else if (next_report_ < 500000)
+ next_report_ += 50000;
+ else
+ next_report_ += 100000;
fprintf(stderr, "... finished %d ops%30s\r", done_, "");
fflush(stderr);
}
}
- void AddBytes(int64_t n) {
- bytes_ += n;
- }
+ void AddBytes(int64_t n) { bytes_ += n; }
void Report(const Slice& name) {
// Pretend at least one op was done in case we are running a benchmark
@@ -268,11 +266,8 @@ class Stats {
}
AppendWithSpace(&extra, message_);
- fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
- name.ToString().c_str(),
- seconds_ * 1e6 / done_,
- (extra.empty() ? "" : " "),
- extra.c_str());
+ fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+ seconds_ * 1e6 / done_, (extra.empty() ? "" : " "), extra.c_str());
if (FLAGS_histogram) {
fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
}
@@ -283,8 +278,8 @@ class Stats {
// State shared by all concurrent executions of the same benchmark.
struct SharedState {
port::Mutex mu;
- port::CondVar cv;
- int total;
+ port::CondVar cv GUARDED_BY(mu);
+ int total GUARDED_BY(mu);
// Each thread goes through the following states:
// (1) initializing
@@ -292,24 +287,22 @@ struct SharedState {
// (3) running
// (4) done
- int num_initialized;
- int num_done;
- bool start;
+ int num_initialized GUARDED_BY(mu);
+ int num_done GUARDED_BY(mu);
+ bool start GUARDED_BY(mu);
- SharedState() : cv(&mu) { }
+ SharedState(int total)
+ : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) {}
};
// Per-thread state for concurrent executions of the same benchmark.
struct ThreadState {
- int tid; // 0..n-1 when running in n threads
- Random rand; // Has different seeds for different threads
+ int tid; // 0..n-1 when running in n threads
+ Random rand; // Has different seeds for different threads
Stats stats;
SharedState* shared;
- ThreadState(int index)
- : tid(index),
- rand(1000 + index) {
- }
+ ThreadState(int index) : tid(index), rand(1000 + index), shared(nullptr) {}
};
} // namespace
@@ -335,20 +328,20 @@ class Benchmark {
static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
fprintf(stdout, "Entries: %d\n", num_);
fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
- ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
- / 1048576.0));
+ ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
- (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
- / 1048576.0));
+ (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+ 1048576.0));
PrintWarnings();
fprintf(stdout, "------------------------------------------------\n");
}
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
- fprintf(stdout,
- "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
- );
+ fprintf(
+ stdout,
+ "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
fprintf(stdout,
@@ -366,22 +359,22 @@ class Benchmark {
}
void PrintEnvironment() {
- fprintf(stderr, "LevelDB: version %d.%d\n",
- kMajorVersion, kMinorVersion);
+ fprintf(stderr, "LevelDB: version %d.%d\n", kMajorVersion,
+ kMinorVersion);
#if defined(__linux)
- time_t now = time(NULL);
+ time_t now = time(nullptr);
fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
- if (cpuinfo != NULL) {
+ if (cpuinfo != nullptr) {
char line[1000];
int num_cpus = 0;
std::string cpu_type;
std::string cache_size;
- while (fgets(line, sizeof(line), cpuinfo) != NULL) {
+ while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
const char* sep = strchr(line, ':');
- if (sep == NULL) {
+ if (sep == nullptr) {
continue;
}
Slice key = TrimSpace(Slice(line, sep - 1 - line));
@@ -402,16 +395,16 @@ class Benchmark {
public:
Benchmark()
- : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : NULL),
- filter_policy_(FLAGS_bloom_bits >= 0
- ? NewBloomFilterPolicy(FLAGS_bloom_bits)
- : NULL),
- db_(NULL),
- num_(FLAGS_num),
- value_size_(FLAGS_value_size),
- entries_per_batch_(1),
- reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
- heap_counter_(0) {
+ : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr),
+ filter_policy_(FLAGS_bloom_bits >= 0
+ ? NewBloomFilterPolicy(FLAGS_bloom_bits)
+ : nullptr),
+ db_(nullptr),
+ num_(FLAGS_num),
+ value_size_(FLAGS_value_size),
+ entries_per_batch_(1),
+ reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+ heap_counter_(0) {
std::vector<std::string> files;
g_env->GetChildren(FLAGS_db, &files);
for (size_t i = 0; i < files.size(); i++) {
@@ -435,12 +428,12 @@ class Benchmark {
Open();
const char* benchmarks = FLAGS_benchmarks;
- while (benchmarks != NULL) {
+ while (benchmarks != nullptr) {
const char* sep = strchr(benchmarks, ',');
Slice name;
- if (sep == NULL) {
+ if (sep == nullptr) {
name = benchmarks;
- benchmarks = NULL;
+ benchmarks = nullptr;
} else {
name = Slice(benchmarks, sep - benchmarks);
benchmarks = sep + 1;
@@ -453,7 +446,7 @@ class Benchmark {
entries_per_batch_ = 1;
write_options_ = WriteOptions();
- void (Benchmark::*method)(ThreadState*) = NULL;
+ void (Benchmark::*method)(ThreadState*) = nullptr;
bool fresh_db = false;
int num_threads = FLAGS_threads;
@@ -510,8 +503,6 @@ class Benchmark {
method = &Benchmark::Compact;
} else if (name == Slice("crc32c")) {
method = &Benchmark::Crc32c;
- } else if (name == Slice("acquireload")) {
- method = &Benchmark::AcquireLoad;
} else if (name == Slice("snappycomp")) {
method = &Benchmark::SnappyCompress;
} else if (name == Slice("snappyuncomp")) {
@@ -523,7 +514,7 @@ class Benchmark {
} else if (name == Slice("sstables")) {
PrintStats("leveldb.sstables");
} else {
- if (name != Slice()) { // No error message for empty name
+ if (!name.empty()) { // No error message for empty name
fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
}
}
@@ -532,16 +523,16 @@ class Benchmark {
if (FLAGS_use_existing_db) {
fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
name.ToString().c_str());
- method = NULL;
+ method = nullptr;
} else {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
DestroyDB(FLAGS_db, Options());
Open();
}
}
- if (method != NULL) {
+ if (method != nullptr) {
RunBenchmark(num_threads, name, method);
}
}
@@ -585,11 +576,7 @@ class Benchmark {
void RunBenchmark(int n, Slice name,
void (Benchmark::*method)(ThreadState*)) {
- SharedState shared;
- shared.total = n;
- shared.num_initialized = 0;
- shared.num_done = 0;
- shared.start = false;
+ SharedState shared(n);
ThreadArg* arg = new ThreadArg[n];
for (int i = 0; i < n; i++) {
@@ -643,22 +630,6 @@ class Benchmark {
thread->stats.AddMessage(label);
}
- void AcquireLoad(ThreadState* thread) {
- int dummy;
- port::AtomicPointer ap(&dummy);
- int count = 0;
- void *ptr = NULL;
- thread->stats.AddMessage("(each op is 1000 loads)");
- while (count < 100000) {
- for (int i = 0; i < 1000; i++) {
- ptr = ap.Acquire_Load();
- }
- count++;
- thread->stats.FinishedSingleOp();
- }
- if (ptr == NULL) exit(1); // Disable unused variable warning.
- }
-
void SnappyCompress(ThreadState* thread) {
RandomGenerator gen;
Slice input = gen.Generate(Options().block_size);
@@ -692,8 +663,8 @@ class Benchmark {
int64_t bytes = 0;
char* uncompressed = new char[input.size()];
while (ok && bytes < 1024 * 1048576) { // Compress 1G
- ok = port::Snappy_Uncompress(compressed.data(), compressed.size(),
- uncompressed);
+ ok = port::Snappy_Uncompress(compressed.data(), compressed.size(),
+ uncompressed);
bytes += input.size();
thread->stats.FinishedSingleOp();
}
@@ -707,7 +678,7 @@ class Benchmark {
}
void Open() {
- assert(db_ == NULL);
+ assert(db_ == nullptr);
Options options;
options.env = g_env;
options.create_if_missing = !FLAGS_use_existing_db;
@@ -733,13 +704,9 @@ class Benchmark {
}
}
- void WriteSeq(ThreadState* thread) {
- DoWrite(thread, true);
- }
+ void WriteSeq(ThreadState* thread) { DoWrite(thread, true); }
- void WriteRandom(ThreadState* thread) {
- DoWrite(thread, false);
- }
+ void WriteRandom(ThreadState* thread) { DoWrite(thread, false); }
void DoWrite(ThreadState* thread, bool seq) {
if (num_ != FLAGS_num) {
@@ -755,7 +722,7 @@ class Benchmark {
for (int i = 0; i < num_; i += entries_per_batch_) {
batch.Clear();
for (int j = 0; j < entries_per_batch_; j++) {
- const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
+ const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
char key[100];
snprintf(key, sizeof(key), "%016d", k);
batch.Put(key, gen.Generate(value_size_));
@@ -865,7 +832,7 @@ class Benchmark {
for (int i = 0; i < num_; i += entries_per_batch_) {
batch.Clear();
for (int j = 0; j < entries_per_batch_; j++) {
- const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
+ const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
char key[100];
snprintf(key, sizeof(key), "%016d", k);
batch.Delete(key);
@@ -879,13 +846,9 @@ class Benchmark {
}
}
- void DeleteSeq(ThreadState* thread) {
- DoDelete(thread, true);
- }
+ void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); }
- void DeleteRandom(ThreadState* thread) {
- DoDelete(thread, false);
- }
+ void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); }
void ReadWhileWriting(ThreadState* thread) {
if (thread->tid > 0) {
@@ -917,9 +880,7 @@ class Benchmark {
}
}
- void Compact(ThreadState* thread) {
- db_->CompactRange(NULL, NULL);
- }
+ void Compact(ThreadState* thread) { db_->CompactRange(nullptr, nullptr); }
void PrintStats(const char* key) {
std::string stats;
@@ -1008,10 +969,10 @@ int main(int argc, char** argv) {
leveldb::g_env = leveldb::Env::Default();
// Choose a location for the test database if none given with --db=<path>
- if (FLAGS_db == NULL) {
- leveldb::g_env->GetTestDirectory(&default_db_path);
- default_db_path += "/dbbench";
- FLAGS_db = default_db_path.c_str();
+ if (FLAGS_db == nullptr) {
+ leveldb::g_env->GetTestDirectory(&default_db_path);
+ default_db_path += "/dbbench";
+ FLAGS_db = default_db_path.c_str();
}
leveldb::Benchmark benchmark;
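The `SharedState` changes in the db_bench diff above replace plain fields with `GUARDED_BY(mu)` annotations, so Clang's `-Wthread-safety` analysis can check at compile time that those members are only touched while the mutex is held; the new `SharedState(int total)` constructor likewise gives the guarded members well-defined initial values in one place. As background, below is a self-contained sketch of that general pattern. It uses Clang's attributes directly, with stand-in macro and class names, rather than leveldb's actual `port/thread_annotations.h` macros or `port::Mutex`.

```cpp
// Compile with: clang++ -std=c++11 -Wthread-safety guarded_by_sketch.cc
#include <mutex>

// Minimal stand-ins for thread-safety macros (illustrative names only).
#if defined(__clang__)
#define CAPABILITY(x) __attribute__((capability(x)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
#else
#define CAPABILITY(x)
#define GUARDED_BY(x)
#define ACQUIRE(...)
#define RELEASE(...)
#endif

// A mutex wrapper the analysis understands as a lockable "capability".
class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() { mu_.lock(); }
  void Unlock() RELEASE() { mu_.unlock(); }

 private:
  std::mutex mu_;
};

// Fields tagged GUARDED_BY(mu) may only be accessed while mu is held;
// -Wthread-safety emits a warning at compile time otherwise.
struct SharedCounters {
  Mutex mu;
  int num_initialized GUARDED_BY(mu) = 0;
  int num_done GUARDED_BY(mu) = 0;
};

void MarkInitialized(SharedCounters* shared) {
  shared->mu.Lock();
  shared->num_initialized++;  // OK: the lock is held here.
  shared->mu.Unlock();
  // shared->num_done++;      // Would trigger a -Wthread-safety warning.
}

int main() {
  SharedCounters counters;
  MarkInitialized(&counters);
  return 0;
}
```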
diff --git a/src/leveldb/doc/bench/db_bench_sqlite3.cc b/src/leveldb/benchmarks/db_bench_sqlite3.cc
index e63aaa8dcc..f183f4fcfd 100644
--- a/src/leveldb/doc/bench/db_bench_sqlite3.cc
+++ b/src/leveldb/benchmarks/db_bench_sqlite3.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include <sqlite3.h>
#include <stdio.h>
#include <stdlib.h>
-#include <sqlite3.h>
+
#include "util/histogram.h"
#include "util/random.h"
#include "util/testutil.h"
@@ -38,8 +39,7 @@ static const char* FLAGS_benchmarks =
"fillrand100K,"
"fillseq100K,"
"readseq,"
- "readrand100K,"
- ;
+ "readrand100K,";
// Number of key/values to place in database
static int FLAGS_num = 1000000;
@@ -76,10 +76,9 @@ static bool FLAGS_transaction = true;
static bool FLAGS_WAL_enabled = true;
// Use the db with the following name.
-static const char* FLAGS_db = NULL;
+static const char* FLAGS_db = nullptr;
-inline
-static void ExecErrorCheck(int status, char *err_msg) {
+inline static void ExecErrorCheck(int status, char* err_msg) {
if (status != SQLITE_OK) {
fprintf(stderr, "SQL error: %s\n", err_msg);
sqlite3_free(err_msg);
@@ -87,27 +86,25 @@ static void ExecErrorCheck(int status, char *err_msg) {
}
}
-inline
-static void StepErrorCheck(int status) {
+inline static void StepErrorCheck(int status) {
if (status != SQLITE_DONE) {
fprintf(stderr, "SQL step error: status = %d\n", status);
exit(1);
}
}
-inline
-static void ErrorCheck(int status) {
+inline static void ErrorCheck(int status) {
if (status != SQLITE_OK) {
fprintf(stderr, "sqlite3 error: status = %d\n", status);
exit(1);
}
}
-inline
-static void WalCheckpoint(sqlite3* db_) {
+inline static void WalCheckpoint(sqlite3* db_) {
// Flush all writes to disk
if (FLAGS_WAL_enabled) {
- sqlite3_wal_checkpoint_v2(db_, NULL, SQLITE_CHECKPOINT_FULL, NULL, NULL);
+ sqlite3_wal_checkpoint_v2(db_, nullptr, SQLITE_CHECKPOINT_FULL, nullptr,
+ nullptr);
}
}
@@ -152,7 +149,7 @@ static Slice TrimSpace(Slice s) {
start++;
}
int limit = s.size();
- while (limit > start && isspace(s[limit-1])) {
+ while (limit > start && isspace(s[limit - 1])) {
limit--;
}
return Slice(s.data() + start, limit - start);
@@ -176,7 +173,7 @@ class Benchmark {
// State kept for progress messages
int done_;
- int next_report_; // When to report next
+ int next_report_; // When to report next
void PrintHeader() {
const int kKeySize = 16;
@@ -185,17 +182,17 @@ class Benchmark {
fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size);
fprintf(stdout, "Entries: %d\n", num_);
fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
- ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
- / 1048576.0));
+ ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
PrintWarnings();
fprintf(stdout, "------------------------------------------------\n");
}
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
- fprintf(stdout,
- "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
- );
+ fprintf(
+ stdout,
+ "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
fprintf(stdout,
@@ -207,18 +204,18 @@ class Benchmark {
fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION);
#if defined(__linux)
- time_t now = time(NULL);
+ time_t now = time(nullptr);
fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
- if (cpuinfo != NULL) {
+ if (cpuinfo != nullptr) {
char line[1000];
int num_cpus = 0;
std::string cpu_type;
std::string cache_size;
- while (fgets(line, sizeof(line), cpuinfo) != NULL) {
+ while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
const char* sep = strchr(line, ':');
- if (sep == NULL) {
+ if (sep == nullptr) {
continue;
}
Slice key = TrimSpace(Slice(line, sep - 1 - line));
@@ -261,13 +258,20 @@ class Benchmark {
done_++;
if (done_ >= next_report_) {
- if (next_report_ < 1000) next_report_ += 100;
- else if (next_report_ < 5000) next_report_ += 500;
- else if (next_report_ < 10000) next_report_ += 1000;
- else if (next_report_ < 50000) next_report_ += 5000;
- else if (next_report_ < 100000) next_report_ += 10000;
- else if (next_report_ < 500000) next_report_ += 50000;
- else next_report_ += 100000;
+ if (next_report_ < 1000)
+ next_report_ += 100;
+ else if (next_report_ < 5000)
+ next_report_ += 500;
+ else if (next_report_ < 10000)
+ next_report_ += 1000;
+ else if (next_report_ < 50000)
+ next_report_ += 5000;
+ else if (next_report_ < 100000)
+ next_report_ += 10000;
+ else if (next_report_ < 500000)
+ next_report_ += 50000;
+ else
+ next_report_ += 100000;
fprintf(stderr, "... finished %d ops%30s\r", done_, "");
fflush(stderr);
}
@@ -285,16 +289,14 @@ class Benchmark {
snprintf(rate, sizeof(rate), "%6.1f MB/s",
(bytes_ / 1048576.0) / (finish - start_));
if (!message_.empty()) {
- message_ = std::string(rate) + " " + message_;
+ message_ = std::string(rate) + " " + message_;
} else {
message_ = rate;
}
}
- fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
- name.ToString().c_str(),
- (finish - start_) * 1e6 / done_,
- (message_.empty() ? "" : " "),
+ fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+ (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
message_.c_str());
if (FLAGS_histogram) {
fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
@@ -303,22 +305,16 @@ class Benchmark {
}
public:
- enum Order {
- SEQUENTIAL,
- RANDOM
- };
- enum DBState {
- FRESH,
- EXISTING
- };
+ enum Order { SEQUENTIAL, RANDOM };
+ enum DBState { FRESH, EXISTING };
Benchmark()
- : db_(NULL),
- db_num_(0),
- num_(FLAGS_num),
- reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
- bytes_(0),
- rand_(301) {
+ : db_(nullptr),
+ db_num_(0),
+ num_(FLAGS_num),
+ reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+ bytes_(0),
+ rand_(301) {
std::vector<std::string> files;
std::string test_dir;
Env::Default()->GetTestDirectory(&test_dir);
@@ -345,12 +341,12 @@ class Benchmark {
Open();
const char* benchmarks = FLAGS_benchmarks;
- while (benchmarks != NULL) {
+ while (benchmarks != nullptr) {
const char* sep = strchr(benchmarks, ',');
Slice name;
- if (sep == NULL) {
+ if (sep == nullptr) {
name = benchmarks;
- benchmarks = NULL;
+ benchmarks = nullptr;
} else {
name = Slice(benchmarks, sep - benchmarks);
benchmarks = sep + 1;
@@ -415,20 +411,18 @@ class Benchmark {
}
void Open() {
- assert(db_ == NULL);
+ assert(db_ == nullptr);
int status;
char file_name[100];
- char* err_msg = NULL;
+ char* err_msg = nullptr;
db_num_++;
// Open database
std::string tmp_dir;
Env::Default()->GetTestDirectory(&tmp_dir);
- snprintf(file_name, sizeof(file_name),
- "%s/dbbench_sqlite3-%d.db",
- tmp_dir.c_str(),
- db_num_);
+ snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db",
+ tmp_dir.c_str(), db_num_);
status = sqlite3_open(file_name, &db_);
if (status) {
fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
@@ -439,7 +433,7 @@ class Benchmark {
char cache_size[100];
snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d",
FLAGS_num_pages);
- status = sqlite3_exec(db_, cache_size, NULL, NULL, &err_msg);
+ status = sqlite3_exec(db_, cache_size, nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
// FLAGS_page_size is defaulted to 1024
@@ -447,7 +441,7 @@ class Benchmark {
char page_size[100];
snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d",
FLAGS_page_size);
- status = sqlite3_exec(db_, page_size, NULL, NULL, &err_msg);
+ status = sqlite3_exec(db_, page_size, nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
}
@@ -457,26 +451,28 @@ class Benchmark {
// LevelDB's default cache size is a combined 4 MB
std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096";
- status = sqlite3_exec(db_, WAL_stmt.c_str(), NULL, NULL, &err_msg);
+ status = sqlite3_exec(db_, WAL_stmt.c_str(), nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
- status = sqlite3_exec(db_, WAL_checkpoint.c_str(), NULL, NULL, &err_msg);
+ status =
+ sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
}
// Change locking mode to exclusive and create tables/index for database
std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE";
std::string create_stmt =
- "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
- std::string stmt_array[] = { locking_stmt, create_stmt };
+ "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
+ std::string stmt_array[] = {locking_stmt, create_stmt};
int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
for (int i = 0; i < stmt_array_length; i++) {
- status = sqlite3_exec(db_, stmt_array[i].c_str(), NULL, NULL, &err_msg);
+ status =
+ sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
}
}
- void Write(bool write_sync, Order order, DBState state,
- int num_entries, int value_size, int entries_per_batch) {
+ void Write(bool write_sync, Order order, DBState state, int num_entries,
+ int value_size, int entries_per_batch) {
// Create new database if state == FRESH
if (state == FRESH) {
if (FLAGS_use_existing_db) {
@@ -484,7 +480,7 @@ class Benchmark {
return;
}
sqlite3_close(db_);
- db_ = NULL;
+ db_ = nullptr;
Open();
Start();
}
@@ -495,7 +491,7 @@ class Benchmark {
message_ = msg;
}
- char* err_msg = NULL;
+ char* err_msg = nullptr;
int status;
sqlite3_stmt *replace_stmt, *begin_trans_stmt, *end_trans_stmt;
@@ -504,20 +500,20 @@ class Benchmark {
std::string end_trans_str = "END TRANSACTION;";
// Check for synchronous flag in options
- std::string sync_stmt = (write_sync) ? "PRAGMA synchronous = FULL" :
- "PRAGMA synchronous = OFF";
- status = sqlite3_exec(db_, sync_stmt.c_str(), NULL, NULL, &err_msg);
+ std::string sync_stmt =
+ (write_sync) ? "PRAGMA synchronous = FULL" : "PRAGMA synchronous = OFF";
+ status = sqlite3_exec(db_, sync_stmt.c_str(), nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
// Preparing sqlite3 statements
- status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1,
- &replace_stmt, NULL);
+ status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1, &replace_stmt,
+ nullptr);
ErrorCheck(status);
status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
- &begin_trans_stmt, NULL);
+ &begin_trans_stmt, nullptr);
ErrorCheck(status);
- status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
- &end_trans_stmt, NULL);
+ status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
+ nullptr);
ErrorCheck(status);
bool transaction = (entries_per_batch > 1);
@@ -535,16 +531,16 @@ class Benchmark {
const char* value = gen_.Generate(value_size).data();
// Create values for key-value pair
- const int k = (order == SEQUENTIAL) ? i + j :
- (rand_.Next() % num_entries);
+ const int k =
+ (order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries);
char key[100];
snprintf(key, sizeof(key), "%016d", k);
// Bind KV values into replace_stmt
status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
ErrorCheck(status);
- status = sqlite3_bind_blob(replace_stmt, 2, value,
- value_size, SQLITE_STATIC);
+ status = sqlite3_bind_blob(replace_stmt, 2, value, value_size,
+ SQLITE_STATIC);
ErrorCheck(status);
// Execute replace_stmt
@@ -588,12 +584,12 @@ class Benchmark {
// Preparing sqlite3 statements
status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
- &begin_trans_stmt, NULL);
+ &begin_trans_stmt, nullptr);
ErrorCheck(status);
- status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
- &end_trans_stmt, NULL);
+ status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
+ nullptr);
ErrorCheck(status);
- status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, NULL);
+ status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, nullptr);
ErrorCheck(status);
bool transaction = (entries_per_batch > 1);
@@ -618,7 +614,8 @@ class Benchmark {
ErrorCheck(status);
// Execute read statement
- while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {}
+ while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {
+ }
StepErrorCheck(status);
// Reset SQLite statement for another use
@@ -648,10 +645,10 @@ class Benchmark {
void ReadSequential() {
int status;
- sqlite3_stmt *pStmt;
+ sqlite3_stmt* pStmt;
std::string read_str = "SELECT * FROM test ORDER BY key";
- status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, NULL);
+ status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, nullptr);
ErrorCheck(status);
for (int i = 0; i < reads_ && SQLITE_ROW == sqlite3_step(pStmt); i++) {
bytes_ += sqlite3_column_bytes(pStmt, 1) + sqlite3_column_bytes(pStmt, 2);
@@ -661,7 +658,6 @@ class Benchmark {
status = sqlite3_finalize(pStmt);
ErrorCheck(status);
}
-
};
} // namespace leveldb
@@ -706,10 +702,10 @@ int main(int argc, char** argv) {
}
// Choose a location for the test database if none given with --db=<path>
- if (FLAGS_db == NULL) {
- leveldb::Env::Default()->GetTestDirectory(&default_db_path);
- default_db_path += "/dbbench";
- FLAGS_db = default_db_path.c_str();
+ if (FLAGS_db == nullptr) {
+ leveldb::Env::Default()->GetTestDirectory(&default_db_path);
+ default_db_path += "/dbbench";
+ FLAGS_db = default_db_path.c_str();
}
leveldb::Benchmark benchmark;
diff --git a/src/leveldb/doc/bench/db_bench_tree_db.cc b/src/leveldb/benchmarks/db_bench_tree_db.cc
index 4ca381f11f..b2f6646d89 100644
--- a/src/leveldb/doc/bench/db_bench_tree_db.cc
+++ b/src/leveldb/benchmarks/db_bench_tree_db.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include <kcpolydb.h>
#include <stdio.h>
#include <stdlib.h>
-#include <kcpolydb.h>
+
#include "util/histogram.h"
#include "util/random.h"
#include "util/testutil.h"
@@ -34,8 +35,7 @@ static const char* FLAGS_benchmarks =
"fillrand100K,"
"fillseq100K,"
"readseq100K,"
- "readrand100K,"
- ;
+ "readrand100K,";
// Number of key/values to place in database
static int FLAGS_num = 1000000;
@@ -69,11 +69,9 @@ static bool FLAGS_use_existing_db = false;
static bool FLAGS_compression = true;
// Use the db with the following name.
-static const char* FLAGS_db = NULL;
+static const char* FLAGS_db = nullptr;
-inline
-static void DBSynchronize(kyotocabinet::TreeDB* db_)
-{
+inline static void DBSynchronize(kyotocabinet::TreeDB* db_) {
// Synchronize will flush writes to disk
if (!db_->synchronize()) {
fprintf(stderr, "synchronize error: %s\n", db_->error().name());
@@ -121,7 +119,7 @@ static Slice TrimSpace(Slice s) {
start++;
}
int limit = s.size();
- while (limit > start && isspace(s[limit-1])) {
+ while (limit > start && isspace(s[limit - 1])) {
limit--;
}
return Slice(s.data() + start, limit - start);
@@ -146,7 +144,7 @@ class Benchmark {
// State kept for progress messages
int done_;
- int next_report_; // When to report next
+ int next_report_; // When to report next
void PrintHeader() {
const int kKeySize = 16;
@@ -157,20 +155,20 @@ class Benchmark {
static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
fprintf(stdout, "Entries: %d\n", num_);
fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
- ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
- / 1048576.0));
+ ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
- (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
- / 1048576.0));
+ (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+ 1048576.0));
PrintWarnings();
fprintf(stdout, "------------------------------------------------\n");
}
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
- fprintf(stdout,
- "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
- );
+ fprintf(
+ stdout,
+ "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
fprintf(stdout,
@@ -183,18 +181,18 @@ class Benchmark {
kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV);
#if defined(__linux)
- time_t now = time(NULL);
+ time_t now = time(nullptr);
fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
- if (cpuinfo != NULL) {
+ if (cpuinfo != nullptr) {
char line[1000];
int num_cpus = 0;
std::string cpu_type;
std::string cache_size;
- while (fgets(line, sizeof(line), cpuinfo) != NULL) {
+ while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
const char* sep = strchr(line, ':');
- if (sep == NULL) {
+ if (sep == nullptr) {
continue;
}
Slice key = TrimSpace(Slice(line, sep - 1 - line));
@@ -237,13 +235,20 @@ class Benchmark {
done_++;
if (done_ >= next_report_) {
- if (next_report_ < 1000) next_report_ += 100;
- else if (next_report_ < 5000) next_report_ += 500;
- else if (next_report_ < 10000) next_report_ += 1000;
- else if (next_report_ < 50000) next_report_ += 5000;
- else if (next_report_ < 100000) next_report_ += 10000;
- else if (next_report_ < 500000) next_report_ += 50000;
- else next_report_ += 100000;
+ if (next_report_ < 1000)
+ next_report_ += 100;
+ else if (next_report_ < 5000)
+ next_report_ += 500;
+ else if (next_report_ < 10000)
+ next_report_ += 1000;
+ else if (next_report_ < 50000)
+ next_report_ += 5000;
+ else if (next_report_ < 100000)
+ next_report_ += 10000;
+ else if (next_report_ < 500000)
+ next_report_ += 50000;
+ else
+ next_report_ += 100000;
fprintf(stderr, "... finished %d ops%30s\r", done_, "");
fflush(stderr);
}
@@ -261,16 +266,14 @@ class Benchmark {
snprintf(rate, sizeof(rate), "%6.1f MB/s",
(bytes_ / 1048576.0) / (finish - start_));
if (!message_.empty()) {
- message_ = std::string(rate) + " " + message_;
+ message_ = std::string(rate) + " " + message_;
} else {
message_ = rate;
}
}
- fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
- name.ToString().c_str(),
- (finish - start_) * 1e6 / done_,
- (message_.empty() ? "" : " "),
+ fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+ (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
message_.c_str());
if (FLAGS_histogram) {
fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
@@ -279,21 +282,15 @@ class Benchmark {
}
public:
- enum Order {
- SEQUENTIAL,
- RANDOM
- };
- enum DBState {
- FRESH,
- EXISTING
- };
+ enum Order { SEQUENTIAL, RANDOM };
+ enum DBState { FRESH, EXISTING };
Benchmark()
- : db_(NULL),
- num_(FLAGS_num),
- reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
- bytes_(0),
- rand_(301) {
+ : db_(nullptr),
+ num_(FLAGS_num),
+ reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+ bytes_(0),
+ rand_(301) {
std::vector<std::string> files;
std::string test_dir;
Env::Default()->GetTestDirectory(&test_dir);
@@ -321,12 +318,12 @@ class Benchmark {
Open(false);
const char* benchmarks = FLAGS_benchmarks;
- while (benchmarks != NULL) {
+ while (benchmarks != nullptr) {
const char* sep = strchr(benchmarks, ',');
Slice name;
- if (sep == NULL) {
+ if (sep == nullptr) {
name = benchmarks;
- benchmarks = NULL;
+ benchmarks = nullptr;
} else {
name = Slice(benchmarks, sep - benchmarks);
benchmarks = sep + 1;
@@ -386,8 +383,8 @@ class Benchmark {
}
private:
- void Open(bool sync) {
- assert(db_ == NULL);
+ void Open(bool sync) {
+ assert(db_ == nullptr);
// Initialize db_
db_ = new kyotocabinet::TreeDB();
@@ -395,16 +392,14 @@ class Benchmark {
db_num_++;
std::string test_dir;
Env::Default()->GetTestDirectory(&test_dir);
- snprintf(file_name, sizeof(file_name),
- "%s/dbbench_polyDB-%d.kct",
- test_dir.c_str(),
- db_num_);
+ snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct",
+ test_dir.c_str(), db_num_);
// Create tuning options and open the database
- int open_options = kyotocabinet::PolyDB::OWRITER |
- kyotocabinet::PolyDB::OCREATE;
- int tune_options = kyotocabinet::TreeDB::TSMALL |
- kyotocabinet::TreeDB::TLINEAR;
+ int open_options =
+ kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE;
+ int tune_options =
+ kyotocabinet::TreeDB::TSMALL | kyotocabinet::TreeDB::TLINEAR;
if (FLAGS_compression) {
tune_options |= kyotocabinet::TreeDB::TCOMPRESS;
db_->tune_compressor(&comp_);
@@ -412,7 +407,7 @@ class Benchmark {
db_->tune_options(tune_options);
db_->tune_page_cache(FLAGS_cache_size);
db_->tune_page(FLAGS_page_size);
- db_->tune_map(256LL<<20);
+ db_->tune_map(256LL << 20);
if (sync) {
open_options |= kyotocabinet::PolyDB::OAUTOSYNC;
}
@@ -421,8 +416,8 @@ class Benchmark {
}
}
- void Write(bool sync, Order order, DBState state,
- int num_entries, int value_size, int entries_per_batch) {
+ void Write(bool sync, Order order, DBState state, int num_entries,
+ int value_size, int entries_per_batch) {
// Create new database if state == FRESH
if (state == FRESH) {
if (FLAGS_use_existing_db) {
@@ -430,7 +425,7 @@ class Benchmark {
return;
}
delete db_;
- db_ = NULL;
+ db_ = nullptr;
Open(sync);
Start(); // Do not count time taken to destroy/open
}
@@ -442,8 +437,7 @@ class Benchmark {
}
// Write to database
- for (int i = 0; i < num_entries; i++)
- {
+ for (int i = 0; i < num_entries; i++) {
const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries);
char key[100];
snprintf(key, sizeof(key), "%016d", k);
@@ -516,10 +510,10 @@ int main(int argc, char** argv) {
}
// Choose a location for the test database if none given with --db=<path>
- if (FLAGS_db == NULL) {
- leveldb::Env::Default()->GetTestDirectory(&default_db_path);
- default_db_path += "/dbbench";
- FLAGS_db = default_db_path.c_str();
+ if (FLAGS_db == nullptr) {
+ leveldb::Env::Default()->GetTestDirectory(&default_db_path);
+ default_db_path += "/dbbench";
+ FLAGS_db = default_db_path.c_str();
}
leveldb::Benchmark benchmark;
diff --git a/src/leveldb/build_detect_platform b/src/leveldb/build_detect_platform
deleted file mode 100755
index 4a94715900..0000000000
--- a/src/leveldb/build_detect_platform
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/bin/sh
-#
-# Detects OS we're compiling on and outputs a file specified by the first
-# argument, which in turn gets read while processing Makefile.
-#
-# The output will set the following variables:
-# CC C Compiler path
-# CXX C++ Compiler path
-# PLATFORM_LDFLAGS Linker flags
-# PLATFORM_LIBS Libraries flags
-# PLATFORM_SHARED_EXT Extension for shared libraries
-# PLATFORM_SHARED_LDFLAGS Flags for building shared library
-# This flag is embedded just before the name
-# of the shared library without intervening spaces
-# PLATFORM_SHARED_CFLAGS Flags for compiling objects for shared library
-# PLATFORM_CCFLAGS C compiler flags
-# PLATFORM_CXXFLAGS C++ compiler flags. Will contain:
-# PLATFORM_SHARED_VERSIONED Set to 'true' if platform supports versioned
-# shared libraries, empty otherwise.
-#
-# The PLATFORM_CCFLAGS and PLATFORM_CXXFLAGS might include the following:
-#
-# -DLEVELDB_ATOMIC_PRESENT if <atomic> is present
-# -DLEVELDB_PLATFORM_POSIX for Posix-based platforms
-# -DSNAPPY if the Snappy library is present
-#
-
-OUTPUT=$1
-PREFIX=$2
-if test -z "$OUTPUT" || test -z "$PREFIX"; then
- echo "usage: $0 <output-filename> <directory_prefix>" >&2
- exit 1
-fi
-
-# Delete existing output, if it exists
-rm -f $OUTPUT
-touch $OUTPUT
-
-if test -z "$CC"; then
- CC=cc
-fi
-
-if test -z "$CXX"; then
- CXX=g++
-fi
-
-if test -z "$TMPDIR"; then
- TMPDIR=/tmp
-fi
-
-# Detect OS
-if test -z "$TARGET_OS"; then
- TARGET_OS=`uname -s`
-fi
-
-COMMON_FLAGS=
-CROSS_COMPILE=
-PLATFORM_CCFLAGS=
-PLATFORM_CXXFLAGS=
-PLATFORM_LDFLAGS=
-PLATFORM_LIBS=
-PLATFORM_SHARED_EXT="so"
-PLATFORM_SHARED_LDFLAGS="-shared -Wl,-soname -Wl,"
-PLATFORM_SHARED_CFLAGS="-fPIC"
-PLATFORM_SHARED_VERSIONED=true
-PLATFORM_SSEFLAGS=
-
-MEMCMP_FLAG=
-if [ "$CXX" = "g++" ]; then
- # Use libc's memcmp instead of GCC's memcmp. This results in ~40%
- # performance improvement on readrandom under gcc 4.4.3 on Linux/x86.
- MEMCMP_FLAG="-fno-builtin-memcmp"
-fi
-
-case "$TARGET_OS" in
- CYGWIN_*)
- PLATFORM=OS_LINUX
- COMMON_FLAGS="$MEMCMP_FLAG -lpthread -DOS_LINUX -DCYGWIN"
- PLATFORM_LDFLAGS="-lpthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- Darwin)
- PLATFORM=OS_MACOSX
- COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX"
- PLATFORM_SHARED_EXT=dylib
- [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd`
- PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name $INSTALL_PATH/"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- Linux)
- PLATFORM=OS_LINUX
- COMMON_FLAGS="$MEMCMP_FLAG -pthread -DOS_LINUX"
- PLATFORM_LDFLAGS="-pthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- SunOS)
- PLATFORM=OS_SOLARIS
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_SOLARIS"
- PLATFORM_LIBS="-lpthread -lrt"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- FreeBSD)
- PLATFORM=OS_FREEBSD
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_FREEBSD"
- PLATFORM_LIBS="-lpthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- GNU/kFreeBSD)
- PLATFORM=OS_KFREEBSD
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_KFREEBSD"
- PLATFORM_LIBS="-lpthread"
- PORT_FILE=port/port_posix.cc
- ;;
- NetBSD)
- PLATFORM=OS_NETBSD
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_NETBSD"
- PLATFORM_LIBS="-lpthread -lgcc_s"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- OpenBSD)
- PLATFORM=OS_OPENBSD
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_OPENBSD"
- PLATFORM_LDFLAGS="-pthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- DragonFly)
- PLATFORM=OS_DRAGONFLYBSD
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_DRAGONFLYBSD"
- PLATFORM_LIBS="-lpthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- OS_ANDROID_CROSSCOMPILE)
- PLATFORM=OS_ANDROID
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_ANDROID -DLEVELDB_PLATFORM_POSIX"
- PLATFORM_LDFLAGS="" # All pthread features are in the Android C library
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- CROSS_COMPILE=true
- ;;
- HP-UX)
- PLATFORM=OS_HPUX
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_HPUX"
- PLATFORM_LDFLAGS="-pthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- # man ld: +h internal_name
- PLATFORM_SHARED_LDFLAGS="-shared -Wl,+h -Wl,"
- ;;
- IOS)
- PLATFORM=IOS
- COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX"
- [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd`
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- PLATFORM_SHARED_EXT=
- PLATFORM_SHARED_LDFLAGS=
- PLATFORM_SHARED_CFLAGS=
- PLATFORM_SHARED_VERSIONED=
- ;;
- OS_WINDOWS_CROSSCOMPILE | NATIVE_WINDOWS)
- PLATFORM=OS_WINDOWS
- COMMON_FLAGS="-fno-builtin-memcmp -D_REENTRANT -DOS_WINDOWS -DLEVELDB_PLATFORM_WINDOWS -DWINVER=0x0500 -D__USE_MINGW_ANSI_STDIO=1"
- PLATFORM_SOURCES="util/env_win.cc"
- PLATFORM_LIBS="-lshlwapi"
- PORT_FILE=port/port_win.cc
- CROSS_COMPILE=true
- ;;
- *)
- echo "Unknown platform!" >&2
- exit 1
-esac
-
-# We want to make a list of all cc files within util, db, table, and helpers
-# except for the test and benchmark files. By default, find will output a list
-# of all files matching either rule, so we need to append -print to make the
-# prune take effect.
-DIRS="$PREFIX/db $PREFIX/util $PREFIX/table"
-
-set -f # temporarily disable globbing so that our patterns aren't expanded
-PRUNE_TEST="-name *test*.cc -prune"
-PRUNE_BENCH="-name *_bench.cc -prune"
-PRUNE_TOOL="-name leveldbutil.cc -prune"
-PORTABLE_FILES=`find $DIRS $PRUNE_TEST -o $PRUNE_BENCH -o $PRUNE_TOOL -o -name '*.cc' -print | sort | sed "s,^$PREFIX/,," | tr "\n" " "`
-
-set +f # re-enable globbing
-
-# The sources consist of the portable files, plus the platform-specific port
-# file.
-echo "SOURCES=$PORTABLE_FILES $PORT_FILE $PORT_SSE_FILE" >> $OUTPUT
-echo "MEMENV_SOURCES=helpers/memenv/memenv.cc" >> $OUTPUT
-
-if [ "$CROSS_COMPILE" = "true" ]; then
- # Cross-compiling; do not try any compilation tests.
- true
-else
- CXXOUTPUT="${TMPDIR}/leveldb_build_detect_platform-cxx.$$"
-
- # If -std=c++0x works, use <atomic> as fallback for when memory barriers
- # are not available.
- $CXX $CXXFLAGS -std=c++0x -x c++ - -o $CXXOUTPUT 2>/dev/null <<EOF
- #include <atomic>
- int main() {}
-EOF
- if [ "$?" = 0 ]; then
- COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX -DLEVELDB_ATOMIC_PRESENT"
- PLATFORM_CXXFLAGS="-std=c++0x"
- else
- COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX"
- fi
-
- # Test whether tcmalloc is available
- $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT -ltcmalloc 2>/dev/null <<EOF
- int main() {}
-EOF
- if [ "$?" = 0 ]; then
- PLATFORM_LIBS="$PLATFORM_LIBS -ltcmalloc"
- fi
-
- rm -f $CXXOUTPUT 2>/dev/null
-
- # Test if gcc SSE 4.2 is supported
- $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT -msse4.2 2>/dev/null <<EOF
- int main() {}
-EOF
- if [ "$?" = 0 ]; then
- PLATFORM_SSEFLAGS="-msse4.2"
- fi
-
- rm -f $CXXOUTPUT 2>/dev/null
-fi
-
-# Use the SSE 4.2 CRC32C intrinsics iff runtime checks indicate compiler supports them.
-if [ -n "$PLATFORM_SSEFLAGS" ]; then
- PLATFORM_SSEFLAGS="$PLATFORM_SSEFLAGS -DLEVELDB_PLATFORM_POSIX_SSE"
-fi
-
-PLATFORM_CCFLAGS="$PLATFORM_CCFLAGS $COMMON_FLAGS"
-PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS $COMMON_FLAGS"
-
-echo "CC=$CC" >> $OUTPUT
-echo "CXX=$CXX" >> $OUTPUT
-echo "PLATFORM=$PLATFORM" >> $OUTPUT
-echo "PLATFORM_LDFLAGS=$PLATFORM_LDFLAGS" >> $OUTPUT
-echo "PLATFORM_LIBS=$PLATFORM_LIBS" >> $OUTPUT
-echo "PLATFORM_CCFLAGS=$PLATFORM_CCFLAGS" >> $OUTPUT
-echo "PLATFORM_CXXFLAGS=$PLATFORM_CXXFLAGS" >> $OUTPUT
-echo "PLATFORM_SSEFLAGS=$PLATFORM_SSEFLAGS" >> $OUTPUT
-echo "PLATFORM_SHARED_CFLAGS=$PLATFORM_SHARED_CFLAGS" >> $OUTPUT
-echo "PLATFORM_SHARED_EXT=$PLATFORM_SHARED_EXT" >> $OUTPUT
-echo "PLATFORM_SHARED_LDFLAGS=$PLATFORM_SHARED_LDFLAGS" >> $OUTPUT
-echo "PLATFORM_SHARED_VERSIONED=$PLATFORM_SHARED_VERSIONED" >> $OUTPUT
diff --git a/src/leveldb/cmake/leveldbConfig.cmake b/src/leveldb/cmake/leveldbConfig.cmake
new file mode 100644
index 0000000000..eea6e5c477
--- /dev/null
+++ b/src/leveldb/cmake/leveldbConfig.cmake
@@ -0,0 +1 @@
+include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake")
diff --git a/src/leveldb/db/autocompact_test.cc b/src/leveldb/db/autocompact_test.cc
index d20a2362c3..e6c97a05a6 100644
--- a/src/leveldb/db/autocompact_test.cc
+++ b/src/leveldb/db/autocompact_test.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "leveldb/db.h"
#include "db/db_impl.h"
#include "leveldb/cache.h"
+#include "leveldb/db.h"
#include "util/testharness.h"
#include "util/testutil.h"
@@ -12,11 +12,6 @@ namespace leveldb {
class AutoCompactTest {
public:
- std::string dbname_;
- Cache* tiny_cache_;
- Options options_;
- DB* db_;
-
AutoCompactTest() {
dbname_ = test::TmpDir() + "/autocompact_test";
tiny_cache_ = NewLRUCache(100);
@@ -47,6 +42,12 @@ class AutoCompactTest {
}
void DoReads(int n);
+
+ private:
+ std::string dbname_;
+ Cache* tiny_cache_;
+ Options options_;
+ DB* db_;
};
static const int kValueSize = 200 * 1024;
@@ -81,17 +82,16 @@ void AutoCompactTest::DoReads(int n) {
ASSERT_LT(read, 100) << "Taking too long to compact";
Iterator* iter = db_->NewIterator(ReadOptions());
for (iter->SeekToFirst();
- iter->Valid() && iter->key().ToString() < limit_key;
- iter->Next()) {
+ iter->Valid() && iter->key().ToString() < limit_key; iter->Next()) {
// Drop data
}
delete iter;
// Wait a little bit to allow any triggered compactions to complete.
Env::Default()->SleepForMicroseconds(1000000);
uint64_t size = Size(Key(0), Key(n));
- fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n",
- read+1, size/1048576.0, Size(Key(n), Key(kCount))/1048576.0);
- if (size <= initial_size/10) {
+ fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1,
+ size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0);
+ if (size <= initial_size / 10) {
break;
}
}
@@ -100,19 +100,13 @@ void AutoCompactTest::DoReads(int n) {
// is pretty much unchanged.
const int64_t final_other_size = Size(Key(n), Key(kCount));
ASSERT_LE(final_other_size, initial_other_size + 1048576);
- ASSERT_GE(final_other_size, initial_other_size/5 - 1048576);
+ ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576);
}
-TEST(AutoCompactTest, ReadAll) {
- DoReads(kCount);
-}
+TEST(AutoCompactTest, ReadAll) { DoReads(kCount); }
-TEST(AutoCompactTest, ReadHalf) {
- DoReads(kCount/2);
-}
+TEST(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
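Note: the reformatted test above still uses leveldb's in-tree harness from util/testharness.h: TEST(Fixture, Name) registers a case against a fixture class, and leveldb::test::RunAllTests() runs everything from main(). A minimal sketch, illustrative only; the fixture name is made up:

#include "util/testharness.h"

namespace leveldb {

class ExampleTest {};  // hypothetical fixture; real tests keep state here

TEST(ExampleTest, TmpDirIsNonEmpty) {
  ASSERT_TRUE(!test::TmpDir().empty());
}

}  // namespace leveldb

int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
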
diff --git a/src/leveldb/db/builder.cc b/src/leveldb/db/builder.cc
index f419882197..9520ee4535 100644
--- a/src/leveldb/db/builder.cc
+++ b/src/leveldb/db/builder.cc
@@ -4,8 +4,8 @@
#include "db/builder.h"
-#include "db/filename.h"
#include "db/dbformat.h"
+#include "db/filename.h"
#include "db/table_cache.h"
#include "db/version_edit.h"
#include "leveldb/db.h"
@@ -14,12 +14,8 @@
namespace leveldb {
-Status BuildTable(const std::string& dbname,
- Env* env,
- const Options& options,
- TableCache* table_cache,
- Iterator* iter,
- FileMetaData* meta) {
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+ TableCache* table_cache, Iterator* iter, FileMetaData* meta) {
Status s;
meta->file_size = 0;
iter->SeekToFirst();
@@ -41,14 +37,10 @@ Status BuildTable(const std::string& dbname,
}
// Finish and check for builder errors
+ s = builder->Finish();
if (s.ok()) {
- s = builder->Finish();
- if (s.ok()) {
- meta->file_size = builder->FileSize();
- assert(meta->file_size > 0);
- }
- } else {
- builder->Abandon();
+ meta->file_size = builder->FileSize();
+ assert(meta->file_size > 0);
}
delete builder;
@@ -60,12 +52,11 @@ Status BuildTable(const std::string& dbname,
s = file->Close();
}
delete file;
- file = NULL;
+ file = nullptr;
if (s.ok()) {
// Verify that the table is usable
- Iterator* it = table_cache->NewIterator(ReadOptions(),
- meta->number,
+ Iterator* it = table_cache->NewIterator(ReadOptions(), meta->number,
meta->file_size);
s = it->status();
delete it;
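Note: the BuildTable() hunk above follows leveldb's Status-chaining convention: each stage runs only while the accumulated Status is still ok, and resource cleanup (delete builder, delete file) happens regardless of the outcome. A small illustrative sketch of that convention, with hypothetical stage names:

#include "leveldb/status.h"

// Hypothetical stages; only the chaining convention is the point.
static leveldb::Status StageOne() { return leveldb::Status::OK(); }
static leveldb::Status StageTwo() { return leveldb::Status::OK(); }

static leveldb::Status RunPipeline() {
  leveldb::Status s = StageOne();
  if (s.ok()) {
    s = StageTwo();  // later stages run only while the pipeline is still ok
  }
  return s;  // the first failure is what the caller sees
}
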
diff --git a/src/leveldb/db/builder.h b/src/leveldb/db/builder.h
index 62431fcf44..7bd0b8049b 100644
--- a/src/leveldb/db/builder.h
+++ b/src/leveldb/db/builder.h
@@ -22,12 +22,8 @@ class VersionEdit;
// *meta will be filled with metadata about the generated table.
// If no data is present in *iter, meta->file_size will be set to
// zero, and no Table file will be produced.
-extern Status BuildTable(const std::string& dbname,
- Env* env,
- const Options& options,
- TableCache* table_cache,
- Iterator* iter,
- FileMetaData* meta);
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+ TableCache* table_cache, Iterator* iter, FileMetaData* meta);
} // namespace leveldb
diff --git a/src/leveldb/db/c.cc b/src/leveldb/db/c.cc
index b23e3dcc9d..3a492f9ac5 100644
--- a/src/leveldb/db/c.cc
+++ b/src/leveldb/db/c.cc
@@ -4,10 +4,9 @@
#include "leveldb/c.h"
-#include <stdlib.h>
-#ifndef WIN32
-#include <unistd.h>
-#endif
+#include <cstdint>
+#include <cstdlib>
+
#include "leveldb/cache.h"
#include "leveldb/comparator.h"
#include "leveldb/db.h"
@@ -45,69 +44,72 @@ using leveldb::WriteOptions;
extern "C" {
-struct leveldb_t { DB* rep; };
-struct leveldb_iterator_t { Iterator* rep; };
-struct leveldb_writebatch_t { WriteBatch rep; };
-struct leveldb_snapshot_t { const Snapshot* rep; };
-struct leveldb_readoptions_t { ReadOptions rep; };
-struct leveldb_writeoptions_t { WriteOptions rep; };
-struct leveldb_options_t { Options rep; };
-struct leveldb_cache_t { Cache* rep; };
-struct leveldb_seqfile_t { SequentialFile* rep; };
-struct leveldb_randomfile_t { RandomAccessFile* rep; };
-struct leveldb_writablefile_t { WritableFile* rep; };
-struct leveldb_logger_t { Logger* rep; };
-struct leveldb_filelock_t { FileLock* rep; };
+struct leveldb_t {
+ DB* rep;
+};
+struct leveldb_iterator_t {
+ Iterator* rep;
+};
+struct leveldb_writebatch_t {
+ WriteBatch rep;
+};
+struct leveldb_snapshot_t {
+ const Snapshot* rep;
+};
+struct leveldb_readoptions_t {
+ ReadOptions rep;
+};
+struct leveldb_writeoptions_t {
+ WriteOptions rep;
+};
+struct leveldb_options_t {
+ Options rep;
+};
+struct leveldb_cache_t {
+ Cache* rep;
+};
+struct leveldb_seqfile_t {
+ SequentialFile* rep;
+};
+struct leveldb_randomfile_t {
+ RandomAccessFile* rep;
+};
+struct leveldb_writablefile_t {
+ WritableFile* rep;
+};
+struct leveldb_logger_t {
+ Logger* rep;
+};
+struct leveldb_filelock_t {
+ FileLock* rep;
+};
struct leveldb_comparator_t : public Comparator {
- void* state_;
- void (*destructor_)(void*);
- int (*compare_)(
- void*,
- const char* a, size_t alen,
- const char* b, size_t blen);
- const char* (*name_)(void*);
+ ~leveldb_comparator_t() override { (*destructor_)(state_); }
- virtual ~leveldb_comparator_t() {
- (*destructor_)(state_);
- }
-
- virtual int Compare(const Slice& a, const Slice& b) const {
+ int Compare(const Slice& a, const Slice& b) const override {
return (*compare_)(state_, a.data(), a.size(), b.data(), b.size());
}
- virtual const char* Name() const {
- return (*name_)(state_);
- }
+ const char* Name() const override { return (*name_)(state_); }
// No-ops since the C binding does not support key shortening methods.
- virtual void FindShortestSeparator(std::string*, const Slice&) const { }
- virtual void FindShortSuccessor(std::string* key) const { }
-};
+ void FindShortestSeparator(std::string*, const Slice&) const override {}
+ void FindShortSuccessor(std::string* key) const override {}
-struct leveldb_filterpolicy_t : public FilterPolicy {
void* state_;
void (*destructor_)(void*);
+ int (*compare_)(void*, const char* a, size_t alen, const char* b,
+ size_t blen);
const char* (*name_)(void*);
- char* (*create_)(
- void*,
- const char* const* key_array, const size_t* key_length_array,
- int num_keys,
- size_t* filter_length);
- unsigned char (*key_match_)(
- void*,
- const char* key, size_t length,
- const char* filter, size_t filter_length);
-
- virtual ~leveldb_filterpolicy_t() {
- (*destructor_)(state_);
- }
+};
- virtual const char* Name() const {
- return (*name_)(state_);
- }
+struct leveldb_filterpolicy_t : public FilterPolicy {
+ ~leveldb_filterpolicy_t() override { (*destructor_)(state_); }
- virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
+ const char* Name() const override { return (*name_)(state_); }
+
+ void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
std::vector<const char*> key_pointers(n);
std::vector<size_t> key_sizes(n);
for (int i = 0; i < n; i++) {
@@ -120,10 +122,19 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
free(filter);
}
- virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const {
- return (*key_match_)(state_, key.data(), key.size(),
- filter.data(), filter.size());
+ bool KeyMayMatch(const Slice& key, const Slice& filter) const override {
+ return (*key_match_)(state_, key.data(), key.size(), filter.data(),
+ filter.size());
}
+
+ void* state_;
+ void (*destructor_)(void*);
+ const char* (*name_)(void*);
+ char* (*create_)(void*, const char* const* key_array,
+ const size_t* key_length_array, int num_keys,
+ size_t* filter_length);
+ uint8_t (*key_match_)(void*, const char* key, size_t length,
+ const char* filter, size_t filter_length);
};
struct leveldb_env_t {
@@ -132,10 +143,10 @@ struct leveldb_env_t {
};
static bool SaveError(char** errptr, const Status& s) {
- assert(errptr != NULL);
+ assert(errptr != nullptr);
if (s.ok()) {
return false;
- } else if (*errptr == NULL) {
+ } else if (*errptr == nullptr) {
*errptr = strdup(s.ToString().c_str());
} else {
// TODO(sanjay): Merge with existing error?
@@ -151,13 +162,11 @@ static char* CopyString(const std::string& str) {
return result;
}
-leveldb_t* leveldb_open(
- const leveldb_options_t* options,
- const char* name,
- char** errptr) {
+leveldb_t* leveldb_open(const leveldb_options_t* options, const char* name,
+ char** errptr) {
DB* db;
if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
- return NULL;
+ return nullptr;
}
leveldb_t* result = new leveldb_t;
result->rep = db;
@@ -169,40 +178,27 @@ void leveldb_close(leveldb_t* db) {
delete db;
}
-void leveldb_put(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- const char* key, size_t keylen,
- const char* val, size_t vallen,
- char** errptr) {
+void leveldb_put(leveldb_t* db, const leveldb_writeoptions_t* options,
+ const char* key, size_t keylen, const char* val, size_t vallen,
+ char** errptr) {
SaveError(errptr,
db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen)));
}
-void leveldb_delete(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- const char* key, size_t keylen,
- char** errptr) {
+void leveldb_delete(leveldb_t* db, const leveldb_writeoptions_t* options,
+ const char* key, size_t keylen, char** errptr) {
SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen)));
}
-
-void leveldb_write(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- leveldb_writebatch_t* batch,
- char** errptr) {
+void leveldb_write(leveldb_t* db, const leveldb_writeoptions_t* options,
+ leveldb_writebatch_t* batch, char** errptr) {
SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
}
-char* leveldb_get(
- leveldb_t* db,
- const leveldb_readoptions_t* options,
- const char* key, size_t keylen,
- size_t* vallen,
- char** errptr) {
- char* result = NULL;
+char* leveldb_get(leveldb_t* db, const leveldb_readoptions_t* options,
+ const char* key, size_t keylen, size_t* vallen,
+ char** errptr) {
+ char* result = nullptr;
std::string tmp;
Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
if (s.ok()) {
@@ -218,45 +214,40 @@ char* leveldb_get(
}
leveldb_iterator_t* leveldb_create_iterator(
- leveldb_t* db,
- const leveldb_readoptions_t* options) {
+ leveldb_t* db, const leveldb_readoptions_t* options) {
leveldb_iterator_t* result = new leveldb_iterator_t;
result->rep = db->rep->NewIterator(options->rep);
return result;
}
-const leveldb_snapshot_t* leveldb_create_snapshot(
- leveldb_t* db) {
+const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db) {
leveldb_snapshot_t* result = new leveldb_snapshot_t;
result->rep = db->rep->GetSnapshot();
return result;
}
-void leveldb_release_snapshot(
- leveldb_t* db,
- const leveldb_snapshot_t* snapshot) {
+void leveldb_release_snapshot(leveldb_t* db,
+ const leveldb_snapshot_t* snapshot) {
db->rep->ReleaseSnapshot(snapshot->rep);
delete snapshot;
}
-char* leveldb_property_value(
- leveldb_t* db,
- const char* propname) {
+char* leveldb_property_value(leveldb_t* db, const char* propname) {
std::string tmp;
if (db->rep->GetProperty(Slice(propname), &tmp)) {
// We use strdup() since we expect human readable output.
return strdup(tmp.c_str());
} else {
- return NULL;
+ return nullptr;
}
}
-void leveldb_approximate_sizes(
- leveldb_t* db,
- int num_ranges,
- const char* const* range_start_key, const size_t* range_start_key_len,
- const char* const* range_limit_key, const size_t* range_limit_key_len,
- uint64_t* sizes) {
+void leveldb_approximate_sizes(leveldb_t* db, int num_ranges,
+ const char* const* range_start_key,
+ const size_t* range_start_key_len,
+ const char* const* range_limit_key,
+ const size_t* range_limit_key_len,
+ uint64_t* sizes) {
Range* ranges = new Range[num_ranges];
for (int i = 0; i < num_ranges; i++) {
ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
@@ -266,28 +257,23 @@ void leveldb_approximate_sizes(
delete[] ranges;
}
-void leveldb_compact_range(
- leveldb_t* db,
- const char* start_key, size_t start_key_len,
- const char* limit_key, size_t limit_key_len) {
+void leveldb_compact_range(leveldb_t* db, const char* start_key,
+ size_t start_key_len, const char* limit_key,
+ size_t limit_key_len) {
Slice a, b;
db->rep->CompactRange(
- // Pass NULL Slice if corresponding "const char*" is NULL
- (start_key ? (a = Slice(start_key, start_key_len), &a) : NULL),
- (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : NULL));
+ // Pass null Slice if corresponding "const char*" is null
+ (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
+ (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
}
-void leveldb_destroy_db(
- const leveldb_options_t* options,
- const char* name,
- char** errptr) {
+void leveldb_destroy_db(const leveldb_options_t* options, const char* name,
+ char** errptr) {
SaveError(errptr, DestroyDB(name, options->rep));
}
-void leveldb_repair_db(
- const leveldb_options_t* options,
- const char* name,
- char** errptr) {
+void leveldb_repair_db(const leveldb_options_t* options, const char* name,
+ char** errptr) {
SaveError(errptr, RepairDB(name, options->rep));
}
@@ -296,7 +282,7 @@ void leveldb_iter_destroy(leveldb_iterator_t* iter) {
delete iter;
}
-unsigned char leveldb_iter_valid(const leveldb_iterator_t* iter) {
+uint8_t leveldb_iter_valid(const leveldb_iterator_t* iter) {
return iter->rep->Valid();
}
@@ -312,13 +298,9 @@ void leveldb_iter_seek(leveldb_iterator_t* iter, const char* k, size_t klen) {
iter->rep->Seek(Slice(k, klen));
}
-void leveldb_iter_next(leveldb_iterator_t* iter) {
- iter->rep->Next();
-}
+void leveldb_iter_next(leveldb_iterator_t* iter) { iter->rep->Next(); }
-void leveldb_iter_prev(leveldb_iterator_t* iter) {
- iter->rep->Prev();
-}
+void leveldb_iter_prev(leveldb_iterator_t* iter) { iter->rep->Prev(); }
const char* leveldb_iter_key(const leveldb_iterator_t* iter, size_t* klen) {
Slice s = iter->rep->key();
@@ -340,41 +322,34 @@ leveldb_writebatch_t* leveldb_writebatch_create() {
return new leveldb_writebatch_t;
}
-void leveldb_writebatch_destroy(leveldb_writebatch_t* b) {
- delete b;
-}
+void leveldb_writebatch_destroy(leveldb_writebatch_t* b) { delete b; }
-void leveldb_writebatch_clear(leveldb_writebatch_t* b) {
- b->rep.Clear();
-}
+void leveldb_writebatch_clear(leveldb_writebatch_t* b) { b->rep.Clear(); }
-void leveldb_writebatch_put(
- leveldb_writebatch_t* b,
- const char* key, size_t klen,
- const char* val, size_t vlen) {
+void leveldb_writebatch_put(leveldb_writebatch_t* b, const char* key,
+ size_t klen, const char* val, size_t vlen) {
b->rep.Put(Slice(key, klen), Slice(val, vlen));
}
-void leveldb_writebatch_delete(
- leveldb_writebatch_t* b,
- const char* key, size_t klen) {
+void leveldb_writebatch_delete(leveldb_writebatch_t* b, const char* key,
+ size_t klen) {
b->rep.Delete(Slice(key, klen));
}
-void leveldb_writebatch_iterate(
- leveldb_writebatch_t* b,
- void* state,
- void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
- void (*deleted)(void*, const char* k, size_t klen)) {
+void leveldb_writebatch_iterate(const leveldb_writebatch_t* b, void* state,
+ void (*put)(void*, const char* k, size_t klen,
+ const char* v, size_t vlen),
+ void (*deleted)(void*, const char* k,
+ size_t klen)) {
class H : public WriteBatch::Handler {
public:
void* state_;
void (*put_)(void*, const char* k, size_t klen, const char* v, size_t vlen);
void (*deleted_)(void*, const char* k, size_t klen);
- virtual void Put(const Slice& key, const Slice& value) {
+ void Put(const Slice& key, const Slice& value) override {
(*put_)(state_, key.data(), key.size(), value.data(), value.size());
}
- virtual void Delete(const Slice& key) {
+ void Delete(const Slice& key) override {
(*deleted_)(state_, key.data(), key.size());
}
};
@@ -385,47 +360,43 @@ void leveldb_writebatch_iterate(
b->rep.Iterate(&handler);
}
-leveldb_options_t* leveldb_options_create() {
- return new leveldb_options_t;
+void leveldb_writebatch_append(leveldb_writebatch_t* destination,
+ const leveldb_writebatch_t* source) {
+ destination->rep.Append(source->rep);
}
-void leveldb_options_destroy(leveldb_options_t* options) {
- delete options;
-}
+leveldb_options_t* leveldb_options_create() { return new leveldb_options_t; }
+
+void leveldb_options_destroy(leveldb_options_t* options) { delete options; }
-void leveldb_options_set_comparator(
- leveldb_options_t* opt,
- leveldb_comparator_t* cmp) {
+void leveldb_options_set_comparator(leveldb_options_t* opt,
+ leveldb_comparator_t* cmp) {
opt->rep.comparator = cmp;
}
-void leveldb_options_set_filter_policy(
- leveldb_options_t* opt,
- leveldb_filterpolicy_t* policy) {
+void leveldb_options_set_filter_policy(leveldb_options_t* opt,
+ leveldb_filterpolicy_t* policy) {
opt->rep.filter_policy = policy;
}
-void leveldb_options_set_create_if_missing(
- leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_create_if_missing(leveldb_options_t* opt, uint8_t v) {
opt->rep.create_if_missing = v;
}
-void leveldb_options_set_error_if_exists(
- leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_error_if_exists(leveldb_options_t* opt, uint8_t v) {
opt->rep.error_if_exists = v;
}
-void leveldb_options_set_paranoid_checks(
- leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_paranoid_checks(leveldb_options_t* opt, uint8_t v) {
opt->rep.paranoid_checks = v;
}
void leveldb_options_set_env(leveldb_options_t* opt, leveldb_env_t* env) {
- opt->rep.env = (env ? env->rep : NULL);
+ opt->rep.env = (env ? env->rep : nullptr);
}
void leveldb_options_set_info_log(leveldb_options_t* opt, leveldb_logger_t* l) {
- opt->rep.info_log = (l ? l->rep : NULL);
+ opt->rep.info_log = (l ? l->rep : nullptr);
}
void leveldb_options_set_write_buffer_size(leveldb_options_t* opt, size_t s) {
@@ -448,17 +419,18 @@ void leveldb_options_set_block_restart_interval(leveldb_options_t* opt, int n) {
opt->rep.block_restart_interval = n;
}
+void leveldb_options_set_max_file_size(leveldb_options_t* opt, size_t s) {
+ opt->rep.max_file_size = s;
+}
+
void leveldb_options_set_compression(leveldb_options_t* opt, int t) {
opt->rep.compression = static_cast<CompressionType>(t);
}
leveldb_comparator_t* leveldb_comparator_create(
- void* state,
- void (*destructor)(void*),
- int (*compare)(
- void*,
- const char* a, size_t alen,
- const char* b, size_t blen),
+ void* state, void (*destructor)(void*),
+ int (*compare)(void*, const char* a, size_t alen, const char* b,
+ size_t blen),
const char* (*name)(void*)) {
leveldb_comparator_t* result = new leveldb_comparator_t;
result->state_ = state;
@@ -468,22 +440,15 @@ leveldb_comparator_t* leveldb_comparator_create(
return result;
}
-void leveldb_comparator_destroy(leveldb_comparator_t* cmp) {
- delete cmp;
-}
+void leveldb_comparator_destroy(leveldb_comparator_t* cmp) { delete cmp; }
leveldb_filterpolicy_t* leveldb_filterpolicy_create(
- void* state,
- void (*destructor)(void*),
- char* (*create_filter)(
- void*,
- const char* const* key_array, const size_t* key_length_array,
- int num_keys,
- size_t* filter_length),
- unsigned char (*key_may_match)(
- void*,
- const char* key, size_t length,
- const char* filter, size_t filter_length),
+ void* state, void (*destructor)(void*),
+ char* (*create_filter)(void*, const char* const* key_array,
+ const size_t* key_length_array, int num_keys,
+ size_t* filter_length),
+ uint8_t (*key_may_match)(void*, const char* key, size_t length,
+ const char* filter, size_t filter_length),
const char* (*name)(void*)) {
leveldb_filterpolicy_t* result = new leveldb_filterpolicy_t;
result->state_ = state;
@@ -503,7 +468,8 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
// they delegate to a NewBloomFilterPolicy() instead of user
// supplied C functions.
struct Wrapper : public leveldb_filterpolicy_t {
- const FilterPolicy* rep_;
+ static void DoNothing(void*) {}
+
~Wrapper() { delete rep_; }
const char* Name() const { return rep_->Name(); }
void CreateFilter(const Slice* keys, int n, std::string* dst) const {
@@ -512,11 +478,12 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
bool KeyMayMatch(const Slice& key, const Slice& filter) const {
return rep_->KeyMayMatch(key, filter);
}
- static void DoNothing(void*) { }
+
+ const FilterPolicy* rep_;
};
Wrapper* wrapper = new Wrapper;
wrapper->rep_ = NewBloomFilterPolicy(bits_per_key);
- wrapper->state_ = NULL;
+ wrapper->state_ = nullptr;
wrapper->destructor_ = &Wrapper::DoNothing;
return wrapper;
}
@@ -525,37 +492,29 @@ leveldb_readoptions_t* leveldb_readoptions_create() {
return new leveldb_readoptions_t;
}
-void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) {
- delete opt;
-}
+void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) { delete opt; }
-void leveldb_readoptions_set_verify_checksums(
- leveldb_readoptions_t* opt,
- unsigned char v) {
+void leveldb_readoptions_set_verify_checksums(leveldb_readoptions_t* opt,
+ uint8_t v) {
opt->rep.verify_checksums = v;
}
-void leveldb_readoptions_set_fill_cache(
- leveldb_readoptions_t* opt, unsigned char v) {
+void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t* opt, uint8_t v) {
opt->rep.fill_cache = v;
}
-void leveldb_readoptions_set_snapshot(
- leveldb_readoptions_t* opt,
- const leveldb_snapshot_t* snap) {
- opt->rep.snapshot = (snap ? snap->rep : NULL);
+void leveldb_readoptions_set_snapshot(leveldb_readoptions_t* opt,
+ const leveldb_snapshot_t* snap) {
+ opt->rep.snapshot = (snap ? snap->rep : nullptr);
}
leveldb_writeoptions_t* leveldb_writeoptions_create() {
return new leveldb_writeoptions_t;
}
-void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) {
- delete opt;
-}
+void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) { delete opt; }
-void leveldb_writeoptions_set_sync(
- leveldb_writeoptions_t* opt, unsigned char v) {
+void leveldb_writeoptions_set_sync(leveldb_writeoptions_t* opt, uint8_t v) {
opt->rep.sync = v;
}
@@ -582,16 +541,22 @@ void leveldb_env_destroy(leveldb_env_t* env) {
delete env;
}
-void leveldb_free(void* ptr) {
- free(ptr);
-}
+char* leveldb_env_get_test_directory(leveldb_env_t* env) {
+ std::string result;
+ if (!env->rep->GetTestDirectory(&result).ok()) {
+ return nullptr;
+ }
-int leveldb_major_version() {
- return kMajorVersion;
+ char* buffer = static_cast<char*>(malloc(result.size() + 1));
+ memcpy(buffer, result.data(), result.size());
+ buffer[result.size()] = '\0';
+ return buffer;
}
-int leveldb_minor_version() {
- return kMinorVersion;
-}
+void leveldb_free(void* ptr) { free(ptr); }
+
+int leveldb_major_version() { return kMajorVersion; }
+
+int leveldb_minor_version() { return kMinorVersion; }
} // end extern "C"
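Note: for context on the reshuffled C binding above, a sketch of typical client usage built only from functions visible in this file (leveldb_open, leveldb_put, leveldb_get, leveldb_free and the option constructors/destructors); the database path is an arbitrary example, and this is illustrative rather than a test from the tree:

#include <stdio.h>

#include "leveldb/c.h"

int main(void) {
  char* err = NULL;

  leveldb_options_t* options = leveldb_options_create();
  leveldb_options_set_create_if_missing(options, 1);

  leveldb_t* db = leveldb_open(options, "/tmp/leveldb_c_demo", &err);
  if (err != NULL) {
    fprintf(stderr, "open failed: %s\n", err);
    leveldb_free(err);
    leveldb_options_destroy(options);
    return 1;
  }

  leveldb_writeoptions_t* woptions = leveldb_writeoptions_create();
  leveldb_put(db, woptions, "key", 3, "value", 5, &err);
  if (err != NULL) {
    fprintf(stderr, "put failed: %s\n", err);
    leveldb_free(err);
    err = NULL;
  }

  leveldb_readoptions_t* roptions = leveldb_readoptions_create();
  size_t vallen = 0;
  char* val = leveldb_get(db, roptions, "key", 3, &vallen, &err);
  if (val != NULL) {
    printf("key => %.*s\n", (int)vallen, val);
    leveldb_free(val);  /* leveldb_get returns a malloc'd copy */
  }

  leveldb_readoptions_destroy(roptions);
  leveldb_writeoptions_destroy(woptions);
  leveldb_close(db);
  leveldb_options_destroy(options);
  return 0;
}
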
diff --git a/src/leveldb/db/c_test.c b/src/leveldb/db/c_test.c
index 7cd5ee0207..16c77eed6a 100644
--- a/src/leveldb/db/c_test.c
+++ b/src/leveldb/db/c_test.c
@@ -8,24 +8,14 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/types.h>
-#include <unistd.h>
const char* phase = "";
-static char dbname[200];
static void StartPhase(const char* name) {
fprintf(stderr, "=== Test %s\n", name);
phase = name;
}
-static const char* GetTempDir(void) {
- const char* ret = getenv("TEST_TMPDIR");
- if (ret == NULL || ret[0] == '\0')
- ret = "/tmp";
- return ret;
-}
-
#define CheckNoError(err) \
if ((err) != NULL) { \
fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, (err)); \
@@ -130,7 +120,7 @@ static const char* CmpName(void* arg) {
}
// Custom filter policy
-static unsigned char fake_filter_result = 1;
+static uint8_t fake_filter_result = 1;
static void FilterDestroy(void* arg) { }
static const char* FilterName(void* arg) {
return "TestFilter";
@@ -145,10 +135,8 @@ static char* FilterCreate(
memcpy(result, "fake", 4);
return result;
}
-unsigned char FilterKeyMatch(
- void* arg,
- const char* key, size_t length,
- const char* filter, size_t filter_length) {
+uint8_t FilterKeyMatch(void* arg, const char* key, size_t length,
+ const char* filter, size_t filter_length) {
CheckCondition(filter_length == 4);
CheckCondition(memcmp(filter, "fake", 4) == 0);
return fake_filter_result;
@@ -162,21 +150,19 @@ int main(int argc, char** argv) {
leveldb_options_t* options;
leveldb_readoptions_t* roptions;
leveldb_writeoptions_t* woptions;
+ char* dbname;
char* err = NULL;
int run = -1;
CheckCondition(leveldb_major_version() >= 1);
CheckCondition(leveldb_minor_version() >= 1);
- snprintf(dbname, sizeof(dbname),
- "%s/leveldb_c_test-%d",
- GetTempDir(),
- ((int) geteuid()));
-
StartPhase("create_objects");
cmp = leveldb_comparator_create(NULL, CmpDestroy, CmpCompare, CmpName);
env = leveldb_create_default_env();
cache = leveldb_cache_create_lru(100000);
+ dbname = leveldb_env_get_test_directory(env);
+ CheckCondition(dbname != NULL);
options = leveldb_options_create();
leveldb_options_set_comparator(options, cmp);
@@ -189,6 +175,7 @@ int main(int argc, char** argv) {
leveldb_options_set_max_open_files(options, 10);
leveldb_options_set_block_size(options, 1024);
leveldb_options_set_block_restart_interval(options, 8);
+ leveldb_options_set_max_file_size(options, 3 << 20);
leveldb_options_set_compression(options, leveldb_no_compression);
roptions = leveldb_readoptions_create();
@@ -239,12 +226,18 @@ int main(int argc, char** argv) {
leveldb_writebatch_clear(wb);
leveldb_writebatch_put(wb, "bar", 3, "b", 1);
leveldb_writebatch_put(wb, "box", 3, "c", 1);
- leveldb_writebatch_delete(wb, "bar", 3);
+
+ leveldb_writebatch_t* wb2 = leveldb_writebatch_create();
+ leveldb_writebatch_delete(wb2, "bar", 3);
+ leveldb_writebatch_append(wb, wb2);
+ leveldb_writebatch_destroy(wb2);
+
leveldb_write(db, woptions, wb, &err);
CheckNoError(err);
CheckGet(db, roptions, "foo", "hello");
CheckGet(db, roptions, "bar", NULL);
CheckGet(db, roptions, "box", "c");
+
int pos = 0;
leveldb_writebatch_iterate(wb, &pos, CheckPut, CheckDel);
CheckCondition(pos == 3);
@@ -381,6 +374,7 @@ int main(int argc, char** argv) {
leveldb_options_destroy(options);
leveldb_readoptions_destroy(roptions);
leveldb_writeoptions_destroy(woptions);
+ leveldb_free(dbname);
leveldb_cache_destroy(cache);
leveldb_comparator_destroy(cmp);
leveldb_env_destroy(env);
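Note: the updated c_test.c exercises the new leveldb_writebatch_append() entry point, whose implementation above simply calls WriteBatch::Append() on the wrapped batches. A small sketch of the same batch-combining idea against the C++ API (the function name is made up):

#include "leveldb/slice.h"
#include "leveldb/write_batch.h"

leveldb::WriteBatch BuildCombinedBatch() {
  leveldb::WriteBatch updates;
  updates.Put("bar", "b");
  updates.Put("box", "c");

  leveldb::WriteBatch deletions;
  deletions.Delete("bar");

  // Append() folds the second batch onto the first, so a single DB::Write()
  // later applies the puts and the delete in order.
  updates.Append(deletions);
  return updates;
}
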
diff --git a/src/leveldb/db/corruption_test.cc b/src/leveldb/db/corruption_test.cc
index 37a484d25f..42f5237c65 100644
--- a/src/leveldb/db/corruption_test.cc
+++ b/src/leveldb/db/corruption_test.cc
@@ -2,20 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "leveldb/db.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/stat.h>
#include <sys/types.h>
-#include "leveldb/cache.h"
-#include "leveldb/env.h"
-#include "leveldb/table.h"
-#include "leveldb/write_batch.h"
+
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/log_format.h"
#include "db/version_set.h"
+#include "leveldb/cache.h"
+#include "leveldb/db.h"
+#include "leveldb/table.h"
+#include "leveldb/write_batch.h"
#include "util/logging.h"
#include "util/testharness.h"
#include "util/testutil.h"
@@ -26,44 +22,35 @@ static const int kValueSize = 1000;
class CorruptionTest {
public:
- test::ErrorEnv env_;
- std::string dbname_;
- Cache* tiny_cache_;
- Options options_;
- DB* db_;
-
- CorruptionTest() {
- tiny_cache_ = NewLRUCache(100);
+ CorruptionTest()
+ : db_(nullptr),
+ dbname_("/memenv/corruption_test"),
+ tiny_cache_(NewLRUCache(100)) {
options_.env = &env_;
options_.block_cache = tiny_cache_;
- dbname_ = test::TmpDir() + "/corruption_test";
DestroyDB(dbname_, options_);
- db_ = NULL;
options_.create_if_missing = true;
Reopen();
options_.create_if_missing = false;
}
~CorruptionTest() {
- delete db_;
- DestroyDB(dbname_, Options());
- delete tiny_cache_;
+ delete db_;
+ delete tiny_cache_;
}
Status TryReopen() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
return DB::Open(options_, dbname_, &db_);
}
- void Reopen() {
- ASSERT_OK(TryReopen());
- }
+ void Reopen() { ASSERT_OK(TryReopen()); }
void RepairDB() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
ASSERT_OK(::leveldb::RepairDB(dbname_, options_));
}
@@ -71,7 +58,7 @@ class CorruptionTest {
std::string key_space, value_space;
WriteBatch batch;
for (int i = 0; i < n; i++) {
- //if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
+ // if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
Slice key = Key(i, &key_space);
batch.Clear();
batch.Put(key, Value(i, &value_space));
@@ -100,8 +87,7 @@ class CorruptionTest {
// Ignore boundary keys.
continue;
}
- if (!ConsumeDecimalNumber(&in, &key) ||
- !in.empty() ||
+ if (!ConsumeDecimalNumber(&in, &key) || !in.empty() ||
key < next_expected) {
bad_keys++;
continue;
@@ -126,14 +112,13 @@ class CorruptionTest {
void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) {
// Pick file to corrupt
std::vector<std::string> filenames;
- ASSERT_OK(env_.GetChildren(dbname_, &filenames));
+ ASSERT_OK(env_.target()->GetChildren(dbname_, &filenames));
uint64_t number;
FileType type;
std::string fname;
int picked_number = -1;
for (size_t i = 0; i < filenames.size(); i++) {
- if (ParseFileName(filenames[i], &number, &type) &&
- type == filetype &&
+ if (ParseFileName(filenames[i], &number, &type) && type == filetype &&
int(number) > picked_number) { // Pick latest file
fname = dbname_ + "/" + filenames[i];
picked_number = number;
@@ -141,35 +126,32 @@ class CorruptionTest {
}
ASSERT_TRUE(!fname.empty()) << filetype;
- struct stat sbuf;
- if (stat(fname.c_str(), &sbuf) != 0) {
- const char* msg = strerror(errno);
- ASSERT_TRUE(false) << fname << ": " << msg;
- }
+ uint64_t file_size;
+ ASSERT_OK(env_.target()->GetFileSize(fname, &file_size));
if (offset < 0) {
// Relative to end of file; make it absolute
- if (-offset > sbuf.st_size) {
+ if (-offset > file_size) {
offset = 0;
} else {
- offset = sbuf.st_size + offset;
+ offset = file_size + offset;
}
}
- if (offset > sbuf.st_size) {
- offset = sbuf.st_size;
+ if (offset > file_size) {
+ offset = file_size;
}
- if (offset + bytes_to_corrupt > sbuf.st_size) {
- bytes_to_corrupt = sbuf.st_size - offset;
+ if (offset + bytes_to_corrupt > file_size) {
+ bytes_to_corrupt = file_size - offset;
}
// Do it
std::string contents;
- Status s = ReadFileToString(Env::Default(), fname, &contents);
+ Status s = ReadFileToString(env_.target(), fname, &contents);
ASSERT_TRUE(s.ok()) << s.ToString();
for (int i = 0; i < bytes_to_corrupt; i++) {
contents[i + offset] ^= 0x80;
}
- s = WriteStringToFile(Env::Default(), contents, fname);
+ s = WriteStringToFile(env_.target(), contents, fname);
ASSERT_TRUE(s.ok()) << s.ToString();
}
@@ -197,12 +179,20 @@ class CorruptionTest {
Random r(k);
return test::RandomString(&r, kValueSize, storage);
}
+
+ test::ErrorEnv env_;
+ Options options_;
+ DB* db_;
+
+ private:
+ std::string dbname_;
+ Cache* tiny_cache_;
};
TEST(CorruptionTest, Recovery) {
Build(100);
Check(100, 100);
- Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record
+ Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record
Corrupt(kLogFile, log::kBlockSize + 1000, 1); // Somewhere in second block
Reopen();
@@ -237,8 +227,8 @@ TEST(CorruptionTest, TableFile) {
Build(100);
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable();
- dbi->TEST_CompactRange(0, NULL, NULL);
- dbi->TEST_CompactRange(1, NULL, NULL);
+ dbi->TEST_CompactRange(0, nullptr, nullptr);
+ dbi->TEST_CompactRange(1, nullptr, nullptr);
Corrupt(kTableFile, 100, 1);
Check(90, 99);
@@ -251,8 +241,8 @@ TEST(CorruptionTest, TableFileRepair) {
Build(100);
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable();
- dbi->TEST_CompactRange(0, NULL, NULL);
- dbi->TEST_CompactRange(1, NULL, NULL);
+ dbi->TEST_CompactRange(0, nullptr, nullptr);
+ dbi->TEST_CompactRange(1, nullptr, nullptr);
Corrupt(kTableFile, 100, 1);
RepairDB();
@@ -302,7 +292,7 @@ TEST(CorruptionTest, CorruptedDescriptor) {
ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello"));
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable();
- dbi->TEST_CompactRange(0, NULL, NULL);
+ dbi->TEST_CompactRange(0, nullptr, nullptr);
Corrupt(kDescriptorFile, 0, 1000);
Status s = TryReopen();
@@ -343,7 +333,7 @@ TEST(CorruptionTest, CompactionInputErrorParanoid) {
Corrupt(kTableFile, 100, 1);
env_.SleepForMicroseconds(100000);
}
- dbi->CompactRange(NULL, NULL);
+ dbi->CompactRange(nullptr, nullptr);
// Write must fail because of corrupted table
std::string tmp1, tmp2;
@@ -369,6 +359,4 @@ TEST(CorruptionTest, UnrelatedKeys) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
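Note: the rewritten CorruptionTest routes all file access through the test Env (env_.target()) instead of stat()/Env::Default(), which also lets it run against an in-memory Env (note the new "/memenv/corruption_test" path). A sketch of that Env-only byte-flipping idea, assuming only the ReadFileToString and WriteStringToFile helpers used above; the helper name is made up:

#include <string>

#include "leveldb/env.h"
#include "leveldb/status.h"

leveldb::Status FlipOneByte(leveldb::Env* env, const std::string& fname,
                            size_t offset) {
  std::string contents;
  leveldb::Status s = leveldb::ReadFileToString(env, fname, &contents);
  if (!s.ok()) return s;
  if (offset >= contents.size()) {
    return leveldb::Status::InvalidArgument("offset past end of file");
  }
  contents[offset] ^= 0x80;  // same bit flip the test applies
  return leveldb::WriteStringToFile(env, contents, fname);
}
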
diff --git a/src/leveldb/db/db_impl.cc b/src/leveldb/db/db_impl.cc
index 3bb58e560a..65e31724bc 100644
--- a/src/leveldb/db/db_impl.cc
+++ b/src/leveldb/db/db_impl.cc
@@ -4,12 +4,15 @@
#include "db/db_impl.h"
+#include <stdint.h>
+#include <stdio.h>
+
#include <algorithm>
+#include <atomic>
#include <set>
#include <string>
-#include <stdint.h>
-#include <stdio.h>
#include <vector>
+
#include "db/builder.h"
#include "db/db_iter.h"
#include "db/dbformat.h"
@@ -39,16 +42,33 @@ const int kNumNonTableCacheFiles = 10;
// Information kept for every waiting writer
struct DBImpl::Writer {
+ explicit Writer(port::Mutex* mu)
+ : batch(nullptr), sync(false), done(false), cv(mu) {}
+
Status status;
WriteBatch* batch;
bool sync;
bool done;
port::CondVar cv;
-
- explicit Writer(port::Mutex* mu) : cv(mu) { }
};
struct DBImpl::CompactionState {
+ // Files produced by compaction
+ struct Output {
+ uint64_t number;
+ uint64_t file_size;
+ InternalKey smallest, largest;
+ };
+
+ Output* current_output() { return &outputs[outputs.size() - 1]; }
+
+ explicit CompactionState(Compaction* c)
+ : compaction(c),
+ smallest_snapshot(0),
+ outfile(nullptr),
+ builder(nullptr),
+ total_bytes(0) {}
+
Compaction* const compaction;
// Sequence numbers < smallest_snapshot are not significant since we
@@ -57,12 +77,6 @@ struct DBImpl::CompactionState {
// we can drop all entries for the same key with sequence numbers < S.
SequenceNumber smallest_snapshot;
- // Files produced by compaction
- struct Output {
- uint64_t number;
- uint64_t file_size;
- InternalKey smallest, largest;
- };
std::vector<Output> outputs;
// State kept for output being generated
@@ -70,19 +84,10 @@ struct DBImpl::CompactionState {
TableBuilder* builder;
uint64_t total_bytes;
-
- Output* current_output() { return &outputs[outputs.size()-1]; }
-
- explicit CompactionState(Compaction* c)
- : compaction(c),
- outfile(NULL),
- builder(NULL),
- total_bytes(0) {
- }
};
// Fix user-supplied options to be reasonable
-template <class T,class V>
+template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
@@ -93,27 +98,32 @@ Options SanitizeOptions(const std::string& dbname,
const Options& src) {
Options result = src;
result.comparator = icmp;
- result.filter_policy = (src.filter_policy != NULL) ? ipolicy : NULL;
- ClipToRange(&result.max_open_files, 64 + kNumNonTableCacheFiles, 50000);
- ClipToRange(&result.write_buffer_size, 64<<10, 1<<30);
- ClipToRange(&result.max_file_size, 1<<20, 1<<30);
- ClipToRange(&result.block_size, 1<<10, 4<<20);
- if (result.info_log == NULL) {
+ result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr;
+ ClipToRange(&result.max_open_files, 64 + kNumNonTableCacheFiles, 50000);
+ ClipToRange(&result.write_buffer_size, 64 << 10, 1 << 30);
+ ClipToRange(&result.max_file_size, 1 << 20, 1 << 30);
+ ClipToRange(&result.block_size, 1 << 10, 4 << 20);
+ if (result.info_log == nullptr) {
// Open a log file in the same directory as the db
src.env->CreateDir(dbname); // In case it does not exist
src.env->RenameFile(InfoLogFileName(dbname), OldInfoLogFileName(dbname));
Status s = src.env->NewLogger(InfoLogFileName(dbname), &result.info_log);
if (!s.ok()) {
// No place suitable for logging
- result.info_log = NULL;
+ result.info_log = nullptr;
}
}
- if (result.block_cache == NULL) {
+ if (result.block_cache == nullptr) {
result.block_cache = NewLRUCache(8 << 20);
}
return result;
}
+static int TableCacheSize(const Options& sanitized_options) {
+ // Reserve ten files or so for other uses and give the rest to TableCache.
+ return sanitized_options.max_open_files - kNumNonTableCacheFiles;
+}
+
DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
: env_(raw_options.env),
internal_comparator_(raw_options.comparator),
@@ -123,44 +133,39 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
owns_info_log_(options_.info_log != raw_options.info_log),
owns_cache_(options_.block_cache != raw_options.block_cache),
dbname_(dbname),
- db_lock_(NULL),
- shutting_down_(NULL),
- bg_cv_(&mutex_),
- mem_(NULL),
- imm_(NULL),
- logfile_(NULL),
+ table_cache_(new TableCache(dbname_, options_, TableCacheSize(options_))),
+ db_lock_(nullptr),
+ shutting_down_(false),
+ background_work_finished_signal_(&mutex_),
+ mem_(nullptr),
+ imm_(nullptr),
+ has_imm_(false),
+ logfile_(nullptr),
logfile_number_(0),
- log_(NULL),
+ log_(nullptr),
seed_(0),
tmp_batch_(new WriteBatch),
- bg_compaction_scheduled_(false),
- manual_compaction_(NULL) {
- has_imm_.Release_Store(NULL);
-
- // Reserve ten files or so for other uses and give the rest to TableCache.
- const int table_cache_size = options_.max_open_files - kNumNonTableCacheFiles;
- table_cache_ = new TableCache(dbname_, &options_, table_cache_size);
-
- versions_ = new VersionSet(dbname_, &options_, table_cache_,
- &internal_comparator_);
-}
+ background_compaction_scheduled_(false),
+ manual_compaction_(nullptr),
+ versions_(new VersionSet(dbname_, &options_, table_cache_,
+ &internal_comparator_)) {}
DBImpl::~DBImpl() {
- // Wait for background work to finish
+ // Wait for background work to finish.
mutex_.Lock();
- shutting_down_.Release_Store(this); // Any non-NULL value is ok
- while (bg_compaction_scheduled_) {
- bg_cv_.Wait();
+ shutting_down_.store(true, std::memory_order_release);
+ while (background_compaction_scheduled_) {
+ background_work_finished_signal_.Wait();
}
mutex_.Unlock();
- if (db_lock_ != NULL) {
+ if (db_lock_ != nullptr) {
env_->UnlockFile(db_lock_);
}
delete versions_;
- if (mem_ != NULL) mem_->Unref();
- if (imm_ != NULL) imm_->Unref();
+ if (mem_ != nullptr) mem_->Unref();
+ if (imm_ != nullptr) imm_->Unref();
delete tmp_batch_;
delete log_;
delete logfile_;
@@ -216,6 +221,8 @@ void DBImpl::MaybeIgnoreError(Status* s) const {
}
void DBImpl::DeleteObsoleteFiles() {
+ mutex_.AssertHeld();
+
if (!bg_error_.ok()) {
// After a background error, we don't know whether a new version may
// or may not have been committed, so we cannot safely garbage collect.
@@ -227,11 +234,12 @@ void DBImpl::DeleteObsoleteFiles() {
versions_->AddLiveFiles(&live);
std::vector<std::string> filenames;
- env_->GetChildren(dbname_, &filenames); // Ignoring errors on purpose
+ env_->GetChildren(dbname_, &filenames); // Ignoring errors on purpose
uint64_t number;
FileType type;
- for (size_t i = 0; i < filenames.size(); i++) {
- if (ParseFileName(filenames[i], &number, &type)) {
+ std::vector<std::string> files_to_delete;
+ for (std::string& filename : filenames) {
+ if (ParseFileName(filename, &number, &type)) {
bool keep = true;
switch (type) {
case kLogFile:
@@ -259,26 +267,34 @@ void DBImpl::DeleteObsoleteFiles() {
}
if (!keep) {
+ files_to_delete.push_back(std::move(filename));
if (type == kTableFile) {
table_cache_->Evict(number);
}
- Log(options_.info_log, "Delete type=%d #%lld\n",
- int(type),
+ Log(options_.info_log, "Delete type=%d #%lld\n", static_cast<int>(type),
static_cast<unsigned long long>(number));
- env_->DeleteFile(dbname_ + "/" + filenames[i]);
}
}
}
+
+ // While deleting all files unblock other threads. All files being deleted
+ // have unique names which will not collide with newly created files and
+ // are therefore safe to delete while allowing other threads to proceed.
+ mutex_.Unlock();
+ for (const std::string& filename : files_to_delete) {
+ env_->DeleteFile(dbname_ + "/" + filename);
+ }
+ mutex_.Lock();
}
-Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) {
+Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
mutex_.AssertHeld();
// Ignore error from CreateDir since the creation of the DB is
// committed only when the descriptor is created, and this directory
// may already exist from a previous failed creation attempt.
env_->CreateDir(dbname_);
- assert(db_lock_ == NULL);
+ assert(db_lock_ == nullptr);
Status s = env_->LockFile(LockFileName(dbname_), &db_lock_);
if (!s.ok()) {
return s;
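Note: the new files_to_delete logic above is the pick-under-lock, delete-outside-the-lock pattern: candidates are chosen while mutex_ is held, then the mutex is dropped for the slow filesystem calls, which is safe because the doomed names cannot collide with newly created files. A stand-alone illustrative sketch (class and member names are made up):

#include <string>
#include <utility>
#include <vector>

#include "leveldb/env.h"
#include "port/port.h"

class ObsoleteFileSweeper {
 public:
  void Add(const std::string& name) {
    mu_.Lock();
    pending_.push_back(name);
    mu_.Unlock();
  }

  void Sweep(leveldb::Env* env, const std::string& dir) {
    mu_.Lock();
    std::vector<std::string> doomed = std::move(pending_);  // chosen under lock
    pending_.clear();
    mu_.Unlock();  // let other threads proceed during the slow deletes
    for (const std::string& name : doomed) {
      env->DeleteFile(dir + "/" + name);
    }
  }

 private:
  leveldb::port::Mutex mu_;
  std::vector<std::string> pending_;
};
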
@@ -296,8 +312,8 @@ Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) {
}
} else {
if (options_.error_if_exists) {
- return Status::InvalidArgument(
- dbname_, "exists (error_if_exists is true)");
+ return Status::InvalidArgument(dbname_,
+ "exists (error_if_exists is true)");
}
}
@@ -369,12 +385,12 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
Env* env;
Logger* info_log;
const char* fname;
- Status* status; // NULL if options_.paranoid_checks==false
- virtual void Corruption(size_t bytes, const Status& s) {
+ Status* status; // null if options_.paranoid_checks==false
+ void Corruption(size_t bytes, const Status& s) override {
Log(info_log, "%s%s: dropping %d bytes; %s",
- (this->status == NULL ? "(ignoring error) " : ""),
- fname, static_cast<int>(bytes), s.ToString().c_str());
- if (this->status != NULL && this->status->ok()) *this->status = s;
+ (this->status == nullptr ? "(ignoring error) " : ""), fname,
+ static_cast<int>(bytes), s.ToString().c_str());
+ if (this->status != nullptr && this->status->ok()) *this->status = s;
}
};
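Note: the LogReporter above is the hook through which log::Reader surfaces checksum failures; because checksumming stays on even with paranoid_checks off, a corrupt record drops the whole commit instead of feeding bad data forward. A minimal illustrative Reporter, not taken from the tree:

#include <cstddef>
#include <cstdio>

#include "db/log_reader.h"
#include "leveldb/status.h"

class StderrReporter : public leveldb::log::Reader::Reporter {
 public:
  void Corruption(size_t bytes, const leveldb::Status& s) override {
    std::fprintf(stderr, "dropping %zu bytes: %s\n", bytes,
                 s.ToString().c_str());
  }
};
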
@@ -394,32 +410,30 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
reporter.env = env_;
reporter.info_log = options_.info_log;
reporter.fname = fname.c_str();
- reporter.status = (options_.paranoid_checks ? &status : NULL);
+ reporter.status = (options_.paranoid_checks ? &status : nullptr);
// We intentionally make log::Reader do checksumming even if
// paranoid_checks==false so that corruptions cause entire commits
// to be skipped instead of propagating bad information (like overly
// large sequence numbers).
- log::Reader reader(file, &reporter, true/*checksum*/,
- 0/*initial_offset*/);
+ log::Reader reader(file, &reporter, true /*checksum*/, 0 /*initial_offset*/);
Log(options_.info_log, "Recovering log #%llu",
- (unsigned long long) log_number);
+ (unsigned long long)log_number);
// Read all the records and add to a memtable
std::string scratch;
Slice record;
WriteBatch batch;
int compactions = 0;
- MemTable* mem = NULL;
- while (reader.ReadRecord(&record, &scratch) &&
- status.ok()) {
+ MemTable* mem = nullptr;
+ while (reader.ReadRecord(&record, &scratch) && status.ok()) {
if (record.size() < 12) {
- reporter.Corruption(
- record.size(), Status::Corruption("log record too small", fname));
+ reporter.Corruption(record.size(),
+ Status::Corruption("log record too small", fname));
continue;
}
WriteBatchInternal::SetContents(&batch, record);
- if (mem == NULL) {
+ if (mem == nullptr) {
mem = new MemTable(internal_comparator_);
mem->Ref();
}
@@ -428,9 +442,8 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
if (!status.ok()) {
break;
}
- const SequenceNumber last_seq =
- WriteBatchInternal::Sequence(&batch) +
- WriteBatchInternal::Count(&batch) - 1;
+ const SequenceNumber last_seq = WriteBatchInternal::Sequence(&batch) +
+ WriteBatchInternal::Count(&batch) - 1;
if (last_seq > *max_sequence) {
*max_sequence = last_seq;
}
@@ -438,9 +451,9 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
if (mem->ApproximateMemoryUsage() > options_.write_buffer_size) {
compactions++;
*save_manifest = true;
- status = WriteLevel0Table(mem, edit, NULL);
+ status = WriteLevel0Table(mem, edit, nullptr);
mem->Unref();
- mem = NULL;
+ mem = nullptr;
if (!status.ok()) {
// Reflect errors immediately so that conditions like full
// file-systems cause the DB::Open() to fail.
@@ -453,31 +466,31 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
// See if we should keep reusing the last log file.
if (status.ok() && options_.reuse_logs && last_log && compactions == 0) {
- assert(logfile_ == NULL);
- assert(log_ == NULL);
- assert(mem_ == NULL);
+ assert(logfile_ == nullptr);
+ assert(log_ == nullptr);
+ assert(mem_ == nullptr);
uint64_t lfile_size;
if (env_->GetFileSize(fname, &lfile_size).ok() &&
env_->NewAppendableFile(fname, &logfile_).ok()) {
Log(options_.info_log, "Reusing old log %s \n", fname.c_str());
log_ = new log::Writer(logfile_, lfile_size);
logfile_number_ = log_number;
- if (mem != NULL) {
+ if (mem != nullptr) {
mem_ = mem;
- mem = NULL;
+ mem = nullptr;
} else {
- // mem can be NULL if lognum exists but was empty.
+ // mem can be nullptr if lognum exists but was empty.
mem_ = new MemTable(internal_comparator_);
mem_->Ref();
}
}
}
- if (mem != NULL) {
+ if (mem != nullptr) {
// mem did not get reused; compact it.
if (status.ok()) {
*save_manifest = true;
- status = WriteLevel0Table(mem, edit, NULL);
+ status = WriteLevel0Table(mem, edit, nullptr);
}
mem->Unref();
}
@@ -494,7 +507,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
pending_outputs_.insert(meta.number);
Iterator* iter = mem->NewIterator();
Log(options_.info_log, "Level-0 table #%llu: started",
- (unsigned long long) meta.number);
+ (unsigned long long)meta.number);
Status s;
{
@@ -504,24 +517,22 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
}
Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
- (unsigned long long) meta.number,
- (unsigned long long) meta.file_size,
+ (unsigned long long)meta.number, (unsigned long long)meta.file_size,
s.ToString().c_str());
delete iter;
pending_outputs_.erase(meta.number);
-
// Note that if file_size is zero, the file has been deleted and
// should not be added to the manifest.
int level = 0;
if (s.ok() && meta.file_size > 0) {
const Slice min_user_key = meta.smallest.user_key();
const Slice max_user_key = meta.largest.user_key();
- if (base != NULL) {
+ if (base != nullptr) {
level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
}
- edit->AddFile(level, meta.number, meta.file_size,
- meta.smallest, meta.largest);
+ edit->AddFile(level, meta.number, meta.file_size, meta.smallest,
+ meta.largest);
}
CompactionStats stats;
@@ -533,7 +544,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
void DBImpl::CompactMemTable() {
mutex_.AssertHeld();
- assert(imm_ != NULL);
+ assert(imm_ != nullptr);
// Save the contents of the memtable as a new Table
VersionEdit edit;
@@ -542,7 +553,7 @@ void DBImpl::CompactMemTable() {
Status s = WriteLevel0Table(imm_, &edit, base);
base->Unref();
- if (s.ok() && shutting_down_.Acquire_Load()) {
+ if (s.ok() && shutting_down_.load(std::memory_order_acquire)) {
s = Status::IOError("Deleting DB during memtable compaction");
}
@@ -556,8 +567,8 @@ void DBImpl::CompactMemTable() {
if (s.ok()) {
// Commit to the new state
imm_->Unref();
- imm_ = NULL;
- has_imm_.Release_Store(NULL);
+ imm_ = nullptr;
+ has_imm_.store(false, std::memory_order_release);
DeleteObsoleteFiles();
} else {
RecordBackgroundError(s);
@@ -575,13 +586,14 @@ void DBImpl::CompactRange(const Slice* begin, const Slice* end) {
}
}
}
- TEST_CompactMemTable(); // TODO(sanjay): Skip if memtable does not overlap
+ TEST_CompactMemTable(); // TODO(sanjay): Skip if memtable does not overlap
for (int level = 0; level < max_level_with_files; level++) {
TEST_CompactRange(level, begin, end);
}
}
-void DBImpl::TEST_CompactRange(int level, const Slice* begin,const Slice* end) {
+void DBImpl::TEST_CompactRange(int level, const Slice* begin,
+ const Slice* end) {
assert(level >= 0);
assert(level + 1 < config::kNumLevels);
@@ -590,44 +602,45 @@ void DBImpl::TEST_CompactRange(int level, const Slice* begin,const Slice* end) {
ManualCompaction manual;
manual.level = level;
manual.done = false;
- if (begin == NULL) {
- manual.begin = NULL;
+ if (begin == nullptr) {
+ manual.begin = nullptr;
} else {
begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek);
manual.begin = &begin_storage;
}
- if (end == NULL) {
- manual.end = NULL;
+ if (end == nullptr) {
+ manual.end = nullptr;
} else {
end_storage = InternalKey(*end, 0, static_cast<ValueType>(0));
manual.end = &end_storage;
}
MutexLock l(&mutex_);
- while (!manual.done && !shutting_down_.Acquire_Load() && bg_error_.ok()) {
- if (manual_compaction_ == NULL) { // Idle
+ while (!manual.done && !shutting_down_.load(std::memory_order_acquire) &&
+ bg_error_.ok()) {
+ if (manual_compaction_ == nullptr) { // Idle
manual_compaction_ = &manual;
MaybeScheduleCompaction();
} else { // Running either my compaction or another compaction.
- bg_cv_.Wait();
+ background_work_finished_signal_.Wait();
}
}
if (manual_compaction_ == &manual) {
// Cancel my manual compaction since we aborted early for some reason.
- manual_compaction_ = NULL;
+ manual_compaction_ = nullptr;
}
}
Status DBImpl::TEST_CompactMemTable() {
- // NULL batch means just wait for earlier writes to be done
- Status s = Write(WriteOptions(), NULL);
+ // nullptr batch means just wait for earlier writes to be done
+ Status s = Write(WriteOptions(), nullptr);
if (s.ok()) {
// Wait until the compaction completes
MutexLock l(&mutex_);
- while (imm_ != NULL && bg_error_.ok()) {
- bg_cv_.Wait();
+ while (imm_ != nullptr && bg_error_.ok()) {
+ background_work_finished_signal_.Wait();
}
- if (imm_ != NULL) {
+ if (imm_ != nullptr) {
s = bg_error_;
}
}
@@ -638,24 +651,23 @@ void DBImpl::RecordBackgroundError(const Status& s) {
mutex_.AssertHeld();
if (bg_error_.ok()) {
bg_error_ = s;
- bg_cv_.SignalAll();
+ background_work_finished_signal_.SignalAll();
}
}
void DBImpl::MaybeScheduleCompaction() {
mutex_.AssertHeld();
- if (bg_compaction_scheduled_) {
+ if (background_compaction_scheduled_) {
// Already scheduled
- } else if (shutting_down_.Acquire_Load()) {
+ } else if (shutting_down_.load(std::memory_order_acquire)) {
// DB is being deleted; no more background compactions
} else if (!bg_error_.ok()) {
// Already got an error; no more changes
- } else if (imm_ == NULL &&
- manual_compaction_ == NULL &&
+ } else if (imm_ == nullptr && manual_compaction_ == nullptr &&
!versions_->NeedsCompaction()) {
// No work to be done
} else {
- bg_compaction_scheduled_ = true;
+ background_compaction_scheduled_ = true;
env_->Schedule(&DBImpl::BGWork, this);
}
}
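Note: MaybeScheduleCompaction() above hands work to the Env via env_->Schedule(&DBImpl::BGWork, this): Schedule() takes a plain function pointer plus a void* argument, so the class passes a static member that casts the argument back to the object. A stand-alone sketch of that trampoline shape (class and method names are made up):

#include <cstdio>

#include "leveldb/env.h"

class BackgroundWorker {
 public:
  explicit BackgroundWorker(leveldb::Env* env) : env_(env) {}

  void ScheduleWork() { env_->Schedule(&BackgroundWorker::Trampoline, this); }

 private:
  static void Trampoline(void* arg) {
    reinterpret_cast<BackgroundWorker*>(arg)->DoWork();
  }

  void DoWork() { std::fprintf(stderr, "background work ran\n"); }

  leveldb::Env* env_;
};
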
@@ -666,8 +678,8 @@ void DBImpl::BGWork(void* db) {
void DBImpl::BackgroundCall() {
MutexLock l(&mutex_);
- assert(bg_compaction_scheduled_);
- if (shutting_down_.Acquire_Load()) {
+ assert(background_compaction_scheduled_);
+ if (shutting_down_.load(std::memory_order_acquire)) {
// No more background work when shutting down.
} else if (!bg_error_.ok()) {
// No more background work after a background error.
@@ -675,36 +687,35 @@ void DBImpl::BackgroundCall() {
BackgroundCompaction();
}
- bg_compaction_scheduled_ = false;
+ background_compaction_scheduled_ = false;
// Previous compaction may have produced too many files in a level,
// so reschedule another compaction if needed.
MaybeScheduleCompaction();
- bg_cv_.SignalAll();
+ background_work_finished_signal_.SignalAll();
}
void DBImpl::BackgroundCompaction() {
mutex_.AssertHeld();
- if (imm_ != NULL) {
+ if (imm_ != nullptr) {
CompactMemTable();
return;
}
Compaction* c;
- bool is_manual = (manual_compaction_ != NULL);
+ bool is_manual = (manual_compaction_ != nullptr);
InternalKey manual_end;
if (is_manual) {
ManualCompaction* m = manual_compaction_;
c = versions_->CompactRange(m->level, m->begin, m->end);
- m->done = (c == NULL);
- if (c != NULL) {
+ m->done = (c == nullptr);
+ if (c != nullptr) {
manual_end = c->input(0, c->num_input_files(0) - 1)->largest;
}
Log(options_.info_log,
"Manual compaction at level-%d from %s .. %s; will stop at %s\n",
- m->level,
- (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
+ m->level, (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
(m->end ? m->end->DebugString().c_str() : "(end)"),
(m->done ? "(end)" : manual_end.DebugString().c_str()));
} else {
@@ -712,26 +723,24 @@ void DBImpl::BackgroundCompaction() {
}
Status status;
- if (c == NULL) {
+ if (c == nullptr) {
// Nothing to do
} else if (!is_manual && c->IsTrivialMove()) {
// Move file to next level
assert(c->num_input_files(0) == 1);
FileMetaData* f = c->input(0, 0);
c->edit()->DeleteFile(c->level(), f->number);
- c->edit()->AddFile(c->level() + 1, f->number, f->file_size,
- f->smallest, f->largest);
+ c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest,
+ f->largest);
status = versions_->LogAndApply(c->edit(), &mutex_);
if (!status.ok()) {
RecordBackgroundError(status);
}
VersionSet::LevelSummaryStorage tmp;
Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n",
- static_cast<unsigned long long>(f->number),
- c->level() + 1,
+ static_cast<unsigned long long>(f->number), c->level() + 1,
static_cast<unsigned long long>(f->file_size),
- status.ToString().c_str(),
- versions_->LevelSummary(&tmp));
+ status.ToString().c_str(), versions_->LevelSummary(&tmp));
} else {
CompactionState* compact = new CompactionState(c);
status = DoCompactionWork(compact);
@@ -746,11 +755,10 @@ void DBImpl::BackgroundCompaction() {
if (status.ok()) {
// Done
- } else if (shutting_down_.Acquire_Load()) {
+ } else if (shutting_down_.load(std::memory_order_acquire)) {
// Ignore compaction errors found during shutting down
} else {
- Log(options_.info_log,
- "Compaction error: %s", status.ToString().c_str());
+ Log(options_.info_log, "Compaction error: %s", status.ToString().c_str());
}
if (is_manual) {
@@ -764,18 +772,18 @@ void DBImpl::BackgroundCompaction() {
m->tmp_storage = manual_end;
m->begin = &m->tmp_storage;
}
- manual_compaction_ = NULL;
+ manual_compaction_ = nullptr;
}
}
void DBImpl::CleanupCompaction(CompactionState* compact) {
mutex_.AssertHeld();
- if (compact->builder != NULL) {
+ if (compact->builder != nullptr) {
// May happen if we get a shutdown call in the middle of compaction
compact->builder->Abandon();
delete compact->builder;
} else {
- assert(compact->outfile == NULL);
+ assert(compact->outfile == nullptr);
}
delete compact->outfile;
for (size_t i = 0; i < compact->outputs.size(); i++) {
@@ -786,8 +794,8 @@ void DBImpl::CleanupCompaction(CompactionState* compact) {
}
Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
- assert(compact != NULL);
- assert(compact->builder == NULL);
+ assert(compact != nullptr);
+ assert(compact->builder == nullptr);
uint64_t file_number;
{
mutex_.Lock();
@@ -812,9 +820,9 @@ Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
Iterator* input) {
- assert(compact != NULL);
- assert(compact->outfile != NULL);
- assert(compact->builder != NULL);
+ assert(compact != nullptr);
+ assert(compact->outfile != nullptr);
+ assert(compact->builder != nullptr);
const uint64_t output_number = compact->current_output()->number;
assert(output_number != 0);
@@ -831,7 +839,7 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
compact->current_output()->file_size = current_bytes;
compact->total_bytes += current_bytes;
delete compact->builder;
- compact->builder = NULL;
+ compact->builder = nullptr;
// Finish and check for file errors
if (s.ok()) {
@@ -841,35 +849,29 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
s = compact->outfile->Close();
}
delete compact->outfile;
- compact->outfile = NULL;
+ compact->outfile = nullptr;
if (s.ok() && current_entries > 0) {
// Verify that the table is usable
- Iterator* iter = table_cache_->NewIterator(ReadOptions(),
- output_number,
- current_bytes);
+ Iterator* iter =
+ table_cache_->NewIterator(ReadOptions(), output_number, current_bytes);
s = iter->status();
delete iter;
if (s.ok()) {
- Log(options_.info_log,
- "Generated table #%llu@%d: %lld keys, %lld bytes",
- (unsigned long long) output_number,
- compact->compaction->level(),
- (unsigned long long) current_entries,
- (unsigned long long) current_bytes);
+ Log(options_.info_log, "Generated table #%llu@%d: %lld keys, %lld bytes",
+ (unsigned long long)output_number, compact->compaction->level(),
+ (unsigned long long)current_entries,
+ (unsigned long long)current_bytes);
}
}
return s;
}
-
Status DBImpl::InstallCompactionResults(CompactionState* compact) {
mutex_.AssertHeld();
- Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes",
- compact->compaction->num_input_files(0),
- compact->compaction->level(),
- compact->compaction->num_input_files(1),
- compact->compaction->level() + 1,
+ Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes",
+ compact->compaction->num_input_files(0), compact->compaction->level(),
+ compact->compaction->num_input_files(1), compact->compaction->level() + 1,
static_cast<long long>(compact->total_bytes));
// Add compaction outputs
@@ -877,9 +879,8 @@ Status DBImpl::InstallCompactionResults(CompactionState* compact) {
const int level = compact->compaction->level();
for (size_t i = 0; i < compact->outputs.size(); i++) {
const CompactionState::Output& out = compact->outputs[i];
- compact->compaction->edit()->AddFile(
- level + 1,
- out.number, out.file_size, out.smallest, out.largest);
+ compact->compaction->edit()->AddFile(level + 1, out.number, out.file_size,
+ out.smallest, out.largest);
}
return versions_->LogAndApply(compact->compaction->edit(), &mutex_);
}
@@ -888,39 +889,40 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
const uint64_t start_micros = env_->NowMicros();
int64_t imm_micros = 0; // Micros spent doing imm_ compactions
- Log(options_.info_log, "Compacting %d@%d + %d@%d files",
- compact->compaction->num_input_files(0),
- compact->compaction->level(),
+ Log(options_.info_log, "Compacting %d@%d + %d@%d files",
+ compact->compaction->num_input_files(0), compact->compaction->level(),
compact->compaction->num_input_files(1),
compact->compaction->level() + 1);
assert(versions_->NumLevelFiles(compact->compaction->level()) > 0);
- assert(compact->builder == NULL);
- assert(compact->outfile == NULL);
+ assert(compact->builder == nullptr);
+ assert(compact->outfile == nullptr);
if (snapshots_.empty()) {
compact->smallest_snapshot = versions_->LastSequence();
} else {
- compact->smallest_snapshot = snapshots_.oldest()->number_;
+ compact->smallest_snapshot = snapshots_.oldest()->sequence_number();
}
+ Iterator* input = versions_->MakeInputIterator(compact->compaction);
+
// Release mutex while we're actually doing the compaction work
mutex_.Unlock();
- Iterator* input = versions_->MakeInputIterator(compact->compaction);
input->SeekToFirst();
Status status;
ParsedInternalKey ikey;
std::string current_user_key;
bool has_current_user_key = false;
SequenceNumber last_sequence_for_key = kMaxSequenceNumber;
- for (; input->Valid() && !shutting_down_.Acquire_Load(); ) {
+ while (input->Valid() && !shutting_down_.load(std::memory_order_acquire)) {
// Prioritize immutable compaction work
- if (has_imm_.NoBarrier_Load() != NULL) {
+ if (has_imm_.load(std::memory_order_relaxed)) {
const uint64_t imm_start = env_->NowMicros();
mutex_.Lock();
- if (imm_ != NULL) {
+ if (imm_ != nullptr) {
CompactMemTable();
- bg_cv_.SignalAll(); // Wakeup MakeRoomForWrite() if necessary
+ // Wake up MakeRoomForWrite() if necessary.
+ background_work_finished_signal_.SignalAll();
}
mutex_.Unlock();
imm_micros += (env_->NowMicros() - imm_start);
@@ -928,7 +930,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
Slice key = input->key();
if (compact->compaction->ShouldStopBefore(key) &&
- compact->builder != NULL) {
+ compact->builder != nullptr) {
status = FinishCompactionOutputFile(compact, input);
if (!status.ok()) {
break;
@@ -944,8 +946,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
last_sequence_for_key = kMaxSequenceNumber;
} else {
if (!has_current_user_key ||
- user_comparator()->Compare(ikey.user_key,
- Slice(current_user_key)) != 0) {
+ user_comparator()->Compare(ikey.user_key, Slice(current_user_key)) !=
+ 0) {
// First occurrence of this user key
current_user_key.assign(ikey.user_key.data(), ikey.user_key.size());
has_current_user_key = true;
@@ -954,7 +956,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
if (last_sequence_for_key <= compact->smallest_snapshot) {
// Hidden by a newer entry for same user key
- drop = true; // (A)
+ drop = true; // (A)
} else if (ikey.type == kTypeDeletion &&
ikey.sequence <= compact->smallest_snapshot &&
compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
@@ -982,7 +984,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
if (!drop) {
// Open output file if necessary
- if (compact->builder == NULL) {
+ if (compact->builder == nullptr) {
status = OpenCompactionOutputFile(compact);
if (!status.ok()) {
break;
@@ -1007,17 +1009,17 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
input->Next();
}
- if (status.ok() && shutting_down_.Acquire_Load()) {
+ if (status.ok() && shutting_down_.load(std::memory_order_acquire)) {
status = Status::IOError("Deleting DB during compaction");
}
- if (status.ok() && compact->builder != NULL) {
+ if (status.ok() && compact->builder != nullptr) {
status = FinishCompactionOutputFile(compact, input);
}
if (status.ok()) {
status = input->status();
}
delete input;
- input = NULL;
+ input = nullptr;
CompactionStats stats;
stats.micros = env_->NowMicros() - start_micros - imm_micros;
@@ -1040,34 +1042,37 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
RecordBackgroundError(status);
}
VersionSet::LevelSummaryStorage tmp;
- Log(options_.info_log,
- "compacted to: %s", versions_->LevelSummary(&tmp));
+ Log(options_.info_log, "compacted to: %s", versions_->LevelSummary(&tmp));
return status;
}
namespace {
+
struct IterState {
- port::Mutex* mu;
- Version* version;
- MemTable* mem;
- MemTable* imm;
+ port::Mutex* const mu;
+ Version* const version GUARDED_BY(mu);
+ MemTable* const mem GUARDED_BY(mu);
+ MemTable* const imm GUARDED_BY(mu);
+
+ IterState(port::Mutex* mutex, MemTable* mem, MemTable* imm, Version* version)
+ : mu(mutex), version(version), mem(mem), imm(imm) {}
};
static void CleanupIteratorState(void* arg1, void* arg2) {
IterState* state = reinterpret_cast<IterState*>(arg1);
state->mu->Lock();
state->mem->Unref();
- if (state->imm != NULL) state->imm->Unref();
+ if (state->imm != nullptr) state->imm->Unref();
state->version->Unref();
state->mu->Unlock();
delete state;
}
-} // namespace
+
+} // anonymous namespace
Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
SequenceNumber* latest_snapshot,
uint32_t* seed) {
- IterState* cleanup = new IterState;
mutex_.Lock();
*latest_snapshot = versions_->LastSequence();
@@ -1075,7 +1080,7 @@ Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
std::vector<Iterator*> list;
list.push_back(mem_->NewIterator());
mem_->Ref();
- if (imm_ != NULL) {
+ if (imm_ != nullptr) {
list.push_back(imm_->NewIterator());
imm_->Ref();
}
@@ -1084,11 +1089,8 @@ Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
NewMergingIterator(&internal_comparator_, &list[0], list.size());
versions_->current()->Ref();
- cleanup->mu = &mutex_;
- cleanup->mem = mem_;
- cleanup->imm = imm_;
- cleanup->version = versions_->current();
- internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, NULL);
+ IterState* cleanup = new IterState(&mutex_, mem_, imm_, versions_->current());
+ internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, nullptr);
*seed = ++seed_;
mutex_.Unlock();
@@ -1106,14 +1108,14 @@ int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
return versions_->MaxNextLevelOverlappingBytes();
}
-Status DBImpl::Get(const ReadOptions& options,
- const Slice& key,
+Status DBImpl::Get(const ReadOptions& options, const Slice& key,
std::string* value) {
Status s;
MutexLock l(&mutex_);
SequenceNumber snapshot;
- if (options.snapshot != NULL) {
- snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
+ if (options.snapshot != nullptr) {
+ snapshot =
+ static_cast<const SnapshotImpl*>(options.snapshot)->sequence_number();
} else {
snapshot = versions_->LastSequence();
}
@@ -1122,7 +1124,7 @@ Status DBImpl::Get(const ReadOptions& options,
MemTable* imm = imm_;
Version* current = versions_->current();
mem->Ref();
- if (imm != NULL) imm->Ref();
+ if (imm != nullptr) imm->Ref();
current->Ref();
bool have_stat_update = false;
@@ -1135,7 +1137,7 @@ Status DBImpl::Get(const ReadOptions& options,
LookupKey lkey(key, snapshot);
if (mem->Get(lkey, value, &s)) {
// Done
- } else if (imm != NULL && imm->Get(lkey, value, &s)) {
+ } else if (imm != nullptr && imm->Get(lkey, value, &s)) {
// Done
} else {
s = current->Get(options, lkey, value, &stats);
@@ -1148,7 +1150,7 @@ Status DBImpl::Get(const ReadOptions& options,
MaybeScheduleCompaction();
}
mem->Unref();
- if (imm != NULL) imm->Unref();
+ if (imm != nullptr) imm->Unref();
current->Unref();
return s;
}
@@ -1157,12 +1159,12 @@ Iterator* DBImpl::NewIterator(const ReadOptions& options) {
SequenceNumber latest_snapshot;
uint32_t seed;
Iterator* iter = NewInternalIterator(options, &latest_snapshot, &seed);
- return NewDBIterator(
- this, user_comparator(), iter,
- (options.snapshot != NULL
- ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
- : latest_snapshot),
- seed);
+ return NewDBIterator(this, user_comparator(), iter,
+ (options.snapshot != nullptr
+ ? static_cast<const SnapshotImpl*>(options.snapshot)
+ ->sequence_number()
+ : latest_snapshot),
+ seed);
}
void DBImpl::RecordReadSample(Slice key) {
@@ -1177,9 +1179,9 @@ const Snapshot* DBImpl::GetSnapshot() {
return snapshots_.New(versions_->LastSequence());
}
-void DBImpl::ReleaseSnapshot(const Snapshot* s) {
+void DBImpl::ReleaseSnapshot(const Snapshot* snapshot) {
MutexLock l(&mutex_);
- snapshots_.Delete(reinterpret_cast<const SnapshotImpl*>(s));
+ snapshots_.Delete(static_cast<const SnapshotImpl*>(snapshot));
}
// Convenience methods
@@ -1191,9 +1193,9 @@ Status DBImpl::Delete(const WriteOptions& options, const Slice& key) {
return DB::Delete(options, key);
}
-Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
+Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
Writer w(&mutex_);
- w.batch = my_batch;
+ w.batch = updates;
w.sync = options.sync;
w.done = false;
@@ -1207,13 +1209,13 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
}
// May temporarily unlock and wait.
- Status status = MakeRoomForWrite(my_batch == NULL);
+ Status status = MakeRoomForWrite(updates == nullptr);
uint64_t last_sequence = versions_->LastSequence();
Writer* last_writer = &w;
- if (status.ok() && my_batch != NULL) { // NULL batch is for compactions
- WriteBatch* updates = BuildBatchGroup(&last_writer);
- WriteBatchInternal::SetSequence(updates, last_sequence + 1);
- last_sequence += WriteBatchInternal::Count(updates);
+ if (status.ok() && updates != nullptr) { // nullptr batch is for compactions
+ WriteBatch* write_batch = BuildBatchGroup(&last_writer);
+ WriteBatchInternal::SetSequence(write_batch, last_sequence + 1);
+ last_sequence += WriteBatchInternal::Count(write_batch);
// Add to log and apply to memtable. We can release the lock
// during this phase since &w is currently responsible for logging
@@ -1221,7 +1223,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
// into mem_.
{
mutex_.Unlock();
- status = log_->AddRecord(WriteBatchInternal::Contents(updates));
+ status = log_->AddRecord(WriteBatchInternal::Contents(write_batch));
bool sync_error = false;
if (status.ok() && options.sync) {
status = logfile_->Sync();
@@ -1230,7 +1232,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
}
}
if (status.ok()) {
- status = WriteBatchInternal::InsertInto(updates, mem_);
+ status = WriteBatchInternal::InsertInto(write_batch, mem_);
}
mutex_.Lock();
if (sync_error) {
@@ -1240,7 +1242,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
RecordBackgroundError(status);
}
}
- if (updates == tmp_batch_) tmp_batch_->Clear();
+ if (write_batch == tmp_batch_) tmp_batch_->Clear();
versions_->SetLastSequence(last_sequence);
}
@@ -1265,12 +1267,13 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
}
// REQUIRES: Writer list must be non-empty
-// REQUIRES: First writer must have a non-NULL batch
+// REQUIRES: First writer must have a non-null batch
WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
+ mutex_.AssertHeld();
assert(!writers_.empty());
Writer* first = writers_.front();
WriteBatch* result = first->batch;
- assert(result != NULL);
+ assert(result != nullptr);
size_t size = WriteBatchInternal::ByteSize(first->batch);
@@ -1278,8 +1281,8 @@ WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
// original write is small, limit the growth so we do not slow
// down the small write too much.
size_t max_size = 1 << 20;
- if (size <= (128<<10)) {
- max_size = size + (128<<10);
+ if (size <= (128 << 10)) {
+ max_size = size + (128 << 10);
}
*last_writer = first;
@@ -1292,7 +1295,7 @@ WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
break;
}
- if (w->batch != NULL) {
+ if (w->batch != nullptr) {
size += WriteBatchInternal::ByteSize(w->batch);
if (size > max_size) {
// Do not make batch too big
@@ -1325,9 +1328,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
// Yield previous error
s = bg_error_;
break;
- } else if (
- allow_delay &&
- versions_->NumLevelFiles(0) >= config::kL0_SlowdownWritesTrigger) {
+ } else if (allow_delay && versions_->NumLevelFiles(0) >=
+ config::kL0_SlowdownWritesTrigger) {
// We are getting close to hitting a hard limit on the number of
// L0 files. Rather than delaying a single write by several
// seconds when we hit the hard limit, start delaying each
@@ -1342,20 +1344,20 @@ Status DBImpl::MakeRoomForWrite(bool force) {
(mem_->ApproximateMemoryUsage() <= options_.write_buffer_size)) {
// There is room in current memtable
break;
- } else if (imm_ != NULL) {
+ } else if (imm_ != nullptr) {
// We have filled up the current memtable, but the previous
// one is still being compacted, so we wait.
Log(options_.info_log, "Current memtable full; waiting...\n");
- bg_cv_.Wait();
+ background_work_finished_signal_.Wait();
} else if (versions_->NumLevelFiles(0) >= config::kL0_StopWritesTrigger) {
// There are too many level-0 files.
Log(options_.info_log, "Too many L0 files; waiting...\n");
- bg_cv_.Wait();
+ background_work_finished_signal_.Wait();
} else {
// Attempt to switch to a new memtable and trigger compaction of old
assert(versions_->PrevLogNumber() == 0);
uint64_t new_log_number = versions_->NewFileNumber();
- WritableFile* lfile = NULL;
+ WritableFile* lfile = nullptr;
s = env_->NewWritableFile(LogFileName(dbname_, new_log_number), &lfile);
if (!s.ok()) {
// Avoid chewing through file number space in a tight loop.
@@ -1368,10 +1370,10 @@ Status DBImpl::MakeRoomForWrite(bool force) {
logfile_number_ = new_log_number;
log_ = new log::Writer(lfile);
imm_ = mem_;
- has_imm_.Release_Store(imm_);
+ has_imm_.store(true, std::memory_order_release);
mem_ = new MemTable(internal_comparator_);
mem_->Ref();
- force = false; // Do not force another compaction if have room
+ force = false; // Do not force another compaction if have room
MaybeScheduleCompaction();
}
}
@@ -1405,21 +1407,16 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
snprintf(buf, sizeof(buf),
" Compactions\n"
"Level Files Size(MB) Time(sec) Read(MB) Write(MB)\n"
- "--------------------------------------------------\n"
- );
+ "--------------------------------------------------\n");
value->append(buf);
for (int level = 0; level < config::kNumLevels; level++) {
int files = versions_->NumLevelFiles(level);
if (stats_[level].micros > 0 || files > 0) {
- snprintf(
- buf, sizeof(buf),
- "%3d %8d %8.0f %9.0f %8.0f %9.0f\n",
- level,
- files,
- versions_->NumLevelBytes(level) / 1048576.0,
- stats_[level].micros / 1e6,
- stats_[level].bytes_read / 1048576.0,
- stats_[level].bytes_written / 1048576.0);
+ snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n", level,
+ files, versions_->NumLevelBytes(level) / 1048576.0,
+ stats_[level].micros / 1e6,
+ stats_[level].bytes_read / 1048576.0,
+ stats_[level].bytes_written / 1048576.0);
value->append(buf);
}
}
@@ -1445,16 +1442,11 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
return false;
}
-void DBImpl::GetApproximateSizes(
- const Range* range, int n,
- uint64_t* sizes) {
+void DBImpl::GetApproximateSizes(const Range* range, int n, uint64_t* sizes) {
// TODO(opt): better implementation
- Version* v;
- {
- MutexLock l(&mutex_);
- versions_->current()->Ref();
- v = versions_->current();
- }
+ MutexLock l(&mutex_);
+ Version* v = versions_->current();
+ v->Ref();
for (int i = 0; i < n; i++) {
// Convert user_key into a corresponding internal key.
@@ -1465,10 +1457,7 @@ void DBImpl::GetApproximateSizes(
sizes[i] = (limit >= start ? limit - start : 0);
}
- {
- MutexLock l(&mutex_);
- v->Unref();
- }
+ v->Unref();
}
// Default implementations of convenience methods that subclasses of DB
@@ -1485,11 +1474,10 @@ Status DB::Delete(const WriteOptions& opt, const Slice& key) {
return Write(opt, &batch);
}
-DB::~DB() { }
+DB::~DB() = default;
-Status DB::Open(const Options& options, const std::string& dbname,
- DB** dbptr) {
- *dbptr = NULL;
+Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
+ *dbptr = nullptr;
DBImpl* impl = new DBImpl(options, dbname);
impl->mutex_.Lock();
@@ -1497,7 +1485,7 @@ Status DB::Open(const Options& options, const std::string& dbname,
// Recover handles create_if_missing, error_if_exists
bool save_manifest = false;
Status s = impl->Recover(&edit, &save_manifest);
- if (s.ok() && impl->mem_ == NULL) {
+ if (s.ok() && impl->mem_ == nullptr) {
// Create new log and a corresponding memtable.
uint64_t new_log_number = impl->versions_->NewFileNumber();
WritableFile* lfile;
@@ -1523,7 +1511,7 @@ Status DB::Open(const Options& options, const std::string& dbname,
}
impl->mutex_.Unlock();
if (s.ok()) {
- assert(impl->mem_ != NULL);
+ assert(impl->mem_ != nullptr);
*dbptr = impl;
} else {
delete impl;
@@ -1531,21 +1519,20 @@ Status DB::Open(const Options& options, const std::string& dbname,
return s;
}
-Snapshot::~Snapshot() {
-}
+Snapshot::~Snapshot() = default;
Status DestroyDB(const std::string& dbname, const Options& options) {
Env* env = options.env;
std::vector<std::string> filenames;
- // Ignore error in case directory does not exist
- env->GetChildren(dbname, &filenames);
- if (filenames.empty()) {
+ Status result = env->GetChildren(dbname, &filenames);
+ if (!result.ok()) {
+ // Ignore error in case directory does not exist
return Status::OK();
}
FileLock* lock;
const std::string lockname = LockFileName(dbname);
- Status result = env->LockFile(lockname, &lock);
+ result = env->LockFile(lockname, &lock);
if (result.ok()) {
uint64_t number;
FileType type;
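The db_impl.cc hunks above replace LevelDB's old port::AtomicPointer idiom (Acquire_Load, Release_Store, NoBarrier_Load compared against NULL) with std::atomic<bool> and explicit memory orders. Below is a minimal standalone sketch of that acquire/release flag pattern, assuming nothing beyond the C++11 standard library; the names shutting_down, work_done and BackgroundWorker are placeholders chosen to mirror the diff, not LevelDB code.

#include <atomic>
#include <cstdio>
#include <thread>

std::atomic<bool> shutting_down{false};
int work_done = 0;  // Plain int, published safely by the release/acquire pair.

void BackgroundWorker() {
  // Acquire load: once this observes true, it also observes every write the
  // signalling thread made before its release store (here, work_done = 42).
  while (!shutting_down.load(std::memory_order_acquire)) {
    // ... a unit of background work per iteration ...
  }
  std::printf("worker saw shutdown, work_done=%d\n", work_done);
}

int main() {
  std::thread worker(BackgroundWorker);
  work_done = 42;
  // Release store: pairs with the acquire load above, replacing the old
  // Release_Store(ptr) / Acquire_Load() != NULL test.
  shutting_down.store(true, std::memory_order_release);
  worker.join();
  return 0;
}

The relaxed load used for has_imm_ in DoCompactionWork is the cheaper variant: it only asks whether an immutable memtable is probably present, and the mutex taken immediately afterwards provides the real synchronization.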
diff --git a/src/leveldb/db/db_impl.h b/src/leveldb/db/db_impl.h
index 8ff323e728..685735c733 100644
--- a/src/leveldb/db/db_impl.h
+++ b/src/leveldb/db/db_impl.h
@@ -5,8 +5,11 @@
#ifndef STORAGE_LEVELDB_DB_DB_IMPL_H_
#define STORAGE_LEVELDB_DB_DB_IMPL_H_
+#include <atomic>
#include <deque>
#include <set>
+#include <string>
+
#include "db/dbformat.h"
#include "db/log_writer.h"
#include "db/snapshot.h"
@@ -26,21 +29,25 @@ class VersionSet;
class DBImpl : public DB {
public:
DBImpl(const Options& options, const std::string& dbname);
- virtual ~DBImpl();
+
+ DBImpl(const DBImpl&) = delete;
+ DBImpl& operator=(const DBImpl&) = delete;
+
+ ~DBImpl() override;
// Implementations of the DB interface
- virtual Status Put(const WriteOptions&, const Slice& key, const Slice& value);
- virtual Status Delete(const WriteOptions&, const Slice& key);
- virtual Status Write(const WriteOptions& options, WriteBatch* updates);
- virtual Status Get(const ReadOptions& options,
- const Slice& key,
- std::string* value);
- virtual Iterator* NewIterator(const ReadOptions&);
- virtual const Snapshot* GetSnapshot();
- virtual void ReleaseSnapshot(const Snapshot* snapshot);
- virtual bool GetProperty(const Slice& property, std::string* value);
- virtual void GetApproximateSizes(const Range* range, int n, uint64_t* sizes);
- virtual void CompactRange(const Slice* begin, const Slice* end);
+ Status Put(const WriteOptions&, const Slice& key,
+ const Slice& value) override;
+ Status Delete(const WriteOptions&, const Slice& key) override;
+ Status Write(const WriteOptions& options, WriteBatch* updates) override;
+ Status Get(const ReadOptions& options, const Slice& key,
+ std::string* value) override;
+ Iterator* NewIterator(const ReadOptions&) override;
+ const Snapshot* GetSnapshot() override;
+ void ReleaseSnapshot(const Snapshot* snapshot) override;
+ bool GetProperty(const Slice& property, std::string* value) override;
+ void GetApproximateSizes(const Range* range, int n, uint64_t* sizes) override;
+ void CompactRange(const Slice* begin, const Slice* end) override;
// Extra methods (for testing) that are not in the public DB interface
@@ -69,6 +76,31 @@ class DBImpl : public DB {
struct CompactionState;
struct Writer;
+ // Information for a manual compaction
+ struct ManualCompaction {
+ int level;
+ bool done;
+ const InternalKey* begin; // null means beginning of key range
+ const InternalKey* end; // null means end of key range
+ InternalKey tmp_storage; // Used to keep track of compaction progress
+ };
+
+ // Per level compaction stats. stats_[level] stores the stats for
+ // compactions that produced data for the specified "level".
+ struct CompactionStats {
+ CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}
+
+ void Add(const CompactionStats& c) {
+ this->micros += c.micros;
+ this->bytes_read += c.bytes_read;
+ this->bytes_written += c.bytes_written;
+ }
+
+ int64_t micros;
+ int64_t bytes_read;
+ int64_t bytes_written;
+ };
+
Iterator* NewInternalIterator(const ReadOptions&,
SequenceNumber* latest_snapshot,
uint32_t* seed);
@@ -84,7 +116,7 @@ class DBImpl : public DB {
void MaybeIgnoreError(Status* s) const;
// Delete any unneeded files and stale in-memory entries.
- void DeleteObsoleteFiles();
+ void DeleteObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Compact the in-memory write buffer to disk. Switches to a new
// log-file/memtable and writes a new descriptor iff successful.
@@ -100,14 +132,15 @@ class DBImpl : public DB {
Status MakeRoomForWrite(bool force /* compact even if there is room? */)
EXCLUSIVE_LOCKS_REQUIRED(mutex_);
- WriteBatch* BuildBatchGroup(Writer** last_writer);
+ WriteBatch* BuildBatchGroup(Writer** last_writer)
+ EXCLUSIVE_LOCKS_REQUIRED(mutex_);
void RecordBackgroundError(const Status& s);
void MaybeScheduleCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
static void BGWork(void* db);
void BackgroundCall();
- void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
void CleanupCompaction(CompactionState* compact)
EXCLUSIVE_LOCKS_REQUIRED(mutex_);
Status DoCompactionWork(CompactionState* compact)
@@ -118,93 +151,66 @@ class DBImpl : public DB {
Status InstallCompactionResults(CompactionState* compact)
EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ const Comparator* user_comparator() const {
+ return internal_comparator_.user_comparator();
+ }
+
// Constant after construction
Env* const env_;
const InternalKeyComparator internal_comparator_;
const InternalFilterPolicy internal_filter_policy_;
const Options options_; // options_.comparator == &internal_comparator_
- bool owns_info_log_;
- bool owns_cache_;
+ const bool owns_info_log_;
+ const bool owns_cache_;
const std::string dbname_;
// table_cache_ provides its own synchronization
- TableCache* table_cache_;
+ TableCache* const table_cache_;
- // Lock over the persistent DB state. Non-NULL iff successfully acquired.
+ // Lock over the persistent DB state. Non-null iff successfully acquired.
FileLock* db_lock_;
// State below is protected by mutex_
port::Mutex mutex_;
- port::AtomicPointer shutting_down_;
- port::CondVar bg_cv_; // Signalled when background work finishes
+ std::atomic<bool> shutting_down_;
+ port::CondVar background_work_finished_signal_ GUARDED_BY(mutex_);
MemTable* mem_;
- MemTable* imm_; // Memtable being compacted
- port::AtomicPointer has_imm_; // So bg thread can detect non-NULL imm_
+ MemTable* imm_ GUARDED_BY(mutex_); // Memtable being compacted
+ std::atomic<bool> has_imm_; // So bg thread can detect non-null imm_
WritableFile* logfile_;
- uint64_t logfile_number_;
+ uint64_t logfile_number_ GUARDED_BY(mutex_);
log::Writer* log_;
- uint32_t seed_; // For sampling.
+ uint32_t seed_ GUARDED_BY(mutex_); // For sampling.
// Queue of writers.
- std::deque<Writer*> writers_;
- WriteBatch* tmp_batch_;
+ std::deque<Writer*> writers_ GUARDED_BY(mutex_);
+ WriteBatch* tmp_batch_ GUARDED_BY(mutex_);
- SnapshotList snapshots_;
+ SnapshotList snapshots_ GUARDED_BY(mutex_);
// Set of table files to protect from deletion because they are
// part of ongoing compactions.
- std::set<uint64_t> pending_outputs_;
+ std::set<uint64_t> pending_outputs_ GUARDED_BY(mutex_);
// Has a background compaction been scheduled or is running?
- bool bg_compaction_scheduled_;
+ bool background_compaction_scheduled_ GUARDED_BY(mutex_);
- // Information for a manual compaction
- struct ManualCompaction {
- int level;
- bool done;
- const InternalKey* begin; // NULL means beginning of key range
- const InternalKey* end; // NULL means end of key range
- InternalKey tmp_storage; // Used to keep track of compaction progress
- };
- ManualCompaction* manual_compaction_;
+ ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);
- VersionSet* versions_;
+ VersionSet* const versions_ GUARDED_BY(mutex_);
// Have we encountered a background error in paranoid mode?
- Status bg_error_;
-
- // Per level compaction stats. stats_[level] stores the stats for
- // compactions that produced data for the specified "level".
- struct CompactionStats {
- int64_t micros;
- int64_t bytes_read;
- int64_t bytes_written;
-
- CompactionStats() : micros(0), bytes_read(0), bytes_written(0) { }
+ Status bg_error_ GUARDED_BY(mutex_);
- void Add(const CompactionStats& c) {
- this->micros += c.micros;
- this->bytes_read += c.bytes_read;
- this->bytes_written += c.bytes_written;
- }
- };
- CompactionStats stats_[config::kNumLevels];
-
- // No copying allowed
- DBImpl(const DBImpl&);
- void operator=(const DBImpl&);
-
- const Comparator* user_comparator() const {
- return internal_comparator_.user_comparator();
- }
+ CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_);
};
// Sanitize db options. The caller should delete result.info_log if
// it is not equal to src.info_log.
-extern Options SanitizeOptions(const std::string& db,
- const InternalKeyComparator* icmp,
- const InternalFilterPolicy* ipolicy,
- const Options& src);
+Options SanitizeOptions(const std::string& db,
+ const InternalKeyComparator* icmp,
+ const InternalFilterPolicy* ipolicy,
+ const Options& src);
} // namespace leveldb
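The new db_impl.h leans on Clang thread-safety annotations: members gain GUARDED_BY(mutex_) and methods gain EXCLUSIVE_LOCKS_REQUIRED(mutex_), so clang's -Wthread-safety can flag unlocked access at compile time. LevelDB takes these macros from "port/thread_annotations.h"; the sketch below re-creates just enough of them to stay self-contained (they expand to nothing outside Clang) and illustrates only the annotation syntax, not LevelDB code.

#include <mutex>

#if defined(__clang__)
#define CAPABILITY(x) __attribute__((capability(x)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define EXCLUSIVE_LOCKS_REQUIRED(...) \
  __attribute__((exclusive_locks_required(__VA_ARGS__)))
#else
#define CAPABILITY(x)
#define ACQUIRE(...)
#define RELEASE(...)
#define GUARDED_BY(x)
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#endif

// Minimal mutex wrapper declared as a capability, much as port::Mutex is.
class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() { mu_.lock(); }
  void Unlock() RELEASE() { mu_.unlock(); }

 private:
  std::mutex mu_;
};

class Counter {
 public:
  void Increment() {
    mu_.Lock();
    IncrementLocked();  // Fine: the analysis knows mu_ is held here.
    mu_.Unlock();
  }

 private:
  // Calling this without mu_ held is a compile-time warning under
  // clang++ -Wthread-safety.
  void IncrementLocked() EXCLUSIVE_LOCKS_REQUIRED(mu_) { ++count_; }

  Mutex mu_;
  int count_ GUARDED_BY(mu_) = 0;  // Reads and writes require mu_.
};

int main() {
  Counter counter;
  counter.Increment();
  return 0;
}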
diff --git a/src/leveldb/db/db_iter.cc b/src/leveldb/db/db_iter.cc
index 3b2035e9e3..98715a9502 100644
--- a/src/leveldb/db/db_iter.cc
+++ b/src/leveldb/db/db_iter.cc
@@ -4,9 +4,9 @@
#include "db/db_iter.h"
-#include "db/filename.h"
#include "db/db_impl.h"
#include "db/dbformat.h"
+#include "db/filename.h"
#include "leveldb/env.h"
#include "leveldb/iterator.h"
#include "port/port.h"
@@ -36,17 +36,14 @@ namespace {
// combines multiple entries for the same userkey found in the DB
// representation into a single entry while accounting for sequence
// numbers, deletion markers, overwrites, etc.
-class DBIter: public Iterator {
+class DBIter : public Iterator {
public:
// Which direction is the iterator currently moving?
// (1) When moving forward, the internal iterator is positioned at
// the exact entry that yields this->key(), this->value()
// (2) When moving backwards, the internal iterator is positioned
// just before all entries whose user key == this->key().
- enum Direction {
- kForward,
- kReverse
- };
+ enum Direction { kForward, kReverse };
DBIter(DBImpl* db, const Comparator* cmp, Iterator* iter, SequenceNumber s,
uint32_t seed)
@@ -57,21 +54,22 @@ class DBIter: public Iterator {
direction_(kForward),
valid_(false),
rnd_(seed),
- bytes_counter_(RandomPeriod()) {
- }
- virtual ~DBIter() {
- delete iter_;
- }
- virtual bool Valid() const { return valid_; }
- virtual Slice key() const {
+ bytes_until_read_sampling_(RandomCompactionPeriod()) {}
+
+ DBIter(const DBIter&) = delete;
+ DBIter& operator=(const DBIter&) = delete;
+
+ ~DBIter() override { delete iter_; }
+ bool Valid() const override { return valid_; }
+ Slice key() const override {
assert(valid_);
return (direction_ == kForward) ? ExtractUserKey(iter_->key()) : saved_key_;
}
- virtual Slice value() const {
+ Slice value() const override {
assert(valid_);
return (direction_ == kForward) ? iter_->value() : saved_value_;
}
- virtual Status status() const {
+ Status status() const override {
if (status_.ok()) {
return iter_->status();
} else {
@@ -79,11 +77,11 @@ class DBIter: public Iterator {
}
}
- virtual void Next();
- virtual void Prev();
- virtual void Seek(const Slice& target);
- virtual void SeekToFirst();
- virtual void SeekToLast();
+ void Next() override;
+ void Prev() override;
+ void Seek(const Slice& target) override;
+ void SeekToFirst() override;
+ void SeekToLast() override;
private:
void FindNextUserEntry(bool skipping, std::string* skip);
@@ -103,38 +101,35 @@ class DBIter: public Iterator {
}
}
- // Pick next gap with average value of config::kReadBytesPeriod.
- ssize_t RandomPeriod() {
- return rnd_.Uniform(2*config::kReadBytesPeriod);
+ // Picks the number of bytes that can be read until a compaction is scheduled.
+ size_t RandomCompactionPeriod() {
+ return rnd_.Uniform(2 * config::kReadBytesPeriod);
}
DBImpl* db_;
const Comparator* const user_comparator_;
Iterator* const iter_;
SequenceNumber const sequence_;
-
Status status_;
- std::string saved_key_; // == current key when direction_==kReverse
- std::string saved_value_; // == current raw value when direction_==kReverse
+ std::string saved_key_; // == current key when direction_==kReverse
+ std::string saved_value_; // == current raw value when direction_==kReverse
Direction direction_;
bool valid_;
-
Random rnd_;
- ssize_t bytes_counter_;
-
- // No copying allowed
- DBIter(const DBIter&);
- void operator=(const DBIter&);
+ size_t bytes_until_read_sampling_;
};
inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {
Slice k = iter_->key();
- ssize_t n = k.size() + iter_->value().size();
- bytes_counter_ -= n;
- while (bytes_counter_ < 0) {
- bytes_counter_ += RandomPeriod();
+
+ size_t bytes_read = k.size() + iter_->value().size();
+ while (bytes_until_read_sampling_ < bytes_read) {
+ bytes_until_read_sampling_ += RandomCompactionPeriod();
db_->RecordReadSample(k);
}
+ assert(bytes_until_read_sampling_ >= bytes_read);
+ bytes_until_read_sampling_ -= bytes_read;
+
if (!ParseInternalKey(k, ikey)) {
status_ = Status::Corruption("corrupted internal key in DBIter");
return false;
@@ -165,6 +160,15 @@ void DBIter::Next() {
} else {
// Store in saved_key_ the current key so we skip it below.
SaveKey(ExtractUserKey(iter_->key()), &saved_key_);
+
+ // iter_ is pointing to current key. We can now safely move to the next to
+ // avoid checking current key.
+ iter_->Next();
+ if (!iter_->Valid()) {
+ valid_ = false;
+ saved_key_.clear();
+ return;
+ }
}
FindNextUserEntry(true, &saved_key_);
@@ -218,8 +222,8 @@ void DBIter::Prev() {
ClearSavedValue();
return;
}
- if (user_comparator_->Compare(ExtractUserKey(iter_->key()),
- saved_key_) < 0) {
+ if (user_comparator_->Compare(ExtractUserKey(iter_->key()), saved_key_) <
+ 0) {
break;
}
}
@@ -275,8 +279,8 @@ void DBIter::Seek(const Slice& target) {
direction_ = kForward;
ClearSavedValue();
saved_key_.clear();
- AppendInternalKey(
- &saved_key_, ParsedInternalKey(target, sequence_, kValueTypeForSeek));
+ AppendInternalKey(&saved_key_,
+ ParsedInternalKey(target, sequence_, kValueTypeForSeek));
iter_->Seek(saved_key_);
if (iter_->Valid()) {
FindNextUserEntry(false, &saved_key_ /* temporary storage */);
@@ -305,12 +309,9 @@ void DBIter::SeekToLast() {
} // anonymous namespace
-Iterator* NewDBIterator(
- DBImpl* db,
- const Comparator* user_key_comparator,
- Iterator* internal_iter,
- SequenceNumber sequence,
- uint32_t seed) {
+Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator,
+ Iterator* internal_iter, SequenceNumber sequence,
+ uint32_t seed) {
return new DBIter(db, user_key_comparator, internal_iter, sequence, seed);
}
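In db_iter.cc above, read sampling moves from a signed countdown (ssize_t bytes_counter_, decremented and then topped up while negative) to an unsigned budget (size_t bytes_until_read_sampling_) that is topped up before subtracting, so it can never wrap. A standalone sketch of that pattern follows, assuming a 1 MiB average period in place of config::kReadBytesPeriod and a printf in place of RecordReadSample:

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <random>

std::mt19937 rng{42};

// Stand-in for rnd_.Uniform(2 * config::kReadBytesPeriod): a period that
// averages about 1 MiB.
size_t RandomSamplingPeriod() {
  return std::uniform_int_distribution<size_t>(0, 2 * 1048576)(rng);
}

size_t bytes_until_sampling = RandomSamplingPeriod();

void AccountRead(size_t bytes_read) {
  // Top the budget up to at least bytes_read *before* subtracting; with an
  // unsigned counter, subtracting first could wrap around.
  while (bytes_until_sampling < bytes_read) {
    bytes_until_sampling += RandomSamplingPeriod();
    std::printf("sample taken\n");  // stand-in for db_->RecordReadSample(key)
  }
  assert(bytes_until_sampling >= bytes_read);
  bytes_until_sampling -= bytes_read;
}

int main() {
  for (int i = 0; i < 64; ++i) {
    AccountRead(64 * 1024);  // pretend every key/value pair is 64 KiB
  }
  return 0;
}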
diff --git a/src/leveldb/db/db_iter.h b/src/leveldb/db/db_iter.h
index 04927e937b..fd93e912a0 100644
--- a/src/leveldb/db/db_iter.h
+++ b/src/leveldb/db/db_iter.h
@@ -6,8 +6,9 @@
#define STORAGE_LEVELDB_DB_DB_ITER_H_
#include <stdint.h>
-#include "leveldb/db.h"
+
#include "db/dbformat.h"
+#include "leveldb/db.h"
namespace leveldb {
@@ -16,12 +17,9 @@ class DBImpl;
// Return a new iterator that converts internal keys (yielded by
// "*internal_iter") that were live at the specified "sequence" number
// into appropriate user keys.
-extern Iterator* NewDBIterator(
- DBImpl* db,
- const Comparator* user_key_comparator,
- Iterator* internal_iter,
- SequenceNumber sequence,
- uint32_t seed);
+Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator,
+ Iterator* internal_iter, SequenceNumber sequence,
+ uint32_t seed);
} // namespace leveldb
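The db_impl.h and db_iter.cc changes above also apply the usual C++11 clean-up: 'override' instead of repeating 'virtual', '= delete' in place of privately declared but undefined copy members, and '= default' in place of empty destructor bodies (DB::~DB, Snapshot::~Snapshot). The small self-contained sketch below shows why each form is preferred; the Iterator/VectorIterator classes here are invented for illustration and are not the LevelDB ones.

#include <string>
#include <utility>

class Iterator {
 public:
  virtual ~Iterator() = default;  // Explicit, with no empty "{ }" body.
  virtual bool Valid() const = 0;
  virtual std::string key() const = 0;
};

class VectorIterator : public Iterator {
 public:
  explicit VectorIterator(std::string key) : key_(std::move(key)) {}

  // Copying is banned at the declaration site; misuse now fails to compile
  // instead of failing at link time as with the old private/undefined trick.
  VectorIterator(const VectorIterator&) = delete;
  VectorIterator& operator=(const VectorIterator&) = delete;

  ~VectorIterator() override = default;

  // 'override' makes the compiler reject signature mismatches, e.g. a missing
  // 'const' that would otherwise silently introduce a new, unrelated method.
  bool Valid() const override { return !key_.empty(); }
  std::string key() const override { return key_; }

 private:
  std::string key_;
};

int main() {
  VectorIterator it("k1");
  return it.Valid() ? 0 : 1;
}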
diff --git a/src/leveldb/db/db_test.cc b/src/leveldb/db/db_test.cc
index a0b08bc19c..beb1d3bdef 100644
--- a/src/leveldb/db/db_test.cc
+++ b/src/leveldb/db/db_test.cc
@@ -3,14 +3,20 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "leveldb/db.h"
-#include "leveldb/filter_policy.h"
+
+#include <atomic>
+#include <string>
+
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "leveldb/cache.h"
#include "leveldb/env.h"
+#include "leveldb/filter_policy.h"
#include "leveldb/table.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/hash.h"
#include "util/logging.h"
#include "util/mutexlock.h"
@@ -25,83 +31,116 @@ static std::string RandomString(Random* rnd, int len) {
return r;
}
+static std::string RandomKey(Random* rnd) {
+ int len =
+ (rnd->OneIn(3) ? 1 // Short sometimes to encourage collisions
+ : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
+ return test::RandomKey(rnd, len);
+}
+
namespace {
class AtomicCounter {
- private:
- port::Mutex mu_;
- int count_;
public:
- AtomicCounter() : count_(0) { }
- void Increment() {
- IncrementBy(1);
- }
- void IncrementBy(int count) {
+ AtomicCounter() : count_(0) {}
+ void Increment() { IncrementBy(1); }
+ void IncrementBy(int count) LOCKS_EXCLUDED(mu_) {
MutexLock l(&mu_);
count_ += count;
}
- int Read() {
+ int Read() LOCKS_EXCLUDED(mu_) {
MutexLock l(&mu_);
return count_;
}
- void Reset() {
+ void Reset() LOCKS_EXCLUDED(mu_) {
MutexLock l(&mu_);
count_ = 0;
}
+
+ private:
+ port::Mutex mu_;
+ int count_ GUARDED_BY(mu_);
};
void DelayMilliseconds(int millis) {
Env::Default()->SleepForMicroseconds(millis * 1000);
}
-}
+} // namespace
+
+// Test Env to override default Env behavior for testing.
+class TestEnv : public EnvWrapper {
+ public:
+ explicit TestEnv(Env* base) : EnvWrapper(base), ignore_dot_files_(false) {}
+
+ void SetIgnoreDotFiles(bool ignored) { ignore_dot_files_ = ignored; }
+
+ Status GetChildren(const std::string& dir,
+ std::vector<std::string>* result) override {
+ Status s = target()->GetChildren(dir, result);
+ if (!s.ok() || !ignore_dot_files_) {
+ return s;
+ }
+
+ std::vector<std::string>::iterator it = result->begin();
+ while (it != result->end()) {
+ if ((*it == ".") || (*it == "..")) {
+ it = result->erase(it);
+ } else {
+ ++it;
+ }
+ }
+
+ return s;
+ }
+
+ private:
+ bool ignore_dot_files_;
+};
-// Special Env used to delay background operations
+// Special Env used to delay background operations.
class SpecialEnv : public EnvWrapper {
public:
- // sstable/log Sync() calls are blocked while this pointer is non-NULL.
- port::AtomicPointer delay_data_sync_;
+ // sstable/log Sync() calls are blocked while this pointer is non-null.
+ std::atomic<bool> delay_data_sync_;
// sstable/log Sync() calls return an error.
- port::AtomicPointer data_sync_error_;
+ std::atomic<bool> data_sync_error_;
- // Simulate no-space errors while this pointer is non-NULL.
- port::AtomicPointer no_space_;
+ // Simulate no-space errors while this pointer is non-null.
+ std::atomic<bool> no_space_;
- // Simulate non-writable file system while this pointer is non-NULL
- port::AtomicPointer non_writable_;
+ // Simulate non-writable file system while this pointer is non-null.
+ std::atomic<bool> non_writable_;
- // Force sync of manifest files to fail while this pointer is non-NULL
- port::AtomicPointer manifest_sync_error_;
+ // Force sync of manifest files to fail while this pointer is non-null.
+ std::atomic<bool> manifest_sync_error_;
- // Force write to manifest files to fail while this pointer is non-NULL
- port::AtomicPointer manifest_write_error_;
+ // Force write to manifest files to fail while this pointer is non-null.
+ std::atomic<bool> manifest_write_error_;
bool count_random_reads_;
AtomicCounter random_read_counter_;
- explicit SpecialEnv(Env* base) : EnvWrapper(base) {
- delay_data_sync_.Release_Store(NULL);
- data_sync_error_.Release_Store(NULL);
- no_space_.Release_Store(NULL);
- non_writable_.Release_Store(NULL);
- count_random_reads_ = false;
- manifest_sync_error_.Release_Store(NULL);
- manifest_write_error_.Release_Store(NULL);
- }
+ explicit SpecialEnv(Env* base)
+ : EnvWrapper(base),
+ delay_data_sync_(false),
+ data_sync_error_(false),
+ no_space_(false),
+ non_writable_(false),
+ manifest_sync_error_(false),
+ manifest_write_error_(false),
+ count_random_reads_(false) {}
Status NewWritableFile(const std::string& f, WritableFile** r) {
class DataFile : public WritableFile {
private:
- SpecialEnv* env_;
- WritableFile* base_;
+ SpecialEnv* const env_;
+ WritableFile* const base_;
public:
- DataFile(SpecialEnv* env, WritableFile* base)
- : env_(env),
- base_(base) {
- }
+ DataFile(SpecialEnv* env, WritableFile* base) : env_(env), base_(base) {}
~DataFile() { delete base_; }
Status Append(const Slice& data) {
- if (env_->no_space_.Acquire_Load() != NULL) {
+ if (env_->no_space_.load(std::memory_order_acquire)) {
// Drop writes on the floor
return Status::OK();
} else {
@@ -111,24 +150,26 @@ class SpecialEnv : public EnvWrapper {
Status Close() { return base_->Close(); }
Status Flush() { return base_->Flush(); }
Status Sync() {
- if (env_->data_sync_error_.Acquire_Load() != NULL) {
+ if (env_->data_sync_error_.load(std::memory_order_acquire)) {
return Status::IOError("simulated data sync error");
}
- while (env_->delay_data_sync_.Acquire_Load() != NULL) {
+ while (env_->delay_data_sync_.load(std::memory_order_acquire)) {
DelayMilliseconds(100);
}
return base_->Sync();
}
+ std::string GetName() const override { return ""; }
};
class ManifestFile : public WritableFile {
private:
SpecialEnv* env_;
WritableFile* base_;
+
public:
- ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { }
+ ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) {}
~ManifestFile() { delete base_; }
Status Append(const Slice& data) {
- if (env_->manifest_write_error_.Acquire_Load() != NULL) {
+ if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
return Status::IOError("simulated writer error");
} else {
return base_->Append(data);
@@ -137,24 +178,25 @@ class SpecialEnv : public EnvWrapper {
Status Close() { return base_->Close(); }
Status Flush() { return base_->Flush(); }
Status Sync() {
- if (env_->manifest_sync_error_.Acquire_Load() != NULL) {
+ if (env_->manifest_sync_error_.load(std::memory_order_acquire)) {
return Status::IOError("simulated sync error");
} else {
return base_->Sync();
}
}
+ std::string GetName() const override { return ""; }
};
- if (non_writable_.Acquire_Load() != NULL) {
+ if (non_writable_.load(std::memory_order_acquire)) {
return Status::IOError("simulated write error");
}
Status s = target()->NewWritableFile(f, r);
if (s.ok()) {
- if (strstr(f.c_str(), ".ldb") != NULL ||
- strstr(f.c_str(), ".log") != NULL) {
+ if (strstr(f.c_str(), ".ldb") != nullptr ||
+ strstr(f.c_str(), ".log") != nullptr) {
*r = new DataFile(this, *r);
- } else if (strstr(f.c_str(), "MANIFEST") != NULL) {
+ } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
*r = new ManifestFile(this, *r);
}
}
@@ -166,16 +208,17 @@ class SpecialEnv : public EnvWrapper {
private:
RandomAccessFile* target_;
AtomicCounter* counter_;
+
public:
CountingFile(RandomAccessFile* target, AtomicCounter* counter)
- : target_(target), counter_(counter) {
- }
- virtual ~CountingFile() { delete target_; }
- virtual Status Read(uint64_t offset, size_t n, Slice* result,
- char* scratch) const {
+ : target_(target), counter_(counter) {}
+ ~CountingFile() override { delete target_; }
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
counter_->Increment();
return target_->Read(offset, n, result, scratch);
}
+ std::string GetName() const override { return ""; }
};
Status s = target()->NewRandomAccessFile(f, r);
@@ -187,19 +230,6 @@ class SpecialEnv : public EnvWrapper {
};
class DBTest {
- private:
- const FilterPolicy* filter_policy_;
-
- // Sequence of option configurations to try
- enum OptionConfig {
- kDefault,
- kReuse,
- kFilter,
- kUncompressed,
- kEnd
- };
- int option_config_;
-
public:
std::string dbname_;
SpecialEnv* env_;
@@ -207,12 +237,11 @@ class DBTest {
Options last_options_;
- DBTest() : option_config_(kDefault),
- env_(new SpecialEnv(Env::Default())) {
+ DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) {
filter_policy_ = NewBloomFilterPolicy(10);
dbname_ = test::TmpDir() + "/db_test";
DestroyDB(dbname_, Options());
- db_ = NULL;
+ db_ = nullptr;
Reopen();
}
@@ -255,31 +284,27 @@ class DBTest {
return options;
}
- DBImpl* dbfull() {
- return reinterpret_cast<DBImpl*>(db_);
- }
+ DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }
- void Reopen(Options* options = NULL) {
- ASSERT_OK(TryReopen(options));
- }
+ void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); }
void Close() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
}
- void DestroyAndReopen(Options* options = NULL) {
+ void DestroyAndReopen(Options* options = nullptr) {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
DestroyDB(dbname_, Options());
ASSERT_OK(TryReopen(options));
}
Status TryReopen(Options* options) {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
Options opts;
- if (options != NULL) {
+ if (options != nullptr) {
opts = *options;
} else {
opts = CurrentOptions();
@@ -294,11 +319,9 @@ class DBTest {
return db_->Put(WriteOptions(), k, v);
}
- Status Delete(const std::string& k) {
- return db_->Delete(WriteOptions(), k);
- }
+ Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }
- std::string Get(const std::string& k, const Snapshot* snapshot = NULL) {
+ std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
ReadOptions options;
options.snapshot = snapshot;
std::string result;
@@ -382,10 +405,9 @@ class DBTest {
int NumTableFilesAtLevel(int level) {
std::string property;
- ASSERT_TRUE(
- db_->GetProperty("leveldb.num-files-at-level" + NumberToString(level),
- &property));
- return atoi(property.c_str());
+ ASSERT_TRUE(db_->GetProperty(
+ "leveldb.num-files-at-level" + NumberToString(level), &property));
+ return std::stoi(property);
}
int TotalTableFiles() {
@@ -431,11 +453,12 @@ class DBTest {
}
// Do n memtable compactions, each of which produces an sstable
- // covering the range [small,large].
- void MakeTables(int n, const std::string& small, const std::string& large) {
+ // covering the range [small_key,large_key].
+ void MakeTables(int n, const std::string& small_key,
+ const std::string& large_key) {
for (int i = 0; i < n; i++) {
- Put(small, "begin");
- Put(large, "end");
+ Put(small_key, "begin");
+ Put(large_key, "end");
dbfull()->TEST_CompactMemTable();
}
}
@@ -448,9 +471,9 @@ class DBTest {
void DumpFileCounts(const char* label) {
fprintf(stderr, "---\n%s:\n", label);
- fprintf(stderr, "maxoverlap: %lld\n",
- static_cast<long long>(
- dbfull()->TEST_MaxNextLevelOverlappingBytes()));
+ fprintf(
+ stderr, "maxoverlap: %lld\n",
+ static_cast<long long>(dbfull()->TEST_MaxNextLevelOverlappingBytes()));
for (int level = 0; level < config::kNumLevels; level++) {
int num = NumTableFilesAtLevel(level);
if (num > 0) {
@@ -506,15 +529,42 @@ class DBTest {
}
return files_renamed;
}
+
+ private:
+ // Sequence of option configurations to try
+ enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };
+
+ const FilterPolicy* filter_policy_;
+ int option_config_;
};
TEST(DBTest, Empty) {
do {
- ASSERT_TRUE(db_ != NULL);
+ ASSERT_TRUE(db_ != nullptr);
ASSERT_EQ("NOT_FOUND", Get("foo"));
} while (ChangeOptions());
}
+TEST(DBTest, EmptyKey) {
+ do {
+ ASSERT_OK(Put("", "v1"));
+ ASSERT_EQ("v1", Get(""));
+ ASSERT_OK(Put("", "v2"));
+ ASSERT_EQ("v2", Get(""));
+ } while (ChangeOptions());
+}
+
+TEST(DBTest, EmptyValue) {
+ do {
+ ASSERT_OK(Put("key", "v1"));
+ ASSERT_EQ("v1", Get("key"));
+ ASSERT_OK(Put("key", ""));
+ ASSERT_EQ("", Get("key"));
+ ASSERT_OK(Put("key", "v2"));
+ ASSERT_EQ("v2", Get("key"));
+ } while (ChangeOptions());
+}
+
TEST(DBTest, ReadWrite) {
do {
ASSERT_OK(Put("foo", "v1"));
@@ -547,11 +597,13 @@ TEST(DBTest, GetFromImmutableLayer) {
ASSERT_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
- env_->delay_data_sync_.Release_Store(env_); // Block sync calls
- Put("k1", std::string(100000, 'x')); // Fill memtable
- Put("k2", std::string(100000, 'y')); // Trigger compaction
+ // Block sync calls.
+ env_->delay_data_sync_.store(true, std::memory_order_release);
+ Put("k1", std::string(100000, 'x')); // Fill memtable.
+ Put("k2", std::string(100000, 'y')); // Trigger compaction.
ASSERT_EQ("v1", Get("foo"));
- env_->delay_data_sync_.Release_Store(NULL); // Release sync calls
+ // Release sync calls.
+ env_->delay_data_sync_.store(false, std::memory_order_release);
} while (ChangeOptions());
}
@@ -568,9 +620,9 @@ TEST(DBTest, GetMemUsage) {
ASSERT_OK(Put("foo", "v1"));
std::string val;
ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
- int mem_usage = atoi(val.c_str());
+ int mem_usage = std::stoi(val);
ASSERT_GT(mem_usage, 0);
- ASSERT_LT(mem_usage, 5*1024*1024);
+ ASSERT_LT(mem_usage, 5 * 1024 * 1024);
} while (ChangeOptions());
}
@@ -592,6 +644,55 @@ TEST(DBTest, GetSnapshot) {
} while (ChangeOptions());
}
+TEST(DBTest, GetIdenticalSnapshots) {
+ do {
+ // Try with both a short key and a long key
+ for (int i = 0; i < 2; i++) {
+ std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
+ ASSERT_OK(Put(key, "v1"));
+ const Snapshot* s1 = db_->GetSnapshot();
+ const Snapshot* s2 = db_->GetSnapshot();
+ const Snapshot* s3 = db_->GetSnapshot();
+ ASSERT_OK(Put(key, "v2"));
+ ASSERT_EQ("v2", Get(key));
+ ASSERT_EQ("v1", Get(key, s1));
+ ASSERT_EQ("v1", Get(key, s2));
+ ASSERT_EQ("v1", Get(key, s3));
+ db_->ReleaseSnapshot(s1);
+ dbfull()->TEST_CompactMemTable();
+ ASSERT_EQ("v2", Get(key));
+ ASSERT_EQ("v1", Get(key, s2));
+ db_->ReleaseSnapshot(s2);
+ ASSERT_EQ("v1", Get(key, s3));
+ db_->ReleaseSnapshot(s3);
+ }
+ } while (ChangeOptions());
+}
+
+TEST(DBTest, IterateOverEmptySnapshot) {
+ do {
+ const Snapshot* snapshot = db_->GetSnapshot();
+ ReadOptions read_options;
+ read_options.snapshot = snapshot;
+ ASSERT_OK(Put("foo", "v1"));
+ ASSERT_OK(Put("foo", "v2"));
+
+ Iterator* iterator1 = db_->NewIterator(read_options);
+ iterator1->SeekToFirst();
+ ASSERT_TRUE(!iterator1->Valid());
+ delete iterator1;
+
+ dbfull()->TEST_CompactMemTable();
+
+ Iterator* iterator2 = db_->NewIterator(read_options);
+ iterator2->SeekToFirst();
+ ASSERT_TRUE(!iterator2->Valid());
+ delete iterator2;
+
+ db_->ReleaseSnapshot(snapshot);
+ } while (ChangeOptions());
+}
+
TEST(DBTest, GetLevel0Ordering) {
do {
// Check that we process level-0 files in correct order. The code
@@ -646,8 +747,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
// Step 1: First place sstables in levels 0 and 2
int compaction_count = 0;
- while (NumTableFilesAtLevel(0) == 0 ||
- NumTableFilesAtLevel(2) == 0) {
+ while (NumTableFilesAtLevel(0) == 0 || NumTableFilesAtLevel(2) == 0) {
ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
compaction_count++;
Put("a", "begin");
@@ -656,7 +756,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
}
// Step 2: clear level 1 if necessary.
- dbfull()->TEST_CompactRange(1, NULL, NULL);
+ dbfull()->TEST_CompactRange(1, nullptr, nullptr);
ASSERT_EQ(NumTableFilesAtLevel(0), 1);
ASSERT_EQ(NumTableFilesAtLevel(1), 0);
ASSERT_EQ(NumTableFilesAtLevel(2), 1);
@@ -784,10 +884,10 @@ TEST(DBTest, IterMulti) {
ASSERT_EQ(IterStatus(iter), "b->vb");
// Make sure iter stays at snapshot
- ASSERT_OK(Put("a", "va2"));
+ ASSERT_OK(Put("a", "va2"));
ASSERT_OK(Put("a2", "va3"));
- ASSERT_OK(Put("b", "vb2"));
- ASSERT_OK(Put("c", "vc2"));
+ ASSERT_OK(Put("b", "vb2"));
+ ASSERT_OK(Put("c", "vc2"));
ASSERT_OK(Delete("b"));
iter->SeekToFirst();
ASSERT_EQ(IterStatus(iter), "a->va");
@@ -978,7 +1078,7 @@ TEST(DBTest, RecoverWithLargeLog) {
TEST(DBTest, CompactionsGenerateMultipleFiles) {
Options options = CurrentOptions();
- options.write_buffer_size = 100000000; // Large write buffer
+ options.write_buffer_size = 100000000; // Large write buffer
Reopen(&options);
Random rnd(301);
@@ -993,7 +1093,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) {
// Reopening moves updates to level-0
Reopen(&options);
- dbfull()->TEST_CompactRange(0, NULL, NULL);
+ dbfull()->TEST_CompactRange(0, nullptr, nullptr);
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
ASSERT_GT(NumTableFilesAtLevel(1), 1);
@@ -1017,7 +1117,7 @@ TEST(DBTest, RepeatedWritesToSameKey) {
for (int i = 0; i < 5 * kMaxFiles; i++) {
Put("key", value);
ASSERT_LE(TotalTableFiles(), kMaxFiles);
- fprintf(stderr, "after %d: %d files\n", int(i+1), TotalTableFiles());
+ fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles());
}
}
@@ -1044,29 +1144,28 @@ TEST(DBTest, SparseMerge) {
}
Put("C", "vc");
dbfull()->TEST_CompactMemTable();
- dbfull()->TEST_CompactRange(0, NULL, NULL);
+ dbfull()->TEST_CompactRange(0, nullptr, nullptr);
// Make sparse update
- Put("A", "va2");
+ Put("A", "va2");
Put("B100", "bvalue2");
- Put("C", "vc2");
+ Put("C", "vc2");
dbfull()->TEST_CompactMemTable();
// Compactions should not cause us to create a situation where
// a file overlaps too much data at the next level.
- ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
- dbfull()->TEST_CompactRange(0, NULL, NULL);
- ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
- dbfull()->TEST_CompactRange(1, NULL, NULL);
- ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+ ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
+ dbfull()->TEST_CompactRange(0, nullptr, nullptr);
+ ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
+ dbfull()->TEST_CompactRange(1, nullptr, nullptr);
+ ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
}
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
bool result = (val >= low) && (val <= high);
if (!result) {
fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
- (unsigned long long)(val),
- (unsigned long long)(low),
+ (unsigned long long)(val), (unsigned long long)(low),
(unsigned long long)(high));
}
return result;
@@ -1075,7 +1174,7 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
TEST(DBTest, ApproximateSizes) {
do {
Options options = CurrentOptions();
- options.write_buffer_size = 100000000; // Large write buffer
+ options.write_buffer_size = 100000000; // Large write buffer
options.compression = kNoCompression;
DestroyAndReopen();
@@ -1110,12 +1209,13 @@ TEST(DBTest, ApproximateSizes) {
for (int compact_start = 0; compact_start < N; compact_start += 10) {
for (int i = 0; i < N; i += 10) {
- ASSERT_TRUE(Between(Size("", Key(i)), S1*i, S2*i));
- ASSERT_TRUE(Between(Size("", Key(i)+".suffix"), S1*(i+1), S2*(i+1)));
- ASSERT_TRUE(Between(Size(Key(i), Key(i+10)), S1*10, S2*10));
+ ASSERT_TRUE(Between(Size("", Key(i)), S1 * i, S2 * i));
+ ASSERT_TRUE(Between(Size("", Key(i) + ".suffix"), S1 * (i + 1),
+ S2 * (i + 1)));
+ ASSERT_TRUE(Between(Size(Key(i), Key(i + 10)), S1 * 10, S2 * 10));
}
- ASSERT_TRUE(Between(Size("", Key(50)), S1*50, S2*50));
- ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), S1*50, S2*50));
+ ASSERT_TRUE(Between(Size("", Key(50)), S1 * 50, S2 * 50));
+ ASSERT_TRUE(Between(Size("", Key(50) + ".suffix"), S1 * 50, S2 * 50));
std::string cstart_str = Key(compact_start);
std::string cend_str = Key(compact_start + 9);
@@ -1168,7 +1268,7 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));
- dbfull()->TEST_CompactRange(0, NULL, NULL);
+ dbfull()->TEST_CompactRange(0, nullptr, nullptr);
}
} while (ChangeOptions());
}
@@ -1182,7 +1282,7 @@ TEST(DBTest, IteratorPinsRef) {
// Write to force compactions
Put("foo", "newvalue1");
for (int i = 0; i < 100; i++) {
- ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
+ ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
}
Put("foo", "newvalue2");
@@ -1234,7 +1334,7 @@ TEST(DBTest, HiddenValuesAreRemoved) {
Put("pastfoo", "v");
const Snapshot* snapshot = db_->GetSnapshot();
Put("foo", "tiny");
- Put("pastfoo2", "v2"); // Advance sequence number one more
+ Put("pastfoo2", "v2"); // Advance sequence number one more
ASSERT_OK(dbfull()->TEST_CompactMemTable());
ASSERT_GT(NumTableFilesAtLevel(0), 0);
@@ -1244,11 +1344,11 @@ TEST(DBTest, HiddenValuesAreRemoved) {
db_->ReleaseSnapshot(snapshot);
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]");
Slice x("x");
- dbfull()->TEST_CompactRange(0, NULL, &x);
+ dbfull()->TEST_CompactRange(0, nullptr, &x);
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
ASSERT_GE(NumTableFilesAtLevel(1), 1);
- dbfull()->TEST_CompactRange(1, NULL, &x);
+ dbfull()->TEST_CompactRange(1, nullptr, &x);
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));
@@ -1259,14 +1359,14 @@ TEST(DBTest, DeletionMarkers1) {
Put("foo", "v1");
ASSERT_OK(dbfull()->TEST_CompactMemTable());
const int last = config::kMaxMemCompactLevel;
- ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
+ ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
// Place a table at level last-1 to prevent merging with preceding mutation
Put("a", "begin");
Put("z", "end");
dbfull()->TEST_CompactMemTable();
ASSERT_EQ(NumTableFilesAtLevel(last), 1);
- ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
+ ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);
Delete("foo");
Put("foo", "v2");
@@ -1274,11 +1374,11 @@ TEST(DBTest, DeletionMarkers1) {
ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
Slice z("z");
- dbfull()->TEST_CompactRange(last-2, NULL, &z);
+ dbfull()->TEST_CompactRange(last - 2, nullptr, &z);
// DEL eliminated, but v1 remains because we aren't compacting that level
// (DEL can be eliminated because v2 hides v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
- dbfull()->TEST_CompactRange(last-1, NULL, NULL);
+ dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
// Merging last-1 w/ last, so we are the base level for "foo", so
// DEL is removed. (as is v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
@@ -1288,23 +1388,23 @@ TEST(DBTest, DeletionMarkers2) {
Put("foo", "v1");
ASSERT_OK(dbfull()->TEST_CompactMemTable());
const int last = config::kMaxMemCompactLevel;
- ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
+ ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
// Place a table at level last-1 to prevent merging with preceding mutation
Put("a", "begin");
Put("z", "end");
dbfull()->TEST_CompactMemTable();
ASSERT_EQ(NumTableFilesAtLevel(last), 1);
- ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
+ ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);
Delete("foo");
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
- dbfull()->TEST_CompactRange(last-2, NULL, NULL);
+ dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr);
// DEL kept: "last" file overlaps
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
- dbfull()->TEST_CompactRange(last-1, NULL, NULL);
+ dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
// Merging last-1 w/ last, so we are the base level for "foo", so
// DEL is removed. (as is v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
@@ -1314,7 +1414,8 @@ TEST(DBTest, OverlapInLevel0) {
do {
ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";
- // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
+ // Fill levels 1 and 2 to disable the pushing of new memtables to levels >
+ // 0.
ASSERT_OK(Put("100", "v100"));
ASSERT_OK(Put("999", "v999"));
dbfull()->TEST_CompactMemTable();
@@ -1337,8 +1438,8 @@ TEST(DBTest, OverlapInLevel0) {
ASSERT_EQ("2,1,1", FilesPerLevel());
// Compact away the placeholder files we created initially
- dbfull()->TEST_CompactRange(1, NULL, NULL);
- dbfull()->TEST_CompactRange(2, NULL, NULL);
+ dbfull()->TEST_CompactRange(1, nullptr, nullptr);
+ dbfull()->TEST_CompactRange(2, nullptr, nullptr);
ASSERT_EQ("2", FilesPerLevel());
// Do a memtable compaction. Before bug-fix, the compaction would
@@ -1370,21 +1471,21 @@ TEST(DBTest, L0_CompactionBug_Issue44_a) {
TEST(DBTest, L0_CompactionBug_Issue44_b) {
Reopen();
- Put("","");
+ Put("", "");
Reopen();
Delete("e");
- Put("","");
+ Put("", "");
Reopen();
Put("c", "cv");
Reopen();
- Put("","");
+ Put("", "");
Reopen();
- Put("","");
+ Put("", "");
DelayMilliseconds(1000); // Wait for compaction to finish
Reopen();
- Put("d","dv");
+ Put("d", "dv");
Reopen();
- Put("","");
+ Put("", "");
Reopen();
Delete("d");
Delete("b");
@@ -1394,17 +1495,26 @@ TEST(DBTest, L0_CompactionBug_Issue44_b) {
ASSERT_EQ("(->)(c->cv)", Contents());
}
+TEST(DBTest, Fflush_Issue474) {
+ static const int kNum = 100000;
+ Random rnd(test::RandomSeed());
+ for (int i = 0; i < kNum; i++) {
+ fflush(nullptr);
+ ASSERT_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100)));
+ }
+}
+
TEST(DBTest, ComparatorCheck) {
class NewComparator : public Comparator {
public:
- virtual const char* Name() const { return "leveldb.NewComparator"; }
- virtual int Compare(const Slice& a, const Slice& b) const {
+ const char* Name() const override { return "leveldb.NewComparator"; }
+ int Compare(const Slice& a, const Slice& b) const override {
return BytewiseComparator()->Compare(a, b);
}
- virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
+ void FindShortestSeparator(std::string* s, const Slice& l) const override {
BytewiseComparator()->FindShortestSeparator(s, l);
}
- virtual void FindShortSuccessor(std::string* key) const {
+ void FindShortSuccessor(std::string* key) const override {
BytewiseComparator()->FindShortSuccessor(key);
}
};
@@ -1420,21 +1530,22 @@ TEST(DBTest, ComparatorCheck) {
TEST(DBTest, CustomComparator) {
class NumberComparator : public Comparator {
public:
- virtual const char* Name() const { return "test.NumberComparator"; }
- virtual int Compare(const Slice& a, const Slice& b) const {
+ const char* Name() const override { return "test.NumberComparator"; }
+ int Compare(const Slice& a, const Slice& b) const override {
return ToNumber(a) - ToNumber(b);
}
- virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
- ToNumber(*s); // Check format
- ToNumber(l); // Check format
+ void FindShortestSeparator(std::string* s, const Slice& l) const override {
+ ToNumber(*s); // Check format
+ ToNumber(l); // Check format
}
- virtual void FindShortSuccessor(std::string* key) const {
- ToNumber(*key); // Check format
+ void FindShortSuccessor(std::string* key) const override {
+ ToNumber(*key); // Check format
}
+
private:
static int ToNumber(const Slice& x) {
// Check that there are no extra characters.
- ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size()-1] == ']')
+ ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
<< EscapeString(x);
int val;
char ignored;
@@ -1447,7 +1558,7 @@ TEST(DBTest, CustomComparator) {
Options new_options = CurrentOptions();
new_options.create_if_missing = true;
new_options.comparator = &cmp;
- new_options.filter_policy = NULL; // Cannot use bloom filters
+ new_options.filter_policy = nullptr; // Cannot use bloom filters
new_options.write_buffer_size = 1000; // Compact more often
DestroyAndReopen(&new_options);
ASSERT_OK(Put("[10]", "ten"));
@@ -1465,7 +1576,7 @@ TEST(DBTest, CustomComparator) {
for (int run = 0; run < 2; run++) {
for (int i = 0; i < 1000; i++) {
char buf[100];
- snprintf(buf, sizeof(buf), "[%d]", i*10);
+ snprintf(buf, sizeof(buf), "[%d]", i * 10);
ASSERT_OK(Put(buf, buf));
}
Compact("[0]", "[1000000]");
@@ -1502,7 +1613,7 @@ TEST(DBTest, ManualCompaction) {
// Compact all
MakeTables(1, "a", "z");
ASSERT_EQ("0,1,2", FilesPerLevel());
- db_->CompactRange(NULL, NULL);
+ db_->CompactRange(nullptr, nullptr);
ASSERT_EQ("0,0,1", FilesPerLevel());
}
@@ -1511,42 +1622,94 @@ TEST(DBTest, DBOpen_Options) {
DestroyDB(dbname, Options());
// Does not exist, and create_if_missing == false: error
- DB* db = NULL;
+ DB* db = nullptr;
Options opts;
opts.create_if_missing = false;
Status s = DB::Open(opts, dbname, &db);
- ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != NULL);
- ASSERT_TRUE(db == NULL);
+ ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
+ ASSERT_TRUE(db == nullptr);
// Does not exist, and create_if_missing == true: OK
opts.create_if_missing = true;
s = DB::Open(opts, dbname, &db);
ASSERT_OK(s);
- ASSERT_TRUE(db != NULL);
+ ASSERT_TRUE(db != nullptr);
delete db;
- db = NULL;
+ db = nullptr;
// Does exist, and error_if_exists == true: error
opts.create_if_missing = false;
opts.error_if_exists = true;
s = DB::Open(opts, dbname, &db);
- ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != NULL);
- ASSERT_TRUE(db == NULL);
+ ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
+ ASSERT_TRUE(db == nullptr);
// Does exist, and error_if_exists == false: OK
opts.create_if_missing = true;
opts.error_if_exists = false;
s = DB::Open(opts, dbname, &db);
ASSERT_OK(s);
- ASSERT_TRUE(db != NULL);
+ ASSERT_TRUE(db != nullptr);
+
+ delete db;
+ db = nullptr;
+}
+
+TEST(DBTest, DestroyEmptyDir) {
+ std::string dbname = test::TmpDir() + "/db_empty_dir";
+ TestEnv env(Env::Default());
+ env.DeleteDir(dbname);
+ ASSERT_TRUE(!env.FileExists(dbname));
+
+ Options opts;
+ opts.env = &env;
+
+ ASSERT_OK(env.CreateDir(dbname));
+ ASSERT_TRUE(env.FileExists(dbname));
+ std::vector<std::string> children;
+ ASSERT_OK(env.GetChildren(dbname, &children));
+  // The stock Envs do not filter out '.' and '..' special files.
+ ASSERT_EQ(2, children.size());
+ ASSERT_OK(DestroyDB(dbname, opts));
+ ASSERT_TRUE(!env.FileExists(dbname));
+
+ // Should also be destroyed if Env is filtering out dot files.
+ env.SetIgnoreDotFiles(true);
+ ASSERT_OK(env.CreateDir(dbname));
+ ASSERT_TRUE(env.FileExists(dbname));
+ ASSERT_OK(env.GetChildren(dbname, &children));
+ ASSERT_EQ(0, children.size());
+ ASSERT_OK(DestroyDB(dbname, opts));
+ ASSERT_TRUE(!env.FileExists(dbname));
+}
+
+TEST(DBTest, DestroyOpenDB) {
+ std::string dbname = test::TmpDir() + "/open_db_dir";
+ env_->DeleteDir(dbname);
+ ASSERT_TRUE(!env_->FileExists(dbname));
+
+ Options opts;
+ opts.create_if_missing = true;
+ DB* db = nullptr;
+ ASSERT_OK(DB::Open(opts, dbname, &db));
+ ASSERT_TRUE(db != nullptr);
+
+ // Must fail to destroy an open db.
+ ASSERT_TRUE(env_->FileExists(dbname));
+ ASSERT_TRUE(!DestroyDB(dbname, Options()).ok());
+ ASSERT_TRUE(env_->FileExists(dbname));
delete db;
- db = NULL;
+ db = nullptr;
+
+ // Should succeed destroying a closed db.
+ ASSERT_OK(DestroyDB(dbname, Options()));
+ ASSERT_TRUE(!env_->FileExists(dbname));
}
TEST(DBTest, Locking) {
- DB* db2 = NULL;
+ DB* db2 = nullptr;
Status s = DB::Open(CurrentOptions(), dbname_, &db2);
ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db";
}
@@ -1561,13 +1724,14 @@ TEST(DBTest, NoSpace) {
ASSERT_EQ("v1", Get("foo"));
Compact("a", "z");
const int num_files = CountFiles();
- env_->no_space_.Release_Store(env_); // Force out-of-space errors
+ // Force out-of-space errors.
+ env_->no_space_.store(true, std::memory_order_release);
for (int i = 0; i < 10; i++) {
- for (int level = 0; level < config::kNumLevels-1; level++) {
- dbfull()->TEST_CompactRange(level, NULL, NULL);
+ for (int level = 0; level < config::kNumLevels - 1; level++) {
+ dbfull()->TEST_CompactRange(level, nullptr, nullptr);
}
}
- env_->no_space_.Release_Store(NULL);
+ env_->no_space_.store(false, std::memory_order_release);
ASSERT_LT(CountFiles(), num_files + 3);
}
@@ -1577,7 +1741,8 @@ TEST(DBTest, NonWritableFileSystem) {
options.env = env_;
Reopen(&options);
ASSERT_OK(Put("foo", "v1"));
- env_->non_writable_.Release_Store(env_); // Force errors for new files
+ // Force errors for new files.
+ env_->non_writable_.store(true, std::memory_order_release);
std::string big(100000, 'x');
int errors = 0;
for (int i = 0; i < 20; i++) {
@@ -1588,7 +1753,7 @@ TEST(DBTest, NonWritableFileSystem) {
}
}
ASSERT_GT(errors, 0);
- env_->non_writable_.Release_Store(NULL);
+ env_->non_writable_.store(false, std::memory_order_release);
}
TEST(DBTest, WriteSyncError) {
@@ -1598,7 +1763,7 @@ TEST(DBTest, WriteSyncError) {
Options options = CurrentOptions();
options.env = env_;
Reopen(&options);
- env_->data_sync_error_.Release_Store(env_);
+ env_->data_sync_error_.store(true, std::memory_order_release);
// (b) Normal write should succeed
WriteOptions w;
@@ -1612,7 +1777,7 @@ TEST(DBTest, WriteSyncError) {
ASSERT_EQ("NOT_FOUND", Get("k2"));
// (d) make sync behave normally
- env_->data_sync_error_.Release_Store(NULL);
+ env_->data_sync_error_.store(false, std::memory_order_release);
// (e) Do a non-sync write; should fail
w.sync = false;
@@ -1632,9 +1797,8 @@ TEST(DBTest, ManifestWriteError) {
// We iterate twice. In the second iteration, everything is the
// same except the log record never makes it to the MANIFEST file.
for (int iter = 0; iter < 2; iter++) {
- port::AtomicPointer* error_type = (iter == 0)
- ? &env_->manifest_sync_error_
- : &env_->manifest_write_error_;
+ std::atomic<bool>* error_type = (iter == 0) ? &env_->manifest_sync_error_
+ : &env_->manifest_write_error_;
// Insert foo=>bar mapping
Options options = CurrentOptions();
@@ -1649,15 +1813,15 @@ TEST(DBTest, ManifestWriteError) {
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("bar", Get("foo"));
const int last = config::kMaxMemCompactLevel;
- ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo=>bar is now in last level
+ ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo=>bar is now in last level
// Merging compaction (will fail)
- error_type->Release_Store(env_);
- dbfull()->TEST_CompactRange(last, NULL, NULL); // Should fail
+ error_type->store(true, std::memory_order_release);
+ dbfull()->TEST_CompactRange(last, nullptr, nullptr); // Should fail
ASSERT_EQ("bar", Get("foo"));
// Recovery: should not lose data
- error_type->Release_Store(NULL);
+ error_type->store(false, std::memory_order_release);
Reopen(&options);
ASSERT_EQ("bar", Get("foo"));
}
@@ -1677,8 +1841,7 @@ TEST(DBTest, MissingSSTFile) {
options.paranoid_checks = true;
Status s = TryReopen(&options);
ASSERT_TRUE(!s.ok());
- ASSERT_TRUE(s.ToString().find("issing") != std::string::npos)
- << s.ToString();
+ ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString();
}
TEST(DBTest, StillReadSST) {
@@ -1728,7 +1891,7 @@ TEST(DBTest, BloomFilter) {
dbfull()->TEST_CompactMemTable();
// Prevent auto compactions triggered by seeks
- env_->delay_data_sync_.Release_Store(env_);
+ env_->delay_data_sync_.store(true, std::memory_order_release);
// Lookup present keys. Should rarely read from small sstable.
env_->random_read_counter_.Reset();
@@ -1738,7 +1901,7 @@ TEST(DBTest, BloomFilter) {
int reads = env_->random_read_counter_.Read();
fprintf(stderr, "%d present => %d reads\n", N, reads);
ASSERT_GE(reads, N);
- ASSERT_LE(reads, N + 2*N/100);
+ ASSERT_LE(reads, N + 2 * N / 100);
  // Lookup missing keys. Should rarely read from either sstable.
env_->random_read_counter_.Reset();
@@ -1747,9 +1910,9 @@ TEST(DBTest, BloomFilter) {
}
reads = env_->random_read_counter_.Read();
fprintf(stderr, "%d missing => %d reads\n", N, reads);
- ASSERT_LE(reads, 3*N/100);
+ ASSERT_LE(reads, 3 * N / 100);
- env_->delay_data_sync_.Release_Store(NULL);
+ env_->delay_data_sync_.store(false, std::memory_order_release);
Close();
delete options.block_cache;
delete options.filter_policy;
@@ -1764,9 +1927,9 @@ static const int kNumKeys = 1000;
struct MTState {
DBTest* test;
- port::AtomicPointer stop;
- port::AtomicPointer counter[kNumThreads];
- port::AtomicPointer thread_done[kNumThreads];
+ std::atomic<bool> stop;
+ std::atomic<int> counter[kNumThreads];
+ std::atomic<bool> thread_done[kNumThreads];
};
struct MTThread {
@@ -1778,13 +1941,13 @@ static void MTThreadBody(void* arg) {
MTThread* t = reinterpret_cast<MTThread*>(arg);
int id = t->id;
DB* db = t->state->test->db_;
- uintptr_t counter = 0;
+ int counter = 0;
fprintf(stderr, "... starting thread %d\n", id);
Random rnd(1000 + id);
std::string value;
char valbuf[1500];
- while (t->state->stop.Acquire_Load() == NULL) {
- t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter));
+ while (!t->state->stop.load(std::memory_order_acquire)) {
+ t->state->counter[id].store(counter, std::memory_order_release);
int key = rnd.Uniform(kNumKeys);
char keybuf[20];
@@ -1793,8 +1956,8 @@ static void MTThreadBody(void* arg) {
if (rnd.OneIn(2)) {
// Write values of the form <key, my id, counter>.
      // We add some padding to force compactions.
- snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d",
- key, id, static_cast<int>(counter));
+ snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
+ static_cast<int>(counter));
ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
} else {
// Read a value and verify that it matches the pattern written above.
@@ -1809,14 +1972,13 @@ static void MTThreadBody(void* arg) {
ASSERT_EQ(k, key);
ASSERT_GE(w, 0);
ASSERT_LT(w, kNumThreads);
- ASSERT_LE(static_cast<uintptr_t>(c), reinterpret_cast<uintptr_t>(
- t->state->counter[w].Acquire_Load()));
+ ASSERT_LE(c, t->state->counter[w].load(std::memory_order_acquire));
}
}
counter++;
}
- t->state->thread_done[id].Release_Store(t);
- fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
+ t->state->thread_done[id].store(true, std::memory_order_release);
+ fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter);
}
} // namespace
@@ -1826,10 +1988,10 @@ TEST(DBTest, MultiThreaded) {
// Initialize state
MTState mt;
mt.test = this;
- mt.stop.Release_Store(0);
+ mt.stop.store(false, std::memory_order_release);
for (int id = 0; id < kNumThreads; id++) {
- mt.counter[id].Release_Store(0);
- mt.thread_done[id].Release_Store(0);
+ mt.counter[id].store(false, std::memory_order_release);
+ mt.thread_done[id].store(false, std::memory_order_release);
}
// Start threads
@@ -1844,9 +2006,9 @@ TEST(DBTest, MultiThreaded) {
DelayMilliseconds(kTestSeconds * 1000);
// Stop the threads and wait for them to finish
- mt.stop.Release_Store(&mt);
+ mt.stop.store(true, std::memory_order_release);
for (int id = 0; id < kNumThreads; id++) {
- while (mt.thread_done[id].Acquire_Load() == NULL) {
+ while (!mt.thread_done[id].load(std::memory_order_acquire)) {
DelayMilliseconds(100);
}
}
@@ -1857,28 +2019,28 @@ namespace {
typedef std::map<std::string, std::string> KVMap;
}
-class ModelDB: public DB {
+class ModelDB : public DB {
public:
class ModelSnapshot : public Snapshot {
public:
KVMap map_;
};
- explicit ModelDB(const Options& options): options_(options) { }
- ~ModelDB() { }
- virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
+ explicit ModelDB(const Options& options) : options_(options) {}
+ ~ModelDB() override = default;
+ Status Put(const WriteOptions& o, const Slice& k, const Slice& v) override {
return DB::Put(o, k, v);
}
- virtual Status Delete(const WriteOptions& o, const Slice& key) {
+ Status Delete(const WriteOptions& o, const Slice& key) override {
return DB::Delete(o, key);
}
- virtual Status Get(const ReadOptions& options,
- const Slice& key, std::string* value) {
- assert(false); // Not implemented
+ Status Get(const ReadOptions& options, const Slice& key,
+ std::string* value) override {
+ assert(false); // Not implemented
return Status::NotFound(key);
}
- virtual Iterator* NewIterator(const ReadOptions& options) {
- if (options.snapshot == NULL) {
+ Iterator* NewIterator(const ReadOptions& options) override {
+ if (options.snapshot == nullptr) {
KVMap* saved = new KVMap;
*saved = map_;
return new ModelIter(saved, true);
@@ -1888,68 +2050,65 @@ class ModelDB: public DB {
return new ModelIter(snapshot_state, false);
}
}
- virtual const Snapshot* GetSnapshot() {
+ const Snapshot* GetSnapshot() override {
ModelSnapshot* snapshot = new ModelSnapshot;
snapshot->map_ = map_;
return snapshot;
}
- virtual void ReleaseSnapshot(const Snapshot* snapshot) {
+ void ReleaseSnapshot(const Snapshot* snapshot) override {
delete reinterpret_cast<const ModelSnapshot*>(snapshot);
}
- virtual Status Write(const WriteOptions& options, WriteBatch* batch) {
+ Status Write(const WriteOptions& options, WriteBatch* batch) override {
class Handler : public WriteBatch::Handler {
public:
KVMap* map_;
- virtual void Put(const Slice& key, const Slice& value) {
+ void Put(const Slice& key, const Slice& value) override {
(*map_)[key.ToString()] = value.ToString();
}
- virtual void Delete(const Slice& key) {
- map_->erase(key.ToString());
- }
+ void Delete(const Slice& key) override { map_->erase(key.ToString()); }
};
Handler handler;
handler.map_ = &map_;
return batch->Iterate(&handler);
}
- virtual bool GetProperty(const Slice& property, std::string* value) {
+ bool GetProperty(const Slice& property, std::string* value) override {
return false;
}
- virtual void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) {
+ void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) override {
for (int i = 0; i < n; i++) {
sizes[i] = 0;
}
}
- virtual void CompactRange(const Slice* start, const Slice* end) {
- }
+ void CompactRange(const Slice* start, const Slice* end) override {}
private:
- class ModelIter: public Iterator {
+ class ModelIter : public Iterator {
public:
ModelIter(const KVMap* map, bool owned)
- : map_(map), owned_(owned), iter_(map_->end()) {
- }
- ~ModelIter() {
+ : map_(map), owned_(owned), iter_(map_->end()) {}
+ ~ModelIter() override {
if (owned_) delete map_;
}
- virtual bool Valid() const { return iter_ != map_->end(); }
- virtual void SeekToFirst() { iter_ = map_->begin(); }
- virtual void SeekToLast() {
+ bool Valid() const override { return iter_ != map_->end(); }
+ void SeekToFirst() override { iter_ = map_->begin(); }
+ void SeekToLast() override {
if (map_->empty()) {
iter_ = map_->end();
} else {
iter_ = map_->find(map_->rbegin()->first);
}
}
- virtual void Seek(const Slice& k) {
+ void Seek(const Slice& k) override {
iter_ = map_->lower_bound(k.ToString());
}
- virtual void Next() { ++iter_; }
- virtual void Prev() { --iter_; }
- virtual Slice key() const { return iter_->first; }
- virtual Slice value() const { return iter_->second; }
- virtual Status status() const { return Status::OK(); }
+ void Next() override { ++iter_; }
+ void Prev() override { --iter_; }
+ Slice key() const override { return iter_->first; }
+ Slice value() const override { return iter_->second; }
+ Status status() const override { return Status::OK(); }
+
private:
const KVMap* const map_;
const bool owned_; // Do we own map_
@@ -1959,16 +2118,7 @@ class ModelDB: public DB {
KVMap map_;
};
-static std::string RandomKey(Random* rnd) {
- int len = (rnd->OneIn(3)
- ? 1 // Short sometimes to encourage collisions
- : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
- return test::RandomKey(rnd, len);
-}
-
-static bool CompareIterators(int step,
- DB* model,
- DB* db,
+static bool CompareIterators(int step, DB* model, DB* db,
const Snapshot* model_snap,
const Snapshot* db_snap) {
ReadOptions options;
@@ -1979,12 +2129,10 @@ static bool CompareIterators(int step,
bool ok = true;
int count = 0;
for (miter->SeekToFirst(), dbiter->SeekToFirst();
- ok && miter->Valid() && dbiter->Valid();
- miter->Next(), dbiter->Next()) {
+ ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
count++;
if (miter->key().compare(dbiter->key()) != 0) {
- fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n",
- step,
+ fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
EscapeString(miter->key()).c_str(),
EscapeString(dbiter->key()).c_str());
ok = false;
@@ -1993,8 +2141,7 @@ static bool CompareIterators(int step,
if (miter->value().compare(dbiter->value()) != 0) {
fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
- step,
- EscapeString(miter->key()).c_str(),
+ step, EscapeString(miter->key()).c_str(),
EscapeString(miter->value()).c_str(),
              EscapeString(dbiter->value()).c_str());
ok = false;
@@ -2019,8 +2166,8 @@ TEST(DBTest, Randomized) {
do {
ModelDB model(CurrentOptions());
const int N = 10000;
- const Snapshot* model_snap = NULL;
- const Snapshot* db_snap = NULL;
+ const Snapshot* model_snap = nullptr;
+ const Snapshot* db_snap = nullptr;
std::string k, v;
for (int step = 0; step < N; step++) {
if (step % 100 == 0) {
@@ -2028,22 +2175,19 @@ TEST(DBTest, Randomized) {
}
// TODO(sanjay): Test Get() works
int p = rnd.Uniform(100);
- if (p < 45) { // Put
+ if (p < 45) { // Put
k = RandomKey(&rnd);
- v = RandomString(&rnd,
- rnd.OneIn(20)
- ? 100 + rnd.Uniform(100)
- : rnd.Uniform(8));
+ v = RandomString(
+ &rnd, rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
ASSERT_OK(model.Put(WriteOptions(), k, v));
ASSERT_OK(db_->Put(WriteOptions(), k, v));
- } else if (p < 90) { // Delete
+ } else if (p < 90) { // Delete
k = RandomKey(&rnd);
ASSERT_OK(model.Delete(WriteOptions(), k));
ASSERT_OK(db_->Delete(WriteOptions(), k));
-
- } else { // Multi-element batch
+ } else { // Multi-element batch
WriteBatch b;
const int num = rnd.Uniform(8);
for (int i = 0; i < num; i++) {
@@ -2065,23 +2209,23 @@ TEST(DBTest, Randomized) {
}
if ((step % 100) == 0) {
- ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL));
+ ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
// Save a snapshot from each DB this time that we'll use next
// time we compare things, to make sure the current state is
// preserved with the snapshot
- if (model_snap != NULL) model.ReleaseSnapshot(model_snap);
- if (db_snap != NULL) db_->ReleaseSnapshot(db_snap);
+ if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
+ if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
Reopen();
- ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL));
+ ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
model_snap = model.GetSnapshot();
db_snap = db_->GetSnapshot();
}
}
- if (model_snap != NULL) model.ReleaseSnapshot(model_snap);
- if (db_snap != NULL) db_->ReleaseSnapshot(db_snap);
+ if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
+ if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
} while (ChangeOptions());
}
@@ -2095,15 +2239,15 @@ void BM_LogAndApply(int iters, int num_base_files) {
std::string dbname = test::TmpDir() + "/leveldb_test_benchmark";
DestroyDB(dbname, Options());
- DB* db = NULL;
+ DB* db = nullptr;
Options opts;
opts.create_if_missing = true;
Status s = DB::Open(opts, dbname, &db);
ASSERT_OK(s);
- ASSERT_TRUE(db != NULL);
+ ASSERT_TRUE(db != nullptr);
delete db;
- db = NULL;
+ db = nullptr;
Env* env = Env::Default();
@@ -2112,14 +2256,14 @@ void BM_LogAndApply(int iters, int num_base_files) {
InternalKeyComparator cmp(BytewiseComparator());
Options options;
- VersionSet vset(dbname, &options, NULL, &cmp);
+ VersionSet vset(dbname, &options, nullptr, &cmp);
bool save_manifest;
ASSERT_OK(vset.Recover(&save_manifest));
VersionEdit vbase;
uint64_t fnum = 1;
for (int i = 0; i < num_base_files; i++) {
- InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
- InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
+ InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+ InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
}
ASSERT_OK(vset.LogAndApply(&vbase, &mu));
@@ -2129,8 +2273,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
for (int i = 0; i < iters; i++) {
VersionEdit vedit;
vedit.DeleteFile(2, fnum);
- InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
- InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
+ InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+ InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
vset.LogAndApply(&vedit, &mu);
}
@@ -2139,8 +2283,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
char buf[16];
snprintf(buf, sizeof(buf), "%d", num_base_files);
fprintf(stderr,
- "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n",
- buf, iters, us, ((float)us) / iters);
+ "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n", buf,
+ iters, us, ((float)us) / iters);
}
} // namespace leveldb
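The db_test.cc hunks above are dominated by one mechanical change: the old port::AtomicPointer flags with Release_Store()/Acquire_Load() become std::atomic<bool> / std::atomic<int> members written with release stores and read with acquire loads. A minimal standalone sketch of that stop-flag pattern, for illustration only (it is not part of the patch and uses std::thread rather than leveldb's Env::StartThread):

#include <atomic>
#include <cstdio>
#include <thread>

int main() {
  std::atomic<bool> stop(false);
  std::atomic<int> counter(0);

  std::thread worker([&] {
    // Keep working until the main thread publishes the stop request.
    while (!stop.load(std::memory_order_acquire)) {
      counter.fetch_add(1, std::memory_order_relaxed);
    }
  });

  // The release store pairs with the worker's acquire load, so everything
  // written before the store is visible to the worker once it observes true.
  stop.store(true, std::memory_order_release);
  worker.join();
  std::printf("worker ran %d iterations\n",
              counter.load(std::memory_order_relaxed));
  return 0;
}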
diff --git a/src/leveldb/db/dbformat.cc b/src/leveldb/db/dbformat.cc
index 20a7ca4462..459eddf5b1 100644
--- a/src/leveldb/db/dbformat.cc
+++ b/src/leveldb/db/dbformat.cc
@@ -2,8 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include <stdio.h>
#include "db/dbformat.h"
+
+#include <stdio.h>
+
+#include <sstream>
+
#include "port/port.h"
#include "util/coding.h"
@@ -21,26 +25,20 @@ void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {
}
std::string ParsedInternalKey::DebugString() const {
- char buf[50];
- snprintf(buf, sizeof(buf), "' @ %llu : %d",
- (unsigned long long) sequence,
- int(type));
- std::string result = "'";
- result += EscapeString(user_key.ToString());
- result += buf;
- return result;
+ std::ostringstream ss;
+ ss << '\'' << EscapeString(user_key.ToString()) << "' @ " << sequence << " : "
+ << static_cast<int>(type);
+ return ss.str();
}
std::string InternalKey::DebugString() const {
- std::string result;
ParsedInternalKey parsed;
if (ParseInternalKey(rep_, &parsed)) {
- result = parsed.DebugString();
- } else {
- result = "(bad)";
- result.append(EscapeString(rep_));
+ return parsed.DebugString();
}
- return result;
+ std::ostringstream ss;
+ ss << "(bad)" << EscapeString(rep_);
+ return ss.str();
}
const char* InternalKeyComparator::Name() const {
@@ -65,9 +63,8 @@ int InternalKeyComparator::Compare(const Slice& akey, const Slice& bkey) const {
return r;
}
-void InternalKeyComparator::FindShortestSeparator(
- std::string* start,
- const Slice& limit) const {
+void InternalKeyComparator::FindShortestSeparator(std::string* start,
+ const Slice& limit) const {
// Attempt to shorten the user portion of the key
Slice user_start = ExtractUserKey(*start);
Slice user_limit = ExtractUserKey(limit);
@@ -77,7 +74,8 @@ void InternalKeyComparator::FindShortestSeparator(
user_comparator_->Compare(user_start, tmp) < 0) {
// User key has become shorter physically, but larger logically.
// Tack on the earliest possible number to the shortened user key.
- PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
+ PutFixed64(&tmp,
+ PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
assert(this->Compare(*start, tmp) < 0);
assert(this->Compare(tmp, limit) < 0);
start->swap(tmp);
@@ -92,15 +90,14 @@ void InternalKeyComparator::FindShortSuccessor(std::string* key) const {
user_comparator_->Compare(user_key, tmp) < 0) {
// User key has become shorter physically, but larger logically.
// Tack on the earliest possible number to the shortened user key.
- PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
+ PutFixed64(&tmp,
+ PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
assert(this->Compare(*key, tmp) < 0);
key->swap(tmp);
}
}
-const char* InternalFilterPolicy::Name() const {
- return user_policy_->Name();
-}
+const char* InternalFilterPolicy::Name() const { return user_policy_->Name(); }
void InternalFilterPolicy::CreateFilter(const Slice* keys, int n,
std::string* dst) const {
diff --git a/src/leveldb/db/dbformat.h b/src/leveldb/db/dbformat.h
index ea897b13c0..a1c30ed88c 100644
--- a/src/leveldb/db/dbformat.h
+++ b/src/leveldb/db/dbformat.h
@@ -5,7 +5,10 @@
#ifndef STORAGE_LEVELDB_DB_DBFORMAT_H_
#define STORAGE_LEVELDB_DB_DBFORMAT_H_
-#include <stdio.h>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+
#include "leveldb/comparator.h"
#include "leveldb/db.h"
#include "leveldb/filter_policy.h"
@@ -48,10 +51,7 @@ class InternalKey;
// Value types encoded as the last component of internal keys.
// DO NOT CHANGE THESE ENUM VALUES: they are embedded in the on-disk
// data structures.
-enum ValueType {
- kTypeDeletion = 0x0,
- kTypeValue = 0x1
-};
+enum ValueType { kTypeDeletion = 0x0, kTypeValue = 0x1 };
// kValueTypeForSeek defines the ValueType that should be passed when
// constructing a ParsedInternalKey object for seeking to a particular
// sequence number (since we sort sequence numbers in decreasing order
@@ -64,17 +64,16 @@ typedef uint64_t SequenceNumber;
// We leave eight bits empty at the bottom so a type and sequence#
// can be packed together into 64-bits.
-static const SequenceNumber kMaxSequenceNumber =
- ((0x1ull << 56) - 1);
+static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1);
struct ParsedInternalKey {
Slice user_key;
SequenceNumber sequence;
ValueType type;
- ParsedInternalKey() { } // Intentionally left uninitialized (for speed)
+ ParsedInternalKey() {} // Intentionally left uninitialized (for speed)
ParsedInternalKey(const Slice& u, const SequenceNumber& seq, ValueType t)
- : user_key(u), sequence(seq), type(t) { }
+ : user_key(u), sequence(seq), type(t) {}
std::string DebugString() const;
};
@@ -84,15 +83,13 @@ inline size_t InternalKeyEncodingLength(const ParsedInternalKey& key) {
}
// Append the serialization of "key" to *result.
-extern void AppendInternalKey(std::string* result,
- const ParsedInternalKey& key);
+void AppendInternalKey(std::string* result, const ParsedInternalKey& key);
// Attempt to parse an internal key from "internal_key". On success,
// stores the parsed data in "*result", and returns true.
//
// On error, returns false, leaves "*result" in an undefined state.
-extern bool ParseInternalKey(const Slice& internal_key,
- ParsedInternalKey* result);
+bool ParseInternalKey(const Slice& internal_key, ParsedInternalKey* result);
// Returns the user key portion of an internal key.
inline Slice ExtractUserKey(const Slice& internal_key) {
@@ -100,27 +97,19 @@ inline Slice ExtractUserKey(const Slice& internal_key) {
return Slice(internal_key.data(), internal_key.size() - 8);
}
-inline ValueType ExtractValueType(const Slice& internal_key) {
- assert(internal_key.size() >= 8);
- const size_t n = internal_key.size();
- uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
- unsigned char c = num & 0xff;
- return static_cast<ValueType>(c);
-}
-
// A comparator for internal keys that uses a specified comparator for
// the user key portion and breaks ties by decreasing sequence number.
class InternalKeyComparator : public Comparator {
private:
const Comparator* user_comparator_;
+
public:
- explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) { }
- virtual const char* Name() const;
- virtual int Compare(const Slice& a, const Slice& b) const;
- virtual void FindShortestSeparator(
- std::string* start,
- const Slice& limit) const;
- virtual void FindShortSuccessor(std::string* key) const;
+ explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) {}
+ const char* Name() const override;
+ int Compare(const Slice& a, const Slice& b) const override;
+ void FindShortestSeparator(std::string* start,
+ const Slice& limit) const override;
+ void FindShortSuccessor(std::string* key) const override;
const Comparator* user_comparator() const { return user_comparator_; }
@@ -131,11 +120,12 @@ class InternalKeyComparator : public Comparator {
class InternalFilterPolicy : public FilterPolicy {
private:
const FilterPolicy* const user_policy_;
+
public:
- explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) { }
- virtual const char* Name() const;
- virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const;
- virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const;
+ explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) {}
+ const char* Name() const override;
+ void CreateFilter(const Slice* keys, int n, std::string* dst) const override;
+ bool KeyMayMatch(const Slice& key, const Slice& filter) const override;
};
// Modules in this directory should keep internal keys wrapped inside
@@ -144,13 +134,18 @@ class InternalFilterPolicy : public FilterPolicy {
class InternalKey {
private:
std::string rep_;
+
public:
- InternalKey() { } // Leave rep_ as empty to indicate it is invalid
+ InternalKey() {} // Leave rep_ as empty to indicate it is invalid
InternalKey(const Slice& user_key, SequenceNumber s, ValueType t) {
AppendInternalKey(&rep_, ParsedInternalKey(user_key, s, t));
}
- void DecodeFrom(const Slice& s) { rep_.assign(s.data(), s.size()); }
+ bool DecodeFrom(const Slice& s) {
+ rep_.assign(s.data(), s.size());
+ return !rep_.empty();
+ }
+
Slice Encode() const {
assert(!rep_.empty());
return rep_;
@@ -168,8 +163,8 @@ class InternalKey {
std::string DebugString() const;
};
-inline int InternalKeyComparator::Compare(
- const InternalKey& a, const InternalKey& b) const {
+inline int InternalKeyComparator::Compare(const InternalKey& a,
+ const InternalKey& b) const {
return Compare(a.Encode(), b.Encode());
}
@@ -178,11 +173,11 @@ inline bool ParseInternalKey(const Slice& internal_key,
const size_t n = internal_key.size();
if (n < 8) return false;
uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
- unsigned char c = num & 0xff;
+ uint8_t c = num & 0xff;
result->sequence = num >> 8;
result->type = static_cast<ValueType>(c);
result->user_key = Slice(internal_key.data(), n - 8);
- return (c <= static_cast<unsigned char>(kTypeValue));
+ return (c <= static_cast<uint8_t>(kTypeValue));
}
// A helper class useful for DBImpl::Get()
@@ -192,6 +187,9 @@ class LookupKey {
// the specified sequence number.
LookupKey(const Slice& user_key, SequenceNumber sequence);
+ LookupKey(const LookupKey&) = delete;
+ LookupKey& operator=(const LookupKey&) = delete;
+
~LookupKey();
// Return a key suitable for lookup in a MemTable.
@@ -214,11 +212,7 @@ class LookupKey {
const char* start_;
const char* kstart_;
const char* end_;
- char space_[200]; // Avoid allocation for short keys
-
- // No copying allowed
- LookupKey(const LookupKey&);
- void operator=(const LookupKey&);
+ char space_[200]; // Avoid allocation for short keys
};
inline LookupKey::~LookupKey() {
diff --git a/src/leveldb/db/dbformat_test.cc b/src/leveldb/db/dbformat_test.cc
index 5d82f5d313..1209369c31 100644
--- a/src/leveldb/db/dbformat_test.cc
+++ b/src/leveldb/db/dbformat_test.cc
@@ -8,8 +8,7 @@
namespace leveldb {
-static std::string IKey(const std::string& user_key,
- uint64_t seq,
+static std::string IKey(const std::string& user_key, uint64_t seq,
ValueType vt) {
std::string encoded;
AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt));
@@ -28,9 +27,7 @@ static std::string ShortSuccessor(const std::string& s) {
return result;
}
-static void TestKey(const std::string& key,
- uint64_t seq,
- ValueType vt) {
+static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {
std::string encoded = IKey(key, seq, vt);
Slice in(encoded);
@@ -44,16 +41,22 @@ static void TestKey(const std::string& key,
ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
}
-class FormatTest { };
+class FormatTest {};
TEST(FormatTest, InternalKey_EncodeDecode) {
- const char* keys[] = { "", "k", "hello", "longggggggggggggggggggggg" };
- const uint64_t seq[] = {
- 1, 2, 3,
- (1ull << 8) - 1, 1ull << 8, (1ull << 8) + 1,
- (1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1,
- (1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1
- };
+ const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"};
+ const uint64_t seq[] = {1,
+ 2,
+ 3,
+ (1ull << 8) - 1,
+ 1ull << 8,
+ (1ull << 8) + 1,
+ (1ull << 16) - 1,
+ 1ull << 16,
+ (1ull << 16) + 1,
+ (1ull << 32) - 1,
+ 1ull << 32,
+ (1ull << 32) + 1};
for (int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
for (int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
TestKey(keys[k], seq[s], kTypeValue);
@@ -62,40 +65,44 @@ TEST(FormatTest, InternalKey_EncodeDecode) {
}
}
+TEST(FormatTest, InternalKey_DecodeFromEmpty) {
+ InternalKey internal_key;
+
+ ASSERT_TRUE(!internal_key.DecodeFrom(""));
+}
+
TEST(FormatTest, InternalKeyShortSeparator) {
// When user keys are same
ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foo", 99, kTypeValue)));
- ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foo", 101, kTypeValue)));
- ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foo", 100, kTypeValue)));
- ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foo", 100, kTypeDeletion)));
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 99, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foo", 100, kTypeValue),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 101, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foo", 100, kTypeValue),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foo", 100, kTypeValue),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeDeletion)));
// When user keys are misordered
ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("bar", 99, kTypeValue)));
+ Shorten(IKey("foo", 100, kTypeValue), IKey("bar", 99, kTypeValue)));
// When user keys are different, but correctly ordered
- ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("hello", 200, kTypeValue)));
+ ASSERT_EQ(
+ IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("hello", 200, kTypeValue)));
// When start user key is prefix of limit user key
- ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foobar", 200, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foo", 100, kTypeValue),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foobar", 200, kTypeValue)));
// When limit user key is prefix of start user key
- ASSERT_EQ(IKey("foobar", 100, kTypeValue),
- Shorten(IKey("foobar", 100, kTypeValue),
- IKey("foo", 200, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foobar", 100, kTypeValue),
+ Shorten(IKey("foobar", 100, kTypeValue), IKey("foo", 200, kTypeValue)));
}
TEST(FormatTest, InternalKeyShortestSuccessor) {
@@ -105,8 +112,20 @@ TEST(FormatTest, InternalKeyShortestSuccessor) {
ShortSuccessor(IKey("\xff\xff", 100, kTypeValue)));
}
-} // namespace leveldb
+TEST(FormatTest, ParsedInternalKeyDebugString) {
+ ParsedInternalKey key("The \"key\" in 'single quotes'", 42, kTypeValue);
+
+ ASSERT_EQ("'The \"key\" in 'single quotes'' @ 42 : 1", key.DebugString());
+}
+
+TEST(FormatTest, InternalKeyDebugString) {
+ InternalKey key("The \"key\" in 'single quotes'", 42, kTypeValue);
+ ASSERT_EQ("'The \"key\" in 'single quotes'' @ 42 : 1", key.DebugString());
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
+ InternalKey invalid_key;
+ ASSERT_EQ("(bad)", invalid_key.DebugString());
}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
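The DebugString tests added above assert strings of the form "'<user key>' @ 42 : 1", which follows directly from the internal key layout declared in dbformat.h: the user key is followed by an 8-byte trailer packing the 56-bit sequence number and the 8-bit value type, which is why ParseInternalKey recovers them as num >> 8 and num & 0xff. A standalone sketch of that packing (MakeInternalKey is a hypothetical helper, and the memcpy assumes a little-endian host where leveldb would use PutFixed64):

#include <cstdint>
#include <cstring>
#include <string>

enum ValueType : uint8_t { kTypeDeletion = 0x0, kTypeValue = 0x1 };

// Hypothetical stand-in for AppendInternalKey(): user_key || fixed64 trailer.
std::string MakeInternalKey(const std::string& user_key, uint64_t sequence,
                            ValueType type) {
  uint64_t packed = (sequence << 8) | type;  // 56-bit sequence, 8-bit type
  std::string result = user_key;
  char trailer[8];
  std::memcpy(trailer, &packed, sizeof(packed));  // little-endian assumption
  result.append(trailer, sizeof(trailer));
  return result;
}

// MakeInternalKey("foo", 42, kTypeValue) is 3 + 8 = 11 bytes long; decoding
// the trailer yields sequence 42 and type 1, matching the "@ 42 : 1" suffix
// checked by the new DebugString tests.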
diff --git a/src/leveldb/db/dumpfile.cc b/src/leveldb/db/dumpfile.cc
index 61c47c2ff9..77d59003cf 100644
--- a/src/leveldb/db/dumpfile.cc
+++ b/src/leveldb/db/dumpfile.cc
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "leveldb/dumpfile.h"
+
#include <stdio.h>
+
#include "db/dbformat.h"
#include "db/filename.h"
#include "db/log_reader.h"
@@ -35,8 +38,7 @@ bool GuessType(const std::string& fname, FileType* type) {
// Notified when log reader encounters corruption.
class CorruptionReporter : public log::Reader::Reporter {
public:
- WritableFile* dst_;
- virtual void Corruption(size_t bytes, const Status& status) {
+ void Corruption(size_t bytes, const Status& status) override {
std::string r = "corruption: ";
AppendNumberTo(&r, bytes);
r += " bytes; ";
@@ -44,6 +46,8 @@ class CorruptionReporter : public log::Reader::Reporter {
r.push_back('\n');
dst_->Append(r);
}
+
+ WritableFile* dst_;
};
// Print contents of a log file. (*func)() is called on every record.
@@ -70,8 +74,7 @@ Status PrintLogContents(Env* env, const std::string& fname,
// Called on every item found in a WriteBatch.
class WriteBatchItemPrinter : public WriteBatch::Handler {
public:
- WritableFile* dst_;
- virtual void Put(const Slice& key, const Slice& value) {
+ void Put(const Slice& key, const Slice& value) override {
std::string r = " put '";
AppendEscapedStringTo(&r, key);
r += "' '";
@@ -79,14 +82,15 @@ class WriteBatchItemPrinter : public WriteBatch::Handler {
r += "'\n";
dst_->Append(r);
}
- virtual void Delete(const Slice& key) {
+ void Delete(const Slice& key) override {
std::string r = " del '";
AppendEscapedStringTo(&r, key);
r += "'\n";
dst_->Append(r);
}
-};
+ WritableFile* dst_;
+};
// Called on every log record (each one of which is a WriteBatch)
// found in a kLogFile.
@@ -142,8 +146,8 @@ Status DumpDescriptor(Env* env, const std::string& fname, WritableFile* dst) {
Status DumpTable(Env* env, const std::string& fname, WritableFile* dst) {
uint64_t file_size;
- RandomAccessFile* file = NULL;
- Table* table = NULL;
+ RandomAccessFile* file = nullptr;
+ Table* table = nullptr;
Status s = env->GetFileSize(fname, &file_size);
if (s.ok()) {
s = env->NewRandomAccessFile(fname, &file);
@@ -213,9 +217,12 @@ Status DumpFile(Env* env, const std::string& fname, WritableFile* dst) {
return Status::InvalidArgument(fname + ": unknown file type");
}
switch (ftype) {
- case kLogFile: return DumpLog(env, fname, dst);
- case kDescriptorFile: return DumpDescriptor(env, fname, dst);
- case kTableFile: return DumpTable(env, fname, dst);
+ case kLogFile:
+ return DumpLog(env, fname, dst);
+ case kDescriptorFile:
+ return DumpDescriptor(env, fname, dst);
+ case kTableFile:
+ return DumpTable(env, fname, dst);
default:
break;
}
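DumpFile(), whose type dispatch is restructured above, guesses the kind of file from its name and routes it to DumpLog, DumpDescriptor or DumpTable, rejecting anything else as an unknown file type. A rough usage sketch against the public leveldb/dumpfile.h entry point; the paths are placeholders:

#include <cstdio>

#include "leveldb/dumpfile.h"
#include "leveldb/env.h"
#include "leveldb/status.h"

int main() {
  leveldb::Env* env = leveldb::Env::Default();
  leveldb::WritableFile* out = nullptr;
  leveldb::Status s = env->NewWritableFile("/tmp/000005.dump.txt", &out);
  if (s.ok()) {
    // The ".ldb" suffix makes GuessType() pick kTableFile, so this
    // ends up in DumpTable().
    s = leveldb::DumpFile(env, "/tmp/exampledb/000005.ldb", out);
    out->Close();
    delete out;
  }
  if (!s.ok()) {
    std::fprintf(stderr, "dump failed: %s\n", s.ToString().c_str());
  }
  return s.ok() ? 0 : 1;
}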
diff --git a/src/leveldb/db/fault_injection_test.cc b/src/leveldb/db/fault_injection_test.cc
index 875dfe81ee..bf705cb60f 100644
--- a/src/leveldb/db/fault_injection_test.cc
+++ b/src/leveldb/db/fault_injection_test.cc
@@ -6,18 +6,20 @@
// the last "sync". It then checks for data loss errors by purposely dropping
// file data (or entire files) not protected by a "sync".
-#include "leveldb/db.h"
-
#include <map>
#include <set>
+
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/log_format.h"
#include "db/version_set.h"
#include "leveldb/cache.h"
+#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/table.h"
#include "leveldb/write_batch.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/testharness.h"
@@ -34,7 +36,7 @@ class FaultInjectionTestEnv;
namespace {
// Assume a filename, and not a directory name like "/foo/bar/"
-static std::string GetDirName(const std::string filename) {
+static std::string GetDirName(const std::string& filename) {
size_t found = filename.find_last_of("/\\");
if (found == std::string::npos) {
return "";
@@ -54,8 +56,7 @@ Status Truncate(const std::string& filename, uint64_t length) {
SequentialFile* orig_file;
Status s = env->NewSequentialFile(filename, &orig_file);
- if (!s.ok())
- return s;
+ if (!s.ok()) return s;
char* scratch = new char[length];
leveldb::Slice result;
@@ -83,15 +84,15 @@ Status Truncate(const std::string& filename, uint64_t length) {
struct FileState {
std::string filename_;
- ssize_t pos_;
- ssize_t pos_at_last_sync_;
- ssize_t pos_at_last_flush_;
+ int64_t pos_;
+ int64_t pos_at_last_sync_;
+ int64_t pos_at_last_flush_;
FileState(const std::string& filename)
: filename_(filename),
pos_(-1),
pos_at_last_sync_(-1),
- pos_at_last_flush_(-1) { }
+ pos_at_last_flush_(-1) {}
FileState() : pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {}
@@ -106,14 +107,14 @@ struct FileState {
// is written to or sync'ed.
class TestWritableFile : public WritableFile {
public:
- TestWritableFile(const FileState& state,
- WritableFile* f,
+ TestWritableFile(const FileState& state, WritableFile* f,
FaultInjectionTestEnv* env);
- virtual ~TestWritableFile();
- virtual Status Append(const Slice& data);
- virtual Status Close();
- virtual Status Flush();
- virtual Status Sync();
+ ~TestWritableFile() override;
+ Status Append(const Slice& data) override;
+ Status Close() override;
+ Status Flush() override;
+ Status Sync() override;
+ std::string GetName() const override { return ""; }
private:
FileState state_;
@@ -126,14 +127,15 @@ class TestWritableFile : public WritableFile {
class FaultInjectionTestEnv : public EnvWrapper {
public:
- FaultInjectionTestEnv() : EnvWrapper(Env::Default()), filesystem_active_(true) {}
- virtual ~FaultInjectionTestEnv() { }
- virtual Status NewWritableFile(const std::string& fname,
- WritableFile** result);
- virtual Status NewAppendableFile(const std::string& fname,
- WritableFile** result);
- virtual Status DeleteFile(const std::string& f);
- virtual Status RenameFile(const std::string& s, const std::string& t);
+ FaultInjectionTestEnv()
+ : EnvWrapper(Env::Default()), filesystem_active_(true) {}
+ ~FaultInjectionTestEnv() override = default;
+ Status NewWritableFile(const std::string& fname,
+ WritableFile** result) override;
+ Status NewAppendableFile(const std::string& fname,
+ WritableFile** result) override;
+ Status DeleteFile(const std::string& f) override;
+ Status RenameFile(const std::string& s, const std::string& t) override;
void WritableFileClosed(const FileState& state);
Status DropUnsyncedFileData();
@@ -146,24 +148,26 @@ class FaultInjectionTestEnv : public EnvWrapper {
// system reset. Setting to inactive will freeze our saved filesystem state so
// that it will stop being recorded. It can then be reset back to the state at
// the time of the reset.
- bool IsFilesystemActive() const { return filesystem_active_; }
- void SetFilesystemActive(bool active) { filesystem_active_ = active; }
+ bool IsFilesystemActive() LOCKS_EXCLUDED(mutex_) {
+ MutexLock l(&mutex_);
+ return filesystem_active_;
+ }
+ void SetFilesystemActive(bool active) LOCKS_EXCLUDED(mutex_) {
+ MutexLock l(&mutex_);
+ filesystem_active_ = active;
+ }
private:
port::Mutex mutex_;
- std::map<std::string, FileState> db_file_state_;
- std::set<std::string> new_files_since_last_dir_sync_;
- bool filesystem_active_; // Record flushes, syncs, writes
+ std::map<std::string, FileState> db_file_state_ GUARDED_BY(mutex_);
+ std::set<std::string> new_files_since_last_dir_sync_ GUARDED_BY(mutex_);
+ bool filesystem_active_ GUARDED_BY(mutex_); // Record flushes, syncs, writes
};
-TestWritableFile::TestWritableFile(const FileState& state,
- WritableFile* f,
+TestWritableFile::TestWritableFile(const FileState& state, WritableFile* f,
FaultInjectionTestEnv* env)
- : state_(state),
- target_(f),
- writable_file_opened_(true),
- env_(env) {
- assert(f != NULL);
+ : state_(state), target_(f), writable_file_opened_(true), env_(env) {
+ assert(f != nullptr);
}
TestWritableFile::~TestWritableFile() {
@@ -265,10 +269,11 @@ Status FaultInjectionTestEnv::NewAppendableFile(const std::string& fname,
Status FaultInjectionTestEnv::DropUnsyncedFileData() {
Status s;
MutexLock l(&mutex_);
- for (std::map<std::string, FileState>::const_iterator it =
- db_file_state_.begin();
- s.ok() && it != db_file_state_.end(); ++it) {
- const FileState& state = it->second;
+ for (const auto& kvp : db_file_state_) {
+ if (!s.ok()) {
+ break;
+ }
+ const FileState& state = kvp.second;
if (!state.IsFullySynced()) {
s = state.DropUnsyncedData();
}
@@ -328,7 +333,6 @@ void FaultInjectionTestEnv::ResetState() {
// Since we are not destroying the database, the existing files
// should keep their recorded synced/flushed state. Therefore
// we do not reset db_file_state_ and new_files_since_last_dir_sync_.
- MutexLock l(&mutex_);
SetFilesystemActive(true);
}
@@ -338,12 +342,14 @@ Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() {
std::set<std::string> new_files(new_files_since_last_dir_sync_.begin(),
new_files_since_last_dir_sync_.end());
mutex_.Unlock();
- Status s;
- std::set<std::string>::const_iterator it;
- for (it = new_files.begin(); s.ok() && it != new_files.end(); ++it) {
- s = DeleteFile(*it);
+ Status status;
+ for (const auto& new_file : new_files) {
+ Status delete_status = DeleteFile(new_file);
+ if (!delete_status.ok() && status.ok()) {
+ status = std::move(delete_status);
+ }
}
- return s;
+ return status;
}
void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) {
@@ -352,7 +358,7 @@ void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) {
}
Status FileState::DropUnsyncedData() const {
- ssize_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
+ int64_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
return Truncate(filename_, sync_pos);
}
@@ -370,7 +376,7 @@ class FaultInjectionTest {
FaultInjectionTest()
: env_(new FaultInjectionTestEnv),
tiny_cache_(NewLRUCache(100)),
- db_(NULL) {
+ db_(nullptr) {
dbname_ = test::TmpDir() + "/fault_test";
DestroyDB(dbname_, Options()); // Destroy any db from earlier run
options_.reuse_logs = true;
@@ -387,9 +393,7 @@ class FaultInjectionTest {
delete env_;
}
- void ReuseLogs(bool reuse) {
- options_.reuse_logs = reuse;
- }
+ void ReuseLogs(bool reuse) { options_.reuse_logs = reuse; }
void Build(int start_idx, int num_vals) {
std::string key_space, value_space;
@@ -449,19 +453,18 @@ class FaultInjectionTest {
Status OpenDB() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
env_->ResetState();
return DB::Open(options_, dbname_, &db_);
}
void CloseDB() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
}
void DeleteAllData() {
Iterator* iter = db_->NewIterator(ReadOptions());
- WriteOptions options;
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ASSERT_OK(db_->Delete(WriteOptions(), iter->key()));
}
@@ -485,23 +488,22 @@ class FaultInjectionTest {
void PartialCompactTestPreFault(int num_pre_sync, int num_post_sync) {
DeleteAllData();
Build(0, num_pre_sync);
- db_->CompactRange(NULL, NULL);
+ db_->CompactRange(nullptr, nullptr);
Build(num_pre_sync, num_post_sync);
}
void PartialCompactTestReopenWithFault(ResetMethod reset_method,
- int num_pre_sync,
- int num_post_sync) {
+ int num_pre_sync, int num_post_sync) {
env_->SetFilesystemActive(false);
CloseDB();
ResetDBState(reset_method);
ASSERT_OK(OpenDB());
ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR));
- ASSERT_OK(Verify(num_pre_sync, num_post_sync, FaultInjectionTest::VAL_EXPECT_ERROR));
+ ASSERT_OK(Verify(num_pre_sync, num_post_sync,
+ FaultInjectionTest::VAL_EXPECT_ERROR));
}
- void NoWriteTestPreFault() {
- }
+ void NoWriteTestPreFault() {}
void NoWriteTestReopenWithFault(ResetMethod reset_method) {
CloseDB();
@@ -517,8 +519,7 @@ class FaultInjectionTest {
int num_post_sync = rnd.Uniform(kMaxNumValues);
PartialCompactTestPreFault(num_pre_sync, num_post_sync);
- PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA,
- num_pre_sync,
+ PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA, num_pre_sync,
num_post_sync);
NoWriteTestPreFault();
@@ -528,8 +529,7 @@ class FaultInjectionTest {
// No new files created so we expect all values since no files will be
// dropped.
PartialCompactTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES,
- num_pre_sync + num_post_sync,
- 0);
+ num_pre_sync + num_post_sync, 0);
NoWriteTestPreFault();
NoWriteTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES);
@@ -549,6 +549,4 @@ TEST(FaultInjectionTest, FaultTestWithLogReuse) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
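fault_injection_test.cc now documents its locking discipline with thread-safety annotations: the bookkeeping members are marked GUARDED_BY(mutex_), and IsFilesystemActive()/SetFilesystemActive() take the lock themselves and are annotated LOCKS_EXCLUDED(mutex_), so clang's -Wthread-safety can check the pattern at compile time. A minimal sketch of the same idea using std::mutex instead of leveldb's port::Mutex/MutexLock, with the annotations left out so it builds on any toolchain:

#include <mutex>

class FaultSwitch {
 public:
  // Analogue of IsFilesystemActive(): every read happens under the lock.
  bool IsActive() {
    std::lock_guard<std::mutex> lock(mutex_);
    return active_;
  }
  // Analogue of SetFilesystemActive(): every write happens under the lock.
  void SetActive(bool active) {
    std::lock_guard<std::mutex> lock(mutex_);
    active_ = active;
  }

 private:
  std::mutex mutex_;
  bool active_ = true;  // would carry GUARDED_BY(mutex_) in the real code
};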
diff --git a/src/leveldb/db/filename.cc b/src/leveldb/db/filename.cc
index da32946d99..85de45c507 100644
--- a/src/leveldb/db/filename.cc
+++ b/src/leveldb/db/filename.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "db/filename.h"
+
#include <ctype.h>
#include <stdio.h>
-#include "db/filename.h"
+
#include "db/dbformat.h"
#include "leveldb/env.h"
#include "util/logging.h"
@@ -12,31 +14,30 @@
namespace leveldb {
// A utility routine: write "data" to the named file and Sync() it.
-extern Status WriteStringToFileSync(Env* env, const Slice& data,
- const std::string& fname);
+Status WriteStringToFileSync(Env* env, const Slice& data,
+ const std::string& fname);
-static std::string MakeFileName(const std::string& name, uint64_t number,
+static std::string MakeFileName(const std::string& dbname, uint64_t number,
const char* suffix) {
char buf[100];
snprintf(buf, sizeof(buf), "/%06llu.%s",
- static_cast<unsigned long long>(number),
- suffix);
- return name + buf;
+ static_cast<unsigned long long>(number), suffix);
+ return dbname + buf;
}
-std::string LogFileName(const std::string& name, uint64_t number) {
+std::string LogFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
- return MakeFileName(name, number, "log");
+ return MakeFileName(dbname, number, "log");
}
-std::string TableFileName(const std::string& name, uint64_t number) {
+std::string TableFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
- return MakeFileName(name, number, "ldb");
+ return MakeFileName(dbname, number, "ldb");
}
-std::string SSTTableFileName(const std::string& name, uint64_t number) {
+std::string SSTTableFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
- return MakeFileName(name, number, "sst");
+ return MakeFileName(dbname, number, "sst");
}
std::string DescriptorFileName(const std::string& dbname, uint64_t number) {
@@ -51,9 +52,7 @@ std::string CurrentFileName(const std::string& dbname) {
return dbname + "/CURRENT";
}
-std::string LockFileName(const std::string& dbname) {
- return dbname + "/LOCK";
-}
+std::string LockFileName(const std::string& dbname) { return dbname + "/LOCK"; }
std::string TempFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
@@ -69,7 +68,6 @@ std::string OldInfoLogFileName(const std::string& dbname) {
return dbname + "/LOG.old";
}
-
// Owned filenames have the form:
// dbname/CURRENT
// dbname/LOCK
@@ -77,10 +75,9 @@ std::string OldInfoLogFileName(const std::string& dbname) {
// dbname/LOG.old
// dbname/MANIFEST-[0-9]+
// dbname/[0-9]+.(log|sst|ldb)
-bool ParseFileName(const std::string& fname,
- uint64_t* number,
+bool ParseFileName(const std::string& filename, uint64_t* number,
FileType* type) {
- Slice rest(fname);
+ Slice rest(filename);
if (rest == "CURRENT") {
*number = 0;
*type = kCurrentFile;
diff --git a/src/leveldb/db/filename.h b/src/leveldb/db/filename.h
index 87a752605d..524e813c06 100644
--- a/src/leveldb/db/filename.h
+++ b/src/leveldb/db/filename.h
@@ -8,7 +8,9 @@
#define STORAGE_LEVELDB_DB_FILENAME_H_
#include <stdint.h>
+
#include <string>
+
#include "leveldb/slice.h"
#include "leveldb/status.h"
#include "port/port.h"
@@ -30,55 +32,52 @@ enum FileType {
// Return the name of the log file with the specified number
// in the db named by "dbname". The result will be prefixed with
// "dbname".
-extern std::string LogFileName(const std::string& dbname, uint64_t number);
+std::string LogFileName(const std::string& dbname, uint64_t number);
// Return the name of the sstable with the specified number
// in the db named by "dbname". The result will be prefixed with
// "dbname".
-extern std::string TableFileName(const std::string& dbname, uint64_t number);
+std::string TableFileName(const std::string& dbname, uint64_t number);
// Return the legacy file name for an sstable with the specified number
// in the db named by "dbname". The result will be prefixed with
// "dbname".
-extern std::string SSTTableFileName(const std::string& dbname, uint64_t number);
+std::string SSTTableFileName(const std::string& dbname, uint64_t number);
// Return the name of the descriptor file for the db named by
// "dbname" and the specified incarnation number. The result will be
// prefixed with "dbname".
-extern std::string DescriptorFileName(const std::string& dbname,
- uint64_t number);
+std::string DescriptorFileName(const std::string& dbname, uint64_t number);
// Return the name of the current file. This file contains the name
// of the current manifest file. The result will be prefixed with
// "dbname".
-extern std::string CurrentFileName(const std::string& dbname);
+std::string CurrentFileName(const std::string& dbname);
// Return the name of the lock file for the db named by
// "dbname". The result will be prefixed with "dbname".
-extern std::string LockFileName(const std::string& dbname);
+std::string LockFileName(const std::string& dbname);
// Return the name of a temporary file owned by the db named "dbname".
// The result will be prefixed with "dbname".
-extern std::string TempFileName(const std::string& dbname, uint64_t number);
+std::string TempFileName(const std::string& dbname, uint64_t number);
// Return the name of the info log file for "dbname".
-extern std::string InfoLogFileName(const std::string& dbname);
+std::string InfoLogFileName(const std::string& dbname);
// Return the name of the old info log file for "dbname".
-extern std::string OldInfoLogFileName(const std::string& dbname);
+std::string OldInfoLogFileName(const std::string& dbname);
// If filename is a leveldb file, store the type of the file in *type.
// The number encoded in the filename is stored in *number. If the
// filename was successfully parsed, returns true. Else return false.
-extern bool ParseFileName(const std::string& filename,
- uint64_t* number,
- FileType* type);
+bool ParseFileName(const std::string& filename, uint64_t* number,
+ FileType* type);
// Make the CURRENT file point to the descriptor file with the
// specified number.
-extern Status SetCurrentFile(Env* env, const std::string& dbname,
- uint64_t descriptor_number);
-
+Status SetCurrentFile(Env* env, const std::string& dbname,
+ uint64_t descriptor_number);
} // namespace leveldb
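
For reference, a minimal usage sketch of the helpers declared above (not part of the patch; the db path and file number are hypothetical). It relies on the "/%06llu.<suffix>" format produced by MakeFileName() in filename.cc and on ParseFileName() accepting the bare file name without the "dbname/" prefix, as exercised in filename_test.cc below:

#include <cassert>
#include <cstdint>
#include <string>

#include "db/filename.h"

void FilenameRoundTrip() {
  const std::string dbname = "/tmp/testdb";                  // hypothetical path
  const std::string log = leveldb::LogFileName(dbname, 7);   // "/tmp/testdb/000007.log"

  uint64_t number;
  leveldb::FileType type;
  // Strip the "dbname/" prefix before parsing, as the tests do.
  assert(leveldb::ParseFileName(log.substr(dbname.size() + 1), &number, &type));
  assert(number == 7);
  assert(type == leveldb::kLogFile);
}
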
diff --git a/src/leveldb/db/filename_test.cc b/src/leveldb/db/filename_test.cc
index a32556deaf..952f32008e 100644
--- a/src/leveldb/db/filename_test.cc
+++ b/src/leveldb/db/filename_test.cc
@@ -11,7 +11,7 @@
namespace leveldb {
-class FileNameTest { };
+class FileNameTest {};
TEST(FileNameTest, Parse) {
Slice db;
@@ -24,17 +24,17 @@ TEST(FileNameTest, Parse) {
uint64_t number;
FileType type;
} cases[] = {
- { "100.log", 100, kLogFile },
- { "0.log", 0, kLogFile },
- { "0.sst", 0, kTableFile },
- { "0.ldb", 0, kTableFile },
- { "CURRENT", 0, kCurrentFile },
- { "LOCK", 0, kDBLockFile },
- { "MANIFEST-2", 2, kDescriptorFile },
- { "MANIFEST-7", 7, kDescriptorFile },
- { "LOG", 0, kInfoLogFile },
- { "LOG.old", 0, kInfoLogFile },
- { "18446744073709551615.log", 18446744073709551615ull, kLogFile },
+ {"100.log", 100, kLogFile},
+ {"0.log", 0, kLogFile},
+ {"0.sst", 0, kTableFile},
+ {"0.ldb", 0, kTableFile},
+ {"CURRENT", 0, kCurrentFile},
+ {"LOCK", 0, kDBLockFile},
+ {"MANIFEST-2", 2, kDescriptorFile},
+ {"MANIFEST-7", 7, kDescriptorFile},
+ {"LOG", 0, kInfoLogFile},
+ {"LOG.old", 0, kInfoLogFile},
+ {"18446744073709551615.log", 18446744073709551615ull, kLogFile},
};
for (int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
std::string f = cases[i].fname;
@@ -44,30 +44,28 @@ TEST(FileNameTest, Parse) {
}
// Errors
- static const char* errors[] = {
- "",
- "foo",
- "foo-dx-100.log",
- ".log",
- "",
- "manifest",
- "CURREN",
- "CURRENTX",
- "MANIFES",
- "MANIFEST",
- "MANIFEST-",
- "XMANIFEST-3",
- "MANIFEST-3x",
- "LOC",
- "LOCKx",
- "LO",
- "LOGx",
- "18446744073709551616.log",
- "184467440737095516150.log",
- "100",
- "100.",
- "100.lop"
- };
+ static const char* errors[] = {"",
+ "foo",
+ "foo-dx-100.log",
+ ".log",
+ "",
+ "manifest",
+ "CURREN",
+ "CURRENTX",
+ "MANIFES",
+ "MANIFEST",
+ "MANIFEST-",
+ "XMANIFEST-3",
+ "MANIFEST-3x",
+ "LOC",
+ "LOCKx",
+ "LO",
+ "LOGx",
+ "18446744073709551616.log",
+ "184467440737095516150.log",
+ "100",
+ "100.",
+ "100.lop"};
for (int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) {
std::string f = errors[i];
ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f;
@@ -114,10 +112,20 @@ TEST(FileNameTest, Construction) {
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(999, number);
ASSERT_EQ(kTempFile, type);
+
+ fname = InfoLogFileName("foo");
+ ASSERT_EQ("foo/", std::string(fname.data(), 4));
+ ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
+ ASSERT_EQ(0, number);
+ ASSERT_EQ(kInfoLogFile, type);
+
+ fname = OldInfoLogFileName("foo");
+ ASSERT_EQ("foo/", std::string(fname.data(), 4));
+ ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
+ ASSERT_EQ(0, number);
+ ASSERT_EQ(kInfoLogFile, type);
}
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/leveldbutil.cc b/src/leveldb/db/leveldbutil.cc
index d06d64d640..9ed9667d37 100644
--- a/src/leveldb/db/leveldbutil.cc
+++ b/src/leveldb/db/leveldbutil.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <stdio.h>
+
#include "leveldb/dumpfile.h"
#include "leveldb/env.h"
#include "leveldb/status.h"
@@ -12,14 +13,14 @@ namespace {
class StdoutPrinter : public WritableFile {
public:
- virtual Status Append(const Slice& data) {
+ Status Append(const Slice& data) override {
fwrite(data.data(), 1, data.size(), stdout);
return Status::OK();
}
- virtual Status Close() { return Status::OK(); }
- virtual Status Flush() { return Status::OK(); }
- virtual Status Sync() { return Status::OK(); }
- virtual std::string GetName() const { return "[stdout]"; }
+ Status Close() override { return Status::OK(); }
+ Status Flush() override { return Status::OK(); }
+ Status Sync() override { return Status::OK(); }
+ std::string GetName() const override { return "[stdout]"; }
};
bool HandleDumpCommand(Env* env, char** files, int num) {
@@ -39,11 +40,9 @@ bool HandleDumpCommand(Env* env, char** files, int num) {
} // namespace leveldb
static void Usage() {
- fprintf(
- stderr,
- "Usage: leveldbutil command...\n"
- " dump files... -- dump contents of specified files\n"
- );
+ fprintf(stderr,
+ "Usage: leveldbutil command...\n"
+ " dump files... -- dump contents of specified files\n");
}
int main(int argc, char** argv) {
@@ -55,7 +54,7 @@ int main(int argc, char** argv) {
} else {
std::string command = argv[1];
if (command == "dump") {
- ok = leveldb::HandleDumpCommand(env, argv+2, argc-2);
+ ok = leveldb::HandleDumpCommand(env, argv + 2, argc - 2);
} else {
Usage();
ok = false;
diff --git a/src/leveldb/db/log_reader.cc b/src/leveldb/db/log_reader.cc
index 8b6ad136d7..1ccfb7b34a 100644
--- a/src/leveldb/db/log_reader.cc
+++ b/src/leveldb/db/log_reader.cc
@@ -5,6 +5,7 @@
#include "db/log_reader.h"
#include <stdio.h>
+
#include "leveldb/env.h"
#include "util/coding.h"
#include "util/crc32c.h"
@@ -12,8 +13,7 @@
namespace leveldb {
namespace log {
-Reader::Reporter::~Reporter() {
-}
+Reader::Reporter::~Reporter() = default;
Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
uint64_t initial_offset)
@@ -26,20 +26,16 @@ Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
last_record_offset_(0),
end_of_buffer_offset_(0),
initial_offset_(initial_offset),
- resyncing_(initial_offset > 0) {
-}
+ resyncing_(initial_offset > 0) {}
-Reader::~Reader() {
- delete[] backing_store_;
-}
+Reader::~Reader() { delete[] backing_store_; }
bool Reader::SkipToInitialBlock() {
- size_t offset_in_block = initial_offset_ % kBlockSize;
+ const size_t offset_in_block = initial_offset_ % kBlockSize;
uint64_t block_start_location = initial_offset_ - offset_in_block;
// Don't search a block if we'd be in the trailer
if (offset_in_block > kBlockSize - 6) {
- offset_in_block = 0;
block_start_location += kBlockSize;
}
@@ -99,9 +95,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
// it could emit an empty kFirstType record at the tail end
// of a block followed by a kFullType or kFirstType record
// at the beginning of the next block.
- if (scratch->empty()) {
- in_fragmented_record = false;
- } else {
+ if (!scratch->empty()) {
ReportCorruption(scratch->size(), "partial record without end(1)");
}
}
@@ -117,9 +111,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
// it could emit an empty kFirstType record at the tail end
// of a block followed by a kFullType or kFirstType record
// at the beginning of the next block.
- if (scratch->empty()) {
- in_fragmented_record = false;
- } else {
+ if (!scratch->empty()) {
ReportCorruption(scratch->size(), "partial record without end(2)");
}
}
@@ -181,16 +173,14 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
return false;
}
-uint64_t Reader::LastRecordOffset() {
- return last_record_offset_;
-}
+uint64_t Reader::LastRecordOffset() { return last_record_offset_; }
void Reader::ReportCorruption(uint64_t bytes, const char* reason) {
ReportDrop(bytes, Status::Corruption(reason, file_->GetName()));
}
void Reader::ReportDrop(uint64_t bytes, const Status& reason) {
- if (reporter_ != NULL &&
+ if (reporter_ != nullptr &&
end_of_buffer_offset_ - buffer_.size() - bytes >= initial_offset_) {
reporter_->Corruption(static_cast<size_t>(bytes), reason);
}
diff --git a/src/leveldb/db/log_reader.h b/src/leveldb/db/log_reader.h
index 8389d61f8f..001da8948a 100644
--- a/src/leveldb/db/log_reader.h
+++ b/src/leveldb/db/log_reader.h
@@ -32,7 +32,7 @@ class Reader {
// Create a reader that will return log records from "*file".
// "*file" must remain live while this Reader is in use.
//
- // If "reporter" is non-NULL, it is notified whenever some data is
+ // If "reporter" is non-null, it is notified whenever some data is
// dropped due to a detected corruption. "*reporter" must remain
// live while this Reader is in use.
//
@@ -43,6 +43,9 @@ class Reader {
Reader(SequentialFile* file, Reporter* reporter, bool checksum,
uint64_t initial_offset);
+ Reader(const Reader&) = delete;
+ Reader& operator=(const Reader&) = delete;
+
~Reader();
// Read the next record into *record. Returns true if read
@@ -58,26 +61,6 @@ class Reader {
uint64_t LastRecordOffset();
private:
- SequentialFile* const file_;
- Reporter* const reporter_;
- bool const checksum_;
- char* const backing_store_;
- Slice buffer_;
- bool eof_; // Last Read() indicated EOF by returning < kBlockSize
-
- // Offset of the last record returned by ReadRecord.
- uint64_t last_record_offset_;
- // Offset of the first location past the end of buffer_.
- uint64_t end_of_buffer_offset_;
-
- // Offset at which to start looking for the first record to return
- uint64_t const initial_offset_;
-
- // True if we are resynchronizing after a seek (initial_offset_ > 0). In
- // particular, a run of kMiddleType and kLastType records can be silently
- // skipped in this mode
- bool resyncing_;
-
// Extend record types with the following special values
enum {
kEof = kMaxRecordType + 1,
@@ -102,9 +85,25 @@ class Reader {
void ReportCorruption(uint64_t bytes, const char* reason);
void ReportDrop(uint64_t bytes, const Status& reason);
- // No copying allowed
- Reader(const Reader&);
- void operator=(const Reader&);
+ SequentialFile* const file_;
+ Reporter* const reporter_;
+ bool const checksum_;
+ char* const backing_store_;
+ Slice buffer_;
+ bool eof_; // Last Read() indicated EOF by returning < kBlockSize
+
+ // Offset of the last record returned by ReadRecord.
+ uint64_t last_record_offset_;
+ // Offset of the first location past the end of buffer_.
+ uint64_t end_of_buffer_offset_;
+
+ // Offset at which to start looking for the first record to return
+ uint64_t const initial_offset_;
+
+ // True if we are resynchronizing after a seek (initial_offset_ > 0). In
+ // particular, a run of kMiddleType and kLastType records can be silently
+ // skipped in this mode
+ bool resyncing_;
};
} // namespace log
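
A minimal read-loop sketch for the Reader declared above (not part of the patch). It assumes a SequentialFile obtained elsewhere, e.g. via Env::NewSequentialFile(), and passes a null reporter, which ReportDrop() in log_reader.cc tolerates:

#include <cstdint>
#include <string>

#include "db/log_reader.h"
#include "leveldb/env.h"
#include "leveldb/slice.h"

uint64_t CountLogRecords(leveldb::SequentialFile* file) {
  leveldb::log::Reader reader(file, /*reporter=*/nullptr, /*checksum=*/true,
                              /*initial_offset=*/0);
  leveldb::Slice record;
  std::string scratch;
  uint64_t count = 0;
  while (reader.ReadRecord(&record, &scratch)) {
    ++count;  // `record` is only valid until the next ReadRecord() call
  }
  return count;
}
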
diff --git a/src/leveldb/db/log_test.cc b/src/leveldb/db/log_test.cc
index 48a5928657..41fc043068 100644
--- a/src/leveldb/db/log_test.cc
+++ b/src/leveldb/db/log_test.cc
@@ -37,87 +37,12 @@ static std::string RandomSkewedString(int i, Random* rnd) {
}
class LogTest {
- private:
- class StringDest : public WritableFile {
- public:
- std::string contents_;
-
- virtual Status Close() { return Status::OK(); }
- virtual Status Flush() { return Status::OK(); }
- virtual Status Sync() { return Status::OK(); }
- virtual Status Append(const Slice& slice) {
- contents_.append(slice.data(), slice.size());
- return Status::OK();
- }
- };
-
- class StringSource : public SequentialFile {
- public:
- Slice contents_;
- bool force_error_;
- bool returned_partial_;
- StringSource() : force_error_(false), returned_partial_(false) { }
-
- virtual Status Read(size_t n, Slice* result, char* scratch) {
- ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
-
- if (force_error_) {
- force_error_ = false;
- returned_partial_ = true;
- return Status::Corruption("read error");
- }
-
- if (contents_.size() < n) {
- n = contents_.size();
- returned_partial_ = true;
- }
- *result = Slice(contents_.data(), n);
- contents_.remove_prefix(n);
- return Status::OK();
- }
-
- virtual Status Skip(uint64_t n) {
- if (n > contents_.size()) {
- contents_.clear();
- return Status::NotFound("in-memory file skipped past end");
- }
-
- contents_.remove_prefix(n);
-
- return Status::OK();
- }
- };
-
- class ReportCollector : public Reader::Reporter {
- public:
- size_t dropped_bytes_;
- std::string message_;
-
- ReportCollector() : dropped_bytes_(0) { }
- virtual void Corruption(size_t bytes, const Status& status) {
- dropped_bytes_ += bytes;
- message_.append(status.ToString());
- }
- };
-
- StringDest dest_;
- StringSource source_;
- ReportCollector report_;
- bool reading_;
- Writer* writer_;
- Reader* reader_;
-
- // Record metadata for testing initial offset functionality
- static size_t initial_offset_record_sizes_[];
- static uint64_t initial_offset_last_record_offsets_[];
- static int num_initial_offset_records_;
-
public:
- LogTest() : reading_(false),
- writer_(new Writer(&dest_)),
- reader_(new Reader(&source_, &report_, true/*checksum*/,
- 0/*initial_offset*/)) {
- }
+ LogTest()
+ : reading_(false),
+ writer_(new Writer(&dest_)),
+ reader_(new Reader(&source_, &report_, true /*checksum*/,
+ 0 /*initial_offset*/)) {}
~LogTest() {
delete writer_;
@@ -134,9 +59,7 @@ class LogTest {
writer_->AddRecord(Slice(msg));
}
- size_t WrittenBytes() const {
- return dest_.contents_.size();
- }
+ size_t WrittenBytes() const { return dest_.contents_.size(); }
std::string Read() {
if (!reading_) {
@@ -166,22 +89,16 @@ class LogTest {
void FixChecksum(int header_offset, int len) {
// Compute crc of type/len/data
- uint32_t crc = crc32c::Value(&dest_.contents_[header_offset+6], 1 + len);
+ uint32_t crc = crc32c::Value(&dest_.contents_[header_offset + 6], 1 + len);
crc = crc32c::Mask(crc);
EncodeFixed32(&dest_.contents_[header_offset], crc);
}
- void ForceError() {
- source_.force_error_ = true;
- }
+ void ForceError() { source_.force_error_ = true; }
- size_t DroppedBytes() const {
- return report_.dropped_bytes_;
- }
+ size_t DroppedBytes() const { return report_.dropped_bytes_; }
- std::string ReportMessage() const {
- return report_.message_;
- }
+ std::string ReportMessage() const { return report_.message_; }
// Returns OK iff recorded error message contains "msg"
std::string MatchError(const std::string& msg) const {
@@ -202,14 +119,14 @@ class LogTest {
void StartReadingAt(uint64_t initial_offset) {
delete reader_;
- reader_ = new Reader(&source_, &report_, true/*checksum*/, initial_offset);
+ reader_ = new Reader(&source_, &report_, true /*checksum*/, initial_offset);
}
void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
WriteInitialOffsetLog();
reading_ = true;
source_.contents_ = Slice(dest_.contents_);
- Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
+ Reader* offset_reader = new Reader(&source_, &report_, true /*checksum*/,
WrittenBytes() + offset_past_end);
Slice record;
std::string scratch;
@@ -222,8 +139,8 @@ class LogTest {
WriteInitialOffsetLog();
reading_ = true;
source_.contents_ = Slice(dest_.contents_);
- Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
- initial_offset);
+ Reader* offset_reader =
+ new Reader(&source_, &report_, true /*checksum*/, initial_offset);
// Read all records from expected_record_offset through the last one.
ASSERT_LT(expected_record_offset, num_initial_offset_records_);
@@ -240,36 +157,110 @@ class LogTest {
}
delete offset_reader;
}
+
+ private:
+ class StringDest : public WritableFile {
+ public:
+ Status Close() override { return Status::OK(); }
+ Status Flush() override { return Status::OK(); }
+ Status Sync() override { return Status::OK(); }
+ Status Append(const Slice& slice) override {
+ contents_.append(slice.data(), slice.size());
+ return Status::OK();
+ }
+ std::string GetName() const override { return ""; }
+
+ std::string contents_;
+ };
+
+ class StringSource : public SequentialFile {
+ public:
+ StringSource() : force_error_(false), returned_partial_(false) {}
+
+ Status Read(size_t n, Slice* result, char* scratch) override {
+ ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
+
+ if (force_error_) {
+ force_error_ = false;
+ returned_partial_ = true;
+ return Status::Corruption("read error");
+ }
+
+ if (contents_.size() < n) {
+ n = contents_.size();
+ returned_partial_ = true;
+ }
+ *result = Slice(contents_.data(), n);
+ contents_.remove_prefix(n);
+ return Status::OK();
+ }
+
+ Status Skip(uint64_t n) override {
+ if (n > contents_.size()) {
+ contents_.clear();
+ return Status::NotFound("in-memory file skipped past end");
+ }
+
+ contents_.remove_prefix(n);
+
+ return Status::OK();
+ }
+ std::string GetName() const { return ""; }
+
+ Slice contents_;
+ bool force_error_;
+ bool returned_partial_;
+ };
+
+ class ReportCollector : public Reader::Reporter {
+ public:
+ ReportCollector() : dropped_bytes_(0) {}
+ void Corruption(size_t bytes, const Status& status) override {
+ dropped_bytes_ += bytes;
+ message_.append(status.ToString());
+ }
+
+ size_t dropped_bytes_;
+ std::string message_;
+ };
+
+ // Record metadata for testing initial offset functionality
+ static size_t initial_offset_record_sizes_[];
+ static uint64_t initial_offset_last_record_offsets_[];
+ static int num_initial_offset_records_;
+
+ StringDest dest_;
+ StringSource source_;
+ ReportCollector report_;
+ bool reading_;
+ Writer* writer_;
+ Reader* reader_;
+};
+
+size_t LogTest::initial_offset_record_sizes_[] = {
+ 10000, // Two sizable records in first block
+ 10000,
+ 2 * log::kBlockSize - 1000, // Span three blocks
+ 1,
+ 13716, // Consume all but two bytes of block 3.
+ log::kBlockSize - kHeaderSize, // Consume the entirety of block 4.
};
-size_t LogTest::initial_offset_record_sizes_[] =
- {10000, // Two sizable records in first block
- 10000,
- 2 * log::kBlockSize - 1000, // Span three blocks
- 1,
- 13716, // Consume all but two bytes of block 3.
- log::kBlockSize - kHeaderSize, // Consume the entirety of block 4.
- };
-
-uint64_t LogTest::initial_offset_last_record_offsets_[] =
- {0,
- kHeaderSize + 10000,
- 2 * (kHeaderSize + 10000),
- 2 * (kHeaderSize + 10000) +
- (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
- 2 * (kHeaderSize + 10000) +
- (2 * log::kBlockSize - 1000) + 3 * kHeaderSize
- + kHeaderSize + 1,
- 3 * log::kBlockSize,
- };
+uint64_t LogTest::initial_offset_last_record_offsets_[] = {
+ 0,
+ kHeaderSize + 10000,
+ 2 * (kHeaderSize + 10000),
+ 2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
+ 2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize +
+ kHeaderSize + 1,
+ 3 * log::kBlockSize,
+};
// LogTest::initial_offset_last_record_offsets_ must be defined before this.
int LogTest::num_initial_offset_records_ =
- sizeof(LogTest::initial_offset_last_record_offsets_)/sizeof(uint64_t);
+ sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t);
-TEST(LogTest, Empty) {
- ASSERT_EQ("EOF", Read());
-}
+TEST(LogTest, Empty) { ASSERT_EQ("EOF", Read()); }
TEST(LogTest, ReadWrite) {
Write("foo");
@@ -306,7 +297,7 @@ TEST(LogTest, Fragmentation) {
TEST(LogTest, MarginalTrailer) {
// Make a trailer that is exactly the same length as an empty record.
- const int n = kBlockSize - 2*kHeaderSize;
+ const int n = kBlockSize - 2 * kHeaderSize;
Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
Write("");
@@ -319,7 +310,7 @@ TEST(LogTest, MarginalTrailer) {
TEST(LogTest, MarginalTrailer2) {
// Make a trailer that is exactly the same length as an empty record.
- const int n = kBlockSize - 2*kHeaderSize;
+ const int n = kBlockSize - 2 * kHeaderSize;
Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
Write("bar");
@@ -331,7 +322,7 @@ TEST(LogTest, MarginalTrailer2) {
}
TEST(LogTest, ShortTrailer) {
- const int n = kBlockSize - 2*kHeaderSize + 4;
+ const int n = kBlockSize - 2 * kHeaderSize + 4;
Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
Write("");
@@ -343,7 +334,7 @@ TEST(LogTest, ShortTrailer) {
}
TEST(LogTest, AlignedEof) {
- const int n = kBlockSize - 2*kHeaderSize + 4;
+ const int n = kBlockSize - 2 * kHeaderSize + 4;
Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
ASSERT_EQ(BigString("foo", n), Read());
@@ -394,7 +385,7 @@ TEST(LogTest, BadRecordType) {
TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
Write("foo");
- ShrinkSize(4); // Drop all payload as well as a header byte
+ ShrinkSize(4); // Drop all payload as well as a header byte
ASSERT_EQ("EOF", Read());
// Truncated last record is ignored, not treated as an error.
ASSERT_EQ(0, DroppedBytes());
@@ -492,7 +483,7 @@ TEST(LogTest, SkipIntoMultiRecord) {
// If initial_offset points to a record after first(R1) but before first(R2)
// incomplete fragment errors are not actual errors, and must be suppressed
// until a new first or full record is encountered.
- Write(BigString("foo", 3*kBlockSize));
+ Write(BigString("foo", 3 * kBlockSize));
Write("correct");
StartReadingAt(kBlockSize);
@@ -514,44 +505,30 @@ TEST(LogTest, ErrorJoinsRecords) {
Write("correct");
// Wipe the middle block
- for (int offset = kBlockSize; offset < 2*kBlockSize; offset++) {
+ for (int offset = kBlockSize; offset < 2 * kBlockSize; offset++) {
SetByte(offset, 'x');
}
ASSERT_EQ("correct", Read());
ASSERT_EQ("EOF", Read());
const size_t dropped = DroppedBytes();
- ASSERT_LE(dropped, 2*kBlockSize + 100);
- ASSERT_GE(dropped, 2*kBlockSize);
+ ASSERT_LE(dropped, 2 * kBlockSize + 100);
+ ASSERT_GE(dropped, 2 * kBlockSize);
}
-TEST(LogTest, ReadStart) {
- CheckInitialOffsetRecord(0, 0);
-}
+TEST(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); }
-TEST(LogTest, ReadSecondOneOff) {
- CheckInitialOffsetRecord(1, 1);
-}
+TEST(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); }
-TEST(LogTest, ReadSecondTenThousand) {
- CheckInitialOffsetRecord(10000, 1);
-}
+TEST(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); }
-TEST(LogTest, ReadSecondStart) {
- CheckInitialOffsetRecord(10007, 1);
-}
+TEST(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); }
-TEST(LogTest, ReadThirdOneOff) {
- CheckInitialOffsetRecord(10008, 2);
-}
+TEST(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); }
-TEST(LogTest, ReadThirdStart) {
- CheckInitialOffsetRecord(20014, 2);
-}
+TEST(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); }
-TEST(LogTest, ReadFourthOneOff) {
- CheckInitialOffsetRecord(20015, 3);
-}
+TEST(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); }
TEST(LogTest, ReadFourthFirstBlockTrailer) {
CheckInitialOffsetRecord(log::kBlockSize - 4, 3);
@@ -575,17 +552,11 @@ TEST(LogTest, ReadInitialOffsetIntoBlockPadding) {
CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5);
}
-TEST(LogTest, ReadEnd) {
- CheckOffsetPastEndReturnsNoRecords(0);
-}
+TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); }
-TEST(LogTest, ReadPastEnd) {
- CheckOffsetPastEndReturnsNoRecords(5);
-}
+TEST(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
} // namespace log
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/log_writer.cc b/src/leveldb/db/log_writer.cc
index 74a03270da..bfb16fb486 100644
--- a/src/leveldb/db/log_writer.cc
+++ b/src/leveldb/db/log_writer.cc
@@ -5,6 +5,7 @@
#include "db/log_writer.h"
#include <stdint.h>
+
#include "leveldb/env.h"
#include "util/coding.h"
#include "util/crc32c.h"
@@ -19,9 +20,7 @@ static void InitTypeCrc(uint32_t* type_crc) {
}
}
-Writer::Writer(WritableFile* dest)
- : dest_(dest),
- block_offset_(0) {
+Writer::Writer(WritableFile* dest) : dest_(dest), block_offset_(0) {
InitTypeCrc(type_crc_);
}
@@ -30,8 +29,7 @@ Writer::Writer(WritableFile* dest, uint64_t dest_length)
InitTypeCrc(type_crc_);
}
-Writer::~Writer() {
-}
+Writer::~Writer() = default;
Status Writer::AddRecord(const Slice& slice) {
const char* ptr = slice.data();
@@ -49,7 +47,7 @@ Status Writer::AddRecord(const Slice& slice) {
// Switch to a new block
if (leftover > 0) {
// Fill the trailer (literal below relies on kHeaderSize being 7)
- assert(kHeaderSize == 7);
+ static_assert(kHeaderSize == 7, "");
dest_->Append(Slice("\x00\x00\x00\x00\x00\x00", leftover));
}
block_offset_ = 0;
@@ -81,30 +79,31 @@ Status Writer::AddRecord(const Slice& slice) {
return s;
}
-Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr, size_t n) {
- assert(n <= 0xffff); // Must fit in two bytes
- assert(block_offset_ + kHeaderSize + n <= kBlockSize);
+Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr,
+ size_t length) {
+ assert(length <= 0xffff); // Must fit in two bytes
+ assert(block_offset_ + kHeaderSize + length <= kBlockSize);
// Format the header
char buf[kHeaderSize];
- buf[4] = static_cast<char>(n & 0xff);
- buf[5] = static_cast<char>(n >> 8);
+ buf[4] = static_cast<char>(length & 0xff);
+ buf[5] = static_cast<char>(length >> 8);
buf[6] = static_cast<char>(t);
// Compute the crc of the record type and the payload.
- uint32_t crc = crc32c::Extend(type_crc_[t], ptr, n);
- crc = crc32c::Mask(crc); // Adjust for storage
+ uint32_t crc = crc32c::Extend(type_crc_[t], ptr, length);
+ crc = crc32c::Mask(crc); // Adjust for storage
EncodeFixed32(buf, crc);
// Write the header and the payload
Status s = dest_->Append(Slice(buf, kHeaderSize));
if (s.ok()) {
- s = dest_->Append(Slice(ptr, n));
+ s = dest_->Append(Slice(ptr, length));
if (s.ok()) {
s = dest_->Flush();
}
}
- block_offset_ += kHeaderSize + n;
+ block_offset_ += kHeaderSize + length;
return s;
}
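
For reference, a standalone sketch (not part of the patch) of the 7-byte header that EmitPhysicalRecord() assembles above: bytes 0-3 hold the masked crc32c of the type byte plus payload (stored little-endian by EncodeFixed32), bytes 4-5 the payload length, and byte 6 the record type:

#include <cstdint>

// Mirrors the layout written by EmitPhysicalRecord(); `masked_crc` is assumed
// to already be crc32c::Mask(crc32c::Extend(type_crc[t], ptr, length)).
void EncodeRecordHeader(char buf[7], uint32_t masked_crc, uint16_t length,
                        uint8_t type) {
  buf[0] = static_cast<char>(masked_crc & 0xff);         // little-endian crc,
  buf[1] = static_cast<char>((masked_crc >> 8) & 0xff);  // matching EncodeFixed32
  buf[2] = static_cast<char>((masked_crc >> 16) & 0xff);
  buf[3] = static_cast<char>((masked_crc >> 24) & 0xff);
  buf[4] = static_cast<char>(length & 0xff);
  buf[5] = static_cast<char>(length >> 8);
  buf[6] = static_cast<char>(type);
}
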
diff --git a/src/leveldb/db/log_writer.h b/src/leveldb/db/log_writer.h
index 9e7cc4705b..c0a21147ee 100644
--- a/src/leveldb/db/log_writer.h
+++ b/src/leveldb/db/log_writer.h
@@ -6,6 +6,7 @@
#define STORAGE_LEVELDB_DB_LOG_WRITER_H_
#include <stdint.h>
+
#include "db/log_format.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"
@@ -28,24 +29,23 @@ class Writer {
// "*dest" must remain live while this Writer is in use.
Writer(WritableFile* dest, uint64_t dest_length);
+ Writer(const Writer&) = delete;
+ Writer& operator=(const Writer&) = delete;
+
~Writer();
Status AddRecord(const Slice& slice);
private:
+ Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
+
WritableFile* dest_;
- int block_offset_; // Current offset in block
+ int block_offset_; // Current offset in block
// crc32c values for all supported record types. These are
// pre-computed to reduce the overhead of computing the crc of the
// record type stored in the header.
uint32_t type_crc_[kMaxRecordType + 1];
-
- Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
-
- // No copying allowed
- Writer(const Writer&);
- void operator=(const Writer&);
};
} // namespace log
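
A minimal append sketch for the Writer declared above (not part of the patch), assuming a WritableFile created elsewhere, e.g. via Env::NewWritableFile():

#include "db/log_writer.h"
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"

leveldb::Status AppendOneRecord(leveldb::WritableFile* dest,
                                const leveldb::Slice& payload) {
  leveldb::log::Writer writer(dest);  // starts at block offset 0
  leveldb::Status s = writer.AddRecord(payload);
  if (s.ok()) {
    s = dest->Sync();  // AddRecord() only Flush()es via EmitPhysicalRecord()
  }
  return s;
}
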
diff --git a/src/leveldb/db/memtable.cc b/src/leveldb/db/memtable.cc
index 287afdbdcb..00931d4671 100644
--- a/src/leveldb/db/memtable.cc
+++ b/src/leveldb/db/memtable.cc
@@ -18,20 +18,15 @@ static Slice GetLengthPrefixedSlice(const char* data) {
return Slice(p, len);
}
-MemTable::MemTable(const InternalKeyComparator& cmp)
- : comparator_(cmp),
- refs_(0),
- table_(comparator_, &arena_) {
-}
+MemTable::MemTable(const InternalKeyComparator& comparator)
+ : comparator_(comparator), refs_(0), table_(comparator_, &arena_) {}
-MemTable::~MemTable() {
- assert(refs_ == 0);
-}
+MemTable::~MemTable() { assert(refs_ == 0); }
size_t MemTable::ApproximateMemoryUsage() { return arena_.MemoryUsage(); }
-int MemTable::KeyComparator::operator()(const char* aptr, const char* bptr)
- const {
+int MemTable::KeyComparator::operator()(const char* aptr,
+ const char* bptr) const {
// Internal keys are encoded as length-prefixed strings.
Slice a = GetLengthPrefixedSlice(aptr);
Slice b = GetLengthPrefixedSlice(bptr);
@@ -48,39 +43,37 @@ static const char* EncodeKey(std::string* scratch, const Slice& target) {
return scratch->data();
}
-class MemTableIterator: public Iterator {
+class MemTableIterator : public Iterator {
public:
- explicit MemTableIterator(MemTable::Table* table) : iter_(table) { }
-
- virtual bool Valid() const { return iter_.Valid(); }
- virtual void Seek(const Slice& k) { iter_.Seek(EncodeKey(&tmp_, k)); }
- virtual void SeekToFirst() { iter_.SeekToFirst(); }
- virtual void SeekToLast() { iter_.SeekToLast(); }
- virtual void Next() { iter_.Next(); }
- virtual void Prev() { iter_.Prev(); }
- virtual Slice key() const { return GetLengthPrefixedSlice(iter_.key()); }
- virtual Slice value() const {
+ explicit MemTableIterator(MemTable::Table* table) : iter_(table) {}
+
+ MemTableIterator(const MemTableIterator&) = delete;
+ MemTableIterator& operator=(const MemTableIterator&) = delete;
+
+ ~MemTableIterator() override = default;
+
+ bool Valid() const override { return iter_.Valid(); }
+ void Seek(const Slice& k) override { iter_.Seek(EncodeKey(&tmp_, k)); }
+ void SeekToFirst() override { iter_.SeekToFirst(); }
+ void SeekToLast() override { iter_.SeekToLast(); }
+ void Next() override { iter_.Next(); }
+ void Prev() override { iter_.Prev(); }
+ Slice key() const override { return GetLengthPrefixedSlice(iter_.key()); }
+ Slice value() const override {
Slice key_slice = GetLengthPrefixedSlice(iter_.key());
return GetLengthPrefixedSlice(key_slice.data() + key_slice.size());
}
- virtual Status status() const { return Status::OK(); }
+ Status status() const override { return Status::OK(); }
private:
MemTable::Table::Iterator iter_;
- std::string tmp_; // For passing to EncodeKey
-
- // No copying allowed
- MemTableIterator(const MemTableIterator&);
- void operator=(const MemTableIterator&);
+ std::string tmp_; // For passing to EncodeKey
};
-Iterator* MemTable::NewIterator() {
- return new MemTableIterator(&table_);
-}
+Iterator* MemTable::NewIterator() { return new MemTableIterator(&table_); }
-void MemTable::Add(SequenceNumber s, ValueType type,
- const Slice& key,
+void MemTable::Add(SequenceNumber s, ValueType type, const Slice& key,
const Slice& value) {
// Format of an entry is concatenation of:
// key_size : varint32 of internal_key.size()
@@ -90,9 +83,9 @@ void MemTable::Add(SequenceNumber s, ValueType type,
size_t key_size = key.size();
size_t val_size = value.size();
size_t internal_key_size = key_size + 8;
- const size_t encoded_len =
- VarintLength(internal_key_size) + internal_key_size +
- VarintLength(val_size) + val_size;
+ const size_t encoded_len = VarintLength(internal_key_size) +
+ internal_key_size + VarintLength(val_size) +
+ val_size;
char* buf = arena_.Allocate(encoded_len);
char* p = EncodeVarint32(buf, internal_key_size);
memcpy(p, key.data(), key_size);
@@ -121,10 +114,9 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s) {
// all entries with overly large sequence numbers.
const char* entry = iter.key();
uint32_t key_length;
- const char* key_ptr = GetVarint32Ptr(entry, entry+5, &key_length);
+ const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
if (comparator_.comparator.user_comparator()->Compare(
- Slice(key_ptr, key_length - 8),
- key.user_key()) == 0) {
+ Slice(key_ptr, key_length - 8), key.user_key()) == 0) {
// Correct user key
const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
switch (static_cast<ValueType>(tag & 0xff)) {
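
A minimal sketch (not part of the patch) of the arena entry layout that MemTable::Add() above produces, written into a std::string for illustration. The tag packing (sequence number in the upper 56 bits, ValueType in the low byte) matches the `tag & 0xff` decode in MemTable::Get(); PutVarint32/PutFixed64 are assumed to be the util/coding.h helpers:

#include <cstdint>
#include <string>

#include "db/dbformat.h"
#include "leveldb/slice.h"
#include "util/coding.h"

std::string EncodeMemTableEntry(const leveldb::Slice& key,
                                const leveldb::Slice& value,
                                leveldb::SequenceNumber seq,
                                leveldb::ValueType type) {
  std::string buf;
  // varint32(key size + 8) | key bytes | 8-byte tag | varint32(value size) | value bytes
  leveldb::PutVarint32(&buf, static_cast<uint32_t>(key.size()) + 8);
  buf.append(key.data(), key.size());
  leveldb::PutFixed64(&buf, (seq << 8) | type);
  leveldb::PutVarint32(&buf, static_cast<uint32_t>(value.size()));
  buf.append(value.data(), value.size());
  return buf;
}
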
diff --git a/src/leveldb/db/memtable.h b/src/leveldb/db/memtable.h
index 9f41567cde..9d986b1070 100644
--- a/src/leveldb/db/memtable.h
+++ b/src/leveldb/db/memtable.h
@@ -6,15 +6,15 @@
#define STORAGE_LEVELDB_DB_MEMTABLE_H_
#include <string>
-#include "leveldb/db.h"
+
#include "db/dbformat.h"
#include "db/skiplist.h"
+#include "leveldb/db.h"
#include "util/arena.h"
namespace leveldb {
class InternalKeyComparator;
-class Mutex;
class MemTableIterator;
class MemTable {
@@ -23,6 +23,9 @@ class MemTable {
// is zero and the caller must call Ref() at least once.
explicit MemTable(const InternalKeyComparator& comparator);
+ MemTable(const MemTable&) = delete;
+ MemTable& operator=(const MemTable&) = delete;
+
// Increase reference count.
void Ref() { ++refs_; }
@@ -50,8 +53,7 @@ class MemTable {
// Add an entry into memtable that maps key to value at the
// specified sequence number and with the specified type.
// Typically value will be empty if type==kTypeDeletion.
- void Add(SequenceNumber seq, ValueType type,
- const Slice& key,
+ void Add(SequenceNumber seq, ValueType type, const Slice& key,
const Slice& value);
// If memtable contains a value for key, store it in *value and return true.
@@ -61,26 +63,23 @@ class MemTable {
bool Get(const LookupKey& key, std::string* value, Status* s);
private:
- ~MemTable(); // Private since only Unref() should be used to delete it
+ friend class MemTableIterator;
+ friend class MemTableBackwardIterator;
struct KeyComparator {
const InternalKeyComparator comparator;
- explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) { }
+ explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) {}
int operator()(const char* a, const char* b) const;
};
- friend class MemTableIterator;
- friend class MemTableBackwardIterator;
typedef SkipList<const char*, KeyComparator> Table;
+ ~MemTable(); // Private since only Unref() should be used to delete it
+
KeyComparator comparator_;
int refs_;
Arena arena_;
Table table_;
-
- // No copying allowed
- MemTable(const MemTable&);
- void operator=(const MemTable&);
};
} // namespace leveldb
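
A minimal usage sketch for the class above (not part of the patch), assuming the InternalKeyComparator and LookupKey constructors from db/dbformat.h; since the destructor is private, the table is released through Unref():

#include <string>

#include "db/dbformat.h"
#include "db/memtable.h"
#include "leveldb/comparator.h"
#include "leveldb/status.h"

void MemTableSmokeTest() {
  leveldb::InternalKeyComparator cmp(leveldb::BytewiseComparator());
  leveldb::MemTable* mem = new leveldb::MemTable(cmp);
  mem->Ref();

  mem->Add(/*seq=*/1, leveldb::kTypeValue, "key", "value");

  std::string result;
  leveldb::Status s;
  leveldb::LookupKey lkey("key", /*sequence=*/1);
  if (mem->Get(lkey, &result, &s)) {
    // result now holds "value"
  }

  mem->Unref();  // drops the last reference and deletes the table
}
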
diff --git a/src/leveldb/db/recovery_test.cc b/src/leveldb/db/recovery_test.cc
index 9596f4288a..547a9591ea 100644
--- a/src/leveldb/db/recovery_test.cc
+++ b/src/leveldb/db/recovery_test.cc
@@ -17,7 +17,7 @@ namespace leveldb {
class RecoveryTest {
public:
- RecoveryTest() : env_(Env::Default()), db_(NULL) {
+ RecoveryTest() : env_(Env::Default()), db_(nullptr) {
dbname_ = test::TmpDir() + "/recovery_test";
DestroyDB(dbname_, Options());
Open();
@@ -44,22 +44,26 @@ class RecoveryTest {
void Close() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
}
- void Open(Options* options = NULL) {
+ Status OpenWithStatus(Options* options = nullptr) {
Close();
Options opts;
- if (options != NULL) {
+ if (options != nullptr) {
opts = *options;
} else {
opts.reuse_logs = true; // TODO(sanjay): test both ways
opts.create_if_missing = true;
}
- if (opts.env == NULL) {
+ if (opts.env == nullptr) {
opts.env = env_;
}
- ASSERT_OK(DB::Open(opts, dbname_, &db_));
+ return DB::Open(opts, dbname_, &db_);
+ }
+
+ void Open(Options* options = nullptr) {
+ ASSERT_OK(OpenWithStatus(options));
ASSERT_EQ(1, NumLogs());
}
@@ -67,7 +71,7 @@ class RecoveryTest {
return db_->Put(WriteOptions(), k, v);
}
- std::string Get(const std::string& k, const Snapshot* snapshot = NULL) {
+ std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
std::string result;
Status s = db_->Get(ReadOptions(), k, &result);
if (s.IsNotFound()) {
@@ -82,17 +86,18 @@ class RecoveryTest {
std::string current;
ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), &current));
size_t len = current.size();
- if (len > 0 && current[len-1] == '\n') {
+ if (len > 0 && current[len - 1] == '\n') {
current.resize(len - 1);
}
return dbname_ + "/" + current;
}
- std::string LogName(uint64_t number) {
- return LogFileName(dbname_, number);
- }
+ std::string LogName(uint64_t number) { return LogFileName(dbname_, number); }
size_t DeleteLogFiles() {
+ // Linux allows unlinking open files, but Windows does not.
+ // Closing the db allows for file deletion.
+ Close();
std::vector<uint64_t> logs = GetFiles(kLogFile);
for (size_t i = 0; i < logs.size(); i++) {
ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]);
@@ -100,9 +105,9 @@ class RecoveryTest {
return logs.size();
}
- uint64_t FirstLogFile() {
- return GetFiles(kLogFile)[0];
- }
+ void DeleteManifestFile() { ASSERT_OK(env_->DeleteFile(ManifestFileName())); }
+
+ uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; }
std::vector<uint64_t> GetFiles(FileType t) {
std::vector<std::string> filenames;
@@ -118,13 +123,9 @@ class RecoveryTest {
return result;
}
- int NumLogs() {
- return GetFiles(kLogFile).size();
- }
+ int NumLogs() { return GetFiles(kLogFile).size(); }
- int NumTables() {
- return GetFiles(kTableFile).size();
- }
+ int NumTables() { return GetFiles(kTableFile).size(); }
uint64_t FileSize(const std::string& fname) {
uint64_t result;
@@ -132,9 +133,7 @@ class RecoveryTest {
return result;
}
- void CompactMemTable() {
- dbfull()->TEST_CompactMemTable();
- }
+ void CompactMemTable() { dbfull()->TEST_CompactMemTable(); }
// Directly construct a log file that sets key to val.
void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
@@ -186,7 +185,7 @@ TEST(RecoveryTest, LargeManifestCompacted) {
uint64_t len = FileSize(old_manifest);
WritableFile* file;
ASSERT_OK(env()->NewAppendableFile(old_manifest, &file));
- std::string zeroes(3*1048576 - static_cast<size_t>(len), 0);
+ std::string zeroes(3 * 1048576 - static_cast<size_t>(len), 0);
ASSERT_OK(file->Append(zeroes));
ASSERT_OK(file->Flush());
delete file;
@@ -259,7 +258,7 @@ TEST(RecoveryTest, MultipleMemTables) {
// Force creation of multiple memtables by reducing the write buffer size.
Options opt;
opt.reuse_logs = true;
- opt.write_buffer_size = (kNum*100) / 2;
+ opt.write_buffer_size = (kNum * 100) / 2;
Open(&opt);
ASSERT_LE(2, NumTables());
ASSERT_EQ(1, NumLogs());
@@ -278,16 +277,16 @@ TEST(RecoveryTest, MultipleLogFiles) {
// Make a bunch of uncompacted log files.
uint64_t old_log = FirstLogFile();
- MakeLogFile(old_log+1, 1000, "hello", "world");
- MakeLogFile(old_log+2, 1001, "hi", "there");
- MakeLogFile(old_log+3, 1002, "foo", "bar2");
+ MakeLogFile(old_log + 1, 1000, "hello", "world");
+ MakeLogFile(old_log + 2, 1001, "hi", "there");
+ MakeLogFile(old_log + 3, 1002, "foo", "bar2");
// Recover and check that all log files were processed.
Open();
ASSERT_LE(1, NumTables());
ASSERT_EQ(1, NumLogs());
uint64_t new_log = FirstLogFile();
- ASSERT_LE(old_log+3, new_log);
+ ASSERT_LE(old_log + 3, new_log);
ASSERT_EQ("bar2", Get("foo"));
ASSERT_EQ("world", Get("hello"));
ASSERT_EQ("there", Get("hi"));
@@ -305,7 +304,7 @@ TEST(RecoveryTest, MultipleLogFiles) {
// Check that introducing an older log file does not cause it to be re-read.
Close();
- MakeLogFile(old_log+1, 2000, "hello", "stale write");
+ MakeLogFile(old_log + 1, 2000, "hello", "stale write");
Open();
ASSERT_LE(1, NumTables());
ASSERT_EQ(1, NumLogs());
@@ -317,8 +316,15 @@ TEST(RecoveryTest, MultipleLogFiles) {
ASSERT_EQ("there", Get("hi"));
}
-} // namespace leveldb
+TEST(RecoveryTest, ManifestMissing) {
+ ASSERT_OK(Put("foo", "bar"));
+ Close();
+ DeleteManifestFile();
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
+ Status status = OpenWithStatus();
+ ASSERT_TRUE(status.IsCorruption());
}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/repair.cc b/src/leveldb/db/repair.cc
index 7281e3d345..04847c3bbf 100644
--- a/src/leveldb/db/repair.cc
+++ b/src/leveldb/db/repair.cc
@@ -54,7 +54,7 @@ class Repairer {
owns_cache_(options_.block_cache != options.block_cache),
next_file_number_(1) {
// TableCache can be small since we expect each table to be opened once.
- table_cache_ = new TableCache(dbname_, &options_, 10);
+ table_cache_ = new TableCache(dbname_, options_, 10);
}
~Repairer() {
@@ -84,9 +84,7 @@ class Repairer {
"recovered %d files; %llu bytes. "
"Some data may have been lost. "
"****",
- dbname_.c_str(),
- static_cast<int>(tables_.size()),
- bytes);
+ dbname_.c_str(), static_cast<int>(tables_.size()), bytes);
}
return status;
}
@@ -97,22 +95,6 @@ class Repairer {
SequenceNumber max_sequence;
};
- std::string const dbname_;
- Env* const env_;
- InternalKeyComparator const icmp_;
- InternalFilterPolicy const ipolicy_;
- Options const options_;
- bool owns_info_log_;
- bool owns_cache_;
- TableCache* table_cache_;
- VersionEdit edit_;
-
- std::vector<std::string> manifests_;
- std::vector<uint64_t> table_numbers_;
- std::vector<uint64_t> logs_;
- std::vector<TableInfo> tables_;
- uint64_t next_file_number_;
-
Status FindFiles() {
std::vector<std::string> filenames;
Status status = env_->GetChildren(dbname_, &filenames);
@@ -152,8 +134,7 @@ class Repairer {
Status status = ConvertLogToTable(logs_[i]);
if (!status.ok()) {
Log(options_.info_log, "Log #%llu: ignoring conversion error: %s",
- (unsigned long long) logs_[i],
- status.ToString().c_str());
+ (unsigned long long)logs_[i], status.ToString().c_str());
}
ArchiveFile(logname);
}
@@ -164,11 +145,10 @@ class Repairer {
Env* env;
Logger* info_log;
uint64_t lognum;
- virtual void Corruption(size_t bytes, const Status& s) {
+ void Corruption(size_t bytes, const Status& s) override {
// We print error messages for corruption, but continue repairing.
Log(info_log, "Log #%llu: dropping %d bytes; %s",
- (unsigned long long) lognum,
- static_cast<int>(bytes),
+ (unsigned long long)lognum, static_cast<int>(bytes),
s.ToString().c_str());
}
};
@@ -190,8 +170,8 @@ class Repairer {
// corruptions cause entire commits to be skipped instead of
// propagating bad information (like overly large sequence
// numbers).
- log::Reader reader(lfile, &reporter, false/*do not checksum*/,
- 0/*initial_offset*/);
+ log::Reader reader(lfile, &reporter, false /*do not checksum*/,
+ 0 /*initial_offset*/);
// Read all the records and add to a memtable
std::string scratch;
@@ -202,8 +182,8 @@ class Repairer {
int counter = 0;
while (reader.ReadRecord(&record, &scratch)) {
if (record.size() < 12) {
- reporter.Corruption(
- record.size(), Status::Corruption("log record too small", logname));
+ reporter.Corruption(record.size(),
+ Status::Corruption("log record too small", logname));
continue;
}
WriteBatchInternal::SetContents(&batch, record);
@@ -212,8 +192,7 @@ class Repairer {
counter += WriteBatchInternal::Count(&batch);
} else {
Log(options_.info_log, "Log #%llu: ignoring %s",
- (unsigned long long) log,
- status.ToString().c_str());
+ (unsigned long long)log, status.ToString().c_str());
status = Status::OK(); // Keep going with rest of file
}
}
@@ -227,16 +206,14 @@ class Repairer {
status = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta);
delete iter;
mem->Unref();
- mem = NULL;
+ mem = nullptr;
if (status.ok()) {
if (meta.file_size > 0) {
table_numbers_.push_back(meta.number);
}
}
Log(options_.info_log, "Log #%llu: %d ops saved to Table #%llu %s",
- (unsigned long long) log,
- counter,
- (unsigned long long) meta.number,
+ (unsigned long long)log, counter, (unsigned long long)meta.number,
status.ToString().c_str());
return status;
}
@@ -272,8 +249,7 @@ class Repairer {
ArchiveFile(TableFileName(dbname_, number));
ArchiveFile(SSTTableFileName(dbname_, number));
Log(options_.info_log, "Table #%llu: dropped: %s",
- (unsigned long long) t.meta.number,
- status.ToString().c_str());
+ (unsigned long long)t.meta.number, status.ToString().c_str());
return;
}
@@ -287,8 +263,7 @@ class Repairer {
Slice key = iter->key();
if (!ParseInternalKey(key, &parsed)) {
Log(options_.info_log, "Table #%llu: unparsable key %s",
- (unsigned long long) t.meta.number,
- EscapeString(key).c_str());
+ (unsigned long long)t.meta.number, EscapeString(key).c_str());
continue;
}
@@ -307,9 +282,7 @@ class Repairer {
}
delete iter;
Log(options_.info_log, "Table #%llu: %d entries %s",
- (unsigned long long) t.meta.number,
- counter,
- status.ToString().c_str());
+ (unsigned long long)t.meta.number, counter, status.ToString().c_str());
if (status.ok()) {
tables_.push_back(t);
@@ -350,20 +323,20 @@ class Repairer {
}
}
delete builder;
- builder = NULL;
+ builder = nullptr;
if (s.ok()) {
s = file->Close();
}
delete file;
- file = NULL;
+ file = nullptr;
if (counter > 0 && s.ok()) {
std::string orig = TableFileName(dbname_, t.meta.number);
s = env_->RenameFile(copy, orig);
if (s.ok()) {
Log(options_.info_log, "Table #%llu: %d entries repaired",
- (unsigned long long) t.meta.number, counter);
+ (unsigned long long)t.meta.number, counter);
tables_.push_back(t);
}
}
@@ -395,11 +368,11 @@ class Repairer {
for (size_t i = 0; i < tables_.size(); i++) {
// TODO(opt): separate out into multiple levels
const TableInfo& t = tables_[i];
- edit_.AddFile(0, t.meta.number, t.meta.file_size,
- t.meta.smallest, t.meta.largest);
+ edit_.AddFile(0, t.meta.number, t.meta.file_size, t.meta.smallest,
+ t.meta.largest);
}
- //fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
+ // fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
{
log::Writer log(file);
std::string record;
@@ -410,7 +383,7 @@ class Repairer {
status = file->Close();
}
delete file;
- file = NULL;
+ file = nullptr;
if (!status.ok()) {
env_->DeleteFile(tmp);
@@ -438,18 +411,34 @@ class Repairer {
// dir/lost/foo
const char* slash = strrchr(fname.c_str(), '/');
std::string new_dir;
- if (slash != NULL) {
+ if (slash != nullptr) {
new_dir.assign(fname.data(), slash - fname.data());
}
new_dir.append("/lost");
env_->CreateDir(new_dir); // Ignore error
std::string new_file = new_dir;
new_file.append("/");
- new_file.append((slash == NULL) ? fname.c_str() : slash + 1);
+ new_file.append((slash == nullptr) ? fname.c_str() : slash + 1);
Status s = env_->RenameFile(fname, new_file);
- Log(options_.info_log, "Archiving %s: %s\n",
- fname.c_str(), s.ToString().c_str());
+ Log(options_.info_log, "Archiving %s: %s\n", fname.c_str(),
+ s.ToString().c_str());
}
+
+ const std::string dbname_;
+ Env* const env_;
+ InternalKeyComparator const icmp_;
+ InternalFilterPolicy const ipolicy_;
+ const Options options_;
+ bool owns_info_log_;
+ bool owns_cache_;
+ TableCache* table_cache_;
+ VersionEdit edit_;
+
+ std::vector<std::string> manifests_;
+ std::vector<uint64_t> table_numbers_;
+ std::vector<uint64_t> logs_;
+ std::vector<TableInfo> tables_;
+ uint64_t next_file_number_;
};
} // namespace
diff --git a/src/leveldb/db/skiplist.h b/src/leveldb/db/skiplist.h
index 8bd77764d8..a59b45b380 100644
--- a/src/leveldb/db/skiplist.h
+++ b/src/leveldb/db/skiplist.h
@@ -27,9 +27,10 @@
//
// ... prev vs. next pointer ordering ...
-#include <assert.h>
-#include <stdlib.h>
-#include "port/port.h"
+#include <atomic>
+#include <cassert>
+#include <cstdlib>
+
#include "util/arena.h"
#include "util/random.h"
@@ -37,7 +38,7 @@ namespace leveldb {
class Arena;
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
class SkipList {
private:
struct Node;
@@ -48,6 +49,9 @@ class SkipList {
// must remain allocated for the lifetime of the skiplist object.
explicit SkipList(Comparator cmp, Arena* arena);
+ SkipList(const SkipList&) = delete;
+ SkipList& operator=(const SkipList&) = delete;
+
// Insert key into the list.
// REQUIRES: nothing that compares equal to key is currently in the list.
void Insert(const Key& key);
@@ -97,24 +101,10 @@ class SkipList {
private:
enum { kMaxHeight = 12 };
- // Immutable after construction
- Comparator const compare_;
- Arena* const arena_; // Arena used for allocations of nodes
-
- Node* const head_;
-
- // Modified only by Insert(). Read racily by readers, but stale
- // values are ok.
- port::AtomicPointer max_height_; // Height of the entire list
-
inline int GetMaxHeight() const {
- return static_cast<int>(
- reinterpret_cast<intptr_t>(max_height_.NoBarrier_Load()));
+ return max_height_.load(std::memory_order_relaxed);
}
- // Read/written only by Insert().
- Random rnd_;
-
Node* NewNode(const Key& key, int height);
int RandomHeight();
bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
@@ -123,9 +113,9 @@ class SkipList {
bool KeyIsAfterNode(const Key& key, Node* n) const;
// Return the earliest node that comes at or after key.
- // Return NULL if there is no such node.
+ // Return nullptr if there is no such node.
//
- // If prev is non-NULL, fills prev[level] with pointer to previous
+ // If prev is non-null, fills prev[level] with pointer to previous
// node at "level" for every level in [0..max_height_-1].
Node* FindGreaterOrEqual(const Key& key, Node** prev) const;
@@ -137,15 +127,24 @@ class SkipList {
// Return head_ if list is empty.
Node* FindLast() const;
- // No copying allowed
- SkipList(const SkipList&);
- void operator=(const SkipList&);
+ // Immutable after construction
+ Comparator const compare_;
+ Arena* const arena_; // Arena used for allocations of nodes
+
+ Node* const head_;
+
+ // Modified only by Insert(). Read racily by readers, but stale
+ // values are ok.
+ std::atomic<int> max_height_; // Height of the entire list
+
+ // Read/written only by Insert().
+ Random rnd_;
};
// Implementation details follow
-template<typename Key, class Comparator>
-struct SkipList<Key,Comparator>::Node {
- explicit Node(const Key& k) : key(k) { }
+template <typename Key, class Comparator>
+struct SkipList<Key, Comparator>::Node {
+ explicit Node(const Key& k) : key(k) {}
Key const key;
@@ -155,92 +154,92 @@ struct SkipList<Key,Comparator>::Node {
assert(n >= 0);
// Use an 'acquire load' so that we observe a fully initialized
// version of the returned Node.
- return reinterpret_cast<Node*>(next_[n].Acquire_Load());
+ return next_[n].load(std::memory_order_acquire);
}
void SetNext(int n, Node* x) {
assert(n >= 0);
// Use a 'release store' so that anybody who reads through this
// pointer observes a fully initialized version of the inserted node.
- next_[n].Release_Store(x);
+ next_[n].store(x, std::memory_order_release);
}
// No-barrier variants that can be safely used in a few locations.
Node* NoBarrier_Next(int n) {
assert(n >= 0);
- return reinterpret_cast<Node*>(next_[n].NoBarrier_Load());
+ return next_[n].load(std::memory_order_relaxed);
}
void NoBarrier_SetNext(int n, Node* x) {
assert(n >= 0);
- next_[n].NoBarrier_Store(x);
+ next_[n].store(x, std::memory_order_relaxed);
}
private:
// Array of length equal to the node height. next_[0] is lowest level link.
- port::AtomicPointer next_[1];
+ std::atomic<Node*> next_[1];
};
-template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node*
-SkipList<Key,Comparator>::NewNode(const Key& key, int height) {
- char* mem = arena_->AllocateAligned(
- sizeof(Node) + sizeof(port::AtomicPointer) * (height - 1));
- return new (mem) Node(key);
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::NewNode(
+ const Key& key, int height) {
+ char* const node_memory = arena_->AllocateAligned(
+ sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
+ return new (node_memory) Node(key);
}
-template<typename Key, class Comparator>
-inline SkipList<Key,Comparator>::Iterator::Iterator(const SkipList* list) {
+template <typename Key, class Comparator>
+inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
list_ = list;
- node_ = NULL;
+ node_ = nullptr;
}
-template<typename Key, class Comparator>
-inline bool SkipList<Key,Comparator>::Iterator::Valid() const {
- return node_ != NULL;
+template <typename Key, class Comparator>
+inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
+ return node_ != nullptr;
}
-template<typename Key, class Comparator>
-inline const Key& SkipList<Key,Comparator>::Iterator::key() const {
+template <typename Key, class Comparator>
+inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
assert(Valid());
return node_->key;
}
-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Next() {
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::Next() {
assert(Valid());
node_ = node_->Next(0);
}
-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Prev() {
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::Prev() {
// Instead of using explicit "prev" links, we just search for the
// last node that falls before key.
assert(Valid());
node_ = list_->FindLessThan(node_->key);
if (node_ == list_->head_) {
- node_ = NULL;
+ node_ = nullptr;
}
}
-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Seek(const Key& target) {
- node_ = list_->FindGreaterOrEqual(target, NULL);
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
+ node_ = list_->FindGreaterOrEqual(target, nullptr);
}
-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::SeekToFirst() {
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
node_ = list_->head_->Next(0);
}
-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::SeekToLast() {
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
node_ = list_->FindLast();
if (node_ == list_->head_) {
- node_ = NULL;
+ node_ = nullptr;
}
}
-template<typename Key, class Comparator>
-int SkipList<Key,Comparator>::RandomHeight() {
+template <typename Key, class Comparator>
+int SkipList<Key, Comparator>::RandomHeight() {
// Increase height with probability 1 in kBranching
static const unsigned int kBranching = 4;
int height = 1;
@@ -252,15 +251,16 @@ int SkipList<Key,Comparator>::RandomHeight() {
return height;
}
-template<typename Key, class Comparator>
-bool SkipList<Key,Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
- // NULL n is considered infinite
- return (n != NULL) && (compare_(n->key, key) < 0);
+template <typename Key, class Comparator>
+bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
+ // null n is considered infinite
+ return (n != nullptr) && (compare_(n->key, key) < 0);
}
-template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOrEqual(const Key& key, Node** prev)
- const {
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node*
+SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
+ Node** prev) const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
@@ -269,7 +269,7 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOr
// Keep searching in this list
x = next;
} else {
- if (prev != NULL) prev[level] = x;
+ if (prev != nullptr) prev[level] = x;
if (level == 0) {
return next;
} else {
@@ -280,15 +280,15 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOr
}
}
-template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node*
-SkipList<Key,Comparator>::FindLessThan(const Key& key) const {
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node*
+SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
assert(x == head_ || compare_(x->key, key) < 0);
Node* next = x->Next(level);
- if (next == NULL || compare_(next->key, key) >= 0) {
+ if (next == nullptr || compare_(next->key, key) >= 0) {
if (level == 0) {
return x;
} else {
@@ -301,14 +301,14 @@ SkipList<Key,Comparator>::FindLessThan(const Key& key) const {
}
}
-template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindLast()
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
Node* next = x->Next(level);
- if (next == NULL) {
+ if (next == nullptr) {
if (level == 0) {
return x;
} else {
@@ -321,43 +321,41 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindLast()
}
}
-template<typename Key, class Comparator>
-SkipList<Key,Comparator>::SkipList(Comparator cmp, Arena* arena)
+template <typename Key, class Comparator>
+SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
: compare_(cmp),
arena_(arena),
head_(NewNode(0 /* any key will do */, kMaxHeight)),
- max_height_(reinterpret_cast<void*>(1)),
+ max_height_(1),
rnd_(0xdeadbeef) {
for (int i = 0; i < kMaxHeight; i++) {
- head_->SetNext(i, NULL);
+ head_->SetNext(i, nullptr);
}
}
-template<typename Key, class Comparator>
-void SkipList<Key,Comparator>::Insert(const Key& key) {
+template <typename Key, class Comparator>
+void SkipList<Key, Comparator>::Insert(const Key& key) {
// TODO(opt): We can use a barrier-free variant of FindGreaterOrEqual()
// here since Insert() is externally synchronized.
Node* prev[kMaxHeight];
Node* x = FindGreaterOrEqual(key, prev);
// Our data structure does not allow duplicate insertion
- assert(x == NULL || !Equal(key, x->key));
+ assert(x == nullptr || !Equal(key, x->key));
int height = RandomHeight();
if (height > GetMaxHeight()) {
for (int i = GetMaxHeight(); i < height; i++) {
prev[i] = head_;
}
- //fprintf(stderr, "Change height from %d to %d\n", max_height_, height);
-
// It is ok to mutate max_height_ without any synchronization
// with concurrent readers. A concurrent reader that observes
// the new value of max_height_ will see either the old value of
- // new level pointers from head_ (NULL), or a new value set in
+ // new level pointers from head_ (nullptr), or a new value set in
// the loop below. In the former case the reader will
- // immediately drop to the next level since NULL sorts after all
+ // immediately drop to the next level since nullptr sorts after all
// keys. In the latter case the reader will use the new node.
- max_height_.NoBarrier_Store(reinterpret_cast<void*>(height));
+ max_height_.store(height, std::memory_order_relaxed);
}
x = NewNode(key, height);
@@ -369,10 +367,10 @@ void SkipList<Key,Comparator>::Insert(const Key& key) {
}
}
-template<typename Key, class Comparator>
-bool SkipList<Key,Comparator>::Contains(const Key& key) const {
- Node* x = FindGreaterOrEqual(key, NULL);
- if (x != NULL && Equal(key, x->key)) {
+template <typename Key, class Comparator>
+bool SkipList<Key, Comparator>::Contains(const Key& key) const {
+ Node* x = FindGreaterOrEqual(key, nullptr);
+ if (x != nullptr && Equal(key, x->key)) {
return true;
} else {
return false;
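
The skiplist hunks above swap port::AtomicPointer for std::atomic: per-level next pointers are published with release stores and read with acquire loads, while max_height_ only needs relaxed ordering. Below is a minimal, self-contained sketch of the allocation and publication pattern, with illustrative names, plain operator new standing in for the arena, and a plain int key; it is not the real SkipList code.

    #include <atomic>
    #include <new>

    struct Node {
      explicit Node(int k) : key(k) {}
      const int key;

      Node* Next(int level) {
        return next_[level].load(std::memory_order_acquire);
      }
      void SetNext(int level, Node* n) {
        next_[level].store(n, std::memory_order_release);
      }

      // Length-1 trailing array; NewNode() over-allocates so that indices
      // [0, height) are usable, mirroring the arena trick in the hunk above.
      std::atomic<Node*> next_[1];
    };

    Node* NewNode(int key, int height) {
      char* const node_memory = static_cast<char*>(::operator new(
          sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1)));
      return new (node_memory) Node(key);
    }

    int main() {
      // Leaks in this sketch; the real list reclaims everything via the arena.
      Node* head = NewNode(0, 4);
      for (int level = 0; level < 4; ++level) head->SetNext(level, nullptr);
      Node* n = NewNode(42, 1);
      head->SetNext(0, n);                       // release: publishes *n
      return head->Next(0)->key == 42 ? 0 : 1;   // acquire: observes it
    }
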
diff --git a/src/leveldb/db/skiplist_test.cc b/src/leveldb/db/skiplist_test.cc
index aee1461e1b..9fa2d96829 100644
--- a/src/leveldb/db/skiplist_test.cc
+++ b/src/leveldb/db/skiplist_test.cc
@@ -3,8 +3,13 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/skiplist.h"
+
+#include <atomic>
#include <set>
+
#include "leveldb/env.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/arena.h"
#include "util/hash.h"
#include "util/random.h"
@@ -26,7 +31,7 @@ struct Comparator {
}
};
-class SkipTest { };
+class SkipTest {};
TEST(SkipTest, Empty) {
Arena arena;
@@ -112,8 +117,7 @@ TEST(SkipTest, InsertAndLookup) {
// Compare against model iterator
for (std::set<Key>::reverse_iterator model_iter = keys.rbegin();
- model_iter != keys.rend();
- ++model_iter) {
+ model_iter != keys.rend(); ++model_iter) {
ASSERT_TRUE(iter.Valid());
ASSERT_EQ(*model_iter, iter.key());
iter.Prev();
@@ -126,7 +130,7 @@ TEST(SkipTest, InsertAndLookup) {
// concurrent readers (with no synchronization other than when a
// reader's iterator is created), the reader always observes all the
// data that was present in the skip list when the iterator was
-// constructor. Because insertions are happening concurrently, we may
+// constructed. Because insertions are happening concurrently, we may
// also observe new values that were inserted since the iterator was
// constructed, but we should never miss any values that were present
// at iterator construction time.
@@ -155,12 +159,12 @@ class ConcurrentTest {
static uint64_t hash(Key key) { return key & 0xff; }
static uint64_t HashNumbers(uint64_t k, uint64_t g) {
- uint64_t data[2] = { k, g };
+ uint64_t data[2] = {k, g};
return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
}
static Key MakeKey(uint64_t k, uint64_t g) {
- assert(sizeof(Key) == sizeof(uint64_t));
+ static_assert(sizeof(Key) == sizeof(uint64_t), "");
assert(k <= K); // We sometimes pass K to seek to the end of the skiplist
assert(g <= 0xffffffffu);
return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
@@ -186,13 +190,11 @@ class ConcurrentTest {
// Per-key generation
struct State {
- port::AtomicPointer generation[K];
- void Set(int k, intptr_t v) {
- generation[k].Release_Store(reinterpret_cast<void*>(v));
- }
- intptr_t Get(int k) {
- return reinterpret_cast<intptr_t>(generation[k].Acquire_Load());
+ std::atomic<int> generation[K];
+ void Set(int k, int v) {
+ generation[k].store(v, std::memory_order_release);
}
+ int Get(int k) { return generation[k].load(std::memory_order_acquire); }
State() {
for (int k = 0; k < K; k++) {
@@ -211,7 +213,7 @@ class ConcurrentTest {
SkipList<Key, Comparator> list_;
public:
- ConcurrentTest() : list_(Comparator(), &arena_) { }
+ ConcurrentTest() : list_(Comparator(), &arena_) {}
// REQUIRES: External synchronization
void WriteStep(Random* rnd) {
@@ -250,11 +252,9 @@ class ConcurrentTest {
// Note that generation 0 is never inserted, so it is ok if
// <*,0,*> is missing.
ASSERT_TRUE((gen(pos) == 0) ||
- (gen(pos) > static_cast<Key>(initial_state.Get(key(pos))))
- ) << "key: " << key(pos)
- << "; gen: " << gen(pos)
- << "; initgen: "
- << initial_state.Get(key(pos));
+ (gen(pos) > static_cast<Key>(initial_state.Get(key(pos)))))
+ << "key: " << key(pos) << "; gen: " << gen(pos)
+ << "; initgen: " << initial_state.Get(key(pos));
// Advance to next key in the valid key space
if (key(pos) < key(current)) {
@@ -298,21 +298,14 @@ class TestState {
public:
ConcurrentTest t_;
int seed_;
- port::AtomicPointer quit_flag_;
+ std::atomic<bool> quit_flag_;
- enum ReaderState {
- STARTING,
- RUNNING,
- DONE
- };
+ enum ReaderState { STARTING, RUNNING, DONE };
explicit TestState(int s)
- : seed_(s),
- quit_flag_(NULL),
- state_(STARTING),
- state_cv_(&mu_) {}
+ : seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {}
- void Wait(ReaderState s) {
+ void Wait(ReaderState s) LOCKS_EXCLUDED(mu_) {
mu_.Lock();
while (state_ != s) {
state_cv_.Wait();
@@ -320,7 +313,7 @@ class TestState {
mu_.Unlock();
}
- void Change(ReaderState s) {
+ void Change(ReaderState s) LOCKS_EXCLUDED(mu_) {
mu_.Lock();
state_ = s;
state_cv_.Signal();
@@ -329,8 +322,8 @@ class TestState {
private:
port::Mutex mu_;
- ReaderState state_;
- port::CondVar state_cv_;
+ ReaderState state_ GUARDED_BY(mu_);
+ port::CondVar state_cv_ GUARDED_BY(mu_);
};
static void ConcurrentReader(void* arg) {
@@ -338,7 +331,7 @@ static void ConcurrentReader(void* arg) {
Random rnd(state->seed_);
int64_t reads = 0;
state->Change(TestState::RUNNING);
- while (!state->quit_flag_.Acquire_Load()) {
+ while (!state->quit_flag_.load(std::memory_order_acquire)) {
state->t_.ReadStep(&rnd);
++reads;
}
@@ -360,7 +353,7 @@ static void RunConcurrent(int run) {
for (int i = 0; i < kSize; i++) {
state.t_.WriteStep(&rnd);
}
- state.quit_flag_.Release_Store(&state); // Any non-NULL arg will do
+ state.quit_flag_.store(true, std::memory_order_release);
state.Wait(TestState::DONE);
}
}
@@ -373,6 +366,4 @@ TEST(SkipTest, Concurrent5) { RunConcurrent(5); }
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
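
In skiplist_test.cc the reader-shutdown handshake becomes a std::atomic<bool> with an explicit release store paired with acquire loads, replacing Release_Store()/Acquire_Load() on a raw pointer. A small sketch of that handshake, with ReadStep() replaced by a counter and all names illustrative:

    #include <atomic>
    #include <cstdio>
    #include <thread>

    int main() {
      std::atomic<bool> quit_flag{false};
      long reads = 0;

      std::thread reader([&] {
        // Acquire pairs with the release store below, so everything the main
        // thread wrote before setting the flag is visible here.
        while (!quit_flag.load(std::memory_order_acquire)) {
          ++reads;  // stand-in for ReadStep()
        }
      });

      // ... writer work would go here ...
      quit_flag.store(true, std::memory_order_release);
      reader.join();

      std::printf("reader iterations: %ld\n", reads);
      return 0;
    }
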
diff --git a/src/leveldb/db/snapshot.h b/src/leveldb/db/snapshot.h
index 6ed413c42d..9f1d66491d 100644
--- a/src/leveldb/db/snapshot.h
+++ b/src/leveldb/db/snapshot.h
@@ -16,50 +16,78 @@ class SnapshotList;
// Each SnapshotImpl corresponds to a particular sequence number.
class SnapshotImpl : public Snapshot {
public:
- SequenceNumber number_; // const after creation
+ SnapshotImpl(SequenceNumber sequence_number)
+ : sequence_number_(sequence_number) {}
+
+ SequenceNumber sequence_number() const { return sequence_number_; }
private:
friend class SnapshotList;
- // SnapshotImpl is kept in a doubly-linked circular list
+ // SnapshotImpl is kept in a doubly-linked circular list. The SnapshotList
+ // implementation operates on the next/previous fields directly.
SnapshotImpl* prev_;
SnapshotImpl* next_;
- SnapshotList* list_; // just for sanity checks
+ const SequenceNumber sequence_number_;
+
+#if !defined(NDEBUG)
+ SnapshotList* list_ = nullptr;
+#endif // !defined(NDEBUG)
};
class SnapshotList {
public:
- SnapshotList() {
- list_.prev_ = &list_;
- list_.next_ = &list_;
+ SnapshotList() : head_(0) {
+ head_.prev_ = &head_;
+ head_.next_ = &head_;
+ }
+
+ bool empty() const { return head_.next_ == &head_; }
+ SnapshotImpl* oldest() const {
+ assert(!empty());
+ return head_.next_;
}
+ SnapshotImpl* newest() const {
+ assert(!empty());
+ return head_.prev_;
+ }
+
+ // Creates a SnapshotImpl and appends it to the end of the list.
+ SnapshotImpl* New(SequenceNumber sequence_number) {
+ assert(empty() || newest()->sequence_number_ <= sequence_number);
+
+ SnapshotImpl* snapshot = new SnapshotImpl(sequence_number);
- bool empty() const { return list_.next_ == &list_; }
- SnapshotImpl* oldest() const { assert(!empty()); return list_.next_; }
- SnapshotImpl* newest() const { assert(!empty()); return list_.prev_; }
-
- const SnapshotImpl* New(SequenceNumber seq) {
- SnapshotImpl* s = new SnapshotImpl;
- s->number_ = seq;
- s->list_ = this;
- s->next_ = &list_;
- s->prev_ = list_.prev_;
- s->prev_->next_ = s;
- s->next_->prev_ = s;
- return s;
+#if !defined(NDEBUG)
+ snapshot->list_ = this;
+#endif // !defined(NDEBUG)
+ snapshot->next_ = &head_;
+ snapshot->prev_ = head_.prev_;
+ snapshot->prev_->next_ = snapshot;
+ snapshot->next_->prev_ = snapshot;
+ return snapshot;
}
- void Delete(const SnapshotImpl* s) {
- assert(s->list_ == this);
- s->prev_->next_ = s->next_;
- s->next_->prev_ = s->prev_;
- delete s;
+ // Removes a SnapshotImpl from this list.
+ //
+ // The snapshot must have been created by calling New() on this list.
+ //
+ // The snapshot pointer should not be const, because its memory is
+ // deallocated. However, that would force us to change DB::ReleaseSnapshot(),
+ // which is in the API, and currently takes a const Snapshot.
+ void Delete(const SnapshotImpl* snapshot) {
+#if !defined(NDEBUG)
+ assert(snapshot->list_ == this);
+#endif // !defined(NDEBUG)
+ snapshot->prev_->next_ = snapshot->next_;
+ snapshot->next_->prev_ = snapshot->prev_;
+ delete snapshot;
}
private:
// Dummy head of doubly-linked list of snapshots
- SnapshotImpl list_;
+ SnapshotImpl head_;
};
} // namespace leveldb
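
The rewritten SnapshotList keeps snapshots in a circular doubly-linked list headed by a sentinel node: New() splices just before the sentinel, so oldest() is head_.next_ and newest() is head_.prev_. A stripped-down sketch of that structure follows, with illustrative names and a bare uint64_t payload in place of SnapshotImpl:

    #include <cassert>
    #include <cstdint>

    struct Item {
      explicit Item(uint64_t seq) : sequence_number(seq) {}
      const uint64_t sequence_number;
      Item* prev_ = nullptr;
      Item* next_ = nullptr;
    };

    class ItemList {
     public:
      ItemList() : head_(0) {
        head_.prev_ = &head_;
        head_.next_ = &head_;
      }

      bool empty() const { return head_.next_ == &head_; }
      Item* oldest() const { assert(!empty()); return head_.next_; }
      Item* newest() const { assert(!empty()); return head_.prev_; }

      // Appends just before the sentinel, i.e. at the tail of the list.
      Item* New(uint64_t seq) {
        Item* item = new Item(seq);
        item->next_ = &head_;
        item->prev_ = head_.prev_;
        item->prev_->next_ = item;
        item->next_->prev_ = item;
        return item;
      }

      // Unlinks and deletes; the item must have come from New() on this list.
      void Delete(Item* item) {
        item->prev_->next_ = item->next_;
        item->next_->prev_ = item->prev_;
        delete item;
      }

     private:
      Item head_;  // sentinel; carries no real sequence number
    };

    int main() {
      ItemList list;
      Item* a = list.New(1);
      Item* b = list.New(2);
      assert(list.oldest() == a && list.newest() == b);
      list.Delete(a);
      assert(list.oldest() == b);
      list.Delete(b);
      assert(list.empty());
      return 0;
    }
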
diff --git a/src/leveldb/db/table_cache.cc b/src/leveldb/db/table_cache.cc
index e3d82cd3ea..73f05fd7b1 100644
--- a/src/leveldb/db/table_cache.cc
+++ b/src/leveldb/db/table_cache.cc
@@ -29,18 +29,14 @@ static void UnrefEntry(void* arg1, void* arg2) {
cache->Release(h);
}
-TableCache::TableCache(const std::string& dbname,
- const Options* options,
+TableCache::TableCache(const std::string& dbname, const Options& options,
int entries)
- : env_(options->env),
+ : env_(options.env),
dbname_(dbname),
options_(options),
- cache_(NewLRUCache(entries)) {
-}
+ cache_(NewLRUCache(entries)) {}
-TableCache::~TableCache() {
- delete cache_;
-}
+TableCache::~TableCache() { delete cache_; }
Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
Cache::Handle** handle) {
@@ -49,10 +45,10 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
EncodeFixed64(buf, file_number);
Slice key(buf, sizeof(buf));
*handle = cache_->Lookup(key);
- if (*handle == NULL) {
+ if (*handle == nullptr) {
std::string fname = TableFileName(dbname_, file_number);
- RandomAccessFile* file = NULL;
- Table* table = NULL;
+ RandomAccessFile* file = nullptr;
+ Table* table = nullptr;
s = env_->NewRandomAccessFile(fname, &file);
if (!s.ok()) {
std::string old_fname = SSTTableFileName(dbname_, file_number);
@@ -61,11 +57,11 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
}
}
if (s.ok()) {
- s = Table::Open(*options_, file, file_size, &table);
+ s = Table::Open(options_, file, file_size, &table);
}
if (!s.ok()) {
- assert(table == NULL);
+ assert(table == nullptr);
delete file;
// We do not cache error results so that if the error is transient,
// or somebody repairs the file, we recover automatically.
@@ -80,14 +76,13 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
}
Iterator* TableCache::NewIterator(const ReadOptions& options,
- uint64_t file_number,
- uint64_t file_size,
+ uint64_t file_number, uint64_t file_size,
Table** tableptr) {
- if (tableptr != NULL) {
- *tableptr = NULL;
+ if (tableptr != nullptr) {
+ *tableptr = nullptr;
}
- Cache::Handle* handle = NULL;
+ Cache::Handle* handle = nullptr;
Status s = FindTable(file_number, file_size, &handle);
if (!s.ok()) {
return NewErrorIterator(s);
@@ -96,23 +91,21 @@ Iterator* TableCache::NewIterator(const ReadOptions& options,
Table* table = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
Iterator* result = table->NewIterator(options);
result->RegisterCleanup(&UnrefEntry, cache_, handle);
- if (tableptr != NULL) {
+ if (tableptr != nullptr) {
*tableptr = table;
}
return result;
}
-Status TableCache::Get(const ReadOptions& options,
- uint64_t file_number,
- uint64_t file_size,
- const Slice& k,
- void* arg,
- void (*saver)(void*, const Slice&, const Slice&)) {
- Cache::Handle* handle = NULL;
+Status TableCache::Get(const ReadOptions& options, uint64_t file_number,
+ uint64_t file_size, const Slice& k, void* arg,
+ void (*handle_result)(void*, const Slice&,
+ const Slice&)) {
+ Cache::Handle* handle = nullptr;
Status s = FindTable(file_number, file_size, &handle);
if (s.ok()) {
Table* t = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
- s = t->InternalGet(options, k, arg, saver);
+ s = t->InternalGet(options, k, arg, handle_result);
cache_->Release(handle);
}
return s;
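
TableCache::Get() keeps the C-style callback convention: the caller passes an opaque void* plus a function pointer (now named handle_result), and the callee casts the pointer back to its real type. The same shape reappears below in Version::Get()'s State::Match. A tiny sketch of the convention with made-up names and types:

    #include <cstdio>

    struct MatchState {
      int hits = 0;
    };

    // Callback: receives the opaque state pointer back and casts it.
    static void HandleResult(void* arg, int key, int value) {
      MatchState* state = static_cast<MatchState*>(arg);
      state->hits++;
      std::printf("key=%d value=%d\n", key, value);
    }

    // Caller side: the scanning code never needs to know the state's type.
    static void ScanTable(void* arg, void (*handle_result)(void*, int, int)) {
      for (int key = 0; key < 3; ++key) {
        handle_result(arg, key, key * key);
      }
    }

    int main() {
      MatchState state;
      ScanTable(&state, &HandleResult);
      return state.hits == 3 ? 0 : 1;
    }
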
diff --git a/src/leveldb/db/table_cache.h b/src/leveldb/db/table_cache.h
index 8cf4aaf12d..93069c8844 100644
--- a/src/leveldb/db/table_cache.h
+++ b/src/leveldb/db/table_cache.h
@@ -7,8 +7,10 @@
#ifndef STORAGE_LEVELDB_DB_TABLE_CACHE_H_
#define STORAGE_LEVELDB_DB_TABLE_CACHE_H_
-#include <string>
#include <stdint.h>
+
+#include <string>
+
#include "db/dbformat.h"
#include "leveldb/cache.h"
#include "leveldb/table.h"
@@ -20,40 +22,35 @@ class Env;
class TableCache {
public:
- TableCache(const std::string& dbname, const Options* options, int entries);
+ TableCache(const std::string& dbname, const Options& options, int entries);
~TableCache();
// Return an iterator for the specified file number (the corresponding
// file length must be exactly "file_size" bytes). If "tableptr" is
- // non-NULL, also sets "*tableptr" to point to the Table object
- // underlying the returned iterator, or NULL if no Table object underlies
- // the returned iterator. The returned "*tableptr" object is owned by
- // the cache and should not be deleted, and is valid for as long as the
+ // non-null, also sets "*tableptr" to point to the Table object
+ // underlying the returned iterator, or to nullptr if no Table object
+ // underlies the returned iterator. The returned "*tableptr" object is owned
+ // by the cache and should not be deleted, and is valid for as long as the
// returned iterator is live.
- Iterator* NewIterator(const ReadOptions& options,
- uint64_t file_number,
- uint64_t file_size,
- Table** tableptr = NULL);
+ Iterator* NewIterator(const ReadOptions& options, uint64_t file_number,
+ uint64_t file_size, Table** tableptr = nullptr);
// If a seek to internal key "k" in specified file finds an entry,
// call (*handle_result)(arg, found_key, found_value).
- Status Get(const ReadOptions& options,
- uint64_t file_number,
- uint64_t file_size,
- const Slice& k,
- void* arg,
+ Status Get(const ReadOptions& options, uint64_t file_number,
+ uint64_t file_size, const Slice& k, void* arg,
void (*handle_result)(void*, const Slice&, const Slice&));
// Evict any entry for the specified file number
void Evict(uint64_t file_number);
private:
+ Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
+
Env* const env_;
const std::string dbname_;
- const Options* options_;
+ const Options& options_;
Cache* cache_;
-
- Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
};
} // namespace leveldb
diff --git a/src/leveldb/db/version_edit.cc b/src/leveldb/db/version_edit.cc
index f10a2d58b2..cd770ef12d 100644
--- a/src/leveldb/db/version_edit.cc
+++ b/src/leveldb/db/version_edit.cc
@@ -12,15 +12,15 @@ namespace leveldb {
// Tag numbers for serialized VersionEdit. These numbers are written to
// disk and should not be changed.
enum Tag {
- kComparator = 1,
- kLogNumber = 2,
- kNextFileNumber = 3,
- kLastSequence = 4,
- kCompactPointer = 5,
- kDeletedFile = 6,
- kNewFile = 7,
+ kComparator = 1,
+ kLogNumber = 2,
+ kNextFileNumber = 3,
+ kLastSequence = 4,
+ kCompactPointer = 5,
+ kDeletedFile = 6,
+ kNewFile = 7,
// 8 was used for large value refs
- kPrevLogNumber = 9
+ kPrevLogNumber = 9
};
void VersionEdit::Clear() {
@@ -66,12 +66,10 @@ void VersionEdit::EncodeTo(std::string* dst) const {
PutLengthPrefixedSlice(dst, compact_pointers_[i].second.Encode());
}
- for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
- iter != deleted_files_.end();
- ++iter) {
+ for (const auto& deleted_file_kvp : deleted_files_) {
PutVarint32(dst, kDeletedFile);
- PutVarint32(dst, iter->first); // level
- PutVarint64(dst, iter->second); // file number
+ PutVarint32(dst, deleted_file_kvp.first); // level
+ PutVarint64(dst, deleted_file_kvp.second); // file number
}
for (size_t i = 0; i < new_files_.size(); i++) {
@@ -88,8 +86,7 @@ void VersionEdit::EncodeTo(std::string* dst) const {
static bool GetInternalKey(Slice* input, InternalKey* dst) {
Slice str;
if (GetLengthPrefixedSlice(input, &str)) {
- dst->DecodeFrom(str);
- return true;
+ return dst->DecodeFrom(str);
} else {
return false;
}
@@ -97,8 +94,7 @@ static bool GetInternalKey(Slice* input, InternalKey* dst) {
static bool GetLevel(Slice* input, int* level) {
uint32_t v;
- if (GetVarint32(input, &v) &&
- v < config::kNumLevels) {
+ if (GetVarint32(input, &v) && v < config::kNumLevels) {
*level = v;
return true;
} else {
@@ -109,7 +105,7 @@ static bool GetLevel(Slice* input, int* level) {
Status VersionEdit::DecodeFrom(const Slice& src) {
Clear();
Slice input = src;
- const char* msg = NULL;
+ const char* msg = nullptr;
uint32_t tag;
// Temporary storage for parsing
@@ -119,7 +115,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
Slice str;
InternalKey key;
- while (msg == NULL && GetVarint32(&input, &tag)) {
+ while (msg == nullptr && GetVarint32(&input, &tag)) {
switch (tag) {
case kComparator:
if (GetLengthPrefixedSlice(&input, &str)) {
@@ -163,8 +159,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
break;
case kCompactPointer:
- if (GetLevel(&input, &level) &&
- GetInternalKey(&input, &key)) {
+ if (GetLevel(&input, &level) && GetInternalKey(&input, &key)) {
compact_pointers_.push_back(std::make_pair(level, key));
} else {
msg = "compaction pointer";
@@ -172,8 +167,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
break;
case kDeletedFile:
- if (GetLevel(&input, &level) &&
- GetVarint64(&input, &number)) {
+ if (GetLevel(&input, &level) && GetVarint64(&input, &number)) {
deleted_files_.insert(std::make_pair(level, number));
} else {
msg = "deleted file";
@@ -181,8 +175,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
break;
case kNewFile:
- if (GetLevel(&input, &level) &&
- GetVarint64(&input, &f.number) &&
+ if (GetLevel(&input, &level) && GetVarint64(&input, &f.number) &&
GetVarint64(&input, &f.file_size) &&
GetInternalKey(&input, &f.smallest) &&
GetInternalKey(&input, &f.largest)) {
@@ -198,12 +191,12 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
}
}
- if (msg == NULL && !input.empty()) {
+ if (msg == nullptr && !input.empty()) {
msg = "invalid tag";
}
Status result;
- if (msg != NULL) {
+ if (msg != nullptr) {
result = Status::Corruption("VersionEdit", msg);
}
return result;
@@ -238,13 +231,11 @@ std::string VersionEdit::DebugString() const {
r.append(" ");
r.append(compact_pointers_[i].second.DebugString());
}
- for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
- iter != deleted_files_.end();
- ++iter) {
+ for (const auto& deleted_files_kvp : deleted_files_) {
r.append("\n DeleteFile: ");
- AppendNumberTo(&r, iter->first);
+ AppendNumberTo(&r, deleted_files_kvp.first);
r.append(" ");
- AppendNumberTo(&r, iter->second);
+ AppendNumberTo(&r, deleted_files_kvp.second);
}
for (size_t i = 0; i < new_files_.size(); i++) {
const FileMetaData& f = new_files_[i].second;
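
VersionEdit encodes each field as a varint tag followed by its payload, and DecodeFrom() loops over tags until the input runs out. The sketch below is a minimal base-128 varint codec in the same style as the PutVarint32/GetVarint32 helpers; the function names are reused for readability but the signatures are simplified, and the tag/value pair is purely illustrative:

    #include <cstdint>
    #include <string>

    // 7 bits per byte, high bit set on all but the last byte.
    void PutVarint32(std::string* dst, uint32_t v) {
      while (v >= 0x80) {
        dst->push_back(static_cast<char>((v & 0x7f) | 0x80));
        v >>= 7;
      }
      dst->push_back(static_cast<char>(v));
    }

    bool GetVarint32(const char** p, const char* limit, uint32_t* value) {
      uint32_t result = 0;
      for (int shift = 0; shift <= 28 && *p < limit; shift += 7) {
        uint32_t byte = static_cast<unsigned char>(*(*p)++);
        result |= (byte & 0x7f) << shift;
        if ((byte & 0x80) == 0) {
          *value = result;
          return true;
        }
      }
      return false;  // truncated or over-long input
    }

    int main() {
      std::string record;
      PutVarint32(&record, 2);      // an illustrative tag value
      PutVarint32(&record, 12345);  // its payload

      const char* p = record.data();
      const char* limit = p + record.size();
      uint32_t tag = 0, payload = 0;
      bool ok = GetVarint32(&p, limit, &tag) && tag == 2 &&
                GetVarint32(&p, limit, &payload) && payload == 12345;
      return ok ? 0 : 1;
    }
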
diff --git a/src/leveldb/db/version_edit.h b/src/leveldb/db/version_edit.h
index eaef77b327..0de4531773 100644
--- a/src/leveldb/db/version_edit.h
+++ b/src/leveldb/db/version_edit.h
@@ -8,6 +8,7 @@
#include <set>
#include <utility>
#include <vector>
+
#include "db/dbformat.h"
namespace leveldb {
@@ -15,20 +16,20 @@ namespace leveldb {
class VersionSet;
struct FileMetaData {
+ FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {}
+
int refs;
- int allowed_seeks; // Seeks allowed until compaction
+ int allowed_seeks; // Seeks allowed until compaction
uint64_t number;
- uint64_t file_size; // File size in bytes
- InternalKey smallest; // Smallest internal key served by table
- InternalKey largest; // Largest internal key served by table
-
- FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) { }
+ uint64_t file_size; // File size in bytes
+ InternalKey smallest; // Smallest internal key served by table
+ InternalKey largest; // Largest internal key served by table
};
class VersionEdit {
public:
VersionEdit() { Clear(); }
- ~VersionEdit() { }
+ ~VersionEdit() = default;
void Clear();
@@ -59,10 +60,8 @@ class VersionEdit {
// Add the specified file at the specified number.
// REQUIRES: This version has not been saved (see VersionSet::SaveTo)
// REQUIRES: "smallest" and "largest" are smallest and largest keys in file
- void AddFile(int level, uint64_t file,
- uint64_t file_size,
- const InternalKey& smallest,
- const InternalKey& largest) {
+ void AddFile(int level, uint64_t file, uint64_t file_size,
+ const InternalKey& smallest, const InternalKey& largest) {
FileMetaData f;
f.number = file;
f.file_size = file_size;
@@ -84,7 +83,7 @@ class VersionEdit {
private:
friend class VersionSet;
- typedef std::set< std::pair<int, uint64_t> > DeletedFileSet;
+ typedef std::set<std::pair<int, uint64_t>> DeletedFileSet;
std::string comparator_;
uint64_t log_number_;
@@ -97,9 +96,9 @@ class VersionEdit {
bool has_next_file_number_;
bool has_last_sequence_;
- std::vector< std::pair<int, InternalKey> > compact_pointers_;
+ std::vector<std::pair<int, InternalKey>> compact_pointers_;
DeletedFileSet deleted_files_;
- std::vector< std::pair<int, FileMetaData> > new_files_;
+ std::vector<std::pair<int, FileMetaData>> new_files_;
};
} // namespace leveldb
diff --git a/src/leveldb/db/version_edit_test.cc b/src/leveldb/db/version_edit_test.cc
index 280310b49d..0b7cda8854 100644
--- a/src/leveldb/db/version_edit_test.cc
+++ b/src/leveldb/db/version_edit_test.cc
@@ -17,7 +17,7 @@ static void TestEncodeDecode(const VersionEdit& edit) {
ASSERT_EQ(encoded, encoded2);
}
-class VersionEditTest { };
+class VersionEditTest {};
TEST(VersionEditTest, EncodeDecode) {
static const uint64_t kBig = 1ull << 50;
@@ -41,6 +41,4 @@ TEST(VersionEditTest, EncodeDecode) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/version_set.cc b/src/leveldb/db/version_set.cc
index 2cb6d80ed3..cd07346ea8 100644
--- a/src/leveldb/db/version_set.cc
+++ b/src/leveldb/db/version_set.cc
@@ -4,8 +4,10 @@
#include "db/version_set.h"
-#include <algorithm>
#include <stdio.h>
+
+#include <algorithm>
+
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
@@ -84,8 +86,7 @@ Version::~Version() {
}
int FindFile(const InternalKeyComparator& icmp,
- const std::vector<FileMetaData*>& files,
- const Slice& key) {
+ const std::vector<FileMetaData*>& files, const Slice& key) {
uint32_t left = 0;
uint32_t right = files.size();
while (left < right) {
@@ -104,26 +105,25 @@ int FindFile(const InternalKeyComparator& icmp,
return right;
}
-static bool AfterFile(const Comparator* ucmp,
- const Slice* user_key, const FileMetaData* f) {
- // NULL user_key occurs before all keys and is therefore never after *f
- return (user_key != NULL &&
+static bool AfterFile(const Comparator* ucmp, const Slice* user_key,
+ const FileMetaData* f) {
+ // null user_key occurs before all keys and is therefore never after *f
+ return (user_key != nullptr &&
ucmp->Compare(*user_key, f->largest.user_key()) > 0);
}
-static bool BeforeFile(const Comparator* ucmp,
- const Slice* user_key, const FileMetaData* f) {
- // NULL user_key occurs after all keys and is therefore never before *f
- return (user_key != NULL &&
+static bool BeforeFile(const Comparator* ucmp, const Slice* user_key,
+ const FileMetaData* f) {
+ // null user_key occurs after all keys and is therefore never before *f
+ return (user_key != nullptr &&
ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
}
-bool SomeFileOverlapsRange(
- const InternalKeyComparator& icmp,
- bool disjoint_sorted_files,
- const std::vector<FileMetaData*>& files,
- const Slice* smallest_user_key,
- const Slice* largest_user_key) {
+bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
+ bool disjoint_sorted_files,
+ const std::vector<FileMetaData*>& files,
+ const Slice* smallest_user_key,
+ const Slice* largest_user_key) {
const Comparator* ucmp = icmp.user_comparator();
if (!disjoint_sorted_files) {
// Need to check against all files
@@ -141,10 +141,11 @@ bool SomeFileOverlapsRange(
// Binary search over file list
uint32_t index = 0;
- if (smallest_user_key != NULL) {
+ if (smallest_user_key != nullptr) {
// Find the earliest possible internal key for smallest_user_key
- InternalKey small(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek);
- index = FindFile(icmp, files, small.Encode());
+ InternalKey small_key(*smallest_user_key, kMaxSequenceNumber,
+ kValueTypeForSeek);
+ index = FindFile(icmp, files, small_key.Encode());
}
if (index >= files.size()) {
@@ -164,25 +165,21 @@ class Version::LevelFileNumIterator : public Iterator {
public:
LevelFileNumIterator(const InternalKeyComparator& icmp,
const std::vector<FileMetaData*>* flist)
- : icmp_(icmp),
- flist_(flist),
- index_(flist->size()) { // Marks as invalid
- }
- virtual bool Valid() const {
- return index_ < flist_->size();
+ : icmp_(icmp), flist_(flist), index_(flist->size()) { // Marks as invalid
}
- virtual void Seek(const Slice& target) {
+ bool Valid() const override { return index_ < flist_->size(); }
+ void Seek(const Slice& target) override {
index_ = FindFile(icmp_, *flist_, target);
}
- virtual void SeekToFirst() { index_ = 0; }
- virtual void SeekToLast() {
+ void SeekToFirst() override { index_ = 0; }
+ void SeekToLast() override {
index_ = flist_->empty() ? 0 : flist_->size() - 1;
}
- virtual void Next() {
+ void Next() override {
assert(Valid());
index_++;
}
- virtual void Prev() {
+ void Prev() override {
assert(Valid());
if (index_ == 0) {
index_ = flist_->size(); // Marks as invalid
@@ -190,17 +187,18 @@ class Version::LevelFileNumIterator : public Iterator {
index_--;
}
}
- Slice key() const {
+ Slice key() const override {
assert(Valid());
return (*flist_)[index_]->largest.Encode();
}
- Slice value() const {
+ Slice value() const override {
assert(Valid());
EncodeFixed64(value_buf_, (*flist_)[index_]->number);
- EncodeFixed64(value_buf_+8, (*flist_)[index_]->file_size);
+ EncodeFixed64(value_buf_ + 8, (*flist_)[index_]->file_size);
return Slice(value_buf_, sizeof(value_buf_));
}
- virtual Status status() const { return Status::OK(); }
+ Status status() const override { return Status::OK(); }
+
private:
const InternalKeyComparator icmp_;
const std::vector<FileMetaData*>* const flist_;
@@ -210,16 +208,14 @@ class Version::LevelFileNumIterator : public Iterator {
mutable char value_buf_[16];
};
-static Iterator* GetFileIterator(void* arg,
- const ReadOptions& options,
+static Iterator* GetFileIterator(void* arg, const ReadOptions& options,
const Slice& file_value) {
TableCache* cache = reinterpret_cast<TableCache*>(arg);
if (file_value.size() != 16) {
return NewErrorIterator(
Status::Corruption("FileReader invoked with unexpected value"));
} else {
- return cache->NewIterator(options,
- DecodeFixed64(file_value.data()),
+ return cache->NewIterator(options, DecodeFixed64(file_value.data()),
DecodeFixed64(file_value.data() + 8));
}
}
@@ -227,17 +223,16 @@ static Iterator* GetFileIterator(void* arg,
Iterator* Version::NewConcatenatingIterator(const ReadOptions& options,
int level) const {
return NewTwoLevelIterator(
- new LevelFileNumIterator(vset_->icmp_, &files_[level]),
- &GetFileIterator, vset_->table_cache_, options);
+ new LevelFileNumIterator(vset_->icmp_, &files_[level]), &GetFileIterator,
+ vset_->table_cache_, options);
}
void Version::AddIterators(const ReadOptions& options,
std::vector<Iterator*>* iters) {
// Merge all level zero files together since they may overlap
for (size_t i = 0; i < files_[0].size(); i++) {
- iters->push_back(
- vset_->table_cache_->NewIterator(
- options, files_[0][i]->number, files_[0][i]->file_size));
+ iters->push_back(vset_->table_cache_->NewIterator(
+ options, files_[0][i]->number, files_[0][i]->file_size));
}
// For levels > 0, we can use a concatenating iterator that sequentially
@@ -264,7 +259,7 @@ struct Saver {
Slice user_key;
std::string* value;
};
-}
+} // namespace
static void SaveValue(void* arg, const Slice& ikey, const Slice& v) {
Saver* s = reinterpret_cast<Saver*>(arg);
ParsedInternalKey parsed_key;
@@ -284,10 +279,8 @@ static bool NewestFirst(FileMetaData* a, FileMetaData* b) {
return a->number > b->number;
}
-void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
- void* arg,
+void Version::ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
bool (*func)(void*, int, FileMetaData*)) {
- // TODO(sanjay): Change Version::Get() to use this function.
const Comparator* ucmp = vset_->icmp_.user_comparator();
// Search level-0 in order from newest to oldest.
@@ -329,110 +322,89 @@ void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
}
}
-Status Version::Get(const ReadOptions& options,
- const LookupKey& k,
- std::string* value,
- GetStats* stats) {
- Slice ikey = k.internal_key();
- Slice user_key = k.user_key();
- const Comparator* ucmp = vset_->icmp_.user_comparator();
- Status s;
-
- stats->seek_file = NULL;
+Status Version::Get(const ReadOptions& options, const LookupKey& k,
+ std::string* value, GetStats* stats) {
+ stats->seek_file = nullptr;
stats->seek_file_level = -1;
- FileMetaData* last_file_read = NULL;
- int last_file_read_level = -1;
- // We can search level-by-level since entries never hop across
- // levels. Therefore we are guaranteed that if we find data
- // in an smaller level, later levels are irrelevant.
- std::vector<FileMetaData*> tmp;
- FileMetaData* tmp2;
- for (int level = 0; level < config::kNumLevels; level++) {
- size_t num_files = files_[level].size();
- if (num_files == 0) continue;
+ struct State {
+ Saver saver;
+ GetStats* stats;
+ const ReadOptions* options;
+ Slice ikey;
+ FileMetaData* last_file_read;
+ int last_file_read_level;
- // Get the list of files to search in this level
- FileMetaData* const* files = &files_[level][0];
- if (level == 0) {
- // Level-0 files may overlap each other. Find all files that
- // overlap user_key and process them in order from newest to oldest.
- tmp.reserve(num_files);
- for (uint32_t i = 0; i < num_files; i++) {
- FileMetaData* f = files[i];
- if (ucmp->Compare(user_key, f->smallest.user_key()) >= 0 &&
- ucmp->Compare(user_key, f->largest.user_key()) <= 0) {
- tmp.push_back(f);
- }
- }
- if (tmp.empty()) continue;
+ VersionSet* vset;
+ Status s;
+ bool found;
- std::sort(tmp.begin(), tmp.end(), NewestFirst);
- files = &tmp[0];
- num_files = tmp.size();
- } else {
- // Binary search to find earliest index whose largest key >= ikey.
- uint32_t index = FindFile(vset_->icmp_, files_[level], ikey);
- if (index >= num_files) {
- files = NULL;
- num_files = 0;
- } else {
- tmp2 = files[index];
- if (ucmp->Compare(user_key, tmp2->smallest.user_key()) < 0) {
- // All of "tmp2" is past any data for user_key
- files = NULL;
- num_files = 0;
- } else {
- files = &tmp2;
- num_files = 1;
- }
- }
- }
+ static bool Match(void* arg, int level, FileMetaData* f) {
+ State* state = reinterpret_cast<State*>(arg);
- for (uint32_t i = 0; i < num_files; ++i) {
- if (last_file_read != NULL && stats->seek_file == NULL) {
+ if (state->stats->seek_file == nullptr &&
+ state->last_file_read != nullptr) {
// We have had more than one seek for this read. Charge the 1st file.
- stats->seek_file = last_file_read;
- stats->seek_file_level = last_file_read_level;
+ state->stats->seek_file = state->last_file_read;
+ state->stats->seek_file_level = state->last_file_read_level;
}
- FileMetaData* f = files[i];
- last_file_read = f;
- last_file_read_level = level;
-
- Saver saver;
- saver.state = kNotFound;
- saver.ucmp = ucmp;
- saver.user_key = user_key;
- saver.value = value;
- s = vset_->table_cache_->Get(options, f->number, f->file_size,
- ikey, &saver, SaveValue);
- if (!s.ok()) {
- return s;
+ state->last_file_read = f;
+ state->last_file_read_level = level;
+
+ state->s = state->vset->table_cache_->Get(*state->options, f->number,
+ f->file_size, state->ikey,
+ &state->saver, SaveValue);
+ if (!state->s.ok()) {
+ state->found = true;
+ return false;
}
- switch (saver.state) {
+ switch (state->saver.state) {
case kNotFound:
- break; // Keep searching in other files
+ return true; // Keep searching in other files
case kFound:
- return s;
+ state->found = true;
+ return false;
case kDeleted:
- s = Status::NotFound(Slice()); // Use empty error message for speed
- return s;
+ return false;
case kCorrupt:
- s = Status::Corruption("corrupted key for ", user_key);
- return s;
+ state->s =
+ Status::Corruption("corrupted key for ", state->saver.user_key);
+ state->found = true;
+ return false;
}
+
+ // Not reached. Added to avoid false compilation warnings of
+ // "control reaches end of non-void function".
+ return false;
}
- }
+ };
+
+ State state;
+ state.found = false;
+ state.stats = stats;
+ state.last_file_read = nullptr;
+ state.last_file_read_level = -1;
- return Status::NotFound(Slice()); // Use an empty error message for speed
+ state.options = &options;
+ state.ikey = k.internal_key();
+ state.vset = vset_;
+
+ state.saver.state = kNotFound;
+ state.saver.ucmp = vset_->icmp_.user_comparator();
+ state.saver.user_key = k.user_key();
+ state.saver.value = value;
+
+ ForEachOverlapping(state.saver.user_key, state.ikey, &state, &State::Match);
+
+ return state.found ? state.s : Status::NotFound(Slice());
}
bool Version::UpdateStats(const GetStats& stats) {
FileMetaData* f = stats.seek_file;
- if (f != NULL) {
+ if (f != nullptr) {
f->allowed_seeks--;
- if (f->allowed_seeks <= 0 && file_to_compact_ == NULL) {
+ if (f->allowed_seeks <= 0 && file_to_compact_ == nullptr) {
file_to_compact_ = f;
file_to_compact_level_ = stats.seek_file_level;
return true;
@@ -479,9 +451,7 @@ bool Version::RecordReadSample(Slice internal_key) {
return false;
}
-void Version::Ref() {
- ++refs_;
-}
+void Version::Ref() { ++refs_; }
void Version::Unref() {
assert(this != &vset_->dummy_versions_);
@@ -492,16 +462,14 @@ void Version::Unref() {
}
}
-bool Version::OverlapInLevel(int level,
- const Slice* smallest_user_key,
+bool Version::OverlapInLevel(int level, const Slice* smallest_user_key,
const Slice* largest_user_key) {
return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level],
smallest_user_key, largest_user_key);
}
-int Version::PickLevelForMemTableOutput(
- const Slice& smallest_user_key,
- const Slice& largest_user_key) {
+int Version::PickLevelForMemTableOutput(const Slice& smallest_user_key,
+ const Slice& largest_user_key) {
int level = 0;
if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
// Push to next level if there is no overlap in next level,
@@ -528,40 +496,39 @@ int Version::PickLevelForMemTableOutput(
}
// Store in "*inputs" all files in "level" that overlap [begin,end]
-void Version::GetOverlappingInputs(
- int level,
- const InternalKey* begin,
- const InternalKey* end,
- std::vector<FileMetaData*>* inputs) {
+void Version::GetOverlappingInputs(int level, const InternalKey* begin,
+ const InternalKey* end,
+ std::vector<FileMetaData*>* inputs) {
assert(level >= 0);
assert(level < config::kNumLevels);
inputs->clear();
Slice user_begin, user_end;
- if (begin != NULL) {
+ if (begin != nullptr) {
user_begin = begin->user_key();
}
- if (end != NULL) {
+ if (end != nullptr) {
user_end = end->user_key();
}
const Comparator* user_cmp = vset_->icmp_.user_comparator();
- for (size_t i = 0; i < files_[level].size(); ) {
+ for (size_t i = 0; i < files_[level].size();) {
FileMetaData* f = files_[level][i++];
const Slice file_start = f->smallest.user_key();
const Slice file_limit = f->largest.user_key();
- if (begin != NULL && user_cmp->Compare(file_limit, user_begin) < 0) {
+ if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) {
// "f" is completely before specified range; skip it
- } else if (end != NULL && user_cmp->Compare(file_start, user_end) > 0) {
+ } else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) {
// "f" is completely after specified range; skip it
} else {
inputs->push_back(f);
if (level == 0) {
// Level-0 files may overlap each other. So check if the newly
// added file has expanded the range. If so, restart search.
- if (begin != NULL && user_cmp->Compare(file_start, user_begin) < 0) {
+ if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) {
user_begin = file_start;
inputs->clear();
i = 0;
- } else if (end != NULL && user_cmp->Compare(file_limit, user_end) > 0) {
+ } else if (end != nullptr &&
+ user_cmp->Compare(file_limit, user_end) > 0) {
user_end = file_limit;
inputs->clear();
i = 0;
@@ -629,9 +596,7 @@ class VersionSet::Builder {
public:
// Initialize a builder with the files from *base and other info from *vset
- Builder(VersionSet* vset, Version* base)
- : vset_(vset),
- base_(base) {
+ Builder(VersionSet* vset, Version* base) : vset_(vset), base_(base) {
base_->Ref();
BySmallestKey cmp;
cmp.internal_comparator = &vset_->icmp_;
@@ -645,8 +610,8 @@ class VersionSet::Builder {
const FileSet* added = levels_[level].added_files;
std::vector<FileMetaData*> to_unref;
to_unref.reserve(added->size());
- for (FileSet::const_iterator it = added->begin();
- it != added->end(); ++it) {
+ for (FileSet::const_iterator it = added->begin(); it != added->end();
+ ++it) {
to_unref.push_back(*it);
}
delete added;
@@ -671,12 +636,9 @@ class VersionSet::Builder {
}
// Delete files
- const VersionEdit::DeletedFileSet& del = edit->deleted_files_;
- for (VersionEdit::DeletedFileSet::const_iterator iter = del.begin();
- iter != del.end();
- ++iter) {
- const int level = iter->first;
- const uint64_t number = iter->second;
+ for (const auto& deleted_file_set_kvp : edit->deleted_files_) {
+ const int level = deleted_file_set_kvp.first;
+ const uint64_t number = deleted_file_set_kvp.second;
levels_[level].deleted_files.insert(number);
}
@@ -699,7 +661,7 @@ class VersionSet::Builder {
// same as the compaction of 40KB of data. We are a little
// conservative and allow approximately one seek for every 16KB
// of data before triggering a compaction.
- f->allowed_seeks = (f->file_size / 16384);
+ f->allowed_seeks = static_cast<int>((f->file_size / 16384U));
if (f->allowed_seeks < 100) f->allowed_seeks = 100;
levels_[level].deleted_files.erase(f->number);
@@ -717,20 +679,17 @@ class VersionSet::Builder {
const std::vector<FileMetaData*>& base_files = base_->files_[level];
std::vector<FileMetaData*>::const_iterator base_iter = base_files.begin();
std::vector<FileMetaData*>::const_iterator base_end = base_files.end();
- const FileSet* added = levels_[level].added_files;
- v->files_[level].reserve(base_files.size() + added->size());
- for (FileSet::const_iterator added_iter = added->begin();
- added_iter != added->end();
- ++added_iter) {
+ const FileSet* added_files = levels_[level].added_files;
+ v->files_[level].reserve(base_files.size() + added_files->size());
+ for (const auto& added_file : *added_files) {
// Add all smaller files listed in base_
- for (std::vector<FileMetaData*>::const_iterator bpos
- = std::upper_bound(base_iter, base_end, *added_iter, cmp);
- base_iter != bpos;
- ++base_iter) {
+ for (std::vector<FileMetaData*>::const_iterator bpos =
+ std::upper_bound(base_iter, base_end, added_file, cmp);
+ base_iter != bpos; ++base_iter) {
MaybeAddFile(v, level, *base_iter);
}
- MaybeAddFile(v, level, *added_iter);
+ MaybeAddFile(v, level, added_file);
}
// Add remaining base files
@@ -742,7 +701,7 @@ class VersionSet::Builder {
// Make sure there is no overlap in levels > 0
if (level > 0) {
for (uint32_t i = 1; i < v->files_[level].size(); i++) {
- const InternalKey& prev_end = v->files_[level][i-1]->largest;
+ const InternalKey& prev_end = v->files_[level][i - 1]->largest;
const InternalKey& this_begin = v->files_[level][i]->smallest;
if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
@@ -763,7 +722,7 @@ class VersionSet::Builder {
std::vector<FileMetaData*>* files = &v->files_[level];
if (level > 0 && !files->empty()) {
// Must not overlap
- assert(vset_->icmp_.Compare((*files)[files->size()-1]->largest,
+ assert(vset_->icmp_.Compare((*files)[files->size() - 1]->largest,
f->smallest) < 0);
}
f->refs++;
@@ -772,8 +731,7 @@ class VersionSet::Builder {
}
};
-VersionSet::VersionSet(const std::string& dbname,
- const Options* options,
+VersionSet::VersionSet(const std::string& dbname, const Options* options,
TableCache* table_cache,
const InternalKeyComparator* cmp)
: env_(options->env),
@@ -786,10 +744,10 @@ VersionSet::VersionSet(const std::string& dbname,
last_sequence_(0),
log_number_(0),
prev_log_number_(0),
- descriptor_file_(NULL),
- descriptor_log_(NULL),
+ descriptor_file_(nullptr),
+ descriptor_log_(nullptr),
dummy_versions_(this),
- current_(NULL) {
+ current_(nullptr) {
AppendVersion(new Version(this));
}
@@ -804,7 +762,7 @@ void VersionSet::AppendVersion(Version* v) {
// Make "v" current
assert(v->refs_ == 0);
assert(v != current_);
- if (current_ != NULL) {
+ if (current_ != nullptr) {
current_->Unref();
}
current_ = v;
@@ -844,10 +802,10 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
// a temporary file that contains a snapshot of the current version.
std::string new_manifest_file;
Status s;
- if (descriptor_log_ == NULL) {
+ if (descriptor_log_ == nullptr) {
// No reason to unlock *mu here since we only hit this path in the
// first call to LogAndApply (when opening the database).
- assert(descriptor_file_ == NULL);
+ assert(descriptor_file_ == nullptr);
new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_);
edit->SetNextFile(next_file_number_);
s = env_->NewWritableFile(new_manifest_file, &descriptor_file_);
@@ -893,8 +851,8 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
if (!new_manifest_file.empty()) {
delete descriptor_log_;
delete descriptor_file_;
- descriptor_log_ = NULL;
- descriptor_file_ = NULL;
+ descriptor_log_ = nullptr;
+ descriptor_file_ = nullptr;
env_->DeleteFile(new_manifest_file);
}
}
@@ -902,10 +860,10 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
return s;
}
-Status VersionSet::Recover(bool *save_manifest) {
+Status VersionSet::Recover(bool* save_manifest) {
struct LogReporter : public log::Reader::Reporter {
Status* status;
- virtual void Corruption(size_t bytes, const Status& s) {
+ void Corruption(size_t bytes, const Status& s) override {
if (this->status->ok()) *this->status = s;
}
};
@@ -916,7 +874,7 @@ Status VersionSet::Recover(bool *save_manifest) {
if (!s.ok()) {
return s;
}
- if (current.empty() || current[current.size()-1] != '\n') {
+ if (current.empty() || current[current.size() - 1] != '\n') {
return Status::Corruption("CURRENT file does not end with newline");
}
current.resize(current.size() - 1);
@@ -925,6 +883,10 @@ Status VersionSet::Recover(bool *save_manifest) {
SequentialFile* file;
s = env_->NewSequentialFile(dscname, &file);
if (!s.ok()) {
+ if (s.IsNotFound()) {
+ return Status::Corruption("CURRENT points to a non-existent file",
+ s.ToString());
+ }
return s;
}
@@ -941,7 +903,8 @@ Status VersionSet::Recover(bool *save_manifest) {
{
LogReporter reporter;
reporter.status = &s;
- log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/);
+ log::Reader reader(file, &reporter, true /*checksum*/,
+ 0 /*initial_offset*/);
Slice record;
std::string scratch;
while (reader.ReadRecord(&record, &scratch) && s.ok()) {
@@ -982,7 +945,7 @@ Status VersionSet::Recover(bool *save_manifest) {
}
}
delete file;
- file = NULL;
+ file = nullptr;
if (s.ok()) {
if (!have_next_file) {
@@ -1040,12 +1003,12 @@ bool VersionSet::ReuseManifest(const std::string& dscname,
return false;
}
- assert(descriptor_file_ == NULL);
- assert(descriptor_log_ == NULL);
+ assert(descriptor_file_ == nullptr);
+ assert(descriptor_log_ == nullptr);
Status r = env_->NewAppendableFile(dscname, &descriptor_file_);
if (!r.ok()) {
Log(options_->info_log, "Reuse MANIFEST: %s\n", r.ToString().c_str());
- assert(descriptor_file_ == NULL);
+ assert(descriptor_file_ == nullptr);
return false;
}
@@ -1066,7 +1029,7 @@ void VersionSet::Finalize(Version* v) {
int best_level = -1;
double best_score = -1;
- for (int level = 0; level < config::kNumLevels-1; level++) {
+ for (int level = 0; level < config::kNumLevels - 1; level++) {
double score;
if (level == 0) {
// We treat level-0 specially by bounding the number of files
@@ -1081,7 +1044,7 @@ void VersionSet::Finalize(Version* v) {
// setting, or very high compression ratios, or lots of
// overwrites/deletions).
score = v->files_[level].size() /
- static_cast<double>(config::kL0_CompactionTrigger);
+ static_cast<double>(config::kL0_CompactionTrigger);
} else {
// Compute the ratio of current size to size limit.
const uint64_t level_bytes = TotalFileSize(v->files_[level]);
@@ -1137,16 +1100,12 @@ int VersionSet::NumLevelFiles(int level) const {
const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
// Update code if kNumLevels changes
- assert(config::kNumLevels == 7);
+ static_assert(config::kNumLevels == 7, "");
snprintf(scratch->buffer, sizeof(scratch->buffer),
- "files[ %d %d %d %d %d %d %d ]",
- int(current_->files_[0].size()),
- int(current_->files_[1].size()),
- int(current_->files_[2].size()),
- int(current_->files_[3].size()),
- int(current_->files_[4].size()),
- int(current_->files_[5].size()),
- int(current_->files_[6].size()));
+ "files[ %d %d %d %d %d %d %d ]", int(current_->files_[0].size()),
+ int(current_->files_[1].size()), int(current_->files_[2].size()),
+ int(current_->files_[3].size()), int(current_->files_[4].size()),
+ int(current_->files_[5].size()), int(current_->files_[6].size()));
return scratch->buffer;
}
@@ -1172,7 +1131,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
Table* tableptr;
Iterator* iter = table_cache_->NewIterator(
ReadOptions(), files[i]->number, files[i]->file_size, &tableptr);
- if (tableptr != NULL) {
+ if (tableptr != nullptr) {
result += tableptr->ApproximateOffsetOf(ikey.Encode());
}
delete iter;
@@ -1183,8 +1142,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
}
void VersionSet::AddLiveFiles(std::set<uint64_t>* live) {
- for (Version* v = dummy_versions_.next_;
- v != &dummy_versions_;
+ for (Version* v = dummy_versions_.next_; v != &dummy_versions_;
v = v->next_) {
for (int level = 0; level < config::kNumLevels; level++) {
const std::vector<FileMetaData*>& files = v->files_[level];
@@ -1207,7 +1165,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() {
for (int level = 1; level < config::kNumLevels - 1; level++) {
for (size_t i = 0; i < current_->files_[level].size(); i++) {
const FileMetaData* f = current_->files_[level][i];
- current_->GetOverlappingInputs(level+1, &f->smallest, &f->largest,
+ current_->GetOverlappingInputs(level + 1, &f->smallest, &f->largest,
&overlaps);
const int64_t sum = TotalFileSize(overlaps);
if (sum > result) {
@@ -1222,8 +1180,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() {
// *smallest, *largest.
// REQUIRES: inputs is not empty
void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
- InternalKey* smallest,
- InternalKey* largest) {
+ InternalKey* smallest, InternalKey* largest) {
assert(!inputs.empty());
smallest->Clear();
largest->Clear();
@@ -1248,8 +1205,7 @@ void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
// REQUIRES: inputs is not empty
void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1,
const std::vector<FileMetaData*>& inputs2,
- InternalKey* smallest,
- InternalKey* largest) {
+ InternalKey* smallest, InternalKey* largest) {
std::vector<FileMetaData*> all = inputs1;
all.insert(all.end(), inputs2.begin(), inputs2.end());
GetRange(all, smallest, largest);
@@ -1271,8 +1227,8 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
if (c->level() + which == 0) {
const std::vector<FileMetaData*>& files = c->inputs_[which];
for (size_t i = 0; i < files.size(); i++) {
- list[num++] = table_cache_->NewIterator(
- options, files[i]->number, files[i]->file_size);
+ list[num++] = table_cache_->NewIterator(options, files[i]->number,
+ files[i]->file_size);
}
} else {
// Create concatenating iterator for the files from this level
@@ -1295,11 +1251,11 @@ Compaction* VersionSet::PickCompaction() {
// We prefer compactions triggered by too much data in a level over
// the compactions triggered by seeks.
const bool size_compaction = (current_->compaction_score_ >= 1);
- const bool seek_compaction = (current_->file_to_compact_ != NULL);
+ const bool seek_compaction = (current_->file_to_compact_ != nullptr);
if (size_compaction) {
level = current_->compaction_level_;
assert(level >= 0);
- assert(level+1 < config::kNumLevels);
+ assert(level + 1 < config::kNumLevels);
c = new Compaction(options_, level);
// Pick the first file that comes after compact_pointer_[level]
@@ -1320,7 +1276,7 @@ Compaction* VersionSet::PickCompaction() {
c = new Compaction(options_, level);
c->inputs_[0].push_back(current_->file_to_compact_);
} else {
- return NULL;
+ return nullptr;
}
c->input_version_ = current_;
@@ -1342,12 +1298,94 @@ Compaction* VersionSet::PickCompaction() {
return c;
}
+// Finds the largest key in a vector of files. Returns true if files is not
+// empty.
+bool FindLargestKey(const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& files,
+ InternalKey* largest_key) {
+ if (files.empty()) {
+ return false;
+ }
+ *largest_key = files[0]->largest;
+ for (size_t i = 1; i < files.size(); ++i) {
+ FileMetaData* f = files[i];
+ if (icmp.Compare(f->largest, *largest_key) > 0) {
+ *largest_key = f->largest;
+ }
+ }
+ return true;
+}
+
+// Finds the minimum file b2=(l2, u2) in level_files for which l2 > u1 and
+// user_key(l2) = user_key(u1).
+FileMetaData* FindSmallestBoundaryFile(
+ const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& level_files,
+ const InternalKey& largest_key) {
+ const Comparator* user_cmp = icmp.user_comparator();
+ FileMetaData* smallest_boundary_file = nullptr;
+ for (size_t i = 0; i < level_files.size(); ++i) {
+ FileMetaData* f = level_files[i];
+ if (icmp.Compare(f->smallest, largest_key) > 0 &&
+ user_cmp->Compare(f->smallest.user_key(), largest_key.user_key()) ==
+ 0) {
+ if (smallest_boundary_file == nullptr ||
+ icmp.Compare(f->smallest, smallest_boundary_file->smallest) < 0) {
+ smallest_boundary_file = f;
+ }
+ }
+ }
+ return smallest_boundary_file;
+}
+
+// Extracts the largest file b1 from |compaction_files| and then searches for a
+// b2 in |level_files| for which user_key(u1) = user_key(l2). If it finds such a
+// file b2 (known as a boundary file) it adds it to |compaction_files| and then
+// searches again using this new upper bound.
+//
+// If there are two blocks, b1=(l1, u1) and b2=(l2, u2), with
+// user_key(u1) = user_key(l2), and we compact b1 but not b2, then a
+// subsequent get operation will yield an incorrect result: because reads
+// search level by level for records matching the supplied user key, they
+// will return the stale record from b2 in level i rather than the newer
+// record from b1.
+//
+// parameters:
+// in level_files: List of files to search for boundary files.
+// in/out compaction_files: List of files to extend by adding boundary files.
+void AddBoundaryInputs(const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& level_files,
+ std::vector<FileMetaData*>* compaction_files) {
+ InternalKey largest_key;
+
+ // Quick return if compaction_files is empty.
+ if (!FindLargestKey(icmp, *compaction_files, &largest_key)) {
+ return;
+ }
+
+ bool continue_searching = true;
+ while (continue_searching) {
+ FileMetaData* smallest_boundary_file =
+ FindSmallestBoundaryFile(icmp, level_files, largest_key);
+
+ // If a boundary file was found advance largest_key, otherwise we're done.
+ if (smallest_boundary_file != nullptr) {
+ compaction_files->push_back(smallest_boundary_file);
+ largest_key = smallest_boundary_file->largest;
+ } else {
+ continue_searching = false;
+ }
+ }
+}
+
void VersionSet::SetupOtherInputs(Compaction* c) {
const int level = c->level();
InternalKey smallest, largest;
+
+ AddBoundaryInputs(icmp_, current_->files_[level], &c->inputs_[0]);
GetRange(c->inputs_[0], &smallest, &largest);
- current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1]);
+ current_->GetOverlappingInputs(level + 1, &smallest, &largest,
+ &c->inputs_[1]);
// Get entire range covered by compaction
InternalKey all_start, all_limit;
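
The new AddBoundaryInputs() pass widens the level-i inputs so that no user key ends up split between a file that is compacted away and one that stays behind. The following self-contained sketch reproduces the same search under a simplified key model (user key plus sequence number standing in for InternalKey); all names and types are illustrative rather than the real ones:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    struct SimpleKey {
      std::string user_key;
      uint64_t seq;  // for equal user keys, higher sequence numbers sort first
    };

    struct File {
      SimpleKey smallest;
      SimpleKey largest;
    };

    // <0, 0, >0, like InternalKeyComparator::Compare.
    int Compare(const SimpleKey& a, const SimpleKey& b) {
      int r = a.user_key.compare(b.user_key);
      if (r != 0) return r;
      if (a.seq > b.seq) return -1;  // newer entries order before older ones
      if (a.seq < b.seq) return +1;
      return 0;
    }

    void AddBoundaryInputs(const std::vector<File*>& level_files,
                           std::vector<File*>* compaction_files) {
      if (compaction_files->empty()) return;

      // Largest key currently covered by the compaction.
      SimpleKey largest = (*compaction_files)[0]->largest;
      for (File* f : *compaction_files) {
        if (Compare(f->largest, largest) > 0) largest = f->largest;
      }

      bool keep_searching = true;
      while (keep_searching) {
        File* boundary = nullptr;
        for (File* f : level_files) {
          // Older entries for the same user key as the current upper bound:
          // leaving this file behind could let a read return its stale record.
          if (Compare(f->smallest, largest) > 0 &&
              f->smallest.user_key == largest.user_key &&
              (boundary == nullptr ||
               Compare(f->smallest, boundary->smallest) < 0)) {
            boundary = f;
          }
        }
        if (boundary != nullptr) {
          compaction_files->push_back(boundary);
          largest = boundary->largest;
        } else {
          keep_searching = false;
        }
      }
    }

    int main() {
      File b1{{"k", 9}, {"k", 5}};  // newer entries for user key "k"
      File b2{{"k", 4}, {"k", 2}};  // older entries for the same user key
      std::vector<File*> level_files = {&b1, &b2};
      std::vector<File*> compaction = {&b1};
      AddBoundaryInputs(level_files, &compaction);
      assert(compaction.size() == 2);  // b2 was pulled in as a boundary file
      return 0;
    }
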
@@ -1358,6 +1396,7 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
if (!c->inputs_[1].empty()) {
std::vector<FileMetaData*> expanded0;
current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0);
+ AddBoundaryInputs(icmp_, current_->files_[level], &expanded0);
const int64_t inputs0_size = TotalFileSize(c->inputs_[0]);
const int64_t inputs1_size = TotalFileSize(c->inputs_[1]);
const int64_t expanded0_size = TotalFileSize(expanded0);
@@ -1367,18 +1406,14 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
InternalKey new_start, new_limit;
GetRange(expanded0, &new_start, &new_limit);
std::vector<FileMetaData*> expanded1;
- current_->GetOverlappingInputs(level+1, &new_start, &new_limit,
+ current_->GetOverlappingInputs(level + 1, &new_start, &new_limit,
&expanded1);
if (expanded1.size() == c->inputs_[1].size()) {
Log(options_->info_log,
"Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
- level,
- int(c->inputs_[0].size()),
- int(c->inputs_[1].size()),
- long(inputs0_size), long(inputs1_size),
- int(expanded0.size()),
- int(expanded1.size()),
- long(expanded0_size), long(inputs1_size));
+ level, int(c->inputs_[0].size()), int(c->inputs_[1].size()),
+ long(inputs0_size), long(inputs1_size), int(expanded0.size()),
+ int(expanded1.size()), long(expanded0_size), long(inputs1_size));
smallest = new_start;
largest = new_limit;
c->inputs_[0] = expanded0;
@@ -1395,13 +1430,6 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
&c->grandparents_);
}
- if (false) {
- Log(options_->info_log, "Compacting %d '%s' .. '%s'",
- level,
- smallest.DebugString().c_str(),
- largest.DebugString().c_str());
- }
-
// Update the place where we will do the next compaction for this level.
// We update this immediately instead of waiting for the VersionEdit
// to be applied so that if the compaction fails, we will try a different
@@ -1410,14 +1438,12 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
c->edit_.SetCompactPointer(level, largest);
}
-Compaction* VersionSet::CompactRange(
- int level,
- const InternalKey* begin,
- const InternalKey* end) {
+Compaction* VersionSet::CompactRange(int level, const InternalKey* begin,
+ const InternalKey* end) {
std::vector<FileMetaData*> inputs;
current_->GetOverlappingInputs(level, begin, end, &inputs);
if (inputs.empty()) {
- return NULL;
+ return nullptr;
}
// Avoid compacting too much in one shot in case the range is large.
@@ -1448,7 +1474,7 @@ Compaction* VersionSet::CompactRange(
Compaction::Compaction(const Options* options, int level)
: level_(level),
max_output_file_size_(MaxFileSizeForLevel(options, level)),
- input_version_(NULL),
+ input_version_(nullptr),
grandparent_index_(0),
seen_key_(false),
overlapped_bytes_(0) {
@@ -1458,7 +1484,7 @@ Compaction::Compaction(const Options* options, int level)
}
Compaction::~Compaction() {
- if (input_version_ != NULL) {
+ if (input_version_ != nullptr) {
input_version_->Unref();
}
}
@@ -1486,7 +1512,7 @@ bool Compaction::IsBaseLevelForKey(const Slice& user_key) {
const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) {
const std::vector<FileMetaData*>& files = input_version_->files_[lvl];
- for (; level_ptrs_[lvl] < files.size(); ) {
+ while (level_ptrs_[lvl] < files.size()) {
FileMetaData* f = files[level_ptrs_[lvl]];
if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
// We've advanced far enough
@@ -1507,8 +1533,9 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {
// Scan to find earliest grandparent file that contains key.
const InternalKeyComparator* icmp = &vset->icmp_;
while (grandparent_index_ < grandparents_.size() &&
- icmp->Compare(internal_key,
- grandparents_[grandparent_index_]->largest.Encode()) > 0) {
+ icmp->Compare(internal_key,
+ grandparents_[grandparent_index_]->largest.Encode()) >
+ 0) {
if (seen_key_) {
overlapped_bytes_ += grandparents_[grandparent_index_]->file_size;
}
@@ -1526,9 +1553,9 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {
}
void Compaction::ReleaseInputs() {
- if (input_version_ != NULL) {
+ if (input_version_ != nullptr) {
input_version_->Unref();
- input_version_ = NULL;
+ input_version_ = nullptr;
}
}
diff --git a/src/leveldb/db/version_set.h b/src/leveldb/db/version_set.h
index 7935a965a7..69f3d70133 100644
--- a/src/leveldb/db/version_set.h
+++ b/src/leveldb/db/version_set.h
@@ -18,6 +18,7 @@
#include <map>
#include <set>
#include <vector>
+
#include "db/dbformat.h"
#include "db/version_edit.h"
#include "port/port.h"
@@ -25,7 +26,9 @@
namespace leveldb {
-namespace log { class Writer; }
+namespace log {
+class Writer;
+}
class Compaction;
class Iterator;
@@ -39,30 +42,23 @@ class WritableFile;
// Return the smallest index i such that files[i]->largest >= key.
// Return files.size() if there is no such file.
// REQUIRES: "files" contains a sorted list of non-overlapping files.
-extern int FindFile(const InternalKeyComparator& icmp,
- const std::vector<FileMetaData*>& files,
- const Slice& key);
+int FindFile(const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& files, const Slice& key);
// Returns true iff some file in "files" overlaps the user key range
// [*smallest,*largest].
-// smallest==NULL represents a key smaller than all keys in the DB.
-// largest==NULL represents a key largest than all keys in the DB.
+// smallest==nullptr represents a key smaller than all keys in the DB.
+// largest==nullptr represents a key larger than all keys in the DB.
// REQUIRES: If disjoint_sorted_files, files[] contains disjoint ranges
// in sorted order.
-extern bool SomeFileOverlapsRange(
- const InternalKeyComparator& icmp,
- bool disjoint_sorted_files,
- const std::vector<FileMetaData*>& files,
- const Slice* smallest_user_key,
- const Slice* largest_user_key);
+bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
+ bool disjoint_sorted_files,
+ const std::vector<FileMetaData*>& files,
+ const Slice* smallest_user_key,
+ const Slice* largest_user_key);
class Version {
public:
- // Append to *iters a sequence of iterators that will
- // yield the contents of this Version when merged together.
- // REQUIRES: This version has been saved (see VersionSet::SaveTo)
- void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
-
// Lookup the value for key. If found, store it in *val and
// return OK. Else return a non-OK status. Fills *stats.
// REQUIRES: lock is not held
@@ -70,6 +66,12 @@ class Version {
FileMetaData* seek_file;
int seek_file_level;
};
+
+ // Append to *iters a sequence of iterators that will
+ // yield the contents of this Version when merged together.
+ // REQUIRES: This version has been saved (see VersionSet::SaveTo)
+ void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
+
Status Get(const ReadOptions&, const LookupKey& key, std::string* val,
GetStats* stats);
@@ -91,16 +93,15 @@ class Version {
void GetOverlappingInputs(
int level,
- const InternalKey* begin, // NULL means before all keys
- const InternalKey* end, // NULL means after all keys
+ const InternalKey* begin, // nullptr means before all keys
+ const InternalKey* end, // nullptr means after all keys
std::vector<FileMetaData*>* inputs);
// Returns true iff some file in the specified level overlaps
// some part of [*smallest_user_key,*largest_user_key].
- // smallest_user_key==NULL represents a key smaller than all keys in the DB.
- // largest_user_key==NULL represents a key largest than all keys in the DB.
- bool OverlapInLevel(int level,
- const Slice* smallest_user_key,
+ // smallest_user_key==nullptr represents a key smaller than all the DB's keys.
+ // largest_user_key==nullptr represents a key larger than all the DB's keys.
+ bool OverlapInLevel(int level, const Slice* smallest_user_key,
const Slice* largest_user_key);
// Return the level at which we should place a new memtable compaction
@@ -118,6 +119,22 @@ class Version {
friend class VersionSet;
class LevelFileNumIterator;
+
+ explicit Version(VersionSet* vset)
+ : vset_(vset),
+ next_(this),
+ prev_(this),
+ refs_(0),
+ file_to_compact_(nullptr),
+ file_to_compact_level_(-1),
+ compaction_score_(-1),
+ compaction_level_(-1) {}
+
+ Version(const Version&) = delete;
+ Version& operator=(const Version&) = delete;
+
+ ~Version();
+
Iterator* NewConcatenatingIterator(const ReadOptions&, int level) const;
// Call func(arg, level, f) for every file that overlaps user_key in
@@ -125,14 +142,13 @@ class Version {
// false, makes no more calls.
//
// REQUIRES: user portion of internal_key == user_key.
- void ForEachOverlapping(Slice user_key, Slice internal_key,
- void* arg,
+ void ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
bool (*func)(void*, int, FileMetaData*));
- VersionSet* vset_; // VersionSet to which this Version belongs
- Version* next_; // Next version in linked list
- Version* prev_; // Previous version in linked list
- int refs_; // Number of live refs to this version
+ VersionSet* vset_; // VersionSet to which this Version belongs
+ Version* next_; // Next version in linked list
+ Version* prev_; // Previous version in linked list
+ int refs_; // Number of live refs to this version
// List of files per level
std::vector<FileMetaData*> files_[config::kNumLevels];
@@ -146,28 +162,15 @@ class Version {
// are initialized by Finalize().
double compaction_score_;
int compaction_level_;
-
- explicit Version(VersionSet* vset)
- : vset_(vset), next_(this), prev_(this), refs_(0),
- file_to_compact_(NULL),
- file_to_compact_level_(-1),
- compaction_score_(-1),
- compaction_level_(-1) {
- }
-
- ~Version();
-
- // No copying allowed
- Version(const Version&);
- void operator=(const Version&);
};
class VersionSet {
public:
- VersionSet(const std::string& dbname,
- const Options* options,
- TableCache* table_cache,
- const InternalKeyComparator*);
+ VersionSet(const std::string& dbname, const Options* options,
+ TableCache* table_cache, const InternalKeyComparator*);
+ VersionSet(const VersionSet&) = delete;
+ VersionSet& operator=(const VersionSet&) = delete;
+
~VersionSet();
// Apply *edit to the current version to form a new descriptor that
@@ -179,7 +182,7 @@ class VersionSet {
EXCLUSIVE_LOCKS_REQUIRED(mu);
// Recover the last saved descriptor from persistent storage.
- Status Recover(bool *save_manifest);
+ Status Recover(bool* save_manifest);
// Return the current version.
Version* current() const { return current_; }
@@ -225,19 +228,17 @@ class VersionSet {
uint64_t PrevLogNumber() const { return prev_log_number_; }
// Pick level and inputs for a new compaction.
- // Returns NULL if there is no compaction to be done.
+ // Returns nullptr if there is no compaction to be done.
// Otherwise returns a pointer to a heap-allocated object that
// describes the compaction. Caller should delete the result.
Compaction* PickCompaction();
// Return a compaction object for compacting the range [begin,end] in
- // the specified level. Returns NULL if there is nothing in that
+ // the specified level. Returns nullptr if there is nothing in that
// level that overlaps the specified range. Caller should delete
// the result.
- Compaction* CompactRange(
- int level,
- const InternalKey* begin,
- const InternalKey* end);
+ Compaction* CompactRange(int level, const InternalKey* begin,
+ const InternalKey* end);
// Return the maximum overlapping data (in bytes) at next level for any
// file at a level >= 1.
@@ -250,7 +251,7 @@ class VersionSet {
// Returns true iff some level needs a compaction.
bool NeedsCompaction() const {
Version* v = current_;
- return (v->compaction_score_ >= 1) || (v->file_to_compact_ != NULL);
+ return (v->compaction_score_ >= 1) || (v->file_to_compact_ != nullptr);
}
// Add all files listed in any live version to *live.
@@ -278,14 +279,12 @@ class VersionSet {
void Finalize(Version* v);
- void GetRange(const std::vector<FileMetaData*>& inputs,
- InternalKey* smallest,
+ void GetRange(const std::vector<FileMetaData*>& inputs, InternalKey* smallest,
InternalKey* largest);
void GetRange2(const std::vector<FileMetaData*>& inputs1,
const std::vector<FileMetaData*>& inputs2,
- InternalKey* smallest,
- InternalKey* largest);
+ InternalKey* smallest, InternalKey* largest);
void SetupOtherInputs(Compaction* c);
@@ -314,10 +313,6 @@ class VersionSet {
// Per-level key at which the next compaction at that level should start.
// Either an empty string, or a valid InternalKey.
std::string compact_pointer_[config::kNumLevels];
-
- // No copying allowed
- VersionSet(const VersionSet&);
- void operator=(const VersionSet&);
};
// A Compaction encapsulates information about a compaction.
@@ -374,7 +369,7 @@ class Compaction {
VersionEdit edit_;
// Each compaction reads inputs from "level_" and "level_+1"
- std::vector<FileMetaData*> inputs_[2]; // The two sets of inputs
+ std::vector<FileMetaData*> inputs_[2]; // The two sets of inputs
// State used to check for number of overlapping grandparent files
// (parent == level_ + 1, grandparent == level_ + 2)
diff --git a/src/leveldb/db/version_set_test.cc b/src/leveldb/db/version_set_test.cc
index 501e34d133..c1056a1e7d 100644
--- a/src/leveldb/db/version_set_test.cc
+++ b/src/leveldb/db/version_set_test.cc
@@ -11,10 +11,7 @@ namespace leveldb {
class FindFileTest {
public:
- std::vector<FileMetaData*> files_;
- bool disjoint_sorted_files_;
-
- FindFileTest() : disjoint_sorted_files_(true) { }
+ FindFileTest() : disjoint_sorted_files_(true) {}
~FindFileTest() {
for (int i = 0; i < files_.size(); i++) {
@@ -40,20 +37,25 @@ class FindFileTest {
bool Overlaps(const char* smallest, const char* largest) {
InternalKeyComparator cmp(BytewiseComparator());
- Slice s(smallest != NULL ? smallest : "");
- Slice l(largest != NULL ? largest : "");
+ Slice s(smallest != nullptr ? smallest : "");
+ Slice l(largest != nullptr ? largest : "");
return SomeFileOverlapsRange(cmp, disjoint_sorted_files_, files_,
- (smallest != NULL ? &s : NULL),
- (largest != NULL ? &l : NULL));
+ (smallest != nullptr ? &s : nullptr),
+ (largest != nullptr ? &l : nullptr));
}
+
+ bool disjoint_sorted_files_;
+
+ private:
+ std::vector<FileMetaData*> files_;
};
TEST(FindFileTest, Empty) {
ASSERT_EQ(0, Find("foo"));
- ASSERT_TRUE(! Overlaps("a", "z"));
- ASSERT_TRUE(! Overlaps(NULL, "z"));
- ASSERT_TRUE(! Overlaps("a", NULL));
- ASSERT_TRUE(! Overlaps(NULL, NULL));
+ ASSERT_TRUE(!Overlaps("a", "z"));
+ ASSERT_TRUE(!Overlaps(nullptr, "z"));
+ ASSERT_TRUE(!Overlaps("a", nullptr));
+ ASSERT_TRUE(!Overlaps(nullptr, nullptr));
}
TEST(FindFileTest, Single) {
@@ -65,8 +67,8 @@ TEST(FindFileTest, Single) {
ASSERT_EQ(1, Find("q1"));
ASSERT_EQ(1, Find("z"));
- ASSERT_TRUE(! Overlaps("a", "b"));
- ASSERT_TRUE(! Overlaps("z1", "z2"));
+ ASSERT_TRUE(!Overlaps("a", "b"));
+ ASSERT_TRUE(!Overlaps("z1", "z2"));
ASSERT_TRUE(Overlaps("a", "p"));
ASSERT_TRUE(Overlaps("a", "q"));
ASSERT_TRUE(Overlaps("a", "z"));
@@ -78,15 +80,14 @@ TEST(FindFileTest, Single) {
ASSERT_TRUE(Overlaps("q", "q"));
ASSERT_TRUE(Overlaps("q", "q1"));
- ASSERT_TRUE(! Overlaps(NULL, "j"));
- ASSERT_TRUE(! Overlaps("r", NULL));
- ASSERT_TRUE(Overlaps(NULL, "p"));
- ASSERT_TRUE(Overlaps(NULL, "p1"));
- ASSERT_TRUE(Overlaps("q", NULL));
- ASSERT_TRUE(Overlaps(NULL, NULL));
+ ASSERT_TRUE(!Overlaps(nullptr, "j"));
+ ASSERT_TRUE(!Overlaps("r", nullptr));
+ ASSERT_TRUE(Overlaps(nullptr, "p"));
+ ASSERT_TRUE(Overlaps(nullptr, "p1"));
+ ASSERT_TRUE(Overlaps("q", nullptr));
+ ASSERT_TRUE(Overlaps(nullptr, nullptr));
}
-
TEST(FindFileTest, Multiple) {
Add("150", "200");
Add("200", "250");
@@ -110,10 +111,10 @@ TEST(FindFileTest, Multiple) {
ASSERT_EQ(3, Find("450"));
ASSERT_EQ(4, Find("451"));
- ASSERT_TRUE(! Overlaps("100", "149"));
- ASSERT_TRUE(! Overlaps("251", "299"));
- ASSERT_TRUE(! Overlaps("451", "500"));
- ASSERT_TRUE(! Overlaps("351", "399"));
+ ASSERT_TRUE(!Overlaps("100", "149"));
+ ASSERT_TRUE(!Overlaps("251", "299"));
+ ASSERT_TRUE(!Overlaps("451", "500"));
+ ASSERT_TRUE(!Overlaps("351", "399"));
ASSERT_TRUE(Overlaps("100", "150"));
ASSERT_TRUE(Overlaps("100", "200"));
@@ -130,25 +131,25 @@ TEST(FindFileTest, MultipleNullBoundaries) {
Add("200", "250");
Add("300", "350");
Add("400", "450");
- ASSERT_TRUE(! Overlaps(NULL, "149"));
- ASSERT_TRUE(! Overlaps("451", NULL));
- ASSERT_TRUE(Overlaps(NULL, NULL));
- ASSERT_TRUE(Overlaps(NULL, "150"));
- ASSERT_TRUE(Overlaps(NULL, "199"));
- ASSERT_TRUE(Overlaps(NULL, "200"));
- ASSERT_TRUE(Overlaps(NULL, "201"));
- ASSERT_TRUE(Overlaps(NULL, "400"));
- ASSERT_TRUE(Overlaps(NULL, "800"));
- ASSERT_TRUE(Overlaps("100", NULL));
- ASSERT_TRUE(Overlaps("200", NULL));
- ASSERT_TRUE(Overlaps("449", NULL));
- ASSERT_TRUE(Overlaps("450", NULL));
+ ASSERT_TRUE(!Overlaps(nullptr, "149"));
+ ASSERT_TRUE(!Overlaps("451", nullptr));
+ ASSERT_TRUE(Overlaps(nullptr, nullptr));
+ ASSERT_TRUE(Overlaps(nullptr, "150"));
+ ASSERT_TRUE(Overlaps(nullptr, "199"));
+ ASSERT_TRUE(Overlaps(nullptr, "200"));
+ ASSERT_TRUE(Overlaps(nullptr, "201"));
+ ASSERT_TRUE(Overlaps(nullptr, "400"));
+ ASSERT_TRUE(Overlaps(nullptr, "800"));
+ ASSERT_TRUE(Overlaps("100", nullptr));
+ ASSERT_TRUE(Overlaps("200", nullptr));
+ ASSERT_TRUE(Overlaps("449", nullptr));
+ ASSERT_TRUE(Overlaps("450", nullptr));
}
TEST(FindFileTest, OverlapSequenceChecks) {
Add("200", "200", 5000, 3000);
- ASSERT_TRUE(! Overlaps("199", "199"));
- ASSERT_TRUE(! Overlaps("201", "300"));
+ ASSERT_TRUE(!Overlaps("199", "199"));
+ ASSERT_TRUE(!Overlaps("201", "300"));
ASSERT_TRUE(Overlaps("200", "200"));
ASSERT_TRUE(Overlaps("190", "200"));
ASSERT_TRUE(Overlaps("200", "210"));
@@ -158,8 +159,8 @@ TEST(FindFileTest, OverlappingFiles) {
Add("150", "600");
Add("400", "500");
disjoint_sorted_files_ = false;
- ASSERT_TRUE(! Overlaps("100", "149"));
- ASSERT_TRUE(! Overlaps("601", "700"));
+ ASSERT_TRUE(!Overlaps("100", "149"));
+ ASSERT_TRUE(!Overlaps("601", "700"));
ASSERT_TRUE(Overlaps("100", "150"));
ASSERT_TRUE(Overlaps("100", "200"));
ASSERT_TRUE(Overlaps("100", "300"));
@@ -172,8 +173,160 @@ TEST(FindFileTest, OverlappingFiles) {
ASSERT_TRUE(Overlaps("600", "700"));
}
-} // namespace leveldb
+void AddBoundaryInputs(const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& level_files,
+ std::vector<FileMetaData*>* compaction_files);
+
+class AddBoundaryInputsTest {
+ public:
+ std::vector<FileMetaData*> level_files_;
+ std::vector<FileMetaData*> compaction_files_;
+ std::vector<FileMetaData*> all_files_;
+ InternalKeyComparator icmp_;
+
+ AddBoundaryInputsTest() : icmp_(BytewiseComparator()) {}
+
+ ~AddBoundaryInputsTest() {
+ for (size_t i = 0; i < all_files_.size(); ++i) {
+ delete all_files_[i];
+ }
+ all_files_.clear();
+ }
+
+ FileMetaData* CreateFileMetaData(uint64_t number, InternalKey smallest,
+ InternalKey largest) {
+ FileMetaData* f = new FileMetaData();
+ f->number = number;
+ f->smallest = smallest;
+ f->largest = largest;
+ all_files_.push_back(f);
+ return f;
+ }
+};
+
+TEST(AddBoundaryInputsTest, TestEmptyFileSets) {
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_TRUE(compaction_files_.empty());
+ ASSERT_TRUE(level_files_.empty());
+}
+
+TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("100", 1, kTypeValue)));
+ compaction_files_.push_back(f1);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(1, compaction_files_.size());
+ ASSERT_EQ(f1, compaction_files_[0]);
+ ASSERT_TRUE(level_files_.empty());
+}
+
+TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("100", 1, kTypeValue)));
+ level_files_.push_back(f1);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_TRUE(compaction_files_.empty());
+ ASSERT_EQ(1, level_files_.size());
+ ASSERT_EQ(f1, level_files_[0]);
+}
+
+TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("100", 1, kTypeValue)));
+ FileMetaData* f2 =
+ CreateFileMetaData(1, InternalKey("200", 2, kTypeValue),
+ InternalKey(InternalKey("200", 1, kTypeValue)));
+ FileMetaData* f3 =
+ CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
+ InternalKey(InternalKey("300", 1, kTypeValue)));
+
+ level_files_.push_back(f3);
+ level_files_.push_back(f2);
+ level_files_.push_back(f1);
+ compaction_files_.push_back(f2);
+ compaction_files_.push_back(f3);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(2, compaction_files_.size());
+}
+
+TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 3, kTypeValue),
+ InternalKey(InternalKey("100", 2, kTypeValue)));
+ FileMetaData* f2 =
+ CreateFileMetaData(1, InternalKey("100", 1, kTypeValue),
+ InternalKey(InternalKey("200", 3, kTypeValue)));
+ FileMetaData* f3 =
+ CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
+ InternalKey(InternalKey("300", 1, kTypeValue)));
+
+ level_files_.push_back(f3);
+ level_files_.push_back(f2);
+ level_files_.push_back(f1);
+ compaction_files_.push_back(f1);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(2, compaction_files_.size());
+ ASSERT_EQ(f1, compaction_files_[0]);
+ ASSERT_EQ(f2, compaction_files_[1]);
+}
+
+TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+ InternalKey(InternalKey("100", 5, kTypeValue)));
+ FileMetaData* f2 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("300", 1, kTypeValue)));
+ FileMetaData* f3 =
+ CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
+ InternalKey(InternalKey("100", 3, kTypeValue)));
+
+ level_files_.push_back(f2);
+ level_files_.push_back(f3);
+ level_files_.push_back(f1);
+ compaction_files_.push_back(f1);
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(3, compaction_files_.size());
+ ASSERT_EQ(f1, compaction_files_[0]);
+ ASSERT_EQ(f3, compaction_files_[1]);
+ ASSERT_EQ(f2, compaction_files_[2]);
}
+
+TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+ InternalKey(InternalKey("100", 5, kTypeValue)));
+ FileMetaData* f2 =
+ CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+ InternalKey(InternalKey("100", 5, kTypeValue)));
+ FileMetaData* f3 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("300", 1, kTypeValue)));
+ FileMetaData* f4 =
+ CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
+ InternalKey(InternalKey("100", 3, kTypeValue)));
+
+ level_files_.push_back(f2);
+ level_files_.push_back(f3);
+ level_files_.push_back(f4);
+
+ compaction_files_.push_back(f1);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(3, compaction_files_.size());
+ ASSERT_EQ(f1, compaction_files_[0]);
+ ASSERT_EQ(f4, compaction_files_[1]);
+ ASSERT_EQ(f3, compaction_files_[2]);
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/write_batch.cc b/src/leveldb/db/write_batch.cc
index 33f4a4257e..b54313c35e 100644
--- a/src/leveldb/db/write_batch.cc
+++ b/src/leveldb/db/write_batch.cc
@@ -15,10 +15,10 @@
#include "leveldb/write_batch.h"
-#include "leveldb/db.h"
#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
+#include "leveldb/db.h"
#include "util/coding.h"
namespace leveldb {
@@ -26,19 +26,19 @@ namespace leveldb {
// WriteBatch header has an 8-byte sequence number followed by a 4-byte count.
static const size_t kHeader = 12;
-WriteBatch::WriteBatch() {
- Clear();
-}
+WriteBatch::WriteBatch() { Clear(); }
-WriteBatch::~WriteBatch() { }
+WriteBatch::~WriteBatch() = default;
-WriteBatch::Handler::~Handler() { }
+WriteBatch::Handler::~Handler() = default;
void WriteBatch::Clear() {
rep_.clear();
rep_.resize(kHeader);
}
+size_t WriteBatch::ApproximateSize() const { return rep_.size(); }
+
Status WriteBatch::Iterate(Handler* handler) const {
Slice input(rep_);
if (input.size() < kHeader) {
@@ -108,25 +108,28 @@ void WriteBatch::Delete(const Slice& key) {
PutLengthPrefixedSlice(&rep_, key);
}
+void WriteBatch::Append(const WriteBatch& source) {
+ WriteBatchInternal::Append(this, &source);
+}
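
A minimal sketch of how a caller might use the two public additions here, WriteBatch::Append() and WriteBatch::ApproximateSize(); it mirrors the WriteBatchTest cases added further down and is illustrative only.

```c++
// Illustrative sketch only: demonstrates WriteBatch::Append() and the new
// WriteBatch::ApproximateSize() accessor via the public leveldb API.
#include <cassert>

#include "leveldb/slice.h"
#include "leveldb/write_batch.h"

int main() {
  leveldb::WriteBatch b1, b2;

  const size_t empty_size = b1.ApproximateSize();

  b2.Put(leveldb::Slice("a"), leveldb::Slice("va"));
  b2.Delete(leveldb::Slice("stale-key"));

  // Append copies b2's operations onto the end of b1.
  b1.Append(b2);

  // ApproximateSize() grows as operations are added; it simply reflects the
  // size of the batch's serialized representation.
  assert(b1.ApproximateSize() > empty_size);
  return 0;
}
```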
+
namespace {
class MemTableInserter : public WriteBatch::Handler {
public:
SequenceNumber sequence_;
MemTable* mem_;
- virtual void Put(const Slice& key, const Slice& value) {
+ void Put(const Slice& key, const Slice& value) override {
mem_->Add(sequence_, kTypeValue, key, value);
sequence_++;
}
- virtual void Delete(const Slice& key) {
+ void Delete(const Slice& key) override {
mem_->Add(sequence_, kTypeDeletion, key, Slice());
sequence_++;
}
};
} // namespace
-Status WriteBatchInternal::InsertInto(const WriteBatch* b,
- MemTable* memtable) {
+Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) {
MemTableInserter inserter;
inserter.sequence_ = WriteBatchInternal::Sequence(b);
inserter.mem_ = memtable;
diff --git a/src/leveldb/db/write_batch_internal.h b/src/leveldb/db/write_batch_internal.h
index 9448ef7b21..fce86e3f1f 100644
--- a/src/leveldb/db/write_batch_internal.h
+++ b/src/leveldb/db/write_batch_internal.h
@@ -29,13 +29,9 @@ class WriteBatchInternal {
// this batch.
static void SetSequence(WriteBatch* batch, SequenceNumber seq);
- static Slice Contents(const WriteBatch* batch) {
- return Slice(batch->rep_);
- }
+ static Slice Contents(const WriteBatch* batch) { return Slice(batch->rep_); }
- static size_t ByteSize(const WriteBatch* batch) {
- return batch->rep_.size();
- }
+ static size_t ByteSize(const WriteBatch* batch) { return batch->rep_.size(); }
static void SetContents(WriteBatch* batch, const Slice& contents);
@@ -46,5 +42,4 @@ class WriteBatchInternal {
} // namespace leveldb
-
#endif // STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_
diff --git a/src/leveldb/db/write_batch_test.cc b/src/leveldb/db/write_batch_test.cc
index 9064e3d85e..c32317fb5e 100644
--- a/src/leveldb/db/write_batch_test.cc
+++ b/src/leveldb/db/write_batch_test.cc
@@ -52,7 +52,7 @@ static std::string PrintContents(WriteBatch* b) {
return state;
}
-class WriteBatchTest { };
+class WriteBatchTest {};
TEST(WriteBatchTest, Empty) {
WriteBatch batch;
@@ -68,10 +68,11 @@ TEST(WriteBatchTest, Multiple) {
WriteBatchInternal::SetSequence(&batch, 100);
ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch));
ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
- ASSERT_EQ("Put(baz, boo)@102"
- "Delete(box)@101"
- "Put(foo, bar)@100",
- PrintContents(&batch));
+ ASSERT_EQ(
+ "Put(baz, boo)@102"
+ "Delete(box)@101"
+ "Put(foo, bar)@100",
+ PrintContents(&batch));
}
TEST(WriteBatchTest, Corruption) {
@@ -81,40 +82,56 @@ TEST(WriteBatchTest, Corruption) {
WriteBatchInternal::SetSequence(&batch, 200);
Slice contents = WriteBatchInternal::Contents(&batch);
WriteBatchInternal::SetContents(&batch,
- Slice(contents.data(),contents.size()-1));
- ASSERT_EQ("Put(foo, bar)@200"
- "ParseError()",
- PrintContents(&batch));
+ Slice(contents.data(), contents.size() - 1));
+ ASSERT_EQ(
+ "Put(foo, bar)@200"
+ "ParseError()",
+ PrintContents(&batch));
}
TEST(WriteBatchTest, Append) {
WriteBatch b1, b2;
WriteBatchInternal::SetSequence(&b1, 200);
WriteBatchInternal::SetSequence(&b2, 300);
- WriteBatchInternal::Append(&b1, &b2);
- ASSERT_EQ("",
- PrintContents(&b1));
+ b1.Append(b2);
+ ASSERT_EQ("", PrintContents(&b1));
b2.Put("a", "va");
- WriteBatchInternal::Append(&b1, &b2);
- ASSERT_EQ("Put(a, va)@200",
- PrintContents(&b1));
+ b1.Append(b2);
+ ASSERT_EQ("Put(a, va)@200", PrintContents(&b1));
b2.Clear();
b2.Put("b", "vb");
- WriteBatchInternal::Append(&b1, &b2);
- ASSERT_EQ("Put(a, va)@200"
- "Put(b, vb)@201",
- PrintContents(&b1));
+ b1.Append(b2);
+ ASSERT_EQ(
+ "Put(a, va)@200"
+ "Put(b, vb)@201",
+ PrintContents(&b1));
b2.Delete("foo");
- WriteBatchInternal::Append(&b1, &b2);
- ASSERT_EQ("Put(a, va)@200"
- "Put(b, vb)@202"
- "Put(b, vb)@201"
- "Delete(foo)@203",
- PrintContents(&b1));
+ b1.Append(b2);
+ ASSERT_EQ(
+ "Put(a, va)@200"
+ "Put(b, vb)@202"
+ "Put(b, vb)@201"
+ "Delete(foo)@203",
+ PrintContents(&b1));
}
-} // namespace leveldb
+TEST(WriteBatchTest, ApproximateSize) {
+ WriteBatch batch;
+ size_t empty_size = batch.ApproximateSize();
+
+ batch.Put(Slice("foo"), Slice("bar"));
+ size_t one_key_size = batch.ApproximateSize();
+ ASSERT_LT(empty_size, one_key_size);
+
+ batch.Put(Slice("baz"), Slice("boo"));
+ size_t two_keys_size = batch.ApproximateSize();
+ ASSERT_LT(one_key_size, two_keys_size);
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
+ batch.Delete(Slice("box"));
+ size_t post_delete_size = batch.ApproximateSize();
+ ASSERT_LT(two_keys_size, post_delete_size);
}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/doc/benchmark.html b/src/leveldb/doc/benchmark.html
index c4639772c1..f3fd77144c 100644
--- a/src/leveldb/doc/benchmark.html
+++ b/src/leveldb/doc/benchmark.html
@@ -90,9 +90,9 @@ div.bsql {
<h4>Benchmark Source Code</h4>
<p>We wrote benchmark tools for SQLite and Kyoto TreeDB based on LevelDB's <span class="code">db_bench</span>. The code for each of the benchmarks resides here:</p>
<ul>
- <li> <b>LevelDB:</b> <a href="http://code.google.com/p/leveldb/source/browse/trunk/db/db_bench.cc">db/db_bench.cc</a>.</li>
- <li> <b>SQLite:</b> <a href="http://code.google.com/p/leveldb/source/browse/#svn%2Ftrunk%2Fdoc%2Fbench%2Fdb_bench_sqlite3.cc">doc/bench/db_bench_sqlite3.cc</a>.</li>
- <li> <b>Kyoto TreeDB:</b> <a href="http://code.google.com/p/leveldb/source/browse/#svn%2Ftrunk%2Fdoc%2Fbench%2Fdb_bench_tree_db.cc">doc/bench/db_bench_tree_db.cc</a>.</li>
+ <li> <b>LevelDB:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench.cc">benchmarks/db_bench.cc</a>.</li>
+ <li> <b>SQLite:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench_sqlite3.cc">benchmarks/db_bench_sqlite3.cc</a>.</li>
+ <li> <b>Kyoto TreeDB:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench_tree_db.cc">benchmarks/db_bench_tree_db.cc</a>.</li>
</ul>
<h4>Custom Build Specifications</h4>
diff --git a/src/leveldb/doc/impl.md b/src/leveldb/doc/impl.md
index 4b13f2a6ba..cacabb96fc 100644
--- a/src/leveldb/doc/impl.md
+++ b/src/leveldb/doc/impl.md
@@ -64,13 +64,15 @@ Other files used for miscellaneous purposes may also be present (LOCK, *.dbtmp).
## Level 0
-When the log file grows above a certain size (1MB by default):
-Create a brand new memtable and log file and direct future updates here
+When the log file grows above a certain size (4MB by default):
+Create a brand new memtable and log file and direct future updates here.
+
In the background:
-Write the contents of the previous memtable to an sstable
-Discard the memtable
-Delete the old log file and the old memtable
-Add the new sstable to the young (level-0) level.
+
+1. Write the contents of the previous memtable to an sstable.
+2. Discard the memtable.
+3. Delete the old log file and the old memtable.
+4. Add the new sstable to the young (level-0) level.
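
As a side note on the list above (not part of the upstream document): the 4MB default corresponds to Options::write_buffer_size, so the point at which a memtable is switched out and flushed to level 0 can be tuned when opening the database. A minimal sketch, assuming a throwaway database path:

```c++
// Illustrative only: write_buffer_size is the knob behind the "4MB by
// default" threshold described above.
#include "leveldb/db.h"
#include "leveldb/options.h"

int main() {
  leveldb::Options options;
  options.create_if_missing = true;
  options.write_buffer_size = 8 * 1024 * 1024;  // flush memtables at ~8MB

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/leveldb-demo", &db);
  if (s.ok()) {
    delete db;
  }
  return 0;
}
```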
## Compactions
diff --git a/src/leveldb/doc/index.md b/src/leveldb/doc/index.md
index be8569692b..3d9a25805b 100644
--- a/src/leveldb/doc/index.md
+++ b/src/leveldb/doc/index.md
@@ -307,7 +307,7 @@ version numbers found in the keys to decide how to interpret them.
## Performance
Performance can be tuned by changing the default values of the types defined in
-`include/leveldb/options.h`.
+`include/options.h`.
### Block size
@@ -338,19 +338,19 @@ options.compression = leveldb::kNoCompression;
### Cache
The contents of the database are stored in a set of files in the filesystem and
-each file stores a sequence of compressed blocks. If options.cache is non-NULL,
-it is used to cache frequently used uncompressed block contents.
+each file stores a sequence of compressed blocks. If options.block_cache is
+non-NULL, it is used to cache frequently used uncompressed block contents.
```c++
#include "leveldb/cache.h"
leveldb::Options options;
-options.cache = leveldb::NewLRUCache(100 * 1048576); // 100MB cache
+options.block_cache = leveldb::NewLRUCache(100 * 1048576); // 100MB cache
leveldb::DB* db;
leveldb::DB::Open(options, name, &db);
... use the db ...
delete db
-delete options.cache;
+delete options.block_cache;
```
Note that the cache holds uncompressed data, and therefore it should be sized
diff --git a/src/leveldb/helpers/memenv/memenv.cc b/src/leveldb/helpers/memenv/memenv.cc
index 68c0614a59..47e4481f7c 100644
--- a/src/leveldb/helpers/memenv/memenv.cc
+++ b/src/leveldb/helpers/memenv/memenv.cc
@@ -4,14 +4,18 @@
#include "helpers/memenv/memenv.h"
+#include <string.h>
+
+#include <limits>
+#include <map>
+#include <string>
+#include <vector>
+
#include "leveldb/env.h"
#include "leveldb/status.h"
#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/mutexlock.h"
-#include <map>
-#include <string.h>
-#include <string>
-#include <vector>
namespace leveldb {
@@ -23,6 +27,10 @@ class FileState {
// and the caller must call Ref() at least once.
FileState() : refs_(0), size_(0) {}
+ // No copying allowed.
+ FileState(const FileState&) = delete;
+ FileState& operator=(const FileState&) = delete;
+
// Increase the reference count.
void Ref() {
MutexLock lock(&refs_mutex_);
@@ -47,9 +55,22 @@ class FileState {
}
}
- uint64_t Size() const { return size_; }
+ uint64_t Size() const {
+ MutexLock lock(&blocks_mutex_);
+ return size_;
+ }
+
+ void Truncate() {
+ MutexLock lock(&blocks_mutex_);
+ for (char*& block : blocks_) {
+ delete[] block;
+ }
+ blocks_.clear();
+ size_ = 0;
+ }
Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
+ MutexLock lock(&blocks_mutex_);
if (offset > size_) {
return Status::IOError("Offset greater than file size.");
}
@@ -62,16 +83,9 @@ class FileState {
return Status::OK();
}
- assert(offset / kBlockSize <= SIZE_MAX);
+ assert(offset / kBlockSize <= std::numeric_limits<size_t>::max());
size_t block = static_cast<size_t>(offset / kBlockSize);
size_t block_offset = offset % kBlockSize;
-
- if (n <= kBlockSize - block_offset) {
- // The requested bytes are all in the first block.
- *result = Slice(blocks_[block] + block_offset, n);
- return Status::OK();
- }
-
size_t bytes_to_copy = n;
char* dst = scratch;
@@ -96,6 +110,7 @@ class FileState {
const char* src = data.data();
size_t src_len = data.size();
+ MutexLock lock(&blocks_mutex_);
while (src_len > 0) {
size_t avail;
size_t offset = size_ % kBlockSize;
@@ -122,28 +137,17 @@ class FileState {
}
private:
- // Private since only Unref() should be used to delete it.
- ~FileState() {
- for (std::vector<char*>::iterator i = blocks_.begin(); i != blocks_.end();
- ++i) {
- delete [] *i;
- }
- }
+ enum { kBlockSize = 8 * 1024 };
- // No copying allowed.
- FileState(const FileState&);
- void operator=(const FileState&);
+ // Private since only Unref() should be used to delete it.
+ ~FileState() { Truncate(); }
port::Mutex refs_mutex_;
- int refs_; // Protected by refs_mutex_;
+ int refs_ GUARDED_BY(refs_mutex_);
- // The following fields are not protected by any mutex. They are only mutable
- // while the file is being written, and concurrent access is not allowed
- // to writable files.
- std::vector<char*> blocks_;
- uint64_t size_;
-
- enum { kBlockSize = 8 * 1024 };
+ mutable port::Mutex blocks_mutex_;
+ std::vector<char*> blocks_ GUARDED_BY(blocks_mutex_);
+ uint64_t size_ GUARDED_BY(blocks_mutex_);
};
class SequentialFileImpl : public SequentialFile {
@@ -152,11 +156,9 @@ class SequentialFileImpl : public SequentialFile {
file_->Ref();
}
- ~SequentialFileImpl() {
- file_->Unref();
- }
+ ~SequentialFileImpl() override { file_->Unref(); }
- virtual Status Read(size_t n, Slice* result, char* scratch) {
+ Status Read(size_t n, Slice* result, char* scratch) override {
Status s = file_->Read(pos_, n, result, scratch);
if (s.ok()) {
pos_ += result->size();
@@ -164,7 +166,7 @@ class SequentialFileImpl : public SequentialFile {
return s;
}
- virtual Status Skip(uint64_t n) {
+ Status Skip(uint64_t n) override {
if (pos_ > file_->Size()) {
return Status::IOError("pos_ > file_->Size()");
}
@@ -176,7 +178,7 @@ class SequentialFileImpl : public SequentialFile {
return Status::OK();
}
- virtual std::string GetName() const { return "[memenv]"; }
+ virtual std::string GetName() const override { return "[memenv]"; }
private:
FileState* file_;
uint64_t pos_;
@@ -184,68 +186,58 @@ class SequentialFileImpl : public SequentialFile {
class RandomAccessFileImpl : public RandomAccessFile {
public:
- explicit RandomAccessFileImpl(FileState* file) : file_(file) {
- file_->Ref();
- }
+ explicit RandomAccessFileImpl(FileState* file) : file_(file) { file_->Ref(); }
- ~RandomAccessFileImpl() {
- file_->Unref();
- }
+ ~RandomAccessFileImpl() override { file_->Unref(); }
- virtual Status Read(uint64_t offset, size_t n, Slice* result,
- char* scratch) const {
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
return file_->Read(offset, n, result, scratch);
}
- virtual std::string GetName() const { return "[memenv]"; }
+ virtual std::string GetName() const override { return "[memenv]"; }
private:
FileState* file_;
};
class WritableFileImpl : public WritableFile {
public:
- WritableFileImpl(FileState* file) : file_(file) {
- file_->Ref();
- }
+ WritableFileImpl(FileState* file) : file_(file) { file_->Ref(); }
- ~WritableFileImpl() {
- file_->Unref();
- }
+ ~WritableFileImpl() override { file_->Unref(); }
- virtual Status Append(const Slice& data) {
- return file_->Append(data);
- }
+ Status Append(const Slice& data) override { return file_->Append(data); }
- virtual Status Close() { return Status::OK(); }
- virtual Status Flush() { return Status::OK(); }
- virtual Status Sync() { return Status::OK(); }
+ Status Close() override { return Status::OK(); }
+ Status Flush() override { return Status::OK(); }
+ Status Sync() override { return Status::OK(); }
- virtual std::string GetName() const { return "[memenv]"; }
+ virtual std::string GetName() const override { return "[memenv]"; }
private:
FileState* file_;
};
class NoOpLogger : public Logger {
public:
- virtual void Logv(const char* format, va_list ap) { }
+ void Logv(const char* format, va_list ap) override {}
};
class InMemoryEnv : public EnvWrapper {
public:
- explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) { }
+ explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) {}
- virtual ~InMemoryEnv() {
- for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){
- i->second->Unref();
+ ~InMemoryEnv() override {
+ for (const auto& kvp : file_map_) {
+ kvp.second->Unref();
}
}
// Partial implementation of the Env interface.
- virtual Status NewSequentialFile(const std::string& fname,
- SequentialFile** result) {
+ Status NewSequentialFile(const std::string& fname,
+ SequentialFile** result) override {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
- *result = NULL;
+ *result = nullptr;
return Status::IOError(fname, "File not found");
}
@@ -253,11 +245,11 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
- virtual Status NewRandomAccessFile(const std::string& fname,
- RandomAccessFile** result) {
+ Status NewRandomAccessFile(const std::string& fname,
+ RandomAccessFile** result) override {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
- *result = NULL;
+ *result = nullptr;
return Status::IOError(fname, "File not found");
}
@@ -265,27 +257,32 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
- virtual Status NewWritableFile(const std::string& fname,
- WritableFile** result) {
+ Status NewWritableFile(const std::string& fname,
+ WritableFile** result) override {
MutexLock lock(&mutex_);
- if (file_map_.find(fname) != file_map_.end()) {
- DeleteFileInternal(fname);
- }
+ FileSystem::iterator it = file_map_.find(fname);
- FileState* file = new FileState();
- file->Ref();
- file_map_[fname] = file;
+ FileState* file;
+ if (it == file_map_.end()) {
+ // File is not currently open.
+ file = new FileState();
+ file->Ref();
+ file_map_[fname] = file;
+ } else {
+ file = it->second;
+ file->Truncate();
+ }
*result = new WritableFileImpl(file);
return Status::OK();
}
- virtual Status NewAppendableFile(const std::string& fname,
- WritableFile** result) {
+ Status NewAppendableFile(const std::string& fname,
+ WritableFile** result) override {
MutexLock lock(&mutex_);
FileState** sptr = &file_map_[fname];
FileState* file = *sptr;
- if (file == NULL) {
+ if (file == nullptr) {
file = new FileState();
file->Ref();
}
@@ -293,18 +290,18 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
- virtual bool FileExists(const std::string& fname) {
+ bool FileExists(const std::string& fname) override {
MutexLock lock(&mutex_);
return file_map_.find(fname) != file_map_.end();
}
- virtual Status GetChildren(const std::string& dir,
- std::vector<std::string>* result) {
+ Status GetChildren(const std::string& dir,
+ std::vector<std::string>* result) override {
MutexLock lock(&mutex_);
result->clear();
- for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){
- const std::string& filename = i->first;
+ for (const auto& kvp : file_map_) {
+ const std::string& filename = kvp.first;
if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' &&
Slice(filename).starts_with(Slice(dir))) {
@@ -315,7 +312,8 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
- void DeleteFileInternal(const std::string& fname) {
+ void DeleteFileInternal(const std::string& fname)
+ EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
if (file_map_.find(fname) == file_map_.end()) {
return;
}
@@ -324,7 +322,7 @@ class InMemoryEnv : public EnvWrapper {
file_map_.erase(fname);
}
- virtual Status DeleteFile(const std::string& fname) {
+ Status DeleteFile(const std::string& fname) override {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
return Status::IOError(fname, "File not found");
@@ -334,15 +332,11 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
- virtual Status CreateDir(const std::string& dirname) {
- return Status::OK();
- }
+ Status CreateDir(const std::string& dirname) override { return Status::OK(); }
- virtual Status DeleteDir(const std::string& dirname) {
- return Status::OK();
- }
+ Status DeleteDir(const std::string& dirname) override { return Status::OK(); }
- virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) {
+ Status GetFileSize(const std::string& fname, uint64_t* file_size) override {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
return Status::IOError(fname, "File not found");
@@ -352,8 +346,8 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
- virtual Status RenameFile(const std::string& src,
- const std::string& target) {
+ Status RenameFile(const std::string& src,
+ const std::string& target) override {
MutexLock lock(&mutex_);
if (file_map_.find(src) == file_map_.end()) {
return Status::IOError(src, "File not found");
@@ -365,22 +359,22 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
- virtual Status LockFile(const std::string& fname, FileLock** lock) {
+ Status LockFile(const std::string& fname, FileLock** lock) override {
*lock = new FileLock;
return Status::OK();
}
- virtual Status UnlockFile(FileLock* lock) {
+ Status UnlockFile(FileLock* lock) override {
delete lock;
return Status::OK();
}
- virtual Status GetTestDirectory(std::string* path) {
+ Status GetTestDirectory(std::string* path) override {
*path = "/test";
return Status::OK();
}
- virtual Status NewLogger(const std::string& fname, Logger** result) {
+ Status NewLogger(const std::string& fname, Logger** result) override {
*result = new NoOpLogger;
return Status::OK();
}
@@ -388,14 +382,13 @@ class InMemoryEnv : public EnvWrapper {
private:
// Map from filenames to FileState objects, representing a simple file system.
typedef std::map<std::string, FileState*> FileSystem;
+
port::Mutex mutex_;
- FileSystem file_map_; // Protected by mutex_.
+ FileSystem file_map_ GUARDED_BY(mutex_);
};
} // namespace
-Env* NewMemEnv(Env* base_env) {
- return new InMemoryEnv(base_env);
-}
+Env* NewMemEnv(Env* base_env) { return new InMemoryEnv(base_env); }
} // namespace leveldb
diff --git a/src/leveldb/helpers/memenv/memenv.h b/src/leveldb/helpers/memenv/memenv.h
index 03b88de761..3d929e4c4e 100644
--- a/src/leveldb/helpers/memenv/memenv.h
+++ b/src/leveldb/helpers/memenv/memenv.h
@@ -5,6 +5,8 @@
#ifndef STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_
#define STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_
+#include "leveldb/export.h"
+
namespace leveldb {
class Env;
@@ -13,7 +15,7 @@ class Env;
// all non-file-storage tasks to base_env. The caller must delete the result
// when it is no longer needed.
// *base_env must remain live while the result is in use.
-Env* NewMemEnv(Env* base_env);
+LEVELDB_EXPORT Env* NewMemEnv(Env* base_env);
} // namespace leveldb
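
A minimal usage sketch for NewMemEnv(), along the lines of the DBTest case in memenv_test.cc: the returned Env is installed via Options::env, must outlive the DB, and is owned by the caller. The database name below is illustrative.

```c++
// Illustrative sketch only: run a database entirely in memory via NewMemEnv.
#include <cassert>
#include <string>

#include "helpers/memenv/memenv.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/options.h"

int main() {
  // The memory Env delegates non-file work to the real default Env, which
  // must stay alive for as long as the wrapper is in use.
  leveldb::Env* env = leveldb::NewMemEnv(leveldb::Env::Default());

  leveldb::Options options;
  options.create_if_missing = true;
  options.env = env;  // all files for this DB now live in memory

  leveldb::DB* db = nullptr;
  assert(leveldb::DB::Open(options, "/dir/db", &db).ok());

  assert(db->Put(leveldb::WriteOptions(), "key", "value").ok());

  std::string value;
  assert(db->Get(leveldb::ReadOptions(), "key", &value).ok());
  assert(value == "value");

  delete db;   // close the DB before tearing down its Env
  delete env;  // caller owns the Env returned by NewMemEnv
  return 0;
}
```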
diff --git a/src/leveldb/helpers/memenv/memenv_test.cc b/src/leveldb/helpers/memenv/memenv_test.cc
index 5cff77613f..94ad06be68 100644
--- a/src/leveldb/helpers/memenv/memenv_test.cc
+++ b/src/leveldb/helpers/memenv/memenv_test.cc
@@ -4,25 +4,22 @@
#include "helpers/memenv/memenv.h"
+#include <string>
+#include <vector>
+
#include "db/db_impl.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "util/testharness.h"
-#include <string>
-#include <vector>
namespace leveldb {
class MemEnvTest {
public:
- Env* env_;
+ MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
+ ~MemEnvTest() { delete env_; }
- MemEnvTest()
- : env_(NewMemEnv(Env::Default())) {
- }
- ~MemEnvTest() {
- delete env_;
- }
+ Env* env_;
};
TEST(MemEnvTest, Basics) {
@@ -109,25 +106,25 @@ TEST(MemEnvTest, ReadWrite) {
// Read sequentially.
ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
- ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello".
+ ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello".
ASSERT_EQ(0, result.compare("hello"));
ASSERT_OK(seq_file->Skip(1));
- ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world".
+ ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world".
ASSERT_EQ(0, result.compare("world"));
- ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF.
+ ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF.
ASSERT_EQ(0, result.size());
- ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file.
+ ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file.
ASSERT_OK(seq_file->Read(1000, &result, scratch));
ASSERT_EQ(0, result.size());
delete seq_file;
// Random reads.
ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
- ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world".
+ ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world".
ASSERT_EQ(0, result.compare("world"));
- ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello".
+ ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello".
ASSERT_EQ(0, result.compare("hello"));
- ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d".
+ ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d".
ASSERT_EQ(0, result.compare("d"));
// Too high offset.
@@ -176,7 +173,7 @@ TEST(MemEnvTest, LargeWrite) {
SequentialFile* seq_file;
Slice result;
ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
- ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo".
+ ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo".
ASSERT_EQ(0, result.compare("foo"));
size_t read = 0;
@@ -188,7 +185,30 @@ TEST(MemEnvTest, LargeWrite) {
}
ASSERT_TRUE(write_data == read_data);
delete seq_file;
- delete [] scratch;
+ delete[] scratch;
+}
+
+TEST(MemEnvTest, OverwriteOpenFile) {
+ const char kWrite1Data[] = "Write #1 data";
+ const size_t kFileDataLen = sizeof(kWrite1Data) - 1;
+ const std::string kTestFileName = test::TmpDir() + "/leveldb-TestFile.dat";
+
+ ASSERT_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName));
+
+ RandomAccessFile* rand_file;
+ ASSERT_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file));
+
+ const char kWrite2Data[] = "Write #2 data";
+ ASSERT_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName));
+
+ // Verify that overwriting an open file will result in the new file data
+ // being read from files opened before the write.
+ Slice result;
+ char scratch[kFileDataLen];
+ ASSERT_OK(rand_file->Read(0, kFileDataLen, &result, scratch));
+ ASSERT_EQ(0, result.compare(kWrite2Data));
+
+ delete rand_file;
}
TEST(MemEnvTest, DBTest) {
@@ -236,6 +256,4 @@ TEST(MemEnvTest, DBTest) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/include/leveldb/c.h b/src/leveldb/include/leveldb/c.h
index 1048fe3b86..02c79ba72e 100644
--- a/src/leveldb/include/leveldb/c.h
+++ b/src/leveldb/include/leveldb/c.h
@@ -32,7 +32,7 @@
On failure, leveldb frees the old value of *errptr and
set *errptr to a malloc()ed error message.
- (4) Bools have the type unsigned char (0 == false; rest == true)
+ (4) Bools have the type uint8_t (0 == false; rest == true)
(5) All of the pointer arguments must be non-NULL.
*/
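
For reference, a minimal open/put/get cycle through this C binding, using only entry points declared in this header; leveldb_writeoptions_create(), leveldb_readoptions_create(), their destroy counterparts, and leveldb_free() are declared in the same header but fall outside the hunks shown here.

```c++
// Illustrative sketch only: basic use of the leveldb C API.
#include <assert.h>
#include <string.h>

#include "leveldb/c.h"

int main() {
  char* err = NULL;

  leveldb_options_t* options = leveldb_options_create();
  leveldb_options_set_create_if_missing(options, 1);  // "bool" is 0 / non-0

  leveldb_t* db = leveldb_open(options, "/tmp/leveldb-c-demo", &err);
  assert(err == NULL);

  leveldb_writeoptions_t* woptions = leveldb_writeoptions_create();
  leveldb_put(db, woptions, "key", 3, "value", 5, &err);
  assert(err == NULL);

  leveldb_readoptions_t* roptions = leveldb_readoptions_create();
  size_t vallen = 0;
  char* val = leveldb_get(db, roptions, "key", 3, &vallen, &err);
  assert(err == NULL && vallen == 5 && memcmp(val, "value", 5) == 0);
  leveldb_free(val);  // leveldb_get returns a malloc()ed buffer

  leveldb_readoptions_destroy(roptions);
  leveldb_writeoptions_destroy(woptions);
  leveldb_close(db);
  leveldb_options_destroy(options);
  return 0;
}
```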
@@ -48,225 +48,205 @@ extern "C" {
#include <stddef.h>
#include <stdint.h>
+#include "leveldb/export.h"
+
/* Exported types */
-typedef struct leveldb_t leveldb_t;
-typedef struct leveldb_cache_t leveldb_cache_t;
-typedef struct leveldb_comparator_t leveldb_comparator_t;
-typedef struct leveldb_env_t leveldb_env_t;
-typedef struct leveldb_filelock_t leveldb_filelock_t;
-typedef struct leveldb_filterpolicy_t leveldb_filterpolicy_t;
-typedef struct leveldb_iterator_t leveldb_iterator_t;
-typedef struct leveldb_logger_t leveldb_logger_t;
-typedef struct leveldb_options_t leveldb_options_t;
-typedef struct leveldb_randomfile_t leveldb_randomfile_t;
-typedef struct leveldb_readoptions_t leveldb_readoptions_t;
-typedef struct leveldb_seqfile_t leveldb_seqfile_t;
-typedef struct leveldb_snapshot_t leveldb_snapshot_t;
-typedef struct leveldb_writablefile_t leveldb_writablefile_t;
-typedef struct leveldb_writebatch_t leveldb_writebatch_t;
-typedef struct leveldb_writeoptions_t leveldb_writeoptions_t;
+typedef struct leveldb_t leveldb_t;
+typedef struct leveldb_cache_t leveldb_cache_t;
+typedef struct leveldb_comparator_t leveldb_comparator_t;
+typedef struct leveldb_env_t leveldb_env_t;
+typedef struct leveldb_filelock_t leveldb_filelock_t;
+typedef struct leveldb_filterpolicy_t leveldb_filterpolicy_t;
+typedef struct leveldb_iterator_t leveldb_iterator_t;
+typedef struct leveldb_logger_t leveldb_logger_t;
+typedef struct leveldb_options_t leveldb_options_t;
+typedef struct leveldb_randomfile_t leveldb_randomfile_t;
+typedef struct leveldb_readoptions_t leveldb_readoptions_t;
+typedef struct leveldb_seqfile_t leveldb_seqfile_t;
+typedef struct leveldb_snapshot_t leveldb_snapshot_t;
+typedef struct leveldb_writablefile_t leveldb_writablefile_t;
+typedef struct leveldb_writebatch_t leveldb_writebatch_t;
+typedef struct leveldb_writeoptions_t leveldb_writeoptions_t;
/* DB operations */
-extern leveldb_t* leveldb_open(
- const leveldb_options_t* options,
- const char* name,
- char** errptr);
+LEVELDB_EXPORT leveldb_t* leveldb_open(const leveldb_options_t* options,
+ const char* name, char** errptr);
-extern void leveldb_close(leveldb_t* db);
+LEVELDB_EXPORT void leveldb_close(leveldb_t* db);
-extern void leveldb_put(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- const char* key, size_t keylen,
- const char* val, size_t vallen,
- char** errptr);
+LEVELDB_EXPORT void leveldb_put(leveldb_t* db,
+ const leveldb_writeoptions_t* options,
+ const char* key, size_t keylen, const char* val,
+ size_t vallen, char** errptr);
-extern void leveldb_delete(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- const char* key, size_t keylen,
- char** errptr);
+LEVELDB_EXPORT void leveldb_delete(leveldb_t* db,
+ const leveldb_writeoptions_t* options,
+ const char* key, size_t keylen,
+ char** errptr);
-extern void leveldb_write(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- leveldb_writebatch_t* batch,
- char** errptr);
+LEVELDB_EXPORT void leveldb_write(leveldb_t* db,
+ const leveldb_writeoptions_t* options,
+ leveldb_writebatch_t* batch, char** errptr);
/* Returns NULL if not found. A malloc()ed array otherwise.
Stores the length of the array in *vallen. */
-extern char* leveldb_get(
- leveldb_t* db,
- const leveldb_readoptions_t* options,
- const char* key, size_t keylen,
- size_t* vallen,
- char** errptr);
+LEVELDB_EXPORT char* leveldb_get(leveldb_t* db,
+ const leveldb_readoptions_t* options,
+ const char* key, size_t keylen, size_t* vallen,
+ char** errptr);
-extern leveldb_iterator_t* leveldb_create_iterator(
- leveldb_t* db,
- const leveldb_readoptions_t* options);
+LEVELDB_EXPORT leveldb_iterator_t* leveldb_create_iterator(
+ leveldb_t* db, const leveldb_readoptions_t* options);
-extern const leveldb_snapshot_t* leveldb_create_snapshot(
- leveldb_t* db);
+LEVELDB_EXPORT const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db);
-extern void leveldb_release_snapshot(
- leveldb_t* db,
- const leveldb_snapshot_t* snapshot);
+LEVELDB_EXPORT void leveldb_release_snapshot(
+ leveldb_t* db, const leveldb_snapshot_t* snapshot);
/* Returns NULL if property name is unknown.
Else returns a pointer to a malloc()-ed null-terminated value. */
-extern char* leveldb_property_value(
- leveldb_t* db,
- const char* propname);
-
-extern void leveldb_approximate_sizes(
- leveldb_t* db,
- int num_ranges,
- const char* const* range_start_key, const size_t* range_start_key_len,
- const char* const* range_limit_key, const size_t* range_limit_key_len,
- uint64_t* sizes);
-
-extern void leveldb_compact_range(
- leveldb_t* db,
- const char* start_key, size_t start_key_len,
- const char* limit_key, size_t limit_key_len);
+LEVELDB_EXPORT char* leveldb_property_value(leveldb_t* db,
+ const char* propname);
+
+LEVELDB_EXPORT void leveldb_approximate_sizes(
+ leveldb_t* db, int num_ranges, const char* const* range_start_key,
+ const size_t* range_start_key_len, const char* const* range_limit_key,
+ const size_t* range_limit_key_len, uint64_t* sizes);
+
+LEVELDB_EXPORT void leveldb_compact_range(leveldb_t* db, const char* start_key,
+ size_t start_key_len,
+ const char* limit_key,
+ size_t limit_key_len);
/* Management operations */
-extern void leveldb_destroy_db(
- const leveldb_options_t* options,
- const char* name,
- char** errptr);
+LEVELDB_EXPORT void leveldb_destroy_db(const leveldb_options_t* options,
+ const char* name, char** errptr);
-extern void leveldb_repair_db(
- const leveldb_options_t* options,
- const char* name,
- char** errptr);
+LEVELDB_EXPORT void leveldb_repair_db(const leveldb_options_t* options,
+ const char* name, char** errptr);
/* Iterator */
-extern void leveldb_iter_destroy(leveldb_iterator_t*);
-extern unsigned char leveldb_iter_valid(const leveldb_iterator_t*);
-extern void leveldb_iter_seek_to_first(leveldb_iterator_t*);
-extern void leveldb_iter_seek_to_last(leveldb_iterator_t*);
-extern void leveldb_iter_seek(leveldb_iterator_t*, const char* k, size_t klen);
-extern void leveldb_iter_next(leveldb_iterator_t*);
-extern void leveldb_iter_prev(leveldb_iterator_t*);
-extern const char* leveldb_iter_key(const leveldb_iterator_t*, size_t* klen);
-extern const char* leveldb_iter_value(const leveldb_iterator_t*, size_t* vlen);
-extern void leveldb_iter_get_error(const leveldb_iterator_t*, char** errptr);
+LEVELDB_EXPORT void leveldb_iter_destroy(leveldb_iterator_t*);
+LEVELDB_EXPORT uint8_t leveldb_iter_valid(const leveldb_iterator_t*);
+LEVELDB_EXPORT void leveldb_iter_seek_to_first(leveldb_iterator_t*);
+LEVELDB_EXPORT void leveldb_iter_seek_to_last(leveldb_iterator_t*);
+LEVELDB_EXPORT void leveldb_iter_seek(leveldb_iterator_t*, const char* k,
+ size_t klen);
+LEVELDB_EXPORT void leveldb_iter_next(leveldb_iterator_t*);
+LEVELDB_EXPORT void leveldb_iter_prev(leveldb_iterator_t*);
+LEVELDB_EXPORT const char* leveldb_iter_key(const leveldb_iterator_t*,
+ size_t* klen);
+LEVELDB_EXPORT const char* leveldb_iter_value(const leveldb_iterator_t*,
+ size_t* vlen);
+LEVELDB_EXPORT void leveldb_iter_get_error(const leveldb_iterator_t*,
+ char** errptr);
/* Write batch */
-extern leveldb_writebatch_t* leveldb_writebatch_create();
-extern void leveldb_writebatch_destroy(leveldb_writebatch_t*);
-extern void leveldb_writebatch_clear(leveldb_writebatch_t*);
-extern void leveldb_writebatch_put(
- leveldb_writebatch_t*,
- const char* key, size_t klen,
- const char* val, size_t vlen);
-extern void leveldb_writebatch_delete(
- leveldb_writebatch_t*,
- const char* key, size_t klen);
-extern void leveldb_writebatch_iterate(
- leveldb_writebatch_t*,
- void* state,
+LEVELDB_EXPORT leveldb_writebatch_t* leveldb_writebatch_create(void);
+LEVELDB_EXPORT void leveldb_writebatch_destroy(leveldb_writebatch_t*);
+LEVELDB_EXPORT void leveldb_writebatch_clear(leveldb_writebatch_t*);
+LEVELDB_EXPORT void leveldb_writebatch_put(leveldb_writebatch_t*,
+ const char* key, size_t klen,
+ const char* val, size_t vlen);
+LEVELDB_EXPORT void leveldb_writebatch_delete(leveldb_writebatch_t*,
+ const char* key, size_t klen);
+LEVELDB_EXPORT void leveldb_writebatch_iterate(
+ const leveldb_writebatch_t*, void* state,
void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
void (*deleted)(void*, const char* k, size_t klen));
+LEVELDB_EXPORT void leveldb_writebatch_append(
+ leveldb_writebatch_t* destination, const leveldb_writebatch_t* source);
/* Options */
-extern leveldb_options_t* leveldb_options_create();
-extern void leveldb_options_destroy(leveldb_options_t*);
-extern void leveldb_options_set_comparator(
- leveldb_options_t*,
- leveldb_comparator_t*);
-extern void leveldb_options_set_filter_policy(
- leveldb_options_t*,
- leveldb_filterpolicy_t*);
-extern void leveldb_options_set_create_if_missing(
- leveldb_options_t*, unsigned char);
-extern void leveldb_options_set_error_if_exists(
- leveldb_options_t*, unsigned char);
-extern void leveldb_options_set_paranoid_checks(
- leveldb_options_t*, unsigned char);
-extern void leveldb_options_set_env(leveldb_options_t*, leveldb_env_t*);
-extern void leveldb_options_set_info_log(leveldb_options_t*, leveldb_logger_t*);
-extern void leveldb_options_set_write_buffer_size(leveldb_options_t*, size_t);
-extern void leveldb_options_set_max_open_files(leveldb_options_t*, int);
-extern void leveldb_options_set_cache(leveldb_options_t*, leveldb_cache_t*);
-extern void leveldb_options_set_block_size(leveldb_options_t*, size_t);
-extern void leveldb_options_set_block_restart_interval(leveldb_options_t*, int);
-
-enum {
- leveldb_no_compression = 0,
- leveldb_snappy_compression = 1
-};
-extern void leveldb_options_set_compression(leveldb_options_t*, int);
+LEVELDB_EXPORT leveldb_options_t* leveldb_options_create(void);
+LEVELDB_EXPORT void leveldb_options_destroy(leveldb_options_t*);
+LEVELDB_EXPORT void leveldb_options_set_comparator(leveldb_options_t*,
+ leveldb_comparator_t*);
+LEVELDB_EXPORT void leveldb_options_set_filter_policy(leveldb_options_t*,
+ leveldb_filterpolicy_t*);
+LEVELDB_EXPORT void leveldb_options_set_create_if_missing(leveldb_options_t*,
+ uint8_t);
+LEVELDB_EXPORT void leveldb_options_set_error_if_exists(leveldb_options_t*,
+ uint8_t);
+LEVELDB_EXPORT void leveldb_options_set_paranoid_checks(leveldb_options_t*,
+ uint8_t);
+LEVELDB_EXPORT void leveldb_options_set_env(leveldb_options_t*, leveldb_env_t*);
+LEVELDB_EXPORT void leveldb_options_set_info_log(leveldb_options_t*,
+ leveldb_logger_t*);
+LEVELDB_EXPORT void leveldb_options_set_write_buffer_size(leveldb_options_t*,
+ size_t);
+LEVELDB_EXPORT void leveldb_options_set_max_open_files(leveldb_options_t*, int);
+LEVELDB_EXPORT void leveldb_options_set_cache(leveldb_options_t*,
+ leveldb_cache_t*);
+LEVELDB_EXPORT void leveldb_options_set_block_size(leveldb_options_t*, size_t);
+LEVELDB_EXPORT void leveldb_options_set_block_restart_interval(
+ leveldb_options_t*, int);
+LEVELDB_EXPORT void leveldb_options_set_max_file_size(leveldb_options_t*,
+ size_t);
+
+enum { leveldb_no_compression = 0, leveldb_snappy_compression = 1 };
+LEVELDB_EXPORT void leveldb_options_set_compression(leveldb_options_t*, int);
/* Comparator */
-extern leveldb_comparator_t* leveldb_comparator_create(
- void* state,
- void (*destructor)(void*),
- int (*compare)(
- void*,
- const char* a, size_t alen,
- const char* b, size_t blen),
+LEVELDB_EXPORT leveldb_comparator_t* leveldb_comparator_create(
+ void* state, void (*destructor)(void*),
+ int (*compare)(void*, const char* a, size_t alen, const char* b,
+ size_t blen),
const char* (*name)(void*));
-extern void leveldb_comparator_destroy(leveldb_comparator_t*);
+LEVELDB_EXPORT void leveldb_comparator_destroy(leveldb_comparator_t*);
/* Filter policy */
-extern leveldb_filterpolicy_t* leveldb_filterpolicy_create(
- void* state,
- void (*destructor)(void*),
- char* (*create_filter)(
- void*,
- const char* const* key_array, const size_t* key_length_array,
- int num_keys,
- size_t* filter_length),
- unsigned char (*key_may_match)(
- void*,
- const char* key, size_t length,
- const char* filter, size_t filter_length),
+LEVELDB_EXPORT leveldb_filterpolicy_t* leveldb_filterpolicy_create(
+ void* state, void (*destructor)(void*),
+ char* (*create_filter)(void*, const char* const* key_array,
+ const size_t* key_length_array, int num_keys,
+ size_t* filter_length),
+ uint8_t (*key_may_match)(void*, const char* key, size_t length,
+ const char* filter, size_t filter_length),
const char* (*name)(void*));
-extern void leveldb_filterpolicy_destroy(leveldb_filterpolicy_t*);
+LEVELDB_EXPORT void leveldb_filterpolicy_destroy(leveldb_filterpolicy_t*);
-extern leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(
+LEVELDB_EXPORT leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(
int bits_per_key);
/* Read options */
-extern leveldb_readoptions_t* leveldb_readoptions_create();
-extern void leveldb_readoptions_destroy(leveldb_readoptions_t*);
-extern void leveldb_readoptions_set_verify_checksums(
- leveldb_readoptions_t*,
- unsigned char);
-extern void leveldb_readoptions_set_fill_cache(
- leveldb_readoptions_t*, unsigned char);
-extern void leveldb_readoptions_set_snapshot(
- leveldb_readoptions_t*,
- const leveldb_snapshot_t*);
+LEVELDB_EXPORT leveldb_readoptions_t* leveldb_readoptions_create(void);
+LEVELDB_EXPORT void leveldb_readoptions_destroy(leveldb_readoptions_t*);
+LEVELDB_EXPORT void leveldb_readoptions_set_verify_checksums(
+ leveldb_readoptions_t*, uint8_t);
+LEVELDB_EXPORT void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t*,
+ uint8_t);
+LEVELDB_EXPORT void leveldb_readoptions_set_snapshot(leveldb_readoptions_t*,
+ const leveldb_snapshot_t*);
/* Write options */
-extern leveldb_writeoptions_t* leveldb_writeoptions_create();
-extern void leveldb_writeoptions_destroy(leveldb_writeoptions_t*);
-extern void leveldb_writeoptions_set_sync(
- leveldb_writeoptions_t*, unsigned char);
+LEVELDB_EXPORT leveldb_writeoptions_t* leveldb_writeoptions_create(void);
+LEVELDB_EXPORT void leveldb_writeoptions_destroy(leveldb_writeoptions_t*);
+LEVELDB_EXPORT void leveldb_writeoptions_set_sync(leveldb_writeoptions_t*,
+ uint8_t);
/* Cache */
-extern leveldb_cache_t* leveldb_cache_create_lru(size_t capacity);
-extern void leveldb_cache_destroy(leveldb_cache_t* cache);
+LEVELDB_EXPORT leveldb_cache_t* leveldb_cache_create_lru(size_t capacity);
+LEVELDB_EXPORT void leveldb_cache_destroy(leveldb_cache_t* cache);
/* Env */
-extern leveldb_env_t* leveldb_create_default_env();
-extern void leveldb_env_destroy(leveldb_env_t*);
+LEVELDB_EXPORT leveldb_env_t* leveldb_create_default_env(void);
+LEVELDB_EXPORT void leveldb_env_destroy(leveldb_env_t*);
+
+/* If not NULL, the returned buffer must be released using leveldb_free(). */
+LEVELDB_EXPORT char* leveldb_env_get_test_directory(leveldb_env_t*);
/* Utility */
@@ -275,16 +255,16 @@ extern void leveldb_env_destroy(leveldb_env_t*);
in this file. Note that in certain cases (typically on Windows), you
may need to call this routine instead of free(ptr) to dispose of
malloc()-ed memory returned by this library. */
-extern void leveldb_free(void* ptr);
+LEVELDB_EXPORT void leveldb_free(void* ptr);
/* Return the major version number for this release. */
-extern int leveldb_major_version();
+LEVELDB_EXPORT int leveldb_major_version(void);
/* Return the minor version number for this release. */
-extern int leveldb_minor_version();
+LEVELDB_EXPORT int leveldb_minor_version(void);
#ifdef __cplusplus
-} /* end extern "C" */
+} /* end extern "C" */
#endif
-#endif /* STORAGE_LEVELDB_INCLUDE_C_H_ */
+#endif /* STORAGE_LEVELDB_INCLUDE_C_H_ */
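For reference, a minimal sketch of how the C API above fits together — staging a batch, merging a second batch with the new leveldb_writebatch_append(), reading under a snapshot, and releasing library-allocated memory with leveldb_free(). This is illustrative only and not part of the patch; leveldb_open(), leveldb_write(), leveldb_get() and leveldb_close() come from earlier, unchanged parts of c.h, the /tmp/exampledb path is a placeholder, and most error checks are omitted for brevity.

#include <stdio.h>
#include "leveldb/c.h"

int main(void) {
  char* err = NULL;

  /* Open (or create) a database. */
  leveldb_options_t* options = leveldb_options_create();
  leveldb_options_set_create_if_missing(options, 1);
  leveldb_t* db = leveldb_open(options, "/tmp/exampledb", &err);
  if (err != NULL) {
    fprintf(stderr, "open failed: %s\n", err);
    leveldb_free(err); /* error strings must be released with leveldb_free() */
    return 1;
  }

  /* Stage two puts in one batch; merge a second batch with the new append. */
  leveldb_writebatch_t* batch = leveldb_writebatch_create();
  leveldb_writebatch_put(batch, "k1", 2, "v1", 2);
  leveldb_writebatch_t* extra = leveldb_writebatch_create();
  leveldb_writebatch_put(extra, "k2", 2, "v2", 2);
  leveldb_writebatch_append(batch, extra);

  leveldb_writeoptions_t* woptions = leveldb_writeoptions_create();
  leveldb_write(db, woptions, batch, &err);

  /* Read "k1" as of a fixed snapshot. */
  const leveldb_snapshot_t* snap = leveldb_create_snapshot(db);
  leveldb_readoptions_t* roptions = leveldb_readoptions_create();
  leveldb_readoptions_set_snapshot(roptions, snap);
  size_t vlen = 0;
  char* value = leveldb_get(db, roptions, "k1", 2, &vlen, &err);
  if (value != NULL) leveldb_free(value); /* not free(): see leveldb_free() above */
  leveldb_release_snapshot(db, snap);

  leveldb_readoptions_destroy(roptions);
  leveldb_writeoptions_destroy(woptions);
  leveldb_writebatch_destroy(extra);
  leveldb_writebatch_destroy(batch);
  leveldb_close(db);
  leveldb_options_destroy(options);
  return 0;
}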
diff --git a/src/leveldb/include/leveldb/cache.h b/src/leveldb/include/leveldb/cache.h
index 6819d5bc49..7d1a221193 100644
--- a/src/leveldb/include/leveldb/cache.h
+++ b/src/leveldb/include/leveldb/cache.h
@@ -19,26 +19,31 @@
#define STORAGE_LEVELDB_INCLUDE_CACHE_H_
#include <stdint.h>
+
+#include "leveldb/export.h"
#include "leveldb/slice.h"
namespace leveldb {
-class Cache;
+class LEVELDB_EXPORT Cache;
// Create a new cache with a fixed size capacity. This implementation
// of Cache uses a least-recently-used eviction policy.
-extern Cache* NewLRUCache(size_t capacity);
+LEVELDB_EXPORT Cache* NewLRUCache(size_t capacity);
-class Cache {
+class LEVELDB_EXPORT Cache {
public:
- Cache() { }
+ Cache() = default;
+
+ Cache(const Cache&) = delete;
+ Cache& operator=(const Cache&) = delete;
// Destroys all existing entries by calling the "deleter"
// function that was passed to the constructor.
virtual ~Cache();
// Opaque handle to an entry stored in the cache.
- struct Handle { };
+ struct Handle {};
// Insert a mapping from key->value into the cache and assign it
// the specified charge against the total cache capacity.
@@ -52,7 +57,7 @@ class Cache {
virtual Handle* Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value)) = 0;
- // If the cache has no mapping for "key", returns NULL.
+ // If the cache has no mapping for "key", returns nullptr.
//
// Else return a handle that corresponds to the mapping. The caller
// must call this->Release(handle) when the returned mapping is no
@@ -99,10 +104,6 @@ class Cache {
struct Rep;
Rep* rep_;
-
- // No copying allowed
- Cache(const Cache&);
- void operator=(const Cache&);
};
} // namespace leveldb
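The exported NewLRUCache() is typically used to replace the default 8MB internal block cache. A small sketch of that wiring (database path is a placeholder); note the DB only borrows the cache, so the cache is deleted after the DB:

#include <cassert>
#include "leveldb/cache.h"
#include "leveldb/db.h"

int main() {
  leveldb::Options options;
  options.create_if_missing = true;
  // Replace the default 8MB internal block cache with a 64MB LRU cache.
  options.block_cache = leveldb::NewLRUCache(64 * 1024 * 1024);

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/exampledb", &db);
  assert(s.ok());

  // ... use the database ...

  delete db;                   // close the DB first,
  delete options.block_cache;  // then release the cache it was borrowing
  return 0;
}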
diff --git a/src/leveldb/include/leveldb/comparator.h b/src/leveldb/include/leveldb/comparator.h
index 556b984c76..a85b51ebd8 100644
--- a/src/leveldb/include/leveldb/comparator.h
+++ b/src/leveldb/include/leveldb/comparator.h
@@ -7,6 +7,8 @@
#include <string>
+#include "leveldb/export.h"
+
namespace leveldb {
class Slice;
@@ -15,7 +17,7 @@ class Slice;
// used as keys in an sstable or a database. A Comparator implementation
// must be thread-safe since leveldb may invoke its methods concurrently
// from multiple threads.
-class Comparator {
+class LEVELDB_EXPORT Comparator {
public:
virtual ~Comparator();
@@ -43,9 +45,8 @@ class Comparator {
// If *start < limit, changes *start to a short string in [start,limit).
// Simple comparator implementations may return with *start unchanged,
// i.e., an implementation of this method that does nothing is correct.
- virtual void FindShortestSeparator(
- std::string* start,
- const Slice& limit) const = 0;
+ virtual void FindShortestSeparator(std::string* start,
+ const Slice& limit) const = 0;
// Changes *key to a short string >= *key.
// Simple comparator implementations may return with *key unchanged,
@@ -56,7 +57,7 @@ class Comparator {
// Return a builtin comparator that uses lexicographic byte-wise
// ordering. The result remains the property of this module and
// must not be deleted.
-extern const Comparator* BytewiseComparator();
+LEVELDB_EXPORT const Comparator* BytewiseComparator();
} // namespace leveldb
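For context, a minimal custom comparator under the contract described above: thread-safe, a stable Name(), and the explicitly permitted no-op separator methods. Compare(), Name() and FindShortSuccessor() come from the unchanged parts of this header; the class and its name string are hypothetical.

#include <string>
#include "leveldb/comparator.h"
#include "leveldb/slice.h"

// Orders keys by length first, then bytewise. Illustrative only.
class LengthFirstComparator : public leveldb::Comparator {
 public:
  int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const override {
    if (a.size() != b.size()) return a.size() < b.size() ? -1 : +1;
    return a.compare(b);
  }
  // The name is stored with the database; change it if the ordering changes.
  const char* Name() const override { return "example.LengthFirstComparator"; }
  // Doing nothing here is explicitly allowed by the header comments.
  void FindShortestSeparator(std::string* /*start*/,
                             const leveldb::Slice& /*limit*/) const override {}
  void FindShortSuccessor(std::string* /*key*/) const override {}
};

Such a comparator is installed through Options::comparator and must outlive any DB that uses it.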
diff --git a/src/leveldb/include/leveldb/db.h b/src/leveldb/include/leveldb/db.h
index bfab10a0b7..b73014a221 100644
--- a/src/leveldb/include/leveldb/db.h
+++ b/src/leveldb/include/leveldb/db.h
@@ -7,14 +7,16 @@
#include <stdint.h>
#include <stdio.h>
+
+#include "leveldb/export.h"
#include "leveldb/iterator.h"
#include "leveldb/options.h"
namespace leveldb {
-// Update Makefile if you change these
+// Update CMakeLists.txt if you change these
static const int kMajorVersion = 1;
-static const int kMinorVersion = 20;
+static const int kMinorVersion = 22;
struct Options;
struct ReadOptions;
@@ -24,42 +26,44 @@ class WriteBatch;
// Abstract handle to particular state of a DB.
// A Snapshot is an immutable object and can therefore be safely
// accessed from multiple threads without any external synchronization.
-class Snapshot {
+class LEVELDB_EXPORT Snapshot {
protected:
virtual ~Snapshot();
};
// A range of keys
-struct Range {
- Slice start; // Included in the range
- Slice limit; // Not included in the range
+struct LEVELDB_EXPORT Range {
+ Range() = default;
+ Range(const Slice& s, const Slice& l) : start(s), limit(l) {}
- Range() { }
- Range(const Slice& s, const Slice& l) : start(s), limit(l) { }
+ Slice start; // Included in the range
+ Slice limit; // Not included in the range
};
// A DB is a persistent ordered map from keys to values.
// A DB is safe for concurrent access from multiple threads without
// any external synchronization.
-class DB {
+class LEVELDB_EXPORT DB {
public:
// Open the database with the specified "name".
// Stores a pointer to a heap-allocated database in *dbptr and returns
// OK on success.
- // Stores NULL in *dbptr and returns a non-OK status on error.
+ // Stores nullptr in *dbptr and returns a non-OK status on error.
// Caller should delete *dbptr when it is no longer needed.
- static Status Open(const Options& options,
- const std::string& name,
+ static Status Open(const Options& options, const std::string& name,
DB** dbptr);
- DB() { }
+ DB() = default;
+
+ DB(const DB&) = delete;
+ DB& operator=(const DB&) = delete;
+
virtual ~DB();
// Set the database entry for "key" to "value". Returns OK on success,
// and a non-OK status on error.
// Note: consider setting options.sync = true.
- virtual Status Put(const WriteOptions& options,
- const Slice& key,
+ virtual Status Put(const WriteOptions& options, const Slice& key,
const Slice& value) = 0;
// Remove the database entry (if any) for "key". Returns OK on
@@ -80,8 +84,8 @@ class DB {
// a status for which Status::IsNotFound() returns true.
//
// May return some other Status on an error.
- virtual Status Get(const ReadOptions& options,
- const Slice& key, std::string* value) = 0;
+ virtual Status Get(const ReadOptions& options, const Slice& key,
+ std::string* value) = 0;
// Return a heap-allocated iterator over the contents of the database.
// The result of NewIterator() is initially invalid (caller must
@@ -136,27 +140,27 @@ class DB {
// needed to access the data. This operation should typically only
// be invoked by users who understand the underlying implementation.
//
- // begin==NULL is treated as a key before all keys in the database.
- // end==NULL is treated as a key after all keys in the database.
+ // begin==nullptr is treated as a key before all keys in the database.
+ // end==nullptr is treated as a key after all keys in the database.
// Therefore the following call will compact the entire database:
- // db->CompactRange(NULL, NULL);
+ // db->CompactRange(nullptr, nullptr);
virtual void CompactRange(const Slice* begin, const Slice* end) = 0;
-
- private:
- // No copying allowed
- DB(const DB&);
- void operator=(const DB&);
};
// Destroy the contents of the specified database.
// Be very careful using this method.
-Status DestroyDB(const std::string& name, const Options& options);
+//
+// Note: For backwards compatibility, if DestroyDB is unable to list the
+// database files, Status::OK() will still be returned masking this failure.
+LEVELDB_EXPORT Status DestroyDB(const std::string& name,
+ const Options& options);
// If a DB cannot be opened, you may attempt to call this method to
// resurrect as much of the contents of the database as possible.
// Some data may be lost, so be careful when calling this function
// on a database that contains important information.
-Status RepairDB(const std::string& dbname, const Options& options);
+LEVELDB_EXPORT Status RepairDB(const std::string& dbname,
+ const Options& options);
} // namespace leveldb
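A minimal sketch of the DB interface as documented above — open with create_if_missing, Put/Get, and the CompactRange(nullptr, nullptr) whole-database compaction. Illustrative only; the database path is a placeholder.

#include <cassert>
#include <string>
#include "leveldb/db.h"

int main() {
  leveldb::Options options;
  options.create_if_missing = true;

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/exampledb", &db);
  assert(s.ok());

  s = db->Put(leveldb::WriteOptions(), "key", "value");
  assert(s.ok());

  std::string value;
  s = db->Get(leveldb::ReadOptions(), "key", &value);
  assert(s.ok() && value == "value");

  // begin==nullptr / end==nullptr compacts the entire key range.
  db->CompactRange(nullptr, nullptr);

  delete db;
  return 0;
}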
diff --git a/src/leveldb/include/leveldb/dumpfile.h b/src/leveldb/include/leveldb/dumpfile.h
index 3f97fda16b..a58bc6b36c 100644
--- a/src/leveldb/include/leveldb/dumpfile.h
+++ b/src/leveldb/include/leveldb/dumpfile.h
@@ -6,7 +6,9 @@
#define STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_
#include <string>
+
#include "leveldb/env.h"
+#include "leveldb/export.h"
#include "leveldb/status.h"
namespace leveldb {
@@ -18,7 +20,8 @@ namespace leveldb {
//
// Returns a non-OK result if fname does not name a leveldb storage
// file, or if the file cannot be read.
-Status DumpFile(Env* env, const std::string& fname, WritableFile* dst);
+LEVELDB_EXPORT Status DumpFile(Env* env, const std::string& fname,
+ WritableFile* dst);
} // namespace leveldb
diff --git a/src/leveldb/include/leveldb/env.h b/src/leveldb/include/leveldb/env.h
index 275d441eae..96c21b3966 100644
--- a/src/leveldb/include/leveldb/env.h
+++ b/src/leveldb/include/leveldb/env.h
@@ -13,12 +13,36 @@
#ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_
#define STORAGE_LEVELDB_INCLUDE_ENV_H_
-#include <string>
-#include <vector>
#include <stdarg.h>
#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "leveldb/export.h"
#include "leveldb/status.h"
+#if defined(_WIN32)
+// The leveldb::Env class below contains a DeleteFile method.
+// At the same time, <windows.h>, a fairly popular header
+// file for Windows applications, defines a DeleteFile macro.
+//
+// Without any intervention on our part, the result of this
+// unfortunate coincidence is that the name of the
+// leveldb::Env::DeleteFile method seen by the compiler depends on
+// whether <windows.h> was included before or after the LevelDB
+// headers.
+//
+// To avoid headaches, we undefined DeleteFile (if defined) and
+// redefine it at the bottom of this file. This way <windows.h>
+// can be included before this file (or not at all) and the
+// exported method will always be leveldb::Env::DeleteFile.
+#if defined(DeleteFile)
+#undef DeleteFile
+#define LEVELDB_DELETEFILE_UNDEFINED
+#endif // defined(DeleteFile)
+#endif // defined(_WIN32)
+
namespace leveldb {
class FileLock;
@@ -28,9 +52,13 @@ class SequentialFile;
class Slice;
class WritableFile;
-class Env {
+class LEVELDB_EXPORT Env {
public:
- Env() { }
+ Env() = default;
+
+ Env(const Env&) = delete;
+ Env& operator=(const Env&) = delete;
+
virtual ~Env();
// Return a default environment suitable for the current operating
@@ -40,20 +68,22 @@ class Env {
// The result of Default() belongs to leveldb and must never be deleted.
static Env* Default();
- // Create a brand new sequentially-readable file with the specified name.
+ // Create an object that sequentially reads the file with the specified name.
// On success, stores a pointer to the new file in *result and returns OK.
- // On failure stores NULL in *result and returns non-OK. If the file does
- // not exist, returns a non-OK status.
+ // On failure stores nullptr in *result and returns non-OK. If the file does
+ // not exist, returns a non-OK status. Implementations should return a
+ // NotFound status when the file does not exist.
//
// The returned file will only be accessed by one thread at a time.
virtual Status NewSequentialFile(const std::string& fname,
SequentialFile** result) = 0;
- // Create a brand new random access read-only file with the
+ // Create an object supporting random-access reads from the file with the
// specified name. On success, stores a pointer to the new file in
- // *result and returns OK. On failure stores NULL in *result and
+ // *result and returns OK. On failure stores nullptr in *result and
// returns non-OK. If the file does not exist, returns a non-OK
- // status.
+ // status. Implementations should return a NotFound status when the file does
+ // not exist.
//
// The returned file may be concurrently accessed by multiple threads.
virtual Status NewRandomAccessFile(const std::string& fname,
@@ -62,7 +92,7 @@ class Env {
// Create an object that writes to a new file with the specified
// name. Deletes any existing file with the same name and creates a
// new file. On success, stores a pointer to the new file in
- // *result and returns OK. On failure stores NULL in *result and
+ // *result and returns OK. On failure stores nullptr in *result and
// returns non-OK.
//
// The returned file will only be accessed by one thread at a time.
@@ -72,7 +102,7 @@ class Env {
// Create an object that either appends to an existing file, or
// writes to a new file (if the file does not exist to begin with).
// On success, stores a pointer to the new file in *result and
- // returns OK. On failure stores NULL in *result and returns
+ // returns OK. On failure stores nullptr in *result and returns
// non-OK.
//
// The returned file will only be accessed by one thread at a time.
@@ -110,7 +140,7 @@ class Env {
const std::string& target) = 0;
// Lock the specified file. Used to prevent concurrent access to
- // the same db by multiple processes. On failure, stores NULL in
+ // the same db by multiple processes. On failure, stores nullptr in
// *lock and returns non-OK.
//
// On success, stores a pointer to the object that represents the
@@ -136,16 +166,14 @@ class Env {
// added to the same Env may run concurrently in different threads.
// I.e., the caller may not assume that background work items are
// serialized.
- virtual void Schedule(
- void (*function)(void* arg),
- void* arg) = 0;
+ virtual void Schedule(void (*function)(void* arg), void* arg) = 0;
// Start a new thread, invoking "function(arg)" within the new thread.
// When "function(arg)" returns, the thread will be destroyed.
virtual void StartThread(void (*function)(void* arg), void* arg) = 0;
// *path is set to a temporary directory that can be used for testing. It may
- // or many not have just been created. The directory may or may not differ
+ // or may not have just been created. The directory may or may not differ
// between runs of the same process, but subsequent calls will return the
// same directory.
virtual Status GetTestDirectory(std::string* path) = 0;
@@ -159,17 +187,16 @@ class Env {
// Sleep/delay the thread for the prescribed number of micro-seconds.
virtual void SleepForMicroseconds(int micros) = 0;
-
- private:
- // No copying allowed
- Env(const Env&);
- void operator=(const Env&);
};
// A file abstraction for reading sequentially through a file
-class SequentialFile {
+class LEVELDB_EXPORT SequentialFile {
public:
- SequentialFile() { }
+ SequentialFile() = default;
+
+ SequentialFile(const SequentialFile&) = delete;
+ SequentialFile& operator=(const SequentialFile&) = delete;
+
virtual ~SequentialFile();
// Read up to "n" bytes from the file. "scratch[0..n-1]" may be
@@ -193,17 +220,16 @@ class SequentialFile {
// Get a name for the file, only for error reporting
virtual std::string GetName() const = 0;
-
- private:
- // No copying allowed
- SequentialFile(const SequentialFile&);
- void operator=(const SequentialFile&);
};
// A file abstraction for randomly reading the contents of a file.
-class RandomAccessFile {
+class LEVELDB_EXPORT RandomAccessFile {
public:
- RandomAccessFile() { }
+ RandomAccessFile() = default;
+
+ RandomAccessFile(const RandomAccessFile&) = delete;
+ RandomAccessFile& operator=(const RandomAccessFile&) = delete;
+
virtual ~RandomAccessFile();
// Read up to "n" bytes from the file starting at "offset".
@@ -220,19 +246,18 @@ class RandomAccessFile {
// Get a name for the file, only for error reporting
virtual std::string GetName() const = 0;
-
- private:
- // No copying allowed
- RandomAccessFile(const RandomAccessFile&);
- void operator=(const RandomAccessFile&);
};
// A file abstraction for sequential writing. The implementation
// must provide buffering since callers may append small fragments
// at a time to the file.
-class WritableFile {
+class LEVELDB_EXPORT WritableFile {
public:
- WritableFile() { }
+ WritableFile() = default;
+
+ WritableFile(const WritableFile&) = delete;
+ WritableFile& operator=(const WritableFile&) = delete;
+
virtual ~WritableFile();
virtual Status Append(const Slice& data) = 0;
@@ -242,119 +267,130 @@ class WritableFile {
// Get a name for the file, only for error reporting
virtual std::string GetName() const = 0;
-
- private:
- // No copying allowed
- WritableFile(const WritableFile&);
- void operator=(const WritableFile&);
};
// An interface for writing log messages.
-class Logger {
+class LEVELDB_EXPORT Logger {
public:
- Logger() { }
+ Logger() = default;
+
+ Logger(const Logger&) = delete;
+ Logger& operator=(const Logger&) = delete;
+
virtual ~Logger();
// Write an entry to the log file with the specified format.
virtual void Logv(const char* format, va_list ap) = 0;
-
- private:
- // No copying allowed
- Logger(const Logger&);
- void operator=(const Logger&);
};
-
// Identifies a locked file.
-class FileLock {
+class LEVELDB_EXPORT FileLock {
public:
- FileLock() { }
+ FileLock() = default;
+
+ FileLock(const FileLock&) = delete;
+ FileLock& operator=(const FileLock&) = delete;
+
virtual ~FileLock();
- private:
- // No copying allowed
- FileLock(const FileLock&);
- void operator=(const FileLock&);
};
-// Log the specified data to *info_log if info_log is non-NULL.
-extern void Log(Logger* info_log, const char* format, ...)
-# if defined(__GNUC__) || defined(__clang__)
- __attribute__((__format__ (__printf__, 2, 3)))
-# endif
+// Log the specified data to *info_log if info_log is non-null.
+void Log(Logger* info_log, const char* format, ...)
+#if defined(__GNUC__) || defined(__clang__)
+ __attribute__((__format__(__printf__, 2, 3)))
+#endif
;
// A utility routine: write "data" to the named file.
-extern Status WriteStringToFile(Env* env, const Slice& data,
- const std::string& fname);
+LEVELDB_EXPORT Status WriteStringToFile(Env* env, const Slice& data,
+ const std::string& fname);
// A utility routine: read contents of named file into *data
-extern Status ReadFileToString(Env* env, const std::string& fname,
- std::string* data);
+LEVELDB_EXPORT Status ReadFileToString(Env* env, const std::string& fname,
+ std::string* data);
// An implementation of Env that forwards all calls to another Env.
// May be useful to clients who wish to override just part of the
// functionality of another Env.
-class EnvWrapper : public Env {
+class LEVELDB_EXPORT EnvWrapper : public Env {
public:
- // Initialize an EnvWrapper that delegates all calls to *t
- explicit EnvWrapper(Env* t) : target_(t) { }
+ // Initialize an EnvWrapper that delegates all calls to *t.
+ explicit EnvWrapper(Env* t) : target_(t) {}
virtual ~EnvWrapper();
- // Return the target to which this Env forwards all calls
+ // Return the target to which this Env forwards all calls.
Env* target() const { return target_; }
- // The following text is boilerplate that forwards all methods to target()
- Status NewSequentialFile(const std::string& f, SequentialFile** r) {
+ // The following text is boilerplate that forwards all methods to target().
+ Status NewSequentialFile(const std::string& f, SequentialFile** r) override {
return target_->NewSequentialFile(f, r);
}
- Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
+ Status NewRandomAccessFile(const std::string& f,
+ RandomAccessFile** r) override {
return target_->NewRandomAccessFile(f, r);
}
- Status NewWritableFile(const std::string& f, WritableFile** r) {
+ Status NewWritableFile(const std::string& f, WritableFile** r) override {
return target_->NewWritableFile(f, r);
}
- Status NewAppendableFile(const std::string& f, WritableFile** r) {
+ Status NewAppendableFile(const std::string& f, WritableFile** r) override {
return target_->NewAppendableFile(f, r);
}
- bool FileExists(const std::string& f) { return target_->FileExists(f); }
- Status GetChildren(const std::string& dir, std::vector<std::string>* r) {
+ bool FileExists(const std::string& f) override {
+ return target_->FileExists(f);
+ }
+ Status GetChildren(const std::string& dir,
+ std::vector<std::string>* r) override {
return target_->GetChildren(dir, r);
}
- Status DeleteFile(const std::string& f) { return target_->DeleteFile(f); }
- Status CreateDir(const std::string& d) { return target_->CreateDir(d); }
- Status DeleteDir(const std::string& d) { return target_->DeleteDir(d); }
- Status GetFileSize(const std::string& f, uint64_t* s) {
+ Status DeleteFile(const std::string& f) override {
+ return target_->DeleteFile(f);
+ }
+ Status CreateDir(const std::string& d) override {
+ return target_->CreateDir(d);
+ }
+ Status DeleteDir(const std::string& d) override {
+ return target_->DeleteDir(d);
+ }
+ Status GetFileSize(const std::string& f, uint64_t* s) override {
return target_->GetFileSize(f, s);
}
- Status RenameFile(const std::string& s, const std::string& t) {
+ Status RenameFile(const std::string& s, const std::string& t) override {
return target_->RenameFile(s, t);
}
- Status LockFile(const std::string& f, FileLock** l) {
+ Status LockFile(const std::string& f, FileLock** l) override {
return target_->LockFile(f, l);
}
- Status UnlockFile(FileLock* l) { return target_->UnlockFile(l); }
- void Schedule(void (*f)(void*), void* a) {
+ Status UnlockFile(FileLock* l) override { return target_->UnlockFile(l); }
+ void Schedule(void (*f)(void*), void* a) override {
return target_->Schedule(f, a);
}
- void StartThread(void (*f)(void*), void* a) {
+ void StartThread(void (*f)(void*), void* a) override {
return target_->StartThread(f, a);
}
- virtual Status GetTestDirectory(std::string* path) {
+ Status GetTestDirectory(std::string* path) override {
return target_->GetTestDirectory(path);
}
- virtual Status NewLogger(const std::string& fname, Logger** result) {
+ Status NewLogger(const std::string& fname, Logger** result) override {
return target_->NewLogger(fname, result);
}
- uint64_t NowMicros() {
- return target_->NowMicros();
- }
- void SleepForMicroseconds(int micros) {
+ uint64_t NowMicros() override { return target_->NowMicros(); }
+ void SleepForMicroseconds(int micros) override {
target_->SleepForMicroseconds(micros);
}
+
private:
Env* target_;
};
} // namespace leveldb
+// Redefine DeleteFile if necessary.
+#if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
+#if defined(UNICODE)
+#define DeleteFile DeleteFileW
+#else
+#define DeleteFile DeleteFileA
+#endif // defined(UNICODE)
+#endif // defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
+
#endif // STORAGE_LEVELDB_INCLUDE_ENV_H_
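EnvWrapper exists for exactly the pattern sketched below: forward everything to Env::Default() and override a single method. The counting use case is hypothetical; a wrapper like this is plugged in through Options::env (see options.h below) and must outlive the DB.

#include <string>
#include "leveldb/env.h"

// Counts file deletions; everything else is forwarded to the wrapped Env.
class CountingEnv : public leveldb::EnvWrapper {
 public:
  explicit CountingEnv(leveldb::Env* base) : leveldb::EnvWrapper(base) {}

  leveldb::Status DeleteFile(const std::string& fname) override {
    ++deleted_;
    return target()->DeleteFile(fname);
  }

  int deleted() const { return deleted_; }

 private:
  int deleted_ = 0;
};

// Typical wiring (the wrapper must outlive any DB that uses it):
//   CountingEnv env(leveldb::Env::Default());
//   leveldb::Options options;
//   options.env = &env;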
diff --git a/src/leveldb/include/leveldb/export.h b/src/leveldb/include/leveldb/export.h
new file mode 100644
index 0000000000..6ba9b183da
--- /dev/null
+++ b/src/leveldb/include/leveldb/export.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2017 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_INCLUDE_EXPORT_H_
+#define STORAGE_LEVELDB_INCLUDE_EXPORT_H_
+
+#if !defined(LEVELDB_EXPORT)
+
+#if defined(LEVELDB_SHARED_LIBRARY)
+#if defined(_WIN32)
+
+#if defined(LEVELDB_COMPILE_LIBRARY)
+#define LEVELDB_EXPORT __declspec(dllexport)
+#else
+#define LEVELDB_EXPORT __declspec(dllimport)
+#endif // defined(LEVELDB_COMPILE_LIBRARY)
+
+#else // defined(_WIN32)
+#if defined(LEVELDB_COMPILE_LIBRARY)
+#define LEVELDB_EXPORT __attribute__((visibility("default")))
+#else
+#define LEVELDB_EXPORT
+#endif
+#endif // defined(_WIN32)
+
+#else // defined(LEVELDB_SHARED_LIBRARY)
+#define LEVELDB_EXPORT
+#endif
+
+#endif // !defined(LEVELDB_EXPORT)
+
+#endif // STORAGE_LEVELDB_INCLUDE_EXPORT_H_
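The new export.h leaves LEVELDB_EXPORT empty for static builds and only expands to the platform's shared-library annotation when LEVELDB_SHARED_LIBRARY is defined. A sketch of the expansions; the compiler flags shown are conventional examples, not taken from this patch.

#include "leveldb/export.h"

// Static build (no macros defined): LEVELDB_EXPORT expands to nothing, so the
// annotated declarations above are unchanged.
//
// Building the library itself as a shared object (hypothetical flags):
//   g++ -DLEVELDB_SHARED_LIBRARY -DLEVELDB_COMPILE_LIBRARY -fvisibility=hidden ...
//   => LEVELDB_EXPORT == __attribute__((visibility("default")))  (non-Windows)
//   => LEVELDB_EXPORT == __declspec(dllexport)                    (_WIN32)
//
// Client code linking against that shared library:
//   g++ -DLEVELDB_SHARED_LIBRARY ...
//   => LEVELDB_EXPORT is empty (non-Windows) or __declspec(dllimport) (_WIN32)

LEVELDB_EXPORT int leveldb_example_symbol();  // annotated like the headers above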
diff --git a/src/leveldb/include/leveldb/filter_policy.h b/src/leveldb/include/leveldb/filter_policy.h
index 1fba08001f..49c8eda776 100644
--- a/src/leveldb/include/leveldb/filter_policy.h
+++ b/src/leveldb/include/leveldb/filter_policy.h
@@ -18,11 +18,13 @@
#include <string>
+#include "leveldb/export.h"
+
namespace leveldb {
class Slice;
-class FilterPolicy {
+class LEVELDB_EXPORT FilterPolicy {
public:
virtual ~FilterPolicy();
@@ -38,8 +40,8 @@ class FilterPolicy {
//
// Warning: do not change the initial contents of *dst. Instead,
// append the newly constructed filter to *dst.
- virtual void CreateFilter(const Slice* keys, int n, std::string* dst)
- const = 0;
+ virtual void CreateFilter(const Slice* keys, int n,
+ std::string* dst) const = 0;
// "filter" contains the data appended by a preceding call to
// CreateFilter() on this class. This method must return true if
@@ -63,8 +65,8 @@ class FilterPolicy {
// ignores trailing spaces, it would be incorrect to use a
// FilterPolicy (like NewBloomFilterPolicy) that does not ignore
// trailing spaces in keys.
-extern const FilterPolicy* NewBloomFilterPolicy(int bits_per_key);
+LEVELDB_EXPORT const FilterPolicy* NewBloomFilterPolicy(int bits_per_key);
-}
+} // namespace leveldb
#endif // STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
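The usage pattern described in the header comments, for reference: pass a bloom filter policy through Options::filter_policy and keep it alive for the DB's lifetime. The path and the 10-bits-per-key choice are illustrative.

#include <cassert>
#include "leveldb/db.h"
#include "leveldb/filter_policy.h"

int main() {
  leveldb::Options options;
  options.create_if_missing = true;
  // ~10 bits per key gives roughly a 1% false-positive rate for point lookups.
  options.filter_policy = leveldb::NewBloomFilterPolicy(10);

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/exampledb", &db);
  assert(s.ok());

  // ... reads of absent keys now mostly avoid touching disk ...

  delete db;
  delete options.filter_policy;  // released only after the DB is closed
  return 0;
}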
diff --git a/src/leveldb/include/leveldb/iterator.h b/src/leveldb/include/leveldb/iterator.h
index da631ed9d8..bb9a5df8f5 100644
--- a/src/leveldb/include/leveldb/iterator.h
+++ b/src/leveldb/include/leveldb/iterator.h
@@ -15,14 +15,19 @@
#ifndef STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
#define STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
+#include "leveldb/export.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"
namespace leveldb {
-class Iterator {
+class LEVELDB_EXPORT Iterator {
public:
Iterator();
+
+ Iterator(const Iterator&) = delete;
+ Iterator& operator=(const Iterator&) = delete;
+
virtual ~Iterator();
// An iterator is either positioned at a key/value pair, or
@@ -72,28 +77,35 @@ class Iterator {
//
// Note that unlike all of the preceding methods, this method is
// not abstract and therefore clients should not override it.
- typedef void (*CleanupFunction)(void* arg1, void* arg2);
+ using CleanupFunction = void (*)(void* arg1, void* arg2);
void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2);
private:
- struct Cleanup {
+ // Cleanup functions are stored in a single-linked list.
+ // The list's head node is inlined in the iterator.
+ struct CleanupNode {
+ // True if the node is not used. Only head nodes might be unused.
+ bool IsEmpty() const { return function == nullptr; }
+ // Invokes the cleanup function.
+ void Run() {
+ assert(function != nullptr);
+ (*function)(arg1, arg2);
+ }
+
+ // The head node is used if the function pointer is not null.
CleanupFunction function;
void* arg1;
void* arg2;
- Cleanup* next;
+ CleanupNode* next;
};
- Cleanup cleanup_;
-
- // No copying allowed
- Iterator(const Iterator&);
- void operator=(const Iterator&);
+ CleanupNode cleanup_head_;
};
// Return an empty iterator (yields nothing).
-extern Iterator* NewEmptyIterator();
+LEVELDB_EXPORT Iterator* NewEmptyIterator();
// Return an empty iterator with the specified status.
-extern Iterator* NewErrorIterator(const Status& status);
+LEVELDB_EXPORT Iterator* NewErrorIterator(const Status& status);
} // namespace leveldb
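The public Iterator surface is unchanged by the CleanupNode rework, so the usual scan loop still looks like this (NewIterator() and Slice::ToString() come from db.h and slice.h, elsewhere in this same update):

#include <cassert>
#include <iostream>
#include "leveldb/db.h"

void ScanAll(leveldb::DB* db) {
  leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions());
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    std::cout << it->key().ToString() << " => " << it->value().ToString() << "\n";
  }
  assert(it->status().ok());  // check for errors after the loop
  delete it;                  // runs any registered cleanup functions
}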
diff --git a/src/leveldb/include/leveldb/options.h b/src/leveldb/include/leveldb/options.h
index 976e38122a..b7487726bc 100644
--- a/src/leveldb/include/leveldb/options.h
+++ b/src/leveldb/include/leveldb/options.h
@@ -7,6 +7,8 @@
#include <stddef.h>
+#include "leveldb/export.h"
+
namespace leveldb {
class Cache;
@@ -23,12 +25,15 @@ class Snapshot;
enum CompressionType {
// NOTE: do not change the values of existing entries, as these are
// part of the persistent format on disk.
- kNoCompression = 0x0,
+ kNoCompression = 0x0,
kSnappyCompression = 0x1
};
// Options to control the behavior of a database (passed to DB::Open)
-struct Options {
+struct LEVELDB_EXPORT Options {
+ // Create an Options object with default values for all fields.
+ Options();
+
// -------------------
// Parameters that affect behavior
@@ -41,20 +46,17 @@ struct Options {
const Comparator* comparator;
// If true, the database will be created if it is missing.
- // Default: false
- bool create_if_missing;
+ bool create_if_missing = false;
// If true, an error is raised if the database already exists.
- // Default: false
- bool error_if_exists;
+ bool error_if_exists = false;
// If true, the implementation will do aggressive checking of the
// data it is processing and will stop early if it detects any
// errors. This may have unforeseen ramifications: for example, a
// corruption of one DB entry may cause a large number of entries to
// become unreadable or for the entire DB to become unopenable.
- // Default: false
- bool paranoid_checks;
+ bool paranoid_checks = false;
// Use the specified object to interact with the environment,
// e.g. to read/write files, schedule background work, etc.
@@ -62,10 +64,9 @@ struct Options {
Env* env;
// Any internal progress/error information generated by the db will
- // be written to info_log if it is non-NULL, or to a file stored
- // in the same directory as the DB contents if info_log is NULL.
- // Default: NULL
- Logger* info_log;
+ // be written to info_log if it is non-null, or to a file stored
+ // in the same directory as the DB contents if info_log is null.
+ Logger* info_log = nullptr;
// -------------------
// Parameters that affect performance
@@ -78,39 +79,30 @@ struct Options {
// so you may wish to adjust this parameter to control memory usage.
// Also, a larger write buffer will result in a longer recovery time
// the next time the database is opened.
- //
- // Default: 4MB
- size_t write_buffer_size;
+ size_t write_buffer_size = 4 * 1024 * 1024;
// Number of open files that can be used by the DB. You may need to
// increase this if your database has a large working set (budget
// one open file per 2MB of working set).
- //
- // Default: 1000
- int max_open_files;
+ int max_open_files = 1000;
// Control over blocks (user data is stored in a set of blocks, and
// a block is the unit of reading from disk).
- // If non-NULL, use the specified cache for blocks.
- // If NULL, leveldb will automatically create and use an 8MB internal cache.
- // Default: NULL
- Cache* block_cache;
+ // If non-null, use the specified cache for blocks.
+ // If null, leveldb will automatically create and use an 8MB internal cache.
+ Cache* block_cache = nullptr;
// Approximate size of user data packed per block. Note that the
// block size specified here corresponds to uncompressed data. The
// actual size of the unit read from disk may be smaller if
// compression is enabled. This parameter can be changed dynamically.
- //
- // Default: 4K
- size_t block_size;
+ size_t block_size = 4 * 1024;
// Number of keys between restart points for delta encoding of keys.
// This parameter can be changed dynamically. Most clients should
// leave this parameter alone.
- //
- // Default: 16
- int block_restart_interval;
+ int block_restart_interval = 16;
// Leveldb will write up to this amount of bytes to a file before
// switching to a new one.
@@ -120,9 +112,7 @@ struct Options {
// compactions and hence longer latency/performance hiccups.
// Another reason to increase this parameter might be when you are
// initially populating a large database.
- //
- // Default: 2MB
- size_t max_file_size;
+ size_t max_file_size = 2 * 1024 * 1024;
// Compress blocks using the specified compression algorithm. This
// parameter can be changed dynamically.
@@ -138,53 +128,43 @@ struct Options {
// worth switching to kNoCompression. Even if the input data is
// incompressible, the kSnappyCompression implementation will
// efficiently detect that and will switch to uncompressed mode.
- CompressionType compression;
+ CompressionType compression = kSnappyCompression;
// EXPERIMENTAL: If true, append to existing MANIFEST and log files
// when a database is opened. This can significantly speed up open.
//
// Default: currently false, but may become true later.
- bool reuse_logs;
+ bool reuse_logs = false;
- // If non-NULL, use the specified filter policy to reduce disk reads.
+ // If non-null, use the specified filter policy to reduce disk reads.
// Many applications will benefit from passing the result of
// NewBloomFilterPolicy() here.
- //
- // Default: NULL
- const FilterPolicy* filter_policy;
-
- // Create an Options object with default values for all fields.
- Options();
+ const FilterPolicy* filter_policy = nullptr;
};
// Options that control read operations
-struct ReadOptions {
+struct LEVELDB_EXPORT ReadOptions {
+ ReadOptions() = default;
+
// If true, all data read from underlying storage will be
// verified against corresponding checksums.
- // Default: false
- bool verify_checksums;
+ bool verify_checksums = false;
// Should the data read for this iteration be cached in memory?
// Callers may wish to set this field to false for bulk scans.
- // Default: true
- bool fill_cache;
+ bool fill_cache = true;
- // If "snapshot" is non-NULL, read as of the supplied snapshot
+ // If "snapshot" is non-null, read as of the supplied snapshot
// (which must belong to the DB that is being read and which must
- // not have been released). If "snapshot" is NULL, use an implicit
+ // not have been released). If "snapshot" is null, use an implicit
// snapshot of the state at the beginning of this read operation.
- // Default: NULL
- const Snapshot* snapshot;
-
- ReadOptions()
- : verify_checksums(false),
- fill_cache(true),
- snapshot(NULL) {
- }
+ const Snapshot* snapshot = nullptr;
};
// Options that control write operations
-struct WriteOptions {
+struct LEVELDB_EXPORT WriteOptions {
+ WriteOptions() = default;
+
// If true, the write will be flushed from the operating system
// buffer cache (by calling WritableFile::Sync()) before the write
// is considered complete. If this flag is true, writes will be
@@ -199,13 +179,7 @@ struct WriteOptions {
// crash semantics as the "write()" system call. A DB write
// with sync==true has similar crash semantics to a "write()"
// system call followed by "fsync()".
- //
- // Default: false
- bool sync;
-
- WriteOptions()
- : sync(false) {
- }
+ bool sync = false;
};
} // namespace leveldb
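With the in-class default initializers above, tuning is just field assignment on a freshly constructed object. A sketch of overriding a few defaults; the specific values are arbitrary examples.

#include "leveldb/options.h"

leveldb::Options MakeTunedOptions() {
  leveldb::Options options;                       // all fields start at the defaults above
  options.create_if_missing = true;               // default: false
  options.write_buffer_size = 16 * 1024 * 1024;   // default: 4MB
  options.max_file_size = 8 * 1024 * 1024;        // default: 2MB
  options.compression = leveldb::kNoCompression;  // default: kSnappyCompression
  return options;
}

leveldb::WriteOptions MakeDurableWrite() {
  leveldb::WriteOptions wo;
  wo.sync = true;  // default: false; fsync-like durability per write
  return wo;
}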
diff --git a/src/leveldb/include/leveldb/slice.h b/src/leveldb/include/leveldb/slice.h
index bc367986f7..2df417dc31 100644
--- a/src/leveldb/include/leveldb/slice.h
+++ b/src/leveldb/include/leveldb/slice.h
@@ -18,23 +18,30 @@
#include <assert.h>
#include <stddef.h>
#include <string.h>
+
#include <string>
+#include "leveldb/export.h"
+
namespace leveldb {
-class Slice {
+class LEVELDB_EXPORT Slice {
public:
// Create an empty slice.
- Slice() : data_(""), size_(0) { }
+ Slice() : data_(""), size_(0) {}
// Create a slice that refers to d[0,n-1].
- Slice(const char* d, size_t n) : data_(d), size_(n) { }
+ Slice(const char* d, size_t n) : data_(d), size_(n) {}
// Create a slice that refers to the contents of "s"
- Slice(const std::string& s) : data_(s.data()), size_(s.size()) { }
+ Slice(const std::string& s) : data_(s.data()), size_(s.size()) {}
// Create a slice that refers to s[0,strlen(s)-1]
- Slice(const char* s) : data_(s), size_(strlen(s)) { }
+ Slice(const char* s) : data_(s), size_(strlen(s)) {}
+
+ // Intentionally copyable.
+ Slice(const Slice&) = default;
+ Slice& operator=(const Slice&) = default;
// Return a pointer to the beginning of the referenced data
const char* data() const { return data_; }
@@ -53,7 +60,10 @@ class Slice {
}
// Change this slice to refer to an empty array
- void clear() { data_ = ""; size_ = 0; }
+ void clear() {
+ data_ = "";
+ size_ = 0;
+ }
// Drop the first "n" bytes from this slice.
void remove_prefix(size_t n) {
@@ -73,15 +83,12 @@ class Slice {
// Return true iff "x" is a prefix of "*this"
bool starts_with(const Slice& x) const {
- return ((size_ >= x.size_) &&
- (memcmp(data_, x.data_, x.size_) == 0));
+ return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0));
}
private:
const char* data_;
size_t size_;
-
- // Intentionally copyable
};
inline bool operator==(const Slice& x, const Slice& y) {
@@ -89,21 +96,20 @@ inline bool operator==(const Slice& x, const Slice& y) {
(memcmp(x.data(), y.data(), x.size()) == 0));
}
-inline bool operator!=(const Slice& x, const Slice& y) {
- return !(x == y);
-}
+inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); }
inline int Slice::compare(const Slice& b) const {
const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
int r = memcmp(data_, b.data_, min_len);
if (r == 0) {
- if (size_ < b.size_) r = -1;
- else if (size_ > b.size_) r = +1;
+ if (size_ < b.size_)
+ r = -1;
+ else if (size_ > b.size_)
+ r = +1;
}
return r;
}
} // namespace leveldb
-
#endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_
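Slice remains a non-owning view; the defaulted copy operations only make the existing "intentionally copyable" behaviour explicit. A short sketch of the semantics (Slice::ToString() is in the unchanged part of this header):

#include <cassert>
#include <string>
#include "leveldb/slice.h"

int main() {
  std::string backing = "prefix/key42";
  leveldb::Slice s(backing);        // borrows backing's bytes, no copy

  assert(s.starts_with("prefix/"));
  s.remove_prefix(7);               // now refers to "key42"
  assert(s.compare(leveldb::Slice("key42")) == 0);
  assert(s.ToString() == "key42");  // ToString() makes an owned copy

  // The Slice is only valid while 'backing' stays alive and unmodified.
  return 0;
}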
diff --git a/src/leveldb/include/leveldb/status.h b/src/leveldb/include/leveldb/status.h
index d9575f9753..e3273144e4 100644
--- a/src/leveldb/include/leveldb/status.h
+++ b/src/leveldb/include/leveldb/status.h
@@ -13,20 +13,25 @@
#ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_
#define STORAGE_LEVELDB_INCLUDE_STATUS_H_
+#include <algorithm>
#include <string>
+
+#include "leveldb/export.h"
#include "leveldb/slice.h"
namespace leveldb {
-class Status {
+class LEVELDB_EXPORT Status {
public:
// Create a success status.
- Status() : state_(NULL) { }
+ Status() noexcept : state_(nullptr) {}
~Status() { delete[] state_; }
- // Copy the specified status.
- Status(const Status& s);
- void operator=(const Status& s);
+ Status(const Status& rhs);
+ Status& operator=(const Status& rhs);
+
+ Status(Status&& rhs) noexcept : state_(rhs.state_) { rhs.state_ = nullptr; }
+ Status& operator=(Status&& rhs) noexcept;
// Return a success status.
static Status OK() { return Status(); }
@@ -49,7 +54,7 @@ class Status {
}
// Returns true iff the status indicates success.
- bool ok() const { return (state_ == NULL); }
+ bool ok() const { return (state_ == nullptr); }
// Returns true iff the status indicates a NotFound error.
bool IsNotFound() const { return code() == kNotFound; }
@@ -71,13 +76,6 @@ class Status {
std::string ToString() const;
private:
- // OK status has a NULL state_. Otherwise, state_ is a new[] array
- // of the following form:
- // state_[0..3] == length of message
- // state_[4] == code
- // state_[5..] == message
- const char* state_;
-
enum Code {
kOk = 0,
kNotFound = 1,
@@ -88,23 +86,35 @@ class Status {
};
Code code() const {
- return (state_ == NULL) ? kOk : static_cast<Code>(state_[4]);
+ return (state_ == nullptr) ? kOk : static_cast<Code>(state_[4]);
}
Status(Code code, const Slice& msg, const Slice& msg2);
static const char* CopyState(const char* s);
+
+ // OK status has a null state_. Otherwise, state_ is a new[] array
+ // of the following form:
+ // state_[0..3] == length of message
+ // state_[4] == code
+ // state_[5..] == message
+ const char* state_;
};
-inline Status::Status(const Status& s) {
- state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_);
+inline Status::Status(const Status& rhs) {
+ state_ = (rhs.state_ == nullptr) ? nullptr : CopyState(rhs.state_);
}
-inline void Status::operator=(const Status& s) {
- // The following condition catches both aliasing (when this == &s),
- // and the common case where both s and *this are ok.
- if (state_ != s.state_) {
+inline Status& Status::operator=(const Status& rhs) {
+ // The following condition catches both aliasing (when this == &rhs),
+ // and the common case where both rhs and *this are ok.
+ if (state_ != rhs.state_) {
delete[] state_;
- state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_);
+ state_ = (rhs.state_ == nullptr) ? nullptr : CopyState(rhs.state_);
}
+ return *this;
+}
+inline Status& Status::operator=(Status&& rhs) noexcept {
+ std::swap(state_, rhs.state_);
+ return *this;
}
} // namespace leveldb
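The new move operations mean a Status returned by value hands over its heap-allocated state_ instead of copying it. The usual produce-and-inspect pattern, for reference (DB::Get() is from db.h above):

#include <iostream>
#include <string>
#include "leveldb/db.h"
#include "leveldb/status.h"

leveldb::Status LookUp(leveldb::DB* db, const std::string& key, std::string* value) {
  // Get() returns a Status by value; with the new move-assignment the internal
  // state_ buffer is handed over rather than copied.
  return db->Get(leveldb::ReadOptions(), key, value);
}

void Report(const leveldb::Status& s) {
  if (s.ok()) return;
  if (s.IsNotFound())
    std::cout << "missing key\n";
  else
    std::cout << "error: " << s.ToString() << "\n";
}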
diff --git a/src/leveldb/include/leveldb/table.h b/src/leveldb/include/leveldb/table.h
index a9746c3f5e..25c6013116 100644
--- a/src/leveldb/include/leveldb/table.h
+++ b/src/leveldb/include/leveldb/table.h
@@ -6,6 +6,8 @@
#define STORAGE_LEVELDB_INCLUDE_TABLE_H_
#include <stdint.h>
+
+#include "leveldb/export.h"
#include "leveldb/iterator.h"
namespace leveldb {
@@ -21,7 +23,7 @@ class TableCache;
// A Table is a sorted map from strings to strings. Tables are
// immutable and persistent. A Table may be safely accessed from
// multiple threads without external synchronization.
-class Table {
+class LEVELDB_EXPORT Table {
public:
// Attempt to open the table that is stored in bytes [0..file_size)
// of "file", and read the metadata entries necessary to allow
@@ -30,15 +32,16 @@ class Table {
// If successful, returns ok and sets "*table" to the newly opened
// table. The client should delete "*table" when no longer needed.
// If there was an error while initializing the table, sets "*table"
- // to NULL and returns a non-ok status. Does not take ownership of
+ // to nullptr and returns a non-ok status. Does not take ownership of
// "*source", but the client must ensure that "source" remains live
// for the duration of the returned table's lifetime.
//
// *file must remain live while this Table is in use.
- static Status Open(const Options& options,
- RandomAccessFile* file,
- uint64_t file_size,
- Table** table);
+ static Status Open(const Options& options, RandomAccessFile* file,
+ uint64_t file_size, Table** table);
+
+ Table(const Table&) = delete;
+ Table& operator=(const Table&) = delete;
~Table();
@@ -56,28 +59,24 @@ class Table {
uint64_t ApproximateOffsetOf(const Slice& key) const;
private:
+ friend class TableCache;
struct Rep;
- Rep* rep_;
- explicit Table(Rep* rep) { rep_ = rep; }
static Iterator* BlockReader(void*, const ReadOptions&, const Slice&);
+ explicit Table(Rep* rep) : rep_(rep) {}
+
// Calls (*handle_result)(arg, ...) with the entry found after a call
// to Seek(key). May not make such a call if filter policy says
// that key is not present.
- friend class TableCache;
- Status InternalGet(
- const ReadOptions&, const Slice& key,
- void* arg,
- void (*handle_result)(void* arg, const Slice& k, const Slice& v));
-
+ Status InternalGet(const ReadOptions&, const Slice& key, void* arg,
+ void (*handle_result)(void* arg, const Slice& k,
+ const Slice& v));
void ReadMeta(const Footer& footer);
void ReadFilter(const Slice& filter_handle_value);
- // No copying allowed
- Table(const Table&);
- void operator=(const Table&);
+ Rep* const rep_;
};
} // namespace leveldb
diff --git a/src/leveldb/include/leveldb/table_builder.h b/src/leveldb/include/leveldb/table_builder.h
index 5fd1dc71f1..7d8896bb89 100644
--- a/src/leveldb/include/leveldb/table_builder.h
+++ b/src/leveldb/include/leveldb/table_builder.h
@@ -14,6 +14,8 @@
#define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_
#include <stdint.h>
+
+#include "leveldb/export.h"
#include "leveldb/options.h"
#include "leveldb/status.h"
@@ -23,13 +25,16 @@ class BlockBuilder;
class BlockHandle;
class WritableFile;
-class TableBuilder {
+class LEVELDB_EXPORT TableBuilder {
public:
// Create a builder that will store the contents of the table it is
// building in *file. Does not close the file. It is up to the
// caller to close the file after calling Finish().
TableBuilder(const Options& options, WritableFile* file);
+ TableBuilder(const TableBuilder&) = delete;
+ TableBuilder& operator=(const TableBuilder&) = delete;
+
// REQUIRES: Either Finish() or Abandon() has been called.
~TableBuilder();
@@ -81,10 +86,6 @@ class TableBuilder {
struct Rep;
Rep* rep_;
-
- // No copying allowed
- TableBuilder(const TableBuilder&);
- void operator=(const TableBuilder&);
};
} // namespace leveldb
diff --git a/src/leveldb/include/leveldb/write_batch.h b/src/leveldb/include/leveldb/write_batch.h
index ee9aab68e0..94d4115fed 100644
--- a/src/leveldb/include/leveldb/write_batch.h
+++ b/src/leveldb/include/leveldb/write_batch.h
@@ -22,15 +22,29 @@
#define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_
#include <string>
+
+#include "leveldb/export.h"
#include "leveldb/status.h"
namespace leveldb {
class Slice;
-class WriteBatch {
+class LEVELDB_EXPORT WriteBatch {
public:
+ class LEVELDB_EXPORT Handler {
+ public:
+ virtual ~Handler();
+ virtual void Put(const Slice& key, const Slice& value) = 0;
+ virtual void Delete(const Slice& key) = 0;
+ };
+
WriteBatch();
+
+ // Intentionally copyable.
+ WriteBatch(const WriteBatch&) = default;
+ WriteBatch& operator=(const WriteBatch&) = default;
+
~WriteBatch();
// Store the mapping "key->value" in the database.
@@ -42,21 +56,26 @@ class WriteBatch {
// Clear all updates buffered in this batch.
void Clear();
+ // The size of the database changes caused by this batch.
+ //
+ // This number is tied to implementation details, and may change across
+ // releases. It is intended for LevelDB usage metrics.
+ size_t ApproximateSize() const;
+
+ // Copies the operations in "source" to this batch.
+ //
+ // This runs in O(source size) time. However, the constant factor is better
+ // than calling Iterate() over the source batch with a Handler that replicates
+ // the operations into this batch.
+ void Append(const WriteBatch& source);
+
// Support for iterating over the contents of a batch.
- class Handler {
- public:
- virtual ~Handler();
- virtual void Put(const Slice& key, const Slice& value) = 0;
- virtual void Delete(const Slice& key) = 0;
- };
Status Iterate(Handler* handler) const;
private:
friend class WriteBatchInternal;
std::string rep_; // See comment in write_batch.cc for the format of rep_
-
- // Intentionally copyable
};
} // namespace leveldb
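A sketch of the new WriteBatch surface — Append() to merge batches and ApproximateSize() for metrics — applied atomically through DB::Write(). Keys and values are placeholders.

#include <cassert>
#include <iostream>
#include "leveldb/db.h"
#include "leveldb/write_batch.h"

void ApplyAtomically(leveldb::DB* db) {
  leveldb::WriteBatch batch;
  batch.Put("user:1", "alice");
  batch.Delete("user:0");

  leveldb::WriteBatch more;
  more.Put("user:2", "bob");
  batch.Append(more);  // O(size of 'more'), cheaper than replaying via a Handler

  std::cout << "batch is ~" << batch.ApproximateSize() << " bytes\n";

  // All three operations land together or not at all.
  leveldb::Status s = db->Write(leveldb::WriteOptions(), &batch);
  assert(s.ok());
}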
diff --git a/src/leveldb/issues/issue178_test.cc b/src/leveldb/issues/issue178_test.cc
index 1b1cf8bb28..d50ffeb9d4 100644
--- a/src/leveldb/issues/issue178_test.cc
+++ b/src/leveldb/issues/issue178_test.cc
@@ -3,9 +3,9 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
// Test for issue 178: a manual compaction causes deleted data to reappear.
+#include <cstdlib>
#include <iostream>
#include <sstream>
-#include <cstdlib>
#include "leveldb/db.h"
#include "leveldb/write_batch.h"
@@ -21,11 +21,9 @@ std::string Key1(int i) {
return buf;
}
-std::string Key2(int i) {
- return Key1(i) + "_xxx";
-}
+std::string Key2(int i) { return Key1(i) + "_xxx"; }
-class Issue178 { };
+class Issue178 {};
TEST(Issue178, Test) {
// Get rid of any state from an old run.
@@ -87,6 +85,4 @@ TEST(Issue178, Test) {
} // anonymous namespace
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/issues/issue200_test.cc b/src/leveldb/issues/issue200_test.cc
index 1cec79f443..877b2afc47 100644
--- a/src/leveldb/issues/issue200_test.cc
+++ b/src/leveldb/issues/issue200_test.cc
@@ -11,14 +11,14 @@
namespace leveldb {
-class Issue200 { };
+class Issue200 {};
TEST(Issue200, Test) {
// Get rid of any state from an old run.
std::string dbpath = test::TmpDir() + "/leveldb_issue200_test";
DestroyDB(dbpath, Options());
- DB *db;
+ DB* db;
Options options;
options.create_if_missing = true;
ASSERT_OK(DB::Open(options, dbpath, &db));
@@ -31,7 +31,7 @@ TEST(Issue200, Test) {
ASSERT_OK(db->Put(write_options, "5", "f"));
ReadOptions read_options;
- Iterator *iter = db->NewIterator(read_options);
+ Iterator* iter = db->NewIterator(read_options);
// Add an element that should not be reflected in the iterator.
ASSERT_OK(db->Put(write_options, "25", "cd"));
@@ -54,6 +54,4 @@ TEST(Issue200, Test) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/issues/issue320_test.cc b/src/leveldb/issues/issue320_test.cc
new file mode 100644
index 0000000000..c5fcbfc6e7
--- /dev/null
+++ b/src/leveldb/issues/issue320_test.cc
@@ -0,0 +1,128 @@
+// Copyright (c) 2019 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <cstdint>
+#include <cstdlib>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "leveldb/db.h"
+#include "leveldb/write_batch.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+namespace {
+
+// Creates a random number in the range of [0, max).
+int GenerateRandomNumber(int max) { return std::rand() % max; }
+
+std::string CreateRandomString(int32_t index) {
+ static const size_t len = 1024;
+ char bytes[len];
+ size_t i = 0;
+ while (i < 8) {
+ bytes[i] = 'a' + ((index >> (4 * i)) & 0xf);
+ ++i;
+ }
+ while (i < sizeof(bytes)) {
+ bytes[i] = 'a' + GenerateRandomNumber(26);
+ ++i;
+ }
+ return std::string(bytes, sizeof(bytes));
+}
+
+} // namespace
+
+class Issue320 {};
+
+TEST(Issue320, Test) {
+ std::srand(0);
+
+ bool delete_before_put = false;
+ bool keep_snapshots = true;
+
+ std::vector<std::unique_ptr<std::pair<std::string, std::string>>> test_map(
+ 10000);
+ std::vector<Snapshot const*> snapshots(100, nullptr);
+
+ DB* db;
+ Options options;
+ options.create_if_missing = true;
+
+ std::string dbpath = test::TmpDir() + "/leveldb_issue320_test";
+ ASSERT_OK(DB::Open(options, dbpath, &db));
+
+ uint32_t target_size = 10000;
+ uint32_t num_items = 0;
+ uint32_t count = 0;
+ std::string key;
+ std::string value, old_value;
+
+ WriteOptions writeOptions;
+ ReadOptions readOptions;
+ while (count < 200000) {
+ if ((++count % 1000) == 0) {
+ std::cout << "count: " << count << std::endl;
+ }
+
+ int index = GenerateRandomNumber(test_map.size());
+ WriteBatch batch;
+
+ if (test_map[index] == nullptr) {
+ num_items++;
+ test_map[index].reset(new std::pair<std::string, std::string>(
+ CreateRandomString(index), CreateRandomString(index)));
+ batch.Put(test_map[index]->first, test_map[index]->second);
+ } else {
+ ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value));
+ if (old_value != test_map[index]->second) {
+ std::cout << "ERROR incorrect value returned by Get" << std::endl;
+ std::cout << " count=" << count << std::endl;
+ std::cout << " old value=" << old_value << std::endl;
+ std::cout << " test_map[index]->second=" << test_map[index]->second
+ << std::endl;
+ std::cout << " test_map[index]->first=" << test_map[index]->first
+ << std::endl;
+ std::cout << " index=" << index << std::endl;
+ ASSERT_EQ(old_value, test_map[index]->second);
+ }
+
+ if (num_items >= target_size && GenerateRandomNumber(100) > 30) {
+ batch.Delete(test_map[index]->first);
+ test_map[index] = nullptr;
+ --num_items;
+ } else {
+ test_map[index]->second = CreateRandomString(index);
+ if (delete_before_put) batch.Delete(test_map[index]->first);
+ batch.Put(test_map[index]->first, test_map[index]->second);
+ }
+ }
+
+ ASSERT_OK(db->Write(writeOptions, &batch));
+
+ if (keep_snapshots && GenerateRandomNumber(10) == 0) {
+ int i = GenerateRandomNumber(snapshots.size());
+ if (snapshots[i] != nullptr) {
+ db->ReleaseSnapshot(snapshots[i]);
+ }
+ snapshots[i] = db->GetSnapshot();
+ }
+ }
+
+ for (Snapshot const* snapshot : snapshots) {
+ if (snapshot) {
+ db->ReleaseSnapshot(snapshot);
+ }
+ }
+
+ delete db;
+ DestroyDB(dbpath, options);
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/port/README b/src/leveldb/port/README.md
index 422563e25c..8b171532e1 100644
--- a/src/leveldb/port/README
+++ b/src/leveldb/port/README.md
@@ -5,6 +5,6 @@ Code in the rest of the package includes "port.h" from this directory.
"port.h" in turn includes a platform specific "port_<platform>.h" file
that provides the platform specific implementation.
-See port_posix.h for an example of what must be provided in a platform
+See port_stdcxx.h for an example of what must be provided in a platform
specific header file.
diff --git a/src/leveldb/port/atomic_pointer.h b/src/leveldb/port/atomic_pointer.h
deleted file mode 100644
index d79a02230d..0000000000
--- a/src/leveldb/port/atomic_pointer.h
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// AtomicPointer provides storage for a lock-free pointer.
-// Platform-dependent implementation of AtomicPointer:
-// - If the platform provides a cheap barrier, we use it with raw pointers
-// - If <atomic> is present (on newer versions of gcc, it is), we use
-// a <atomic>-based AtomicPointer. However we prefer the memory
-// barrier based version, because at least on a gcc 4.4 32-bit build
-// on linux, we have encountered a buggy <atomic> implementation.
-// Also, some <atomic> implementations are much slower than a memory-barrier
-// based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for
-// a barrier based acquire-load).
-// This code is based on atomicops-internals-* in Google's perftools:
-// http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase
-
-#ifndef PORT_ATOMIC_POINTER_H_
-#define PORT_ATOMIC_POINTER_H_
-
-#include <stdint.h>
-#ifdef LEVELDB_ATOMIC_PRESENT
-#include <atomic>
-#endif
-#ifdef OS_WIN
-#include <windows.h>
-#endif
-#ifdef OS_MACOSX
-#include <libkern/OSAtomic.h>
-#endif
-
-#if defined(_M_X64) || defined(__x86_64__)
-#define ARCH_CPU_X86_FAMILY 1
-#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
-#define ARCH_CPU_X86_FAMILY 1
-#elif defined(__ARMEL__)
-#define ARCH_CPU_ARM_FAMILY 1
-#elif defined(__aarch64__)
-#define ARCH_CPU_ARM64_FAMILY 1
-#elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__)
-#define ARCH_CPU_PPC_FAMILY 1
-#elif defined(__mips__)
-#define ARCH_CPU_MIPS_FAMILY 1
-#endif
-
-namespace leveldb {
-namespace port {
-
-// AtomicPointer based on <cstdatomic> if available
-#if defined(LEVELDB_ATOMIC_PRESENT)
-class AtomicPointer {
- private:
- std::atomic<void*> rep_;
- public:
- AtomicPointer() { }
- explicit AtomicPointer(void* v) : rep_(v) { }
- inline void* Acquire_Load() const {
- return rep_.load(std::memory_order_acquire);
- }
- inline void Release_Store(void* v) {
- rep_.store(v, std::memory_order_release);
- }
- inline void* NoBarrier_Load() const {
- return rep_.load(std::memory_order_relaxed);
- }
- inline void NoBarrier_Store(void* v) {
- rep_.store(v, std::memory_order_relaxed);
- }
-};
-
-#else
-
-// Define MemoryBarrier() if available
-// Windows on x86
-#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
-// windows.h already provides a MemoryBarrier(void) macro
-// http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// Mac OS
-#elif defined(OS_MACOSX)
-inline void MemoryBarrier() {
- OSMemoryBarrier();
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// Gcc on x86
-#elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__)
-inline void MemoryBarrier() {
- // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
- // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
- __asm__ __volatile__("" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// Sun Studio
-#elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC)
-inline void MemoryBarrier() {
- // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
- // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
- asm volatile("" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// ARM Linux
-#elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__)
-typedef void (*LinuxKernelMemoryBarrierFunc)(void);
-// The Linux ARM kernel provides a highly optimized device-specific memory
-// barrier function at a fixed memory address that is mapped in every
-// user-level process.
-//
-// This beats using CPU-specific instructions which are, on single-core
-// devices, un-necessary and very costly (e.g. ARMv7-A "dmb" takes more
-// than 180ns on a Cortex-A8 like the one on a Nexus One). Benchmarking
-// shows that the extra function call cost is completely negligible on
-// multi-core devices.
-//
-inline void MemoryBarrier() {
- (*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)();
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// ARM64
-#elif defined(ARCH_CPU_ARM64_FAMILY)
-inline void MemoryBarrier() {
- asm volatile("dmb sy" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// PPC
-#elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__)
-inline void MemoryBarrier() {
- // TODO for some powerpc expert: is there a cheaper suitable variant?
- // Perhaps by having separate barriers for acquire and release ops.
- asm volatile("sync" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// MIPS
-#elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__)
-inline void MemoryBarrier() {
- __asm__ __volatile__("sync" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-#endif
-
-// AtomicPointer built using platform-specific MemoryBarrier()
-#if defined(LEVELDB_HAVE_MEMORY_BARRIER)
-class AtomicPointer {
- private:
- void* rep_;
- public:
- AtomicPointer() { }
- explicit AtomicPointer(void* p) : rep_(p) {}
- inline void* NoBarrier_Load() const { return rep_; }
- inline void NoBarrier_Store(void* v) { rep_ = v; }
- inline void* Acquire_Load() const {
- void* result = rep_;
- MemoryBarrier();
- return result;
- }
- inline void Release_Store(void* v) {
- MemoryBarrier();
- rep_ = v;
- }
-};
-
-// Atomic pointer based on sparc memory barriers
-#elif defined(__sparcv9) && defined(__GNUC__)
-class AtomicPointer {
- private:
- void* rep_;
- public:
- AtomicPointer() { }
- explicit AtomicPointer(void* v) : rep_(v) { }
- inline void* Acquire_Load() const {
- void* val;
- __asm__ __volatile__ (
- "ldx [%[rep_]], %[val] \n\t"
- "membar #LoadLoad|#LoadStore \n\t"
- : [val] "=r" (val)
- : [rep_] "r" (&rep_)
- : "memory");
- return val;
- }
- inline void Release_Store(void* v) {
- __asm__ __volatile__ (
- "membar #LoadStore|#StoreStore \n\t"
- "stx %[v], [%[rep_]] \n\t"
- :
- : [rep_] "r" (&rep_), [v] "r" (v)
- : "memory");
- }
- inline void* NoBarrier_Load() const { return rep_; }
- inline void NoBarrier_Store(void* v) { rep_ = v; }
-};
-
-// Atomic pointer based on ia64 acq/rel
-#elif defined(__ia64) && defined(__GNUC__)
-class AtomicPointer {
- private:
- void* rep_;
- public:
- AtomicPointer() { }
- explicit AtomicPointer(void* v) : rep_(v) { }
- inline void* Acquire_Load() const {
- void* val ;
- __asm__ __volatile__ (
- "ld8.acq %[val] = [%[rep_]] \n\t"
- : [val] "=r" (val)
- : [rep_] "r" (&rep_)
- : "memory"
- );
- return val;
- }
- inline void Release_Store(void* v) {
- __asm__ __volatile__ (
- "st8.rel [%[rep_]] = %[v] \n\t"
- :
- : [rep_] "r" (&rep_), [v] "r" (v)
- : "memory"
- );
- }
- inline void* NoBarrier_Load() const { return rep_; }
- inline void NoBarrier_Store(void* v) { rep_ = v; }
-};
-
-// We have neither MemoryBarrier(), nor <atomic>
-#else
-#error Please implement AtomicPointer for this platform.
-
-#endif
-#endif
-
-#undef LEVELDB_HAVE_MEMORY_BARRIER
-#undef ARCH_CPU_X86_FAMILY
-#undef ARCH_CPU_ARM_FAMILY
-#undef ARCH_CPU_ARM64_FAMILY
-#undef ARCH_CPU_PPC_FAMILY
-
-} // namespace port
-} // namespace leveldb
-
-#endif // PORT_ATOMIC_POINTER_H_
diff --git a/src/leveldb/port/port.h b/src/leveldb/port/port.h
index 4baafa8e22..4b247f74f9 100644
--- a/src/leveldb/port/port.h
+++ b/src/leveldb/port/port.h
@@ -10,12 +10,10 @@
// Include the appropriate platform specific file below. If you are
// porting to a new platform, see "port_example.h" for documentation
// of what the new port_<platform>.h file must provide.
-#if defined(LEVELDB_PLATFORM_POSIX)
-# include "port/port_posix.h"
+#if defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_WINDOWS)
+#include "port/port_stdcxx.h"
#elif defined(LEVELDB_PLATFORM_CHROMIUM)
-# include "port/port_chromium.h"
-#elif defined(LEVELDB_PLATFORM_WINDOWS)
-# include "port/port_win.h"
+#include "port/port_chromium.h"
#endif
#endif // STORAGE_LEVELDB_PORT_PORT_H_
diff --git a/src/leveldb/port/port_config.h.in b/src/leveldb/port/port_config.h.in
new file mode 100644
index 0000000000..21273153a3
--- /dev/null
+++ b/src/leveldb/port/port_config.h.in
@@ -0,0 +1,39 @@
+// Copyright 2017 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
+#define STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
+
+// Define to 1 if you have a definition for fdatasync() in <unistd.h>.
+#if !defined(HAVE_FDATASYNC)
+#cmakedefine01 HAVE_FDATASYNC
+#endif // !defined(HAVE_FDATASYNC)
+
+// Define to 1 if you have a definition for F_FULLFSYNC in <fcntl.h>.
+#if !defined(HAVE_FULLFSYNC)
+#cmakedefine01 HAVE_FULLFSYNC
+#endif // !defined(HAVE_FULLFSYNC)
+
+// Define to 1 if you have a definition for O_CLOEXEC in <fcntl.h>.
+#if !defined(HAVE_O_CLOEXEC)
+#cmakedefine01 HAVE_O_CLOEXEC
+#endif // !defined(HAVE_O_CLOEXEC)
+
+// Define to 1 if you have Google CRC32C.
+#if !defined(HAVE_CRC32C)
+#cmakedefine01 HAVE_CRC32C
+#endif // !defined(HAVE_CRC32C)
+
+// Define to 1 if you have Google Snappy.
+#if !defined(HAVE_SNAPPY)
+#cmakedefine01 HAVE_SNAPPY
+#endif // !defined(HAVE_SNAPPY)
+
+// Define to 1 if your processor stores words with the most significant byte
+// first (like Motorola and SPARC, unlike Intel and VAX).
+#if !defined(LEVELDB_IS_BIG_ENDIAN)
+#cmakedefine01 LEVELDB_IS_BIG_ENDIAN
+#endif // !defined(LEVELDB_IS_BIG_ENDIAN)
+
+#endif // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
\ No newline at end of file
diff --git a/src/leveldb/port/port_example.h b/src/leveldb/port/port_example.h
index 5b1d027de5..1a8fca24b3 100644
--- a/src/leveldb/port/port_example.h
+++ b/src/leveldb/port/port_example.h
@@ -10,6 +10,8 @@
#ifndef STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
#define STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
+#include "port/thread_annotations.h"
+
namespace leveldb {
namespace port {
@@ -23,23 +25,23 @@ static const bool kLittleEndian = true /* or some other expression */;
// ------------------ Threading -------------------
// A Mutex represents an exclusive lock.
-class Mutex {
+class LOCKABLE Mutex {
public:
Mutex();
~Mutex();
// Lock the mutex. Waits until other lockers have exited.
// Will deadlock if the mutex is already locked by this thread.
- void Lock();
+ void Lock() EXCLUSIVE_LOCK_FUNCTION();
// Unlock the mutex.
// REQUIRES: This mutex was locked by this thread.
- void Unlock();
+ void Unlock() UNLOCK_FUNCTION();
// Optionally crash if this thread does not hold this mutex.
// The implementation must be fast, especially if NDEBUG is
// defined. The implementation is allowed to skip all checks.
- void AssertHeld();
+ void AssertHeld() ASSERT_EXCLUSIVE_LOCK();
};
class CondVar {
@@ -60,57 +62,18 @@ class CondVar {
void SignallAll();
};
-// Thread-safe initialization.
-// Used as follows:
-// static port::OnceType init_control = LEVELDB_ONCE_INIT;
-// static void Initializer() { ... do something ...; }
-// ...
-// port::InitOnce(&init_control, &Initializer);
-typedef intptr_t OnceType;
-#define LEVELDB_ONCE_INIT 0
-extern void InitOnce(port::OnceType*, void (*initializer)());
-
-// A type that holds a pointer that can be read or written atomically
-// (i.e., without word-tearing.)
-class AtomicPointer {
- private:
- intptr_t rep_;
- public:
- // Initialize to arbitrary value
- AtomicPointer();
-
- // Initialize to hold v
- explicit AtomicPointer(void* v) : rep_(v) { }
-
- // Read and return the stored pointer with the guarantee that no
- // later memory access (read or write) by this thread can be
- // reordered ahead of this read.
- void* Acquire_Load() const;
-
- // Set v as the stored pointer with the guarantee that no earlier
- // memory access (read or write) by this thread can be reordered
- // after this store.
- void Release_Store(void* v);
-
- // Read the stored pointer with no ordering guarantees.
- void* NoBarrier_Load() const;
-
- // Set va as the stored pointer with no ordering guarantees.
- void NoBarrier_Store(void* v);
-};
-
// ------------------ Compression -------------------
// Store the snappy compression of "input[0,input_length-1]" in *output.
// Returns false if snappy is not supported by this port.
-extern bool Snappy_Compress(const char* input, size_t input_length,
- std::string* output);
+bool Snappy_Compress(const char* input, size_t input_length,
+ std::string* output);
// If input[0,input_length-1] looks like a valid snappy compressed
// buffer, store the size of the uncompressed data in *result and
// return true. Else return false.
-extern bool Snappy_GetUncompressedLength(const char* input, size_t length,
- size_t* result);
+bool Snappy_GetUncompressedLength(const char* input, size_t length,
+ size_t* result);
// Attempt to snappy uncompress input[0,input_length-1] into *output.
// Returns true if successful, false if the input is invalid lightweight
@@ -119,19 +82,15 @@ extern bool Snappy_GetUncompressedLength(const char* input, size_t length,
// REQUIRES: at least the first "n" bytes of output[] must be writable
// where "n" is the result of a successful call to
// Snappy_GetUncompressedLength.
-extern bool Snappy_Uncompress(const char* input_data, size_t input_length,
- char* output);
+bool Snappy_Uncompress(const char* input_data, size_t input_length,
+ char* output);
// ------------------ Miscellaneous -------------------
// If heap profiling is not supported, returns false.
// Else repeatedly calls (*func)(arg, data, n) and then returns true.
// The concatenation of all "data[0,n-1]" fragments is the heap profile.
-extern bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg);
-
-// Determine whether a working accelerated crc32 implementation exists
-// Returns true if AcceleratedCRC32C is safe to call
-bool HasAcceleratedCRC32C();
+bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg);
// Extend the CRC to include the first n bytes of buf.
//
diff --git a/src/leveldb/port/port_posix.cc b/src/leveldb/port/port_posix.cc
deleted file mode 100644
index ec39e92195..0000000000
--- a/src/leveldb/port/port_posix.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "port/port_posix.h"
-
-#include <cstdlib>
-#include <stdio.h>
-#include <string.h>
-
-#if (defined(__x86_64__) || defined(__i386__)) && defined(__GNUC__)
-#include <cpuid.h>
-#endif
-
-namespace leveldb {
-namespace port {
-
-static void PthreadCall(const char* label, int result) {
- if (result != 0) {
- fprintf(stderr, "pthread %s: %s\n", label, strerror(result));
- abort();
- }
-}
-
-Mutex::Mutex() { PthreadCall("init mutex", pthread_mutex_init(&mu_, NULL)); }
-
-Mutex::~Mutex() { PthreadCall("destroy mutex", pthread_mutex_destroy(&mu_)); }
-
-void Mutex::Lock() { PthreadCall("lock", pthread_mutex_lock(&mu_)); }
-
-void Mutex::Unlock() { PthreadCall("unlock", pthread_mutex_unlock(&mu_)); }
-
-CondVar::CondVar(Mutex* mu)
- : mu_(mu) {
- PthreadCall("init cv", pthread_cond_init(&cv_, NULL));
-}
-
-CondVar::~CondVar() { PthreadCall("destroy cv", pthread_cond_destroy(&cv_)); }
-
-void CondVar::Wait() {
- PthreadCall("wait", pthread_cond_wait(&cv_, &mu_->mu_));
-}
-
-void CondVar::Signal() {
- PthreadCall("signal", pthread_cond_signal(&cv_));
-}
-
-void CondVar::SignalAll() {
- PthreadCall("broadcast", pthread_cond_broadcast(&cv_));
-}
-
-void InitOnce(OnceType* once, void (*initializer)()) {
- PthreadCall("once", pthread_once(once, initializer));
-}
-
-bool HasAcceleratedCRC32C() {
-#if (defined(__x86_64__) || defined(__i386__)) && defined(__GNUC__)
- unsigned int eax, ebx, ecx, edx;
- __get_cpuid(1, &eax, &ebx, &ecx, &edx);
- return (ecx & (1 << 20)) != 0;
-#else
- return false;
-#endif
-}
-
-} // namespace port
-} // namespace leveldb
diff --git a/src/leveldb/port/port_posix.h b/src/leveldb/port/port_posix.h
deleted file mode 100644
index d85fa5d63f..0000000000
--- a/src/leveldb/port/port_posix.h
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// See port_example.h for documentation for the following types/functions.
-
-#ifndef STORAGE_LEVELDB_PORT_PORT_POSIX_H_
-#define STORAGE_LEVELDB_PORT_PORT_POSIX_H_
-
-#undef PLATFORM_IS_LITTLE_ENDIAN
-#if defined(OS_MACOSX)
- #include <machine/endian.h>
- #if defined(__DARWIN_LITTLE_ENDIAN) && defined(__DARWIN_BYTE_ORDER)
- #define PLATFORM_IS_LITTLE_ENDIAN \
- (__DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN)
- #endif
-#elif defined(OS_SOLARIS)
- #include <sys/isa_defs.h>
- #ifdef _LITTLE_ENDIAN
- #define PLATFORM_IS_LITTLE_ENDIAN true
- #else
- #define PLATFORM_IS_LITTLE_ENDIAN false
- #endif
-#elif defined(OS_FREEBSD) || defined(OS_OPENBSD) ||\
- defined(OS_NETBSD) || defined(OS_DRAGONFLYBSD)
- #include <sys/types.h>
- #include <sys/endian.h>
- #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
-#elif defined(OS_HPUX)
- #define PLATFORM_IS_LITTLE_ENDIAN false
-#elif defined(OS_ANDROID)
- // Due to a bug in the NDK x86 <sys/endian.h> definition,
- // _BYTE_ORDER must be used instead of __BYTE_ORDER on Android.
- // See http://code.google.com/p/android/issues/detail?id=39824
- #include <endian.h>
- #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
-#else
- #include <endian.h>
-#endif
-
-#include <pthread.h>
-#ifdef SNAPPY
-#include <snappy.h>
-#endif
-#include <stdint.h>
-#include <string>
-#include "port/atomic_pointer.h"
-
-#ifndef PLATFORM_IS_LITTLE_ENDIAN
-#define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN)
-#endif
-
-#if defined(OS_MACOSX) || defined(OS_SOLARIS) || defined(OS_FREEBSD) ||\
- defined(OS_NETBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) ||\
- defined(OS_ANDROID) || defined(OS_HPUX) || defined(CYGWIN)
-// Use fread/fwrite/fflush on platforms without _unlocked variants
-#define fread_unlocked fread
-#define fwrite_unlocked fwrite
-#define fflush_unlocked fflush
-#endif
-
-#if defined(OS_FREEBSD) ||\
- defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD)
-// Use fsync() on platforms without fdatasync()
-#define fdatasync fsync
-#endif
-
-#if defined(OS_MACOSX)
-#define fdatasync(fd) fcntl(fd, F_FULLFSYNC, 0)
-#endif
-
-#if defined(OS_ANDROID) && __ANDROID_API__ < 9
-// fdatasync() was only introduced in API level 9 on Android. Use fsync()
-// when targetting older platforms.
-#define fdatasync fsync
-#endif
-
-namespace leveldb {
-namespace port {
-
-static const bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN;
-#undef PLATFORM_IS_LITTLE_ENDIAN
-
-class CondVar;
-
-class Mutex {
- public:
- Mutex();
- ~Mutex();
-
- void Lock();
- void Unlock();
- void AssertHeld() { }
-
- private:
- friend class CondVar;
- pthread_mutex_t mu_;
-
- // No copying
- Mutex(const Mutex&);
- void operator=(const Mutex&);
-};
-
-class CondVar {
- public:
- explicit CondVar(Mutex* mu);
- ~CondVar();
- void Wait();
- void Signal();
- void SignalAll();
- private:
- pthread_cond_t cv_;
- Mutex* mu_;
-};
-
-typedef pthread_once_t OnceType;
-#define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT
-extern void InitOnce(OnceType* once, void (*initializer)());
-
-inline bool Snappy_Compress(const char* input, size_t length,
- ::std::string* output) {
-#ifdef SNAPPY
- output->resize(snappy::MaxCompressedLength(length));
- size_t outlen;
- snappy::RawCompress(input, length, &(*output)[0], &outlen);
- output->resize(outlen);
- return true;
-#endif
-
- return false;
-}
-
-inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
- size_t* result) {
-#ifdef SNAPPY
- return snappy::GetUncompressedLength(input, length, result);
-#else
- return false;
-#endif
-}
-
-inline bool Snappy_Uncompress(const char* input, size_t length,
- char* output) {
-#ifdef SNAPPY
- return snappy::RawUncompress(input, length, output);
-#else
- return false;
-#endif
-}
-
-inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
- return false;
-}
-
-bool HasAcceleratedCRC32C();
-uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size);
-
-} // namespace port
-} // namespace leveldb
-
-#endif // STORAGE_LEVELDB_PORT_PORT_POSIX_H_
diff --git a/src/leveldb/port/port_posix_sse.cc b/src/leveldb/port/port_posix_sse.cc
deleted file mode 100644
index 2d49c21dd8..0000000000
--- a/src/leveldb/port/port_posix_sse.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2016 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// A portable implementation of crc32c, optimized to handle
-// four bytes at a time.
-//
-// In a separate source file to allow this accelerated CRC32C function to be
-// compiled with the appropriate compiler flags to enable x86 SSE 4.2
-// instructions.
-
-#include <stdint.h>
-#include <string.h>
-#include "port/port.h"
-
-#if defined(LEVELDB_PLATFORM_POSIX_SSE)
-
-#if defined(_MSC_VER)
-#include <intrin.h>
-#elif defined(__GNUC__) && defined(__SSE4_2__)
-#include <nmmintrin.h>
-#endif
-
-#endif // defined(LEVELDB_PLATFORM_POSIX_SSE)
-
-namespace leveldb {
-namespace port {
-
-#if defined(LEVELDB_PLATFORM_POSIX_SSE)
-
-// Used to fetch a naturally-aligned 32-bit word in little endian byte-order
-static inline uint32_t LE_LOAD32(const uint8_t *p) {
- // SSE is x86 only, so ensured that |p| is always little-endian.
- uint32_t word;
- memcpy(&word, p, sizeof(word));
- return word;
-}
-
-#if defined(_M_X64) || defined(__x86_64__) // LE_LOAD64 is only used on x64.
-
-// Used to fetch a naturally-aligned 64-bit word in little endian byte-order
-static inline uint64_t LE_LOAD64(const uint8_t *p) {
- uint64_t dword;
- memcpy(&dword, p, sizeof(dword));
- return dword;
-}
-
-#endif // defined(_M_X64) || defined(__x86_64__)
-
-#endif // defined(LEVELDB_PLATFORM_POSIX_SSE)
-
-// For further improvements see Intel publication at:
-// http://download.intel.com/design/intarch/papers/323405.pdf
-uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
-#if !defined(LEVELDB_PLATFORM_POSIX_SSE)
- return 0;
-#else
-
- const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
- const uint8_t *e = p + size;
- uint32_t l = crc ^ 0xffffffffu;
-
-#define STEP1 do { \
- l = _mm_crc32_u8(l, *p++); \
-} while (0)
-#define STEP4 do { \
- l = _mm_crc32_u32(l, LE_LOAD32(p)); \
- p += 4; \
-} while (0)
-#define STEP8 do { \
- l = _mm_crc32_u64(l, LE_LOAD64(p)); \
- p += 8; \
-} while (0)
-
- if (size > 16) {
- // Process unaligned bytes
- for (unsigned int i = reinterpret_cast<uintptr_t>(p) % 8; i; --i) {
- STEP1;
- }
-
- // _mm_crc32_u64 is only available on x64.
-#if defined(_M_X64) || defined(__x86_64__)
- // Process 8 bytes at a time
- while ((e-p) >= 8) {
- STEP8;
- }
- // Process 4 bytes at a time
- if ((e-p) >= 4) {
- STEP4;
- }
-#else // !(defined(_M_X64) || defined(__x86_64__))
- // Process 4 bytes at a time
- while ((e-p) >= 4) {
- STEP4;
- }
-#endif // defined(_M_X64) || defined(__x86_64__)
- }
- // Process the last few bytes
- while (p != e) {
- STEP1;
- }
-#undef STEP8
-#undef STEP4
-#undef STEP1
- return l ^ 0xffffffffu;
-#endif // defined(LEVELDB_PLATFORM_POSIX_SSE)
-}
-
-} // namespace port
-} // namespace leveldb
diff --git a/src/leveldb/port/port_stdcxx.h b/src/leveldb/port/port_stdcxx.h
new file mode 100644
index 0000000000..e9cb0e53af
--- /dev/null
+++ b/src/leveldb/port/port_stdcxx.h
@@ -0,0 +1,153 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
+#define STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
+
+// port/port_config.h availability is automatically detected via __has_include
+// in newer compilers. If LEVELDB_HAS_PORT_CONFIG_H is defined, it overrides the
+// configuration detection.
+#if defined(LEVELDB_HAS_PORT_CONFIG_H)
+
+#if LEVELDB_HAS_PORT_CONFIG_H
+#include "port/port_config.h"
+#endif // LEVELDB_HAS_PORT_CONFIG_H
+
+#elif defined(__has_include)
+
+#if __has_include("port/port_config.h")
+#include "port/port_config.h"
+#endif // __has_include("port/port_config.h")
+
+#endif // defined(LEVELDB_HAS_PORT_CONFIG_H)
+
+#if HAVE_CRC32C
+#include <crc32c/crc32c.h>
+#endif // HAVE_CRC32C
+#if HAVE_SNAPPY
+#include <snappy.h>
+#endif // HAVE_SNAPPY
+
+#include <cassert>
+#include <condition_variable> // NOLINT
+#include <cstddef>
+#include <cstdint>
+#include <mutex> // NOLINT
+#include <string>
+
+#include "port/thread_annotations.h"
+
+namespace leveldb {
+namespace port {
+
+static const bool kLittleEndian = !LEVELDB_IS_BIG_ENDIAN;
+
+class CondVar;
+
+// Thinly wraps std::mutex.
+class LOCKABLE Mutex {
+ public:
+ Mutex() = default;
+ ~Mutex() = default;
+
+ Mutex(const Mutex&) = delete;
+ Mutex& operator=(const Mutex&) = delete;
+
+ void Lock() EXCLUSIVE_LOCK_FUNCTION() { mu_.lock(); }
+ void Unlock() UNLOCK_FUNCTION() { mu_.unlock(); }
+ void AssertHeld() ASSERT_EXCLUSIVE_LOCK() {}
+
+ private:
+ friend class CondVar;
+ std::mutex mu_;
+};
+
+// Thinly wraps std::condition_variable.
+class CondVar {
+ public:
+ explicit CondVar(Mutex* mu) : mu_(mu) { assert(mu != nullptr); }
+ ~CondVar() = default;
+
+ CondVar(const CondVar&) = delete;
+ CondVar& operator=(const CondVar&) = delete;
+
+ void Wait() {
+ std::unique_lock<std::mutex> lock(mu_->mu_, std::adopt_lock);
+ cv_.wait(lock);
+ lock.release();
+ }
+ void Signal() { cv_.notify_one(); }
+ void SignalAll() { cv_.notify_all(); }
+
+ private:
+ std::condition_variable cv_;
+ Mutex* const mu_;
+};
+
+inline bool Snappy_Compress(const char* input, size_t length,
+ std::string* output) {
+#if HAVE_SNAPPY
+ output->resize(snappy::MaxCompressedLength(length));
+ size_t outlen;
+ snappy::RawCompress(input, length, &(*output)[0], &outlen);
+ output->resize(outlen);
+ return true;
+#else
+ // Silence compiler warnings about unused arguments.
+ (void)input;
+ (void)length;
+ (void)output;
+#endif // HAVE_SNAPPY
+
+ return false;
+}
+
+inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
+ size_t* result) {
+#if HAVE_SNAPPY
+ return snappy::GetUncompressedLength(input, length, result);
+#else
+ // Silence compiler warnings about unused arguments.
+ (void)input;
+ (void)length;
+ (void)result;
+ return false;
+#endif // HAVE_SNAPPY
+}
+
+inline bool Snappy_Uncompress(const char* input, size_t length, char* output) {
+#if HAVE_SNAPPY
+ return snappy::RawUncompress(input, length, output);
+#else
+ // Silence compiler warnings about unused arguments.
+ (void)input;
+ (void)length;
+ (void)output;
+ return false;
+#endif // HAVE_SNAPPY
+}
+
+inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
+ // Silence compiler warnings about unused arguments.
+ (void)func;
+ (void)arg;
+ return false;
+}
+
+inline uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
+#if HAVE_CRC32C
+ return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t*>(buf), size);
+#else
+ // Silence compiler warnings about unused arguments.
+ (void)crc;
+ (void)buf;
+ (void)size;
+ return 0;
+#endif // HAVE_CRC32C
+}
+
+} // namespace port
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
diff --git a/src/leveldb/port/port_win.cc b/src/leveldb/port/port_win.cc
deleted file mode 100644
index 1be9e8d5b0..0000000000
--- a/src/leveldb/port/port_win.cc
+++ /dev/null
@@ -1,158 +0,0 @@
-// LevelDB Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// See port_example.h for documentation for the following types/functions.
-
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the University of California, Berkeley nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#include "port/port_win.h"
-
-#include <windows.h>
-#include <cassert>
-#include <intrin.h>
-
-namespace leveldb {
-namespace port {
-
-Mutex::Mutex() :
- cs_(NULL) {
- assert(!cs_);
- cs_ = static_cast<void *>(new CRITICAL_SECTION());
- ::InitializeCriticalSection(static_cast<CRITICAL_SECTION *>(cs_));
- assert(cs_);
-}
-
-Mutex::~Mutex() {
- assert(cs_);
- ::DeleteCriticalSection(static_cast<CRITICAL_SECTION *>(cs_));
- delete static_cast<CRITICAL_SECTION *>(cs_);
- cs_ = NULL;
- assert(!cs_);
-}
-
-void Mutex::Lock() {
- assert(cs_);
- ::EnterCriticalSection(static_cast<CRITICAL_SECTION *>(cs_));
-}
-
-void Mutex::Unlock() {
- assert(cs_);
- ::LeaveCriticalSection(static_cast<CRITICAL_SECTION *>(cs_));
-}
-
-void Mutex::AssertHeld() {
- assert(cs_);
- assert(1);
-}
-
-CondVar::CondVar(Mutex* mu) :
- waiting_(0),
- mu_(mu),
- sem1_(::CreateSemaphore(NULL, 0, 10000, NULL)),
- sem2_(::CreateSemaphore(NULL, 0, 10000, NULL)) {
- assert(mu_);
-}
-
-CondVar::~CondVar() {
- ::CloseHandle(sem1_);
- ::CloseHandle(sem2_);
-}
-
-void CondVar::Wait() {
- mu_->AssertHeld();
-
- wait_mtx_.Lock();
- ++waiting_;
- wait_mtx_.Unlock();
-
- mu_->Unlock();
-
- // initiate handshake
- ::WaitForSingleObject(sem1_, INFINITE);
- ::ReleaseSemaphore(sem2_, 1, NULL);
- mu_->Lock();
-}
-
-void CondVar::Signal() {
- wait_mtx_.Lock();
- if (waiting_ > 0) {
- --waiting_;
-
- // finalize handshake
- ::ReleaseSemaphore(sem1_, 1, NULL);
- ::WaitForSingleObject(sem2_, INFINITE);
- }
- wait_mtx_.Unlock();
-}
-
-void CondVar::SignalAll() {
- wait_mtx_.Lock();
- ::ReleaseSemaphore(sem1_, waiting_, NULL);
- while(waiting_ > 0) {
- --waiting_;
- ::WaitForSingleObject(sem2_, INFINITE);
- }
- wait_mtx_.Unlock();
-}
-
-AtomicPointer::AtomicPointer(void* v) {
- Release_Store(v);
-}
-
-void InitOnce(OnceType* once, void (*initializer)()) {
- once->InitOnce(initializer);
-}
-
-void* AtomicPointer::Acquire_Load() const {
- void * p = NULL;
- InterlockedExchangePointer(&p, rep_);
- return p;
-}
-
-void AtomicPointer::Release_Store(void* v) {
- InterlockedExchangePointer(&rep_, v);
-}
-
-void* AtomicPointer::NoBarrier_Load() const {
- return rep_;
-}
-
-void AtomicPointer::NoBarrier_Store(void* v) {
- rep_ = v;
-}
-
-bool HasAcceleratedCRC32C() {
-#if defined(__x86_64__) || defined(__i386__)
- int cpu_info[4];
- __cpuid(cpu_info, 1);
- return (cpu_info[2] & (1 << 20)) != 0;
-#else
- return false;
-#endif
-}
-
-}
-}
diff --git a/src/leveldb/port/port_win.h b/src/leveldb/port/port_win.h
deleted file mode 100644
index 989c15cd91..0000000000
--- a/src/leveldb/port/port_win.h
+++ /dev/null
@@ -1,184 +0,0 @@
-// LevelDB Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// See port_example.h for documentation for the following types/functions.
-
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the University of California, Berkeley nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#ifndef STORAGE_LEVELDB_PORT_PORT_WIN_H_
-#define STORAGE_LEVELDB_PORT_PORT_WIN_H_
-
-#ifdef _MSC_VER
-#if !(_MSC_VER >= 1900)
-#define snprintf _snprintf
-#endif
-#define close _close
-#define fread_unlocked _fread_nolock
-#ifdef _WIN64
-#define ssize_t int64_t
-#else
-#define ssize_t int32_t
-#endif
-#endif
-
-#include <string>
-#include <stdint.h>
-#ifdef SNAPPY
-#include <snappy.h>
-#endif
-
-namespace leveldb {
-namespace port {
-
-// Windows is little endian (for now :p)
-static const bool kLittleEndian = true;
-
-class CondVar;
-
-class Mutex {
- public:
- Mutex();
- ~Mutex();
-
- void Lock();
- void Unlock();
- void AssertHeld();
-
- private:
- friend class CondVar;
- // critical sections are more efficient than mutexes
- // but they are not recursive and can only be used to synchronize threads within the same process
- // we use opaque void * to avoid including windows.h in port_win.h
- void * cs_;
-
- // No copying
- Mutex(const Mutex&);
- void operator=(const Mutex&);
-};
-
-// the Win32 API offers a dependable condition variable mechanism, but only starting with
-// Windows 2008 and Vista
-// no matter what we will implement our own condition variable with a semaphore
-// implementation as described in a paper written by Andrew D. Birrell in 2003
-class CondVar {
- public:
- explicit CondVar(Mutex* mu);
- ~CondVar();
- void Wait();
- void Signal();
- void SignalAll();
- private:
- Mutex* mu_;
-
- Mutex wait_mtx_;
- long waiting_;
-
- void * sem1_;
- void * sem2_;
-
-
-};
-
-class OnceType {
-public:
-// OnceType() : init_(false) {}
- OnceType(const OnceType &once) : init_(once.init_) {}
- OnceType(bool f) : init_(f) {}
- void InitOnce(void (*initializer)()) {
- mutex_.Lock();
- if (!init_) {
- init_ = true;
- initializer();
- }
- mutex_.Unlock();
- }
-
-private:
- bool init_;
- Mutex mutex_;
-};
-
-#define LEVELDB_ONCE_INIT false
-extern void InitOnce(port::OnceType*, void (*initializer)());
-
-// Storage for a lock-free pointer
-class AtomicPointer {
- private:
- void * rep_;
- public:
- AtomicPointer() : rep_(NULL) { }
- explicit AtomicPointer(void* v);
- void* Acquire_Load() const;
-
- void Release_Store(void* v);
-
- void* NoBarrier_Load() const;
-
- void NoBarrier_Store(void* v);
-};
-
-inline bool Snappy_Compress(const char* input, size_t length,
- ::std::string* output) {
-#ifdef SNAPPY
- output->resize(snappy::MaxCompressedLength(length));
- size_t outlen;
- snappy::RawCompress(input, length, &(*output)[0], &outlen);
- output->resize(outlen);
- return true;
-#endif
-
- return false;
-}
-
-inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
- size_t* result) {
-#ifdef SNAPPY
- return snappy::GetUncompressedLength(input, length, result);
-#else
- return false;
-#endif
-}
-
-inline bool Snappy_Uncompress(const char* input, size_t length,
- char* output) {
-#ifdef SNAPPY
- return snappy::RawUncompress(input, length, output);
-#else
- return false;
-#endif
-}
-
-inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
- return false;
-}
-
-bool HasAcceleratedCRC32C();
-uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size);
-
-}
-}
-
-#endif // STORAGE_LEVELDB_PORT_PORT_WIN_H_
diff --git a/src/leveldb/port/thread_annotations.h b/src/leveldb/port/thread_annotations.h
index 9470ef587c..1547df908f 100644
--- a/src/leveldb/port/thread_annotations.h
+++ b/src/leveldb/port/thread_annotations.h
@@ -5,56 +5,104 @@
#ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
#define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
-// Some environments provide custom macros to aid in static thread-safety
-// analysis. Provide empty definitions of such macros unless they are already
-// defined.
+// Use Clang's thread safety analysis annotations when available. In other
+// environments, the macros receive empty definitions.
+// Usage documentation: https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+
+#if !defined(THREAD_ANNOTATION_ATTRIBUTE__)
+
+#if defined(__clang__)
+
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
+#endif
+
+#endif // !defined(THREAD_ANNOTATION_ATTRIBUTE__)
+
+#ifndef GUARDED_BY
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+#endif
+
+#ifndef PT_GUARDED_BY
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+#endif
+
+#ifndef ACQUIRED_AFTER
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+#endif
+
+#ifndef ACQUIRED_BEFORE
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+#endif
#ifndef EXCLUSIVE_LOCKS_REQUIRED
-#define EXCLUSIVE_LOCKS_REQUIRED(...)
+#define EXCLUSIVE_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
#endif
#ifndef SHARED_LOCKS_REQUIRED
-#define SHARED_LOCKS_REQUIRED(...)
+#define SHARED_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
#endif
#ifndef LOCKS_EXCLUDED
-#define LOCKS_EXCLUDED(...)
+#define LOCKS_EXCLUDED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
#endif
#ifndef LOCK_RETURNED
-#define LOCK_RETURNED(x)
+#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
#endif
#ifndef LOCKABLE
-#define LOCKABLE
+#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
#endif
#ifndef SCOPED_LOCKABLE
-#define SCOPED_LOCKABLE
+#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
#endif
#ifndef EXCLUSIVE_LOCK_FUNCTION
-#define EXCLUSIVE_LOCK_FUNCTION(...)
+#define EXCLUSIVE_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
#endif
#ifndef SHARED_LOCK_FUNCTION
-#define SHARED_LOCK_FUNCTION(...)
+#define SHARED_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
#endif
#ifndef EXCLUSIVE_TRYLOCK_FUNCTION
-#define EXCLUSIVE_TRYLOCK_FUNCTION(...)
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
#endif
#ifndef SHARED_TRYLOCK_FUNCTION
-#define SHARED_TRYLOCK_FUNCTION(...)
+#define SHARED_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
#endif
#ifndef UNLOCK_FUNCTION
-#define UNLOCK_FUNCTION(...)
+#define UNLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
#endif
#ifndef NO_THREAD_SAFETY_ANALYSIS
-#define NO_THREAD_SAFETY_ANALYSIS
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+#endif
+
+#ifndef ASSERT_EXCLUSIVE_LOCK
+#define ASSERT_EXCLUSIVE_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+#endif
+
+#ifndef ASSERT_SHARED_LOCK
+#define ASSERT_SHARED_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
#endif
#endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
diff --git a/src/leveldb/port/win/stdint.h b/src/leveldb/port/win/stdint.h
deleted file mode 100644
index 39edd0db13..0000000000
--- a/src/leveldb/port/win/stdint.h
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// MSVC didn't ship with this file until the 2010 version.
-
-#ifndef STORAGE_LEVELDB_PORT_WIN_STDINT_H_
-#define STORAGE_LEVELDB_PORT_WIN_STDINT_H_
-
-#if !defined(_MSC_VER)
-#error This file should only be included when compiling with MSVC.
-#endif
-
-// Define C99 equivalent types.
-typedef signed char int8_t;
-typedef signed short int16_t;
-typedef signed int int32_t;
-typedef signed long long int64_t;
-typedef unsigned char uint8_t;
-typedef unsigned short uint16_t;
-typedef unsigned int uint32_t;
-typedef unsigned long long uint64_t;
-
-#endif // STORAGE_LEVELDB_PORT_WIN_STDINT_H_
diff --git a/src/leveldb/table/block.cc b/src/leveldb/table/block.cc
index 43e402c9c0..2fe89eaa45 100644
--- a/src/leveldb/table/block.cc
+++ b/src/leveldb/table/block.cc
@@ -6,8 +6,10 @@
#include "table/block.h"
-#include <vector>
#include <algorithm>
+#include <cstdint>
+#include <vector>
+
#include "leveldb/comparator.h"
#include "table/format.h"
#include "util/coding.h"
@@ -27,7 +29,7 @@ Block::Block(const BlockContents& contents)
if (size_ < sizeof(uint32_t)) {
size_ = 0; // Error marker
} else {
- size_t max_restarts_allowed = (size_-sizeof(uint32_t)) / sizeof(uint32_t);
+ size_t max_restarts_allowed = (size_ - sizeof(uint32_t)) / sizeof(uint32_t);
if (NumRestarts() > max_restarts_allowed) {
// The size is too small for NumRestarts()
size_ = 0;
@@ -48,27 +50,26 @@ Block::~Block() {
// and the length of the value in "*shared", "*non_shared", and
// "*value_length", respectively. Will not dereference past "limit".
//
-// If any errors are detected, returns NULL. Otherwise, returns a
+// If any errors are detected, returns nullptr. Otherwise, returns a
// pointer to the key delta (just past the three decoded values).
static inline const char* DecodeEntry(const char* p, const char* limit,
- uint32_t* shared,
- uint32_t* non_shared,
+ uint32_t* shared, uint32_t* non_shared,
uint32_t* value_length) {
- if (limit - p < 3) return NULL;
- *shared = reinterpret_cast<const unsigned char*>(p)[0];
- *non_shared = reinterpret_cast<const unsigned char*>(p)[1];
- *value_length = reinterpret_cast<const unsigned char*>(p)[2];
+ if (limit - p < 3) return nullptr;
+ *shared = reinterpret_cast<const uint8_t*>(p)[0];
+ *non_shared = reinterpret_cast<const uint8_t*>(p)[1];
+ *value_length = reinterpret_cast<const uint8_t*>(p)[2];
if ((*shared | *non_shared | *value_length) < 128) {
// Fast path: all three values are encoded in one byte each
p += 3;
} else {
- if ((p = GetVarint32Ptr(p, limit, shared)) == NULL) return NULL;
- if ((p = GetVarint32Ptr(p, limit, non_shared)) == NULL) return NULL;
- if ((p = GetVarint32Ptr(p, limit, value_length)) == NULL) return NULL;
+ if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr;
+ if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr;
+ if ((p = GetVarint32Ptr(p, limit, value_length)) == nullptr) return nullptr;
}
if (static_cast<uint32_t>(limit - p) < (*non_shared + *value_length)) {
- return NULL;
+ return nullptr;
}
return p;
}
@@ -76,9 +77,9 @@ static inline const char* DecodeEntry(const char* p, const char* limit,
class Block::Iter : public Iterator {
private:
const Comparator* const comparator_;
- const char* const data_; // underlying block contents
- uint32_t const restarts_; // Offset of restart array (list of fixed32)
- uint32_t const num_restarts_; // Number of uint32_t entries in restart array
+ const char* const data_; // underlying block contents
+ uint32_t const restarts_; // Offset of restart array (list of fixed32)
+ uint32_t const num_restarts_; // Number of uint32_t entries in restart array
// current_ is offset in data_ of current entry. >= restarts_ if !Valid
uint32_t current_;
@@ -112,9 +113,7 @@ class Block::Iter : public Iterator {
}
public:
- Iter(const Comparator* comparator,
- const char* data,
- uint32_t restarts,
+ Iter(const Comparator* comparator, const char* data, uint32_t restarts,
uint32_t num_restarts)
: comparator_(comparator),
data_(data),
@@ -125,23 +124,23 @@ class Block::Iter : public Iterator {
assert(num_restarts_ > 0);
}
- virtual bool Valid() const { return current_ < restarts_; }
- virtual Status status() const { return status_; }
- virtual Slice key() const {
+ bool Valid() const override { return current_ < restarts_; }
+ Status status() const override { return status_; }
+ Slice key() const override {
assert(Valid());
return key_;
}
- virtual Slice value() const {
+ Slice value() const override {
assert(Valid());
return value_;
}
- virtual void Next() {
+ void Next() override {
assert(Valid());
ParseNextKey();
}
- virtual void Prev() {
+ void Prev() override {
assert(Valid());
// Scan backwards to a restart point before current_
@@ -162,7 +161,7 @@ class Block::Iter : public Iterator {
} while (ParseNextKey() && NextEntryOffset() < original);
}
- virtual void Seek(const Slice& target) {
+ void Seek(const Slice& target) override {
// Binary search in restart array to find the last restart point
// with a key < target
uint32_t left = 0;
@@ -171,10 +170,10 @@ class Block::Iter : public Iterator {
uint32_t mid = (left + right + 1) / 2;
uint32_t region_offset = GetRestartPoint(mid);
uint32_t shared, non_shared, value_length;
- const char* key_ptr = DecodeEntry(data_ + region_offset,
- data_ + restarts_,
- &shared, &non_shared, &value_length);
- if (key_ptr == NULL || (shared != 0)) {
+ const char* key_ptr =
+ DecodeEntry(data_ + region_offset, data_ + restarts_, &shared,
+ &non_shared, &value_length);
+ if (key_ptr == nullptr || (shared != 0)) {
CorruptionError();
return;
}
@@ -202,12 +201,12 @@ class Block::Iter : public Iterator {
}
}
- virtual void SeekToFirst() {
+ void SeekToFirst() override {
SeekToRestartPoint(0);
ParseNextKey();
}
- virtual void SeekToLast() {
+ void SeekToLast() override {
SeekToRestartPoint(num_restarts_ - 1);
while (ParseNextKey() && NextEntryOffset() < restarts_) {
// Keep skipping
@@ -237,7 +236,7 @@ class Block::Iter : public Iterator {
// Decode next entry
uint32_t shared, non_shared, value_length;
p = DecodeEntry(p, limit, &shared, &non_shared, &value_length);
- if (p == NULL || key_.size() < shared) {
+ if (p == nullptr || key_.size() < shared) {
CorruptionError();
return false;
} else {
@@ -253,7 +252,7 @@ class Block::Iter : public Iterator {
}
};
-Iterator* Block::NewIterator(const Comparator* cmp) {
+Iterator* Block::NewIterator(const Comparator* comparator) {
if (size_ < sizeof(uint32_t)) {
return NewErrorIterator(Status::Corruption("bad block contents"));
}
@@ -261,7 +260,7 @@ Iterator* Block::NewIterator(const Comparator* cmp) {
if (num_restarts == 0) {
return NewEmptyIterator();
} else {
- return new Iter(cmp, data_, restart_offset_, num_restarts);
+ return new Iter(comparator, data_, restart_offset_, num_restarts);
}
}
diff --git a/src/leveldb/table/block.h b/src/leveldb/table/block.h
index 2493eb9f9f..c8f1f7b436 100644
--- a/src/leveldb/table/block.h
+++ b/src/leveldb/table/block.h
@@ -7,6 +7,7 @@
#include <stddef.h>
#include <stdint.h>
+
#include "leveldb/iterator.h"
namespace leveldb {
@@ -19,24 +20,23 @@ class Block {
// Initialize the block with the specified contents.
explicit Block(const BlockContents& contents);
+ Block(const Block&) = delete;
+ Block& operator=(const Block&) = delete;
+
~Block();
size_t size() const { return size_; }
Iterator* NewIterator(const Comparator* comparator);
private:
+ class Iter;
+
uint32_t NumRestarts() const;
const char* data_;
size_t size_;
- uint32_t restart_offset_; // Offset in data_ of restart array
- bool owned_; // Block owns data_[]
-
- // No copying allowed
- Block(const Block&);
- void operator=(const Block&);
-
- class Iter;
+ uint32_t restart_offset_; // Offset in data_ of restart array
+ bool owned_; // Block owns data_[]
};
} // namespace leveldb
diff --git a/src/leveldb/table/block_builder.cc b/src/leveldb/table/block_builder.cc
index db660cd07c..919cff5c93 100644
--- a/src/leveldb/table/block_builder.cc
+++ b/src/leveldb/table/block_builder.cc
@@ -28,36 +28,35 @@
#include "table/block_builder.h"
-#include <algorithm>
#include <assert.h>
+
+#include <algorithm>
+
#include "leveldb/comparator.h"
-#include "leveldb/table_builder.h"
+#include "leveldb/options.h"
#include "util/coding.h"
namespace leveldb {
BlockBuilder::BlockBuilder(const Options* options)
- : options_(options),
- restarts_(),
- counter_(0),
- finished_(false) {
+ : options_(options), restarts_(), counter_(0), finished_(false) {
assert(options->block_restart_interval >= 1);
- restarts_.push_back(0); // First restart point is at offset 0
+ restarts_.push_back(0); // First restart point is at offset 0
}
void BlockBuilder::Reset() {
buffer_.clear();
restarts_.clear();
- restarts_.push_back(0); // First restart point is at offset 0
+ restarts_.push_back(0); // First restart point is at offset 0
counter_ = 0;
finished_ = false;
last_key_.clear();
}
size_t BlockBuilder::CurrentSizeEstimate() const {
- return (buffer_.size() + // Raw data buffer
- restarts_.size() * sizeof(uint32_t) + // Restart array
- sizeof(uint32_t)); // Restart array length
+ return (buffer_.size() + // Raw data buffer
+ restarts_.size() * sizeof(uint32_t) + // Restart array
+ sizeof(uint32_t)); // Restart array length
}
Slice BlockBuilder::Finish() {
@@ -74,7 +73,7 @@ void BlockBuilder::Add(const Slice& key, const Slice& value) {
Slice last_key_piece(last_key_);
assert(!finished_);
assert(counter_ <= options_->block_restart_interval);
- assert(buffer_.empty() // No values yet?
+ assert(buffer_.empty() // No values yet?
|| options_->comparator->Compare(key, last_key_piece) > 0);
size_t shared = 0;
if (counter_ < options_->block_restart_interval) {
diff --git a/src/leveldb/table/block_builder.h b/src/leveldb/table/block_builder.h
index 4fbcb33972..f91f5e6d47 100644
--- a/src/leveldb/table/block_builder.h
+++ b/src/leveldb/table/block_builder.h
@@ -5,9 +5,10 @@
#ifndef STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
#define STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
+#include <stdint.h>
+
#include <vector>
-#include <stdint.h>
#include "leveldb/slice.h"
namespace leveldb {
@@ -18,6 +19,9 @@ class BlockBuilder {
public:
explicit BlockBuilder(const Options* options);
+ BlockBuilder(const BlockBuilder&) = delete;
+ BlockBuilder& operator=(const BlockBuilder&) = delete;
+
// Reset the contents as if the BlockBuilder was just constructed.
void Reset();
@@ -35,21 +39,15 @@ class BlockBuilder {
size_t CurrentSizeEstimate() const;
// Return true iff no entries have been added since the last Reset()
- bool empty() const {
- return buffer_.empty();
- }
+ bool empty() const { return buffer_.empty(); }
private:
- const Options* options_;
- std::string buffer_; // Destination buffer
- std::vector<uint32_t> restarts_; // Restart points
- int counter_; // Number of entries emitted since restart
- bool finished_; // Has Finish() been called?
- std::string last_key_;
-
- // No copying allowed
- BlockBuilder(const BlockBuilder&);
- void operator=(const BlockBuilder&);
+ const Options* options_;
+ std::string buffer_; // Destination buffer
+ std::vector<uint32_t> restarts_; // Restart points
+ int counter_; // Number of entries emitted since restart
+ bool finished_; // Has Finish() been called?
+ std::string last_key_;
};
} // namespace leveldb
diff --git a/src/leveldb/table/filter_block.cc b/src/leveldb/table/filter_block.cc
index 1ed5134170..09ec0094bd 100644
--- a/src/leveldb/table/filter_block.cc
+++ b/src/leveldb/table/filter_block.cc
@@ -16,8 +16,7 @@ static const size_t kFilterBaseLg = 11;
static const size_t kFilterBase = 1 << kFilterBaseLg;
FilterBlockBuilder::FilterBlockBuilder(const FilterPolicy* policy)
- : policy_(policy) {
-}
+ : policy_(policy) {}
void FilterBlockBuilder::StartBlock(uint64_t block_offset) {
uint64_t filter_index = (block_offset / kFilterBase);
@@ -62,7 +61,7 @@ void FilterBlockBuilder::GenerateFilter() {
tmp_keys_.resize(num_keys);
for (size_t i = 0; i < num_keys; i++) {
const char* base = keys_.data() + start_[i];
- size_t length = start_[i+1] - start_[i];
+ size_t length = start_[i + 1] - start_[i];
tmp_keys_[i] = Slice(base, length);
}
@@ -77,14 +76,10 @@ void FilterBlockBuilder::GenerateFilter() {
FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
const Slice& contents)
- : policy_(policy),
- data_(NULL),
- offset_(NULL),
- num_(0),
- base_lg_(0) {
+ : policy_(policy), data_(nullptr), offset_(nullptr), num_(0), base_lg_(0) {
size_t n = contents.size();
if (n < 5) return; // 1 byte for base_lg_ and 4 for start of offset array
- base_lg_ = contents[n-1];
+ base_lg_ = contents[n - 1];
uint32_t last_word = DecodeFixed32(contents.data() + n - 5);
if (last_word > n - 5) return;
data_ = contents.data();
@@ -95,8 +90,8 @@ FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
uint64_t index = block_offset >> base_lg_;
if (index < num_) {
- uint32_t start = DecodeFixed32(offset_ + index*4);
- uint32_t limit = DecodeFixed32(offset_ + index*4 + 4);
+ uint32_t start = DecodeFixed32(offset_ + index * 4);
+ uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4);
if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) {
Slice filter = Slice(data_ + start, limit - start);
return policy_->KeyMayMatch(key, filter);
@@ -108,4 +103,4 @@ bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
return true; // Errors are treated as potential matches
}
-}
+} // namespace leveldb
diff --git a/src/leveldb/table/filter_block.h b/src/leveldb/table/filter_block.h
index c67d010bd1..73b5399249 100644
--- a/src/leveldb/table/filter_block.h
+++ b/src/leveldb/table/filter_block.h
@@ -11,8 +11,10 @@
#include <stddef.h>
#include <stdint.h>
+
#include <string>
#include <vector>
+
#include "leveldb/slice.h"
#include "util/hash.h"
@@ -30,6 +32,9 @@ class FilterBlockBuilder {
public:
explicit FilterBlockBuilder(const FilterPolicy*);
+ FilterBlockBuilder(const FilterBlockBuilder&) = delete;
+ FilterBlockBuilder& operator=(const FilterBlockBuilder&) = delete;
+
void StartBlock(uint64_t block_offset);
void AddKey(const Slice& key);
Slice Finish();
@@ -38,20 +43,16 @@ class FilterBlockBuilder {
void GenerateFilter();
const FilterPolicy* policy_;
- std::string keys_; // Flattened key contents
- std::vector<size_t> start_; // Starting index in keys_ of each key
- std::string result_; // Filter data computed so far
- std::vector<Slice> tmp_keys_; // policy_->CreateFilter() argument
+ std::string keys_; // Flattened key contents
+ std::vector<size_t> start_; // Starting index in keys_ of each key
+ std::string result_; // Filter data computed so far
+ std::vector<Slice> tmp_keys_; // policy_->CreateFilter() argument
std::vector<uint32_t> filter_offsets_;
-
- // No copying allowed
- FilterBlockBuilder(const FilterBlockBuilder&);
- void operator=(const FilterBlockBuilder&);
};
class FilterBlockReader {
public:
- // REQUIRES: "contents" and *policy must stay live while *this is live.
+ // REQUIRES: "contents" and *policy must stay live while *this is live.
FilterBlockReader(const FilterPolicy* policy, const Slice& contents);
bool KeyMayMatch(uint64_t block_offset, const Slice& key);
@@ -63,6 +64,6 @@ class FilterBlockReader {
size_t base_lg_; // Encoding parameter (see kFilterBaseLg in .cc file)
};
-}
+} // namespace leveldb
#endif // STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_
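For readers tracing the offset arithmetic in FilterBlockReader::KeyMayMatch above: with kFilterBaseLg = 11, every filter covers a 2048-byte range of data-block offsets, so a block offset maps to its filter by a right shift, and each filter's byte range is bracketed by two little-endian fixed32 entries at index*4 and index*4 + 4 in the offset array. A self-contained sketch of just that mapping, using the offsets probed by the MultiChunk test further down:

#include <cstdint>
#include <cstdio>

int main() {
  // Mirrors the constant used above: one filter per 2^11 = 2048 bytes of offset.
  const uint64_t kFilterBaseLg = 11;
  // Block offsets probed by the MultiChunk test.
  const uint64_t offsets[] = {0, 2000, 3100, 4100, 9000};
  for (uint64_t block_offset : offsets) {
    // Same mapping as KeyMayMatch: shift down to a filter index; the filter's
    // extent is then read from the fixed32 pair at index*4 and index*4 + 4.
    uint64_t index = block_offset >> kFilterBaseLg;
    printf("block offset %5llu -> filter index %llu\n",
           (unsigned long long)block_offset, (unsigned long long)index);
  }
  return 0;
}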
diff --git a/src/leveldb/table/filter_block_test.cc b/src/leveldb/table/filter_block_test.cc
index 8c4a4741f2..8b33bbdd18 100644
--- a/src/leveldb/table/filter_block_test.cc
+++ b/src/leveldb/table/filter_block_test.cc
@@ -16,18 +16,16 @@ namespace leveldb {
// For testing: emit an array with one hash value per key
class TestHashFilter : public FilterPolicy {
public:
- virtual const char* Name() const {
- return "TestHashFilter";
- }
+ const char* Name() const override { return "TestHashFilter"; }
- virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
+ void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
for (int i = 0; i < n; i++) {
uint32_t h = Hash(keys[i].data(), keys[i].size(), 1);
PutFixed32(dst, h);
}
}
- virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const {
+ bool KeyMayMatch(const Slice& key, const Slice& filter) const override {
uint32_t h = Hash(key.data(), key.size(), 1);
for (size_t i = 0; i + 4 <= filter.size(); i += 4) {
if (h == DecodeFixed32(filter.data() + i)) {
@@ -69,8 +67,8 @@ TEST(FilterBlockTest, SingleChunk) {
ASSERT_TRUE(reader.KeyMayMatch(100, "box"));
ASSERT_TRUE(reader.KeyMayMatch(100, "hello"));
ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
- ASSERT_TRUE(! reader.KeyMayMatch(100, "missing"));
- ASSERT_TRUE(! reader.KeyMayMatch(100, "other"));
+ ASSERT_TRUE(!reader.KeyMayMatch(100, "missing"));
+ ASSERT_TRUE(!reader.KeyMayMatch(100, "other"));
}
TEST(FilterBlockTest, MultiChunk) {
@@ -99,30 +97,28 @@ TEST(FilterBlockTest, MultiChunk) {
// Check first filter
ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
ASSERT_TRUE(reader.KeyMayMatch(2000, "bar"));
- ASSERT_TRUE(! reader.KeyMayMatch(0, "box"));
- ASSERT_TRUE(! reader.KeyMayMatch(0, "hello"));
+ ASSERT_TRUE(!reader.KeyMayMatch(0, "box"));
+ ASSERT_TRUE(!reader.KeyMayMatch(0, "hello"));
// Check second filter
ASSERT_TRUE(reader.KeyMayMatch(3100, "box"));
- ASSERT_TRUE(! reader.KeyMayMatch(3100, "foo"));
- ASSERT_TRUE(! reader.KeyMayMatch(3100, "bar"));
- ASSERT_TRUE(! reader.KeyMayMatch(3100, "hello"));
+ ASSERT_TRUE(!reader.KeyMayMatch(3100, "foo"));
+ ASSERT_TRUE(!reader.KeyMayMatch(3100, "bar"));
+ ASSERT_TRUE(!reader.KeyMayMatch(3100, "hello"));
// Check third filter (empty)
- ASSERT_TRUE(! reader.KeyMayMatch(4100, "foo"));
- ASSERT_TRUE(! reader.KeyMayMatch(4100, "bar"));
- ASSERT_TRUE(! reader.KeyMayMatch(4100, "box"));
- ASSERT_TRUE(! reader.KeyMayMatch(4100, "hello"));
+ ASSERT_TRUE(!reader.KeyMayMatch(4100, "foo"));
+ ASSERT_TRUE(!reader.KeyMayMatch(4100, "bar"));
+ ASSERT_TRUE(!reader.KeyMayMatch(4100, "box"));
+ ASSERT_TRUE(!reader.KeyMayMatch(4100, "hello"));
// Check last filter
ASSERT_TRUE(reader.KeyMayMatch(9000, "box"));
ASSERT_TRUE(reader.KeyMayMatch(9000, "hello"));
- ASSERT_TRUE(! reader.KeyMayMatch(9000, "foo"));
- ASSERT_TRUE(! reader.KeyMayMatch(9000, "bar"));
+ ASSERT_TRUE(!reader.KeyMayMatch(9000, "foo"));
+ ASSERT_TRUE(!reader.KeyMayMatch(9000, "bar"));
}
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
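The test above also shows the second mechanical change applied throughout this update: virtual on overriding member functions gives way to the C++11 override specifier, so a mismatched signature becomes a compile error instead of silently declaring a brand-new virtual that overrides nothing. A small standalone illustration (Policy and TestPolicy are invented for the example):

#include <cstdio>

struct Policy {
  virtual ~Policy() = default;
  virtual const char* Name() const { return "base"; }
};

struct TestPolicy : Policy {
  // override documents the intent and is checked by the compiler: dropping the
  // const qualifier or misspelling Name() would no longer compile.
  const char* Name() const override { return "TestPolicy"; }
};

int main() {
  TestPolicy p;
  const Policy& ref = p;
  printf("%s\n", ref.Name());  // prints "TestPolicy" via the virtual call
  return 0;
}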
diff --git a/src/leveldb/table/format.cc b/src/leveldb/table/format.cc
index 285e1c0de3..a3d67de2e4 100644
--- a/src/leveldb/table/format.cc
+++ b/src/leveldb/table/format.cc
@@ -21,8 +21,7 @@ void BlockHandle::EncodeTo(std::string* dst) const {
}
Status BlockHandle::DecodeFrom(Slice* input) {
- if (GetVarint64(input, &offset_) &&
- GetVarint64(input, &size_)) {
+ if (GetVarint64(input, &offset_) && GetVarint64(input, &size_)) {
return Status::OK();
} else {
return Status::Corruption("bad block handle");
@@ -62,10 +61,8 @@ Status Footer::DecodeFrom(Slice* input) {
return result;
}
-Status ReadBlock(RandomAccessFile* file,
- const ReadOptions& options,
- const BlockHandle& handle,
- BlockContents* result) {
+Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
+ const BlockHandle& handle, BlockContents* result) {
result->data = Slice();
result->cachable = false;
result->heap_allocated = false;
@@ -86,7 +83,7 @@ Status ReadBlock(RandomAccessFile* file,
}
// Check the crc of the type and the block contents
- const char* data = contents.data(); // Pointer to where Read put the data
+ const char* data = contents.data(); // Pointer to where Read put the data
if (options.verify_checksums) {
const uint32_t crc = crc32c::Unmask(DecodeFixed32(data + n + 1));
const uint32_t actual = crc32c::Value(data, n + 1);
diff --git a/src/leveldb/table/format.h b/src/leveldb/table/format.h
index 6c0b80c017..e49dfdc047 100644
--- a/src/leveldb/table/format.h
+++ b/src/leveldb/table/format.h
@@ -5,8 +5,10 @@
#ifndef STORAGE_LEVELDB_TABLE_FORMAT_H_
#define STORAGE_LEVELDB_TABLE_FORMAT_H_
-#include <string>
#include <stdint.h>
+
+#include <string>
+
#include "leveldb/slice.h"
#include "leveldb/status.h"
#include "leveldb/table_builder.h"
@@ -21,6 +23,9 @@ struct ReadOptions;
// block or a meta block.
class BlockHandle {
public:
+ // Maximum encoding length of a BlockHandle
+ enum { kMaxEncodedLength = 10 + 10 };
+
BlockHandle();
// The offset of the block in the file.
@@ -34,9 +39,6 @@ class BlockHandle {
void EncodeTo(std::string* dst) const;
Status DecodeFrom(Slice* input);
- // Maximum encoding length of a BlockHandle
- enum { kMaxEncodedLength = 10 + 10 };
-
private:
uint64_t offset_;
uint64_t size_;
@@ -46,30 +48,24 @@ class BlockHandle {
// end of every table file.
class Footer {
public:
- Footer() { }
+ // Encoded length of a Footer. Note that the serialization of a
+ // Footer will always occupy exactly this many bytes. It consists
+ // of two block handles and a magic number.
+ enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };
+
+ Footer() = default;
// The block handle for the metaindex block of the table
const BlockHandle& metaindex_handle() const { return metaindex_handle_; }
void set_metaindex_handle(const BlockHandle& h) { metaindex_handle_ = h; }
// The block handle for the index block of the table
- const BlockHandle& index_handle() const {
- return index_handle_;
- }
- void set_index_handle(const BlockHandle& h) {
- index_handle_ = h;
- }
+ const BlockHandle& index_handle() const { return index_handle_; }
+ void set_index_handle(const BlockHandle& h) { index_handle_ = h; }
void EncodeTo(std::string* dst) const;
Status DecodeFrom(Slice* input);
- // Encoded length of a Footer. Note that the serialization of a
- // Footer will always occupy exactly this many bytes. It consists
- // of two block handles and a magic number.
- enum {
- kEncodedLength = 2*BlockHandle::kMaxEncodedLength + 8
- };
-
private:
BlockHandle metaindex_handle_;
BlockHandle index_handle_;
@@ -91,17 +87,13 @@ struct BlockContents {
// Read the block identified by "handle" from "file". On failure
// return non-OK. On success fill *result and return OK.
-extern Status ReadBlock(RandomAccessFile* file,
- const ReadOptions& options,
- const BlockHandle& handle,
- BlockContents* result);
+Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
+ const BlockHandle& handle, BlockContents* result);
// Implementation details follow. Clients should ignore,
inline BlockHandle::BlockHandle()
- : offset_(~static_cast<uint64_t>(0)),
- size_(~static_cast<uint64_t>(0)) {
-}
+ : offset_(~static_cast<uint64_t>(0)), size_(~static_cast<uint64_t>(0)) {}
} // namespace leveldb
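The constants hoisted to the top of BlockHandle and Footer above keep their old values: a BlockHandle is two varint64 fields of at most 10 bytes each, so kMaxEncodedLength is 20, and a Footer is two such handles plus an 8-byte magic number, 48 bytes in total. A compile-time check of that arithmetic, using standalone mirror constants rather than the LevelDB headers:

#include <cstdio>

// Standalone mirrors of the enum values defined above.
enum { kMaxEncodedBlockHandle = 10 + 10 };                 // two varint64 fields
enum { kEncodedFooter = 2 * kMaxEncodedBlockHandle + 8 };  // two handles + magic

static_assert(kMaxEncodedBlockHandle == 20, "a varint64 occupies at most 10 bytes");
static_assert(kEncodedFooter == 48, "the footer always occupies exactly 48 bytes");

int main() {
  printf("BlockHandle at most %d bytes, Footer exactly %d bytes\n",
         kMaxEncodedBlockHandle, kEncodedFooter);
  return 0;
}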
diff --git a/src/leveldb/table/iterator.cc b/src/leveldb/table/iterator.cc
index 3d1c87fdec..dfef083d4d 100644
--- a/src/leveldb/table/iterator.cc
+++ b/src/leveldb/table/iterator.cc
@@ -7,58 +7,67 @@
namespace leveldb {
Iterator::Iterator() {
- cleanup_.function = NULL;
- cleanup_.next = NULL;
+ cleanup_head_.function = nullptr;
+ cleanup_head_.next = nullptr;
}
Iterator::~Iterator() {
- if (cleanup_.function != NULL) {
- (*cleanup_.function)(cleanup_.arg1, cleanup_.arg2);
- for (Cleanup* c = cleanup_.next; c != NULL; ) {
- (*c->function)(c->arg1, c->arg2);
- Cleanup* next = c->next;
- delete c;
- c = next;
+ if (!cleanup_head_.IsEmpty()) {
+ cleanup_head_.Run();
+ for (CleanupNode* node = cleanup_head_.next; node != nullptr;) {
+ node->Run();
+ CleanupNode* next_node = node->next;
+ delete node;
+ node = next_node;
}
}
}
void Iterator::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
- assert(func != NULL);
- Cleanup* c;
- if (cleanup_.function == NULL) {
- c = &cleanup_;
+ assert(func != nullptr);
+ CleanupNode* node;
+ if (cleanup_head_.IsEmpty()) {
+ node = &cleanup_head_;
} else {
- c = new Cleanup;
- c->next = cleanup_.next;
- cleanup_.next = c;
+ node = new CleanupNode();
+ node->next = cleanup_head_.next;
+ cleanup_head_.next = node;
}
- c->function = func;
- c->arg1 = arg1;
- c->arg2 = arg2;
+ node->function = func;
+ node->arg1 = arg1;
+ node->arg2 = arg2;
}
namespace {
+
class EmptyIterator : public Iterator {
public:
- EmptyIterator(const Status& s) : status_(s) { }
- virtual bool Valid() const { return false; }
- virtual void Seek(const Slice& target) { }
- virtual void SeekToFirst() { }
- virtual void SeekToLast() { }
- virtual void Next() { assert(false); }
- virtual void Prev() { assert(false); }
- Slice key() const { assert(false); return Slice(); }
- Slice value() const { assert(false); return Slice(); }
- virtual Status status() const { return status_; }
+ EmptyIterator(const Status& s) : status_(s) {}
+ ~EmptyIterator() override = default;
+
+ bool Valid() const override { return false; }
+ void Seek(const Slice& target) override {}
+ void SeekToFirst() override {}
+ void SeekToLast() override {}
+ void Next() override { assert(false); }
+ void Prev() override { assert(false); }
+ Slice key() const override {
+ assert(false);
+ return Slice();
+ }
+ Slice value() const override {
+ assert(false);
+ return Slice();
+ }
+ Status status() const override { return status_; }
+
private:
Status status_;
};
-} // namespace
-Iterator* NewEmptyIterator() {
- return new EmptyIterator(Status::OK());
-}
+} // anonymous namespace
+
+Iterator* NewEmptyIterator() { return new EmptyIterator(Status::OK()); }
Iterator* NewErrorIterator(const Status& status) {
return new EmptyIterator(status);
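The rewritten destructor and RegisterCleanup above call CleanupNode helpers (IsEmpty, Run) whose definitions are not in this hunk; they presumably live in the companion include/leveldb/iterator.h change elsewhere in this diff. The data structure itself is unchanged: the first cleanup is embedded in the iterator and later registrations become heap-allocated list nodes. A freestanding sketch of that shape, with the helper bodies written as assumptions rather than quotations of the header:

#include <cassert>
#include <cstdio>

using CleanupFunction = void (*)(void* arg1, void* arg2);

// Assumed shape of the node manipulated above; the real definition is in the
// iterator header, not in this file.
struct CleanupNode {
  CleanupFunction function = nullptr;
  void* arg1 = nullptr;
  void* arg2 = nullptr;
  CleanupNode* next = nullptr;

  bool IsEmpty() const { return function == nullptr; }
  void Run() {
    assert(!IsEmpty());
    (*function)(arg1, arg2);
  }
};

static void PrintTag(void* tag, void* /*unused*/) {
  printf("cleanup: %s\n", static_cast<const char*>(tag));
}

int main() {
  static char kFirst[] = "first";
  static char kSecond[] = "second";

  CleanupNode head;  // embedded in the iterator, so the common case allocates nothing
  head.function = &PrintTag;
  head.arg1 = kFirst;

  CleanupNode* extra = new CleanupNode;  // later registrations become heap nodes
  extra->function = &PrintTag;
  extra->arg1 = kSecond;
  extra->next = head.next;
  head.next = extra;

  // Same walk as the destructor above: run the head, then run and delete the rest.
  if (!head.IsEmpty()) {
    head.Run();
    for (CleanupNode* node = head.next; node != nullptr;) {
      node->Run();
      CleanupNode* next_node = node->next;
      delete node;
      node = next_node;
    }
  }
  return 0;
}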
diff --git a/src/leveldb/table/iterator_wrapper.h b/src/leveldb/table/iterator_wrapper.h
index f410c3fabe..c230572529 100644
--- a/src/leveldb/table/iterator_wrapper.h
+++ b/src/leveldb/table/iterator_wrapper.h
@@ -16,10 +16,8 @@ namespace leveldb {
// cache locality.
class IteratorWrapper {
public:
- IteratorWrapper(): iter_(NULL), valid_(false) { }
- explicit IteratorWrapper(Iterator* iter): iter_(NULL) {
- Set(iter);
- }
+ IteratorWrapper() : iter_(nullptr), valid_(false) {}
+ explicit IteratorWrapper(Iterator* iter) : iter_(nullptr) { Set(iter); }
~IteratorWrapper() { delete iter_; }
Iterator* iter() const { return iter_; }
@@ -28,25 +26,53 @@ class IteratorWrapper {
void Set(Iterator* iter) {
delete iter_;
iter_ = iter;
- if (iter_ == NULL) {
+ if (iter_ == nullptr) {
valid_ = false;
} else {
Update();
}
}
-
// Iterator interface methods
- bool Valid() const { return valid_; }
- Slice key() const { assert(Valid()); return key_; }
- Slice value() const { assert(Valid()); return iter_->value(); }
- // Methods below require iter() != NULL
- Status status() const { assert(iter_); return iter_->status(); }
- void Next() { assert(iter_); iter_->Next(); Update(); }
- void Prev() { assert(iter_); iter_->Prev(); Update(); }
- void Seek(const Slice& k) { assert(iter_); iter_->Seek(k); Update(); }
- void SeekToFirst() { assert(iter_); iter_->SeekToFirst(); Update(); }
- void SeekToLast() { assert(iter_); iter_->SeekToLast(); Update(); }
+ bool Valid() const { return valid_; }
+ Slice key() const {
+ assert(Valid());
+ return key_;
+ }
+ Slice value() const {
+ assert(Valid());
+ return iter_->value();
+ }
+ // Methods below require iter() != nullptr
+ Status status() const {
+ assert(iter_);
+ return iter_->status();
+ }
+ void Next() {
+ assert(iter_);
+ iter_->Next();
+ Update();
+ }
+ void Prev() {
+ assert(iter_);
+ iter_->Prev();
+ Update();
+ }
+ void Seek(const Slice& k) {
+ assert(iter_);
+ iter_->Seek(k);
+ Update();
+ }
+ void SeekToFirst() {
+ assert(iter_);
+ iter_->SeekToFirst();
+ Update();
+ }
+ void SeekToLast() {
+ assert(iter_);
+ iter_->SeekToLast();
+ Update();
+ }
private:
void Update() {
diff --git a/src/leveldb/table/merger.cc b/src/leveldb/table/merger.cc
index 2dde4dc21f..76441b1cc2 100644
--- a/src/leveldb/table/merger.cc
+++ b/src/leveldb/table/merger.cc
@@ -17,22 +17,18 @@ class MergingIterator : public Iterator {
: comparator_(comparator),
children_(new IteratorWrapper[n]),
n_(n),
- current_(NULL),
+ current_(nullptr),
direction_(kForward) {
for (int i = 0; i < n; i++) {
children_[i].Set(children[i]);
}
}
- virtual ~MergingIterator() {
- delete[] children_;
- }
+ ~MergingIterator() override { delete[] children_; }
- virtual bool Valid() const {
- return (current_ != NULL);
- }
+ bool Valid() const override { return (current_ != nullptr); }
- virtual void SeekToFirst() {
+ void SeekToFirst() override {
for (int i = 0; i < n_; i++) {
children_[i].SeekToFirst();
}
@@ -40,7 +36,7 @@ class MergingIterator : public Iterator {
direction_ = kForward;
}
- virtual void SeekToLast() {
+ void SeekToLast() override {
for (int i = 0; i < n_; i++) {
children_[i].SeekToLast();
}
@@ -48,7 +44,7 @@ class MergingIterator : public Iterator {
direction_ = kReverse;
}
- virtual void Seek(const Slice& target) {
+ void Seek(const Slice& target) override {
for (int i = 0; i < n_; i++) {
children_[i].Seek(target);
}
@@ -56,7 +52,7 @@ class MergingIterator : public Iterator {
direction_ = kForward;
}
- virtual void Next() {
+ void Next() override {
assert(Valid());
// Ensure that all children are positioned after key().
@@ -82,7 +78,7 @@ class MergingIterator : public Iterator {
FindSmallest();
}
- virtual void Prev() {
+ void Prev() override {
assert(Valid());
// Ensure that all children are positioned before key().
@@ -111,17 +107,17 @@ class MergingIterator : public Iterator {
FindLargest();
}
- virtual Slice key() const {
+ Slice key() const override {
assert(Valid());
return current_->key();
}
- virtual Slice value() const {
+ Slice value() const override {
assert(Valid());
return current_->value();
}
- virtual Status status() const {
+ Status status() const override {
Status status;
for (int i = 0; i < n_; i++) {
status = children_[i].status();
@@ -133,6 +129,9 @@ class MergingIterator : public Iterator {
}
private:
+ // Which direction is the iterator moving?
+ enum Direction { kForward, kReverse };
+
void FindSmallest();
void FindLargest();
@@ -143,21 +142,15 @@ class MergingIterator : public Iterator {
IteratorWrapper* children_;
int n_;
IteratorWrapper* current_;
-
- // Which direction is the iterator moving?
- enum Direction {
- kForward,
- kReverse
- };
Direction direction_;
};
void MergingIterator::FindSmallest() {
- IteratorWrapper* smallest = NULL;
+ IteratorWrapper* smallest = nullptr;
for (int i = 0; i < n_; i++) {
IteratorWrapper* child = &children_[i];
if (child->Valid()) {
- if (smallest == NULL) {
+ if (smallest == nullptr) {
smallest = child;
} else if (comparator_->Compare(child->key(), smallest->key()) < 0) {
smallest = child;
@@ -168,11 +161,11 @@ void MergingIterator::FindSmallest() {
}
void MergingIterator::FindLargest() {
- IteratorWrapper* largest = NULL;
- for (int i = n_-1; i >= 0; i--) {
+ IteratorWrapper* largest = nullptr;
+ for (int i = n_ - 1; i >= 0; i--) {
IteratorWrapper* child = &children_[i];
if (child->Valid()) {
- if (largest == NULL) {
+ if (largest == nullptr) {
largest = child;
} else if (comparator_->Compare(child->key(), largest->key()) > 0) {
largest = child;
@@ -183,14 +176,15 @@ void MergingIterator::FindLargest() {
}
} // namespace
-Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n) {
+Iterator* NewMergingIterator(const Comparator* comparator, Iterator** children,
+ int n) {
assert(n >= 0);
if (n == 0) {
return NewEmptyIterator();
} else if (n == 1) {
- return list[0];
+ return children[0];
} else {
- return new MergingIterator(cmp, list, n);
+ return new MergingIterator(comparator, children, n);
}
}
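Most of the churn in merger.cc is the NULL-to-nullptr conversion seen across the whole update. nullptr has its own type (std::nullptr_t), so it cannot be confused with an integer during overload resolution the way the macro NULL (typically 0 or 0L) can be. A compact illustration, unrelated to the LevelDB classes themselves:

#include <cstdio>

static void Describe(int) { printf("chose Describe(int)\n"); }
static void Describe(const char*) { printf("chose Describe(const char*)\n"); }

int main() {
  Describe(0);        // integer literal: always the int overload
  Describe(nullptr);  // std::nullptr_t: unambiguously the pointer overload
  // Describe(NULL);  // implementation-defined: may be ambiguous or pick the int
  //                  // overload, which is exactly the trap nullptr avoids
  return 0;
}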
diff --git a/src/leveldb/table/merger.h b/src/leveldb/table/merger.h
index 91ddd80faa..41cedc5254 100644
--- a/src/leveldb/table/merger.h
+++ b/src/leveldb/table/merger.h
@@ -18,8 +18,8 @@ class Iterator;
// key is present in K child iterators, it will be yielded K times.
//
// REQUIRES: n >= 0
-extern Iterator* NewMergingIterator(
- const Comparator* comparator, Iterator** children, int n);
+Iterator* NewMergingIterator(const Comparator* comparator, Iterator** children,
+ int n);
} // namespace leveldb
diff --git a/src/leveldb/table/table.cc b/src/leveldb/table/table.cc
index decf8082cc..b07bc88c7e 100644
--- a/src/leveldb/table/table.cc
+++ b/src/leveldb/table/table.cc
@@ -20,7 +20,7 @@ namespace leveldb {
struct Table::Rep {
~Rep() {
delete filter;
- delete [] filter_data;
+ delete[] filter_data;
delete index_block;
}
@@ -35,11 +35,9 @@ struct Table::Rep {
Block* index_block;
};
-Status Table::Open(const Options& options,
- RandomAccessFile* file,
- uint64_t size,
- Table** table) {
- *table = NULL;
+Status Table::Open(const Options& options, RandomAccessFile* file,
+ uint64_t size, Table** table) {
+ *table = nullptr;
if (size < Footer::kEncodedLength) {
return Status::Corruption("file is too short to be an sstable");
}
@@ -55,41 +53,36 @@ Status Table::Open(const Options& options,
if (!s.ok()) return s;
// Read the index block
- BlockContents contents;
- Block* index_block = NULL;
+ BlockContents index_block_contents;
if (s.ok()) {
ReadOptions opt;
if (options.paranoid_checks) {
opt.verify_checksums = true;
}
- s = ReadBlock(file, opt, footer.index_handle(), &contents);
- if (s.ok()) {
- index_block = new Block(contents);
- }
+ s = ReadBlock(file, opt, footer.index_handle(), &index_block_contents);
}
if (s.ok()) {
// We've successfully read the footer and the index block: we're
// ready to serve requests.
+ Block* index_block = new Block(index_block_contents);
Rep* rep = new Table::Rep;
rep->options = options;
rep->file = file;
rep->metaindex_handle = footer.metaindex_handle();
rep->index_block = index_block;
rep->cache_id = (options.block_cache ? options.block_cache->NewId() : 0);
- rep->filter_data = NULL;
- rep->filter = NULL;
+ rep->filter_data = nullptr;
+ rep->filter = nullptr;
*table = new Table(rep);
(*table)->ReadMeta(footer);
- } else {
- delete index_block;
}
return s;
}
void Table::ReadMeta(const Footer& footer) {
- if (rep_->options.filter_policy == NULL) {
+ if (rep_->options.filter_policy == nullptr) {
return; // Do not need any metadata
}
@@ -135,14 +128,12 @@ void Table::ReadFilter(const Slice& filter_handle_value) {
return;
}
if (block.heap_allocated) {
- rep_->filter_data = block.data.data(); // Will need to delete later
+ rep_->filter_data = block.data.data(); // Will need to delete later
}
rep_->filter = new FilterBlockReader(rep_->options.filter_policy, block.data);
}
-Table::~Table() {
- delete rep_;
-}
+Table::~Table() { delete rep_; }
static void DeleteBlock(void* arg, void* ignored) {
delete reinterpret_cast<Block*>(arg);
@@ -161,13 +152,12 @@ static void ReleaseBlock(void* arg, void* h) {
// Convert an index iterator value (i.e., an encoded BlockHandle)
// into an iterator over the contents of the corresponding block.
-Iterator* Table::BlockReader(void* arg,
- const ReadOptions& options,
+Iterator* Table::BlockReader(void* arg, const ReadOptions& options,
const Slice& index_value) {
Table* table = reinterpret_cast<Table*>(arg);
Cache* block_cache = table->rep_->options.block_cache;
- Block* block = NULL;
- Cache::Handle* cache_handle = NULL;
+ Block* block = nullptr;
+ Cache::Handle* cache_handle = nullptr;
BlockHandle handle;
Slice input = index_value;
@@ -177,21 +167,21 @@ Iterator* Table::BlockReader(void* arg,
if (s.ok()) {
BlockContents contents;
- if (block_cache != NULL) {
+ if (block_cache != nullptr) {
char cache_key_buffer[16];
EncodeFixed64(cache_key_buffer, table->rep_->cache_id);
- EncodeFixed64(cache_key_buffer+8, handle.offset());
+ EncodeFixed64(cache_key_buffer + 8, handle.offset());
Slice key(cache_key_buffer, sizeof(cache_key_buffer));
cache_handle = block_cache->Lookup(key);
- if (cache_handle != NULL) {
+ if (cache_handle != nullptr) {
block = reinterpret_cast<Block*>(block_cache->Value(cache_handle));
} else {
s = ReadBlock(table->rep_->file, options, handle, &contents);
if (s.ok()) {
block = new Block(contents);
if (contents.cachable && options.fill_cache) {
- cache_handle = block_cache->Insert(
- key, block, block->size(), &DeleteCachedBlock);
+ cache_handle = block_cache->Insert(key, block, block->size(),
+ &DeleteCachedBlock);
}
}
}
@@ -204,10 +194,10 @@ Iterator* Table::BlockReader(void* arg,
}
Iterator* iter;
- if (block != NULL) {
+ if (block != nullptr) {
iter = block->NewIterator(table->rep_->options.comparator);
- if (cache_handle == NULL) {
- iter->RegisterCleanup(&DeleteBlock, block, NULL);
+ if (cache_handle == nullptr) {
+ iter->RegisterCleanup(&DeleteBlock, block, nullptr);
} else {
iter->RegisterCleanup(&ReleaseBlock, block_cache, cache_handle);
}
@@ -223,9 +213,9 @@ Iterator* Table::NewIterator(const ReadOptions& options) const {
&Table::BlockReader, const_cast<Table*>(this), options);
}
-Status Table::InternalGet(const ReadOptions& options, const Slice& k,
- void* arg,
- void (*saver)(void*, const Slice&, const Slice&)) {
+Status Table::InternalGet(const ReadOptions& options, const Slice& k, void* arg,
+ void (*handle_result)(void*, const Slice&,
+ const Slice&)) {
Status s;
Iterator* iiter = rep_->index_block->NewIterator(rep_->options.comparator);
iiter->Seek(k);
@@ -233,15 +223,14 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k,
Slice handle_value = iiter->value();
FilterBlockReader* filter = rep_->filter;
BlockHandle handle;
- if (filter != NULL &&
- handle.DecodeFrom(&handle_value).ok() &&
+ if (filter != nullptr && handle.DecodeFrom(&handle_value).ok() &&
!filter->KeyMayMatch(handle.offset(), k)) {
// Not found
} else {
Iterator* block_iter = BlockReader(this, options, iiter->value());
block_iter->Seek(k);
if (block_iter->Valid()) {
- (*saver)(arg, block_iter->key(), block_iter->value());
+ (*handle_result)(arg, block_iter->key(), block_iter->value());
}
s = block_iter->status();
delete block_iter;
@@ -254,7 +243,6 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k,
return s;
}
-
uint64_t Table::ApproximateOffsetOf(const Slice& key) const {
Iterator* index_iter =
rep_->index_block->NewIterator(rep_->options.comparator);
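The Table::Open rework above changes control flow, not just formatting: the index Block is now constructed only after ReadBlock succeeds, so the error path no longer has a half-built object to delete and the old else { delete index_block; } branch disappears. The shape of that refactor, reduced to a toy with invented Status, Contents and Block stand-ins:

#include <cstdio>
#include <string>

struct Status { bool ok; std::string msg; };
struct Contents { std::string data; };
struct Block {
  explicit Block(const Contents& c) : data(c.data) {}
  std::string data;
};

static Status ReadContents(bool succeed, Contents* out) {
  if (!succeed) return {false, "corruption"};
  out->data = "index";
  return {true, ""};
}

static Status Open(bool read_succeeds, Block** block_out) {
  *block_out = nullptr;
  Contents contents;
  Status s = ReadContents(read_succeeds, &contents);
  if (s.ok) {
    // Allocate only once the read is known to be good; on failure there is
    // nothing to clean up, so no error-path delete is needed.
    *block_out = new Block(contents);
  }
  return s;
}

int main() {
  Block* b = nullptr;
  Status good = Open(true, &b);
  printf("ok path: ok=%d block=%p\n", good.ok ? 1 : 0, static_cast<void*>(b));
  delete b;

  Block* none = nullptr;
  Status bad = Open(false, &none);
  printf("error path: ok=%d (%s) block=%p\n", bad.ok ? 1 : 0, bad.msg.c_str(),
         static_cast<void*>(none));
  return 0;
}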
diff --git a/src/leveldb/table/table_builder.cc b/src/leveldb/table/table_builder.cc
index 62002c84f2..278febf94f 100644
--- a/src/leveldb/table/table_builder.cc
+++ b/src/leveldb/table/table_builder.cc
@@ -5,6 +5,7 @@
#include "leveldb/table_builder.h"
#include <assert.h>
+
#include "leveldb/comparator.h"
#include "leveldb/env.h"
#include "leveldb/filter_policy.h"
@@ -18,6 +19,22 @@
namespace leveldb {
struct TableBuilder::Rep {
+ Rep(const Options& opt, WritableFile* f)
+ : options(opt),
+ index_block_options(opt),
+ file(f),
+ offset(0),
+ data_block(&options),
+ index_block(&index_block_options),
+ num_entries(0),
+ closed(false),
+ filter_block(opt.filter_policy == nullptr
+ ? nullptr
+ : new FilterBlockBuilder(opt.filter_policy)),
+ pending_index_entry(false) {
+ index_block_options.block_restart_interval = 1;
+ }
+
Options options;
Options index_block_options;
WritableFile* file;
@@ -27,7 +44,7 @@ struct TableBuilder::Rep {
BlockBuilder index_block;
std::string last_key;
int64_t num_entries;
- bool closed; // Either Finish() or Abandon() has been called.
+ bool closed; // Either Finish() or Abandon() has been called.
FilterBlockBuilder* filter_block;
// We do not emit the index entry for a block until we have seen the
@@ -43,26 +60,11 @@ struct TableBuilder::Rep {
BlockHandle pending_handle; // Handle to add to index block
std::string compressed_output;
-
- Rep(const Options& opt, WritableFile* f)
- : options(opt),
- index_block_options(opt),
- file(f),
- offset(0),
- data_block(&options),
- index_block(&index_block_options),
- num_entries(0),
- closed(false),
- filter_block(opt.filter_policy == NULL ? NULL
- : new FilterBlockBuilder(opt.filter_policy)),
- pending_index_entry(false) {
- index_block_options.block_restart_interval = 1;
- }
};
TableBuilder::TableBuilder(const Options& options, WritableFile* file)
: rep_(new Rep(options, file)) {
- if (rep_->filter_block != NULL) {
+ if (rep_->filter_block != nullptr) {
rep_->filter_block->StartBlock(0);
}
}
@@ -106,7 +108,7 @@ void TableBuilder::Add(const Slice& key, const Slice& value) {
r->pending_index_entry = false;
}
- if (r->filter_block != NULL) {
+ if (r->filter_block != nullptr) {
r->filter_block->AddKey(key);
}
@@ -131,7 +133,7 @@ void TableBuilder::Flush() {
r->pending_index_entry = true;
r->status = r->file->Flush();
}
- if (r->filter_block != NULL) {
+ if (r->filter_block != nullptr) {
r->filter_block->StartBlock(r->offset);
}
}
@@ -173,8 +175,7 @@ void TableBuilder::WriteBlock(BlockBuilder* block, BlockHandle* handle) {
}
void TableBuilder::WriteRawBlock(const Slice& block_contents,
- CompressionType type,
- BlockHandle* handle) {
+ CompressionType type, BlockHandle* handle) {
Rep* r = rep_;
handle->set_offset(r->offset);
handle->set_size(block_contents.size());
@@ -184,7 +185,7 @@ void TableBuilder::WriteRawBlock(const Slice& block_contents,
trailer[0] = type;
uint32_t crc = crc32c::Value(block_contents.data(), block_contents.size());
crc = crc32c::Extend(crc, trailer, 1); // Extend crc to cover block type
- EncodeFixed32(trailer+1, crc32c::Mask(crc));
+ EncodeFixed32(trailer + 1, crc32c::Mask(crc));
r->status = r->file->Append(Slice(trailer, kBlockTrailerSize));
if (r->status.ok()) {
r->offset += block_contents.size() + kBlockTrailerSize;
@@ -192,9 +193,7 @@ void TableBuilder::WriteRawBlock(const Slice& block_contents,
}
}
-Status TableBuilder::status() const {
- return rep_->status;
-}
+Status TableBuilder::status() const { return rep_->status; }
Status TableBuilder::Finish() {
Rep* r = rep_;
@@ -205,7 +204,7 @@ Status TableBuilder::Finish() {
BlockHandle filter_block_handle, metaindex_block_handle, index_block_handle;
// Write filter block
- if (ok() && r->filter_block != NULL) {
+ if (ok() && r->filter_block != nullptr) {
WriteRawBlock(r->filter_block->Finish(), kNoCompression,
&filter_block_handle);
}
@@ -213,7 +212,7 @@ Status TableBuilder::Finish() {
// Write metaindex block
if (ok()) {
BlockBuilder meta_index_block(&r->options);
- if (r->filter_block != NULL) {
+ if (r->filter_block != nullptr) {
// Add mapping from "filter.Name" to location of filter data
std::string key = "filter.";
key.append(r->options.filter_policy->Name());
@@ -259,12 +258,8 @@ void TableBuilder::Abandon() {
r->closed = true;
}
-uint64_t TableBuilder::NumEntries() const {
- return rep_->num_entries;
-}
+uint64_t TableBuilder::NumEntries() const { return rep_->num_entries; }
-uint64_t TableBuilder::FileSize() const {
- return rep_->offset;
-}
+uint64_t TableBuilder::FileSize() const { return rep_->offset; }
} // namespace leveldb
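Moving TableBuilder::Rep's constructor up to sit beside its members is organisational, but it is a good moment to recall that members are always initialised in declaration order, not in the order they appear in the mem-initialiser list; data_block(&options) and index_block(&index_block_options) rely on options and index_block_options being declared, and therefore initialised, first. A minimal reminder of the rule, with an illustrative struct that is not part of the patch:

#include <cstdio>

struct Example {
  // Initialisation happens in declaration order: a_ first, then b_, regardless
  // of how the mem-initialiser list below is written.
  int a_;
  int b_;

  // Written "b_ first", yet a_ is still initialised first; most compilers warn
  // about this mismatch under -Wreorder.
  Example() : b_(2), a_(1) {}
};

int main() {
  Example e;
  printf("a_=%d b_=%d\n", e.a_, e.b_);
  return 0;
}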
diff --git a/src/leveldb/table/table_test.cc b/src/leveldb/table/table_test.cc
index abf6e246ff..17aaea2f9e 100644
--- a/src/leveldb/table/table_test.cc
+++ b/src/leveldb/table/table_test.cc
@@ -6,6 +6,7 @@
#include <map>
#include <string>
+
#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
@@ -27,8 +28,8 @@ namespace leveldb {
static std::string Reverse(const Slice& key) {
std::string str(key.ToString());
std::string rev("");
- for (std::string::reverse_iterator rit = str.rbegin();
- rit != str.rend(); ++rit) {
+ for (std::string::reverse_iterator rit = str.rbegin(); rit != str.rend();
+ ++rit) {
rev.push_back(*rit);
}
return rev;
@@ -37,24 +38,23 @@ static std::string Reverse(const Slice& key) {
namespace {
class ReverseKeyComparator : public Comparator {
public:
- virtual const char* Name() const {
+ const char* Name() const override {
return "leveldb.ReverseBytewiseComparator";
}
- virtual int Compare(const Slice& a, const Slice& b) const {
+ int Compare(const Slice& a, const Slice& b) const override {
return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
}
- virtual void FindShortestSeparator(
- std::string* start,
- const Slice& limit) const {
+ void FindShortestSeparator(std::string* start,
+ const Slice& limit) const override {
std::string s = Reverse(*start);
std::string l = Reverse(limit);
BytewiseComparator()->FindShortestSeparator(&s, l);
*start = Reverse(s);
}
- virtual void FindShortSuccessor(std::string* key) const {
+ void FindShortSuccessor(std::string* key) const override {
std::string s = Reverse(*key);
BytewiseComparator()->FindShortSuccessor(&s);
*key = Reverse(s);
@@ -79,47 +79,46 @@ namespace {
struct STLLessThan {
const Comparator* cmp;
- STLLessThan() : cmp(BytewiseComparator()) { }
- STLLessThan(const Comparator* c) : cmp(c) { }
+ STLLessThan() : cmp(BytewiseComparator()) {}
+ STLLessThan(const Comparator* c) : cmp(c) {}
bool operator()(const std::string& a, const std::string& b) const {
return cmp->Compare(Slice(a), Slice(b)) < 0;
}
};
} // namespace
-class StringSink: public WritableFile {
+class StringSink : public WritableFile {
public:
- ~StringSink() { }
+ ~StringSink() override = default;
const std::string& contents() const { return contents_; }
- virtual Status Close() { return Status::OK(); }
- virtual Status Flush() { return Status::OK(); }
- virtual Status Sync() { return Status::OK(); }
+ Status Close() override { return Status::OK(); }
+ Status Flush() override { return Status::OK(); }
+ Status Sync() override { return Status::OK(); }
- virtual Status Append(const Slice& data) {
+ Status Append(const Slice& data) override {
contents_.append(data.data(), data.size());
return Status::OK();
}
+ std::string GetName() const override { return ""; }
private:
std::string contents_;
};
-
-class StringSource: public RandomAccessFile {
+class StringSource : public RandomAccessFile {
public:
StringSource(const Slice& contents)
- : contents_(contents.data(), contents.size()) {
- }
+ : contents_(contents.data(), contents.size()) {}
- virtual ~StringSource() { }
+ ~StringSource() override = default;
uint64_t Size() const { return contents_.size(); }
- virtual Status Read(uint64_t offset, size_t n, Slice* result,
- char* scratch) const {
- if (offset > contents_.size()) {
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
+ if (offset >= contents_.size()) {
return Status::InvalidArgument("invalid Read offset");
}
if (offset + n > contents_.size()) {
@@ -130,6 +129,7 @@ class StringSource: public RandomAccessFile {
return Status::OK();
}
+ std::string GetName() const { return ""; }
private:
std::string contents_;
};
@@ -140,8 +140,8 @@ typedef std::map<std::string, std::string, STLLessThan> KVMap;
// BlockBuilder/TableBuilder and Block/Table.
class Constructor {
public:
- explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) { }
- virtual ~Constructor() { }
+ explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) {}
+ virtual ~Constructor() = default;
void Add(const std::string& key, const Slice& value) {
data_[key] = value.ToString();
@@ -150,15 +150,12 @@ class Constructor {
// Finish constructing the data structure with all the keys that have
// been added so far. Returns the keys in sorted order in "*keys"
// and stores the key/value pairs in "*kvmap"
- void Finish(const Options& options,
- std::vector<std::string>* keys,
+ void Finish(const Options& options, std::vector<std::string>* keys,
KVMap* kvmap) {
*kvmap = data_;
keys->clear();
- for (KVMap::const_iterator it = data_.begin();
- it != data_.end();
- ++it) {
- keys->push_back(it->first);
+ for (const auto& kvp : data_) {
+ keys->push_back(kvp.first);
}
data_.clear();
Status s = FinishImpl(options, *kvmap);
@@ -170,32 +167,26 @@ class Constructor {
virtual Iterator* NewIterator() const = 0;
- virtual const KVMap& data() { return data_; }
+ const KVMap& data() const { return data_; }
- virtual DB* db() const { return NULL; } // Overridden in DBConstructor
+ virtual DB* db() const { return nullptr; } // Overridden in DBConstructor
private:
KVMap data_;
};
-class BlockConstructor: public Constructor {
+class BlockConstructor : public Constructor {
public:
explicit BlockConstructor(const Comparator* cmp)
- : Constructor(cmp),
- comparator_(cmp),
- block_(NULL) { }
- ~BlockConstructor() {
+ : Constructor(cmp), comparator_(cmp), block_(nullptr) {}
+ ~BlockConstructor() override { delete block_; }
+ Status FinishImpl(const Options& options, const KVMap& data) override {
delete block_;
- }
- virtual Status FinishImpl(const Options& options, const KVMap& data) {
- delete block_;
- block_ = NULL;
+ block_ = nullptr;
BlockBuilder builder(&options);
- for (KVMap::const_iterator it = data.begin();
- it != data.end();
- ++it) {
- builder.Add(it->first, it->second);
+ for (const auto& kvp : data) {
+ builder.Add(kvp.first, kvp.second);
}
// Open the block
data_ = builder.Finish().ToString();
@@ -206,36 +197,30 @@ class BlockConstructor: public Constructor {
block_ = new Block(contents);
return Status::OK();
}
- virtual Iterator* NewIterator() const {
+ Iterator* NewIterator() const override {
return block_->NewIterator(comparator_);
}
private:
- const Comparator* comparator_;
+ const Comparator* const comparator_;
std::string data_;
Block* block_;
BlockConstructor();
};
-class TableConstructor: public Constructor {
+class TableConstructor : public Constructor {
public:
TableConstructor(const Comparator* cmp)
- : Constructor(cmp),
- source_(NULL), table_(NULL) {
- }
- ~TableConstructor() {
- Reset();
- }
- virtual Status FinishImpl(const Options& options, const KVMap& data) {
+ : Constructor(cmp), source_(nullptr), table_(nullptr) {}
+ ~TableConstructor() override { Reset(); }
+ Status FinishImpl(const Options& options, const KVMap& data) override {
Reset();
StringSink sink;
TableBuilder builder(options, &sink);
- for (KVMap::const_iterator it = data.begin();
- it != data.end();
- ++it) {
- builder.Add(it->first, it->second);
+ for (const auto& kvp : data) {
+ builder.Add(kvp.first, kvp.second);
ASSERT_TRUE(builder.status().ok());
}
Status s = builder.Finish();
@@ -250,7 +235,7 @@ class TableConstructor: public Constructor {
return Table::Open(table_options, source_, sink.contents().size(), &table_);
}
- virtual Iterator* NewIterator() const {
+ Iterator* NewIterator() const override {
return table_->NewIterator(ReadOptions());
}
@@ -262,8 +247,8 @@ class TableConstructor: public Constructor {
void Reset() {
delete table_;
delete source_;
- table_ = NULL;
- source_ = NULL;
+ table_ = nullptr;
+ source_ = nullptr;
}
StringSource* source_;
@@ -273,23 +258,28 @@ class TableConstructor: public Constructor {
};
// A helper class that converts internal format keys into user keys
-class KeyConvertingIterator: public Iterator {
+class KeyConvertingIterator : public Iterator {
public:
- explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) { }
- virtual ~KeyConvertingIterator() { delete iter_; }
- virtual bool Valid() const { return iter_->Valid(); }
- virtual void Seek(const Slice& target) {
+ explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) {}
+
+ KeyConvertingIterator(const KeyConvertingIterator&) = delete;
+ KeyConvertingIterator& operator=(const KeyConvertingIterator&) = delete;
+
+ ~KeyConvertingIterator() override { delete iter_; }
+
+ bool Valid() const override { return iter_->Valid(); }
+ void Seek(const Slice& target) override {
ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
std::string encoded;
AppendInternalKey(&encoded, ikey);
iter_->Seek(encoded);
}
- virtual void SeekToFirst() { iter_->SeekToFirst(); }
- virtual void SeekToLast() { iter_->SeekToLast(); }
- virtual void Next() { iter_->Next(); }
- virtual void Prev() { iter_->Prev(); }
+ void SeekToFirst() override { iter_->SeekToFirst(); }
+ void SeekToLast() override { iter_->SeekToLast(); }
+ void Next() override { iter_->Next(); }
+ void Prev() override { iter_->Prev(); }
- virtual Slice key() const {
+ Slice key() const override {
assert(Valid());
ParsedInternalKey key;
if (!ParseInternalKey(iter_->key(), &key)) {
@@ -299,82 +289,68 @@ class KeyConvertingIterator: public Iterator {
return key.user_key;
}
- virtual Slice value() const { return iter_->value(); }
- virtual Status status() const {
+ Slice value() const override { return iter_->value(); }
+ Status status() const override {
return status_.ok() ? iter_->status() : status_;
}
private:
mutable Status status_;
Iterator* iter_;
-
- // No copying allowed
- KeyConvertingIterator(const KeyConvertingIterator&);
- void operator=(const KeyConvertingIterator&);
};
-class MemTableConstructor: public Constructor {
+class MemTableConstructor : public Constructor {
public:
explicit MemTableConstructor(const Comparator* cmp)
- : Constructor(cmp),
- internal_comparator_(cmp) {
+ : Constructor(cmp), internal_comparator_(cmp) {
memtable_ = new MemTable(internal_comparator_);
memtable_->Ref();
}
- ~MemTableConstructor() {
- memtable_->Unref();
- }
- virtual Status FinishImpl(const Options& options, const KVMap& data) {
+ ~MemTableConstructor() override { memtable_->Unref(); }
+ Status FinishImpl(const Options& options, const KVMap& data) override {
memtable_->Unref();
memtable_ = new MemTable(internal_comparator_);
memtable_->Ref();
int seq = 1;
- for (KVMap::const_iterator it = data.begin();
- it != data.end();
- ++it) {
- memtable_->Add(seq, kTypeValue, it->first, it->second);
+ for (const auto& kvp : data) {
+ memtable_->Add(seq, kTypeValue, kvp.first, kvp.second);
seq++;
}
return Status::OK();
}
- virtual Iterator* NewIterator() const {
+ Iterator* NewIterator() const override {
return new KeyConvertingIterator(memtable_->NewIterator());
}
private:
- InternalKeyComparator internal_comparator_;
+ const InternalKeyComparator internal_comparator_;
MemTable* memtable_;
};
-class DBConstructor: public Constructor {
+class DBConstructor : public Constructor {
public:
explicit DBConstructor(const Comparator* cmp)
- : Constructor(cmp),
- comparator_(cmp) {
- db_ = NULL;
+ : Constructor(cmp), comparator_(cmp) {
+ db_ = nullptr;
NewDB();
}
- ~DBConstructor() {
- delete db_;
- }
- virtual Status FinishImpl(const Options& options, const KVMap& data) {
+ ~DBConstructor() override { delete db_; }
+ Status FinishImpl(const Options& options, const KVMap& data) override {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
NewDB();
- for (KVMap::const_iterator it = data.begin();
- it != data.end();
- ++it) {
+ for (const auto& kvp : data) {
WriteBatch batch;
- batch.Put(it->first, it->second);
+ batch.Put(kvp.first, kvp.second);
ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok());
}
return Status::OK();
}
- virtual Iterator* NewIterator() const {
+ Iterator* NewIterator() const override {
return db_->NewIterator(ReadOptions());
}
- virtual DB* db() const { return db_; }
+ DB* db() const override { return db_; }
private:
void NewDB() {
@@ -392,16 +368,11 @@ class DBConstructor: public Constructor {
ASSERT_TRUE(status.ok()) << status.ToString();
}
- const Comparator* comparator_;
+ const Comparator* const comparator_;
DB* db_;
};
-enum TestType {
- TABLE_TEST,
- BLOCK_TEST,
- MEMTABLE_TEST,
- DB_TEST
-};
+enum TestType { TABLE_TEST, BLOCK_TEST, MEMTABLE_TEST, DB_TEST };
struct TestArgs {
TestType type;
@@ -410,37 +381,37 @@ struct TestArgs {
};
static const TestArgs kTestArgList[] = {
- { TABLE_TEST, false, 16 },
- { TABLE_TEST, false, 1 },
- { TABLE_TEST, false, 1024 },
- { TABLE_TEST, true, 16 },
- { TABLE_TEST, true, 1 },
- { TABLE_TEST, true, 1024 },
-
- { BLOCK_TEST, false, 16 },
- { BLOCK_TEST, false, 1 },
- { BLOCK_TEST, false, 1024 },
- { BLOCK_TEST, true, 16 },
- { BLOCK_TEST, true, 1 },
- { BLOCK_TEST, true, 1024 },
-
- // Restart interval does not matter for memtables
- { MEMTABLE_TEST, false, 16 },
- { MEMTABLE_TEST, true, 16 },
-
- // Do not bother with restart interval variations for DB
- { DB_TEST, false, 16 },
- { DB_TEST, true, 16 },
+ {TABLE_TEST, false, 16},
+ {TABLE_TEST, false, 1},
+ {TABLE_TEST, false, 1024},
+ {TABLE_TEST, true, 16},
+ {TABLE_TEST, true, 1},
+ {TABLE_TEST, true, 1024},
+
+ {BLOCK_TEST, false, 16},
+ {BLOCK_TEST, false, 1},
+ {BLOCK_TEST, false, 1024},
+ {BLOCK_TEST, true, 16},
+ {BLOCK_TEST, true, 1},
+ {BLOCK_TEST, true, 1024},
+
+ // Restart interval does not matter for memtables
+ {MEMTABLE_TEST, false, 16},
+ {MEMTABLE_TEST, true, 16},
+
+ // Do not bother with restart interval variations for DB
+ {DB_TEST, false, 16},
+ {DB_TEST, true, 16},
};
static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]);
class Harness {
public:
- Harness() : constructor_(NULL) { }
+ Harness() : constructor_(nullptr) {}
void Init(const TestArgs& args) {
delete constructor_;
- constructor_ = NULL;
+ constructor_ = nullptr;
options_ = Options();
options_.block_restart_interval = args.restart_interval;
@@ -466,9 +437,7 @@ class Harness {
}
}
- ~Harness() {
- delete constructor_;
- }
+ ~Harness() { delete constructor_; }
void Add(const std::string& key, const std::string& value) {
constructor_->Add(key, value);
@@ -490,8 +459,7 @@ class Harness {
ASSERT_TRUE(!iter->Valid());
iter->SeekToFirst();
for (KVMap::const_iterator model_iter = data.begin();
- model_iter != data.end();
- ++model_iter) {
+ model_iter != data.end(); ++model_iter) {
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
iter->Next();
}
@@ -505,8 +473,7 @@ class Harness {
ASSERT_TRUE(!iter->Valid());
iter->SeekToLast();
for (KVMap::const_reverse_iterator model_iter = data.rbegin();
- model_iter != data.rend();
- ++model_iter) {
+ model_iter != data.rend(); ++model_iter) {
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
iter->Prev();
}
@@ -514,8 +481,7 @@ class Harness {
delete iter;
}
- void TestRandomAccess(Random* rnd,
- const std::vector<std::string>& keys,
+ void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys,
const KVMap& data) {
static const bool kVerbose = false;
Iterator* iter = constructor_->NewIterator();
@@ -546,8 +512,8 @@ class Harness {
case 2: {
std::string key = PickRandomKey(rnd, keys);
model_iter = data.lower_bound(key);
- if (kVerbose) fprintf(stderr, "Seek '%s'\n",
- EscapeString(key).c_str());
+ if (kVerbose)
+ fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str());
iter->Seek(Slice(key));
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
break;
@@ -558,7 +524,7 @@ class Harness {
if (kVerbose) fprintf(stderr, "Prev\n");
iter->Prev();
if (model_iter == data.begin()) {
- model_iter = data.end(); // Wrap around to invalid value
+ model_iter = data.end(); // Wrap around to invalid value
} else {
--model_iter;
}
@@ -621,8 +587,8 @@ class Harness {
break;
case 1: {
// Attempt to return something smaller than an existing key
- if (result.size() > 0 && result[result.size()-1] > '\0') {
- result[result.size()-1]--;
+ if (!result.empty() && result[result.size() - 1] > '\0') {
+ result[result.size() - 1]--;
}
break;
}
@@ -636,7 +602,7 @@ class Harness {
}
}
- // Returns NULL if not running against a DB
+ // Returns nullptr if not running against a DB
DB* db() const { return constructor_->db(); }
private:
@@ -720,8 +686,8 @@ TEST(Harness, Randomized) {
for (int num_entries = 0; num_entries < 2000;
num_entries += (num_entries < 50 ? 1 : 200)) {
if ((num_entries % 10) == 0) {
- fprintf(stderr, "case %d of %d: num_entries = %d\n",
- (i + 1), int(kNumTestArgs), num_entries);
+ fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
+ int(kNumTestArgs), num_entries);
}
for (int e = 0; e < num_entries; e++) {
std::string v;
@@ -735,7 +701,7 @@ TEST(Harness, Randomized) {
TEST(Harness, RandomizedLongDB) {
Random rnd(test::RandomSeed());
- TestArgs args = { DB_TEST, false, 16 };
+ TestArgs args = {DB_TEST, false, 16};
Init(args);
int num_entries = 100000;
for (int e = 0; e < num_entries; e++) {
@@ -757,7 +723,7 @@ TEST(Harness, RandomizedLongDB) {
ASSERT_GT(files, 0);
}
-class MemTableTest { };
+class MemTableTest {};
TEST(MemTableTest, Simple) {
InternalKeyComparator cmp(BytewiseComparator());
@@ -774,8 +740,7 @@ TEST(MemTableTest, Simple) {
Iterator* iter = memtable->NewIterator();
iter->SeekToFirst();
while (iter->Valid()) {
- fprintf(stderr, "key: '%s' -> '%s'\n",
- iter->key().ToString().c_str(),
+ fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
iter->value().ToString().c_str());
iter->Next();
}
@@ -788,14 +753,13 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
bool result = (val >= low) && (val <= high);
if (!result) {
fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
- (unsigned long long)(val),
- (unsigned long long)(low),
+ (unsigned long long)(val), (unsigned long long)(low),
(unsigned long long)(high));
}
return result;
}
-class TableTest { };
+class TableTest {};
TEST(TableTest, ApproximateOffsetOfPlain) {
TableConstructor c(BytewiseComparator());
@@ -813,18 +777,17 @@ TEST(TableTest, ApproximateOffsetOfPlain) {
options.compression = kNoCompression;
c.Finish(options, &keys, &kvmap);
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
-
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
}
static bool SnappyCompressionSupported() {
@@ -855,7 +818,7 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {
// Expected upper and lower bounds of space used by compressible strings.
static const int kSlop = 1000; // Compressor effectiveness varies.
- const int expected = 2500; // 10000 * compression ratio (0.25)
+ const int expected = 2500; // 10000 * compression ratio (0.25)
const int min_z = expected - kSlop;
const int max_z = expected + kSlop;
@@ -871,6 +834,4 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
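Several loops in this test file switch from explicit KVMap::const_iterator loops to range-based for over the map; behaviour is identical, since a range-for over a std::map visits elements in key order, here by const reference. A tiny equivalent using a plain std::map with the default comparator rather than the test's STLLessThan:

#include <cstdio>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::string> data{{"k01", "a"}, {"k02", "b"}, {"k03", "c"}};

  // Old style: explicit const_iterator loop.
  for (std::map<std::string, std::string>::const_iterator it = data.begin();
       it != data.end(); ++it) {
    printf("iterator  %s -> %s\n", it->first.c_str(), it->second.c_str());
  }

  // New style from the patch: range-based for, same elements in the same order.
  for (const auto& kvp : data) {
    printf("range-for %s -> %s\n", kvp.first.c_str(), kvp.second.c_str());
  }
  return 0;
}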
diff --git a/src/leveldb/table/two_level_iterator.cc b/src/leveldb/table/two_level_iterator.cc
index 7822ebab9c..144790dd97 100644
--- a/src/leveldb/table/two_level_iterator.cc
+++ b/src/leveldb/table/two_level_iterator.cc
@@ -15,38 +15,33 @@ namespace {
typedef Iterator* (*BlockFunction)(void*, const ReadOptions&, const Slice&);
-class TwoLevelIterator: public Iterator {
+class TwoLevelIterator : public Iterator {
public:
- TwoLevelIterator(
- Iterator* index_iter,
- BlockFunction block_function,
- void* arg,
- const ReadOptions& options);
-
- virtual ~TwoLevelIterator();
-
- virtual void Seek(const Slice& target);
- virtual void SeekToFirst();
- virtual void SeekToLast();
- virtual void Next();
- virtual void Prev();
-
- virtual bool Valid() const {
- return data_iter_.Valid();
- }
- virtual Slice key() const {
+ TwoLevelIterator(Iterator* index_iter, BlockFunction block_function,
+ void* arg, const ReadOptions& options);
+
+ ~TwoLevelIterator() override;
+
+ void Seek(const Slice& target) override;
+ void SeekToFirst() override;
+ void SeekToLast() override;
+ void Next() override;
+ void Prev() override;
+
+ bool Valid() const override { return data_iter_.Valid(); }
+ Slice key() const override {
assert(Valid());
return data_iter_.key();
}
- virtual Slice value() const {
+ Slice value() const override {
assert(Valid());
return data_iter_.value();
}
- virtual Status status() const {
+ Status status() const override {
// It'd be nice if status() returned a const Status& instead of a Status
if (!index_iter_.status().ok()) {
return index_iter_.status();
- } else if (data_iter_.iter() != NULL && !data_iter_.status().ok()) {
+ } else if (data_iter_.iter() != nullptr && !data_iter_.status().ok()) {
return data_iter_.status();
} else {
return status_;
@@ -67,45 +62,41 @@ class TwoLevelIterator: public Iterator {
const ReadOptions options_;
Status status_;
IteratorWrapper index_iter_;
- IteratorWrapper data_iter_; // May be NULL
- // If data_iter_ is non-NULL, then "data_block_handle_" holds the
+ IteratorWrapper data_iter_; // May be nullptr
+ // If data_iter_ is non-null, then "data_block_handle_" holds the
// "index_value" passed to block_function_ to create the data_iter_.
std::string data_block_handle_;
};
-TwoLevelIterator::TwoLevelIterator(
- Iterator* index_iter,
- BlockFunction block_function,
- void* arg,
- const ReadOptions& options)
+TwoLevelIterator::TwoLevelIterator(Iterator* index_iter,
+ BlockFunction block_function, void* arg,
+ const ReadOptions& options)
: block_function_(block_function),
arg_(arg),
options_(options),
index_iter_(index_iter),
- data_iter_(NULL) {
-}
+ data_iter_(nullptr) {}
-TwoLevelIterator::~TwoLevelIterator() {
-}
+TwoLevelIterator::~TwoLevelIterator() = default;
void TwoLevelIterator::Seek(const Slice& target) {
index_iter_.Seek(target);
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.Seek(target);
+ if (data_iter_.iter() != nullptr) data_iter_.Seek(target);
SkipEmptyDataBlocksForward();
}
void TwoLevelIterator::SeekToFirst() {
index_iter_.SeekToFirst();
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.SeekToFirst();
+ if (data_iter_.iter() != nullptr) data_iter_.SeekToFirst();
SkipEmptyDataBlocksForward();
}
void TwoLevelIterator::SeekToLast() {
index_iter_.SeekToLast();
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.SeekToLast();
+ if (data_iter_.iter() != nullptr) data_iter_.SeekToLast();
SkipEmptyDataBlocksBackward();
}
@@ -121,44 +112,44 @@ void TwoLevelIterator::Prev() {
SkipEmptyDataBlocksBackward();
}
-
void TwoLevelIterator::SkipEmptyDataBlocksForward() {
- while (data_iter_.iter() == NULL || !data_iter_.Valid()) {
+ while (data_iter_.iter() == nullptr || !data_iter_.Valid()) {
// Move to next block
if (!index_iter_.Valid()) {
- SetDataIterator(NULL);
+ SetDataIterator(nullptr);
return;
}
index_iter_.Next();
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.SeekToFirst();
+ if (data_iter_.iter() != nullptr) data_iter_.SeekToFirst();
}
}
void TwoLevelIterator::SkipEmptyDataBlocksBackward() {
- while (data_iter_.iter() == NULL || !data_iter_.Valid()) {
+ while (data_iter_.iter() == nullptr || !data_iter_.Valid()) {
// Move to next block
if (!index_iter_.Valid()) {
- SetDataIterator(NULL);
+ SetDataIterator(nullptr);
return;
}
index_iter_.Prev();
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.SeekToLast();
+ if (data_iter_.iter() != nullptr) data_iter_.SeekToLast();
}
}
void TwoLevelIterator::SetDataIterator(Iterator* data_iter) {
- if (data_iter_.iter() != NULL) SaveError(data_iter_.status());
+ if (data_iter_.iter() != nullptr) SaveError(data_iter_.status());
data_iter_.Set(data_iter);
}
void TwoLevelIterator::InitDataBlock() {
if (!index_iter_.Valid()) {
- SetDataIterator(NULL);
+ SetDataIterator(nullptr);
} else {
Slice handle = index_iter_.value();
- if (data_iter_.iter() != NULL && handle.compare(data_block_handle_) == 0) {
+ if (data_iter_.iter() != nullptr &&
+ handle.compare(data_block_handle_) == 0) {
// data_iter_ is already constructed with this iterator, so
// no need to change anything
} else {
@@ -171,11 +162,9 @@ void TwoLevelIterator::InitDataBlock() {
} // namespace
-Iterator* NewTwoLevelIterator(
- Iterator* index_iter,
- BlockFunction block_function,
- void* arg,
- const ReadOptions& options) {
+Iterator* NewTwoLevelIterator(Iterator* index_iter,
+ BlockFunction block_function, void* arg,
+ const ReadOptions& options) {
return new TwoLevelIterator(index_iter, block_function, arg, options);
}
diff --git a/src/leveldb/table/two_level_iterator.h b/src/leveldb/table/two_level_iterator.h
index 629ca34525..81ffe809ac 100644
--- a/src/leveldb/table/two_level_iterator.h
+++ b/src/leveldb/table/two_level_iterator.h
@@ -20,14 +20,11 @@ struct ReadOptions;
//
// Uses a supplied function to convert an index_iter value into
// an iterator over the contents of the corresponding block.
-extern Iterator* NewTwoLevelIterator(
+Iterator* NewTwoLevelIterator(
Iterator* index_iter,
- Iterator* (*block_function)(
- void* arg,
- const ReadOptions& options,
- const Slice& index_value),
- void* arg,
- const ReadOptions& options);
+ Iterator* (*block_function)(void* arg, const ReadOptions& options,
+ const Slice& index_value),
+ void* arg, const ReadOptions& options);
} // namespace leveldb
diff --git a/src/leveldb/util/arena.cc b/src/leveldb/util/arena.cc
index 74078213ee..46e3b2eb8f 100644
--- a/src/leveldb/util/arena.cc
+++ b/src/leveldb/util/arena.cc
@@ -3,16 +3,13 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "util/arena.h"
-#include <assert.h>
namespace leveldb {
static const int kBlockSize = 4096;
-Arena::Arena() : memory_usage_(0) {
- alloc_ptr_ = NULL; // First allocation will allocate a block
- alloc_bytes_remaining_ = 0;
-}
+Arena::Arena()
+ : alloc_ptr_(nullptr), alloc_bytes_remaining_(0), memory_usage_(0) {}
Arena::~Arena() {
for (size_t i = 0; i < blocks_.size(); i++) {
@@ -40,8 +37,9 @@ char* Arena::AllocateFallback(size_t bytes) {
char* Arena::AllocateAligned(size_t bytes) {
const int align = (sizeof(void*) > 8) ? sizeof(void*) : 8;
- assert((align & (align-1)) == 0); // Pointer size should be a power of 2
- size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align-1);
+ static_assert((align & (align - 1)) == 0,
+ "Pointer size should be a power of 2");
+ size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align - 1);
size_t slop = (current_mod == 0 ? 0 : align - current_mod);
size_t needed = bytes + slop;
char* result;
@@ -53,15 +51,15 @@ char* Arena::AllocateAligned(size_t bytes) {
// AllocateFallback always returned aligned memory
result = AllocateFallback(bytes);
}
- assert((reinterpret_cast<uintptr_t>(result) & (align-1)) == 0);
+ assert((reinterpret_cast<uintptr_t>(result) & (align - 1)) == 0);
return result;
}
char* Arena::AllocateNewBlock(size_t block_bytes) {
char* result = new char[block_bytes];
blocks_.push_back(result);
- memory_usage_.NoBarrier_Store(
- reinterpret_cast<void*>(MemoryUsage() + block_bytes + sizeof(char*)));
+ memory_usage_.fetch_add(block_bytes + sizeof(char*),
+ std::memory_order_relaxed);
return result;
}
diff --git a/src/leveldb/util/arena.h b/src/leveldb/util/arena.h
index 48bab33741..68fc55d4dd 100644
--- a/src/leveldb/util/arena.h
+++ b/src/leveldb/util/arena.h
@@ -5,29 +5,33 @@
#ifndef STORAGE_LEVELDB_UTIL_ARENA_H_
#define STORAGE_LEVELDB_UTIL_ARENA_H_
+#include <atomic>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
#include <vector>
-#include <assert.h>
-#include <stddef.h>
-#include <stdint.h>
-#include "port/port.h"
namespace leveldb {
class Arena {
public:
Arena();
+
+ Arena(const Arena&) = delete;
+ Arena& operator=(const Arena&) = delete;
+
~Arena();
// Return a pointer to a newly allocated memory block of "bytes" bytes.
char* Allocate(size_t bytes);
- // Allocate memory with the normal alignment guarantees provided by malloc
+ // Allocate memory with the normal alignment guarantees provided by malloc.
char* AllocateAligned(size_t bytes);
// Returns an estimate of the total memory usage of data allocated
// by the arena.
size_t MemoryUsage() const {
- return reinterpret_cast<uintptr_t>(memory_usage_.NoBarrier_Load());
+ return memory_usage_.load(std::memory_order_relaxed);
}
private:
@@ -42,11 +46,10 @@ class Arena {
std::vector<char*> blocks_;
// Total memory usage of the arena.
- port::AtomicPointer memory_usage_;
-
- // No copying allowed
- Arena(const Arena&);
- void operator=(const Arena&);
+ //
+ // TODO(costan): This member is accessed via atomics, but the others are
+ // accessed without any locking. Is this OK?
+ std::atomic<size_t> memory_usage_;
};
inline char* Arena::Allocate(size_t bytes) {
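(Editor's note, not part of the patch.) The AllocateAligned() hunk above swaps the runtime assert for a static_assert but keeps the same arithmetic: round the bump pointer up to max(sizeof(void*), 8), which must be a power of two. A standalone sketch of that slop computation, with a hypothetical helper name:

#include <cstddef>
#include <cstdint>

// Hypothetical helper mirroring the arithmetic in Arena::AllocateAligned().
std::size_t AlignmentSlop(const char* alloc_ptr) {
  constexpr std::size_t align = (sizeof(void*) > 8) ? sizeof(void*) : 8;
  static_assert((align & (align - 1)) == 0,
                "Pointer size should be a power of 2");
  const std::size_t current_mod =
      reinterpret_cast<std::uintptr_t>(alloc_ptr) & (align - 1);
  return current_mod == 0 ? 0 : align - current_mod;  // bytes skipped to align
}

The caller then allocates bytes + slop and returns alloc_ptr + slop, exactly as in the hunk above.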
diff --git a/src/leveldb/util/arena_test.cc b/src/leveldb/util/arena_test.cc
index 58e870ec44..e917228f42 100644
--- a/src/leveldb/util/arena_test.cc
+++ b/src/leveldb/util/arena_test.cc
@@ -9,14 +9,12 @@
namespace leveldb {
-class ArenaTest { };
+class ArenaTest {};
-TEST(ArenaTest, Empty) {
- Arena arena;
-}
+TEST(ArenaTest, Empty) { Arena arena; }
TEST(ArenaTest, Simple) {
- std::vector<std::pair<size_t, char*> > allocated;
+ std::vector<std::pair<size_t, char*>> allocated;
Arena arena;
const int N = 100000;
size_t bytes = 0;
@@ -26,8 +24,9 @@ TEST(ArenaTest, Simple) {
if (i % (N / 10) == 0) {
s = i;
} else {
- s = rnd.OneIn(4000) ? rnd.Uniform(6000) :
- (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
+ s = rnd.OneIn(4000)
+ ? rnd.Uniform(6000)
+ : (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
}
if (s == 0) {
// Our arena disallows size 0 allocations.
@@ -47,7 +46,7 @@ TEST(ArenaTest, Simple) {
bytes += s;
allocated.push_back(std::make_pair(s, r));
ASSERT_GE(arena.MemoryUsage(), bytes);
- if (i > N/10) {
+ if (i > N / 10) {
ASSERT_LE(arena.MemoryUsage(), bytes * 1.10);
}
}
@@ -63,6 +62,4 @@ TEST(ArenaTest, Simple) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/bloom.cc b/src/leveldb/util/bloom.cc
index bf3e4ca6e9..87547a7e62 100644
--- a/src/leveldb/util/bloom.cc
+++ b/src/leveldb/util/bloom.cc
@@ -15,24 +15,17 @@ static uint32_t BloomHash(const Slice& key) {
}
class BloomFilterPolicy : public FilterPolicy {
- private:
- size_t bits_per_key_;
- size_t k_;
-
public:
- explicit BloomFilterPolicy(int bits_per_key)
- : bits_per_key_(bits_per_key) {
+ explicit BloomFilterPolicy(int bits_per_key) : bits_per_key_(bits_per_key) {
// We intentionally round down to reduce probing cost a little bit
k_ = static_cast<size_t>(bits_per_key * 0.69); // 0.69 =~ ln(2)
if (k_ < 1) k_ = 1;
if (k_ > 30) k_ = 30;
}
- virtual const char* Name() const {
- return "leveldb.BuiltinBloomFilter2";
- }
+ const char* Name() const override { return "leveldb.BuiltinBloomFilter2"; }
- virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
+ void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
// Compute bloom filter size (in both bits and bytes)
size_t bits = n * bits_per_key_;
@@ -54,13 +47,13 @@ class BloomFilterPolicy : public FilterPolicy {
const uint32_t delta = (h >> 17) | (h << 15); // Rotate right 17 bits
for (size_t j = 0; j < k_; j++) {
const uint32_t bitpos = h % bits;
- array[bitpos/8] |= (1 << (bitpos % 8));
+ array[bitpos / 8] |= (1 << (bitpos % 8));
h += delta;
}
}
}
- virtual bool KeyMayMatch(const Slice& key, const Slice& bloom_filter) const {
+ bool KeyMayMatch(const Slice& key, const Slice& bloom_filter) const override {
const size_t len = bloom_filter.size();
if (len < 2) return false;
@@ -69,7 +62,7 @@ class BloomFilterPolicy : public FilterPolicy {
// Use the encoded k so that we can read filters generated by
// bloom filters created using different parameters.
- const size_t k = array[len-1];
+ const size_t k = array[len - 1];
if (k > 30) {
// Reserved for potentially new encodings for short bloom filters.
// Consider it a match.
@@ -80,13 +73,17 @@ class BloomFilterPolicy : public FilterPolicy {
const uint32_t delta = (h >> 17) | (h << 15); // Rotate right 17 bits
for (size_t j = 0; j < k; j++) {
const uint32_t bitpos = h % bits;
- if ((array[bitpos/8] & (1 << (bitpos % 8))) == 0) return false;
+ if ((array[bitpos / 8] & (1 << (bitpos % 8))) == 0) return false;
h += delta;
}
return true;
}
+
+ private:
+ size_t bits_per_key_;
+ size_t k_;
};
-}
+} // namespace
const FilterPolicy* NewBloomFilterPolicy(int bits_per_key) {
return new BloomFilterPolicy(bits_per_key);
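(Editor's note, not part of the patch.) CreateFilter() and KeyMayMatch() above derive all k probe positions from a single 32-bit hash by repeatedly adding a rotated copy of it, as in the reformatted loops. A self-contained sketch of the bit-setting half, with hypothetical names:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical helper mirroring the probe loop in CreateFilter().
void SetBloomBits(uint32_t h, std::size_t k, uint32_t bits,
                  std::vector<uint8_t>* array) {
  const uint32_t delta = (h >> 17) | (h << 15);  // rotate right 17 bits
  for (std::size_t j = 0; j < k; j++) {
    const uint32_t bitpos = h % bits;
    (*array)[bitpos / 8] |= (1 << (bitpos % 8));
    h += delta;
  }
}

KeyMayMatch() runs the same loop but tests each bit instead of setting it, returning false as soon as one probe position is clear.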
diff --git a/src/leveldb/util/bloom_test.cc b/src/leveldb/util/bloom_test.cc
index 1b87a2be3f..436daa9e99 100644
--- a/src/leveldb/util/bloom_test.cc
+++ b/src/leveldb/util/bloom_test.cc
@@ -19,26 +19,17 @@ static Slice Key(int i, char* buffer) {
}
class BloomTest {
- private:
- const FilterPolicy* policy_;
- std::string filter_;
- std::vector<std::string> keys_;
-
public:
- BloomTest() : policy_(NewBloomFilterPolicy(10)) { }
+ BloomTest() : policy_(NewBloomFilterPolicy(10)) {}
- ~BloomTest() {
- delete policy_;
- }
+ ~BloomTest() { delete policy_; }
void Reset() {
keys_.clear();
filter_.clear();
}
- void Add(const Slice& s) {
- keys_.push_back(s.ToString());
- }
+ void Add(const Slice& s) { keys_.push_back(s.ToString()); }
void Build() {
std::vector<Slice> key_slices;
@@ -52,16 +43,14 @@ class BloomTest {
if (kVerbose >= 2) DumpFilter();
}
- size_t FilterSize() const {
- return filter_.size();
- }
+ size_t FilterSize() const { return filter_.size(); }
void DumpFilter() {
fprintf(stderr, "F(");
- for (size_t i = 0; i+1 < filter_.size(); i++) {
+ for (size_t i = 0; i + 1 < filter_.size(); i++) {
const unsigned int c = static_cast<unsigned int>(filter_[i]);
for (int j = 0; j < 8; j++) {
- fprintf(stderr, "%c", (c & (1 <<j)) ? '1' : '.');
+ fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.');
}
}
fprintf(stderr, ")\n");
@@ -84,11 +73,16 @@ class BloomTest {
}
return result / 10000.0;
}
+
+ private:
+ const FilterPolicy* policy_;
+ std::string filter_;
+ std::vector<std::string> keys_;
};
TEST(BloomTest, EmptyFilter) {
- ASSERT_TRUE(! Matches("hello"));
- ASSERT_TRUE(! Matches("world"));
+ ASSERT_TRUE(!Matches("hello"));
+ ASSERT_TRUE(!Matches("world"));
}
TEST(BloomTest, Small) {
@@ -96,8 +90,8 @@ TEST(BloomTest, Small) {
Add("world");
ASSERT_TRUE(Matches("hello"));
ASSERT_TRUE(Matches("world"));
- ASSERT_TRUE(! Matches("x"));
- ASSERT_TRUE(! Matches("foo"));
+ ASSERT_TRUE(!Matches("x"));
+ ASSERT_TRUE(!Matches("foo"));
}
static int NextLength(int length) {
@@ -140,23 +134,23 @@ TEST(BloomTest, VaryingLengths) {
double rate = FalsePositiveRate();
if (kVerbose >= 1) {
fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
- rate*100.0, length, static_cast<int>(FilterSize()));
+ rate * 100.0, length, static_cast<int>(FilterSize()));
}
- ASSERT_LE(rate, 0.02); // Must not be over 2%
- if (rate > 0.0125) mediocre_filters++; // Allowed, but not too often
- else good_filters++;
+ ASSERT_LE(rate, 0.02); // Must not be over 2%
+ if (rate > 0.0125)
+ mediocre_filters++; // Allowed, but not too often
+ else
+ good_filters++;
}
if (kVerbose >= 1) {
- fprintf(stderr, "Filters: %d good, %d mediocre\n",
- good_filters, mediocre_filters);
+ fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters,
+ mediocre_filters);
}
- ASSERT_LE(mediocre_filters, good_filters/5);
+ ASSERT_LE(mediocre_filters, good_filters / 5);
}
// Different bits-per-byte
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/cache.cc b/src/leveldb/util/cache.cc
index ce46886171..12de306cad 100644
--- a/src/leveldb/util/cache.cc
+++ b/src/leveldb/util/cache.cc
@@ -8,13 +8,13 @@
#include "leveldb/cache.h"
#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/hash.h"
#include "util/mutexlock.h"
namespace leveldb {
-Cache::~Cache() {
-}
+Cache::~Cache() {}
namespace {
@@ -45,21 +45,19 @@ struct LRUHandle {
LRUHandle* next_hash;
LRUHandle* next;
LRUHandle* prev;
- size_t charge; // TODO(opt): Only allow uint32_t?
+ size_t charge; // TODO(opt): Only allow uint32_t?
size_t key_length;
- bool in_cache; // Whether entry is in the cache.
- uint32_t refs; // References, including cache reference, if present.
- uint32_t hash; // Hash of key(); used for fast sharding and comparisons
- char key_data[1]; // Beginning of key
+ bool in_cache; // Whether entry is in the cache.
+ uint32_t refs; // References, including cache reference, if present.
+ uint32_t hash; // Hash of key(); used for fast sharding and comparisons
+ char key_data[1]; // Beginning of key
Slice key() const {
- // For cheaper lookups, we allow a temporary Handle object
- // to store a pointer to a key in "value".
- if (next == this) {
- return *(reinterpret_cast<Slice*>(value));
- } else {
- return Slice(key_data, key_length);
- }
+ // next_ is only equal to this if the LRU handle is the list head of an
+ // empty list. List heads never have meaningful keys.
+ assert(next != this);
+
+ return Slice(key_data, key_length);
}
};
@@ -70,7 +68,7 @@ struct LRUHandle {
// 4.4.3's builtin hashtable.
class HandleTable {
public:
- HandleTable() : length_(0), elems_(0), list_(NULL) { Resize(); }
+ HandleTable() : length_(0), elems_(0), list_(nullptr) { Resize(); }
~HandleTable() { delete[] list_; }
LRUHandle* Lookup(const Slice& key, uint32_t hash) {
@@ -80,9 +78,9 @@ class HandleTable {
LRUHandle* Insert(LRUHandle* h) {
LRUHandle** ptr = FindPointer(h->key(), h->hash);
LRUHandle* old = *ptr;
- h->next_hash = (old == NULL ? NULL : old->next_hash);
+ h->next_hash = (old == nullptr ? nullptr : old->next_hash);
*ptr = h;
- if (old == NULL) {
+ if (old == nullptr) {
++elems_;
if (elems_ > length_) {
// Since each cache entry is fairly large, we aim for a small
@@ -96,7 +94,7 @@ class HandleTable {
LRUHandle* Remove(const Slice& key, uint32_t hash) {
LRUHandle** ptr = FindPointer(key, hash);
LRUHandle* result = *ptr;
- if (result != NULL) {
+ if (result != nullptr) {
*ptr = result->next_hash;
--elems_;
}
@@ -115,8 +113,7 @@ class HandleTable {
// pointer to the trailing slot in the corresponding linked list.
LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
LRUHandle** ptr = &list_[hash & (length_ - 1)];
- while (*ptr != NULL &&
- ((*ptr)->hash != hash || key != (*ptr)->key())) {
+ while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
ptr = &(*ptr)->next_hash;
}
return ptr;
@@ -132,7 +129,7 @@ class HandleTable {
uint32_t count = 0;
for (uint32_t i = 0; i < length_; i++) {
LRUHandle* h = list_[i];
- while (h != NULL) {
+ while (h != nullptr) {
LRUHandle* next = h->next_hash;
uint32_t hash = h->hash;
LRUHandle** ptr = &new_list[hash & (new_length - 1)];
@@ -159,8 +156,8 @@ class LRUCache {
void SetCapacity(size_t capacity) { capacity_ = capacity; }
// Like Cache methods, but with an extra "hash" parameter.
- Cache::Handle* Insert(const Slice& key, uint32_t hash,
- void* value, size_t charge,
+ Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value,
+ size_t charge,
void (*deleter)(const Slice& key, void* value));
Cache::Handle* Lookup(const Slice& key, uint32_t hash);
void Release(Cache::Handle* handle);
@@ -173,32 +170,31 @@ class LRUCache {
private:
void LRU_Remove(LRUHandle* e);
- void LRU_Append(LRUHandle*list, LRUHandle* e);
+ void LRU_Append(LRUHandle* list, LRUHandle* e);
void Ref(LRUHandle* e);
void Unref(LRUHandle* e);
- bool FinishErase(LRUHandle* e);
+ bool FinishErase(LRUHandle* e) EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Initialized before use.
size_t capacity_;
// mutex_ protects the following state.
mutable port::Mutex mutex_;
- size_t usage_;
+ size_t usage_ GUARDED_BY(mutex_);
// Dummy head of LRU list.
// lru.prev is newest entry, lru.next is oldest entry.
// Entries have refs==1 and in_cache==true.
- LRUHandle lru_;
+ LRUHandle lru_ GUARDED_BY(mutex_);
// Dummy head of in-use list.
// Entries are in use by clients, and have refs >= 2 and in_cache==true.
- LRUHandle in_use_;
+ LRUHandle in_use_ GUARDED_BY(mutex_);
- HandleTable table_;
+ HandleTable table_ GUARDED_BY(mutex_);
};
-LRUCache::LRUCache()
- : usage_(0) {
+LRUCache::LRUCache() : capacity_(0), usage_(0) {
// Make empty circular linked lists.
lru_.next = &lru_;
lru_.prev = &lru_;
@@ -208,7 +204,7 @@ LRUCache::LRUCache()
LRUCache::~LRUCache() {
assert(in_use_.next == &in_use_); // Error if caller has an unreleased handle
- for (LRUHandle* e = lru_.next; e != &lru_; ) {
+ for (LRUHandle* e = lru_.next; e != &lru_;) {
LRUHandle* next = e->next;
assert(e->in_cache);
e->in_cache = false;
@@ -229,11 +225,12 @@ void LRUCache::Ref(LRUHandle* e) {
void LRUCache::Unref(LRUHandle* e) {
assert(e->refs > 0);
e->refs--;
- if (e->refs == 0) { // Deallocate.
+ if (e->refs == 0) { // Deallocate.
assert(!e->in_cache);
(*e->deleter)(e->key(), e->value);
free(e);
- } else if (e->in_cache && e->refs == 1) { // No longer in use; move to lru_ list.
+ } else if (e->in_cache && e->refs == 1) {
+ // No longer in use; move to lru_ list.
LRU_Remove(e);
LRU_Append(&lru_, e);
}
@@ -255,7 +252,7 @@ void LRUCache::LRU_Append(LRUHandle* list, LRUHandle* e) {
Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
MutexLock l(&mutex_);
LRUHandle* e = table_.Lookup(key, hash);
- if (e != NULL) {
+ if (e != nullptr) {
Ref(e);
}
return reinterpret_cast<Cache::Handle*>(e);
@@ -266,13 +263,14 @@ void LRUCache::Release(Cache::Handle* handle) {
Unref(reinterpret_cast<LRUHandle*>(handle));
}
-Cache::Handle* LRUCache::Insert(
- const Slice& key, uint32_t hash, void* value, size_t charge,
- void (*deleter)(const Slice& key, void* value)) {
+Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value,
+ size_t charge,
+ void (*deleter)(const Slice& key,
+ void* value)) {
MutexLock l(&mutex_);
- LRUHandle* e = reinterpret_cast<LRUHandle*>(
- malloc(sizeof(LRUHandle)-1 + key.size()));
+ LRUHandle* e =
+ reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
e->value = value;
e->deleter = deleter;
e->charge = charge;
@@ -288,8 +286,10 @@ Cache::Handle* LRUCache::Insert(
LRU_Append(&in_use_, e);
usage_ += charge;
FinishErase(table_.Insert(e));
- } // else don't cache. (Tests use capacity_==0 to turn off caching.)
-
+ } else { // don't cache. (capacity_==0 is supported and turns off caching.)
+ // next is read by key() in an assert, so it must be initialized
+ e->next = nullptr;
+ }
while (usage_ > capacity_ && lru_.next != &lru_) {
LRUHandle* old = lru_.next;
assert(old->refs == 1);
@@ -302,17 +302,17 @@ Cache::Handle* LRUCache::Insert(
return reinterpret_cast<Cache::Handle*>(e);
}
-// If e != NULL, finish removing *e from the cache; it has already been removed
-// from the hash table. Return whether e != NULL. Requires mutex_ held.
+// If e != nullptr, finish removing *e from the cache; it has already been
+// removed from the hash table. Return whether e != nullptr.
bool LRUCache::FinishErase(LRUHandle* e) {
- if (e != NULL) {
+ if (e != nullptr) {
assert(e->in_cache);
LRU_Remove(e);
e->in_cache = false;
usage_ -= e->charge;
Unref(e);
}
- return e != NULL;
+ return e != nullptr;
}
void LRUCache::Erase(const Slice& key, uint32_t hash) {
@@ -345,49 +345,46 @@ class ShardedLRUCache : public Cache {
return Hash(s.data(), s.size(), 0);
}
- static uint32_t Shard(uint32_t hash) {
- return hash >> (32 - kNumShardBits);
- }
+ static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); }
public:
- explicit ShardedLRUCache(size_t capacity)
- : last_id_(0) {
+ explicit ShardedLRUCache(size_t capacity) : last_id_(0) {
const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;
for (int s = 0; s < kNumShards; s++) {
shard_[s].SetCapacity(per_shard);
}
}
- virtual ~ShardedLRUCache() { }
- virtual Handle* Insert(const Slice& key, void* value, size_t charge,
- void (*deleter)(const Slice& key, void* value)) {
+ ~ShardedLRUCache() override {}
+ Handle* Insert(const Slice& key, void* value, size_t charge,
+ void (*deleter)(const Slice& key, void* value)) override {
const uint32_t hash = HashSlice(key);
return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter);
}
- virtual Handle* Lookup(const Slice& key) {
+ Handle* Lookup(const Slice& key) override {
const uint32_t hash = HashSlice(key);
return shard_[Shard(hash)].Lookup(key, hash);
}
- virtual void Release(Handle* handle) {
+ void Release(Handle* handle) override {
LRUHandle* h = reinterpret_cast<LRUHandle*>(handle);
shard_[Shard(h->hash)].Release(handle);
}
- virtual void Erase(const Slice& key) {
+ void Erase(const Slice& key) override {
const uint32_t hash = HashSlice(key);
shard_[Shard(hash)].Erase(key, hash);
}
- virtual void* Value(Handle* handle) {
+ void* Value(Handle* handle) override {
return reinterpret_cast<LRUHandle*>(handle)->value;
}
- virtual uint64_t NewId() {
+ uint64_t NewId() override {
MutexLock l(&id_mutex_);
return ++(last_id_);
}
- virtual void Prune() {
+ void Prune() override {
for (int s = 0; s < kNumShards; s++) {
shard_[s].Prune();
}
}
- virtual size_t TotalCharge() const {
+ size_t TotalCharge() const override {
size_t total = 0;
for (int s = 0; s < kNumShards; s++) {
total += shard_[s].TotalCharge();
@@ -398,8 +395,6 @@ class ShardedLRUCache : public Cache {
} // end anonymous namespace
-Cache* NewLRUCache(size_t capacity) {
- return new ShardedLRUCache(capacity);
-}
+Cache* NewLRUCache(size_t capacity) { return new ShardedLRUCache(capacity); }
} // namespace leveldb
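(Editor's note, not part of the patch.) ShardedLRUCache above picks a shard from the top bits of the key hash, so each internal LRUCache is protected by its own mutex. The shard count is not shown in this hunk; upstream leveldb's util/cache.cc uses kNumShardBits = 4 (16 shards), which this sketch assumes:

#include <cstddef>
#include <cstdint>

// Assumed constant: upstream util/cache.cc defines kNumShardBits = 4.
constexpr int kNumShardBits = 4;
constexpr int kNumShards = 1 << kNumShardBits;

// Mirrors ShardedLRUCache::Shard(): the hash's top bits select the shard.
uint32_t ShardFor(uint32_t hash) { return hash >> (32 - kNumShardBits); }

// Mirrors the constructor above: capacity is split evenly, rounded up.
std::size_t PerShardCapacity(std::size_t capacity) {
  return (capacity + (kNumShards - 1)) / kNumShards;
}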
diff --git a/src/leveldb/util/cache_test.cc b/src/leveldb/util/cache_test.cc
index 468f7a6425..974334b9f8 100644
--- a/src/leveldb/util/cache_test.cc
+++ b/src/leveldb/util/cache_test.cc
@@ -25,8 +25,6 @@ static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); }
class CacheTest {
public:
- static CacheTest* current_;
-
static void Deleter(const Slice& key, void* v) {
current_->deleted_keys_.push_back(DecodeKey(key));
current_->deleted_values_.push_back(DecodeValue(v));
@@ -37,18 +35,14 @@ class CacheTest {
std::vector<int> deleted_values_;
Cache* cache_;
- CacheTest() : cache_(NewLRUCache(kCacheSize)) {
- current_ = this;
- }
+ CacheTest() : cache_(NewLRUCache(kCacheSize)) { current_ = this; }
- ~CacheTest() {
- delete cache_;
- }
+ ~CacheTest() { delete cache_; }
int Lookup(int key) {
Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
- const int r = (handle == NULL) ? -1 : DecodeValue(cache_->Value(handle));
- if (handle != NULL) {
+ const int r = (handle == nullptr) ? -1 : DecodeValue(cache_->Value(handle));
+ if (handle != nullptr) {
cache_->Release(handle);
}
return r;
@@ -64,9 +58,9 @@ class CacheTest {
&CacheTest::Deleter);
}
- void Erase(int key) {
- cache_->Erase(EncodeKey(key));
- }
+ void Erase(int key) { cache_->Erase(EncodeKey(key)); }
+
+ static CacheTest* current_;
};
CacheTest* CacheTest::current_;
@@ -75,18 +69,18 @@ TEST(CacheTest, HitAndMiss) {
Insert(100, 101);
ASSERT_EQ(101, Lookup(100));
- ASSERT_EQ(-1, Lookup(200));
- ASSERT_EQ(-1, Lookup(300));
+ ASSERT_EQ(-1, Lookup(200));
+ ASSERT_EQ(-1, Lookup(300));
Insert(200, 201);
ASSERT_EQ(101, Lookup(100));
ASSERT_EQ(201, Lookup(200));
- ASSERT_EQ(-1, Lookup(300));
+ ASSERT_EQ(-1, Lookup(300));
Insert(100, 102);
ASSERT_EQ(102, Lookup(100));
ASSERT_EQ(201, Lookup(200));
- ASSERT_EQ(-1, Lookup(300));
+ ASSERT_EQ(-1, Lookup(300));
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
@@ -100,14 +94,14 @@ TEST(CacheTest, Erase) {
Insert(100, 101);
Insert(200, 201);
Erase(100);
- ASSERT_EQ(-1, Lookup(100));
+ ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
ASSERT_EQ(101, deleted_values_[0]);
Erase(100);
- ASSERT_EQ(-1, Lookup(100));
+ ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(1, deleted_keys_.size());
}
@@ -146,8 +140,8 @@ TEST(CacheTest, EvictionPolicy) {
// Frequently used entry must be kept around,
// as must things that are still in use.
for (int i = 0; i < kCacheSize + 100; i++) {
- Insert(1000+i, 2000+i);
- ASSERT_EQ(2000+i, Lookup(1000+i));
+ Insert(1000 + i, 2000 + i);
+ ASSERT_EQ(2000 + i, Lookup(1000 + i));
ASSERT_EQ(101, Lookup(100));
}
ASSERT_EQ(101, Lookup(100));
@@ -160,12 +154,12 @@ TEST(CacheTest, UseExceedsCacheSize) {
// Overfill the cache, keeping handles on all inserted entries.
std::vector<Cache::Handle*> h;
for (int i = 0; i < kCacheSize + 100; i++) {
- h.push_back(InsertAndReturnHandle(1000+i, 2000+i));
+ h.push_back(InsertAndReturnHandle(1000 + i, 2000 + i));
}
// Check that all the entries can be found in the cache.
for (int i = 0; i < h.size(); i++) {
- ASSERT_EQ(2000+i, Lookup(1000+i));
+ ASSERT_EQ(2000 + i, Lookup(1000 + i));
}
for (int i = 0; i < h.size(); i++) {
@@ -181,9 +175,9 @@ TEST(CacheTest, HeavyEntries) {
const int kHeavy = 10;
int added = 0;
int index = 0;
- while (added < 2*kCacheSize) {
+ while (added < 2 * kCacheSize) {
const int weight = (index & 1) ? kLight : kHeavy;
- Insert(index, 1000+index, weight);
+ Insert(index, 1000 + index, weight);
added += weight;
index++;
}
@@ -194,10 +188,10 @@ TEST(CacheTest, HeavyEntries) {
int r = Lookup(i);
if (r >= 0) {
cached_weight += weight;
- ASSERT_EQ(1000+i, r);
+ ASSERT_EQ(1000 + i, r);
}
}
- ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10);
+ ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}
TEST(CacheTest, NewId) {
@@ -219,8 +213,14 @@ TEST(CacheTest, Prune) {
ASSERT_EQ(-1, Lookup(2));
}
-} // namespace leveldb
+TEST(CacheTest, ZeroSizeCache) {
+ delete cache_;
+ cache_ = NewLRUCache(0);
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
+ Insert(1, 100);
+ ASSERT_EQ(-1, Lookup(1));
}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/coding.cc b/src/leveldb/util/coding.cc
index 21e3186d5d..df3fa10f0d 100644
--- a/src/leveldb/util/coding.cc
+++ b/src/leveldb/util/coding.cc
@@ -6,32 +6,6 @@
namespace leveldb {
-void EncodeFixed32(char* buf, uint32_t value) {
- if (port::kLittleEndian) {
- memcpy(buf, &value, sizeof(value));
- } else {
- buf[0] = value & 0xff;
- buf[1] = (value >> 8) & 0xff;
- buf[2] = (value >> 16) & 0xff;
- buf[3] = (value >> 24) & 0xff;
- }
-}
-
-void EncodeFixed64(char* buf, uint64_t value) {
- if (port::kLittleEndian) {
- memcpy(buf, &value, sizeof(value));
- } else {
- buf[0] = value & 0xff;
- buf[1] = (value >> 8) & 0xff;
- buf[2] = (value >> 16) & 0xff;
- buf[3] = (value >> 24) & 0xff;
- buf[4] = (value >> 32) & 0xff;
- buf[5] = (value >> 40) & 0xff;
- buf[6] = (value >> 48) & 0xff;
- buf[7] = (value >> 56) & 0xff;
- }
-}
-
void PutFixed32(std::string* dst, uint32_t value) {
char buf[sizeof(value)];
EncodeFixed32(buf, value);
@@ -46,28 +20,28 @@ void PutFixed64(std::string* dst, uint64_t value) {
char* EncodeVarint32(char* dst, uint32_t v) {
// Operate on characters as unsigneds
- unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(dst);
static const int B = 128;
- if (v < (1<<7)) {
+ if (v < (1 << 7)) {
*(ptr++) = v;
- } else if (v < (1<<14)) {
+ } else if (v < (1 << 14)) {
*(ptr++) = v | B;
- *(ptr++) = v>>7;
- } else if (v < (1<<21)) {
+ *(ptr++) = v >> 7;
+ } else if (v < (1 << 21)) {
*(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = v>>14;
- } else if (v < (1<<28)) {
+ *(ptr++) = (v >> 7) | B;
+ *(ptr++) = v >> 14;
+ } else if (v < (1 << 28)) {
*(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = (v>>14) | B;
- *(ptr++) = v>>21;
+ *(ptr++) = (v >> 7) | B;
+ *(ptr++) = (v >> 14) | B;
+ *(ptr++) = v >> 21;
} else {
*(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = (v>>14) | B;
- *(ptr++) = (v>>21) | B;
- *(ptr++) = v>>28;
+ *(ptr++) = (v >> 7) | B;
+ *(ptr++) = (v >> 14) | B;
+ *(ptr++) = (v >> 21) | B;
+ *(ptr++) = v >> 28;
}
return reinterpret_cast<char*>(ptr);
}
@@ -80,12 +54,12 @@ void PutVarint32(std::string* dst, uint32_t v) {
char* EncodeVarint64(char* dst, uint64_t v) {
static const int B = 128;
- unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(dst);
while (v >= B) {
- *(ptr++) = (v & (B-1)) | B;
+ *(ptr++) = v | B;
v >>= 7;
}
- *(ptr++) = static_cast<unsigned char>(v);
+ *(ptr++) = static_cast<uint8_t>(v);
return reinterpret_cast<char*>(ptr);
}
@@ -109,12 +83,11 @@ int VarintLength(uint64_t v) {
return len;
}
-const char* GetVarint32PtrFallback(const char* p,
- const char* limit,
+const char* GetVarint32PtrFallback(const char* p, const char* limit,
uint32_t* value) {
uint32_t result = 0;
for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
- uint32_t byte = *(reinterpret_cast<const unsigned char*>(p));
+ uint32_t byte = *(reinterpret_cast<const uint8_t*>(p));
p++;
if (byte & 128) {
// More bytes are present
@@ -125,14 +98,14 @@ const char* GetVarint32PtrFallback(const char* p,
return reinterpret_cast<const char*>(p);
}
}
- return NULL;
+ return nullptr;
}
bool GetVarint32(Slice* input, uint32_t* value) {
const char* p = input->data();
const char* limit = p + input->size();
const char* q = GetVarint32Ptr(p, limit, value);
- if (q == NULL) {
+ if (q == nullptr) {
return false;
} else {
*input = Slice(q, limit - q);
@@ -143,7 +116,7 @@ bool GetVarint32(Slice* input, uint32_t* value) {
const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) {
uint64_t result = 0;
for (uint32_t shift = 0; shift <= 63 && p < limit; shift += 7) {
- uint64_t byte = *(reinterpret_cast<const unsigned char*>(p));
+ uint64_t byte = *(reinterpret_cast<const uint8_t*>(p));
p++;
if (byte & 128) {
// More bytes are present
@@ -154,14 +127,14 @@ const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) {
return reinterpret_cast<const char*>(p);
}
}
- return NULL;
+ return nullptr;
}
bool GetVarint64(Slice* input, uint64_t* value) {
const char* p = input->data();
const char* limit = p + input->size();
const char* q = GetVarint64Ptr(p, limit, value);
- if (q == NULL) {
+ if (q == nullptr) {
return false;
} else {
*input = Slice(q, limit - q);
@@ -173,16 +146,15 @@ const char* GetLengthPrefixedSlice(const char* p, const char* limit,
Slice* result) {
uint32_t len;
p = GetVarint32Ptr(p, limit, &len);
- if (p == NULL) return NULL;
- if (p + len > limit) return NULL;
+ if (p == nullptr) return nullptr;
+ if (p + len > limit) return nullptr;
*result = Slice(p, len);
return p + len;
}
bool GetLengthPrefixedSlice(Slice* input, Slice* result) {
uint32_t len;
- if (GetVarint32(input, &len) &&
- input->size() >= len) {
+ if (GetVarint32(input, &len) && input->size() >= len) {
*result = Slice(input->data(), len);
input->remove_prefix(len);
return true;
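(Editor's note, not part of the patch.) The reformatted EncodeVarint64() stores 7 payload bits per byte, least-significant group first, with bit 7 as a continuation flag; the new `*(ptr++) = v | B` line relies on truncation to uint8_t to drop everything above the flag bit. A standalone sketch of the same scheme, with a hypothetical function name:

#include <cstdint>
#include <string>

// Hypothetical helper: same byte layout as EncodeVarint64()/GetVarint64Ptr().
std::string VarintEncode(uint64_t v) {
  std::string out;
  while (v >= 128) {
    out.push_back(static_cast<char>((v & 127) | 128));  // low 7 bits + flag
    v >>= 7;
  }
  out.push_back(static_cast<char>(v));  // final byte, continuation flag clear
  return out;
}

Decoding (GetVarint64Ptr above) reverses this: accumulate 7 bits per input byte, shifting the accumulator offset by 7 each time, and stop at the first byte whose bit 7 is clear.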
diff --git a/src/leveldb/util/coding.h b/src/leveldb/util/coding.h
index 3993c4a755..1983ae7173 100644
--- a/src/leveldb/util/coding.h
+++ b/src/leveldb/util/coding.h
@@ -10,87 +10,147 @@
#ifndef STORAGE_LEVELDB_UTIL_CODING_H_
#define STORAGE_LEVELDB_UTIL_CODING_H_
-#include <stdint.h>
-#include <string.h>
+#include <cstdint>
+#include <cstring>
#include <string>
+
#include "leveldb/slice.h"
#include "port/port.h"
namespace leveldb {
// Standard Put... routines append to a string
-extern void PutFixed32(std::string* dst, uint32_t value);
-extern void PutFixed64(std::string* dst, uint64_t value);
-extern void PutVarint32(std::string* dst, uint32_t value);
-extern void PutVarint64(std::string* dst, uint64_t value);
-extern void PutLengthPrefixedSlice(std::string* dst, const Slice& value);
+void PutFixed32(std::string* dst, uint32_t value);
+void PutFixed64(std::string* dst, uint64_t value);
+void PutVarint32(std::string* dst, uint32_t value);
+void PutVarint64(std::string* dst, uint64_t value);
+void PutLengthPrefixedSlice(std::string* dst, const Slice& value);
// Standard Get... routines parse a value from the beginning of a Slice
// and advance the slice past the parsed value.
-extern bool GetVarint32(Slice* input, uint32_t* value);
-extern bool GetVarint64(Slice* input, uint64_t* value);
-extern bool GetLengthPrefixedSlice(Slice* input, Slice* result);
+bool GetVarint32(Slice* input, uint32_t* value);
+bool GetVarint64(Slice* input, uint64_t* value);
+bool GetLengthPrefixedSlice(Slice* input, Slice* result);
// Pointer-based variants of GetVarint... These either store a value
// in *v and return a pointer just past the parsed value, or return
-// NULL on error. These routines only look at bytes in the range
+// nullptr on error. These routines only look at bytes in the range
// [p..limit-1]
-extern const char* GetVarint32Ptr(const char* p,const char* limit, uint32_t* v);
-extern const char* GetVarint64Ptr(const char* p,const char* limit, uint64_t* v);
+const char* GetVarint32Ptr(const char* p, const char* limit, uint32_t* v);
+const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* v);
// Returns the length of the varint32 or varint64 encoding of "v"
-extern int VarintLength(uint64_t v);
+int VarintLength(uint64_t v);
// Lower-level versions of Put... that write directly into a character buffer
+// and return a pointer just past the last byte written.
// REQUIRES: dst has enough space for the value being written
-extern void EncodeFixed32(char* dst, uint32_t value);
-extern void EncodeFixed64(char* dst, uint64_t value);
+char* EncodeVarint32(char* dst, uint32_t value);
+char* EncodeVarint64(char* dst, uint64_t value);
+
+// TODO(costan): Remove port::kLittleEndian and the fast paths based on
+// std::memcpy when clang learns to optimize the generic code, as
+// described in https://bugs.llvm.org/show_bug.cgi?id=41761
+//
+// The platform-independent code in DecodeFixed{32,64}() gets optimized to mov
+// on x86 and ldr on ARM64, by both clang and gcc. However, only gcc optimizes
+// the platform-independent code in EncodeFixed{32,64}() to mov / str.
// Lower-level versions of Put... that write directly into a character buffer
-// and return a pointer just past the last byte written.
// REQUIRES: dst has enough space for the value being written
-extern char* EncodeVarint32(char* dst, uint32_t value);
-extern char* EncodeVarint64(char* dst, uint64_t value);
+
+inline void EncodeFixed32(char* dst, uint32_t value) {
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+
+ if (port::kLittleEndian) {
+ // Fast path for little-endian CPUs. All major compilers optimize this to a
+ // single mov (x86_64) / str (ARM) instruction.
+ std::memcpy(buffer, &value, sizeof(uint32_t));
+ return;
+ }
+
+ // Platform-independent code.
+ // Currently, only gcc optimizes this to a single mov / str instruction.
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
+ buffer[2] = static_cast<uint8_t>(value >> 16);
+ buffer[3] = static_cast<uint8_t>(value >> 24);
+}
+
+inline void EncodeFixed64(char* dst, uint64_t value) {
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+
+ if (port::kLittleEndian) {
+ // Fast path for little-endian CPUs. All major compilers optimize this to a
+ // single mov (x86_64) / str (ARM) instruction.
+ std::memcpy(buffer, &value, sizeof(uint64_t));
+ return;
+ }
+
+ // Platform-independent code.
+ // Currently, only gcc optimizes this to a single mov / str instruction.
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
+ buffer[2] = static_cast<uint8_t>(value >> 16);
+ buffer[3] = static_cast<uint8_t>(value >> 24);
+ buffer[4] = static_cast<uint8_t>(value >> 32);
+ buffer[5] = static_cast<uint8_t>(value >> 40);
+ buffer[6] = static_cast<uint8_t>(value >> 48);
+ buffer[7] = static_cast<uint8_t>(value >> 56);
+}
// Lower-level versions of Get... that read directly from a character buffer
// without any bounds checking.
inline uint32_t DecodeFixed32(const char* ptr) {
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+
if (port::kLittleEndian) {
- // Load the raw bytes
+ // Fast path for little-endian CPUs. All major compilers optimize this to a
+ // single mov (x86_64) / ldr (ARM) instruction.
uint32_t result;
- memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load
+ std::memcpy(&result, buffer, sizeof(uint32_t));
return result;
- } else {
- return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0])))
- | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8)
- | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16)
- | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
}
+
+ // Platform-independent code.
+ // Clang and gcc optimize this to a single mov / ldr instruction.
+ return (static_cast<uint32_t>(buffer[0])) |
+ (static_cast<uint32_t>(buffer[1]) << 8) |
+ (static_cast<uint32_t>(buffer[2]) << 16) |
+ (static_cast<uint32_t>(buffer[3]) << 24);
}
inline uint64_t DecodeFixed64(const char* ptr) {
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+
if (port::kLittleEndian) {
- // Load the raw bytes
+ // Fast path for little-endian CPUs. All major compilers optimize this to a
+ // single mov (x86_64) / ldr (ARM) instruction.
uint64_t result;
- memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load
+ std::memcpy(&result, buffer, sizeof(uint64_t));
return result;
- } else {
- uint64_t lo = DecodeFixed32(ptr);
- uint64_t hi = DecodeFixed32(ptr + 4);
- return (hi << 32) | lo;
}
+
+ // Platform-independent code.
+ // Clang and gcc optimize this to a single mov / ldr instruction.
+ return (static_cast<uint64_t>(buffer[0])) |
+ (static_cast<uint64_t>(buffer[1]) << 8) |
+ (static_cast<uint64_t>(buffer[2]) << 16) |
+ (static_cast<uint64_t>(buffer[3]) << 24) |
+ (static_cast<uint64_t>(buffer[4]) << 32) |
+ (static_cast<uint64_t>(buffer[5]) << 40) |
+ (static_cast<uint64_t>(buffer[6]) << 48) |
+ (static_cast<uint64_t>(buffer[7]) << 56);
}
// Internal routine for use by fallback path of GetVarint32Ptr
-extern const char* GetVarint32PtrFallback(const char* p,
- const char* limit,
- uint32_t* value);
-inline const char* GetVarint32Ptr(const char* p,
- const char* limit,
+const char* GetVarint32PtrFallback(const char* p, const char* limit,
+ uint32_t* value);
+inline const char* GetVarint32Ptr(const char* p, const char* limit,
uint32_t* value) {
if (p < limit) {
- uint32_t result = *(reinterpret_cast<const unsigned char*>(p));
+ uint32_t result = *(reinterpret_cast<const uint8_t*>(p));
if ((result & 128) == 0) {
*value = result;
return p + 1;
diff --git a/src/leveldb/util/coding_test.cc b/src/leveldb/util/coding_test.cc
index 521541ea61..0d2a0c51f6 100644
--- a/src/leveldb/util/coding_test.cc
+++ b/src/leveldb/util/coding_test.cc
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "util/coding.h"
+#include <vector>
+#include "util/coding.h"
#include "util/testharness.h"
namespace leveldb {
-class Coding { };
+class Coding {};
TEST(Coding, Fixed32) {
std::string s;
@@ -38,15 +39,15 @@ TEST(Coding, Fixed64) {
uint64_t v = static_cast<uint64_t>(1) << power;
uint64_t actual;
actual = DecodeFixed64(p);
- ASSERT_EQ(v-1, actual);
+ ASSERT_EQ(v - 1, actual);
p += sizeof(uint64_t);
actual = DecodeFixed64(p);
- ASSERT_EQ(v+0, actual);
+ ASSERT_EQ(v + 0, actual);
p += sizeof(uint64_t);
actual = DecodeFixed64(p);
- ASSERT_EQ(v+1, actual);
+ ASSERT_EQ(v + 1, actual);
p += sizeof(uint64_t);
}
}
@@ -88,7 +89,7 @@ TEST(Coding, Varint32) {
uint32_t actual;
const char* start = p;
p = GetVarint32Ptr(p, limit, &actual);
- ASSERT_TRUE(p != NULL);
+ ASSERT_TRUE(p != nullptr);
ASSERT_EQ(expected, actual);
ASSERT_EQ(VarintLength(actual), p - start);
}
@@ -107,8 +108,8 @@ TEST(Coding, Varint64) {
// Test values near powers of two
const uint64_t power = 1ull << k;
values.push_back(power);
- values.push_back(power-1);
- values.push_back(power+1);
+ values.push_back(power - 1);
+ values.push_back(power + 1);
}
std::string s;
@@ -123,19 +124,18 @@ TEST(Coding, Varint64) {
uint64_t actual;
const char* start = p;
p = GetVarint64Ptr(p, limit, &actual);
- ASSERT_TRUE(p != NULL);
+ ASSERT_TRUE(p != nullptr);
ASSERT_EQ(values[i], actual);
ASSERT_EQ(VarintLength(actual), p - start);
}
ASSERT_EQ(p, limit);
-
}
TEST(Coding, Varint32Overflow) {
uint32_t result;
std::string input("\x81\x82\x83\x84\x85\x11");
- ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result)
- == NULL);
+ ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(),
+ &result) == nullptr);
}
TEST(Coding, Varint32Truncation) {
@@ -144,17 +144,18 @@ TEST(Coding, Varint32Truncation) {
PutVarint32(&s, large_value);
uint32_t result;
for (size_t len = 0; len < s.size() - 1; len++) {
- ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == NULL);
+ ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
}
- ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != NULL);
+ ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) !=
+ nullptr);
ASSERT_EQ(large_value, result);
}
TEST(Coding, Varint64Overflow) {
uint64_t result;
std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
- ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result)
- == NULL);
+ ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(),
+ &result) == nullptr);
}
TEST(Coding, Varint64Truncation) {
@@ -163,9 +164,10 @@ TEST(Coding, Varint64Truncation) {
PutVarint64(&s, large_value);
uint64_t result;
for (size_t len = 0; len < s.size() - 1; len++) {
- ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == NULL);
+ ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
}
- ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != NULL);
+ ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) !=
+ nullptr);
ASSERT_EQ(large_value, result);
}
@@ -191,6 +193,4 @@ TEST(Coding, Strings) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/comparator.cc b/src/leveldb/util/comparator.cc
index 4b7b5724ef..c5766e9462 100644
--- a/src/leveldb/util/comparator.cc
+++ b/src/leveldb/util/comparator.cc
@@ -2,33 +2,34 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include <algorithm>
-#include <stdint.h>
#include "leveldb/comparator.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <string>
+#include <type_traits>
+
#include "leveldb/slice.h"
-#include "port/port.h"
#include "util/logging.h"
+#include "util/no_destructor.h"
namespace leveldb {
-Comparator::~Comparator() { }
+Comparator::~Comparator() = default;
namespace {
class BytewiseComparatorImpl : public Comparator {
public:
- BytewiseComparatorImpl() { }
+ BytewiseComparatorImpl() = default;
- virtual const char* Name() const {
- return "leveldb.BytewiseComparator";
- }
+ const char* Name() const override { return "leveldb.BytewiseComparator"; }
- virtual int Compare(const Slice& a, const Slice& b) const {
+ int Compare(const Slice& a, const Slice& b) const override {
return a.compare(b);
}
- virtual void FindShortestSeparator(
- std::string* start,
- const Slice& limit) const {
+ void FindShortestSeparator(std::string* start,
+ const Slice& limit) const override {
// Find length of common prefix
size_t min_length = std::min(start->size(), limit.size());
size_t diff_index = 0;
@@ -50,14 +51,14 @@ class BytewiseComparatorImpl : public Comparator {
}
}
- virtual void FindShortSuccessor(std::string* key) const {
+ void FindShortSuccessor(std::string* key) const override {
// Find first character that can be incremented
size_t n = key->size();
for (size_t i = 0; i < n; i++) {
const uint8_t byte = (*key)[i];
if (byte != static_cast<uint8_t>(0xff)) {
(*key)[i] = byte + 1;
- key->resize(i+1);
+ key->resize(i + 1);
return;
}
}
@@ -66,16 +67,9 @@ class BytewiseComparatorImpl : public Comparator {
};
} // namespace
-static port::OnceType once = LEVELDB_ONCE_INIT;
-static const Comparator* bytewise;
-
-static void InitModule() {
- bytewise = new BytewiseComparatorImpl;
-}
-
const Comparator* BytewiseComparator() {
- port::InitOnce(&once, InitModule);
- return bytewise;
+ static NoDestructor<BytewiseComparatorImpl> singleton;
+ return singleton.get();
}
} // namespace leveldb
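(Editor's note, not part of the patch.) FindShortSuccessor() above shortens a key while keeping it ordered at or after the original: it increments the first byte that is not 0xff and truncates there; a key that is all 0xff bytes is left unchanged. A standalone sketch with a hypothetical name:

#include <cstddef>
#include <cstdint>
#include <string>

// Hypothetical helper mirroring BytewiseComparatorImpl::FindShortSuccessor().
void ShortSuccessor(std::string* key) {
  const std::size_t n = key->size();
  for (std::size_t i = 0; i < n; i++) {
    const uint8_t byte = (*key)[i];
    if (byte != 0xff) {
      (*key)[i] = byte + 1;  // strictly greater at position i
      key->resize(i + 1);    // drop the tail; result still orders >= the key
      return;
    }
  }
  // *key is a run of 0xff bytes; leave it unchanged.
}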
diff --git a/src/leveldb/util/crc32c.cc b/src/leveldb/util/crc32c.cc
index b3f40eeeed..c2e61f7dba 100644
--- a/src/leveldb/util/crc32c.cc
+++ b/src/leveldb/util/crc32c.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
-// A portable implementation of crc32c, optimized to handle
-// four bytes at a time.
+// A portable implementation of crc32c.
#include "util/crc32c.h"
+#include <stddef.h>
#include <stdint.h>
#include "port/port.h"
@@ -15,283 +15,256 @@
namespace leveldb {
namespace crc32c {
-static const uint32_t table0_[256] = {
- 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4,
- 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb,
- 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
- 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24,
- 0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b,
- 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
- 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54,
- 0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b,
- 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
- 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35,
- 0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5,
- 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
- 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45,
- 0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a,
- 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
- 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595,
- 0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48,
- 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
- 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687,
- 0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198,
- 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
- 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38,
- 0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8,
- 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
- 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096,
- 0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789,
- 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
- 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46,
- 0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9,
- 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
- 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36,
- 0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829,
- 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
- 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93,
- 0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043,
- 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
- 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3,
- 0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc,
- 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
- 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033,
- 0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652,
- 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
- 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d,
- 0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982,
- 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
- 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622,
- 0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2,
- 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
- 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530,
- 0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f,
- 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
- 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0,
- 0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f,
- 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
- 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90,
- 0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f,
- 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
- 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1,
- 0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321,
- 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
- 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81,
- 0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e,
- 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
- 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351
-};
-static const uint32_t table1_[256] = {
- 0x00000000, 0x13a29877, 0x274530ee, 0x34e7a899,
- 0x4e8a61dc, 0x5d28f9ab, 0x69cf5132, 0x7a6dc945,
- 0x9d14c3b8, 0x8eb65bcf, 0xba51f356, 0xa9f36b21,
- 0xd39ea264, 0xc03c3a13, 0xf4db928a, 0xe7790afd,
- 0x3fc5f181, 0x2c6769f6, 0x1880c16f, 0x0b225918,
- 0x714f905d, 0x62ed082a, 0x560aa0b3, 0x45a838c4,
- 0xa2d13239, 0xb173aa4e, 0x859402d7, 0x96369aa0,
- 0xec5b53e5, 0xfff9cb92, 0xcb1e630b, 0xd8bcfb7c,
- 0x7f8be302, 0x6c297b75, 0x58ced3ec, 0x4b6c4b9b,
- 0x310182de, 0x22a31aa9, 0x1644b230, 0x05e62a47,
- 0xe29f20ba, 0xf13db8cd, 0xc5da1054, 0xd6788823,
- 0xac154166, 0xbfb7d911, 0x8b507188, 0x98f2e9ff,
- 0x404e1283, 0x53ec8af4, 0x670b226d, 0x74a9ba1a,
- 0x0ec4735f, 0x1d66eb28, 0x298143b1, 0x3a23dbc6,
- 0xdd5ad13b, 0xcef8494c, 0xfa1fe1d5, 0xe9bd79a2,
- 0x93d0b0e7, 0x80722890, 0xb4958009, 0xa737187e,
- 0xff17c604, 0xecb55e73, 0xd852f6ea, 0xcbf06e9d,
- 0xb19da7d8, 0xa23f3faf, 0x96d89736, 0x857a0f41,
- 0x620305bc, 0x71a19dcb, 0x45463552, 0x56e4ad25,
- 0x2c896460, 0x3f2bfc17, 0x0bcc548e, 0x186eccf9,
- 0xc0d23785, 0xd370aff2, 0xe797076b, 0xf4359f1c,
- 0x8e585659, 0x9dface2e, 0xa91d66b7, 0xbabffec0,
- 0x5dc6f43d, 0x4e646c4a, 0x7a83c4d3, 0x69215ca4,
- 0x134c95e1, 0x00ee0d96, 0x3409a50f, 0x27ab3d78,
- 0x809c2506, 0x933ebd71, 0xa7d915e8, 0xb47b8d9f,
- 0xce1644da, 0xddb4dcad, 0xe9537434, 0xfaf1ec43,
- 0x1d88e6be, 0x0e2a7ec9, 0x3acdd650, 0x296f4e27,
- 0x53028762, 0x40a01f15, 0x7447b78c, 0x67e52ffb,
- 0xbf59d487, 0xacfb4cf0, 0x981ce469, 0x8bbe7c1e,
- 0xf1d3b55b, 0xe2712d2c, 0xd69685b5, 0xc5341dc2,
- 0x224d173f, 0x31ef8f48, 0x050827d1, 0x16aabfa6,
- 0x6cc776e3, 0x7f65ee94, 0x4b82460d, 0x5820de7a,
- 0xfbc3faf9, 0xe861628e, 0xdc86ca17, 0xcf245260,
- 0xb5499b25, 0xa6eb0352, 0x920cabcb, 0x81ae33bc,
- 0x66d73941, 0x7575a136, 0x419209af, 0x523091d8,
- 0x285d589d, 0x3bffc0ea, 0x0f186873, 0x1cbaf004,
- 0xc4060b78, 0xd7a4930f, 0xe3433b96, 0xf0e1a3e1,
- 0x8a8c6aa4, 0x992ef2d3, 0xadc95a4a, 0xbe6bc23d,
- 0x5912c8c0, 0x4ab050b7, 0x7e57f82e, 0x6df56059,
- 0x1798a91c, 0x043a316b, 0x30dd99f2, 0x237f0185,
- 0x844819fb, 0x97ea818c, 0xa30d2915, 0xb0afb162,
- 0xcac27827, 0xd960e050, 0xed8748c9, 0xfe25d0be,
- 0x195cda43, 0x0afe4234, 0x3e19eaad, 0x2dbb72da,
- 0x57d6bb9f, 0x447423e8, 0x70938b71, 0x63311306,
- 0xbb8de87a, 0xa82f700d, 0x9cc8d894, 0x8f6a40e3,
- 0xf50789a6, 0xe6a511d1, 0xd242b948, 0xc1e0213f,
- 0x26992bc2, 0x353bb3b5, 0x01dc1b2c, 0x127e835b,
- 0x68134a1e, 0x7bb1d269, 0x4f567af0, 0x5cf4e287,
- 0x04d43cfd, 0x1776a48a, 0x23910c13, 0x30339464,
- 0x4a5e5d21, 0x59fcc556, 0x6d1b6dcf, 0x7eb9f5b8,
- 0x99c0ff45, 0x8a626732, 0xbe85cfab, 0xad2757dc,
- 0xd74a9e99, 0xc4e806ee, 0xf00fae77, 0xe3ad3600,
- 0x3b11cd7c, 0x28b3550b, 0x1c54fd92, 0x0ff665e5,
- 0x759baca0, 0x663934d7, 0x52de9c4e, 0x417c0439,
- 0xa6050ec4, 0xb5a796b3, 0x81403e2a, 0x92e2a65d,
- 0xe88f6f18, 0xfb2df76f, 0xcfca5ff6, 0xdc68c781,
- 0x7b5fdfff, 0x68fd4788, 0x5c1aef11, 0x4fb87766,
- 0x35d5be23, 0x26772654, 0x12908ecd, 0x013216ba,
- 0xe64b1c47, 0xf5e98430, 0xc10e2ca9, 0xd2acb4de,
- 0xa8c17d9b, 0xbb63e5ec, 0x8f844d75, 0x9c26d502,
- 0x449a2e7e, 0x5738b609, 0x63df1e90, 0x707d86e7,
- 0x0a104fa2, 0x19b2d7d5, 0x2d557f4c, 0x3ef7e73b,
- 0xd98eedc6, 0xca2c75b1, 0xfecbdd28, 0xed69455f,
- 0x97048c1a, 0x84a6146d, 0xb041bcf4, 0xa3e32483
-};
-static const uint32_t table2_[256] = {
- 0x00000000, 0xa541927e, 0x4f6f520d, 0xea2ec073,
- 0x9edea41a, 0x3b9f3664, 0xd1b1f617, 0x74f06469,
- 0x38513ec5, 0x9d10acbb, 0x773e6cc8, 0xd27ffeb6,
- 0xa68f9adf, 0x03ce08a1, 0xe9e0c8d2, 0x4ca15aac,
- 0x70a27d8a, 0xd5e3eff4, 0x3fcd2f87, 0x9a8cbdf9,
- 0xee7cd990, 0x4b3d4bee, 0xa1138b9d, 0x045219e3,
- 0x48f3434f, 0xedb2d131, 0x079c1142, 0xa2dd833c,
- 0xd62de755, 0x736c752b, 0x9942b558, 0x3c032726,
- 0xe144fb14, 0x4405696a, 0xae2ba919, 0x0b6a3b67,
- 0x7f9a5f0e, 0xdadbcd70, 0x30f50d03, 0x95b49f7d,
- 0xd915c5d1, 0x7c5457af, 0x967a97dc, 0x333b05a2,
- 0x47cb61cb, 0xe28af3b5, 0x08a433c6, 0xade5a1b8,
- 0x91e6869e, 0x34a714e0, 0xde89d493, 0x7bc846ed,
- 0x0f382284, 0xaa79b0fa, 0x40577089, 0xe516e2f7,
- 0xa9b7b85b, 0x0cf62a25, 0xe6d8ea56, 0x43997828,
- 0x37691c41, 0x92288e3f, 0x78064e4c, 0xdd47dc32,
- 0xc76580d9, 0x622412a7, 0x880ad2d4, 0x2d4b40aa,
- 0x59bb24c3, 0xfcfab6bd, 0x16d476ce, 0xb395e4b0,
- 0xff34be1c, 0x5a752c62, 0xb05bec11, 0x151a7e6f,
- 0x61ea1a06, 0xc4ab8878, 0x2e85480b, 0x8bc4da75,
- 0xb7c7fd53, 0x12866f2d, 0xf8a8af5e, 0x5de93d20,
- 0x29195949, 0x8c58cb37, 0x66760b44, 0xc337993a,
- 0x8f96c396, 0x2ad751e8, 0xc0f9919b, 0x65b803e5,
- 0x1148678c, 0xb409f5f2, 0x5e273581, 0xfb66a7ff,
- 0x26217bcd, 0x8360e9b3, 0x694e29c0, 0xcc0fbbbe,
- 0xb8ffdfd7, 0x1dbe4da9, 0xf7908dda, 0x52d11fa4,
- 0x1e704508, 0xbb31d776, 0x511f1705, 0xf45e857b,
- 0x80aee112, 0x25ef736c, 0xcfc1b31f, 0x6a802161,
- 0x56830647, 0xf3c29439, 0x19ec544a, 0xbcadc634,
- 0xc85da25d, 0x6d1c3023, 0x8732f050, 0x2273622e,
- 0x6ed23882, 0xcb93aafc, 0x21bd6a8f, 0x84fcf8f1,
- 0xf00c9c98, 0x554d0ee6, 0xbf63ce95, 0x1a225ceb,
- 0x8b277743, 0x2e66e53d, 0xc448254e, 0x6109b730,
- 0x15f9d359, 0xb0b84127, 0x5a968154, 0xffd7132a,
- 0xb3764986, 0x1637dbf8, 0xfc191b8b, 0x595889f5,
- 0x2da8ed9c, 0x88e97fe2, 0x62c7bf91, 0xc7862def,
- 0xfb850ac9, 0x5ec498b7, 0xb4ea58c4, 0x11abcaba,
- 0x655baed3, 0xc01a3cad, 0x2a34fcde, 0x8f756ea0,
- 0xc3d4340c, 0x6695a672, 0x8cbb6601, 0x29faf47f,
- 0x5d0a9016, 0xf84b0268, 0x1265c21b, 0xb7245065,
- 0x6a638c57, 0xcf221e29, 0x250cde5a, 0x804d4c24,
- 0xf4bd284d, 0x51fcba33, 0xbbd27a40, 0x1e93e83e,
- 0x5232b292, 0xf77320ec, 0x1d5de09f, 0xb81c72e1,
- 0xccec1688, 0x69ad84f6, 0x83834485, 0x26c2d6fb,
- 0x1ac1f1dd, 0xbf8063a3, 0x55aea3d0, 0xf0ef31ae,
- 0x841f55c7, 0x215ec7b9, 0xcb7007ca, 0x6e3195b4,
- 0x2290cf18, 0x87d15d66, 0x6dff9d15, 0xc8be0f6b,
- 0xbc4e6b02, 0x190ff97c, 0xf321390f, 0x5660ab71,
- 0x4c42f79a, 0xe90365e4, 0x032da597, 0xa66c37e9,
- 0xd29c5380, 0x77ddc1fe, 0x9df3018d, 0x38b293f3,
- 0x7413c95f, 0xd1525b21, 0x3b7c9b52, 0x9e3d092c,
- 0xeacd6d45, 0x4f8cff3b, 0xa5a23f48, 0x00e3ad36,
- 0x3ce08a10, 0x99a1186e, 0x738fd81d, 0xd6ce4a63,
- 0xa23e2e0a, 0x077fbc74, 0xed517c07, 0x4810ee79,
- 0x04b1b4d5, 0xa1f026ab, 0x4bdee6d8, 0xee9f74a6,
- 0x9a6f10cf, 0x3f2e82b1, 0xd50042c2, 0x7041d0bc,
- 0xad060c8e, 0x08479ef0, 0xe2695e83, 0x4728ccfd,
- 0x33d8a894, 0x96993aea, 0x7cb7fa99, 0xd9f668e7,
- 0x9557324b, 0x3016a035, 0xda386046, 0x7f79f238,
- 0x0b899651, 0xaec8042f, 0x44e6c45c, 0xe1a75622,
- 0xdda47104, 0x78e5e37a, 0x92cb2309, 0x378ab177,
- 0x437ad51e, 0xe63b4760, 0x0c158713, 0xa954156d,
- 0xe5f54fc1, 0x40b4ddbf, 0xaa9a1dcc, 0x0fdb8fb2,
- 0x7b2bebdb, 0xde6a79a5, 0x3444b9d6, 0x91052ba8
-};
-static const uint32_t table3_[256] = {
- 0x00000000, 0xdd45aab8, 0xbf672381, 0x62228939,
- 0x7b2231f3, 0xa6679b4b, 0xc4451272, 0x1900b8ca,
- 0xf64463e6, 0x2b01c95e, 0x49234067, 0x9466eadf,
- 0x8d665215, 0x5023f8ad, 0x32017194, 0xef44db2c,
- 0xe964b13d, 0x34211b85, 0x560392bc, 0x8b463804,
- 0x924680ce, 0x4f032a76, 0x2d21a34f, 0xf06409f7,
- 0x1f20d2db, 0xc2657863, 0xa047f15a, 0x7d025be2,
- 0x6402e328, 0xb9474990, 0xdb65c0a9, 0x06206a11,
- 0xd725148b, 0x0a60be33, 0x6842370a, 0xb5079db2,
- 0xac072578, 0x71428fc0, 0x136006f9, 0xce25ac41,
- 0x2161776d, 0xfc24ddd5, 0x9e0654ec, 0x4343fe54,
- 0x5a43469e, 0x8706ec26, 0xe524651f, 0x3861cfa7,
- 0x3e41a5b6, 0xe3040f0e, 0x81268637, 0x5c632c8f,
- 0x45639445, 0x98263efd, 0xfa04b7c4, 0x27411d7c,
- 0xc805c650, 0x15406ce8, 0x7762e5d1, 0xaa274f69,
- 0xb327f7a3, 0x6e625d1b, 0x0c40d422, 0xd1057e9a,
- 0xaba65fe7, 0x76e3f55f, 0x14c17c66, 0xc984d6de,
- 0xd0846e14, 0x0dc1c4ac, 0x6fe34d95, 0xb2a6e72d,
- 0x5de23c01, 0x80a796b9, 0xe2851f80, 0x3fc0b538,
- 0x26c00df2, 0xfb85a74a, 0x99a72e73, 0x44e284cb,
- 0x42c2eeda, 0x9f874462, 0xfda5cd5b, 0x20e067e3,
- 0x39e0df29, 0xe4a57591, 0x8687fca8, 0x5bc25610,
- 0xb4868d3c, 0x69c32784, 0x0be1aebd, 0xd6a40405,
- 0xcfa4bccf, 0x12e11677, 0x70c39f4e, 0xad8635f6,
- 0x7c834b6c, 0xa1c6e1d4, 0xc3e468ed, 0x1ea1c255,
- 0x07a17a9f, 0xdae4d027, 0xb8c6591e, 0x6583f3a6,
- 0x8ac7288a, 0x57828232, 0x35a00b0b, 0xe8e5a1b3,
- 0xf1e51979, 0x2ca0b3c1, 0x4e823af8, 0x93c79040,
- 0x95e7fa51, 0x48a250e9, 0x2a80d9d0, 0xf7c57368,
- 0xeec5cba2, 0x3380611a, 0x51a2e823, 0x8ce7429b,
- 0x63a399b7, 0xbee6330f, 0xdcc4ba36, 0x0181108e,
- 0x1881a844, 0xc5c402fc, 0xa7e68bc5, 0x7aa3217d,
- 0x52a0c93f, 0x8fe56387, 0xedc7eabe, 0x30824006,
- 0x2982f8cc, 0xf4c75274, 0x96e5db4d, 0x4ba071f5,
- 0xa4e4aad9, 0x79a10061, 0x1b838958, 0xc6c623e0,
- 0xdfc69b2a, 0x02833192, 0x60a1b8ab, 0xbde41213,
- 0xbbc47802, 0x6681d2ba, 0x04a35b83, 0xd9e6f13b,
- 0xc0e649f1, 0x1da3e349, 0x7f816a70, 0xa2c4c0c8,
- 0x4d801be4, 0x90c5b15c, 0xf2e73865, 0x2fa292dd,
- 0x36a22a17, 0xebe780af, 0x89c50996, 0x5480a32e,
- 0x8585ddb4, 0x58c0770c, 0x3ae2fe35, 0xe7a7548d,
- 0xfea7ec47, 0x23e246ff, 0x41c0cfc6, 0x9c85657e,
- 0x73c1be52, 0xae8414ea, 0xcca69dd3, 0x11e3376b,
- 0x08e38fa1, 0xd5a62519, 0xb784ac20, 0x6ac10698,
- 0x6ce16c89, 0xb1a4c631, 0xd3864f08, 0x0ec3e5b0,
- 0x17c35d7a, 0xca86f7c2, 0xa8a47efb, 0x75e1d443,
- 0x9aa50f6f, 0x47e0a5d7, 0x25c22cee, 0xf8878656,
- 0xe1873e9c, 0x3cc29424, 0x5ee01d1d, 0x83a5b7a5,
- 0xf90696d8, 0x24433c60, 0x4661b559, 0x9b241fe1,
- 0x8224a72b, 0x5f610d93, 0x3d4384aa, 0xe0062e12,
- 0x0f42f53e, 0xd2075f86, 0xb025d6bf, 0x6d607c07,
- 0x7460c4cd, 0xa9256e75, 0xcb07e74c, 0x16424df4,
- 0x106227e5, 0xcd278d5d, 0xaf050464, 0x7240aedc,
- 0x6b401616, 0xb605bcae, 0xd4273597, 0x09629f2f,
- 0xe6264403, 0x3b63eebb, 0x59416782, 0x8404cd3a,
- 0x9d0475f0, 0x4041df48, 0x22635671, 0xff26fcc9,
- 0x2e238253, 0xf36628eb, 0x9144a1d2, 0x4c010b6a,
- 0x5501b3a0, 0x88441918, 0xea669021, 0x37233a99,
- 0xd867e1b5, 0x05224b0d, 0x6700c234, 0xba45688c,
- 0xa345d046, 0x7e007afe, 0x1c22f3c7, 0xc167597f,
- 0xc747336e, 0x1a0299d6, 0x782010ef, 0xa565ba57,
- 0xbc65029d, 0x6120a825, 0x0302211c, 0xde478ba4,
- 0x31035088, 0xec46fa30, 0x8e647309, 0x5321d9b1,
- 0x4a21617b, 0x9764cbc3, 0xf54642fa, 0x2803e842
-};
+namespace {
-// Used to fetch a naturally-aligned 32-bit word in little endian byte-order
-static inline uint32_t LE_LOAD32(const uint8_t *p) {
- return DecodeFixed32(reinterpret_cast<const char*>(p));
+const uint32_t kByteExtensionTable[256] = {
+ 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c,
+ 0x26a1e7e8, 0xd4ca64eb, 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
+ 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, 0x105ec76f, 0xe235446c,
+ 0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
+ 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc,
+ 0xbc267848, 0x4e4dfb4b, 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
+ 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, 0xaa64d611, 0x580f5512,
+ 0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
+ 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad,
+ 0x1642ae59, 0xe4292d5a, 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
+ 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, 0x417b1dbc, 0xb3109ebf,
+ 0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
+ 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f,
+ 0xed03a29b, 0x1f682198, 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
+ 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, 0xdbfc821c, 0x2997011f,
+ 0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
+ 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e,
+ 0x4767748a, 0xb50cf789, 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
+ 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, 0x7198540d, 0x83f3d70e,
+ 0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
+ 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de,
+ 0xdde0eb2a, 0x2f8b6829, 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
+ 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, 0x082f63b7, 0xfa44e0b4,
+ 0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
+ 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b,
+ 0xb4091bff, 0x466298fc, 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
+ 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, 0xa24bb5a6, 0x502036a5,
+ 0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
+ 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975,
+ 0x0e330a81, 0xfc588982, 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
+ 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, 0x38cc2a06, 0xcaa7a905,
+ 0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
+ 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8,
+ 0xe52cc12c, 0x1747422f, 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
+ 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, 0xd3d3e1ab, 0x21b862a8,
+ 0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
+ 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78,
+ 0x7fab5e8c, 0x8dc0dd8f, 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
+ 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, 0x69e9f0d5, 0x9b8273d6,
+ 0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
+ 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69,
+ 0xd5cf889d, 0x27a40b9e, 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
+ 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351};
+
+const uint32_t kStrideExtensionTable0[256] = {
+ 0x00000000, 0x30d23865, 0x61a470ca, 0x517648af, 0xc348e194, 0xf39ad9f1,
+ 0xa2ec915e, 0x923ea93b, 0x837db5d9, 0xb3af8dbc, 0xe2d9c513, 0xd20bfd76,
+ 0x4035544d, 0x70e76c28, 0x21912487, 0x11431ce2, 0x03171d43, 0x33c52526,
+ 0x62b36d89, 0x526155ec, 0xc05ffcd7, 0xf08dc4b2, 0xa1fb8c1d, 0x9129b478,
+ 0x806aa89a, 0xb0b890ff, 0xe1ced850, 0xd11ce035, 0x4322490e, 0x73f0716b,
+ 0x228639c4, 0x125401a1, 0x062e3a86, 0x36fc02e3, 0x678a4a4c, 0x57587229,
+ 0xc566db12, 0xf5b4e377, 0xa4c2abd8, 0x941093bd, 0x85538f5f, 0xb581b73a,
+ 0xe4f7ff95, 0xd425c7f0, 0x461b6ecb, 0x76c956ae, 0x27bf1e01, 0x176d2664,
+ 0x053927c5, 0x35eb1fa0, 0x649d570f, 0x544f6f6a, 0xc671c651, 0xf6a3fe34,
+ 0xa7d5b69b, 0x97078efe, 0x8644921c, 0xb696aa79, 0xe7e0e2d6, 0xd732dab3,
+ 0x450c7388, 0x75de4bed, 0x24a80342, 0x147a3b27, 0x0c5c750c, 0x3c8e4d69,
+ 0x6df805c6, 0x5d2a3da3, 0xcf149498, 0xffc6acfd, 0xaeb0e452, 0x9e62dc37,
+ 0x8f21c0d5, 0xbff3f8b0, 0xee85b01f, 0xde57887a, 0x4c692141, 0x7cbb1924,
+ 0x2dcd518b, 0x1d1f69ee, 0x0f4b684f, 0x3f99502a, 0x6eef1885, 0x5e3d20e0,
+ 0xcc0389db, 0xfcd1b1be, 0xada7f911, 0x9d75c174, 0x8c36dd96, 0xbce4e5f3,
+ 0xed92ad5c, 0xdd409539, 0x4f7e3c02, 0x7fac0467, 0x2eda4cc8, 0x1e0874ad,
+ 0x0a724f8a, 0x3aa077ef, 0x6bd63f40, 0x5b040725, 0xc93aae1e, 0xf9e8967b,
+ 0xa89eded4, 0x984ce6b1, 0x890ffa53, 0xb9ddc236, 0xe8ab8a99, 0xd879b2fc,
+ 0x4a471bc7, 0x7a9523a2, 0x2be36b0d, 0x1b315368, 0x096552c9, 0x39b76aac,
+ 0x68c12203, 0x58131a66, 0xca2db35d, 0xfaff8b38, 0xab89c397, 0x9b5bfbf2,
+ 0x8a18e710, 0xbacadf75, 0xebbc97da, 0xdb6eafbf, 0x49500684, 0x79823ee1,
+ 0x28f4764e, 0x18264e2b, 0x18b8ea18, 0x286ad27d, 0x791c9ad2, 0x49cea2b7,
+ 0xdbf00b8c, 0xeb2233e9, 0xba547b46, 0x8a864323, 0x9bc55fc1, 0xab1767a4,
+ 0xfa612f0b, 0xcab3176e, 0x588dbe55, 0x685f8630, 0x3929ce9f, 0x09fbf6fa,
+ 0x1baff75b, 0x2b7dcf3e, 0x7a0b8791, 0x4ad9bff4, 0xd8e716cf, 0xe8352eaa,
+ 0xb9436605, 0x89915e60, 0x98d24282, 0xa8007ae7, 0xf9763248, 0xc9a40a2d,
+ 0x5b9aa316, 0x6b489b73, 0x3a3ed3dc, 0x0aecebb9, 0x1e96d09e, 0x2e44e8fb,
+ 0x7f32a054, 0x4fe09831, 0xddde310a, 0xed0c096f, 0xbc7a41c0, 0x8ca879a5,
+ 0x9deb6547, 0xad395d22, 0xfc4f158d, 0xcc9d2de8, 0x5ea384d3, 0x6e71bcb6,
+ 0x3f07f419, 0x0fd5cc7c, 0x1d81cddd, 0x2d53f5b8, 0x7c25bd17, 0x4cf78572,
+ 0xdec92c49, 0xee1b142c, 0xbf6d5c83, 0x8fbf64e6, 0x9efc7804, 0xae2e4061,
+ 0xff5808ce, 0xcf8a30ab, 0x5db49990, 0x6d66a1f5, 0x3c10e95a, 0x0cc2d13f,
+ 0x14e49f14, 0x2436a771, 0x7540efde, 0x4592d7bb, 0xd7ac7e80, 0xe77e46e5,
+ 0xb6080e4a, 0x86da362f, 0x97992acd, 0xa74b12a8, 0xf63d5a07, 0xc6ef6262,
+ 0x54d1cb59, 0x6403f33c, 0x3575bb93, 0x05a783f6, 0x17f38257, 0x2721ba32,
+ 0x7657f29d, 0x4685caf8, 0xd4bb63c3, 0xe4695ba6, 0xb51f1309, 0x85cd2b6c,
+ 0x948e378e, 0xa45c0feb, 0xf52a4744, 0xc5f87f21, 0x57c6d61a, 0x6714ee7f,
+ 0x3662a6d0, 0x06b09eb5, 0x12caa592, 0x22189df7, 0x736ed558, 0x43bced3d,
+ 0xd1824406, 0xe1507c63, 0xb02634cc, 0x80f40ca9, 0x91b7104b, 0xa165282e,
+ 0xf0136081, 0xc0c158e4, 0x52fff1df, 0x622dc9ba, 0x335b8115, 0x0389b970,
+ 0x11ddb8d1, 0x210f80b4, 0x7079c81b, 0x40abf07e, 0xd2955945, 0xe2476120,
+ 0xb331298f, 0x83e311ea, 0x92a00d08, 0xa272356d, 0xf3047dc2, 0xc3d645a7,
+ 0x51e8ec9c, 0x613ad4f9, 0x304c9c56, 0x009ea433};
+
+const uint32_t kStrideExtensionTable1[256] = {
+ 0x00000000, 0x54075546, 0xa80eaa8c, 0xfc09ffca, 0x55f123e9, 0x01f676af,
+ 0xfdff8965, 0xa9f8dc23, 0xabe247d2, 0xffe51294, 0x03eced5e, 0x57ebb818,
+ 0xfe13643b, 0xaa14317d, 0x561dceb7, 0x021a9bf1, 0x5228f955, 0x062fac13,
+ 0xfa2653d9, 0xae21069f, 0x07d9dabc, 0x53de8ffa, 0xafd77030, 0xfbd02576,
+ 0xf9cabe87, 0xadcdebc1, 0x51c4140b, 0x05c3414d, 0xac3b9d6e, 0xf83cc828,
+ 0x043537e2, 0x503262a4, 0xa451f2aa, 0xf056a7ec, 0x0c5f5826, 0x58580d60,
+ 0xf1a0d143, 0xa5a78405, 0x59ae7bcf, 0x0da92e89, 0x0fb3b578, 0x5bb4e03e,
+ 0xa7bd1ff4, 0xf3ba4ab2, 0x5a429691, 0x0e45c3d7, 0xf24c3c1d, 0xa64b695b,
+ 0xf6790bff, 0xa27e5eb9, 0x5e77a173, 0x0a70f435, 0xa3882816, 0xf78f7d50,
+ 0x0b86829a, 0x5f81d7dc, 0x5d9b4c2d, 0x099c196b, 0xf595e6a1, 0xa192b3e7,
+ 0x086a6fc4, 0x5c6d3a82, 0xa064c548, 0xf463900e, 0x4d4f93a5, 0x1948c6e3,
+ 0xe5413929, 0xb1466c6f, 0x18beb04c, 0x4cb9e50a, 0xb0b01ac0, 0xe4b74f86,
+ 0xe6add477, 0xb2aa8131, 0x4ea37efb, 0x1aa42bbd, 0xb35cf79e, 0xe75ba2d8,
+ 0x1b525d12, 0x4f550854, 0x1f676af0, 0x4b603fb6, 0xb769c07c, 0xe36e953a,
+ 0x4a964919, 0x1e911c5f, 0xe298e395, 0xb69fb6d3, 0xb4852d22, 0xe0827864,
+ 0x1c8b87ae, 0x488cd2e8, 0xe1740ecb, 0xb5735b8d, 0x497aa447, 0x1d7df101,
+ 0xe91e610f, 0xbd193449, 0x4110cb83, 0x15179ec5, 0xbcef42e6, 0xe8e817a0,
+ 0x14e1e86a, 0x40e6bd2c, 0x42fc26dd, 0x16fb739b, 0xeaf28c51, 0xbef5d917,
+ 0x170d0534, 0x430a5072, 0xbf03afb8, 0xeb04fafe, 0xbb36985a, 0xef31cd1c,
+ 0x133832d6, 0x473f6790, 0xeec7bbb3, 0xbac0eef5, 0x46c9113f, 0x12ce4479,
+ 0x10d4df88, 0x44d38ace, 0xb8da7504, 0xecdd2042, 0x4525fc61, 0x1122a927,
+ 0xed2b56ed, 0xb92c03ab, 0x9a9f274a, 0xce98720c, 0x32918dc6, 0x6696d880,
+ 0xcf6e04a3, 0x9b6951e5, 0x6760ae2f, 0x3367fb69, 0x317d6098, 0x657a35de,
+ 0x9973ca14, 0xcd749f52, 0x648c4371, 0x308b1637, 0xcc82e9fd, 0x9885bcbb,
+ 0xc8b7de1f, 0x9cb08b59, 0x60b97493, 0x34be21d5, 0x9d46fdf6, 0xc941a8b0,
+ 0x3548577a, 0x614f023c, 0x635599cd, 0x3752cc8b, 0xcb5b3341, 0x9f5c6607,
+ 0x36a4ba24, 0x62a3ef62, 0x9eaa10a8, 0xcaad45ee, 0x3eced5e0, 0x6ac980a6,
+ 0x96c07f6c, 0xc2c72a2a, 0x6b3ff609, 0x3f38a34f, 0xc3315c85, 0x973609c3,
+ 0x952c9232, 0xc12bc774, 0x3d2238be, 0x69256df8, 0xc0ddb1db, 0x94dae49d,
+ 0x68d31b57, 0x3cd44e11, 0x6ce62cb5, 0x38e179f3, 0xc4e88639, 0x90efd37f,
+ 0x39170f5c, 0x6d105a1a, 0x9119a5d0, 0xc51ef096, 0xc7046b67, 0x93033e21,
+ 0x6f0ac1eb, 0x3b0d94ad, 0x92f5488e, 0xc6f21dc8, 0x3afbe202, 0x6efcb744,
+ 0xd7d0b4ef, 0x83d7e1a9, 0x7fde1e63, 0x2bd94b25, 0x82219706, 0xd626c240,
+ 0x2a2f3d8a, 0x7e2868cc, 0x7c32f33d, 0x2835a67b, 0xd43c59b1, 0x803b0cf7,
+ 0x29c3d0d4, 0x7dc48592, 0x81cd7a58, 0xd5ca2f1e, 0x85f84dba, 0xd1ff18fc,
+ 0x2df6e736, 0x79f1b270, 0xd0096e53, 0x840e3b15, 0x7807c4df, 0x2c009199,
+ 0x2e1a0a68, 0x7a1d5f2e, 0x8614a0e4, 0xd213f5a2, 0x7beb2981, 0x2fec7cc7,
+ 0xd3e5830d, 0x87e2d64b, 0x73814645, 0x27861303, 0xdb8fecc9, 0x8f88b98f,
+ 0x267065ac, 0x727730ea, 0x8e7ecf20, 0xda799a66, 0xd8630197, 0x8c6454d1,
+ 0x706dab1b, 0x246afe5d, 0x8d92227e, 0xd9957738, 0x259c88f2, 0x719bddb4,
+ 0x21a9bf10, 0x75aeea56, 0x89a7159c, 0xdda040da, 0x74589cf9, 0x205fc9bf,
+ 0xdc563675, 0x88516333, 0x8a4bf8c2, 0xde4cad84, 0x2245524e, 0x76420708,
+ 0xdfbadb2b, 0x8bbd8e6d, 0x77b471a7, 0x23b324e1};
+
+const uint32_t kStrideExtensionTable2[256] = {
+ 0x00000000, 0x678efd01, 0xcf1dfa02, 0xa8930703, 0x9bd782f5, 0xfc597ff4,
+ 0x54ca78f7, 0x334485f6, 0x3243731b, 0x55cd8e1a, 0xfd5e8919, 0x9ad07418,
+ 0xa994f1ee, 0xce1a0cef, 0x66890bec, 0x0107f6ed, 0x6486e636, 0x03081b37,
+ 0xab9b1c34, 0xcc15e135, 0xff5164c3, 0x98df99c2, 0x304c9ec1, 0x57c263c0,
+ 0x56c5952d, 0x314b682c, 0x99d86f2f, 0xfe56922e, 0xcd1217d8, 0xaa9cead9,
+ 0x020fedda, 0x658110db, 0xc90dcc6c, 0xae83316d, 0x0610366e, 0x619ecb6f,
+ 0x52da4e99, 0x3554b398, 0x9dc7b49b, 0xfa49499a, 0xfb4ebf77, 0x9cc04276,
+ 0x34534575, 0x53ddb874, 0x60993d82, 0x0717c083, 0xaf84c780, 0xc80a3a81,
+ 0xad8b2a5a, 0xca05d75b, 0x6296d058, 0x05182d59, 0x365ca8af, 0x51d255ae,
+ 0xf94152ad, 0x9ecfafac, 0x9fc85941, 0xf846a440, 0x50d5a343, 0x375b5e42,
+ 0x041fdbb4, 0x639126b5, 0xcb0221b6, 0xac8cdcb7, 0x97f7ee29, 0xf0791328,
+ 0x58ea142b, 0x3f64e92a, 0x0c206cdc, 0x6bae91dd, 0xc33d96de, 0xa4b36bdf,
+ 0xa5b49d32, 0xc23a6033, 0x6aa96730, 0x0d279a31, 0x3e631fc7, 0x59ede2c6,
+ 0xf17ee5c5, 0x96f018c4, 0xf371081f, 0x94fff51e, 0x3c6cf21d, 0x5be20f1c,
+ 0x68a68aea, 0x0f2877eb, 0xa7bb70e8, 0xc0358de9, 0xc1327b04, 0xa6bc8605,
+ 0x0e2f8106, 0x69a17c07, 0x5ae5f9f1, 0x3d6b04f0, 0x95f803f3, 0xf276fef2,
+ 0x5efa2245, 0x3974df44, 0x91e7d847, 0xf6692546, 0xc52da0b0, 0xa2a35db1,
+ 0x0a305ab2, 0x6dbea7b3, 0x6cb9515e, 0x0b37ac5f, 0xa3a4ab5c, 0xc42a565d,
+ 0xf76ed3ab, 0x90e02eaa, 0x387329a9, 0x5ffdd4a8, 0x3a7cc473, 0x5df23972,
+ 0xf5613e71, 0x92efc370, 0xa1ab4686, 0xc625bb87, 0x6eb6bc84, 0x09384185,
+ 0x083fb768, 0x6fb14a69, 0xc7224d6a, 0xa0acb06b, 0x93e8359d, 0xf466c89c,
+ 0x5cf5cf9f, 0x3b7b329e, 0x2a03aaa3, 0x4d8d57a2, 0xe51e50a1, 0x8290ada0,
+ 0xb1d42856, 0xd65ad557, 0x7ec9d254, 0x19472f55, 0x1840d9b8, 0x7fce24b9,
+ 0xd75d23ba, 0xb0d3debb, 0x83975b4d, 0xe419a64c, 0x4c8aa14f, 0x2b045c4e,
+ 0x4e854c95, 0x290bb194, 0x8198b697, 0xe6164b96, 0xd552ce60, 0xb2dc3361,
+ 0x1a4f3462, 0x7dc1c963, 0x7cc63f8e, 0x1b48c28f, 0xb3dbc58c, 0xd455388d,
+ 0xe711bd7b, 0x809f407a, 0x280c4779, 0x4f82ba78, 0xe30e66cf, 0x84809bce,
+ 0x2c139ccd, 0x4b9d61cc, 0x78d9e43a, 0x1f57193b, 0xb7c41e38, 0xd04ae339,
+ 0xd14d15d4, 0xb6c3e8d5, 0x1e50efd6, 0x79de12d7, 0x4a9a9721, 0x2d146a20,
+ 0x85876d23, 0xe2099022, 0x878880f9, 0xe0067df8, 0x48957afb, 0x2f1b87fa,
+ 0x1c5f020c, 0x7bd1ff0d, 0xd342f80e, 0xb4cc050f, 0xb5cbf3e2, 0xd2450ee3,
+ 0x7ad609e0, 0x1d58f4e1, 0x2e1c7117, 0x49928c16, 0xe1018b15, 0x868f7614,
+ 0xbdf4448a, 0xda7ab98b, 0x72e9be88, 0x15674389, 0x2623c67f, 0x41ad3b7e,
+ 0xe93e3c7d, 0x8eb0c17c, 0x8fb73791, 0xe839ca90, 0x40aacd93, 0x27243092,
+ 0x1460b564, 0x73ee4865, 0xdb7d4f66, 0xbcf3b267, 0xd972a2bc, 0xbefc5fbd,
+ 0x166f58be, 0x71e1a5bf, 0x42a52049, 0x252bdd48, 0x8db8da4b, 0xea36274a,
+ 0xeb31d1a7, 0x8cbf2ca6, 0x242c2ba5, 0x43a2d6a4, 0x70e65352, 0x1768ae53,
+ 0xbffba950, 0xd8755451, 0x74f988e6, 0x137775e7, 0xbbe472e4, 0xdc6a8fe5,
+ 0xef2e0a13, 0x88a0f712, 0x2033f011, 0x47bd0d10, 0x46bafbfd, 0x213406fc,
+ 0x89a701ff, 0xee29fcfe, 0xdd6d7908, 0xbae38409, 0x1270830a, 0x75fe7e0b,
+ 0x107f6ed0, 0x77f193d1, 0xdf6294d2, 0xb8ec69d3, 0x8ba8ec25, 0xec261124,
+ 0x44b51627, 0x233beb26, 0x223c1dcb, 0x45b2e0ca, 0xed21e7c9, 0x8aaf1ac8,
+ 0xb9eb9f3e, 0xde65623f, 0x76f6653c, 0x1178983d};
+
+const uint32_t kStrideExtensionTable3[256] = {
+ 0x00000000, 0xf20c0dfe, 0xe1f46d0d, 0x13f860f3, 0xc604aceb, 0x3408a115,
+ 0x27f0c1e6, 0xd5fccc18, 0x89e52f27, 0x7be922d9, 0x6811422a, 0x9a1d4fd4,
+ 0x4fe183cc, 0xbded8e32, 0xae15eec1, 0x5c19e33f, 0x162628bf, 0xe42a2541,
+ 0xf7d245b2, 0x05de484c, 0xd0228454, 0x222e89aa, 0x31d6e959, 0xc3dae4a7,
+ 0x9fc30798, 0x6dcf0a66, 0x7e376a95, 0x8c3b676b, 0x59c7ab73, 0xabcba68d,
+ 0xb833c67e, 0x4a3fcb80, 0x2c4c517e, 0xde405c80, 0xcdb83c73, 0x3fb4318d,
+ 0xea48fd95, 0x1844f06b, 0x0bbc9098, 0xf9b09d66, 0xa5a97e59, 0x57a573a7,
+ 0x445d1354, 0xb6511eaa, 0x63add2b2, 0x91a1df4c, 0x8259bfbf, 0x7055b241,
+ 0x3a6a79c1, 0xc866743f, 0xdb9e14cc, 0x29921932, 0xfc6ed52a, 0x0e62d8d4,
+ 0x1d9ab827, 0xef96b5d9, 0xb38f56e6, 0x41835b18, 0x527b3beb, 0xa0773615,
+ 0x758bfa0d, 0x8787f7f3, 0x947f9700, 0x66739afe, 0x5898a2fc, 0xaa94af02,
+ 0xb96ccff1, 0x4b60c20f, 0x9e9c0e17, 0x6c9003e9, 0x7f68631a, 0x8d646ee4,
+ 0xd17d8ddb, 0x23718025, 0x3089e0d6, 0xc285ed28, 0x17792130, 0xe5752cce,
+ 0xf68d4c3d, 0x048141c3, 0x4ebe8a43, 0xbcb287bd, 0xaf4ae74e, 0x5d46eab0,
+ 0x88ba26a8, 0x7ab62b56, 0x694e4ba5, 0x9b42465b, 0xc75ba564, 0x3557a89a,
+ 0x26afc869, 0xd4a3c597, 0x015f098f, 0xf3530471, 0xe0ab6482, 0x12a7697c,
+ 0x74d4f382, 0x86d8fe7c, 0x95209e8f, 0x672c9371, 0xb2d05f69, 0x40dc5297,
+ 0x53243264, 0xa1283f9a, 0xfd31dca5, 0x0f3dd15b, 0x1cc5b1a8, 0xeec9bc56,
+ 0x3b35704e, 0xc9397db0, 0xdac11d43, 0x28cd10bd, 0x62f2db3d, 0x90fed6c3,
+ 0x8306b630, 0x710abbce, 0xa4f677d6, 0x56fa7a28, 0x45021adb, 0xb70e1725,
+ 0xeb17f41a, 0x191bf9e4, 0x0ae39917, 0xf8ef94e9, 0x2d1358f1, 0xdf1f550f,
+ 0xcce735fc, 0x3eeb3802, 0xb13145f8, 0x433d4806, 0x50c528f5, 0xa2c9250b,
+ 0x7735e913, 0x8539e4ed, 0x96c1841e, 0x64cd89e0, 0x38d46adf, 0xcad86721,
+ 0xd92007d2, 0x2b2c0a2c, 0xfed0c634, 0x0cdccbca, 0x1f24ab39, 0xed28a6c7,
+ 0xa7176d47, 0x551b60b9, 0x46e3004a, 0xb4ef0db4, 0x6113c1ac, 0x931fcc52,
+ 0x80e7aca1, 0x72eba15f, 0x2ef24260, 0xdcfe4f9e, 0xcf062f6d, 0x3d0a2293,
+ 0xe8f6ee8b, 0x1afae375, 0x09028386, 0xfb0e8e78, 0x9d7d1486, 0x6f711978,
+ 0x7c89798b, 0x8e857475, 0x5b79b86d, 0xa975b593, 0xba8dd560, 0x4881d89e,
+ 0x14983ba1, 0xe694365f, 0xf56c56ac, 0x07605b52, 0xd29c974a, 0x20909ab4,
+ 0x3368fa47, 0xc164f7b9, 0x8b5b3c39, 0x795731c7, 0x6aaf5134, 0x98a35cca,
+ 0x4d5f90d2, 0xbf539d2c, 0xacabfddf, 0x5ea7f021, 0x02be131e, 0xf0b21ee0,
+ 0xe34a7e13, 0x114673ed, 0xc4babff5, 0x36b6b20b, 0x254ed2f8, 0xd742df06,
+ 0xe9a9e704, 0x1ba5eafa, 0x085d8a09, 0xfa5187f7, 0x2fad4bef, 0xdda14611,
+ 0xce5926e2, 0x3c552b1c, 0x604cc823, 0x9240c5dd, 0x81b8a52e, 0x73b4a8d0,
+ 0xa64864c8, 0x54446936, 0x47bc09c5, 0xb5b0043b, 0xff8fcfbb, 0x0d83c245,
+ 0x1e7ba2b6, 0xec77af48, 0x398b6350, 0xcb876eae, 0xd87f0e5d, 0x2a7303a3,
+ 0x766ae09c, 0x8466ed62, 0x979e8d91, 0x6592806f, 0xb06e4c77, 0x42624189,
+ 0x519a217a, 0xa3962c84, 0xc5e5b67a, 0x37e9bb84, 0x2411db77, 0xd61dd689,
+ 0x03e11a91, 0xf1ed176f, 0xe215779c, 0x10197a62, 0x4c00995d, 0xbe0c94a3,
+ 0xadf4f450, 0x5ff8f9ae, 0x8a0435b6, 0x78083848, 0x6bf058bb, 0x99fc5545,
+ 0xd3c39ec5, 0x21cf933b, 0x3237f3c8, 0xc03bfe36, 0x15c7322e, 0xe7cb3fd0,
+ 0xf4335f23, 0x063f52dd, 0x5a26b1e2, 0xa82abc1c, 0xbbd2dcef, 0x49ded111,
+ 0x9c221d09, 0x6e2e10f7, 0x7dd67004, 0x8fda7dfa};
+
+// CRCs are pre- and post- conditioned by xoring with all ones.
+static constexpr const uint32_t kCRC32Xor = static_cast<uint32_t>(0xffffffffU);
+
+// Reads a little-endian 32-bit integer from a 32-bit-aligned buffer.
+inline uint32_t ReadUint32LE(const uint8_t* buffer) {
+ return DecodeFixed32(reinterpret_cast<const char*>(buffer));
}
+// Returns the smallest address >= the given address that is aligned to N bytes.
+//
+// N must be a power of two.
+template <int N>
+constexpr inline const uint8_t* RoundUp(const uint8_t* pointer) {
+ return reinterpret_cast<uint8_t*>(
+ (reinterpret_cast<uintptr_t>(pointer) + (N - 1)) &
+ ~static_cast<uintptr_t>(N - 1));
+}
+
+} // namespace
+
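
For readers skimming the diff: RoundUp<N> above only manipulates the pointer's numeric value. A minimal standalone sketch of the same bit trick (names here are illustrative and not part of the patch), with a worked example:

#include <cassert>
#include <cstdint>

// Round an address up to the next multiple of N, where N is a power of two.
template <int N>
constexpr std::uintptr_t RoundUpAddress(std::uintptr_t address) {
  return (address + (N - 1)) & ~static_cast<std::uintptr_t>(N - 1);
}

int main() {
  assert(RoundUpAddress<4>(0x1001) == 0x1004);  // One past a boundary rounds up.
  assert(RoundUpAddress<4>(0x1004) == 0x1004);  // Aligned addresses are unchanged.
  return 0;
}
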
// Determine if the CPU running this program can accelerate the CRC32C
// calculation.
static bool CanAccelerateCRC32C() {
- if (!port::HasAcceleratedCRC32C())
- return false;
-
- // Double-check that the accelerated implementation functions correctly.
  // port::AcceleratedCRC32C returns zero when unable to accelerate.
static const char kTestCRCBuffer[] = "TestCRCBuffer";
static const char kBufSize = sizeof(kTestCRCBuffer) - 1;
@@ -300,54 +273,107 @@ static bool CanAccelerateCRC32C() {
return port::AcceleratedCRC32C(0, kTestCRCBuffer, kBufSize) == kTestCRCValue;
}
-uint32_t Extend(uint32_t crc, const char* buf, size_t size) {
+uint32_t Extend(uint32_t crc, const char* data, size_t n) {
static bool accelerate = CanAccelerateCRC32C();
if (accelerate) {
- return port::AcceleratedCRC32C(crc, buf, size);
+ return port::AcceleratedCRC32C(crc, data, n);
}
- const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
- const uint8_t *e = p + size;
- uint32_t l = crc ^ 0xffffffffu;
+ const uint8_t* p = reinterpret_cast<const uint8_t*>(data);
+ const uint8_t* e = p + n;
+ uint32_t l = crc ^ kCRC32Xor;
+
+// Process one byte at a time.
+#define STEP1 \
+ do { \
+ int c = (l & 0xff) ^ *p++; \
+ l = kByteExtensionTable[c] ^ (l >> 8); \
+ } while (0)
-#define STEP1 do { \
- int c = (l & 0xff) ^ *p++; \
- l = table0_[c] ^ (l >> 8); \
-} while (0)
-#define STEP4 do { \
- uint32_t c = l ^ LE_LOAD32(p); \
- p += 4; \
- l = table3_[c & 0xff] ^ \
- table2_[(c >> 8) & 0xff] ^ \
- table1_[(c >> 16) & 0xff] ^ \
- table0_[c >> 24]; \
-} while (0)
+// Process one of the 4 strides of 4-byte data.
+#define STEP4(s) \
+ do { \
+ crc##s = ReadUint32LE(p + s * 4) ^ kStrideExtensionTable3[crc##s & 0xff] ^ \
+ kStrideExtensionTable2[(crc##s >> 8) & 0xff] ^ \
+ kStrideExtensionTable1[(crc##s >> 16) & 0xff] ^ \
+ kStrideExtensionTable0[crc##s >> 24]; \
+ } while (0)
- // Point x at first 4-byte aligned byte in string. This might be
- // just past the end of the string.
- const uintptr_t pval = reinterpret_cast<uintptr_t>(p);
- const uint8_t* x = reinterpret_cast<const uint8_t*>(((pval + 3) >> 2) << 2);
+// Process a 16-byte swath of 4 strides, each of which has 4 bytes of data.
+#define STEP16 \
+ do { \
+ STEP4(0); \
+ STEP4(1); \
+ STEP4(2); \
+ STEP4(3); \
+ p += 16; \
+ } while (0)
+
+// Process 4 bytes that were already loaded into a word.
+#define STEP4W(w) \
+ do { \
+ w ^= l; \
+ for (size_t i = 0; i < 4; ++i) { \
+ w = (w >> 8) ^ kByteExtensionTable[w & 0xff]; \
+ } \
+ l = w; \
+ } while (0)
+
+ // Point x at first 4-byte aligned byte in the buffer. This might be past the
+ // end of the buffer.
+ const uint8_t* x = RoundUp<4>(p);
if (x <= e) {
- // Process bytes until finished or p is 4-byte aligned
+ // Process bytes until p is 4-byte aligned.
while (p != x) {
STEP1;
}
}
- // Process bytes 16 at a time
- while ((e-p) >= 16) {
- STEP4; STEP4; STEP4; STEP4;
- }
- // Process bytes 4 at a time
- while ((e-p) >= 4) {
- STEP4;
+
+ if ((e - p) >= 16) {
+ // Load a 16-byte swath into the stride partial results.
+ uint32_t crc0 = ReadUint32LE(p + 0 * 4) ^ l;
+ uint32_t crc1 = ReadUint32LE(p + 1 * 4);
+ uint32_t crc2 = ReadUint32LE(p + 2 * 4);
+ uint32_t crc3 = ReadUint32LE(p + 3 * 4);
+ p += 16;
+
+ // It is possible to get better speeds (at least on x86) by interleaving
+ // prefetching 256 bytes ahead with processing 64 bytes at a time. See the
+ // portable implementation in https://github.com/google/crc32c/.
+
+ // Process one 16-byte swath at a time.
+ while ((e - p) >= 16) {
+ STEP16;
+ }
+
+ // Advance one word at a time as far as possible.
+ while ((e - p) >= 4) {
+ STEP4(0);
+ uint32_t tmp = crc0;
+ crc0 = crc1;
+ crc1 = crc2;
+ crc2 = crc3;
+ crc3 = tmp;
+ p += 4;
+ }
+
+ // Combine the 4 partial stride results.
+ l = 0;
+ STEP4W(crc0);
+ STEP4W(crc1);
+ STEP4W(crc2);
+ STEP4W(crc3);
}
- // Process the last few bytes
+
+ // Process the last few bytes.
while (p != e) {
STEP1;
}
+#undef STEP4W
+#undef STEP16
#undef STEP4
#undef STEP1
- return l ^ 0xffffffffu;
+ return l ^ kCRC32Xor;
}
} // namespace crc32c
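
For orientation, the table machinery above is easier to see without the diff markers: the classic byte-at-a-time CRC32C loop uses only the byte-extension table plus the pre/post conditioning noted next to kCRC32Xor, and the 4-stride STEP16 path is an optimization of exactly this loop. A standalone sketch of that baseline (the table parameter stands in for kByteExtensionTable; the helper name is illustrative, not part of the patch):

#include <cstddef>
#include <cstdint>

// Byte-at-a-time CRC32C extension, equivalent to repeatedly applying STEP1.
uint32_t ExtendOneByteAtATime(const uint32_t byte_table[256], uint32_t crc,
                              const uint8_t* data, size_t n) {
  uint32_t l = crc ^ 0xffffffffu;  // Pre-condition: xor with all ones.
  for (size_t i = 0; i < n; ++i) {
    l = byte_table[(l ^ data[i]) & 0xff] ^ (l >> 8);
  }
  return l ^ 0xffffffffu;  // Post-condition: xor with all ones again.
}
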
diff --git a/src/leveldb/util/crc32c.h b/src/leveldb/util/crc32c.h
index 1d7e5c075d..98fabb0d2f 100644
--- a/src/leveldb/util/crc32c.h
+++ b/src/leveldb/util/crc32c.h
@@ -14,12 +14,10 @@ namespace crc32c {
// Return the crc32c of concat(A, data[0,n-1]) where init_crc is the
// crc32c of some string A. Extend() is often used to maintain the
// crc32c of a stream of data.
-extern uint32_t Extend(uint32_t init_crc, const char* data, size_t n);
+uint32_t Extend(uint32_t init_crc, const char* data, size_t n);
// Return the crc32c of data[0,n-1]
-inline uint32_t Value(const char* data, size_t n) {
- return Extend(0, data, n);
-}
+inline uint32_t Value(const char* data, size_t n) { return Extend(0, data, n); }
static const uint32_t kMaskDelta = 0xa282ead8ul;
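
As the comment above Extend() says, the function is meant for maintaining a crc32c over a stream: extending the checksum of one chunk with the next chunk gives the same value as checksumming the concatenation in one pass (the Extend test further down relies on this). A short usage sketch:

#include "util/crc32c.h"

// Checksum a record that arrives in two pieces; the result equals
// leveldb::crc32c::Value("hello world", 11).
uint32_t ChecksumTwoPieces() {
  uint32_t crc = leveldb::crc32c::Value("hello ", 6);
  return leveldb::crc32c::Extend(crc, "world", 5);
}
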
diff --git a/src/leveldb/util/crc32c_test.cc b/src/leveldb/util/crc32c_test.cc
index 4b957ee120..18a8494824 100644
--- a/src/leveldb/util/crc32c_test.cc
+++ b/src/leveldb/util/crc32c_test.cc
@@ -8,7 +8,7 @@
namespace leveldb {
namespace crc32c {
-class CRC { };
+class CRC {};
TEST(CRC, StandardResults) {
// From rfc3720 section B.4.
@@ -30,30 +30,19 @@ TEST(CRC, StandardResults) {
}
ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf)));
- unsigned char data[48] = {
- 0x01, 0xc0, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x14, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x04, 0x00,
- 0x00, 0x00, 0x00, 0x14,
- 0x00, 0x00, 0x00, 0x18,
- 0x28, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x02, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
+ uint8_t data[48] = {
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
ASSERT_EQ(0xd9963a56, Value(reinterpret_cast<char*>(data), sizeof(data)));
}
-TEST(CRC, Values) {
- ASSERT_NE(Value("a", 1), Value("foo", 3));
-}
+TEST(CRC, Values) { ASSERT_NE(Value("a", 1), Value("foo", 3)); }
TEST(CRC, Extend) {
- ASSERT_EQ(Value("hello world", 11),
- Extend(Value("hello ", 6), "world", 5));
+ ASSERT_EQ(Value("hello world", 11), Extend(Value("hello ", 6), "world", 5));
}
TEST(CRC, Mask) {
@@ -67,6 +56,4 @@ TEST(CRC, Mask) {
} // namespace crc32c
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/env.cc b/src/leveldb/util/env.cc
index c58a0821ef..d2f0aef326 100644
--- a/src/leveldb/util/env.cc
+++ b/src/leveldb/util/env.cc
@@ -6,30 +6,24 @@
namespace leveldb {
-Env::~Env() {
-}
+Env::~Env() = default;
Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) {
return Status::NotSupported("NewAppendableFile", fname);
}
-SequentialFile::~SequentialFile() {
-}
+SequentialFile::~SequentialFile() = default;
-RandomAccessFile::~RandomAccessFile() {
-}
+RandomAccessFile::~RandomAccessFile() = default;
-WritableFile::~WritableFile() {
-}
+WritableFile::~WritableFile() = default;
-Logger::~Logger() {
-}
+Logger::~Logger() = default;
-FileLock::~FileLock() {
-}
+FileLock::~FileLock() = default;
void Log(Logger* info_log, const char* format, ...) {
- if (info_log != NULL) {
+ if (info_log != nullptr) {
va_list ap;
va_start(ap, format);
info_log->Logv(format, ap);
@@ -38,8 +32,7 @@ void Log(Logger* info_log, const char* format, ...) {
}
static Status DoWriteStringToFile(Env* env, const Slice& data,
- const std::string& fname,
- bool should_sync) {
+ const std::string& fname, bool should_sync) {
WritableFile* file;
Status s = env->NewWritableFile(fname, &file);
if (!s.ok()) {
@@ -94,7 +87,6 @@ Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
return s;
}
-EnvWrapper::~EnvWrapper() {
-}
+EnvWrapper::~EnvWrapper() {}
} // namespace leveldb
diff --git a/src/leveldb/util/env_posix.cc b/src/leveldb/util/env_posix.cc
index f77918313e..9f5863a0f3 100644
--- a/src/leveldb/util/env_posix.cc
+++ b/src/leveldb/util/env_posix.cc
@@ -1,706 +1,906 @@
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#if !defined(LEVELDB_PLATFORM_WINDOWS)
#include <dirent.h>
-#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
-#include <time.h>
#include <unistd.h>
-#include <deque>
+
+#include <atomic>
+#include <cerrno>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
#include <limits>
+#include <queue>
#include <set>
+#include <string>
+#include <thread>
+#include <type_traits>
+#include <utility>
+
#include "leveldb/env.h"
#include "leveldb/slice.h"
+#include "leveldb/status.h"
#include "port/port.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
-#include "util/posix_logger.h"
+#include "port/thread_annotations.h"
#include "util/env_posix_test_helper.h"
+#include "util/posix_logger.h"
namespace leveldb {
namespace {
-static int open_read_only_file_limit = -1;
-static int mmap_limit = -1;
+// Set by EnvPosixTestHelper::SetReadOnlyMMapLimit() and MaxOpenFiles().
+int g_open_read_only_file_limit = -1;
+
+// Up to 4096 mmap regions for 64-bit binaries; none for 32-bit.
+constexpr const int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 4096 : 0;
+
+// Can be set using EnvPosixTestHelper::SetReadOnlyMMapLimit().
+int g_mmap_limit = kDefaultMmapLimit;
-static Status IOError(const std::string& context, int err_number) {
- return Status::IOError(context, strerror(err_number));
+// Common flags defined for all posix open operations
+#if defined(HAVE_O_CLOEXEC)
+constexpr const int kOpenBaseFlags = O_CLOEXEC;
+#else
+constexpr const int kOpenBaseFlags = 0;
+#endif // defined(HAVE_O_CLOEXEC)
+
+constexpr const size_t kWritableFileBufferSize = 65536;
+
+Status PosixError(const std::string& context, int error_number) {
+ if (error_number == ENOENT) {
+ return Status::NotFound(context, std::strerror(error_number));
+ } else {
+ return Status::IOError(context, std::strerror(error_number));
+ }
}
// Helper class to limit resource usage to avoid exhaustion.
// Currently used to limit read-only file descriptors and mmap file usage
-// so that we do not end up running out of file descriptors, virtual memory,
-// or running into kernel performance problems for very large databases.
+// so that we do not run out of file descriptors or virtual memory, or run into
+// kernel performance problems for very large databases.
class Limiter {
public:
- // Limit maximum number of resources to |n|.
- Limiter(intptr_t n) {
- SetAllowed(n);
- }
+ // Limit maximum number of resources to |max_acquires|.
+ Limiter(int max_acquires) : acquires_allowed_(max_acquires) {}
+
+ Limiter(const Limiter&) = delete;
+ Limiter operator=(const Limiter&) = delete;
// If another resource is available, acquire it and return true.
// Else return false.
bool Acquire() {
- if (GetAllowed() <= 0) {
- return false;
- }
- MutexLock l(&mu_);
- intptr_t x = GetAllowed();
- if (x <= 0) {
- return false;
- } else {
- SetAllowed(x - 1);
- return true;
- }
+ int old_acquires_allowed =
+ acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
+
+ if (old_acquires_allowed > 0) return true;
+
+ acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
+ return false;
}
// Release a resource acquired by a previous call to Acquire() that returned
// true.
- void Release() {
- MutexLock l(&mu_);
- SetAllowed(GetAllowed() + 1);
- }
+ void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); }
private:
- port::Mutex mu_;
- port::AtomicPointer allowed_;
-
- intptr_t GetAllowed() const {
- return reinterpret_cast<intptr_t>(allowed_.Acquire_Load());
- }
-
- // REQUIRES: mu_ must be held
- void SetAllowed(intptr_t v) {
- allowed_.Release_Store(reinterpret_cast<void*>(v));
- }
-
- Limiter(const Limiter&);
- void operator=(const Limiter&);
+ // The number of available resources.
+ //
+ // This is a counter and is not tied to the invariants of any other class, so
+ // it can be operated on safely using std::memory_order_relaxed.
+ std::atomic<int> acquires_allowed_;
};
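
The rewritten Limiter swaps the mutex-guarded counter for a single relaxed atomic: Acquire() decrements speculatively and restores the count when no slot was actually available. A small usage sketch of the intended protocol (the caller code is illustrative; the Limiter class itself is defined just above):

// Try to use one slot of a limited resource, mirroring how mmap_limiter_ and
// fd_limiter_ are used further down in this file.
bool WithOneSlot(Limiter& limiter) {
  if (!limiter.Acquire()) {
    return false;  // Limit reached; the caller must fall back (e.g. to pread).
  }
  // ... use the resource, e.g. keep a file descriptor or mmap region alive ...
  limiter.Release();  // Pair every successful Acquire() with a Release().
  return true;
}
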
-class PosixSequentialFile: public SequentialFile {
- private:
- std::string filename_;
- FILE* file_;
-
+// Implements sequential read access in a file using read().
+//
+// Instances of this class are thread-friendly but not thread-safe, as required
+// by the SequentialFile API.
+class PosixSequentialFile final : public SequentialFile {
public:
- PosixSequentialFile(const std::string& fname, FILE* f)
- : filename_(fname), file_(f) { }
- virtual ~PosixSequentialFile() { fclose(file_); }
-
- virtual Status Read(size_t n, Slice* result, char* scratch) {
- Status s;
- size_t r = fread_unlocked(scratch, 1, n, file_);
- *result = Slice(scratch, r);
- if (r < n) {
- if (feof(file_)) {
- // We leave status as ok if we hit the end of the file
- } else {
- // A partial read with an error: return a non-ok status
- s = IOError(filename_, errno);
+ PosixSequentialFile(std::string filename, int fd)
+ : fd_(fd), filename_(filename) {}
+ ~PosixSequentialFile() override { close(fd_); }
+
+ Status Read(size_t n, Slice* result, char* scratch) override {
+ Status status;
+ while (true) {
+ ::ssize_t read_size = ::read(fd_, scratch, n);
+ if (read_size < 0) { // Read error.
+ if (errno == EINTR) {
+ continue; // Retry
+ }
+ status = PosixError(filename_, errno);
+ break;
}
+ *result = Slice(scratch, read_size);
+ break;
}
- return s;
+ return status;
}
- virtual Status Skip(uint64_t n) {
- if (fseek(file_, n, SEEK_CUR)) {
- return IOError(filename_, errno);
+ Status Skip(uint64_t n) override {
+ if (::lseek(fd_, n, SEEK_CUR) == static_cast<off_t>(-1)) {
+ return PosixError(filename_, errno);
}
return Status::OK();
}
- virtual std::string GetName() const { return filename_; }
-};
+ virtual std::string GetName() const override { return filename_; }
-// pread() based random-access
-class PosixRandomAccessFile: public RandomAccessFile {
private:
- std::string filename_;
- bool temporary_fd_; // If true, fd_ is -1 and we open on every read.
- int fd_;
- Limiter* limiter_;
+ const int fd_;
+ const std::string filename_;
+};
+// Implements random read access in a file using pread().
+//
+// Instances of this class are thread-safe, as required by the RandomAccessFile
+// API. Instances are immutable and Read() only calls thread-safe library
+// functions.
+class PosixRandomAccessFile final : public RandomAccessFile {
public:
- PosixRandomAccessFile(const std::string& fname, int fd, Limiter* limiter)
- : filename_(fname), fd_(fd), limiter_(limiter) {
- temporary_fd_ = !limiter->Acquire();
- if (temporary_fd_) {
- // Open file on every access.
- close(fd_);
- fd_ = -1;
+ // The new instance takes ownership of |fd|. |fd_limiter| must outlive this
+ // instance, and is used to decide whether this instance may keep its file
+ // descriptor open for its entire lifetime.
+ PosixRandomAccessFile(std::string filename, int fd, Limiter* fd_limiter)
+ : has_permanent_fd_(fd_limiter->Acquire()),
+ fd_(has_permanent_fd_ ? fd : -1),
+ fd_limiter_(fd_limiter),
+ filename_(std::move(filename)) {
+ if (!has_permanent_fd_) {
+ assert(fd_ == -1);
+ ::close(fd); // The file will be opened on every read.
}
}
- virtual ~PosixRandomAccessFile() {
- if (!temporary_fd_) {
- close(fd_);
- limiter_->Release();
+ ~PosixRandomAccessFile() override {
+ if (has_permanent_fd_) {
+ assert(fd_ != -1);
+ ::close(fd_);
+ fd_limiter_->Release();
}
}
- virtual Status Read(uint64_t offset, size_t n, Slice* result,
- char* scratch) const {
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
int fd = fd_;
- if (temporary_fd_) {
- fd = open(filename_.c_str(), O_RDONLY);
+ if (!has_permanent_fd_) {
+ fd = ::open(filename_.c_str(), O_RDONLY | kOpenBaseFlags);
if (fd < 0) {
- return IOError(filename_, errno);
+ return PosixError(filename_, errno);
}
}
- Status s;
- ssize_t r = pread(fd, scratch, n, static_cast<off_t>(offset));
- *result = Slice(scratch, (r < 0) ? 0 : r);
- if (r < 0) {
- // An error: return a non-ok status
- s = IOError(filename_, errno);
+ assert(fd != -1);
+
+ Status status;
+ ssize_t read_size = ::pread(fd, scratch, n, static_cast<off_t>(offset));
+ *result = Slice(scratch, (read_size < 0) ? 0 : read_size);
+ if (read_size < 0) {
+ // An error: return a non-ok status.
+ status = PosixError(filename_, errno);
}
- if (temporary_fd_) {
+ if (!has_permanent_fd_) {
// Close the temporary file descriptor opened earlier.
- close(fd);
+ assert(fd != fd_);
+ ::close(fd);
}
- return s;
+ return status;
}
- virtual std::string GetName() const { return filename_; }
-};
+ virtual std::string GetName() const override { return filename_; }
-// mmap() based random-access
-class PosixMmapReadableFile: public RandomAccessFile {
private:
- std::string filename_;
- void* mmapped_region_;
- size_t length_;
- Limiter* limiter_;
+ const bool has_permanent_fd_; // If false, the file is opened on every read.
+ const int fd_; // -1 if has_permanent_fd_ is false.
+ Limiter* const fd_limiter_;
+ const std::string filename_;
+};
+// Implements random read access in a file using mmap().
+//
+// Instances of this class are thread-safe, as required by the RandomAccessFile
+// API. Instances are immutable and Read() only calls thread-safe library
+// functions.
+class PosixMmapReadableFile final : public RandomAccessFile {
public:
- // base[0,length-1] contains the mmapped contents of the file.
- PosixMmapReadableFile(const std::string& fname, void* base, size_t length,
- Limiter* limiter)
- : filename_(fname), mmapped_region_(base), length_(length),
- limiter_(limiter) {
- }
-
- virtual ~PosixMmapReadableFile() {
- munmap(mmapped_region_, length_);
- limiter_->Release();
- }
-
- virtual Status Read(uint64_t offset, size_t n, Slice* result,
- char* scratch) const {
- Status s;
+ // mmap_base[0, length-1] points to the memory-mapped contents of the file. It
+ // must be the result of a successful call to mmap(). This instance takes
+ // ownership of the region.
+ //
+ // |mmap_limiter| must outlive this instance. The caller must have already
+ // acquired the right to use one mmap region, which will be released when this
+ // instance is destroyed.
+ PosixMmapReadableFile(std::string filename, char* mmap_base, size_t length,
+ Limiter* mmap_limiter)
+ : mmap_base_(mmap_base),
+ length_(length),
+ mmap_limiter_(mmap_limiter),
+ filename_(std::move(filename)) {}
+
+ ~PosixMmapReadableFile() override {
+ ::munmap(static_cast<void*>(mmap_base_), length_);
+ mmap_limiter_->Release();
+ }
+
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
if (offset + n > length_) {
*result = Slice();
- s = IOError(filename_, EINVAL);
- } else {
- *result = Slice(reinterpret_cast<char*>(mmapped_region_) + offset, n);
+ return PosixError(filename_, EINVAL);
}
- return s;
+
+ *result = Slice(mmap_base_ + offset, n);
+ return Status::OK();
}
- virtual std::string GetName() const { return filename_; }
-};
+ virtual std::string GetName() const override { return filename_; }
-class PosixWritableFile : public WritableFile {
private:
- std::string filename_;
- FILE* file_;
+ char* const mmap_base_;
+ const size_t length_;
+ Limiter* const mmap_limiter_;
+ const std::string filename_;
+};
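
The mmap-backed reader above works because the whole file is mapped read-only once, after which Read() is just pointer arithmetic into the mapping. A standalone sketch of that setup (POSIX-only, illustrative names, error handling trimmed):

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

// Map an entire file read-only. On success, stores the base address in |base|
// and returns the file size; returns -1 on failure. Caller munmap()s the region.
long MapWholeFileReadOnly(const char* path, const char** base) {
  int fd = ::open(path, O_RDONLY);
  if (fd < 0) return -1;
  struct ::stat file_stat;
  if (::fstat(fd, &file_stat) != 0) { ::close(fd); return -1; }
  void* mem = ::mmap(nullptr, file_stat.st_size, PROT_READ, MAP_SHARED, fd, 0);
  ::close(fd);  // The mapping remains valid after the descriptor is closed.
  if (mem == MAP_FAILED) return -1;
  *base = static_cast<const char*>(mem);
  return static_cast<long>(file_stat.st_size);
}
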
+class PosixWritableFile final : public WritableFile {
public:
- PosixWritableFile(const std::string& fname, FILE* f)
- : filename_(fname), file_(f) { }
-
- ~PosixWritableFile() {
- if (file_ != NULL) {
+ PosixWritableFile(std::string filename, int fd)
+ : pos_(0),
+ fd_(fd),
+ is_manifest_(IsManifest(filename)),
+ filename_(std::move(filename)),
+ dirname_(Dirname(filename_)) {}
+
+ ~PosixWritableFile() override {
+ if (fd_ >= 0) {
// Ignoring any potential errors
- fclose(file_);
+ Close();
}
}
- virtual Status Append(const Slice& data) {
- size_t r = fwrite_unlocked(data.data(), 1, data.size(), file_);
- if (r != data.size()) {
- return IOError(filename_, errno);
+ Status Append(const Slice& data) override {
+ size_t write_size = data.size();
+ const char* write_data = data.data();
+
+ // Fit as much as possible into buffer.
+ size_t copy_size = std::min(write_size, kWritableFileBufferSize - pos_);
+ std::memcpy(buf_ + pos_, write_data, copy_size);
+ write_data += copy_size;
+ write_size -= copy_size;
+ pos_ += copy_size;
+ if (write_size == 0) {
+ return Status::OK();
}
- return Status::OK();
+
+ // Can't fit in buffer, so need to do at least one write.
+ Status status = FlushBuffer();
+ if (!status.ok()) {
+ return status;
+ }
+
+ // Small writes go to buffer, large writes are written directly.
+ if (write_size < kWritableFileBufferSize) {
+ std::memcpy(buf_, write_data, write_size);
+ pos_ = write_size;
+ return Status::OK();
+ }
+ return WriteUnbuffered(write_data, write_size);
}
- virtual Status Close() {
- Status result;
- if (fclose(file_) != 0) {
- result = IOError(filename_, errno);
+ Status Close() override {
+ Status status = FlushBuffer();
+ const int close_result = ::close(fd_);
+ if (close_result < 0 && status.ok()) {
+ status = PosixError(filename_, errno);
}
- file_ = NULL;
- return result;
+ fd_ = -1;
+ return status;
}
- virtual Status Flush() {
- if (fflush_unlocked(file_) != 0) {
- return IOError(filename_, errno);
+ Status Flush() override { return FlushBuffer(); }
+
+ Status Sync() override {
+ // Ensure new files referred to by the manifest are in the filesystem.
+ //
+ // This needs to happen before the manifest file is flushed to disk, to
+ // avoid crashing in a state where the manifest refers to files that are not
+ // yet on disk.
+ Status status = SyncDirIfManifest();
+ if (!status.ok()) {
+ return status;
+ }
+
+ status = FlushBuffer();
+ if (!status.ok()) {
+ return status;
+ }
+
+ return SyncFd(fd_, filename_, false);
+ }
+
+ private:
+ Status FlushBuffer() {
+ Status status = WriteUnbuffered(buf_, pos_);
+ pos_ = 0;
+ return status;
+ }
+
+ Status WriteUnbuffered(const char* data, size_t size) {
+ while (size > 0) {
+ ssize_t write_result = ::write(fd_, data, size);
+ if (write_result < 0) {
+ if (errno == EINTR) {
+ continue; // Retry
+ }
+ return PosixError(filename_, errno);
+ }
+ data += write_result;
+ size -= write_result;
}
return Status::OK();
}
Status SyncDirIfManifest() {
- const char* f = filename_.c_str();
- const char* sep = strrchr(f, '/');
- Slice basename;
- std::string dir;
- if (sep == NULL) {
- dir = ".";
- basename = f;
+ Status status;
+ if (!is_manifest_) {
+ return status;
+ }
+
+ int fd = ::open(dirname_.c_str(), O_RDONLY | kOpenBaseFlags);
+ if (fd < 0) {
+ status = PosixError(dirname_, errno);
} else {
- dir = std::string(f, sep - f);
- basename = sep + 1;
+ status = SyncFd(fd, dirname_, true);
+ ::close(fd);
+ }
+ return status;
+ }
+
+ // Ensures that all the caches associated with the given file descriptor's
+ // data are flushed all the way to durable media, and can withstand power
+ // failures.
+ //
+ // The path argument is only used to populate the description string in the
+ // returned Status if an error occurs.
+ static Status SyncFd(int fd, const std::string& fd_path, bool syncing_dir) {
+#if HAVE_FULLFSYNC
+ // On macOS and iOS, fsync() doesn't guarantee durability past power
+ // failures. fcntl(F_FULLFSYNC) is required for that purpose. Some
+ // filesystems don't support fcntl(F_FULLFSYNC), and require a fallback to
+ // fsync().
+ if (::fcntl(fd, F_FULLFSYNC) == 0) {
+ return Status::OK();
}
- Status s;
- if (basename.starts_with("MANIFEST")) {
- int fd = open(dir.c_str(), O_RDONLY);
- if (fd < 0) {
- s = IOError(dir, errno);
- } else {
- if (fsync(fd) < 0 && errno != EINVAL) {
- s = IOError(dir, errno);
- }
- close(fd);
- }
+#endif // HAVE_FULLFSYNC
+
+#if HAVE_FDATASYNC
+ bool sync_success = ::fdatasync(fd) == 0;
+#else
+ bool sync_success = ::fsync(fd) == 0;
+#endif // HAVE_FDATASYNC
+
+ if (sync_success) {
+ return Status::OK();
}
- return s;
+ // Do not crash if filesystem can't fsync directories
+ // (see https://github.com/bitcoin/bitcoin/pull/10000)
+ if (syncing_dir && errno == EINVAL) {
+ return Status::OK();
+ }
+ return PosixError(fd_path, errno);
}
- virtual Status Sync() {
- // Ensure new files referred to by the manifest are in the filesystem.
- Status s = SyncDirIfManifest();
- if (!s.ok()) {
- return s;
+ // Returns the directory name in a path pointing to a file.
+ //
+ // Returns "." if the path does not contain any directory separator.
+ static std::string Dirname(const std::string& filename) {
+ std::string::size_type separator_pos = filename.rfind('/');
+ if (separator_pos == std::string::npos) {
+ return std::string(".");
}
- if (fflush_unlocked(file_) != 0 ||
- fdatasync(fileno(file_)) != 0) {
- s = Status::IOError(filename_, strerror(errno));
+ // The filename component should not contain a path separator. If it does,
+ // the splitting was done incorrectly.
+ assert(filename.find('/', separator_pos + 1) == std::string::npos);
+
+ return filename.substr(0, separator_pos);
+ }
+
+ // Extracts the file name from a path pointing to a file.
+ //
+ // The returned Slice points to |filename|'s data buffer, so it is only valid
+ // while |filename| is alive and unchanged.
+ static Slice Basename(const std::string& filename) {
+ std::string::size_type separator_pos = filename.rfind('/');
+ if (separator_pos == std::string::npos) {
+ return Slice(filename);
}
- return s;
+ // The filename component should not contain a path separator. If it does,
+ // the splitting was done incorrectly.
+ assert(filename.find('/', separator_pos + 1) == std::string::npos);
+
+ return Slice(filename.data() + separator_pos + 1,
+ filename.length() - separator_pos - 1);
}
- virtual std::string GetName() const { return filename_; }
+ // True if the given file is a manifest file.
+ static bool IsManifest(const std::string& filename) {
+ return Basename(filename).starts_with("MANIFEST");
+ }
+
+ virtual std::string GetName() const override { return filename_; }
+
+ // buf_[0, pos_ - 1] contains data to be written to fd_.
+ char buf_[kWritableFileBufferSize];
+ size_t pos_;
+ int fd_;
+
+ const bool is_manifest_; // True if the file's name starts with MANIFEST.
+ const std::string filename_;
+ const std::string dirname_; // The directory of filename_.
};
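
Taken together, PosixWritableFile encodes the usual POSIX durability recipe: flush the user-space write buffer, push the file data to stable media (fcntl(F_FULLFSYNC) on macOS, otherwise fdatasync()/fsync()), and, when a MANIFEST is involved, also fsync the containing directory so the new directory entry itself survives a crash. A condensed standalone sketch of that sequence (Linux-flavoured, illustrative names, partial writes and EINTR not handled):

#include <fcntl.h>
#include <unistd.h>
#include <cerrno>
#include <cstddef>
#include <string>

// Durably write a small file and its directory entry. Sketch only.
bool WriteFileDurably(const std::string& dir, const std::string& name,
                      const char* data, size_t size) {
  const std::string path = dir + "/" + name;
  int fd = ::open(path.c_str(), O_TRUNC | O_WRONLY | O_CREAT, 0644);
  if (fd < 0) return false;
  bool ok = ::write(fd, data, size) == static_cast<ssize_t>(size) &&
            ::fdatasync(fd) == 0;  // File contents reach stable storage.
  ok = (::close(fd) == 0) && ok;

  int dir_fd = ::open(dir.c_str(), O_RDONLY);
  if (dir_fd < 0) return false;
  // Some filesystems refuse to fsync a directory; tolerate EINVAL like SyncFd().
  ok = (::fsync(dir_fd) == 0 || errno == EINVAL) && ok;
  ::close(dir_fd);
  return ok;
}
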
-static int LockOrUnlock(int fd, bool lock) {
+int LockOrUnlock(int fd, bool lock) {
errno = 0;
- struct flock f;
- memset(&f, 0, sizeof(f));
- f.l_type = (lock ? F_WRLCK : F_UNLCK);
- f.l_whence = SEEK_SET;
- f.l_start = 0;
- f.l_len = 0; // Lock/unlock entire file
- return fcntl(fd, F_SETLK, &f);
+ struct ::flock file_lock_info;
+ std::memset(&file_lock_info, 0, sizeof(file_lock_info));
+ file_lock_info.l_type = (lock ? F_WRLCK : F_UNLCK);
+ file_lock_info.l_whence = SEEK_SET;
+ file_lock_info.l_start = 0;
+ file_lock_info.l_len = 0; // Lock/unlock entire file.
+ return ::fcntl(fd, F_SETLK, &file_lock_info);
}
+// Instances are thread-safe because they are immutable.
class PosixFileLock : public FileLock {
public:
- int fd_;
- std::string name_;
+ PosixFileLock(int fd, std::string filename)
+ : fd_(fd), filename_(std::move(filename)) {}
+
+ int fd() const { return fd_; }
+ const std::string& filename() const { return filename_; }
+
+ private:
+ const int fd_;
+ const std::string filename_;
};
-// Set of locked files. We keep a separate set instead of just
-// relying on fcntrl(F_SETLK) since fcntl(F_SETLK) does not provide
-// any protection against multiple uses from the same process.
+// Tracks the files locked by PosixEnv::LockFile().
+//
+// We maintain a separate set instead of relying on fcntl(F_SETLK) because
+// fcntl(F_SETLK) does not provide any protection against multiple uses from the
+// same process.
+//
+// Instances are thread-safe because all member data is guarded by a mutex.
class PosixLockTable {
- private:
- port::Mutex mu_;
- std::set<std::string> locked_files_;
public:
- bool Insert(const std::string& fname) {
- MutexLock l(&mu_);
- return locked_files_.insert(fname).second;
- }
- void Remove(const std::string& fname) {
- MutexLock l(&mu_);
+ bool Insert(const std::string& fname) LOCKS_EXCLUDED(mu_) {
+ mu_.Lock();
+ bool succeeded = locked_files_.insert(fname).second;
+ mu_.Unlock();
+ return succeeded;
+ }
+ void Remove(const std::string& fname) LOCKS_EXCLUDED(mu_) {
+ mu_.Lock();
locked_files_.erase(fname);
+ mu_.Unlock();
}
+
+ private:
+ port::Mutex mu_;
+ std::set<std::string> locked_files_ GUARDED_BY(mu_);
};
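
The comment above is the key subtlety: POSIX fcntl(F_SETLK) record locks are owned by the process, so a second LockFile() from the same process would appear to succeed at the fcntl() level without the in-process table. A small sketch of the behaviour that motivates PosixLockTable (hypothetical helper using the same flock setup as LockOrUnlock above):

#include <fcntl.h>
#include <unistd.h>
#include <cstring>

// Take (or fail to take) an exclusive fcntl() write lock covering a whole file.
bool TryWholeFileWriteLock(int fd) {
  struct ::flock lk;
  std::memset(&lk, 0, sizeof(lk));
  lk.l_type = F_WRLCK;
  lk.l_whence = SEEK_SET;
  lk.l_start = 0;
  lk.l_len = 0;  // Zero length means "to end of file".
  return ::fcntl(fd, F_SETLK, &lk) != -1;
}

// If fd_a and fd_b are two descriptors for the same path opened by one process,
// both calls below typically succeed, because the second fcntl() merely
// re-acquires a lock the process already owns. That is the gap PosixLockTable
// closes.
// bool first = TryWholeFileWriteLock(fd_a);
// bool second = TryWholeFileWriteLock(fd_b);  // Also true within one process.
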
class PosixEnv : public Env {
public:
PosixEnv();
- virtual ~PosixEnv() {
- char msg[] = "Destroying Env::Default()\n";
- fwrite(msg, 1, sizeof(msg), stderr);
- abort();
- }
-
- virtual Status NewSequentialFile(const std::string& fname,
- SequentialFile** result) {
- FILE* f = fopen(fname.c_str(), "r");
- if (f == NULL) {
- *result = NULL;
- return IOError(fname, errno);
- } else {
- *result = new PosixSequentialFile(fname, f);
- return Status::OK();
+ ~PosixEnv() override {
+ static const char msg[] =
+ "PosixEnv singleton destroyed. Unsupported behavior!\n";
+ std::fwrite(msg, 1, sizeof(msg), stderr);
+ std::abort();
+ }
+
+ Status NewSequentialFile(const std::string& filename,
+ SequentialFile** result) override {
+ int fd = ::open(filename.c_str(), O_RDONLY | kOpenBaseFlags);
+ if (fd < 0) {
+ *result = nullptr;
+ return PosixError(filename, errno);
}
+
+ *result = new PosixSequentialFile(filename, fd);
+ return Status::OK();
}
- virtual Status NewRandomAccessFile(const std::string& fname,
- RandomAccessFile** result) {
- *result = NULL;
- Status s;
- int fd = open(fname.c_str(), O_RDONLY);
+ Status NewRandomAccessFile(const std::string& filename,
+ RandomAccessFile** result) override {
+ *result = nullptr;
+ int fd = ::open(filename.c_str(), O_RDONLY | kOpenBaseFlags);
if (fd < 0) {
- s = IOError(fname, errno);
- } else if (mmap_limit_.Acquire()) {
- uint64_t size;
- s = GetFileSize(fname, &size);
- if (s.ok()) {
- void* base = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
- if (base != MAP_FAILED) {
- *result = new PosixMmapReadableFile(fname, base, size, &mmap_limit_);
- } else {
- s = IOError(fname, errno);
- }
- }
- close(fd);
- if (!s.ok()) {
- mmap_limit_.Release();
+ return PosixError(filename, errno);
+ }
+
+ if (!mmap_limiter_.Acquire()) {
+ *result = new PosixRandomAccessFile(filename, fd, &fd_limiter_);
+ return Status::OK();
+ }
+
+ uint64_t file_size;
+ Status status = GetFileSize(filename, &file_size);
+ if (status.ok()) {
+ void* mmap_base =
+ ::mmap(/*addr=*/nullptr, file_size, PROT_READ, MAP_SHARED, fd, 0);
+ if (mmap_base != MAP_FAILED) {
+ *result = new PosixMmapReadableFile(filename,
+ reinterpret_cast<char*>(mmap_base),
+ file_size, &mmap_limiter_);
+ } else {
+ status = PosixError(filename, errno);
}
- } else {
- *result = new PosixRandomAccessFile(fname, fd, &fd_limit_);
}
- return s;
+ ::close(fd);
+ if (!status.ok()) {
+ mmap_limiter_.Release();
+ }
+ return status;
}
- virtual Status NewWritableFile(const std::string& fname,
- WritableFile** result) {
- Status s;
- FILE* f = fopen(fname.c_str(), "w");
- if (f == NULL) {
- *result = NULL;
- s = IOError(fname, errno);
- } else {
- *result = new PosixWritableFile(fname, f);
+ Status NewWritableFile(const std::string& filename,
+ WritableFile** result) override {
+ int fd = ::open(filename.c_str(),
+ O_TRUNC | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
+ if (fd < 0) {
+ *result = nullptr;
+ return PosixError(filename, errno);
}
- return s;
+
+ *result = new PosixWritableFile(filename, fd);
+ return Status::OK();
}
- virtual Status NewAppendableFile(const std::string& fname,
- WritableFile** result) {
- Status s;
- FILE* f = fopen(fname.c_str(), "a");
- if (f == NULL) {
- *result = NULL;
- s = IOError(fname, errno);
- } else {
- *result = new PosixWritableFile(fname, f);
+ Status NewAppendableFile(const std::string& filename,
+ WritableFile** result) override {
+ int fd = ::open(filename.c_str(),
+ O_APPEND | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
+ if (fd < 0) {
+ *result = nullptr;
+ return PosixError(filename, errno);
}
- return s;
+
+ *result = new PosixWritableFile(filename, fd);
+ return Status::OK();
}
- virtual bool FileExists(const std::string& fname) {
- return access(fname.c_str(), F_OK) == 0;
+ bool FileExists(const std::string& filename) override {
+ return ::access(filename.c_str(), F_OK) == 0;
}
- virtual Status GetChildren(const std::string& dir,
- std::vector<std::string>* result) {
+ Status GetChildren(const std::string& directory_path,
+ std::vector<std::string>* result) override {
result->clear();
- DIR* d = opendir(dir.c_str());
- if (d == NULL) {
- return IOError(dir, errno);
+ ::DIR* dir = ::opendir(directory_path.c_str());
+ if (dir == nullptr) {
+ return PosixError(directory_path, errno);
}
- struct dirent* entry;
- while ((entry = readdir(d)) != NULL) {
- result->push_back(entry->d_name);
+ struct ::dirent* entry;
+ while ((entry = ::readdir(dir)) != nullptr) {
+ result->emplace_back(entry->d_name);
}
- closedir(d);
+ ::closedir(dir);
return Status::OK();
}
- virtual Status DeleteFile(const std::string& fname) {
- Status result;
- if (unlink(fname.c_str()) != 0) {
- result = IOError(fname, errno);
+ Status DeleteFile(const std::string& filename) override {
+ if (::unlink(filename.c_str()) != 0) {
+ return PosixError(filename, errno);
}
- return result;
+ return Status::OK();
}
- virtual Status CreateDir(const std::string& name) {
- Status result;
- if (mkdir(name.c_str(), 0755) != 0) {
- result = IOError(name, errno);
+ Status CreateDir(const std::string& dirname) override {
+ if (::mkdir(dirname.c_str(), 0755) != 0) {
+ return PosixError(dirname, errno);
}
- return result;
+ return Status::OK();
}
- virtual Status DeleteDir(const std::string& name) {
- Status result;
- if (rmdir(name.c_str()) != 0) {
- result = IOError(name, errno);
+ Status DeleteDir(const std::string& dirname) override {
+ if (::rmdir(dirname.c_str()) != 0) {
+ return PosixError(dirname, errno);
}
- return result;
+ return Status::OK();
}
- virtual Status GetFileSize(const std::string& fname, uint64_t* size) {
- Status s;
- struct stat sbuf;
- if (stat(fname.c_str(), &sbuf) != 0) {
+ Status GetFileSize(const std::string& filename, uint64_t* size) override {
+ struct ::stat file_stat;
+ if (::stat(filename.c_str(), &file_stat) != 0) {
*size = 0;
- s = IOError(fname, errno);
- } else {
- *size = sbuf.st_size;
+ return PosixError(filename, errno);
}
- return s;
+ *size = file_stat.st_size;
+ return Status::OK();
}
- virtual Status RenameFile(const std::string& src, const std::string& target) {
- Status result;
- if (rename(src.c_str(), target.c_str()) != 0) {
- result = IOError(src, errno);
+ Status RenameFile(const std::string& from, const std::string& to) override {
+ if (std::rename(from.c_str(), to.c_str()) != 0) {
+ return PosixError(from, errno);
}
- return result;
+ return Status::OK();
}
- virtual Status LockFile(const std::string& fname, FileLock** lock) {
- *lock = NULL;
- Status result;
- int fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644);
+ Status LockFile(const std::string& filename, FileLock** lock) override {
+ *lock = nullptr;
+
+ int fd = ::open(filename.c_str(), O_RDWR | O_CREAT | kOpenBaseFlags, 0644);
if (fd < 0) {
- result = IOError(fname, errno);
- } else if (!locks_.Insert(fname)) {
- close(fd);
- result = Status::IOError("lock " + fname, "already held by process");
- } else if (LockOrUnlock(fd, true) == -1) {
- result = IOError("lock " + fname, errno);
- close(fd);
- locks_.Remove(fname);
- } else {
- PosixFileLock* my_lock = new PosixFileLock;
- my_lock->fd_ = fd;
- my_lock->name_ = fname;
- *lock = my_lock;
+ return PosixError(filename, errno);
}
- return result;
+
+ if (!locks_.Insert(filename)) {
+ ::close(fd);
+ return Status::IOError("lock " + filename, "already held by process");
+ }
+
+ if (LockOrUnlock(fd, true) == -1) {
+ int lock_errno = errno;
+ ::close(fd);
+ locks_.Remove(filename);
+ return PosixError("lock " + filename, lock_errno);
+ }
+
+ *lock = new PosixFileLock(fd, filename);
+ return Status::OK();
}
- virtual Status UnlockFile(FileLock* lock) {
- PosixFileLock* my_lock = reinterpret_cast<PosixFileLock*>(lock);
- Status result;
- if (LockOrUnlock(my_lock->fd_, false) == -1) {
- result = IOError("unlock", errno);
+ Status UnlockFile(FileLock* lock) override {
+ PosixFileLock* posix_file_lock = static_cast<PosixFileLock*>(lock);
+ if (LockOrUnlock(posix_file_lock->fd(), false) == -1) {
+ return PosixError("unlock " + posix_file_lock->filename(), errno);
}
- locks_.Remove(my_lock->name_);
- close(my_lock->fd_);
- delete my_lock;
- return result;
+ locks_.Remove(posix_file_lock->filename());
+ ::close(posix_file_lock->fd());
+ delete posix_file_lock;
+ return Status::OK();
}
- virtual void Schedule(void (*function)(void*), void* arg);
+ void Schedule(void (*background_work_function)(void* background_work_arg),
+ void* background_work_arg) override;
- virtual void StartThread(void (*function)(void* arg), void* arg);
+ void StartThread(void (*thread_main)(void* thread_main_arg),
+ void* thread_main_arg) override {
+ std::thread new_thread(thread_main, thread_main_arg);
+ new_thread.detach();
+ }
- virtual Status GetTestDirectory(std::string* result) {
- const char* env = getenv("TEST_TMPDIR");
+ Status GetTestDirectory(std::string* result) override {
+ const char* env = std::getenv("TEST_TMPDIR");
if (env && env[0] != '\0') {
*result = env;
} else {
char buf[100];
- snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d", int(geteuid()));
+ std::snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d",
+ static_cast<int>(::geteuid()));
*result = buf;
}
- // Directory may already exist
+
+ // The CreateDir status is ignored because the directory may already exist.
CreateDir(*result);
+
return Status::OK();
}
- static uint64_t gettid() {
- pthread_t tid = pthread_self();
- uint64_t thread_id = 0;
- memcpy(&thread_id, &tid, std::min(sizeof(thread_id), sizeof(tid)));
- return thread_id;
- }
+ Status NewLogger(const std::string& filename, Logger** result) override {
+ int fd = ::open(filename.c_str(),
+ O_APPEND | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
+ if (fd < 0) {
+ *result = nullptr;
+ return PosixError(filename, errno);
+ }
- virtual Status NewLogger(const std::string& fname, Logger** result) {
- FILE* f = fopen(fname.c_str(), "w");
- if (f == NULL) {
- *result = NULL;
- return IOError(fname, errno);
+ std::FILE* fp = ::fdopen(fd, "w");
+ if (fp == nullptr) {
+ ::close(fd);
+ *result = nullptr;
+ return PosixError(filename, errno);
} else {
- *result = new PosixLogger(f, &PosixEnv::gettid);
+ *result = new PosixLogger(fp);
return Status::OK();
}
}
- virtual uint64_t NowMicros() {
- struct timeval tv;
- gettimeofday(&tv, NULL);
- return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+ uint64_t NowMicros() override {
+ static constexpr uint64_t kUsecondsPerSecond = 1000000;
+ struct ::timeval tv;
+ ::gettimeofday(&tv, nullptr);
+ return static_cast<uint64_t>(tv.tv_sec) * kUsecondsPerSecond + tv.tv_usec;
}
- virtual void SleepForMicroseconds(int micros) {
- usleep(micros);
+ void SleepForMicroseconds(int micros) override {
+ std::this_thread::sleep_for(std::chrono::microseconds(micros));
}
private:
- void PthreadCall(const char* label, int result) {
- if (result != 0) {
- fprintf(stderr, "pthread %s: %s\n", label, strerror(result));
- abort();
- }
- }
+ void BackgroundThreadMain();
- // BGThread() is the body of the background thread
- void BGThread();
- static void* BGThreadWrapper(void* arg) {
- reinterpret_cast<PosixEnv*>(arg)->BGThread();
- return NULL;
+ static void BackgroundThreadEntryPoint(PosixEnv* env) {
+ env->BackgroundThreadMain();
}
- pthread_mutex_t mu_;
- pthread_cond_t bgsignal_;
- pthread_t bgthread_;
- bool started_bgthread_;
+ // Stores the work item data in a Schedule() call.
+ //
+ // Instances are constructed on the thread calling Schedule() and used on the
+ // background thread.
+ //
+ // This structure is thread-safe because it is immutable.
+ struct BackgroundWorkItem {
+ explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
+ : function(function), arg(arg) {}
+
+ void (*const function)(void*);
+ void* const arg;
+ };
- // Entry per Schedule() call
- struct BGItem { void* arg; void (*function)(void*); };
- typedef std::deque<BGItem> BGQueue;
- BGQueue queue_;
+ port::Mutex background_work_mutex_;
+ port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_);
+ bool started_background_thread_ GUARDED_BY(background_work_mutex_);
- PosixLockTable locks_;
- Limiter mmap_limit_;
- Limiter fd_limit_;
+ std::queue<BackgroundWorkItem> background_work_queue_
+ GUARDED_BY(background_work_mutex_);
+
+ PosixLockTable locks_; // Thread-safe.
+ Limiter mmap_limiter_; // Thread-safe.
+ Limiter fd_limiter_; // Thread-safe.
};
// Return the maximum number of concurrent mmaps.
-static int MaxMmaps() {
- if (mmap_limit >= 0) {
- return mmap_limit;
- }
- // Up to 4096 mmaps for 64-bit binaries; none for smaller pointer sizes.
- mmap_limit = sizeof(void*) >= 8 ? 4096 : 0;
- return mmap_limit;
-}
+int MaxMmaps() { return g_mmap_limit; }
// Return the maximum number of read-only files to keep open.
-static intptr_t MaxOpenFiles() {
- if (open_read_only_file_limit >= 0) {
- return open_read_only_file_limit;
+int MaxOpenFiles() {
+ if (g_open_read_only_file_limit >= 0) {
+ return g_open_read_only_file_limit;
}
- struct rlimit rlim;
- if (getrlimit(RLIMIT_NOFILE, &rlim)) {
+ struct ::rlimit rlim;
+ if (::getrlimit(RLIMIT_NOFILE, &rlim)) {
// getrlimit failed, fallback to hard-coded default.
- open_read_only_file_limit = 50;
+ g_open_read_only_file_limit = 50;
} else if (rlim.rlim_cur == RLIM_INFINITY) {
- open_read_only_file_limit = std::numeric_limits<int>::max();
+ g_open_read_only_file_limit = std::numeric_limits<int>::max();
} else {
// Allow use of 20% of available file descriptors for read-only files.
- open_read_only_file_limit = rlim.rlim_cur / 5;
+ g_open_read_only_file_limit = rlim.rlim_cur / 5;
}
- return open_read_only_file_limit;
+ return g_open_read_only_file_limit;
}
+} // namespace
+
PosixEnv::PosixEnv()
- : started_bgthread_(false),
- mmap_limit_(MaxMmaps()),
- fd_limit_(MaxOpenFiles()) {
- PthreadCall("mutex_init", pthread_mutex_init(&mu_, NULL));
- PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, NULL));
-}
+ : background_work_cv_(&background_work_mutex_),
+ started_background_thread_(false),
+ mmap_limiter_(MaxMmaps()),
+ fd_limiter_(MaxOpenFiles()) {}
-void PosixEnv::Schedule(void (*function)(void*), void* arg) {
- PthreadCall("lock", pthread_mutex_lock(&mu_));
+void PosixEnv::Schedule(
+ void (*background_work_function)(void* background_work_arg),
+ void* background_work_arg) {
+ background_work_mutex_.Lock();
- // Start background thread if necessary
- if (!started_bgthread_) {
- started_bgthread_ = true;
- PthreadCall(
- "create thread",
- pthread_create(&bgthread_, NULL, &PosixEnv::BGThreadWrapper, this));
+ // Start the background thread, if we haven't done so already.
+ if (!started_background_thread_) {
+ started_background_thread_ = true;
+ std::thread background_thread(PosixEnv::BackgroundThreadEntryPoint, this);
+ background_thread.detach();
}
- // If the queue is currently empty, the background thread may currently be
- // waiting.
- if (queue_.empty()) {
- PthreadCall("signal", pthread_cond_signal(&bgsignal_));
+ // If the queue is empty, the background thread may be waiting for work.
+ if (background_work_queue_.empty()) {
+ background_work_cv_.Signal();
}
- // Add to priority queue
- queue_.push_back(BGItem());
- queue_.back().function = function;
- queue_.back().arg = arg;
-
- PthreadCall("unlock", pthread_mutex_unlock(&mu_));
+ background_work_queue_.emplace(background_work_function, background_work_arg);
+ background_work_mutex_.Unlock();
}
-void PosixEnv::BGThread() {
+void PosixEnv::BackgroundThreadMain() {
while (true) {
- // Wait until there is an item that is ready to run
- PthreadCall("lock", pthread_mutex_lock(&mu_));
- while (queue_.empty()) {
- PthreadCall("wait", pthread_cond_wait(&bgsignal_, &mu_));
+ background_work_mutex_.Lock();
+
+ // Wait until there is work to be done.
+ while (background_work_queue_.empty()) {
+ background_work_cv_.Wait();
}
- void (*function)(void*) = queue_.front().function;
- void* arg = queue_.front().arg;
- queue_.pop_front();
+ assert(!background_work_queue_.empty());
+ auto background_work_function = background_work_queue_.front().function;
+ void* background_work_arg = background_work_queue_.front().arg;
+ background_work_queue_.pop();
- PthreadCall("unlock", pthread_mutex_unlock(&mu_));
- (*function)(arg);
+ background_work_mutex_.Unlock();
+ background_work_function(background_work_arg);
}
}
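
Editorial sketch, not part of the patch: Schedule() and BackgroundThreadMain() above form a classic single-consumer work queue guarded by a mutex and condition variable. The same shape, written with standard-library primitives instead of leveldb's port::Mutex/port::CondVar, looks roughly like this:

#include <condition_variable>
#include <cstdio>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

std::mutex mu;
std::condition_variable cv;
std::queue<std::function<void()>> work;

// Consumer: handle exactly three queued tasks, then exit.
void Worker() {
  for (int handled = 0; handled < 3; ++handled) {
    std::unique_lock<std::mutex> lock(mu);
    cv.wait(lock, [] { return !work.empty(); });  // sleep until work arrives
    std::function<void()> task = std::move(work.front());
    work.pop();
    lock.unlock();  // run the task without holding the lock, as the patch does
    task();
  }
}

int main() {
  std::thread consumer(Worker);
  for (int i = 0; i < 3; ++i) {
    {
      std::lock_guard<std::mutex> lock(mu);
      work.push([i] { std::printf("task %d\n", i); });
    }
    cv.notify_one();  // wake the consumer if it is waiting
  }
  consumer.join();
  return 0;
}
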
namespace {
-struct StartThreadState {
- void (*user_function)(void*);
- void* arg;
+
+// Wraps an Env instance whose destructor is never run.
+//
+// Intended usage:
+// using PlatformSingletonEnv = SingletonEnv<PlatformEnv>;
+// void ConfigurePosixEnv(int param) {
+// PlatformSingletonEnv::AssertEnvNotInitialized();
+// // set global configuration flags.
+// }
+// Env* Env::Default() {
+// static PlatformSingletonEnv default_env;
+// return default_env.env();
+// }
+template <typename EnvType>
+class SingletonEnv {
+ public:
+ SingletonEnv() {
+#if !defined(NDEBUG)
+ env_initialized_.store(true, std::memory_order::memory_order_relaxed);
+#endif // !defined(NDEBUG)
+ static_assert(sizeof(env_storage_) >= sizeof(EnvType),
+ "env_storage_ will not fit the Env");
+ static_assert(alignof(decltype(env_storage_)) >= alignof(EnvType),
+ "env_storage_ does not meet the Env's alignment needs");
+ new (&env_storage_) EnvType();
+ }
+ ~SingletonEnv() = default;
+
+ SingletonEnv(const SingletonEnv&) = delete;
+ SingletonEnv& operator=(const SingletonEnv&) = delete;
+
+ Env* env() { return reinterpret_cast<Env*>(&env_storage_); }
+
+ static void AssertEnvNotInitialized() {
+#if !defined(NDEBUG)
+ assert(!env_initialized_.load(std::memory_order::memory_order_relaxed));
+#endif // !defined(NDEBUG)
+ }
+
+ private:
+ typename std::aligned_storage<sizeof(EnvType), alignof(EnvType)>::type
+ env_storage_;
+#if !defined(NDEBUG)
+ static std::atomic<bool> env_initialized_;
+#endif // !defined(NDEBUG)
};
-}
-static void* StartThreadWrapper(void* arg) {
- StartThreadState* state = reinterpret_cast<StartThreadState*>(arg);
- state->user_function(state->arg);
- delete state;
- return NULL;
-}
-void PosixEnv::StartThread(void (*function)(void* arg), void* arg) {
- pthread_t t;
- StartThreadState* state = new StartThreadState;
- state->user_function = function;
- state->arg = arg;
- PthreadCall("start thread",
- pthread_create(&t, NULL, &StartThreadWrapper, state));
-}
+#if !defined(NDEBUG)
+template <typename EnvType>
+std::atomic<bool> SingletonEnv<EnvType>::env_initialized_;
+#endif // !defined(NDEBUG)
-} // namespace
+using PosixDefaultEnv = SingletonEnv<PosixEnv>;
-static pthread_once_t once = PTHREAD_ONCE_INIT;
-static Env* default_env;
-static void InitDefaultEnv() { default_env = new PosixEnv; }
+} // namespace
void EnvPosixTestHelper::SetReadOnlyFDLimit(int limit) {
- assert(default_env == NULL);
- open_read_only_file_limit = limit;
+ PosixDefaultEnv::AssertEnvNotInitialized();
+ g_open_read_only_file_limit = limit;
}
void EnvPosixTestHelper::SetReadOnlyMMapLimit(int limit) {
- assert(default_env == NULL);
- mmap_limit = limit;
+ PosixDefaultEnv::AssertEnvNotInitialized();
+ g_mmap_limit = limit;
}
Env* Env::Default() {
- pthread_once(&once, InitDefaultEnv);
- return default_env;
+ static PosixDefaultEnv env_container;
+ return env_container.env();
}
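
Editorial note: SingletonEnv above is the "never destroyed" singleton idiom. The object is placement-new'd into static aligned storage and its destructor is intentionally never run, which sidesteps static destruction-order problems at process exit. A self-contained sketch of the same idiom, using a hypothetical Widget type rather than anything from the patch:

#include <cstdio>
#include <new>
#include <type_traits>

struct Widget {
  void Hello() { std::printf("hello\n"); }
};

// Returns a Widget that is constructed once and never destroyed.
Widget* DefaultWidget() {
  static std::aligned_storage<sizeof(Widget), alignof(Widget)>::type storage;
  static Widget* instance = new (&storage) Widget();  // constructed on first use
  return instance;                                     // destructor never runs
}

int main() { DefaultWidget()->Hello(); }
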
} // namespace leveldb
-
-#endif
diff --git a/src/leveldb/util/env_posix_test.cc b/src/leveldb/util/env_posix_test.cc
index 295f8ae440..9675d739ad 100644
--- a/src/leveldb/util/env_posix_test.cc
+++ b/src/leveldb/util/env_posix_test.cc
@@ -2,27 +2,182 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "leveldb/env.h"
+#include <sys/resource.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+#include <unordered_set>
+#include <vector>
+#include "leveldb/env.h"
#include "port/port.h"
-#include "util/testharness.h"
#include "util/env_posix_test_helper.h"
+#include "util/testharness.h"
+
+#if HAVE_O_CLOEXEC
+
+namespace {
+
+// Exit codes for the helper process spawned by TestCloseOnExec* tests.
+// Useful for debugging test failures.
+constexpr int kTextCloseOnExecHelperExecFailedCode = 61;
+constexpr int kTextCloseOnExecHelperDup2FailedCode = 62;
+constexpr int kTextCloseOnExecHelperFoundOpenFdCode = 63;
+
+// Global set by main() and read in TestCloseOnExec.
+//
+// The argv[0] value is stored in a std::vector instead of a std::string because
+// std::string does not return a mutable pointer to its buffer until C++17.
+//
+// The vector stores the string pointed to by argv[0], plus the trailing null.
+std::vector<char>* GetArgvZero() {
+ static std::vector<char> program_name;
+ return &program_name;
+}
+
+// Command-line switch used to run this test as the CloseOnExecSwitch helper.
+static const char kTestCloseOnExecSwitch[] = "--test-close-on-exec-helper";
+
+// Executed in a separate process by TestCloseOnExec* tests.
+//
+// main() delegates to this function when the test executable is launched with
+// a special command-line switch. TestCloseOnExec* tests fork()+exec() the test
+// executable and pass the special command-line switch.
+//
+// When main() delegates to this function, the process probes whether a given
+// file descriptor is open, and communicates the result via its exit code.
+int TestCloseOnExecHelperMain(char* pid_arg) {
+ int fd = std::atoi(pid_arg);
+ // When given the same file descriptor twice, dup2() returns -1 if the
+ // file descriptor is closed, or the given file descriptor if it is open.
+ if (::dup2(fd, fd) == fd) {
+ std::fprintf(stderr, "Unexpected open fd %d\n", fd);
+ return kTextCloseOnExecHelperFoundOpenFdCode;
+ }
+ // Double-check that dup2() is saying the file descriptor is closed.
+ if (errno != EBADF) {
+ std::fprintf(stderr, "Unexpected errno after calling dup2 on fd %d: %s\n",
+ fd, std::strerror(errno));
+ return kTextCloseOnExecHelperDup2FailedCode;
+ }
+ return 0;
+}
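
The dup2(fd, fd) probe used above is worth spelling out: duplicating a descriptor onto itself succeeds (returning the descriptor) only when it is open, and fails with EBADF when it is not, so it doubles as a cheap "is this fd open?" test. A standalone editorial sketch of the trick, not part of the patch:

#include <cerrno>
#include <cstdio>
#include <unistd.h>

// Returns true when |fd| refers to an open file description.
bool IsFdOpen(int fd) {
  // dup2() onto the same descriptor is a no-op that still validates |fd|:
  // it returns |fd| when open and -1 with errno == EBADF when closed.
  return ::dup2(fd, fd) == fd;
}

int main() {
  std::printf("stdout (fd 1) open: %d\n", IsFdOpen(1));
  std::printf("fd 12345 open: %d\n", IsFdOpen(12345));
  return 0;
}
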
+
+// File descriptors are small non-negative integers.
+//
+// Returns void so the implementation can use ASSERT_EQ.
+void GetMaxFileDescriptor(int* result_fd) {
+ // Get the maximum file descriptor number.
+ ::rlimit fd_rlimit;
+ ASSERT_EQ(0, ::getrlimit(RLIMIT_NOFILE, &fd_rlimit));
+ *result_fd = fd_rlimit.rlim_cur;
+}
+
+// Iterates through all possible FDs and returns the currently open ones.
+//
+// Returns void so the implementation can use ASSERT_EQ.
+void GetOpenFileDescriptors(std::unordered_set<int>* open_fds) {
+ int max_fd = 0;
+ GetMaxFileDescriptor(&max_fd);
+
+ for (int fd = 0; fd < max_fd; ++fd) {
+ if (::dup2(fd, fd) != fd) {
+ // When given the same file descriptor twice, dup2() returns -1 if the
+ // file descriptor is closed, or the given file descriptor if it is open.
+ //
+ // Double-check that dup2() is saying the fd is closed.
+ ASSERT_EQ(EBADF, errno)
+ << "dup2() should set errno to EBADF on closed file descriptors";
+ continue;
+ }
+ open_fds->insert(fd);
+ }
+}
+
+// Finds an FD opened since a previous call to GetOpenFileDescriptors().
+//
+// |baseline_open_fds| is the result of a previous GetOpenFileDescriptors()
+// call. Assumes that exactly one FD was opened since that call.
+//
+// Returns void so the implementation can use ASSERT_EQ.
+void GetNewlyOpenedFileDescriptor(
+ const std::unordered_set<int>& baseline_open_fds, int* result_fd) {
+ std::unordered_set<int> open_fds;
+ GetOpenFileDescriptors(&open_fds);
+ for (int fd : baseline_open_fds) {
+ ASSERT_EQ(1, open_fds.count(fd))
+ << "Previously opened file descriptor was closed during test setup";
+ open_fds.erase(fd);
+ }
+ ASSERT_EQ(1, open_fds.size())
+ << "Expected exactly one newly opened file descriptor during test setup";
+ *result_fd = *open_fds.begin();
+}
+
+// Check that a fork()+exec()-ed child process does not have an extra open FD.
+void CheckCloseOnExecDoesNotLeakFDs(
+ const std::unordered_set<int>& baseline_open_fds) {
+ // Prepare the argument list for the child process.
+ // execv() wants mutable buffers.
+ char switch_buffer[sizeof(kTestCloseOnExecSwitch)];
+ std::memcpy(switch_buffer, kTestCloseOnExecSwitch,
+ sizeof(kTestCloseOnExecSwitch));
+
+ int probed_fd;
+ GetNewlyOpenedFileDescriptor(baseline_open_fds, &probed_fd);
+ std::string fd_string = std::to_string(probed_fd);
+ std::vector<char> fd_buffer(fd_string.begin(), fd_string.end());
+ fd_buffer.emplace_back('\0');
+
+ // The helper process is launched with the command below.
+ // env_posix_tests --test-close-on-exec-helper 3
+ char* child_argv[] = {GetArgvZero()->data(), switch_buffer, fd_buffer.data(),
+ nullptr};
+
+ constexpr int kForkInChildProcessReturnValue = 0;
+ int child_pid = fork();
+ if (child_pid == kForkInChildProcessReturnValue) {
+ ::execv(child_argv[0], child_argv);
+ std::fprintf(stderr, "Error spawning child process: %s\n", strerror(errno));
+ std::exit(kTextCloseOnExecHelperExecFailedCode);
+ }
+
+ int child_status = 0;
+ ASSERT_EQ(child_pid, ::waitpid(child_pid, &child_status, 0));
+ ASSERT_TRUE(WIFEXITED(child_status))
+ << "The helper process did not exit with an exit code";
+ ASSERT_EQ(0, WEXITSTATUS(child_status))
+ << "The helper process encountered an error";
+}
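
For readers less familiar with the fork()/execv()/waitpid() dance above, this is its minimal shape: spawn a child, replace its image with a helper program, and read back the exit code. An editorial sketch where /bin/true and the exit codes are placeholders, not values from the patch; the patch copies its argument strings into mutable buffers instead of using const_cast so it stays strictly within execv()'s contract.

#include <cstdio>
#include <cstdlib>
#include <sys/wait.h>
#include <unistd.h>

int main() {
  pid_t child = ::fork();
  if (child < 0) {
    std::perror("fork");
    return 1;
  }
  if (child == 0) {
    // Child: replace the process image. execv() only returns on failure.
    char* const argv[] = {const_cast<char*>("/bin/true"), nullptr};
    ::execv(argv[0], argv);
    std::perror("execv");
    std::exit(127);
  }
  // Parent: wait for the child and decode its status.
  int status = 0;
  if (::waitpid(child, &status, 0) != child) return 1;
  std::printf("exited=%d code=%d\n", WIFEXITED(status), WEXITSTATUS(status));
  return 0;
}
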
+
+} // namespace
+
+#endif // HAVE_O_CLOEXEC
namespace leveldb {
-static const int kDelayMicros = 100000;
static const int kReadOnlyFileLimit = 4;
static const int kMMapLimit = 4;
class EnvPosixTest {
public:
- Env* env_;
- EnvPosixTest() : env_(Env::Default()) { }
-
static void SetFileLimits(int read_only_file_limit, int mmap_limit) {
EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit);
EnvPosixTestHelper::SetReadOnlyMMapLimit(mmap_limit);
}
+
+ EnvPosixTest() : env_(Env::Default()) {}
+
+ Env* env_;
};
TEST(EnvPosixTest, TestOpenOnRead) {
@@ -31,8 +186,8 @@ TEST(EnvPosixTest, TestOpenOnRead) {
ASSERT_OK(env_->GetTestDirectory(&test_dir));
std::string test_file = test_dir + "/open_on_read.txt";
- FILE* f = fopen(test_file.c_str(), "w");
- ASSERT_TRUE(f != NULL);
+ FILE* f = fopen(test_file.c_str(), "we");
+ ASSERT_TRUE(f != nullptr);
const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
fputs(kFileData, f);
fclose(f);
@@ -56,9 +211,138 @@ TEST(EnvPosixTest, TestOpenOnRead) {
ASSERT_OK(env_->DeleteFile(test_file));
}
+#if HAVE_O_CLOEXEC
+
+TEST(EnvPosixTest, TestCloseOnExecSequentialFile) {
+ std::unordered_set<int> open_fds;
+ GetOpenFileDescriptors(&open_fds);
+
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string file_path = test_dir + "/close_on_exec_sequential.txt";
+ ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+ leveldb::SequentialFile* file = nullptr;
+ ASSERT_OK(env_->NewSequentialFile(file_path, &file));
+ CheckCloseOnExecDoesNotLeakFDs(open_fds);
+ delete file;
+
+ ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+TEST(EnvPosixTest, TestCloseOnExecRandomAccessFile) {
+ std::unordered_set<int> open_fds;
+ GetOpenFileDescriptors(&open_fds);
+
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string file_path = test_dir + "/close_on_exec_random_access.txt";
+ ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+ // Exhaust the RandomAccessFile mmap limit. This way, the test
+ // RandomAccessFile instance below is backed by a file descriptor, not by an
+ // mmap region.
+ leveldb::RandomAccessFile* mmapped_files[kReadOnlyFileLimit] = {nullptr};
+ for (int i = 0; i < kReadOnlyFileLimit; i++) {
+ ASSERT_OK(env_->NewRandomAccessFile(file_path, &mmapped_files[i]));
+ }
+
+ leveldb::RandomAccessFile* file = nullptr;
+ ASSERT_OK(env_->NewRandomAccessFile(file_path, &file));
+ CheckCloseOnExecDoesNotLeakFDs(open_fds);
+ delete file;
+
+ for (int i = 0; i < kReadOnlyFileLimit; i++) {
+ delete mmapped_files[i];
+ }
+ ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+TEST(EnvPosixTest, TestCloseOnExecWritableFile) {
+ std::unordered_set<int> open_fds;
+ GetOpenFileDescriptors(&open_fds);
+
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string file_path = test_dir + "/close_on_exec_writable.txt";
+ ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+ leveldb::WritableFile* file = nullptr;
+ ASSERT_OK(env_->NewWritableFile(file_path, &file));
+ CheckCloseOnExecDoesNotLeakFDs(open_fds);
+ delete file;
+
+ ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+TEST(EnvPosixTest, TestCloseOnExecAppendableFile) {
+ std::unordered_set<int> open_fds;
+ GetOpenFileDescriptors(&open_fds);
+
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string file_path = test_dir + "/close_on_exec_appendable.txt";
+ ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+ leveldb::WritableFile* file = nullptr;
+ ASSERT_OK(env_->NewAppendableFile(file_path, &file));
+ CheckCloseOnExecDoesNotLeakFDs(open_fds);
+ delete file;
+
+ ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+TEST(EnvPosixTest, TestCloseOnExecLockFile) {
+ std::unordered_set<int> open_fds;
+ GetOpenFileDescriptors(&open_fds);
+
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string file_path = test_dir + "/close_on_exec_lock.txt";
+ ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+ leveldb::FileLock* lock = nullptr;
+ ASSERT_OK(env_->LockFile(file_path, &lock));
+ CheckCloseOnExecDoesNotLeakFDs(open_fds);
+ ASSERT_OK(env_->UnlockFile(lock));
+
+ ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+TEST(EnvPosixTest, TestCloseOnExecLogger) {
+ std::unordered_set<int> open_fds;
+ GetOpenFileDescriptors(&open_fds);
+
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string file_path = test_dir + "/close_on_exec_logger.txt";
+ ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+ leveldb::Logger* file = nullptr;
+ ASSERT_OK(env_->NewLogger(file_path, &file));
+ CheckCloseOnExecDoesNotLeakFDs(open_fds);
+ delete file;
+
+ ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+#endif // HAVE_O_CLOEXEC
+
} // namespace leveldb
int main(int argc, char** argv) {
+#if HAVE_O_CLOEXEC
+ // Check if we're invoked as a helper program, or as the test suite.
+ for (int i = 1; i < argc; ++i) {
+ if (!std::strcmp(argv[i], kTestCloseOnExecSwitch)) {
+ return TestCloseOnExecHelperMain(argv[i + 1]);
+ }
+ }
+
+ // Save argv[0] early, because googletest may modify argv.
+ GetArgvZero()->assign(argv[0], argv[0] + std::strlen(argv[0]) + 1);
+#endif // HAVE_O_CLOEXEC
+
// All tests currently run with the same read-only file limits.
leveldb::EnvPosixTest::SetFileLimits(leveldb::kReadOnlyFileLimit,
leveldb::kMMapLimit);
diff --git a/src/leveldb/util/env_test.cc b/src/leveldb/util/env_test.cc
index 839ae56a1a..7db03fc11c 100644
--- a/src/leveldb/util/env_test.cc
+++ b/src/leveldb/util/env_test.cc
@@ -4,72 +4,144 @@
#include "leveldb/env.h"
+#include <algorithm>
+
#include "port/port.h"
+#include "port/thread_annotations.h"
+#include "util/mutexlock.h"
#include "util/testharness.h"
+#include "util/testutil.h"
namespace leveldb {
static const int kDelayMicros = 100000;
-static const int kReadOnlyFileLimit = 4;
-static const int kMMapLimit = 4;
class EnvTest {
- private:
- port::Mutex mu_;
- std::string events_;
-
public:
+ EnvTest() : env_(Env::Default()) {}
+
Env* env_;
- EnvTest() : env_(Env::Default()) { }
};
-static void SetBool(void* ptr) {
- reinterpret_cast<port::AtomicPointer*>(ptr)->NoBarrier_Store(ptr);
+TEST(EnvTest, ReadWrite) {
+ Random rnd(test::RandomSeed());
+
+ // Get file to use for testing.
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string test_file_name = test_dir + "/open_on_read.txt";
+ WritableFile* writable_file;
+ ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file));
+
+ // Fill a file with data generated via a sequence of randomly sized writes.
+ static const size_t kDataSize = 10 * 1048576;
+ std::string data;
+ while (data.size() < kDataSize) {
+ int len = rnd.Skewed(18); // Up to 2^18 - 1, but typically much smaller
+ std::string r;
+ test::RandomString(&rnd, len, &r);
+ ASSERT_OK(writable_file->Append(r));
+ data += r;
+ if (rnd.OneIn(10)) {
+ ASSERT_OK(writable_file->Flush());
+ }
+ }
+ ASSERT_OK(writable_file->Sync());
+ ASSERT_OK(writable_file->Close());
+ delete writable_file;
+
+ // Read all data using a sequence of randomly sized reads.
+ SequentialFile* sequential_file;
+ ASSERT_OK(env_->NewSequentialFile(test_file_name, &sequential_file));
+ std::string read_result;
+ std::string scratch;
+ while (read_result.size() < data.size()) {
+ int len = std::min<int>(rnd.Skewed(18), data.size() - read_result.size());
+ scratch.resize(std::max(len, 1)); // at least 1 so &scratch[0] is legal
+ Slice read;
+ ASSERT_OK(sequential_file->Read(len, &read, &scratch[0]));
+ if (len > 0) {
+ ASSERT_GT(read.size(), 0);
+ }
+ ASSERT_LE(read.size(), len);
+ read_result.append(read.data(), read.size());
+ }
+ ASSERT_EQ(read_result, data);
+ delete sequential_file;
}
TEST(EnvTest, RunImmediately) {
- port::AtomicPointer called (NULL);
- env_->Schedule(&SetBool, &called);
- env_->SleepForMicroseconds(kDelayMicros);
- ASSERT_TRUE(called.NoBarrier_Load() != NULL);
+ struct RunState {
+ port::Mutex mu;
+ port::CondVar cvar{&mu};
+ bool called = false;
+
+ static void Run(void* arg) {
+ RunState* state = reinterpret_cast<RunState*>(arg);
+ MutexLock l(&state->mu);
+ ASSERT_EQ(state->called, false);
+ state->called = true;
+ state->cvar.Signal();
+ }
+ };
+
+ RunState state;
+ env_->Schedule(&RunState::Run, &state);
+
+ MutexLock l(&state.mu);
+ while (!state.called) {
+ state.cvar.Wait();
+ }
}
TEST(EnvTest, RunMany) {
- port::AtomicPointer last_id (NULL);
+ struct RunState {
+ port::Mutex mu;
+ port::CondVar cvar{&mu};
+ int last_id = 0;
+ };
+
+ struct Callback {
+ RunState* state_; // Pointer to shared state.
+ const int id_; // Order# for the execution of this callback.
- struct CB {
- port::AtomicPointer* last_id_ptr; // Pointer to shared slot
- uintptr_t id; // Order# for the execution of this callback
+ Callback(RunState* s, int id) : state_(s), id_(id) {}
- CB(port::AtomicPointer* p, int i) : last_id_ptr(p), id(i) { }
+ static void Run(void* arg) {
+ Callback* callback = reinterpret_cast<Callback*>(arg);
+ RunState* state = callback->state_;
- static void Run(void* v) {
- CB* cb = reinterpret_cast<CB*>(v);
- void* cur = cb->last_id_ptr->NoBarrier_Load();
- ASSERT_EQ(cb->id-1, reinterpret_cast<uintptr_t>(cur));
- cb->last_id_ptr->Release_Store(reinterpret_cast<void*>(cb->id));
+ MutexLock l(&state->mu);
+ ASSERT_EQ(state->last_id, callback->id_ - 1);
+ state->last_id = callback->id_;
+ state->cvar.Signal();
}
};
- // Schedule in different order than start time
- CB cb1(&last_id, 1);
- CB cb2(&last_id, 2);
- CB cb3(&last_id, 3);
- CB cb4(&last_id, 4);
- env_->Schedule(&CB::Run, &cb1);
- env_->Schedule(&CB::Run, &cb2);
- env_->Schedule(&CB::Run, &cb3);
- env_->Schedule(&CB::Run, &cb4);
-
- env_->SleepForMicroseconds(kDelayMicros);
- void* cur = last_id.Acquire_Load();
- ASSERT_EQ(4, reinterpret_cast<uintptr_t>(cur));
+ RunState state;
+ Callback callback1(&state, 1);
+ Callback callback2(&state, 2);
+ Callback callback3(&state, 3);
+ Callback callback4(&state, 4);
+ env_->Schedule(&Callback::Run, &callback1);
+ env_->Schedule(&Callback::Run, &callback2);
+ env_->Schedule(&Callback::Run, &callback3);
+ env_->Schedule(&Callback::Run, &callback4);
+
+ MutexLock l(&state.mu);
+ while (state.last_id != 4) {
+ state.cvar.Wait();
+ }
}
struct State {
port::Mutex mu;
- int val;
- int num_running;
+ port::CondVar cvar{&mu};
+
+ int val GUARDED_BY(mu);
+ int num_running GUARDED_BY(mu);
+
+ State(int val, int num_running) : val(val), num_running(num_running) {}
};
static void ThreadBody(void* arg) {
@@ -77,30 +149,89 @@ static void ThreadBody(void* arg) {
s->mu.Lock();
s->val += 1;
s->num_running -= 1;
+ s->cvar.Signal();
s->mu.Unlock();
}
TEST(EnvTest, StartThread) {
- State state;
- state.val = 0;
- state.num_running = 3;
+ State state(0, 3);
for (int i = 0; i < 3; i++) {
env_->StartThread(&ThreadBody, &state);
}
- while (true) {
- state.mu.Lock();
- int num = state.num_running;
- state.mu.Unlock();
- if (num == 0) {
- break;
- }
- env_->SleepForMicroseconds(kDelayMicros);
+
+ MutexLock l(&state.mu);
+ while (state.num_running != 0) {
+ state.cvar.Wait();
}
ASSERT_EQ(state.val, 3);
}
-} // namespace leveldb
+TEST(EnvTest, TestOpenNonExistentFile) {
+ // Write some test data to a single file that will be opened |n| times.
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+
+ std::string non_existent_file = test_dir + "/non_existent_file";
+ ASSERT_TRUE(!env_->FileExists(non_existent_file));
+
+ RandomAccessFile* random_access_file;
+ Status status =
+ env_->NewRandomAccessFile(non_existent_file, &random_access_file);
+ ASSERT_TRUE(status.IsNotFound());
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
+ SequentialFile* sequential_file;
+ status = env_->NewSequentialFile(non_existent_file, &sequential_file);
+ ASSERT_TRUE(status.IsNotFound());
}
+
+TEST(EnvTest, ReopenWritableFile) {
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string test_file_name = test_dir + "/reopen_writable_file.txt";
+ env_->DeleteFile(test_file_name);
+
+ WritableFile* writable_file;
+ ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file));
+ std::string data("hello world!");
+ ASSERT_OK(writable_file->Append(data));
+ ASSERT_OK(writable_file->Close());
+ delete writable_file;
+
+ ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file));
+ data = "42";
+ ASSERT_OK(writable_file->Append(data));
+ ASSERT_OK(writable_file->Close());
+ delete writable_file;
+
+ ASSERT_OK(ReadFileToString(env_, test_file_name, &data));
+ ASSERT_EQ(std::string("42"), data);
+ env_->DeleteFile(test_file_name);
+}
+
+TEST(EnvTest, ReopenAppendableFile) {
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string test_file_name = test_dir + "/reopen_appendable_file.txt";
+ env_->DeleteFile(test_file_name);
+
+ WritableFile* appendable_file;
+ ASSERT_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
+ std::string data("hello world!");
+ ASSERT_OK(appendable_file->Append(data));
+ ASSERT_OK(appendable_file->Close());
+ delete appendable_file;
+
+ ASSERT_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
+ data = "42";
+ ASSERT_OK(appendable_file->Append(data));
+ ASSERT_OK(appendable_file->Close());
+ delete appendable_file;
+
+ ASSERT_OK(ReadFileToString(env_, test_file_name, &data));
+ ASSERT_EQ(std::string("hello world!42"), data);
+ env_->DeleteFile(test_file_name);
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/env_win.cc b/src/leveldb/util/env_win.cc
deleted file mode 100644
index 830332abe9..0000000000
--- a/src/leveldb/util/env_win.cc
+++ /dev/null
@@ -1,902 +0,0 @@
-// This file contains source that originates from:
-// http://code.google.com/p/leveldbwin/source/browse/trunk/win32_impl_src/env_win32.h
-// http://code.google.com/p/leveldbwin/source/browse/trunk/win32_impl_src/port_win32.cc
-// Those files don't have any explicit license headers but the
-// project (http://code.google.com/p/leveldbwin/) lists the 'New BSD License'
-// as the license.
-#if defined(LEVELDB_PLATFORM_WINDOWS)
-#include <map>
-
-
-#include "leveldb/env.h"
-
-#include "port/port.h"
-#include "leveldb/slice.h"
-#include "util/logging.h"
-
-#include <shlwapi.h>
-#include <process.h>
-#include <cstring>
-#include <stdio.h>
-#include <errno.h>
-#include <io.h>
-#include <algorithm>
-
-#ifdef max
-#undef max
-#endif
-
-#ifndef va_copy
-#define va_copy(d,s) ((d) = (s))
-#endif
-
-#if defined DeleteFile
-#undef DeleteFile
-#endif
-
-//Declarations
-namespace leveldb
-{
-
-namespace Win32
-{
-
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&); \
- void operator=(const TypeName&)
-
-std::string GetCurrentDir();
-std::wstring GetCurrentDirW();
-
-static const std::string CurrentDir = GetCurrentDir();
-static const std::wstring CurrentDirW = GetCurrentDirW();
-
-std::string& ModifyPath(std::string& path);
-std::wstring& ModifyPath(std::wstring& path);
-
-std::string GetLastErrSz();
-std::wstring GetLastErrSzW();
-
-size_t GetPageSize();
-
-typedef void (*ScheduleProc)(void*) ;
-
-struct WorkItemWrapper
-{
- WorkItemWrapper(ScheduleProc proc_,void* content_);
- ScheduleProc proc;
- void* pContent;
-};
-
-DWORD WINAPI WorkItemWrapperProc(LPVOID pContent);
-
-class Win32SequentialFile : public SequentialFile
-{
-public:
- friend class Win32Env;
- virtual ~Win32SequentialFile();
- virtual Status Read(size_t n, Slice* result, char* scratch);
- virtual Status Skip(uint64_t n);
- BOOL isEnable();
- virtual std::string GetName() const { return _filename; }
-private:
- BOOL _Init();
- void _CleanUp();
- Win32SequentialFile(const std::string& fname);
- std::string _filename;
- ::HANDLE _hFile;
- DISALLOW_COPY_AND_ASSIGN(Win32SequentialFile);
-};
-
-class Win32RandomAccessFile : public RandomAccessFile
-{
-public:
- friend class Win32Env;
- virtual ~Win32RandomAccessFile();
- virtual Status Read(uint64_t offset, size_t n, Slice* result,char* scratch) const;
- BOOL isEnable();
- virtual std::string GetName() const { return _filename; }
-private:
- BOOL _Init(LPCWSTR path);
- void _CleanUp();
- Win32RandomAccessFile(const std::string& fname);
- HANDLE _hFile;
- const std::string _filename;
- DISALLOW_COPY_AND_ASSIGN(Win32RandomAccessFile);
-};
-
-class Win32WritableFile : public WritableFile
-{
-public:
- Win32WritableFile(const std::string& fname, bool append);
- ~Win32WritableFile();
-
- virtual Status Append(const Slice& data);
- virtual Status Close();
- virtual Status Flush();
- virtual Status Sync();
- BOOL isEnable();
- virtual std::string GetName() const { return filename_; }
-private:
- std::string filename_;
- ::HANDLE _hFile;
-};
-
-class Win32FileLock : public FileLock
-{
-public:
- friend class Win32Env;
- virtual ~Win32FileLock();
- BOOL isEnable();
-private:
- BOOL _Init(LPCWSTR path);
- void _CleanUp();
- Win32FileLock(const std::string& fname);
- HANDLE _hFile;
- std::string _filename;
- DISALLOW_COPY_AND_ASSIGN(Win32FileLock);
-};
-
-class Win32Logger : public Logger
-{
-public:
- friend class Win32Env;
- virtual ~Win32Logger();
- virtual void Logv(const char* format, va_list ap);
-private:
- explicit Win32Logger(WritableFile* pFile);
- WritableFile* _pFileProxy;
- DISALLOW_COPY_AND_ASSIGN(Win32Logger);
-};
-
-class Win32Env : public Env
-{
-public:
- Win32Env();
- virtual ~Win32Env();
- virtual Status NewSequentialFile(const std::string& fname,
- SequentialFile** result);
-
- virtual Status NewRandomAccessFile(const std::string& fname,
- RandomAccessFile** result);
- virtual Status NewWritableFile(const std::string& fname,
- WritableFile** result);
- virtual Status NewAppendableFile(const std::string& fname,
- WritableFile** result);
-
- virtual bool FileExists(const std::string& fname);
-
- virtual Status GetChildren(const std::string& dir,
- std::vector<std::string>* result);
-
- virtual Status DeleteFile(const std::string& fname);
-
- virtual Status CreateDir(const std::string& dirname);
-
- virtual Status DeleteDir(const std::string& dirname);
-
- virtual Status GetFileSize(const std::string& fname, uint64_t* file_size);
-
- virtual Status RenameFile(const std::string& src,
- const std::string& target);
-
- virtual Status LockFile(const std::string& fname, FileLock** lock);
-
- virtual Status UnlockFile(FileLock* lock);
-
- virtual void Schedule(
- void (*function)(void* arg),
- void* arg);
-
- virtual void StartThread(void (*function)(void* arg), void* arg);
-
- virtual Status GetTestDirectory(std::string* path);
-
- //virtual void Logv(WritableFile* log, const char* format, va_list ap);
-
- virtual Status NewLogger(const std::string& fname, Logger** result);
-
- virtual uint64_t NowMicros();
-
- virtual void SleepForMicroseconds(int micros);
-};
-
-void ToWidePath(const std::string& value, std::wstring& target) {
- wchar_t buffer[MAX_PATH];
- MultiByteToWideChar(CP_UTF8, 0, value.c_str(), -1, buffer, MAX_PATH);
- target = buffer;
-}
-
-void ToNarrowPath(const std::wstring& value, std::string& target) {
- char buffer[MAX_PATH];
- WideCharToMultiByte(CP_UTF8, 0, value.c_str(), -1, buffer, MAX_PATH, NULL, NULL);
- target = buffer;
-}
-
-std::wstring GetCurrentDirW()
-{
- WCHAR path[MAX_PATH];
- ::GetModuleFileNameW(::GetModuleHandleW(NULL),path,MAX_PATH);
- *wcsrchr(path,L'\\') = 0;
- return std::wstring(path);
-}
-
-std::string GetCurrentDir()
-{
- std::string path;
- ToNarrowPath(GetCurrentDirW(), path);
- return path;
-}
-
-std::string& ModifyPath(std::string& path)
-{
- if(path[0] == '/' || path[0] == '\\'){
- path = CurrentDir + path;
- }
- std::replace(path.begin(),path.end(),'/','\\');
-
- return path;
-}
-
-std::wstring& ModifyPath(std::wstring& path)
-{
- if(path[0] == L'/' || path[0] == L'\\'){
- path = CurrentDirW + path;
- }
- std::replace(path.begin(),path.end(),L'/',L'\\');
- return path;
-}
-
-std::string GetLastErrSz()
-{
- LPWSTR lpMsgBuf;
- FormatMessageW(
- FORMAT_MESSAGE_ALLOCATE_BUFFER |
- FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL,
- GetLastError(),
- 0, // Default language
- (LPWSTR) &lpMsgBuf,
- 0,
- NULL
- );
- std::string Err;
- ToNarrowPath(lpMsgBuf, Err);
- LocalFree( lpMsgBuf );
- return Err;
-}
-
-std::wstring GetLastErrSzW()
-{
- LPVOID lpMsgBuf;
- FormatMessageW(
- FORMAT_MESSAGE_ALLOCATE_BUFFER |
- FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL,
- GetLastError(),
- 0, // Default language
- (LPWSTR) &lpMsgBuf,
- 0,
- NULL
- );
- std::wstring Err = (LPCWSTR)lpMsgBuf;
- LocalFree(lpMsgBuf);
- return Err;
-}
-
-WorkItemWrapper::WorkItemWrapper( ScheduleProc proc_,void* content_ ) :
- proc(proc_),pContent(content_)
-{
-
-}
-
-DWORD WINAPI WorkItemWrapperProc(LPVOID pContent)
-{
- WorkItemWrapper* item = static_cast<WorkItemWrapper*>(pContent);
- ScheduleProc TempProc = item->proc;
- void* arg = item->pContent;
- delete item;
- TempProc(arg);
- return 0;
-}
-
-size_t GetPageSize()
-{
- SYSTEM_INFO si;
- GetSystemInfo(&si);
- return std::max(si.dwPageSize,si.dwAllocationGranularity);
-}
-
-const size_t g_PageSize = GetPageSize();
-
-
-Win32SequentialFile::Win32SequentialFile( const std::string& fname ) :
- _filename(fname),_hFile(NULL)
-{
- _Init();
-}
-
-Win32SequentialFile::~Win32SequentialFile()
-{
- _CleanUp();
-}
-
-Status Win32SequentialFile::Read( size_t n, Slice* result, char* scratch )
-{
- Status sRet;
- DWORD hasRead = 0;
- if(_hFile && ReadFile(_hFile,scratch,n,&hasRead,NULL) ){
- *result = Slice(scratch,hasRead);
- } else {
- sRet = Status::IOError(_filename, Win32::GetLastErrSz() );
- }
- return sRet;
-}
-
-Status Win32SequentialFile::Skip( uint64_t n )
-{
- Status sRet;
- LARGE_INTEGER Move,NowPointer;
- Move.QuadPart = n;
- if(!SetFilePointerEx(_hFile,Move,&NowPointer,FILE_CURRENT)){
- sRet = Status::IOError(_filename,Win32::GetLastErrSz());
- }
- return sRet;
-}
-
-BOOL Win32SequentialFile::isEnable()
-{
- return _hFile ? TRUE : FALSE;
-}
-
-BOOL Win32SequentialFile::_Init()
-{
- std::wstring path;
- ToWidePath(_filename, path);
- _hFile = CreateFileW(path.c_str(),
- GENERIC_READ,
- FILE_SHARE_READ | FILE_SHARE_WRITE,
- NULL,
- OPEN_EXISTING,
- FILE_ATTRIBUTE_NORMAL | FILE_FLAG_SEQUENTIAL_SCAN,
- NULL);
- if (_hFile == INVALID_HANDLE_VALUE)
- _hFile = NULL;
- return _hFile ? TRUE : FALSE;
-}
-
-void Win32SequentialFile::_CleanUp()
-{
- if(_hFile){
- CloseHandle(_hFile);
- _hFile = NULL;
- }
-}
-
-Win32RandomAccessFile::Win32RandomAccessFile( const std::string& fname ) :
- _filename(fname),_hFile(NULL)
-{
- std::wstring path;
- ToWidePath(fname, path);
- _Init( path.c_str() );
-}
-
-Win32RandomAccessFile::~Win32RandomAccessFile()
-{
- _CleanUp();
-}
-
-Status Win32RandomAccessFile::Read(uint64_t offset,size_t n,Slice* result,char* scratch) const
-{
- Status sRet;
- OVERLAPPED ol = {0};
- ZeroMemory(&ol,sizeof(ol));
- ol.Offset = (DWORD)offset;
- ol.OffsetHigh = (DWORD)(offset >> 32);
- DWORD hasRead = 0;
- if(!ReadFile(_hFile,scratch,n,&hasRead,&ol))
- sRet = Status::IOError(_filename,Win32::GetLastErrSz());
- else
- *result = Slice(scratch,hasRead);
- return sRet;
-}
-
-BOOL Win32RandomAccessFile::_Init( LPCWSTR path )
-{
- BOOL bRet = FALSE;
- if(!_hFile)
- _hFile = ::CreateFileW(path,GENERIC_READ,FILE_SHARE_READ|FILE_SHARE_WRITE,NULL,OPEN_EXISTING,
- FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS,NULL);
- if(!_hFile || _hFile == INVALID_HANDLE_VALUE )
- _hFile = NULL;
- else
- bRet = TRUE;
- return bRet;
-}
-
-BOOL Win32RandomAccessFile::isEnable()
-{
- return _hFile ? TRUE : FALSE;
-}
-
-void Win32RandomAccessFile::_CleanUp()
-{
- if(_hFile){
- ::CloseHandle(_hFile);
- _hFile = NULL;
- }
-}
-
-Win32WritableFile::Win32WritableFile(const std::string& fname, bool append)
- : filename_(fname)
-{
- std::wstring path;
- ToWidePath(fname, path);
- // NewAppendableFile: append to an existing file, or create a new one
- // if none exists - this is OPEN_ALWAYS behavior, with
- // FILE_APPEND_DATA to avoid having to manually position the file
- // pointer at the end of the file.
- // NewWritableFile: create a new file, delete if it exists - this is
- // CREATE_ALWAYS behavior. This file is used for writing only so
- // use GENERIC_WRITE.
- _hFile = CreateFileW(path.c_str(),
- append ? FILE_APPEND_DATA : GENERIC_WRITE,
- FILE_SHARE_READ|FILE_SHARE_DELETE|FILE_SHARE_WRITE,
- NULL,
- append ? OPEN_ALWAYS : CREATE_ALWAYS,
- FILE_ATTRIBUTE_NORMAL,
- NULL);
- // CreateFileW returns INVALID_HANDLE_VALUE in case of error, always check isEnable() before use
-}
-
-Win32WritableFile::~Win32WritableFile()
-{
- if (_hFile != INVALID_HANDLE_VALUE)
- Close();
-}
-
-Status Win32WritableFile::Append(const Slice& data)
-{
- DWORD r = 0;
- if (!WriteFile(_hFile, data.data(), data.size(), &r, NULL) || r != data.size()) {
- return Status::IOError("Win32WritableFile.Append::WriteFile: "+filename_, Win32::GetLastErrSz());
- }
- return Status::OK();
-}
-
-Status Win32WritableFile::Close()
-{
- if (!CloseHandle(_hFile)) {
- return Status::IOError("Win32WritableFile.Close::CloseHandle: "+filename_, Win32::GetLastErrSz());
- }
- _hFile = INVALID_HANDLE_VALUE;
- return Status::OK();
-}
-
-Status Win32WritableFile::Flush()
-{
- // Nothing to do here, there are no application-side buffers
- return Status::OK();
-}
-
-Status Win32WritableFile::Sync()
-{
- if (!FlushFileBuffers(_hFile)) {
- return Status::IOError("Win32WritableFile.Sync::FlushFileBuffers "+filename_, Win32::GetLastErrSz());
- }
- return Status::OK();
-}
-
-BOOL Win32WritableFile::isEnable()
-{
- return _hFile != INVALID_HANDLE_VALUE;
-}
-
-Win32FileLock::Win32FileLock( const std::string& fname ) :
- _hFile(NULL),_filename(fname)
-{
- std::wstring path;
- ToWidePath(fname, path);
- _Init(path.c_str());
-}
-
-Win32FileLock::~Win32FileLock()
-{
- _CleanUp();
-}
-
-BOOL Win32FileLock::_Init( LPCWSTR path )
-{
- BOOL bRet = FALSE;
- if(!_hFile)
- _hFile = ::CreateFileW(path,0,0,NULL,CREATE_ALWAYS,FILE_ATTRIBUTE_NORMAL,NULL);
- if(!_hFile || _hFile == INVALID_HANDLE_VALUE ){
- _hFile = NULL;
- }
- else
- bRet = TRUE;
- return bRet;
-}
-
-void Win32FileLock::_CleanUp()
-{
- ::CloseHandle(_hFile);
- _hFile = NULL;
-}
-
-BOOL Win32FileLock::isEnable()
-{
- return _hFile ? TRUE : FALSE;
-}
-
-Win32Logger::Win32Logger(WritableFile* pFile) : _pFileProxy(pFile)
-{
- assert(_pFileProxy);
-}
-
-Win32Logger::~Win32Logger()
-{
- if(_pFileProxy)
- delete _pFileProxy;
-}
-
-void Win32Logger::Logv( const char* format, va_list ap )
-{
- uint64_t thread_id = ::GetCurrentThreadId();
-
- // We try twice: the first time with a fixed-size stack allocated buffer,
- // and the second time with a much larger dynamically allocated buffer.
- char buffer[500];
- for (int iter = 0; iter < 2; iter++) {
- char* base;
- int bufsize;
- if (iter == 0) {
- bufsize = sizeof(buffer);
- base = buffer;
- } else {
- bufsize = 30000;
- base = new char[bufsize];
- }
- char* p = base;
- char* limit = base + bufsize;
-
- SYSTEMTIME st;
- GetLocalTime(&st);
- p += snprintf(p, limit - p,
- "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
- int(st.wYear),
- int(st.wMonth),
- int(st.wDay),
- int(st.wHour),
- int(st.wMinute),
- int(st.wMinute),
- int(st.wMilliseconds),
- static_cast<long long unsigned int>(thread_id));
-
- // Print the message
- if (p < limit) {
- va_list backup_ap;
- va_copy(backup_ap, ap);
- p += vsnprintf(p, limit - p, format, backup_ap);
- va_end(backup_ap);
- }
-
- // Truncate to available space if necessary
- if (p >= limit) {
- if (iter == 0) {
- continue; // Try again with larger buffer
- } else {
- p = limit - 1;
- }
- }
-
- // Add newline if necessary
- if (p == base || p[-1] != '\n') {
- *p++ = '\n';
- }
-
- assert(p <= limit);
- DWORD hasWritten = 0;
- if(_pFileProxy){
- _pFileProxy->Append(Slice(base, p - base));
- _pFileProxy->Flush();
- }
- if (base != buffer) {
- delete[] base;
- }
- break;
- }
-}
-
-bool Win32Env::FileExists(const std::string& fname)
-{
- std::string path = fname;
- std::wstring wpath;
- ToWidePath(ModifyPath(path), wpath);
- return ::PathFileExistsW(wpath.c_str()) ? true : false;
-}
-
-Status Win32Env::GetChildren(const std::string& dir, std::vector<std::string>* result)
-{
- Status sRet;
- ::WIN32_FIND_DATAW wfd;
- std::string path = dir;
- ModifyPath(path);
- path += "\\*.*";
- std::wstring wpath;
- ToWidePath(path, wpath);
-
- ::HANDLE hFind = ::FindFirstFileW(wpath.c_str() ,&wfd);
- if(hFind && hFind != INVALID_HANDLE_VALUE){
- BOOL hasNext = TRUE;
- std::string child;
- while(hasNext){
- ToNarrowPath(wfd.cFileName, child);
- if(child != ".." && child != ".") {
- result->push_back(child);
- }
- hasNext = ::FindNextFileW(hFind,&wfd);
- }
- ::FindClose(hFind);
- }
- else
- sRet = Status::IOError(dir,"Could not get children.");
- return sRet;
-}
-
-void Win32Env::SleepForMicroseconds( int micros )
-{
- ::Sleep((micros + 999) /1000);
-}
-
-
-Status Win32Env::DeleteFile( const std::string& fname )
-{
- Status sRet;
- std::string path = fname;
- std::wstring wpath;
- ToWidePath(ModifyPath(path), wpath);
-
- if(!::DeleteFileW(wpath.c_str())) {
- sRet = Status::IOError(path, "Could not delete file.");
- }
- return sRet;
-}
-
-Status Win32Env::GetFileSize( const std::string& fname, uint64_t* file_size )
-{
- Status sRet;
- std::string path = fname;
- std::wstring wpath;
- ToWidePath(ModifyPath(path), wpath);
-
- HANDLE file = ::CreateFileW(wpath.c_str(),
- GENERIC_READ,FILE_SHARE_READ|FILE_SHARE_WRITE,NULL,OPEN_EXISTING,FILE_ATTRIBUTE_NORMAL,NULL);
- LARGE_INTEGER li;
- if(::GetFileSizeEx(file,&li)){
- *file_size = (uint64_t)li.QuadPart;
- }else
- sRet = Status::IOError(path,"Could not get the file size.");
- CloseHandle(file);
- return sRet;
-}
-
-Status Win32Env::RenameFile( const std::string& src, const std::string& target )
-{
- Status sRet;
- std::string src_path = src;
- std::wstring wsrc_path;
- ToWidePath(ModifyPath(src_path), wsrc_path);
- std::string target_path = target;
- std::wstring wtarget_path;
- ToWidePath(ModifyPath(target_path), wtarget_path);
-
- if(!MoveFileW(wsrc_path.c_str(), wtarget_path.c_str() ) ){
- DWORD err = GetLastError();
- if(err == 0x000000b7){
- if(!::DeleteFileW(wtarget_path.c_str() ) )
- sRet = Status::IOError(src, "Could not rename file.");
- else if(!::MoveFileW(wsrc_path.c_str(),
- wtarget_path.c_str() ) )
- sRet = Status::IOError(src, "Could not rename file.");
- }
- }
- return sRet;
-}
-
-Status Win32Env::LockFile( const std::string& fname, FileLock** lock )
-{
- Status sRet;
- std::string path = fname;
- ModifyPath(path);
- Win32FileLock* _lock = new Win32FileLock(path);
- if(!_lock->isEnable()){
- delete _lock;
- *lock = NULL;
- sRet = Status::IOError(path, "Could not lock file.");
- }
- else
- *lock = _lock;
- return sRet;
-}
-
-Status Win32Env::UnlockFile( FileLock* lock )
-{
- Status sRet;
- delete lock;
- return sRet;
-}
-
-void Win32Env::Schedule( void (*function)(void* arg), void* arg )
-{
- QueueUserWorkItem(Win32::WorkItemWrapperProc,
- new Win32::WorkItemWrapper(function,arg),
- WT_EXECUTEDEFAULT);
-}
-
-void Win32Env::StartThread( void (*function)(void* arg), void* arg )
-{
- ::_beginthread(function,0,arg);
-}
-
-Status Win32Env::GetTestDirectory( std::string* path )
-{
- Status sRet;
- WCHAR TempPath[MAX_PATH];
- ::GetTempPathW(MAX_PATH,TempPath);
- ToNarrowPath(TempPath, *path);
- path->append("leveldb\\test\\");
- ModifyPath(*path);
- return sRet;
-}
-
-uint64_t Win32Env::NowMicros()
-{
-#ifndef USE_VISTA_API
-#define GetTickCount64 GetTickCount
-#endif
- return (uint64_t)(GetTickCount64()*1000);
-}
-
-static Status CreateDirInner( const std::string& dirname )
-{
- Status sRet;
- std::wstring dirnameW;
- ToWidePath(dirname, dirnameW);
- DWORD attr = ::GetFileAttributesW(dirnameW.c_str());
- if (attr == INVALID_FILE_ATTRIBUTES) { // doesn't exist:
- std::size_t slash = dirname.find_last_of("\\");
- if (slash != std::string::npos){
- sRet = CreateDirInner(dirname.substr(0, slash));
- if (!sRet.ok()) return sRet;
- }
- BOOL result = ::CreateDirectoryW(dirnameW.c_str(), NULL);
- if (result == FALSE) {
- sRet = Status::IOError(dirname, "Could not create directory.");
- return sRet;
- }
- }
- return sRet;
-}
-
-Status Win32Env::CreateDir( const std::string& dirname )
-{
- std::string path = dirname;
- if(path[path.length() - 1] != '\\'){
- path += '\\';
- }
- ModifyPath(path);
-
- return CreateDirInner(path);
-}
-
-Status Win32Env::DeleteDir( const std::string& dirname )
-{
- Status sRet;
- std::wstring path;
- ToWidePath(dirname, path);
- ModifyPath(path);
- if(!::RemoveDirectoryW( path.c_str() ) ){
- sRet = Status::IOError(dirname, "Could not delete directory.");
- }
- return sRet;
-}
-
-Status Win32Env::NewSequentialFile( const std::string& fname, SequentialFile** result )
-{
- Status sRet;
- std::string path = fname;
- ModifyPath(path);
- Win32SequentialFile* pFile = new Win32SequentialFile(path);
- if(pFile->isEnable()){
- *result = pFile;
- }else {
- delete pFile;
- sRet = Status::IOError(path, Win32::GetLastErrSz());
- }
- return sRet;
-}
-
-Status Win32Env::NewRandomAccessFile( const std::string& fname, RandomAccessFile** result )
-{
- Status sRet;
- std::string path = fname;
- Win32RandomAccessFile* pFile = new Win32RandomAccessFile(ModifyPath(path));
- if(!pFile->isEnable()){
- delete pFile;
- *result = NULL;
- sRet = Status::IOError(path, Win32::GetLastErrSz());
- }else
- *result = pFile;
- return sRet;
-}
-
-Status Win32Env::NewLogger( const std::string& fname, Logger** result )
-{
- Status sRet;
- std::string path = fname;
- // Logs are opened with write semantics, not with append semantics
- // (see PosixEnv::NewLogger)
- Win32WritableFile* pMapFile = new Win32WritableFile(ModifyPath(path), false);
- if(!pMapFile->isEnable()){
- delete pMapFile;
- *result = NULL;
- sRet = Status::IOError(path,"could not create a logger.");
- }else
- *result = new Win32Logger(pMapFile);
- return sRet;
-}
-
-Status Win32Env::NewWritableFile( const std::string& fname, WritableFile** result )
-{
- Status sRet;
- std::string path = fname;
- Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path), false);
- if(!pFile->isEnable()){
- *result = NULL;
- sRet = Status::IOError(fname,Win32::GetLastErrSz());
- }else
- *result = pFile;
- return sRet;
-}
-
-Status Win32Env::NewAppendableFile( const std::string& fname, WritableFile** result )
-{
- Status sRet;
- std::string path = fname;
- Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path), true);
- if(!pFile->isEnable()){
- *result = NULL;
- sRet = Status::IOError(fname,Win32::GetLastErrSz());
- }else
- *result = pFile;
- return sRet;
-}
-
-Win32Env::Win32Env()
-{
-
-}
-
-Win32Env::~Win32Env()
-{
-
-}
-
-
-} // Win32 namespace
-
-static port::OnceType once = LEVELDB_ONCE_INIT;
-static Env* default_env;
-static void InitDefaultEnv() { default_env = new Win32::Win32Env(); }
-
-Env* Env::Default() {
- port::InitOnce(&once, InitDefaultEnv);
- return default_env;
-}
-
-} // namespace leveldb
-
-#endif // defined(LEVELDB_PLATFORM_WINDOWS)
diff --git a/src/leveldb/util/env_windows.cc b/src/leveldb/util/env_windows.cc
new file mode 100644
index 0000000000..1834206562
--- /dev/null
+++ b/src/leveldb/util/env_windows.cc
@@ -0,0 +1,849 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+// Prevent Windows headers from defining min/max macros and instead
+// use STL.
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif // ifndef NOMINMAX
+#include <windows.h>
+
+#include <algorithm>
+#include <atomic>
+#include <chrono>
+#include <condition_variable>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <memory>
+#include <mutex>
+#include <queue>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "leveldb/env.h"
+#include "leveldb/slice.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
+#include "util/env_windows_test_helper.h"
+#include "util/logging.h"
+#include "util/mutexlock.h"
+#include "util/windows_logger.h"
+
+#if defined(DeleteFile)
+#undef DeleteFile
+#endif // defined(DeleteFile)
+
+namespace leveldb {
+
+namespace {
+
+constexpr const size_t kWritableFileBufferSize = 65536;
+
+// Up to 1000 mmaps for 64-bit binaries; none for 32-bit.
+constexpr int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 1000 : 0;
+
+// Can be set by EnvWindowsTestHelper::SetReadOnlyMMapLimit().
+int g_mmap_limit = kDefaultMmapLimit;
+
+std::string GetWindowsErrorMessage(DWORD error_code) {
+ std::string message;
+ char* error_text = nullptr;
+ // Use MBCS version of FormatMessage to match return value.
+ size_t error_text_size = ::FormatMessageA(
+ FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ nullptr, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ reinterpret_cast<char*>(&error_text), 0, nullptr);
+ if (!error_text) {
+ return message;
+ }
+ message.assign(error_text, error_text_size);
+ ::LocalFree(error_text);
+ return message;
+}
+
+Status WindowsError(const std::string& context, DWORD error_code) {
+ if (error_code == ERROR_FILE_NOT_FOUND || error_code == ERROR_PATH_NOT_FOUND)
+ return Status::NotFound(context, GetWindowsErrorMessage(error_code));
+ return Status::IOError(context, GetWindowsErrorMessage(error_code));
+}
+
+class ScopedHandle {
+ public:
+ ScopedHandle(HANDLE handle) : handle_(handle) {}
+ ScopedHandle(const ScopedHandle&) = delete;
+ ScopedHandle(ScopedHandle&& other) noexcept : handle_(other.Release()) {}
+ ~ScopedHandle() { Close(); }
+
+ ScopedHandle& operator=(const ScopedHandle&) = delete;
+
+ ScopedHandle& operator=(ScopedHandle&& rhs) noexcept {
+ if (this != &rhs) handle_ = rhs.Release();
+ return *this;
+ }
+
+ bool Close() {
+ if (!is_valid()) {
+ return true;
+ }
+ HANDLE h = handle_;
+ handle_ = INVALID_HANDLE_VALUE;
+ return ::CloseHandle(h);
+ }
+
+ bool is_valid() const {
+ return handle_ != INVALID_HANDLE_VALUE && handle_ != nullptr;
+ }
+
+ HANDLE get() const { return handle_; }
+
+ HANDLE Release() {
+ HANDLE h = handle_;
+ handle_ = INVALID_HANDLE_VALUE;
+ return h;
+ }
+
+ private:
+ HANDLE handle_;
+};
+
+// Helper class to limit resource usage to avoid exhaustion.
+// Currently used to limit read-only file descriptors and mmap file usage
+// so that we do not run out of file descriptors or virtual memory, or run into
+// kernel performance problems for very large databases.
+class Limiter {
+ public:
+ // Limit maximum number of resources to |max_acquires|.
+ Limiter(int max_acquires) : acquires_allowed_(max_acquires) {}
+
+ Limiter(const Limiter&) = delete;
+ Limiter operator=(const Limiter&) = delete;
+
+ // If another resource is available, acquire it and return true.
+ // Else return false.
+ bool Acquire() {
+ int old_acquires_allowed =
+ acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
+
+ if (old_acquires_allowed > 0) return true;
+
+ acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
+ return false;
+ }
+
+ // Release a resource acquired by a previous call to Acquire() that returned
+ // true.
+ void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); }
+
+ private:
+ // The number of available resources.
+ //
+ // This is a counter and is not tied to the invariants of any other class, so
+ // it can be operated on safely using std::memory_order_relaxed.
+ std::atomic<int> acquires_allowed_;
+};
+
+class WindowsSequentialFile : public SequentialFile {
+ public:
+ WindowsSequentialFile(std::string filename, ScopedHandle handle)
+ : handle_(std::move(handle)), filename_(std::move(filename)) {}
+ ~WindowsSequentialFile() override {}
+
+ Status Read(size_t n, Slice* result, char* scratch) override {
+ DWORD bytes_read;
+ // DWORD is 32-bit, but size_t could technically be larger. However, leveldb
+ // files are limited to leveldb::Options::max_file_size, which is clamped to
+ // 1<<30, i.e. 1 GiB.
+ assert(n <= std::numeric_limits<DWORD>::max());
+ if (!::ReadFile(handle_.get(), scratch, static_cast<DWORD>(n), &bytes_read,
+ nullptr)) {
+ return WindowsError(filename_, ::GetLastError());
+ }
+
+ *result = Slice(scratch, bytes_read);
+ return Status::OK();
+ }
+
+ Status Skip(uint64_t n) override {
+ LARGE_INTEGER distance;
+ distance.QuadPart = n;
+ if (!::SetFilePointerEx(handle_.get(), distance, nullptr, FILE_CURRENT)) {
+ return WindowsError(filename_, ::GetLastError());
+ }
+ return Status::OK();
+ }
+
+ std::string GetName() const override { return filename_; }
+
+ private:
+ const ScopedHandle handle_;
+ const std::string filename_;
+};
+
+class WindowsRandomAccessFile : public RandomAccessFile {
+ public:
+ WindowsRandomAccessFile(std::string filename, ScopedHandle handle)
+ : handle_(std::move(handle)), filename_(std::move(filename)) {}
+
+ ~WindowsRandomAccessFile() override = default;
+
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
+ DWORD bytes_read = 0;
+ OVERLAPPED overlapped = {0};
+
+ overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);
+ overlapped.Offset = static_cast<DWORD>(offset);
+ if (!::ReadFile(handle_.get(), scratch, static_cast<DWORD>(n), &bytes_read,
+ &overlapped)) {
+ DWORD error_code = ::GetLastError();
+ if (error_code != ERROR_HANDLE_EOF) {
+ *result = Slice(scratch, 0);
+ return Status::IOError(filename_, GetWindowsErrorMessage(error_code));
+ }
+ }
+
+ *result = Slice(scratch, bytes_read);
+ return Status::OK();
+ }
+
+ std::string GetName() const override { return filename_; }
+
+ private:
+ const ScopedHandle handle_;
+ const std::string filename_;
+};
+
+class WindowsMmapReadableFile : public RandomAccessFile {
+ public:
+ // base[0,length-1] contains the mmapped contents of the file.
+ WindowsMmapReadableFile(std::string filename, char* mmap_base, size_t length,
+ Limiter* mmap_limiter)
+ : mmap_base_(mmap_base),
+ length_(length),
+ mmap_limiter_(mmap_limiter),
+ filename_(std::move(filename)) {}
+
+ ~WindowsMmapReadableFile() override {
+ ::UnmapViewOfFile(mmap_base_);
+ mmap_limiter_->Release();
+ }
+
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
+ if (offset + n > length_) {
+ *result = Slice();
+ return WindowsError(filename_, ERROR_INVALID_PARAMETER);
+ }
+
+ *result = Slice(mmap_base_ + offset, n);
+ return Status::OK();
+ }
+
+ std::string GetName() const override { return filename_; }
+
+ private:
+ char* const mmap_base_;
+ const size_t length_;
+ Limiter* const mmap_limiter_;
+ const std::string filename_;
+};
+
+class WindowsWritableFile : public WritableFile {
+ public:
+ WindowsWritableFile(std::string filename, ScopedHandle handle)
+ : pos_(0), handle_(std::move(handle)), filename_(std::move(filename)) {}
+
+ ~WindowsWritableFile() override = default;
+
+ Status Append(const Slice& data) override {
+ size_t write_size = data.size();
+ const char* write_data = data.data();
+
+ // Fit as much as possible into buffer.
+ size_t copy_size = std::min(write_size, kWritableFileBufferSize - pos_);
+ std::memcpy(buf_ + pos_, write_data, copy_size);
+ write_data += copy_size;
+ write_size -= copy_size;
+ pos_ += copy_size;
+ if (write_size == 0) {
+ return Status::OK();
+ }
+
+ // Can't fit in buffer, so need to do at least one write.
+ Status status = FlushBuffer();
+ if (!status.ok()) {
+ return status;
+ }
+
+ // Small writes go to buffer, large writes are written directly.
+ if (write_size < kWritableFileBufferSize) {
+ std::memcpy(buf_, write_data, write_size);
+ pos_ = write_size;
+ return Status::OK();
+ }
+ return WriteUnbuffered(write_data, write_size);
+ }
+
+ Status Close() override {
+ Status status = FlushBuffer();
+ if (!handle_.Close() && status.ok()) {
+ status = WindowsError(filename_, ::GetLastError());
+ }
+ return status;
+ }
+
+ Status Flush() override { return FlushBuffer(); }
+
+ Status Sync() override {
+ // On Windows there is no need to sync the parent directory. Its metadata
+ // will be updated via the creation of the new file, without an explicit sync.
+
+ Status status = FlushBuffer();
+ if (!status.ok()) {
+ return status;
+ }
+
+ if (!::FlushFileBuffers(handle_.get())) {
+ return Status::IOError(filename_,
+ GetWindowsErrorMessage(::GetLastError()));
+ }
+ return Status::OK();
+ }
+
+ std::string GetName() const override { return filename_; }
+
+ private:
+ Status FlushBuffer() {
+ Status status = WriteUnbuffered(buf_, pos_);
+ pos_ = 0;
+ return status;
+ }
+
+ Status WriteUnbuffered(const char* data, size_t size) {
+ DWORD bytes_written;
+ if (!::WriteFile(handle_.get(), data, static_cast<DWORD>(size),
+ &bytes_written, nullptr)) {
+ return Status::IOError(filename_,
+ GetWindowsErrorMessage(::GetLastError()));
+ }
+ return Status::OK();
+ }
+
+ // buf_[0, pos_-1] contains data to be written to handle_.
+ char buf_[kWritableFileBufferSize];
+ size_t pos_;
+
+ ScopedHandle handle_;
+ const std::string filename_;
+};
+
+// Lock or unlock the entire file as specified by |lock|. Returns true
+// when successful, false upon failure. Caller should call ::GetLastError()
+// to determine the cause of failure.
+bool LockOrUnlock(HANDLE handle, bool lock) {
+ if (lock) {
+ return ::LockFile(handle,
+ /*dwFileOffsetLow=*/0, /*dwFileOffsetHigh=*/0,
+ /*nNumberOfBytesToLockLow=*/MAXDWORD,
+ /*nNumberOfBytesToLockHigh=*/MAXDWORD);
+ } else {
+ return ::UnlockFile(handle,
+ /*dwFileOffsetLow=*/0, /*dwFileOffsetHigh=*/0,
+ /*nNumberOfBytesToLockLow=*/MAXDWORD,
+ /*nNumberOfBytesToLockHigh=*/MAXDWORD);
+ }
+}
+
+class WindowsFileLock : public FileLock {
+ public:
+ WindowsFileLock(ScopedHandle handle, std::string filename)
+ : handle_(std::move(handle)), filename_(std::move(filename)) {}
+
+ const ScopedHandle& handle() const { return handle_; }
+ const std::string& filename() const { return filename_; }
+
+ private:
+ const ScopedHandle handle_;
+ const std::string filename_;
+};
+
+class WindowsEnv : public Env {
+ public:
+ WindowsEnv();
+ ~WindowsEnv() override {
+ static const char msg[] =
+ "WindowsEnv singleton destroyed. Unsupported behavior!\n";
+ std::fwrite(msg, 1, sizeof(msg), stderr);
+ std::abort();
+ }
+
+ Status NewSequentialFile(const std::string& filename,
+ SequentialFile** result) override {
+ *result = nullptr;
+ DWORD desired_access = GENERIC_READ;
+ DWORD share_mode = FILE_SHARE_READ;
+ auto wFilename = toUtf16(filename);
+ ScopedHandle handle = ::CreateFileW(
+ wFilename.c_str(), desired_access, share_mode,
+ /*lpSecurityAttributes=*/nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
+ /*hTemplateFile=*/nullptr);
+ if (!handle.is_valid()) {
+ return WindowsError(filename, ::GetLastError());
+ }
+
+ *result = new WindowsSequentialFile(filename, std::move(handle));
+ return Status::OK();
+ }
+
+ Status NewRandomAccessFile(const std::string& filename,
+ RandomAccessFile** result) override {
+ *result = nullptr;
+ DWORD desired_access = GENERIC_READ;
+ DWORD share_mode = FILE_SHARE_READ;
+ auto wFilename = toUtf16(filename);
+ ScopedHandle handle =
+ ::CreateFileW(wFilename.c_str(), desired_access, share_mode,
+ /*lpSecurityAttributes=*/nullptr, OPEN_EXISTING,
+ FILE_ATTRIBUTE_READONLY,
+ /*hTemplateFile=*/nullptr);
+ if (!handle.is_valid()) {
+ return WindowsError(filename, ::GetLastError());
+ }
+ if (!mmap_limiter_.Acquire()) {
+ *result = new WindowsRandomAccessFile(filename, std::move(handle));
+ return Status::OK();
+ }
+
+ LARGE_INTEGER file_size;
+ Status status;
+ if (!::GetFileSizeEx(handle.get(), &file_size)) {
+ mmap_limiter_.Release();
+ return WindowsError(filename, ::GetLastError());
+ }
+
+ ScopedHandle mapping =
+ ::CreateFileMappingW(handle.get(),
+ /*security attributes=*/nullptr, PAGE_READONLY,
+ /*dwMaximumSizeHigh=*/0,
+ /*dwMaximumSizeLow=*/0,
+ /*lpName=*/nullptr);
+ if (mapping.is_valid()) {
+ void* mmap_base = ::MapViewOfFile(mapping.get(), FILE_MAP_READ,
+ /*dwFileOffsetHigh=*/0,
+ /*dwFileOffsetLow=*/0,
+ /*dwNumberOfBytesToMap=*/0);
+ if (mmap_base) {
+ *result = new WindowsMmapReadableFile(
+ filename, reinterpret_cast<char*>(mmap_base),
+ static_cast<size_t>(file_size.QuadPart), &mmap_limiter_);
+ return Status::OK();
+ }
+ }
+ mmap_limiter_.Release();
+ return WindowsError(filename, ::GetLastError());
+ }
+
+ Status NewWritableFile(const std::string& filename,
+ WritableFile** result) override {
+ DWORD desired_access = GENERIC_WRITE;
+ DWORD share_mode = 0; // Exclusive access.
+ auto wFilename = toUtf16(filename);
+ ScopedHandle handle = ::CreateFileW(
+ wFilename.c_str(), desired_access, share_mode,
+ /*lpSecurityAttributes=*/nullptr, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL,
+ /*hTemplateFile=*/nullptr);
+ if (!handle.is_valid()) {
+ *result = nullptr;
+ return WindowsError(filename, ::GetLastError());
+ }
+
+ *result = new WindowsWritableFile(filename, std::move(handle));
+ return Status::OK();
+ }
+
+ Status NewAppendableFile(const std::string& filename,
+ WritableFile** result) override {
+ DWORD desired_access = FILE_APPEND_DATA;
+ DWORD share_mode = 0; // Exclusive access.
+ auto wFilename = toUtf16(filename);
+ ScopedHandle handle = ::CreateFileW(
+ wFilename.c_str(), desired_access, share_mode,
+ /*lpSecurityAttributes=*/nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL,
+ /*hTemplateFile=*/nullptr);
+ if (!handle.is_valid()) {
+ *result = nullptr;
+ return WindowsError(filename, ::GetLastError());
+ }
+
+ *result = new WindowsWritableFile(filename, std::move(handle));
+ return Status::OK();
+ }
+
+ bool FileExists(const std::string& filename) override {
+ auto wFilename = toUtf16(filename);
+ return GetFileAttributesW(wFilename.c_str()) != INVALID_FILE_ATTRIBUTES;
+ }
+
+ Status GetChildren(const std::string& directory_path,
+ std::vector<std::string>* result) override {
+ const std::string find_pattern = directory_path + "\\*";
+ WIN32_FIND_DATAW find_data;
+ auto wFind_pattern = toUtf16(find_pattern);
+ HANDLE dir_handle = ::FindFirstFileW(wFind_pattern.c_str(), &find_data);
+ if (dir_handle == INVALID_HANDLE_VALUE) {
+ DWORD last_error = ::GetLastError();
+ if (last_error == ERROR_FILE_NOT_FOUND) {
+ return Status::OK();
+ }
+ return WindowsError(directory_path, last_error);
+ }
+ do {
+ char base_name[_MAX_FNAME];
+ char ext[_MAX_EXT];
+
+ auto find_data_filename = toUtf8(find_data.cFileName);
+ if (!_splitpath_s(find_data_filename.c_str(), nullptr, 0, nullptr, 0,
+ base_name, ARRAYSIZE(base_name), ext, ARRAYSIZE(ext))) {
+ result->emplace_back(std::string(base_name) + ext);
+ }
+ } while (::FindNextFileW(dir_handle, &find_data));
+ DWORD last_error = ::GetLastError();
+ ::FindClose(dir_handle);
+ if (last_error != ERROR_NO_MORE_FILES) {
+ return WindowsError(directory_path, last_error);
+ }
+ return Status::OK();
+ }
+
+ Status DeleteFile(const std::string& filename) override {
+ auto wFilename = toUtf16(filename);
+ if (!::DeleteFileW(wFilename.c_str())) {
+ return WindowsError(filename, ::GetLastError());
+ }
+ return Status::OK();
+ }
+
+ Status CreateDir(const std::string& dirname) override {
+ auto wDirname = toUtf16(dirname);
+ if (!::CreateDirectoryW(wDirname.c_str(), nullptr)) {
+ return WindowsError(dirname, ::GetLastError());
+ }
+ return Status::OK();
+ }
+
+ Status DeleteDir(const std::string& dirname) override {
+ auto wDirname = toUtf16(dirname);
+ if (!::RemoveDirectoryW(wDirname.c_str())) {
+ return WindowsError(dirname, ::GetLastError());
+ }
+ return Status::OK();
+ }
+
+ Status GetFileSize(const std::string& filename, uint64_t* size) override {
+ WIN32_FILE_ATTRIBUTE_DATA file_attributes;
+ auto wFilename = toUtf16(filename);
+ if (!::GetFileAttributesExW(wFilename.c_str(), GetFileExInfoStandard,
+ &file_attributes)) {
+ return WindowsError(filename, ::GetLastError());
+ }
+ ULARGE_INTEGER file_size;
+ file_size.HighPart = file_attributes.nFileSizeHigh;
+ file_size.LowPart = file_attributes.nFileSizeLow;
+ *size = file_size.QuadPart;
+ return Status::OK();
+ }
+
+ Status RenameFile(const std::string& from, const std::string& to) override {
+ // Try a simple move first. It will only succeed when |to| doesn't already
+ // exist.
+ auto wFrom = toUtf16(from);
+ auto wTo = toUtf16(to);
+ if (::MoveFileW(wFrom.c_str(), wTo.c_str())) {
+ return Status::OK();
+ }
+ DWORD move_error = ::GetLastError();
+
+ // Try the full-blown replace if the move fails, as ReplaceFile will only
+ // succeed when |to| does exist. When writing to a network share, we may not
+ // be able to change the ACLs. Ignore ACL errors in that case
+ // (REPLACEFILE_IGNORE_MERGE_ERRORS).
+ if (::ReplaceFileW(wTo.c_str(), wFrom.c_str(), /*lpBackupFileName=*/nullptr,
+ REPLACEFILE_IGNORE_MERGE_ERRORS,
+ /*lpExclude=*/nullptr, /*lpReserved=*/nullptr)) {
+ return Status::OK();
+ }
+ DWORD replace_error = ::GetLastError();
+ // In the case of ERROR_FILE_NOT_FOUND from ReplaceFile, it is likely that
+ // |to| does not exist. In this case, the more relevant error comes from the
+ // call to MoveFile.
+ if (replace_error == ERROR_FILE_NOT_FOUND ||
+ replace_error == ERROR_PATH_NOT_FOUND) {
+ return WindowsError(from, move_error);
+ } else {
+ return WindowsError(from, replace_error);
+ }
+ }
+
+ Status LockFile(const std::string& filename, FileLock** lock) override {
+ *lock = nullptr;
+ Status result;
+ auto wFilename = toUtf16(filename);
+ ScopedHandle handle = ::CreateFileW(
+ wFilename.c_str(), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ,
+ /*lpSecurityAttributes=*/nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL,
+ nullptr);
+ if (!handle.is_valid()) {
+ result = WindowsError(filename, ::GetLastError());
+ } else if (!LockOrUnlock(handle.get(), true)) {
+ result = WindowsError("lock " + filename, ::GetLastError());
+ } else {
+ *lock = new WindowsFileLock(std::move(handle), filename);
+ }
+ return result;
+ }
+
+ Status UnlockFile(FileLock* lock) override {
+ WindowsFileLock* windows_file_lock =
+ reinterpret_cast<WindowsFileLock*>(lock);
+ if (!LockOrUnlock(windows_file_lock->handle().get(), false)) {
+ return WindowsError("unlock " + windows_file_lock->filename(),
+ ::GetLastError());
+ }
+ delete windows_file_lock;
+ return Status::OK();
+ }
+
+ void Schedule(void (*background_work_function)(void* background_work_arg),
+ void* background_work_arg) override;
+
+ void StartThread(void (*thread_main)(void* thread_main_arg),
+ void* thread_main_arg) override {
+ std::thread new_thread(thread_main, thread_main_arg);
+ new_thread.detach();
+ }
+
+ Status GetTestDirectory(std::string* result) override {
+ const char* env = getenv("TEST_TMPDIR");
+ if (env && env[0] != '\0') {
+ *result = env;
+ return Status::OK();
+ }
+
+ wchar_t wtmp_path[MAX_PATH];
+ if (!GetTempPathW(ARRAYSIZE(wtmp_path), wtmp_path)) {
+ return WindowsError("GetTempPath", ::GetLastError());
+ }
+ std::string tmp_path = toUtf8(std::wstring(wtmp_path));
+ std::stringstream ss;
+ ss << tmp_path << "leveldbtest-" << std::this_thread::get_id();
+ *result = ss.str();
+
+ // Directory may already exist
+ CreateDir(*result);
+ return Status::OK();
+ }
+
+ Status NewLogger(const std::string& filename, Logger** result) override {
+ auto wFilename = toUtf16(filename);
+ std::FILE* fp = _wfopen(wFilename.c_str(), L"w");
+ if (fp == nullptr) {
+ *result = nullptr;
+ return WindowsError(filename, ::GetLastError());
+ } else {
+ *result = new WindowsLogger(fp);
+ return Status::OK();
+ }
+ }
+
+ uint64_t NowMicros() override {
+ // GetSystemTimeAsFileTime typically has a resolution of 10-20 msec.
+ // TODO(cmumford): Switch to GetSystemTimePreciseAsFileTime which is
+ // available in Windows 8 and later.
+ FILETIME ft;
+ ::GetSystemTimeAsFileTime(&ft);
+ // Each tick represents a 100-nanosecond interval since January 1, 1601
+ // (UTC).
+ uint64_t num_ticks =
+ (static_cast<uint64_t>(ft.dwHighDateTime) << 32) + ft.dwLowDateTime;
+ return num_ticks / 10;
+ }
+
+ void SleepForMicroseconds(int micros) override {
+ std::this_thread::sleep_for(std::chrono::microseconds(micros));
+ }
+
+ private:
+ void BackgroundThreadMain();
+
+ static void BackgroundThreadEntryPoint(WindowsEnv* env) {
+ env->BackgroundThreadMain();
+ }
+
+ // Stores the work item data in a Schedule() call.
+ //
+ // Instances are constructed on the thread calling Schedule() and used on the
+ // background thread.
+ //
+ // This structure is thread-safe because it is immutable.
+ struct BackgroundWorkItem {
+ explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
+ : function(function), arg(arg) {}
+
+ void (*const function)(void*);
+ void* const arg;
+ };
+
+ port::Mutex background_work_mutex_;
+ port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_);
+ bool started_background_thread_ GUARDED_BY(background_work_mutex_);
+
+ std::queue<BackgroundWorkItem> background_work_queue_
+ GUARDED_BY(background_work_mutex_);
+
+ Limiter mmap_limiter_; // Thread-safe.
+
+ // Converts a Windows UTF-16 wide-character string to a UTF-8 string.
+ // See http://utf8everywhere.org/#windows
+ std::string toUtf8(const std::wstring& wstr) {
+ if (wstr.empty()) return std::string();
+ int size_needed = WideCharToMultiByte(
+ CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL);
+ std::string strTo(size_needed, 0);
+ WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), &strTo[0],
+ size_needed, NULL, NULL);
+ return strTo;
+ }
+
+ // Converts a UTF-8 string to a Windows UTF-16 wide-character string.
+ // See http://utf8everywhere.org/#windows
+ std::wstring toUtf16(const std::string& str) {
+ if (str.empty()) return std::wstring();
+ int size_needed =
+ MultiByteToWideChar(CP_UTF8, 0, &str[0], (int)str.size(), NULL, 0);
+ std::wstring strTo(size_needed, 0);
+ MultiByteToWideChar(CP_UTF8, 0, &str[0], (int)str.size(), &strTo[0],
+ size_needed);
+ return strTo;
+ }
+};
+
+// Return the maximum number of concurrent mmaps.
+int MaxMmaps() { return g_mmap_limit; }
+
+WindowsEnv::WindowsEnv()
+ : background_work_cv_(&background_work_mutex_),
+ started_background_thread_(false),
+ mmap_limiter_(MaxMmaps()) {}
+
+void WindowsEnv::Schedule(
+ void (*background_work_function)(void* background_work_arg),
+ void* background_work_arg) {
+ background_work_mutex_.Lock();
+
+ // Start the background thread, if we haven't done so already.
+ if (!started_background_thread_) {
+ started_background_thread_ = true;
+ std::thread background_thread(WindowsEnv::BackgroundThreadEntryPoint, this);
+ background_thread.detach();
+ }
+
+ // If the queue is empty, the background thread may be waiting for work.
+ if (background_work_queue_.empty()) {
+ background_work_cv_.Signal();
+ }
+
+ background_work_queue_.emplace(background_work_function, background_work_arg);
+ background_work_mutex_.Unlock();
+}
+
+void WindowsEnv::BackgroundThreadMain() {
+ while (true) {
+ background_work_mutex_.Lock();
+
+ // Wait until there is work to be done.
+ while (background_work_queue_.empty()) {
+ background_work_cv_.Wait();
+ }
+
+ assert(!background_work_queue_.empty());
+ auto background_work_function = background_work_queue_.front().function;
+ void* background_work_arg = background_work_queue_.front().arg;
+ background_work_queue_.pop();
+
+ background_work_mutex_.Unlock();
+ background_work_function(background_work_arg);
+ }
+}
+
+// Wraps an Env instance whose destructor is never called.
+//
+// Intended usage:
+// using PlatformSingletonEnv = SingletonEnv<PlatformEnv>;
+// void ConfigurePosixEnv(int param) {
+// PlatformSingletonEnv::AssertEnvNotInitialized();
+// // set global configuration flags.
+// }
+// Env* Env::Default() {
+// static PlatformSingletonEnv default_env;
+// return default_env.env();
+// }
+template <typename EnvType>
+class SingletonEnv {
+ public:
+ SingletonEnv() {
+#if !defined(NDEBUG)
+ env_initialized_.store(true, std::memory_order::memory_order_relaxed);
+#endif // !defined(NDEBUG)
+ static_assert(sizeof(env_storage_) >= sizeof(EnvType),
+ "env_storage_ will not fit the Env");
+ static_assert(alignof(decltype(env_storage_)) >= alignof(EnvType),
+ "env_storage_ does not meet the Env's alignment needs");
+ new (&env_storage_) EnvType();
+ }
+ ~SingletonEnv() = default;
+
+ SingletonEnv(const SingletonEnv&) = delete;
+ SingletonEnv& operator=(const SingletonEnv&) = delete;
+
+ Env* env() { return reinterpret_cast<Env*>(&env_storage_); }
+
+ static void AssertEnvNotInitialized() {
+#if !defined(NDEBUG)
+ assert(!env_initialized_.load(std::memory_order::memory_order_relaxed));
+#endif // !defined(NDEBUG)
+ }
+
+ private:
+ typename std::aligned_storage<sizeof(EnvType), alignof(EnvType)>::type
+ env_storage_;
+#if !defined(NDEBUG)
+ static std::atomic<bool> env_initialized_;
+#endif // !defined(NDEBUG)
+};
+
+#if !defined(NDEBUG)
+template <typename EnvType>
+std::atomic<bool> SingletonEnv<EnvType>::env_initialized_;
+#endif // !defined(NDEBUG)
+
+using WindowsDefaultEnv = SingletonEnv<WindowsEnv>;
+
+} // namespace
+
+void EnvWindowsTestHelper::SetReadOnlyMMapLimit(int limit) {
+ WindowsDefaultEnv::AssertEnvNotInitialized();
+ g_mmap_limit = limit;
+}
+
+Env* Env::Default() {
+ static WindowsDefaultEnv env_container;
+ return env_container.env();
+}
+
+} // namespace leveldb
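For orientation, the Env::Default() defined above is the entry point the rest of LevelDB goes through. A minimal usage sketch (not part of this patch; the file name and contents are purely illustrative):

    #include <cassert>
    #include "leveldb/env.h"
    #include "leveldb/slice.h"

    // Write a small file through the platform Env, then read it back sequentially.
    void EnvRoundtripSketch() {
      leveldb::Env* env = leveldb::Env::Default();

      leveldb::WritableFile* wf = nullptr;
      leveldb::Status s = env->NewWritableFile("example.tmp", &wf);
      assert(s.ok());
      wf->Append(leveldb::Slice("hello"));
      wf->Sync();
      wf->Close();
      delete wf;

      leveldb::SequentialFile* rf = nullptr;
      s = env->NewSequentialFile("example.tmp", &rf);
      assert(s.ok());
      char scratch[16];
      leveldb::Slice result;
      rf->Read(5, &result, scratch);  // result now references the bytes read into scratch
      delete rf;

      env->DeleteFile("example.tmp");
    }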
diff --git a/src/leveldb/util/env_windows_test.cc b/src/leveldb/util/env_windows_test.cc
new file mode 100644
index 0000000000..3c22133891
--- /dev/null
+++ b/src/leveldb/util/env_windows_test.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "leveldb/env.h"
+
+#include "port/port.h"
+#include "util/env_windows_test_helper.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+static const int kMMapLimit = 4;
+
+class EnvWindowsTest {
+ public:
+ static void SetFileLimits(int mmap_limit) {
+ EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit);
+ }
+
+ EnvWindowsTest() : env_(Env::Default()) {}
+
+ Env* env_;
+};
+
+TEST(EnvWindowsTest, TestOpenOnRead) {
+ // Write some test data to a single file that will be opened |n| times.
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string test_file = test_dir + "/open_on_read.txt";
+
+ FILE* f = fopen(test_file.c_str(), "w");
+ ASSERT_TRUE(f != nullptr);
+ const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
+ fputs(kFileData, f);
+ fclose(f);
+
+ // Open the test file more times than the read-only mmap limit allows to force
+ // leveldb::WindowsEnv to switch from mapping the file into memory
+ // to basic file reading.
+ const int kNumFiles = kMMapLimit + 5;
+ leveldb::RandomAccessFile* files[kNumFiles] = {0};
+ for (int i = 0; i < kNumFiles; i++) {
+ ASSERT_OK(env_->NewRandomAccessFile(test_file, &files[i]));
+ }
+ char scratch;
+ Slice read_result;
+ for (int i = 0; i < kNumFiles; i++) {
+ ASSERT_OK(files[i]->Read(i, 1, &read_result, &scratch));
+ ASSERT_EQ(kFileData[i], read_result[0]);
+ }
+ for (int i = 0; i < kNumFiles; i++) {
+ delete files[i];
+ }
+ ASSERT_OK(env_->DeleteFile(test_file));
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ // All tests currently run with the same read-only file limits.
+ leveldb::EnvWindowsTest::SetFileLimits(leveldb::kMMapLimit);
+ return leveldb::test::RunAllTests();
+}
diff --git a/src/leveldb/util/env_windows_test_helper.h b/src/leveldb/util/env_windows_test_helper.h
new file mode 100644
index 0000000000..e6f6020561
--- /dev/null
+++ b/src/leveldb/util/env_windows_test_helper.h
@@ -0,0 +1,25 @@
+// Copyright 2018 (c) The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_
+#define STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_
+
+namespace leveldb {
+
+class EnvWindowsTest;
+
+// A helper for the Windows Env to facilitate testing.
+class EnvWindowsTestHelper {
+ private:
+ friend class CorruptionTest;
+ friend class EnvWindowsTest;
+
+ // Set the maximum number of read-only files that will be mapped via mmap.
+ // Must be called before creating an Env.
+ static void SetReadOnlyMMapLimit(int limit);
+};
+
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_
diff --git a/src/leveldb/util/filter_policy.cc b/src/leveldb/util/filter_policy.cc
index 7b045c8c91..90fd754d64 100644
--- a/src/leveldb/util/filter_policy.cc
+++ b/src/leveldb/util/filter_policy.cc
@@ -6,6 +6,6 @@
namespace leveldb {
-FilterPolicy::~FilterPolicy() { }
+FilterPolicy::~FilterPolicy() {}
} // namespace leveldb
diff --git a/src/leveldb/util/hash.cc b/src/leveldb/util/hash.cc
index ed439ce7a2..dd47c110ee 100644
--- a/src/leveldb/util/hash.cc
+++ b/src/leveldb/util/hash.cc
@@ -2,15 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "util/hash.h"
+
#include <string.h>
+
#include "util/coding.h"
-#include "util/hash.h"
// The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through
// between switch labels. The real definition should be provided externally.
// This one is a fallback version for unsupported compilers.
#ifndef FALLTHROUGH_INTENDED
-#define FALLTHROUGH_INTENDED do { } while (0)
+#define FALLTHROUGH_INTENDED \
+ do { \
+ } while (0)
#endif
namespace leveldb {
@@ -34,13 +38,13 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) {
// Pick up remaining bytes
switch (limit - data) {
case 3:
- h += static_cast<unsigned char>(data[2]) << 16;
+ h += static_cast<uint8_t>(data[2]) << 16;
FALLTHROUGH_INTENDED;
case 2:
- h += static_cast<unsigned char>(data[1]) << 8;
+ h += static_cast<uint8_t>(data[1]) << 8;
FALLTHROUGH_INTENDED;
case 1:
- h += static_cast<unsigned char>(data[0]);
+ h += static_cast<uint8_t>(data[0]);
h *= m;
h ^= (h >> r);
break;
@@ -48,5 +52,4 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) {
return h;
}
-
} // namespace leveldb
diff --git a/src/leveldb/util/hash.h b/src/leveldb/util/hash.h
index 8889d56be8..74bdb6e7b2 100644
--- a/src/leveldb/util/hash.h
+++ b/src/leveldb/util/hash.h
@@ -12,8 +12,8 @@
namespace leveldb {
-extern uint32_t Hash(const char* data, size_t n, uint32_t seed);
+uint32_t Hash(const char* data, size_t n, uint32_t seed);
-}
+} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_HASH_H_
diff --git a/src/leveldb/util/hash_test.cc b/src/leveldb/util/hash_test.cc
index eaa1c92c23..21f8171da6 100644
--- a/src/leveldb/util/hash_test.cc
+++ b/src/leveldb/util/hash_test.cc
@@ -7,26 +7,18 @@
namespace leveldb {
-class HASH { };
+class HASH {};
TEST(HASH, SignedUnsignedIssue) {
- const unsigned char data1[1] = {0x62};
- const unsigned char data2[2] = {0xc3, 0x97};
- const unsigned char data3[3] = {0xe2, 0x99, 0xa5};
- const unsigned char data4[4] = {0xe1, 0x80, 0xb9, 0x32};
- const unsigned char data5[48] = {
- 0x01, 0xc0, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x14, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x04, 0x00,
- 0x00, 0x00, 0x00, 0x14,
- 0x00, 0x00, 0x00, 0x18,
- 0x28, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x02, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
+ const uint8_t data1[1] = {0x62};
+ const uint8_t data2[2] = {0xc3, 0x97};
+ const uint8_t data3[3] = {0xe2, 0x99, 0xa5};
+ const uint8_t data4[4] = {0xe1, 0x80, 0xb9, 0x32};
+ const uint8_t data5[48] = {
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
ASSERT_EQ(Hash(0, 0, 0xbc9f1d34), 0xbc9f1d34);
@@ -49,6 +41,4 @@ TEST(HASH, SignedUnsignedIssue) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/histogram.cc b/src/leveldb/util/histogram.cc
index bb95f583ea..65092c88f2 100644
--- a/src/leveldb/util/histogram.cc
+++ b/src/leveldb/util/histogram.cc
@@ -2,36 +2,174 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "util/histogram.h"
+
#include <math.h>
#include <stdio.h>
+
#include "port/port.h"
-#include "util/histogram.h"
namespace leveldb {
const double Histogram::kBucketLimit[kNumBuckets] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 25, 30, 35, 40, 45,
- 50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400, 450,
- 500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000,
- 3500, 4000, 4500, 5000, 6000, 7000, 8000, 9000, 10000, 12000, 14000,
- 16000, 18000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000,
- 70000, 80000, 90000, 100000, 120000, 140000, 160000, 180000, 200000,
- 250000, 300000, 350000, 400000, 450000, 500000, 600000, 700000, 800000,
- 900000, 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2500000,
- 3000000, 3500000, 4000000, 4500000, 5000000, 6000000, 7000000, 8000000,
- 9000000, 10000000, 12000000, 14000000, 16000000, 18000000, 20000000,
- 25000000, 30000000, 35000000, 40000000, 45000000, 50000000, 60000000,
- 70000000, 80000000, 90000000, 100000000, 120000000, 140000000, 160000000,
- 180000000, 200000000, 250000000, 300000000, 350000000, 400000000,
- 450000000, 500000000, 600000000, 700000000, 800000000, 900000000,
- 1000000000, 1200000000, 1400000000, 1600000000, 1800000000, 2000000000,
- 2500000000.0, 3000000000.0, 3500000000.0, 4000000000.0, 4500000000.0,
- 5000000000.0, 6000000000.0, 7000000000.0, 8000000000.0, 9000000000.0,
- 1e200,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 12,
+ 14,
+ 16,
+ 18,
+ 20,
+ 25,
+ 30,
+ 35,
+ 40,
+ 45,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 120,
+ 140,
+ 160,
+ 180,
+ 200,
+ 250,
+ 300,
+ 350,
+ 400,
+ 450,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1200,
+ 1400,
+ 1600,
+ 1800,
+ 2000,
+ 2500,
+ 3000,
+ 3500,
+ 4000,
+ 4500,
+ 5000,
+ 6000,
+ 7000,
+ 8000,
+ 9000,
+ 10000,
+ 12000,
+ 14000,
+ 16000,
+ 18000,
+ 20000,
+ 25000,
+ 30000,
+ 35000,
+ 40000,
+ 45000,
+ 50000,
+ 60000,
+ 70000,
+ 80000,
+ 90000,
+ 100000,
+ 120000,
+ 140000,
+ 160000,
+ 180000,
+ 200000,
+ 250000,
+ 300000,
+ 350000,
+ 400000,
+ 450000,
+ 500000,
+ 600000,
+ 700000,
+ 800000,
+ 900000,
+ 1000000,
+ 1200000,
+ 1400000,
+ 1600000,
+ 1800000,
+ 2000000,
+ 2500000,
+ 3000000,
+ 3500000,
+ 4000000,
+ 4500000,
+ 5000000,
+ 6000000,
+ 7000000,
+ 8000000,
+ 9000000,
+ 10000000,
+ 12000000,
+ 14000000,
+ 16000000,
+ 18000000,
+ 20000000,
+ 25000000,
+ 30000000,
+ 35000000,
+ 40000000,
+ 45000000,
+ 50000000,
+ 60000000,
+ 70000000,
+ 80000000,
+ 90000000,
+ 100000000,
+ 120000000,
+ 140000000,
+ 160000000,
+ 180000000,
+ 200000000,
+ 250000000,
+ 300000000,
+ 350000000,
+ 400000000,
+ 450000000,
+ 500000000,
+ 600000000,
+ 700000000,
+ 800000000,
+ 900000000,
+ 1000000000,
+ 1200000000,
+ 1400000000,
+ 1600000000,
+ 1800000000,
+ 2000000000,
+ 2500000000.0,
+ 3000000000.0,
+ 3500000000.0,
+ 4000000000.0,
+ 4500000000.0,
+ 5000000000.0,
+ 6000000000.0,
+ 7000000000.0,
+ 8000000000.0,
+ 9000000000.0,
+ 1e200,
};
void Histogram::Clear() {
- min_ = kBucketLimit[kNumBuckets-1];
+ min_ = kBucketLimit[kNumBuckets - 1];
max_ = 0;
num_ = 0;
sum_ = 0;
@@ -66,9 +204,7 @@ void Histogram::Merge(const Histogram& other) {
}
}
-double Histogram::Median() const {
- return Percentile(50.0);
-}
+double Histogram::Median() const { return Percentile(50.0); }
double Histogram::Percentile(double p) const {
double threshold = num_ * (p / 100.0);
@@ -77,7 +213,7 @@ double Histogram::Percentile(double p) const {
sum += buckets_[b];
if (sum >= threshold) {
// Scale linearly within this bucket
- double left_point = (b == 0) ? 0 : kBucketLimit[b-1];
+ double left_point = (b == 0) ? 0 : kBucketLimit[b - 1];
double right_point = kBucketLimit[b];
double left_sum = sum - buckets_[b];
double right_sum = sum;
@@ -105,12 +241,10 @@ double Histogram::StandardDeviation() const {
std::string Histogram::ToString() const {
std::string r;
char buf[200];
- snprintf(buf, sizeof(buf),
- "Count: %.0f Average: %.4f StdDev: %.2f\n",
- num_, Average(), StandardDeviation());
+ snprintf(buf, sizeof(buf), "Count: %.0f Average: %.4f StdDev: %.2f\n", num_,
+ Average(), StandardDeviation());
r.append(buf);
- snprintf(buf, sizeof(buf),
- "Min: %.4f Median: %.4f Max: %.4f\n",
+ snprintf(buf, sizeof(buf), "Min: %.4f Median: %.4f Max: %.4f\n",
(num_ == 0.0 ? 0.0 : min_), Median(), max_);
r.append(buf);
r.append("------------------------------------------------------\n");
@@ -119,17 +253,16 @@ std::string Histogram::ToString() const {
for (int b = 0; b < kNumBuckets; b++) {
if (buckets_[b] <= 0.0) continue;
sum += buckets_[b];
- snprintf(buf, sizeof(buf),
- "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
- ((b == 0) ? 0.0 : kBucketLimit[b-1]), // left
- kBucketLimit[b], // right
- buckets_[b], // count
- mult * buckets_[b], // percentage
- mult * sum); // cumulative percentage
+ snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
+ ((b == 0) ? 0.0 : kBucketLimit[b - 1]), // left
+ kBucketLimit[b], // right
+ buckets_[b], // count
+ mult * buckets_[b], // percentage
+ mult * sum); // cumulative percentage
r.append(buf);
// Add hash marks based on percentage; 20 marks for 100%.
- int marks = static_cast<int>(20*(buckets_[b] / num_) + 0.5);
+ int marks = static_cast<int>(20 * (buckets_[b] / num_) + 0.5);
r.append(marks, '#');
r.push_back('\n');
}
diff --git a/src/leveldb/util/histogram.h b/src/leveldb/util/histogram.h
index 1ef9f3c8ab..4da60fba45 100644
--- a/src/leveldb/util/histogram.h
+++ b/src/leveldb/util/histogram.h
@@ -11,8 +11,8 @@ namespace leveldb {
class Histogram {
public:
- Histogram() { }
- ~Histogram() { }
+ Histogram() {}
+ ~Histogram() {}
void Clear();
void Add(double value);
@@ -21,20 +21,22 @@ class Histogram {
std::string ToString() const;
private:
+ enum { kNumBuckets = 154 };
+
+ double Median() const;
+ double Percentile(double p) const;
+ double Average() const;
+ double StandardDeviation() const;
+
+ static const double kBucketLimit[kNumBuckets];
+
double min_;
double max_;
double num_;
double sum_;
double sum_squares_;
- enum { kNumBuckets = 154 };
- static const double kBucketLimit[kNumBuckets];
double buckets_[kNumBuckets];
-
- double Median() const;
- double Percentile(double p) const;
- double Average() const;
- double StandardDeviation() const;
};
} // namespace leveldb
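The Histogram interface above has no accompanying test in this patch; a short sketch of how it is typically driven (the sample values are illustrative, not taken from the diff):

    #include <cstdio>
    #include "util/histogram.h"

    // Record a few sample latencies (in microseconds) and print the bucketed summary.
    void HistogramSketch() {
      leveldb::Histogram hist;
      hist.Clear();        // reset counters and buckets before use
      hist.Add(12.0);
      hist.Add(57.0);
      hist.Add(340.0);
      std::printf("%s", hist.ToString().c_str());
    }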
diff --git a/src/leveldb/util/logging.cc b/src/leveldb/util/logging.cc
index db6160c8f1..75e9d037d3 100644
--- a/src/leveldb/util/logging.cc
+++ b/src/leveldb/util/logging.cc
@@ -8,6 +8,9 @@
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
+
+#include <limits>
+
#include "leveldb/env.h"
#include "leveldb/slice.h"
@@ -15,7 +18,7 @@ namespace leveldb {
void AppendNumberTo(std::string* str, uint64_t num) {
char buf[30];
- snprintf(buf, sizeof(buf), "%llu", (unsigned long long) num);
+ snprintf(buf, sizeof(buf), "%llu", (unsigned long long)num);
str->append(buf);
}
@@ -46,27 +49,36 @@ std::string EscapeString(const Slice& value) {
}
bool ConsumeDecimalNumber(Slice* in, uint64_t* val) {
- uint64_t v = 0;
- int digits = 0;
- while (!in->empty()) {
- unsigned char c = (*in)[0];
- if (c >= '0' && c <= '9') {
- ++digits;
- const int delta = (c - '0');
- static const uint64_t kMaxUint64 = ~static_cast<uint64_t>(0);
- if (v > kMaxUint64/10 ||
- (v == kMaxUint64/10 && delta > kMaxUint64%10)) {
- // Overflow
- return false;
- }
- v = (v * 10) + delta;
- in->remove_prefix(1);
- } else {
- break;
+ // Constants that will be optimized away.
+ constexpr const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
+ constexpr const char kLastDigitOfMaxUint64 =
+ '0' + static_cast<char>(kMaxUint64 % 10);
+
+ uint64_t value = 0;
+
+ // reinterpret_cast-ing from char* to uint8_t* to avoid signedness issues.
+ const uint8_t* start = reinterpret_cast<const uint8_t*>(in->data());
+
+ const uint8_t* end = start + in->size();
+ const uint8_t* current = start;
+ for (; current != end; ++current) {
+ const uint8_t ch = *current;
+ if (ch < '0' || ch > '9') break;
+
+ // Overflow check.
+ // kMaxUint64 / 10 is also constant and will be optimized away.
+ if (value > kMaxUint64 / 10 ||
+ (value == kMaxUint64 / 10 && ch > kLastDigitOfMaxUint64)) {
+ return false;
}
+
+ value = (value * 10) + (ch - '0');
}
- *val = v;
- return (digits > 0);
+
+ *val = value;
+ const size_t digits_consumed = current - start;
+ in->remove_prefix(digits_consumed);
+ return digits_consumed != 0;
}
} // namespace leveldb
diff --git a/src/leveldb/util/logging.h b/src/leveldb/util/logging.h
index 1b450d2480..8ff2da86b4 100644
--- a/src/leveldb/util/logging.h
+++ b/src/leveldb/util/logging.h
@@ -8,9 +8,11 @@
#ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_
#define STORAGE_LEVELDB_UTIL_LOGGING_H_
-#include <stdio.h>
#include <stdint.h>
+#include <stdio.h>
+
#include <string>
+
#include "port/port.h"
namespace leveldb {
@@ -19,24 +21,24 @@ class Slice;
class WritableFile;
// Append a human-readable printout of "num" to *str
-extern void AppendNumberTo(std::string* str, uint64_t num);
+void AppendNumberTo(std::string* str, uint64_t num);
// Append a human-readable printout of "value" to *str.
// Escapes any non-printable characters found in "value".
-extern void AppendEscapedStringTo(std::string* str, const Slice& value);
+void AppendEscapedStringTo(std::string* str, const Slice& value);
// Return a human-readable printout of "num"
-extern std::string NumberToString(uint64_t num);
+std::string NumberToString(uint64_t num);
// Return a human-readable version of "value".
// Escapes any non-printable characters found in "value".
-extern std::string EscapeString(const Slice& value);
+std::string EscapeString(const Slice& value);
// Parse a human-readable number from "*in" into *value. On success,
// advances "*in" past the consumed number and sets "*val" to the
// numeric value. Otherwise, returns false and leaves *in in an
// unspecified state.
-extern bool ConsumeDecimalNumber(Slice* in, uint64_t* val);
+bool ConsumeDecimalNumber(Slice* in, uint64_t* val);
} // namespace leveldb
diff --git a/src/leveldb/util/logging_test.cc b/src/leveldb/util/logging_test.cc
new file mode 100644
index 0000000000..389cbeb14f
--- /dev/null
+++ b/src/leveldb/util/logging_test.cc
@@ -0,0 +1,143 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <limits>
+#include <string>
+
+#include "leveldb/slice.h"
+#include "util/logging.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+class Logging {};
+
+TEST(Logging, NumberToString) {
+ ASSERT_EQ("0", NumberToString(0));
+ ASSERT_EQ("1", NumberToString(1));
+ ASSERT_EQ("9", NumberToString(9));
+
+ ASSERT_EQ("10", NumberToString(10));
+ ASSERT_EQ("11", NumberToString(11));
+ ASSERT_EQ("19", NumberToString(19));
+ ASSERT_EQ("99", NumberToString(99));
+
+ ASSERT_EQ("100", NumberToString(100));
+ ASSERT_EQ("109", NumberToString(109));
+ ASSERT_EQ("190", NumberToString(190));
+ ASSERT_EQ("123", NumberToString(123));
+ ASSERT_EQ("12345678", NumberToString(12345678));
+
+ static_assert(std::numeric_limits<uint64_t>::max() == 18446744073709551615U,
+ "Test consistency check");
+ ASSERT_EQ("18446744073709551000", NumberToString(18446744073709551000U));
+ ASSERT_EQ("18446744073709551600", NumberToString(18446744073709551600U));
+ ASSERT_EQ("18446744073709551610", NumberToString(18446744073709551610U));
+ ASSERT_EQ("18446744073709551614", NumberToString(18446744073709551614U));
+ ASSERT_EQ("18446744073709551615", NumberToString(18446744073709551615U));
+}
+
+void ConsumeDecimalNumberRoundtripTest(uint64_t number,
+ const std::string& padding = "") {
+ std::string decimal_number = NumberToString(number);
+ std::string input_string = decimal_number + padding;
+ Slice input(input_string);
+ Slice output = input;
+ uint64_t result;
+ ASSERT_TRUE(ConsumeDecimalNumber(&output, &result));
+ ASSERT_EQ(number, result);
+ ASSERT_EQ(decimal_number.size(), output.data() - input.data());
+ ASSERT_EQ(padding.size(), output.size());
+}
+
+TEST(Logging, ConsumeDecimalNumberRoundtrip) {
+ ConsumeDecimalNumberRoundtripTest(0);
+ ConsumeDecimalNumberRoundtripTest(1);
+ ConsumeDecimalNumberRoundtripTest(9);
+
+ ConsumeDecimalNumberRoundtripTest(10);
+ ConsumeDecimalNumberRoundtripTest(11);
+ ConsumeDecimalNumberRoundtripTest(19);
+ ConsumeDecimalNumberRoundtripTest(99);
+
+ ConsumeDecimalNumberRoundtripTest(100);
+ ConsumeDecimalNumberRoundtripTest(109);
+ ConsumeDecimalNumberRoundtripTest(190);
+ ConsumeDecimalNumberRoundtripTest(123);
+ ASSERT_EQ("12345678", NumberToString(12345678));
+
+ for (uint64_t i = 0; i < 100; ++i) {
+ uint64_t large_number = std::numeric_limits<uint64_t>::max() - i;
+ ConsumeDecimalNumberRoundtripTest(large_number);
+ }
+}
+
+TEST(Logging, ConsumeDecimalNumberRoundtripWithPadding) {
+ ConsumeDecimalNumberRoundtripTest(0, " ");
+ ConsumeDecimalNumberRoundtripTest(1, "abc");
+ ConsumeDecimalNumberRoundtripTest(9, "x");
+
+ ConsumeDecimalNumberRoundtripTest(10, "_");
+ ConsumeDecimalNumberRoundtripTest(11, std::string("\0\0\0", 3));
+ ConsumeDecimalNumberRoundtripTest(19, "abc");
+ ConsumeDecimalNumberRoundtripTest(99, "padding");
+
+ ConsumeDecimalNumberRoundtripTest(100, " ");
+
+ for (uint64_t i = 0; i < 100; ++i) {
+ uint64_t large_number = std::numeric_limits<uint64_t>::max() - i;
+ ConsumeDecimalNumberRoundtripTest(large_number, "pad");
+ }
+}
+
+void ConsumeDecimalNumberOverflowTest(const std::string& input_string) {
+ Slice input(input_string);
+ Slice output = input;
+ uint64_t result;
+ ASSERT_EQ(false, ConsumeDecimalNumber(&output, &result));
+}
+
+TEST(Logging, ConsumeDecimalNumberOverflow) {
+ static_assert(std::numeric_limits<uint64_t>::max() == 18446744073709551615U,
+ "Test consistency check");
+ ConsumeDecimalNumberOverflowTest("18446744073709551616");
+ ConsumeDecimalNumberOverflowTest("18446744073709551617");
+ ConsumeDecimalNumberOverflowTest("18446744073709551618");
+ ConsumeDecimalNumberOverflowTest("18446744073709551619");
+ ConsumeDecimalNumberOverflowTest("18446744073709551620");
+ ConsumeDecimalNumberOverflowTest("18446744073709551621");
+ ConsumeDecimalNumberOverflowTest("18446744073709551622");
+ ConsumeDecimalNumberOverflowTest("18446744073709551623");
+ ConsumeDecimalNumberOverflowTest("18446744073709551624");
+ ConsumeDecimalNumberOverflowTest("18446744073709551625");
+ ConsumeDecimalNumberOverflowTest("18446744073709551626");
+
+ ConsumeDecimalNumberOverflowTest("18446744073709551700");
+
+ ConsumeDecimalNumberOverflowTest("99999999999999999999");
+}
+
+void ConsumeDecimalNumberNoDigitsTest(const std::string& input_string) {
+ Slice input(input_string);
+ Slice output = input;
+ uint64_t result;
+ ASSERT_EQ(false, ConsumeDecimalNumber(&output, &result));
+ ASSERT_EQ(input.data(), output.data());
+ ASSERT_EQ(input.size(), output.size());
+}
+
+TEST(Logging, ConsumeDecimalNumberNoDigits) {
+ ConsumeDecimalNumberNoDigitsTest("");
+ ConsumeDecimalNumberNoDigitsTest(" ");
+ ConsumeDecimalNumberNoDigitsTest("a");
+ ConsumeDecimalNumberNoDigitsTest(" 123");
+ ConsumeDecimalNumberNoDigitsTest("a123");
+ ConsumeDecimalNumberNoDigitsTest(std::string("\000123", 4));
+ ConsumeDecimalNumberNoDigitsTest(std::string("\177123", 4));
+ ConsumeDecimalNumberNoDigitsTest(std::string("\377123", 4));
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/mutexlock.h b/src/leveldb/util/mutexlock.h
index 1ff5a9efa1..0cb2e250fb 100644
--- a/src/leveldb/util/mutexlock.h
+++ b/src/leveldb/util/mutexlock.h
@@ -22,20 +22,18 @@ namespace leveldb {
class SCOPED_LOCKABLE MutexLock {
public:
- explicit MutexLock(port::Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu)
- : mu_(mu) {
+ explicit MutexLock(port::Mutex* mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
this->mu_->Lock();
}
~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); }
+ MutexLock(const MutexLock&) = delete;
+ MutexLock& operator=(const MutexLock&) = delete;
+
private:
- port::Mutex *const mu_;
- // No copying allowed
- MutexLock(const MutexLock&);
- void operator=(const MutexLock&);
+ port::Mutex* const mu_;
};
} // namespace leveldb
-
#endif // STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
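As a quick reference, the scoped-lock idiom the class above enables looks like this (a sketch; the counter and function names are made up for illustration):

    #include "port/port.h"
    #include "util/mutexlock.h"

    leveldb::port::Mutex g_mutex;
    int g_counter = 0;

    void Increment() {
      leveldb::MutexLock lock(&g_mutex);  // acquires the mutex in the constructor
      ++g_counter;
    }                                     // releases the mutex in the destructor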
diff --git a/src/leveldb/util/no_destructor.h b/src/leveldb/util/no_destructor.h
new file mode 100644
index 0000000000..a0d3b8703d
--- /dev/null
+++ b/src/leveldb/util/no_destructor.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
+#define STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
+
+#include <type_traits>
+#include <utility>
+
+namespace leveldb {
+
+// Wraps an instance whose destructor is never called.
+//
+// This is intended for use with function-level static variables.
+template <typename InstanceType>
+class NoDestructor {
+ public:
+ template <typename... ConstructorArgTypes>
+ explicit NoDestructor(ConstructorArgTypes&&... constructor_args) {
+ static_assert(sizeof(instance_storage_) >= sizeof(InstanceType),
+ "instance_storage_ is not large enough to hold the instance");
+ static_assert(
+ alignof(decltype(instance_storage_)) >= alignof(InstanceType),
+ "instance_storage_ does not meet the instance's alignment requirement");
+ new (&instance_storage_)
+ InstanceType(std::forward<ConstructorArgTypes>(constructor_args)...);
+ }
+
+ ~NoDestructor() = default;
+
+ NoDestructor(const NoDestructor&) = delete;
+ NoDestructor& operator=(const NoDestructor&) = delete;
+
+ InstanceType* get() {
+ return reinterpret_cast<InstanceType*>(&instance_storage_);
+ }
+
+ private:
+ typename std::aligned_storage<sizeof(InstanceType),
+ alignof(InstanceType)>::type instance_storage_;
+};
+
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
diff --git a/src/leveldb/util/no_destructor_test.cc b/src/leveldb/util/no_destructor_test.cc
new file mode 100644
index 0000000000..b41caca694
--- /dev/null
+++ b/src/leveldb/util/no_destructor_test.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <cstdint>
+#include <cstdlib>
+#include <utility>
+
+#include "util/no_destructor.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+namespace {
+
+struct DoNotDestruct {
+ public:
+ DoNotDestruct(uint32_t a, uint64_t b) : a(a), b(b) {}
+ ~DoNotDestruct() { std::abort(); }
+
+ // Used to check constructor argument forwarding.
+ uint32_t a;
+ uint64_t b;
+};
+
+constexpr const uint32_t kGoldenA = 0xdeadbeef;
+constexpr const uint64_t kGoldenB = 0xaabbccddeeffaabb;
+
+} // namespace
+
+class NoDestructorTest {};
+
+TEST(NoDestructorTest, StackInstance) {
+ NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
+ ASSERT_EQ(kGoldenA, instance.get()->a);
+ ASSERT_EQ(kGoldenB, instance.get()->b);
+}
+
+TEST(NoDestructorTest, StaticInstance) {
+ static NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
+ ASSERT_EQ(kGoldenA, instance.get()->a);
+ ASSERT_EQ(kGoldenB, instance.get()->b);
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/options.cc b/src/leveldb/util/options.cc
index b5e6227613..62de5bf0d2 100644
--- a/src/leveldb/util/options.cc
+++ b/src/leveldb/util/options.cc
@@ -9,22 +9,6 @@
namespace leveldb {
-Options::Options()
- : comparator(BytewiseComparator()),
- create_if_missing(false),
- error_if_exists(false),
- paranoid_checks(false),
- env(Env::Default()),
- info_log(NULL),
- write_buffer_size(4<<20),
- max_open_files(1000),
- block_cache(NULL),
- block_size(4096),
- block_restart_interval(16),
- max_file_size(2<<20),
- compression(kSnappyCompression),
- reuse_logs(false),
- filter_policy(NULL) {
-}
+Options::Options() : comparator(BytewiseComparator()), env(Env::Default()) {}
} // namespace leveldb
diff --git a/src/leveldb/util/posix_logger.h b/src/leveldb/util/posix_logger.h
index c063c2b7cb..28e15d10b4 100644
--- a/src/leveldb/util/posix_logger.h
+++ b/src/leveldb/util/posix_logger.h
@@ -3,94 +3,126 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// Logger implementation that can be shared by all environments
-// where enough Posix functionality is available.
+// where enough posix functionality is available.
#ifndef STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
#define STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
-#include <algorithm>
-#include <stdio.h>
#include <sys/time.h>
-#include <time.h>
+
+#include <cassert>
+#include <cstdarg>
+#include <cstdio>
+#include <ctime>
+#include <sstream>
+#include <thread>
+
#include "leveldb/env.h"
namespace leveldb {
-class PosixLogger : public Logger {
- private:
- FILE* file_;
- uint64_t (*gettid_)(); // Return the thread id for the current thread
+class PosixLogger final : public Logger {
public:
- PosixLogger(FILE* f, uint64_t (*gettid)()) : file_(f), gettid_(gettid) { }
- virtual ~PosixLogger() {
- fclose(file_);
- }
- virtual void Logv(const char* format, va_list ap) {
- const uint64_t thread_id = (*gettid_)();
-
- // We try twice: the first time with a fixed-size stack allocated buffer,
- // and the second time with a much larger dynamically allocated buffer.
- char buffer[500];
- for (int iter = 0; iter < 2; iter++) {
- char* base;
- int bufsize;
- if (iter == 0) {
- bufsize = sizeof(buffer);
- base = buffer;
- } else {
- bufsize = 30000;
- base = new char[bufsize];
- }
- char* p = base;
- char* limit = base + bufsize;
-
- struct timeval now_tv;
- gettimeofday(&now_tv, NULL);
- const time_t seconds = now_tv.tv_sec;
- struct tm t;
- localtime_r(&seconds, &t);
- p += snprintf(p, limit - p,
- "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
- t.tm_year + 1900,
- t.tm_mon + 1,
- t.tm_mday,
- t.tm_hour,
- t.tm_min,
- t.tm_sec,
- static_cast<int>(now_tv.tv_usec),
- static_cast<long long unsigned int>(thread_id));
-
- // Print the message
- if (p < limit) {
- va_list backup_ap;
- va_copy(backup_ap, ap);
- p += vsnprintf(p, limit - p, format, backup_ap);
- va_end(backup_ap);
- }
+ // Creates a logger that writes to the given file.
+ //
+ // The PosixLogger instance takes ownership of the file handle.
+ explicit PosixLogger(std::FILE* fp) : fp_(fp) { assert(fp != nullptr); }
+
+ ~PosixLogger() override { std::fclose(fp_); }
+
+ void Logv(const char* format, va_list arguments) override {
+ // Record the time as close to the Logv() call as possible.
+ struct ::timeval now_timeval;
+ ::gettimeofday(&now_timeval, nullptr);
+ const std::time_t now_seconds = now_timeval.tv_sec;
+ struct std::tm now_components;
+ ::localtime_r(&now_seconds, &now_components);
+
+ // Record the thread ID.
+ constexpr const int kMaxThreadIdSize = 32;
+ std::ostringstream thread_stream;
+ thread_stream << std::this_thread::get_id();
+ std::string thread_id = thread_stream.str();
+ if (thread_id.size() > kMaxThreadIdSize) {
+ thread_id.resize(kMaxThreadIdSize);
+ }
- // Truncate to available space if necessary
- if (p >= limit) {
- if (iter == 0) {
- continue; // Try again with larger buffer
- } else {
- p = limit - 1;
+ // We first attempt to print into a stack-allocated buffer. If this attempt
+ // fails, we make a second attempt with a dynamically allocated buffer.
+ constexpr const int kStackBufferSize = 512;
+ char stack_buffer[kStackBufferSize];
+ static_assert(sizeof(stack_buffer) == static_cast<size_t>(kStackBufferSize),
+ "sizeof(char) is expected to be 1 in C++");
+
+ int dynamic_buffer_size = 0; // Computed in the first iteration.
+ for (int iteration = 0; iteration < 2; ++iteration) {
+ const int buffer_size =
+ (iteration == 0) ? kStackBufferSize : dynamic_buffer_size;
+ char* const buffer =
+ (iteration == 0) ? stack_buffer : new char[dynamic_buffer_size];
+
+ // Print the header into the buffer.
+ int buffer_offset = snprintf(
+ buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
+ now_components.tm_year + 1900, now_components.tm_mon + 1,
+ now_components.tm_mday, now_components.tm_hour, now_components.tm_min,
+ now_components.tm_sec, static_cast<int>(now_timeval.tv_usec),
+ thread_id.c_str());
+
+ // The header can be at most 28 characters (10 date + 15 time +
+ // 3 delimiters) plus the thread ID, which should fit comfortably into the
+ // static buffer.
+ assert(buffer_offset <= 28 + kMaxThreadIdSize);
+ static_assert(28 + kMaxThreadIdSize < kStackBufferSize,
+ "stack-allocated buffer may not fit the message header");
+ assert(buffer_offset < buffer_size);
+
+ // Print the message into the buffer.
+ std::va_list arguments_copy;
+ va_copy(arguments_copy, arguments);
+ buffer_offset +=
+ std::vsnprintf(buffer + buffer_offset, buffer_size - buffer_offset,
+ format, arguments_copy);
+ va_end(arguments_copy);
+
+ // The code below may append a newline at the end of the buffer, which
+ // requires an extra character.
+ if (buffer_offset >= buffer_size - 1) {
+ // The message did not fit into the buffer.
+ if (iteration == 0) {
+ // Re-run the loop and use a dynamically-allocated buffer. The buffer
+ // will be large enough for the log message, an extra newline and a
+ // null terminator.
+ dynamic_buffer_size = buffer_offset + 2;
+ continue;
}
+
+ // The dynamically-allocated buffer was incorrectly sized. This should
+ // not happen, assuming a correct implementation of (v)snprintf. Fail
+ // in tests, recover by truncating the log message in production.
+ assert(false);
+ buffer_offset = buffer_size - 1;
}
- // Add newline if necessary
- if (p == base || p[-1] != '\n') {
- *p++ = '\n';
+ // Add a newline if necessary.
+ if (buffer[buffer_offset - 1] != '\n') {
+ buffer[buffer_offset] = '\n';
+ ++buffer_offset;
}
- assert(p <= limit);
- fwrite(base, 1, p - base, file_);
- fflush(file_);
- if (base != buffer) {
- delete[] base;
+ assert(buffer_offset <= buffer_size);
+ std::fwrite(buffer, 1, buffer_offset, fp_);
+ std::fflush(fp_);
+
+ if (iteration != 0) {
+ delete[] buffer;
}
break;
}
}
+
+ private:
+ std::FILE* const fp_;
};
} // namespace leveldb
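A brief sketch of how the rewritten PosixLogger is consumed through the Log() helper declared in leveldb/env.h (the file name and message are illustrative only):

    #include <cstdio>
    #include "leveldb/env.h"
    #include "util/posix_logger.h"

    void LoggerSketch() {
      std::FILE* fp = std::fopen("LOG", "w");
      if (fp == nullptr) return;
      leveldb::Logger* logger = new leveldb::PosixLogger(fp);  // takes ownership of fp
      leveldb::Log(logger, "compaction finished: %d files, %lld bytes", 4, 123456LL);
      delete logger;  // the destructor closes fp
    }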
diff --git a/src/leveldb/util/random.h b/src/leveldb/util/random.h
index ddd51b1c7b..76f7daf52a 100644
--- a/src/leveldb/util/random.h
+++ b/src/leveldb/util/random.h
@@ -15,6 +15,7 @@ namespace leveldb {
class Random {
private:
uint32_t seed_;
+
public:
explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) {
// Avoid bad seeds.
@@ -23,8 +24,8 @@ class Random {
}
}
uint32_t Next() {
- static const uint32_t M = 2147483647L; // 2^31-1
- static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0
+ static const uint32_t M = 2147483647L; // 2^31-1
+ static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0
// We are computing
// seed_ = (seed_ * A) % M, where M = 2^31-1
//
@@ -54,9 +55,7 @@ class Random {
// Skewed: pick "base" uniformly from range [0,max_log] and then
// return "base" random bits. The effect is to pick a number in the
// range [0,2^max_log-1] with exponential bias towards smaller numbers.
- uint32_t Skewed(int max_log) {
- return Uniform(1 << Uniform(max_log + 1));
- }
+ uint32_t Skewed(int max_log) { return Uniform(1 << Uniform(max_log + 1)); }
};
} // namespace leveldb
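For reference, the deterministic PRNG above is typically used like this in the test code (a sketch; the seed 301 matches the default returned by test::RandomSeed() later in this patch):

    #include <cstdio>
    #include "util/random.h"

    void RandomSketch() {
      leveldb::Random rnd(301);
      uint32_t raw = rnd.Next();          // full 31-bit pseudo-random value
      uint32_t small = rnd.Uniform(10);   // uniformly distributed in [0, 10)
      bool rare = rnd.OneIn(100);         // true roughly 1% of the time
      uint32_t skewed = rnd.Skewed(20);   // biased towards smaller numbers
      std::printf("%u %u %d %u\n", raw, small, rare ? 1 : 0, skewed);
    }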
diff --git a/src/leveldb/util/status.cc b/src/leveldb/util/status.cc
index a44f35b314..15ce747d80 100644
--- a/src/leveldb/util/status.cc
+++ b/src/leveldb/util/status.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "leveldb/status.h"
+
#include <stdio.h>
+
#include "port/port.h"
-#include "leveldb/status.h"
namespace leveldb {
@@ -18,8 +20,8 @@ const char* Status::CopyState(const char* state) {
Status::Status(Code code, const Slice& msg, const Slice& msg2) {
assert(code != kOk);
- const uint32_t len1 = msg.size();
- const uint32_t len2 = msg2.size();
+ const uint32_t len1 = static_cast<uint32_t>(msg.size());
+ const uint32_t len2 = static_cast<uint32_t>(msg2.size());
const uint32_t size = len1 + (len2 ? (2 + len2) : 0);
char* result = new char[size + 5];
memcpy(result, &size, sizeof(size));
@@ -34,7 +36,7 @@ Status::Status(Code code, const Slice& msg, const Slice& msg2) {
}
std::string Status::ToString() const {
- if (state_ == NULL) {
+ if (state_ == nullptr) {
return "OK";
} else {
char tmp[30];
@@ -59,8 +61,8 @@ std::string Status::ToString() const {
type = "IO error: ";
break;
default:
- snprintf(tmp, sizeof(tmp), "Unknown code(%d): ",
- static_cast<int>(code()));
+ snprintf(tmp, sizeof(tmp),
+ "Unknown code(%d): ", static_cast<int>(code()));
type = tmp;
break;
}
diff --git a/src/leveldb/util/status_test.cc b/src/leveldb/util/status_test.cc
new file mode 100644
index 0000000000..2842319fbd
--- /dev/null
+++ b/src/leveldb/util/status_test.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <utility>
+
+#include "leveldb/slice.h"
+#include "leveldb/status.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+TEST(Status, MoveConstructor) {
+ {
+ Status ok = Status::OK();
+ Status ok2 = std::move(ok);
+
+ ASSERT_TRUE(ok2.ok());
+ }
+
+ {
+ Status status = Status::NotFound("custom NotFound status message");
+ Status status2 = std::move(status);
+
+ ASSERT_TRUE(status2.IsNotFound());
+ ASSERT_EQ("NotFound: custom NotFound status message", status2.ToString());
+ }
+
+ {
+ Status self_moved = Status::IOError("custom IOError status message");
+
+ // Needed to bypass compiler warning about explicit move-assignment.
+ Status& self_moved_reference = self_moved;
+ self_moved_reference = std::move(self_moved);
+ }
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/testharness.cc b/src/leveldb/util/testharness.cc
index 402fab34d7..318ecfa3b7 100644
--- a/src/leveldb/util/testharness.cc
+++ b/src/leveldb/util/testharness.cc
@@ -4,11 +4,15 @@
#include "util/testharness.h"
-#include <string>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <string>
+#include <vector>
+
+#include "leveldb/env.h"
+
namespace leveldb {
namespace test {
@@ -19,10 +23,10 @@ struct Test {
void (*func)();
};
std::vector<Test>* tests;
-}
+} // namespace
bool RegisterTest(const char* base, const char* name, void (*func)()) {
- if (tests == NULL) {
+ if (tests == nullptr) {
tests = new std::vector<Test>;
}
Test t;
@@ -37,14 +41,14 @@ int RunAllTests() {
const char* matcher = getenv("LEVELDB_TESTS");
int num = 0;
- if (tests != NULL) {
+ if (tests != nullptr) {
for (size_t i = 0; i < tests->size(); i++) {
const Test& t = (*tests)[i];
- if (matcher != NULL) {
+ if (matcher != nullptr) {
std::string name = t.base;
name.push_back('.');
name.append(t.name);
- if (strstr(name.c_str(), matcher) == NULL) {
+ if (strstr(name.c_str(), matcher) == nullptr) {
continue;
}
}
@@ -66,7 +70,7 @@ std::string TmpDir() {
int RandomSeed() {
const char* env = getenv("TEST_RANDOM_SEED");
- int result = (env != NULL ? atoi(env) : 301);
+ int result = (env != nullptr ? atoi(env) : 301);
if (result <= 0) {
result = 301;
}
diff --git a/src/leveldb/util/testharness.h b/src/leveldb/util/testharness.h
index da4fe68bb4..72cd1629eb 100644
--- a/src/leveldb/util/testharness.h
+++ b/src/leveldb/util/testharness.h
@@ -7,10 +7,10 @@
#include <stdio.h>
#include <stdlib.h>
+
#include <sstream>
-#include "leveldb/env.h"
-#include "leveldb/slice.h"
-#include "util/random.h"
+
+#include "leveldb/status.h"
namespace leveldb {
namespace test {
@@ -27,15 +27,15 @@ namespace test {
//
// Returns 0 if all tests pass.
// Dies or returns a non-zero value if some test fails.
-extern int RunAllTests();
+int RunAllTests();
// Return the directory to use for temporary storage.
-extern std::string TmpDir();
+std::string TmpDir();
// Return a randomization seed for this run. Typically returns the
// same number on repeated invocations of this binary, but automated
// runs may be able to vary the seed.
-extern int RandomSeed();
+int RandomSeed();
// An instance of Tester is allocated to hold temporary state during
// the execution of an assertion.
@@ -47,9 +47,7 @@ class Tester {
std::stringstream ss_;
public:
- Tester(const char* f, int l)
- : ok_(true), fname_(f), line_(l) {
- }
+ Tester(const char* f, int l) : ok_(true), fname_(f), line_(l) {}
~Tester() {
if (!ok_) {
@@ -74,14 +72,14 @@ class Tester {
return *this;
}
-#define BINARY_OP(name,op) \
- template <class X, class Y> \
- Tester& name(const X& x, const Y& y) { \
- if (! (x op y)) { \
- ss_ << " failed: " << x << (" " #op " ") << y; \
- ok_ = false; \
- } \
- return *this; \
+#define BINARY_OP(name, op) \
+ template <class X, class Y> \
+ Tester& name(const X& x, const Y& y) { \
+ if (!(x op y)) { \
+ ss_ << " failed: " << x << (" " #op " ") << y; \
+ ok_ = false; \
+ } \
+ return *this; \
}
BINARY_OP(IsEq, ==)
@@ -104,33 +102,38 @@ class Tester {
#define ASSERT_TRUE(c) ::leveldb::test::Tester(__FILE__, __LINE__).Is((c), #c)
#define ASSERT_OK(s) ::leveldb::test::Tester(__FILE__, __LINE__).IsOk((s))
-#define ASSERT_EQ(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a),(b))
-#define ASSERT_NE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a),(b))
-#define ASSERT_GE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a),(b))
-#define ASSERT_GT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a),(b))
-#define ASSERT_LE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a),(b))
-#define ASSERT_LT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a),(b))
-
-#define TCONCAT(a,b) TCONCAT1(a,b)
-#define TCONCAT1(a,b) a##b
-
-#define TEST(base,name) \
-class TCONCAT(_Test_,name) : public base { \
- public: \
- void _Run(); \
- static void _RunIt() { \
- TCONCAT(_Test_,name) t; \
- t._Run(); \
- } \
-}; \
-bool TCONCAT(_Test_ignored_,name) = \
- ::leveldb::test::RegisterTest(#base, #name, &TCONCAT(_Test_,name)::_RunIt); \
-void TCONCAT(_Test_,name)::_Run()
+#define ASSERT_EQ(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a), (b))
+#define ASSERT_NE(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a), (b))
+#define ASSERT_GE(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a), (b))
+#define ASSERT_GT(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a), (b))
+#define ASSERT_LE(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a), (b))
+#define ASSERT_LT(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a), (b))
+
+#define TCONCAT(a, b) TCONCAT1(a, b)
+#define TCONCAT1(a, b) a##b
+
+#define TEST(base, name) \
+ class TCONCAT(_Test_, name) : public base { \
+ public: \
+ void _Run(); \
+ static void _RunIt() { \
+ TCONCAT(_Test_, name) t; \
+ t._Run(); \
+ } \
+ }; \
+ bool TCONCAT(_Test_ignored_, name) = ::leveldb::test::RegisterTest( \
+ #base, #name, &TCONCAT(_Test_, name)::_RunIt); \
+ void TCONCAT(_Test_, name)::_Run()
// Register the specified test. Typically not used directly, but
// invoked via the macro expansion of TEST.
-extern bool RegisterTest(const char* base, const char* name, void (*func)());
-
+bool RegisterTest(const char* base, const char* name, void (*func)());
} // namespace test
} // namespace leveldb
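For reference, a test written against this harness registers itself at static-initialization time through the reformatted TEST macro above. Expanding TEST(Status, MoveConstructor) from status_test.cc by hand (a sketch derived directly from the macro definition in this hunk, nothing beyond it) gives:

    class _Test_MoveConstructor : public Status {
     public:
      void _Run();
      static void _RunIt() {
        _Test_MoveConstructor t;
        t._Run();
      }
    };
    bool _Test_ignored_MoveConstructor = ::leveldb::test::RegisterTest(
        "Status", "MoveConstructor", &_Test_MoveConstructor::_RunIt);
    void _Test_MoveConstructor::_Run() { /* test body from status_test.cc */ }

RunAllTests() then walks the registered vector and invokes each _RunIt thunk, optionally filtered by the LEVELDB_TESTS environment variable as shown in testharness.cc.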
diff --git a/src/leveldb/util/testutil.cc b/src/leveldb/util/testutil.cc
index bee56bf75f..6b151b9e64 100644
--- a/src/leveldb/util/testutil.cc
+++ b/src/leveldb/util/testutil.cc
@@ -12,7 +12,7 @@ namespace test {
Slice RandomString(Random* rnd, int len, std::string* dst) {
dst->resize(len);
for (int i = 0; i < len; i++) {
- (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95)); // ' ' .. '~'
+ (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95)); // ' ' .. '~'
}
return Slice(*dst);
}
@@ -20,9 +20,8 @@ Slice RandomString(Random* rnd, int len, std::string* dst) {
std::string RandomKey(Random* rnd, int len) {
// Make sure to generate a wide variety of characters so we
// test the boundary conditions for short-key optimizations.
- static const char kTestChars[] = {
- '\0', '\1', 'a', 'b', 'c', 'd', 'e', '\xfd', '\xfe', '\xff'
- };
+ static const char kTestChars[] = {'\0', '\1', 'a', 'b', 'c',
+ 'd', 'e', '\xfd', '\xfe', '\xff'};
std::string result;
for (int i = 0; i < len; i++) {
result += kTestChars[rnd->Uniform(sizeof(kTestChars))];
@@ -30,9 +29,8 @@ std::string RandomKey(Random* rnd, int len) {
return result;
}
-
-extern Slice CompressibleString(Random* rnd, double compressed_fraction,
- size_t len, std::string* dst) {
+Slice CompressibleString(Random* rnd, double compressed_fraction, size_t len,
+ std::string* dst) {
int raw = static_cast<int>(len * compressed_fraction);
if (raw < 1) raw = 1;
std::string raw_data;
diff --git a/src/leveldb/util/testutil.h b/src/leveldb/util/testutil.h
index d7e4583702..bb4051ba07 100644
--- a/src/leveldb/util/testutil.h
+++ b/src/leveldb/util/testutil.h
@@ -5,6 +5,7 @@
#ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_
#define STORAGE_LEVELDB_UTIL_TESTUTIL_H_
+#include "helpers/memenv/memenv.h"
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "util/random.h"
@@ -14,17 +15,17 @@ namespace test {
// Store in *dst a random string of length "len" and return a Slice that
// references the generated data.
-extern Slice RandomString(Random* rnd, int len, std::string* dst);
+Slice RandomString(Random* rnd, int len, std::string* dst);
// Return a random key with the specified length that may contain interesting
// characters (e.g. \x00, \xff, etc.).
-extern std::string RandomKey(Random* rnd, int len);
+std::string RandomKey(Random* rnd, int len);
// Store in *dst a string of length "len" that will compress to
// "N*compressed_fraction" bytes and return a Slice that references
// the generated data.
-extern Slice CompressibleString(Random* rnd, double compressed_fraction,
- size_t len, std::string* dst);
+Slice CompressibleString(Random* rnd, double compressed_fraction, size_t len,
+ std::string* dst);
// A wrapper that allows injection of errors.
class ErrorEnv : public EnvWrapper {
@@ -32,25 +33,27 @@ class ErrorEnv : public EnvWrapper {
bool writable_file_error_;
int num_writable_file_errors_;
- ErrorEnv() : EnvWrapper(Env::Default()),
- writable_file_error_(false),
- num_writable_file_errors_(0) { }
+ ErrorEnv()
+ : EnvWrapper(NewMemEnv(Env::Default())),
+ writable_file_error_(false),
+ num_writable_file_errors_(0) {}
+ ~ErrorEnv() override { delete target(); }
- virtual Status NewWritableFile(const std::string& fname,
- WritableFile** result) {
+ Status NewWritableFile(const std::string& fname,
+ WritableFile** result) override {
if (writable_file_error_) {
++num_writable_file_errors_;
- *result = NULL;
+ *result = nullptr;
return Status::IOError(fname, "fake error");
}
return target()->NewWritableFile(fname, result);
}
- virtual Status NewAppendableFile(const std::string& fname,
- WritableFile** result) {
+ Status NewAppendableFile(const std::string& fname,
+ WritableFile** result) override {
if (writable_file_error_) {
++num_writable_file_errors_;
- *result = NULL;
+ *result = nullptr;
return Status::IOError(fname, "fake error");
}
return target()->NewAppendableFile(fname, result);
diff --git a/src/leveldb/util/windows_logger.h b/src/leveldb/util/windows_logger.h
new file mode 100644
index 0000000000..92960638d1
--- /dev/null
+++ b/src/leveldb/util/windows_logger.h
@@ -0,0 +1,124 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// Logger implementation for the Windows platform.
+
+#ifndef STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_
+#define STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_
+
+#include <cassert>
+#include <cstdarg>
+#include <cstdio>
+#include <ctime>
+#include <sstream>
+#include <thread>
+
+#include "leveldb/env.h"
+
+namespace leveldb {
+
+class WindowsLogger final : public Logger {
+ public:
+ // Creates a logger that writes to the given file.
+ //
+ // The WindowsLogger instance takes ownership of the file handle.
+ explicit WindowsLogger(std::FILE* fp) : fp_(fp) { assert(fp != nullptr); }
+
+ ~WindowsLogger() override { std::fclose(fp_); }
+
+ void Logv(const char* format, va_list arguments) override {
+ // Record the time as close to the Logv() call as possible.
+ SYSTEMTIME now_components;
+ ::GetLocalTime(&now_components);
+
+ // Record the thread ID.
+ constexpr const int kMaxThreadIdSize = 32;
+ std::ostringstream thread_stream;
+ thread_stream << std::this_thread::get_id();
+ std::string thread_id = thread_stream.str();
+ if (thread_id.size() > kMaxThreadIdSize) {
+ thread_id.resize(kMaxThreadIdSize);
+ }
+
+ // We first attempt to print into a stack-allocated buffer. If this attempt
+ // fails, we make a second attempt with a dynamically allocated buffer.
+ constexpr const int kStackBufferSize = 512;
+ char stack_buffer[kStackBufferSize];
+ static_assert(sizeof(stack_buffer) == static_cast<size_t>(kStackBufferSize),
+ "sizeof(char) is expected to be 1 in C++");
+
+ int dynamic_buffer_size = 0; // Computed in the first iteration.
+ for (int iteration = 0; iteration < 2; ++iteration) {
+ const int buffer_size =
+ (iteration == 0) ? kStackBufferSize : dynamic_buffer_size;
+ char* const buffer =
+ (iteration == 0) ? stack_buffer : new char[dynamic_buffer_size];
+
+ // Print the header into the buffer.
+ int buffer_offset = snprintf(
+ buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
+ now_components.wYear, now_components.wMonth, now_components.wDay,
+ now_components.wHour, now_components.wMinute, now_components.wSecond,
+ static_cast<int>(now_components.wMilliseconds * 1000),
+ thread_id.c_str());
+
+ // The header can be at most 28 characters (10 date + 15 time +
+ // 3 delimiters) plus the thread ID, which should fit comfortably into the
+ // static buffer.
+ assert(buffer_offset <= 28 + kMaxThreadIdSize);
+ static_assert(28 + kMaxThreadIdSize < kStackBufferSize,
+ "stack-allocated buffer may not fit the message header");
+ assert(buffer_offset < buffer_size);
+
+ // Print the message into the buffer.
+ std::va_list arguments_copy;
+ va_copy(arguments_copy, arguments);
+ buffer_offset +=
+ std::vsnprintf(buffer + buffer_offset, buffer_size - buffer_offset,
+ format, arguments_copy);
+ va_end(arguments_copy);
+
+ // The code below may append a newline at the end of the buffer, which
+ // requires an extra character.
+ if (buffer_offset >= buffer_size - 1) {
+ // The message did not fit into the buffer.
+ if (iteration == 0) {
+ // Re-run the loop and use a dynamically-allocated buffer. The buffer
+ // will be large enough for the log message, an extra newline and a
+ // null terminator.
+ dynamic_buffer_size = buffer_offset + 2;
+ continue;
+ }
+
+ // The dynamically-allocated buffer was incorrectly sized. This should
+ // not happen, assuming a correct implementation of (v)snprintf. Fail
+ // in tests, recover by truncating the log message in production.
+ assert(false);
+ buffer_offset = buffer_size - 1;
+ }
+
+ // Add a newline if necessary.
+ if (buffer[buffer_offset - 1] != '\n') {
+ buffer[buffer_offset] = '\n';
+ ++buffer_offset;
+ }
+
+ assert(buffer_offset <= buffer_size);
+ std::fwrite(buffer, 1, buffer_offset, fp_);
+ std::fflush(fp_);
+
+ if (iteration != 0) {
+ delete[] buffer;
+ }
+ break;
+ }
+ }
+
+ private:
+ std::FILE* const fp_;
+};
+
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_
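The Logv() implementation above uses a two-pass formatting pattern: print into a fixed stack buffer first, and only heap-allocate an exactly-sized buffer when the first vsnprintf pass reports truncation. A stripped-down sketch of just that pattern, independent of the Logger interface (assumes nothing beyond the C++ standard library):

    #include <cstdarg>
    #include <cstdio>
    #include <string>

    std::string FormatWithFallback(const char* format, ...) {
      char stack_buffer[512];
      std::va_list args;
      va_start(args, format);
      std::va_list args_copy;
      va_copy(args_copy, args);                     // keep a copy for the second pass
      int needed = std::vsnprintf(stack_buffer, sizeof(stack_buffer), format, args);
      std::string result;
      if (needed >= 0 && static_cast<size_t>(needed) < sizeof(stack_buffer)) {
        result.assign(stack_buffer, needed);        // first pass fit
      } else if (needed >= 0) {
        result.resize(needed + 1);                  // room for the terminating NUL
        std::vsnprintf(&result[0], result.size(), format, args_copy);
        result.resize(needed);                      // drop the NUL again
      }
      va_end(args_copy);
      va_end(args);
      return result;
    }

The logger additionally appends a trailing newline and reuses one loop so both passes share the header formatting, but the buffer-sizing idea is the same.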
diff --git a/src/logging.cpp b/src/logging.cpp
index 30862e5cbc..6fd916b603 100644
--- a/src/logging.cpp
+++ b/src/logging.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -162,6 +162,7 @@ const CLogCategoryDesc LogCategories[] =
{BCLog::COINDB, "coindb"},
{BCLog::QT, "qt"},
{BCLog::LEVELDB, "leveldb"},
+ {BCLog::VALIDATION, "validation"},
{BCLog::ALL, "1"},
{BCLog::ALL, "all"},
};
diff --git a/src/logging.h b/src/logging.h
index 5d91ec2135..b2fde1b9ea 100644
--- a/src/logging.h
+++ b/src/logging.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -54,6 +54,7 @@ namespace BCLog {
COINDB = (1 << 18),
QT = (1 << 19),
LEVELDB = (1 << 20),
+ VALIDATION = (1 << 21),
ALL = ~(uint32_t)0,
};
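With the category registered in both LogCategories and the BCLog::LogFlags enum, callers can gate output on it exactly like the existing categories. A minimal usage sketch (the variables in the format call are illustrative placeholders, not part of this diff):

    // Printed only when the node runs with -debug=validation (or -debug=1 / -debug=all).
    LogPrint(BCLog::VALIDATION, "block %s connected in %.2fms\n",
             block_hash.ToString(), elapsed_ms);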
diff --git a/src/logging/timer.h b/src/logging/timer.h
index 45bfc4aa65..2b27c71080 100644
--- a/src/logging/timer.h
+++ b/src/logging/timer.h
@@ -85,7 +85,7 @@ private:
const std::string m_title{};
//! Forwarded on to LogPrint if specified - has the effect of only
- //! outputing the timing log when a particular debug= category is specified.
+ //! outputting the timing log when a particular debug= category is specified.
const BCLog::LogFlags m_log_category{};
};
diff --git a/src/memusage.h b/src/memusage.h
index 3ae9face15..24eb450465 100644
--- a/src/memusage.h
+++ b/src/memusage.h
@@ -6,9 +6,11 @@
#define BITCOIN_MEMUSAGE_H
#include <indirectmap.h>
+#include <prevector.h>
#include <stdlib.h>
+#include <cassert>
#include <map>
#include <memory>
#include <set>
diff --git a/src/miner.cpp b/src/miner.cpp
index 6f4e10b6ed..61d27d17c1 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -20,7 +20,6 @@
#include <timedata.h>
#include <util/moneystr.h>
#include <util/system.h>
-#include <util/validation.h>
#include <algorithm>
#include <utility>
@@ -167,7 +166,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
BlockValidationState state;
if (!TestBlockValidity(state, chainparams, *pblock, pindexPrev, false, false)) {
- throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, FormatStateMessage(state)));
+ throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, state.ToString()));
}
int64_t nTime2 = GetTimeMicros();
diff --git a/src/net.cpp b/src/net.cpp
index 99dae88bab..d156450394 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -85,7 +85,7 @@ static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL; // S
bool fDiscover = true;
bool fListen = true;
bool g_relay_txes = !DEFAULT_BLOCKSONLY;
-CCriticalSection cs_mapLocalHost;
+RecursiveMutex cs_mapLocalHost;
std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(cs_mapLocalHost);
static bool vfLimited[NET_MAX] GUARDED_BY(cs_mapLocalHost) = {};
std::string strSubVersion;
@@ -410,7 +410,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
if (hSocket == INVALID_SOCKET) {
return nullptr;
}
- connected = ConnectThroughProxy(proxy, addrConnect.ToStringIP(), addrConnect.GetPort(), hSocket, nConnectTimeout, &proxyConnectionFailed);
+ connected = ConnectThroughProxy(proxy, addrConnect.ToStringIP(), addrConnect.GetPort(), hSocket, nConnectTimeout, proxyConnectionFailed);
} else {
// no proxy needed (none set for target network)
hSocket = CreateSocket(addrConnect);
@@ -432,7 +432,8 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
std::string host;
int port = default_port;
SplitHostPort(std::string(pszDest), port, host);
- connected = ConnectThroughProxy(proxy, host, port, hSocket, nConnectTimeout, nullptr);
+ bool proxyConnectionFailed;
+ connected = ConnectThroughProxy(proxy, host, port, hSocket, nConnectTimeout, proxyConnectionFailed);
}
if (!connected) {
CloseSocket(hSocket);
@@ -497,12 +498,13 @@ void CNode::SetAddrLocal(const CService& addrLocalIn) {
#undef X
#define X(name) stats.name = name
-void CNode::copyStats(CNodeStats &stats)
+void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap)
{
stats.nodeid = this->GetId();
X(nServices);
X(addr);
X(addrBind);
+ stats.m_mapped_as = addr.GetMappedAS(m_asmap);
if (m_tx_relay != nullptr) {
LOCK(m_tx_relay->cs_filter);
stats.fRelayTxes = m_tx_relay->fRelayTxes;
@@ -553,9 +555,9 @@ void CNode::copyStats(CNodeStats &stats)
}
// Raw ping time is in microseconds, but show it to user as whole seconds (Bitcoin users should be well used to small numbers with many decimal places by now :)
- stats.dPingTime = (((double)nPingUsecTime) / 1e6);
- stats.dMinPing = (((double)nMinPingUsecTime) / 1e6);
- stats.dPingWait = (((double)nPingUsecWait) / 1e6);
+ stats.m_ping_usec = nPingUsecTime;
+ stats.m_min_ping_usec = nMinPingUsecTime;
+ stats.m_ping_wait_usec = nPingUsecWait;
// Leave string empty if addrLocal invalid (not filled in yet)
CService addrLocalUnlocked = GetAddrLocal();
@@ -716,6 +718,19 @@ CNetMessage V1TransportDeserializer::GetMessage(const CMessageHeader::MessageSta
return msg;
}
+void V1TransportSerializer::prepareForTransport(CSerializedNetMsg& msg, std::vector<unsigned char>& header) {
+ // create dbl-sha256 checksum
+ uint256 hash = Hash(msg.data.begin(), msg.data.end());
+
+ // create header
+ CMessageHeader hdr(Params().MessageStart(), msg.command.c_str(), msg.data.size());
+ memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE);
+
+ // serialize header
+ header.reserve(CMessageHeader::HEADER_SIZE);
+ CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, header, 0, hdr};
+}
+
size_t CConnman::SocketSendData(CNode *pnode) const EXCLUSIVE_LOCKS_REQUIRED(pnode->cs_vSend)
{
auto it = pnode->vSendMsg.begin();
@@ -1609,7 +1624,7 @@ void CConnman::ThreadDNSAddressSeed()
continue;
}
unsigned int nMaxIPs = 256; // Limits number of IPs learned from a DNS seed
- if (LookupHost(host.c_str(), vIPs, nMaxIPs, true)) {
+ if (LookupHost(host, vIPs, nMaxIPs, true)) {
for (const CNetAddr& ip : vIPs) {
int nOneDay = 24*3600;
CAddress addr = CAddress(CService(ip, Params().GetDefaultPort()), requiredServiceBits);
@@ -1768,7 +1783,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// but inbound and addnode peers do not use our outbound slots. Inbound peers
// also have the added issue that they're attacker controlled and could be used
// to prevent us from connecting to particular hosts if we used them here.
- setConnected.insert(pnode->addr.GetGroup());
+ setConnected.insert(pnode->addr.GetGroup(addrman.m_asmap));
if (pnode->m_tx_relay == nullptr) {
nOutboundBlockRelay++;
} else if (!pnode->fFeeler) {
@@ -1816,7 +1831,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
}
// Require outbound connections, other than feelers, to be to distinct network groups
- if (!fFeeler && setConnected.count(addr.GetGroup())) {
+ if (!fFeeler && setConnected.count(addr.GetGroup(addrman.m_asmap))) {
break;
}
@@ -1907,7 +1922,7 @@ std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo()
}
for (const std::string& strAddNode : lAddresses) {
- CService service(LookupNumeric(strAddNode.c_str(), Params().GetDefaultPort()));
+ CService service(LookupNumeric(strAddNode, Params().GetDefaultPort()));
AddedNodeInfo addedNode{strAddNode, CService(), false, false};
if (service.IsValid()) {
// strAddNode is an IP:port
@@ -2501,7 +2516,7 @@ void CConnman::GetNodeStats(std::vector<CNodeStats>& vstats)
vstats.reserve(vNodes.size());
for (CNode* pnode : vNodes) {
vstats.emplace_back();
- pnode->copyStats(vstats.back());
+ pnode->copyStats(vstats.back(), addrman.m_asmap);
}
}
@@ -2703,6 +2718,7 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn
}
m_deserializer = MakeUnique<V1TransportDeserializer>(V1TransportDeserializer(Params().MessageStart(), SER_NETWORK, INIT_PROTO_VERSION));
+ m_serializer = MakeUnique<V1TransportSerializer>(V1TransportSerializer());
}
CNode::~CNode()
@@ -2718,16 +2734,12 @@ bool CConnman::NodeFullyConnected(const CNode* pnode)
void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg)
{
size_t nMessageSize = msg.data.size();
- size_t nTotalSize = nMessageSize + CMessageHeader::HEADER_SIZE;
LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.command), nMessageSize, pnode->GetId());
+ // make sure we use the appropriate network transport format
std::vector<unsigned char> serializedHeader;
- serializedHeader.reserve(CMessageHeader::HEADER_SIZE);
- uint256 hash = Hash(msg.data.data(), msg.data.data() + nMessageSize);
- CMessageHeader hdr(Params().MessageStart(), msg.command.c_str(), nMessageSize);
- memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE);
-
- CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, serializedHeader, 0, hdr};
+ pnode->m_serializer->prepareForTransport(msg, serializedHeader);
+ size_t nTotalSize = nMessageSize + serializedHeader.size();
size_t nBytesSent = 0;
{
@@ -2788,7 +2800,7 @@ CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const
uint64_t CConnman::CalculateKeyedNetGroup(const CAddress& ad) const
{
- std::vector<unsigned char> vchNetGroup(ad.GetGroup());
+ std::vector<unsigned char> vchNetGroup(ad.GetGroup(addrman.m_asmap));
return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP).Write(vchNetGroup.data(), vchNetGroup.size()).Finalize();
}
diff --git a/src/net.h b/src/net.h
index 9cd3b769ec..975d7f15d7 100644
--- a/src/net.h
+++ b/src/net.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -153,6 +153,7 @@ public:
bool m_use_addrman_outgoing = true;
std::vector<std::string> m_specified_outgoing;
std::vector<std::string> m_added_nodes;
+ std::vector<bool> m_asmap;
};
void Init(const Options& connOptions) {
@@ -330,6 +331,8 @@ public:
*/
int64_t PoissonNextSendInbound(int64_t now, int average_interval_seconds);
+ void SetAsmap(std::vector<bool> asmap) { addrman.m_asmap = std::move(asmap); }
+
private:
struct ListenSocket {
public:
@@ -384,10 +387,10 @@ private:
static bool NodeFullyConnected(const CNode* pnode);
// Network usage totals
- CCriticalSection cs_totalBytesRecv;
- CCriticalSection cs_totalBytesSent;
- uint64_t nTotalBytesRecv GUARDED_BY(cs_totalBytesRecv);
- uint64_t nTotalBytesSent GUARDED_BY(cs_totalBytesSent);
+ RecursiveMutex cs_totalBytesRecv;
+ RecursiveMutex cs_totalBytesSent;
+ uint64_t nTotalBytesRecv GUARDED_BY(cs_totalBytesRecv) {0};
+ uint64_t nTotalBytesSent GUARDED_BY(cs_totalBytesSent) {0};
// outbound limit & stats
uint64_t nMaxOutboundTotalBytesSentInCycle GUARDED_BY(cs_totalBytesSent);
@@ -410,12 +413,12 @@ private:
bool fAddressesInitialized{false};
CAddrMan addrman;
std::deque<std::string> vOneShots GUARDED_BY(cs_vOneShots);
- CCriticalSection cs_vOneShots;
+ RecursiveMutex cs_vOneShots;
std::vector<std::string> vAddedNodes GUARDED_BY(cs_vAddedNodes);
- CCriticalSection cs_vAddedNodes;
+ RecursiveMutex cs_vAddedNodes;
std::vector<CNode*> vNodes GUARDED_BY(cs_vNodes);
std::list<CNode*> vNodesDisconnected;
- mutable CCriticalSection cs_vNodes;
+ mutable RecursiveMutex cs_vNodes;
std::atomic<NodeId> nLastNodeId{0};
unsigned int nPrevNodeCount{0};
@@ -565,7 +568,7 @@ struct LocalServiceInfo {
int nPort;
};
-extern CCriticalSection cs_mapLocalHost;
+extern RecursiveMutex cs_mapLocalHost;
extern std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(cs_mapLocalHost);
extern const std::string NET_MESSAGE_COMMAND_OTHER;
@@ -593,9 +596,9 @@ public:
mapMsgCmdSize mapRecvBytesPerMsgCmd;
NetPermissionFlags m_permissionFlags;
bool m_legacyWhitelisted;
- double dPingTime;
- double dPingWait;
- double dMinPing;
+ int64_t m_ping_usec;
+ int64_t m_ping_wait_usec;
+ int64_t m_min_ping_usec;
CAmount minFeeFilter;
// Our address, as reported by the peer
std::string addrLocal;
@@ -603,6 +606,7 @@ public:
CAddress addr;
// Bind address of our side of the connection
CAddress addrBind;
+ uint32_t m_mapped_as;
};
@@ -699,12 +703,27 @@ public:
CNetMessage GetMessage(const CMessageHeader::MessageStartChars& message_start, int64_t time) override;
};
+/** The TransportSerializer prepares messages for the network transport
+ */
+class TransportSerializer {
+public:
+ // prepare message for transport (header construction, error-correction computation, payload encryption, etc.)
+ virtual void prepareForTransport(CSerializedNetMsg& msg, std::vector<unsigned char>& header) = 0;
+ virtual ~TransportSerializer() {}
+};
+
+class V1TransportSerializer : public TransportSerializer {
+public:
+ void prepareForTransport(CSerializedNetMsg& msg, std::vector<unsigned char>& header) override;
+};
+
/** Information about a peer */
class CNode
{
friend class CConnman;
public:
std::unique_ptr<TransportDeserializer> m_deserializer;
+ std::unique_ptr<TransportSerializer> m_serializer;
// socket
std::atomic<ServiceFlags> nServices{NODE_NONE};
@@ -713,15 +732,15 @@ public:
size_t nSendOffset{0}; // offset inside the first vSendMsg already sent
uint64_t nSendBytes GUARDED_BY(cs_vSend){0};
std::deque<std::vector<unsigned char>> vSendMsg GUARDED_BY(cs_vSend);
- CCriticalSection cs_vSend;
- CCriticalSection cs_hSocket;
- CCriticalSection cs_vRecv;
+ RecursiveMutex cs_vSend;
+ RecursiveMutex cs_hSocket;
+ RecursiveMutex cs_vRecv;
- CCriticalSection cs_vProcessMsg;
+ RecursiveMutex cs_vProcessMsg;
std::list<CNetMessage> vProcessMsg GUARDED_BY(cs_vProcessMsg);
size_t nProcessQueueSize{0};
- CCriticalSection cs_sendProcessing;
+ RecursiveMutex cs_sendProcessing;
std::deque<CInv> vRecvGetData;
uint64_t nRecvBytes GUARDED_BY(cs_vRecv){0};
@@ -787,11 +806,11 @@ public:
// There is no final sorting before sending, as they are always sent immediately
// and in the order requested.
std::vector<uint256> vInventoryBlockToSend GUARDED_BY(cs_inventory);
- CCriticalSection cs_inventory;
+ RecursiveMutex cs_inventory;
struct TxRelay {
TxRelay() { pfilter = MakeUnique<CBloomFilter>(); }
- mutable CCriticalSection cs_filter;
+ mutable RecursiveMutex cs_filter;
// We use fRelayTxes for two purposes -
// a) it allows us to not relay tx invs before receiving the peer's version message
// b) the peer may tell us in its version message that we should not relay tx invs
@@ -799,7 +818,7 @@ public:
bool fRelayTxes GUARDED_BY(cs_filter){false};
std::unique_ptr<CBloomFilter> pfilter PT_GUARDED_BY(cs_filter) GUARDED_BY(cs_filter);
- mutable CCriticalSection cs_tx_inventory;
+ mutable RecursiveMutex cs_tx_inventory;
CRollingBloomFilter filterInventoryKnown GUARDED_BY(cs_tx_inventory){50000, 0.000001};
// Set of transaction ids we still have to announce.
// They are sorted by the mempool before relay, so the order is not important.
@@ -810,7 +829,7 @@ public:
std::atomic<std::chrono::seconds> m_last_mempool_req{std::chrono::seconds{0}};
std::chrono::microseconds nNextInvSend{0};
- CCriticalSection cs_feeFilter;
+ RecursiveMutex cs_feeFilter;
// Minimum fee rate with which to filter inv's to this node
CAmount minFeeFilter GUARDED_BY(cs_feeFilter){0};
CAmount lastSentFeeFilter{0};
@@ -872,12 +891,12 @@ private:
NetPermissionFlags m_permissionFlags{ PF_NONE };
std::list<CNetMessage> vRecvMsg; // Used only by SocketHandler thread
- mutable CCriticalSection cs_addrName;
+ mutable RecursiveMutex cs_addrName;
std::string addrName GUARDED_BY(cs_addrName);
// Our address, as reported by the peer
CService addrLocal GUARDED_BY(cs_addrLocal);
- mutable CCriticalSection cs_addrLocal;
+ mutable RecursiveMutex cs_addrLocal;
public:
NodeId GetId() const {
@@ -979,7 +998,7 @@ public:
void CloseSocketDisconnect();
- void copyStats(CNodeStats &stats);
+ void copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap);
ServiceFlags GetLocalServices() const
{
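The TransportSerializer interface introduced here is what makes per-peer message framing pluggable; CNode stores a unique_ptr to it and net.cpp installs V1TransportSerializer in the constructor. Purely to illustrate the extension point, a hypothetical alternative implementation could look like this (the class and its framing are invented for the example and are not part of this change):

    class LengthPrefixedSerializer : public TransportSerializer {
    public:
        void prepareForTransport(CSerializedNetMsg& msg, std::vector<unsigned char>& header) override
        {
            // Toy framing: 4-byte little-endian payload length, no command or checksum.
            const uint32_t len = static_cast<uint32_t>(msg.data.size());
            header = {static_cast<unsigned char>(len & 0xff),
                      static_cast<unsigned char>((len >> 8) & 0xff),
                      static_cast<unsigned char>((len >> 16) & 0xff),
                      static_cast<unsigned char>((len >> 24) & 0xff)};
        }
    };

Because PushMessage() asks the serializer for the header and sums the sizes afterwards, no other call site would need to change.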
diff --git a/src/net_permissions.cpp b/src/net_permissions.cpp
index c3947173de..22fa5ee73b 100644
--- a/src/net_permissions.cpp
+++ b/src/net_permissions.cpp
@@ -71,7 +71,7 @@ bool NetWhitebindPermissions::TryParse(const std::string str, NetWhitebindPermis
const std::string strBind = str.substr(offset);
CService addrBind;
- if (!Lookup(strBind.c_str(), addrBind, 0, false)) {
+ if (!Lookup(strBind, addrBind, 0, false)) {
error = ResolveErrMsg("whitebind", strBind);
return false;
}
@@ -94,7 +94,7 @@ bool NetWhitelistPermissions::TryParse(const std::string str, NetWhitelistPermis
const std::string net = str.substr(offset);
CSubNet subnet;
- LookupSubNet(net.c_str(), subnet);
+ LookupSubNet(net, subnet);
if (!subnet.IsValid()) {
error = strprintf(_("Invalid netmask specified in -whitelist: '%s'").translated, net);
return false;
diff --git a/src/net_permissions.h b/src/net_permissions.h
index a06d2f544d..ad74848347 100644
--- a/src/net_permissions.h
+++ b/src/net_permissions.h
@@ -15,7 +15,7 @@ enum NetPermissionFlags
PF_BLOOMFILTER = (1U << 1),
// Relay and accept transactions from this peer, even if -blocksonly is true
PF_RELAY = (1U << 3),
- // Always relay transactions from this peer, even if already in mempool or rejected from policy
+ // Always relay transactions from this peer, even if already in mempool
// Keep parameter interaction: forcerelay implies relay
PF_FORCERELAY = (1U << 2) | PF_RELAY,
// Can't be banned for misbehavior
@@ -59,4 +59,4 @@ public:
CSubNet m_subnet;
};
-#endif // BITCOIN_NET_PERMISSIONS_H \ No newline at end of file
+#endif // BITCOIN_NET_PERMISSIONS_H
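Because PF_FORCERELAY is defined as (1U << 2) | PF_RELAY, granting forcerelay automatically grants relay, so the parameter interaction mentioned in the comment needs no extra code. A one-line illustration of the bit arithmetic (assuming the usual (flags & f) == f style permission check):

    static_assert((PF_FORCERELAY & PF_RELAY) == PF_RELAY, "forcerelay must imply relay");
    // So any peer for which HasPermission(PF_FORCERELAY) holds also satisfies HasPermission(PF_RELAY).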
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 783404bcec..e4df9fb90e 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -26,7 +26,6 @@
#include <txmempool.h>
#include <util/system.h>
#include <util/strencodings.h>
-#include <util/validation.h>
#include <memory>
#include <typeinfo>
@@ -90,7 +89,7 @@ struct COrphanTx {
int64_t nTimeExpire;
size_t list_pos;
};
-CCriticalSection g_cs_orphans;
+RecursiveMutex g_cs_orphans;
std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
void EraseOrphansFor(NodeId peer);
@@ -148,6 +147,14 @@ namespace {
std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
+ /*
+ * Filter for transactions that have been recently confirmed.
+ * We use this to avoid requesting transactions that have already been
+ * confirmed.
+ */
+ RecursiveMutex g_cs_recent_confirmed_transactions;
+ std::unique_ptr<CRollingBloomFilter> g_recent_confirmed_transactions GUARDED_BY(g_cs_recent_confirmed_transactions);
+
/** Blocks that are in flight, and that are in the queue to be downloaded. */
struct QueuedBlock {
uint256 hash;
@@ -979,18 +986,9 @@ void Misbehaving(NodeId pnode, int howmuch, const std::string& message) EXCLUSIV
}
/**
- * Returns true if the given validation state result may result in a peer
- * banning/disconnecting us. We use this to determine which unaccepted
- * transactions from a whitelisted peer that we can safely relay.
- */
-static bool TxRelayMayResultInDisconnect(const TxValidationState& state) {
- return state.GetResult() == TxValidationResult::TX_CONSENSUS;
-}
-
-/**
* Potentially ban a node based on the contents of a BlockValidationState object
*
- * @param[in] via_compact_block: this bool is passed in because net_processing should
+ * @param[in] via_compact_block this bool is passed in because net_processing should
* punish peers differently depending on whether the data was provided in a compact
* block message or not. If the compact block had a valid header, but contained invalid
* txs, the peer should not be punished. See BIP 152.
@@ -1056,10 +1054,9 @@ static bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& s
* Potentially ban a node based on the contents of a TxValidationState object
*
* @return Returns true if the peer was punished (probably disconnected)
- *
- * Changes here may need to be reflected in TxRelayMayResultInDisconnect().
*/
-static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = "") {
+static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = "")
+{
switch (state.GetResult()) {
case TxValidationResult::TX_RESULT_UNSET:
break;
@@ -1087,11 +1084,6 @@ static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state,
}
-
-
-
-
-
//////////////////////////////////////////////////////////////////////////////
//
// blockchain -> download logic notification
@@ -1116,6 +1108,16 @@ PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CS
// Initialize global variables that cannot be constructed at startup.
recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
+ // Blocks don't typically have more than 4000 transactions, so this should
+ // be at least six blocks (~1 hr) worth of transactions that we can store.
+ // If the number of transactions appearing in a block goes up, or if we are
+ // seeing getdata requests more than an hour after initial announcement, we
+ // can increase this number.
+ // The false positive rate of 1/1M should come out to less than 1
+ // transaction per day that would be inadvertently ignored (which is the
+ // same probability that we have in the reject filter).
+ g_recent_confirmed_transactions.reset(new CRollingBloomFilter(24000, 0.000001));
+
const Consensus::Params& consensusParams = Params().GetConsensus();
// Stale tip checking and peer eviction are on two different timers, but we
// don't want them to get out of sync due to drift in the scheduler, so we
@@ -1129,40 +1131,63 @@ PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CS
* Evict orphan txn pool entries (EraseOrphanTx) based on a newly connected
* block. Also save the time of the last tip update.
*/
-void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) {
- LOCK(g_cs_orphans);
+void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted)
+{
+ {
+ LOCK(g_cs_orphans);
- std::vector<uint256> vOrphanErase;
+ std::vector<uint256> vOrphanErase;
- for (const CTransactionRef& ptx : pblock->vtx) {
- const CTransaction& tx = *ptx;
+ for (const CTransactionRef& ptx : pblock->vtx) {
+ const CTransaction& tx = *ptx;
- // Which orphan pool entries must we evict?
- for (const auto& txin : tx.vin) {
- auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
- if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
- for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
- const CTransaction& orphanTx = *(*mi)->second.tx;
- const uint256& orphanHash = orphanTx.GetHash();
- vOrphanErase.push_back(orphanHash);
+ // Which orphan pool entries must we evict?
+ for (const auto& txin : tx.vin) {
+ auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
+ if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
+ for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
+ const CTransaction& orphanTx = *(*mi)->second.tx;
+ const uint256& orphanHash = orphanTx.GetHash();
+ vOrphanErase.push_back(orphanHash);
+ }
}
}
- }
- // Erase orphan transactions included or precluded by this block
- if (vOrphanErase.size()) {
- int nErased = 0;
- for (const uint256& orphanHash : vOrphanErase) {
- nErased += EraseOrphanTx(orphanHash);
+ // Erase orphan transactions included or precluded by this block
+ if (vOrphanErase.size()) {
+ int nErased = 0;
+ for (const uint256& orphanHash : vOrphanErase) {
+ nErased += EraseOrphanTx(orphanHash);
+ }
+ LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
+ }
+
+ g_last_tip_update = GetTime();
+ }
+ {
+ LOCK(g_cs_recent_confirmed_transactions);
+ for (const auto& ptx : pblock->vtx) {
+ g_recent_confirmed_transactions->insert(ptx->GetHash());
}
- LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
}
+}
- g_last_tip_update = GetTime();
+void PeerLogicValidation::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
+{
+ // To avoid relay problems with transactions that were previously
+ // confirmed, clear our filter of recently confirmed transactions whenever
+ // there's a reorg.
+ // This means that in a 1-block reorg (where 1 block is disconnected and
+ // then another block reconnected), our filter will drop to having only one
+ // block's worth of transactions in it, but that should be fine, since
+ // presumably the most common case of relaying a confirmed transaction
+ // should be just after a new block containing it is found.
+ LOCK(g_cs_recent_confirmed_transactions);
+ g_recent_confirmed_transactions->reset();
}
// All of the following cache a recent block, and are protected by cs_most_recent_block
-static CCriticalSection cs_most_recent_block;
+static RecursiveMutex cs_most_recent_block;
static std::shared_ptr<const CBlock> most_recent_block GUARDED_BY(cs_most_recent_block);
static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_block GUARDED_BY(cs_most_recent_block);
static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block);
@@ -1311,12 +1336,14 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
LOCK(g_cs_orphans);
if (mapOrphanTransactions.count(inv.hash)) return true;
}
- const CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip();
+
+ {
+ LOCK(g_cs_recent_confirmed_transactions);
+ if (g_recent_confirmed_transactions->contains(inv.hash)) return true;
+ }
return recentRejects->contains(inv.hash) ||
- mempool.exists(inv.hash) ||
- coins_cache.HaveCoinInCache(COutPoint(inv.hash, 0)) || // Best effort: only try output 0 and 1
- coins_cache.HaveCoinInCache(COutPoint(inv.hash, 1));
+ mempool.exists(inv.hash);
}
case MSG_BLOCK:
case MSG_WITNESS_BLOCK:
@@ -1404,7 +1431,7 @@ void static ProcessGetBlockData(CNode* pfrom, const CChainParams& chainparams, c
if (need_activate_chain) {
BlockValidationState state;
if (!ActivateBestChain(state, Params(), a_recent_block)) {
- LogPrint(BCLog::NET, "failed to activate chain (%s)\n", FormatStateMessage(state));
+ LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
}
}
@@ -1881,7 +1908,7 @@ void static ProcessOrphanTx(CConnman* connman, std::set<uint256>& orphan_work_se
}
}
-bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman* connman, BanMan* banman, const std::atomic<bool>& interruptMsgProc)
+bool ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman* connman, BanMan* banman, const std::atomic<bool>& interruptMsgProc)
{
LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->GetId());
if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0)
@@ -2314,7 +2341,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
}
BlockValidationState state;
if (!ActivateBestChain(state, Params(), a_recent_block)) {
- LogPrint(BCLog::NET, "failed to activate chain (%s)\n", FormatStateMessage(state));
+ LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
}
}
@@ -2572,14 +2599,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (pfrom->HasPermission(PF_FORCERELAY)) {
// Always relay transactions received from whitelisted peers, even
- // if they were already in the mempool or rejected from it due
- // to policy, allowing the node to function as a gateway for
+ // if they were already in the mempool,
+ // allowing the node to function as a gateway for
// nodes hidden behind it.
- //
- // Never relay transactions that might result in being
- // disconnected (or banned).
- if (state.IsInvalid() && TxRelayMayResultInDisconnect(state)) {
- LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->GetId(), FormatStateMessage(state));
+ if (!mempool.exists(tx.GetHash())) {
+ LogPrintf("Not relaying non-mempool transaction %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->GetId());
} else {
LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->GetId());
RelayTransaction(tx.GetHash(), *connman);
@@ -2611,7 +2635,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
{
LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
pfrom->GetId(),
- FormatStateMessage(state));
+ state.ToString());
MaybePunishNodeForTx(pfrom->GetId(), state);
}
return true;
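The sizing comment for g_recent_confirmed_transactions above corresponds to straightforward arithmetic, worked out here from the figures the comment itself gives:

    4,000 txs/block * 6 blocks (~1 hour at the 10-minute block target) = 24,000 elements

At the chosen 0.000001 false-positive rate, a txid announcement is wrongly treated as already confirmed only about once per million lookups, which the comment bounds at fewer than one ignored transaction per day, the same guarantee the recentRejects filter provides.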
diff --git a/src/net_processing.h b/src/net_processing.h
index a034e3b32f..6f26abc209 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -11,7 +11,7 @@
#include <consensus/params.h>
#include <sync.h>
-extern CCriticalSection cs_main;
+extern RecursiveMutex cs_main;
/** Default for -maxorphantx, maximum number of orphan transactions kept in memory */
static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS = 100;
@@ -33,6 +33,7 @@ public:
* Overridden from CValidationInterface.
*/
void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected, const std::vector<CTransactionRef>& vtxConflicted) override;
+ void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override;
/**
* Overridden from CValidationInterface.
*/
diff --git a/src/netaddress.cpp b/src/netaddress.cpp
index 2ee4ae3ee3..228caf74a9 100644
--- a/src/netaddress.cpp
+++ b/src/netaddress.cpp
@@ -6,6 +6,7 @@
#include <netaddress.h>
#include <hash.h>
#include <util/strencodings.h>
+#include <util/asmap.h>
#include <tinyformat.h>
static const unsigned char pchIPv4[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };
@@ -209,6 +210,11 @@ bool CNetAddr::IsRFC7343() const
return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x00 && (GetByte(12) & 0xF0) == 0x20);
}
+bool CNetAddr::IsHeNet() const
+{
+ return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x04 && GetByte(12) == 0x70);
+}
+
/**
* @returns Whether or not this is a dummy address that maps an onion address
* into IPv6.
@@ -400,6 +406,73 @@ bool CNetAddr::GetIn6Addr(struct in6_addr* pipv6Addr) const
return true;
}
+bool CNetAddr::HasLinkedIPv4() const
+{
+ return IsRoutable() && (IsIPv4() || IsRFC6145() || IsRFC6052() || IsRFC3964() || IsRFC4380());
+}
+
+uint32_t CNetAddr::GetLinkedIPv4() const
+{
+ if (IsIPv4() || IsRFC6145() || IsRFC6052()) {
+ // IPv4, mapped IPv4, SIIT translated IPv4: the IPv4 address is the last 4 bytes of the address
+ return ReadBE32(ip + 12);
+ } else if (IsRFC3964()) {
+ // 6to4 tunneled IPv4: the IPv4 address is in bytes 2-6
+ return ReadBE32(ip + 2);
+ } else if (IsRFC4380()) {
+ // Teredo tunneled IPv4: the IPv4 address is in the last 4 bytes of the address, but bitflipped
+ return ~ReadBE32(ip + 12);
+ }
+ assert(false);
+}
+
+uint32_t CNetAddr::GetNetClass() const {
+ uint32_t net_class = NET_IPV6;
+ if (IsLocal()) {
+ net_class = 255;
+ }
+ if (IsInternal()) {
+ net_class = NET_INTERNAL;
+ } else if (!IsRoutable()) {
+ net_class = NET_UNROUTABLE;
+ } else if (HasLinkedIPv4()) {
+ net_class = NET_IPV4;
+ } else if (IsTor()) {
+ net_class = NET_ONION;
+ }
+ return net_class;
+}
+
+uint32_t CNetAddr::GetMappedAS(const std::vector<bool> &asmap) const {
+ uint32_t net_class = GetNetClass();
+ if (asmap.size() == 0 || (net_class != NET_IPV4 && net_class != NET_IPV6)) {
+ return 0; // Indicates not found, safe because AS0 is reserved per RFC7607.
+ }
+ std::vector<bool> ip_bits(128);
+ if (HasLinkedIPv4()) {
+ // For lookup, treat as if it was just an IPv4 address (pchIPv4 prefix + IPv4 bits)
+ for (int8_t byte_i = 0; byte_i < 12; ++byte_i) {
+ for (uint8_t bit_i = 0; bit_i < 8; ++bit_i) {
+ ip_bits[byte_i * 8 + bit_i] = (pchIPv4[byte_i] >> (7 - bit_i)) & 1;
+ }
+ }
+ uint32_t ipv4 = GetLinkedIPv4();
+ for (int i = 0; i < 32; ++i) {
+ ip_bits[96 + i] = (ipv4 >> (31 - i)) & 1;
+ }
+ } else {
+ // Use all 128 bits of the IPv6 address otherwise
+ for (int8_t byte_i = 0; byte_i < 16; ++byte_i) {
+ uint8_t cur_byte = GetByte(15 - byte_i);
+ for (uint8_t bit_i = 0; bit_i < 8; ++bit_i) {
+ ip_bits[byte_i * 8 + bit_i] = (cur_byte >> (7 - bit_i)) & 1;
+ }
+ }
+ }
+ uint32_t mapped_as = Interpret(asmap, ip_bits);
+ return mapped_as;
+}
+
/**
* Get the canonical identifier of our network group
*
@@ -410,67 +483,51 @@ bool CNetAddr::GetIn6Addr(struct in6_addr* pipv6Addr) const
* @note No two connections will be attempted to addresses with the same network
* group.
*/
-std::vector<unsigned char> CNetAddr::GetGroup() const
+std::vector<unsigned char> CNetAddr::GetGroup(const std::vector<bool> &asmap) const
{
std::vector<unsigned char> vchRet;
- int nClass = NET_IPV6;
+ uint32_t net_class = GetNetClass();
+ // If non-empty asmap is supplied and the address is IPv4/IPv6,
+ // return ASN to be used for bucketing.
+ uint32_t asn = GetMappedAS(asmap);
+ if (asn != 0) { // asn == 0 means the asmap was empty or the net class is not asmappable (e.g. TOR).
+ vchRet.push_back(NET_IPV6); // IPv4 and IPv6 with same ASN should be in the same bucket
+ for (int i = 0; i < 4; i++) {
+ vchRet.push_back((asn >> (8 * i)) & 0xFF);
+ }
+ return vchRet;
+ }
+
+ vchRet.push_back(net_class);
int nStartByte = 0;
int nBits = 16;
- // all local addresses belong to the same group
- if (IsLocal())
- {
- nClass = 255;
+ if (IsLocal()) {
+ // all local addresses belong to the same group
nBits = 0;
- }
- // all internal-usage addresses get their own group
- if (IsInternal())
- {
- nClass = NET_INTERNAL;
+ } else if (IsInternal()) {
+ // all internal-usage addresses get their own group
nStartByte = sizeof(g_internal_prefix);
nBits = (sizeof(ip) - sizeof(g_internal_prefix)) * 8;
- }
- // all other unroutable addresses belong to the same group
- else if (!IsRoutable())
- {
- nClass = NET_UNROUTABLE;
+ } else if (!IsRoutable()) {
+ // all other unroutable addresses belong to the same group
nBits = 0;
- }
- // for IPv4 addresses, '1' + the 16 higher-order bits of the IP
- // includes mapped IPv4, SIIT translated IPv4, and the well-known prefix
- else if (IsIPv4() || IsRFC6145() || IsRFC6052())
- {
- nClass = NET_IPV4;
- nStartByte = 12;
- }
- // for 6to4 tunnelled addresses, use the encapsulated IPv4 address
- else if (IsRFC3964())
- {
- nClass = NET_IPV4;
- nStartByte = 2;
- }
- // for Teredo-tunnelled IPv6 addresses, use the encapsulated IPv4 address
- else if (IsRFC4380())
- {
- vchRet.push_back(NET_IPV4);
- vchRet.push_back(GetByte(3) ^ 0xFF);
- vchRet.push_back(GetByte(2) ^ 0xFF);
+ } else if (HasLinkedIPv4()) {
+ // IPv4 addresses (and mapped IPv4 addresses) use /16 groups
+ uint32_t ipv4 = GetLinkedIPv4();
+ vchRet.push_back((ipv4 >> 24) & 0xFF);
+ vchRet.push_back((ipv4 >> 16) & 0xFF);
return vchRet;
- }
- else if (IsTor())
- {
- nClass = NET_ONION;
+ } else if (IsTor()) {
nStartByte = 6;
nBits = 4;
- }
- // for he.net, use /36 groups
- else if (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x04 && GetByte(12) == 0x70)
+ } else if (IsHeNet()) {
+ // for he.net, use /36 groups
nBits = 36;
- // for the rest of the IPv6 network, use /32 groups
- else
+ } else {
+ // for the rest of the IPv6 network, use /32 groups
nBits = 32;
-
- vchRet.push_back(nClass);
+ }
// push our ip onto vchRet byte by byte...
while (nBits >= 8)
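To make the new GetGroup() encoding concrete, the two main return shapes derived from the code above are (address and ASN are purely illustrative):

    // No asmap (asn == 0): a routable IPv4 address such as 1.2.3.4 falls into a /16 group:
    //   { NET_IPV4, 1, 2 }
    //
    // With an asmap mapping the address to, say, AS 64496 (0x0000FBF0), both IPv4 and
    // IPv6 peers get an ASN-based group: the NET_IPV6 prefix followed by the
    // little-endian ASN bytes:
    //   { NET_IPV6, 0xF0, 0xFB, 0x00, 0x00 }

Keeping the NET_IPV6 prefix in the ASN case is deliberate: as the inline comment notes, IPv4 and IPv6 addresses that map to the same ASN should land in the same bucket.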
diff --git a/src/netaddress.h b/src/netaddress.h
index dcb492da0d..b7381c1eb4 100644
--- a/src/netaddress.h
+++ b/src/netaddress.h
@@ -39,14 +39,12 @@ class CNetAddr
explicit CNetAddr(const struct in_addr& ipv4Addr);
void SetIP(const CNetAddr& ip);
- private:
/**
* Set raw IPv4 or IPv6 address (in network byte order)
* @note Only NET_IPV4 and NET_IPV6 are allowed for network.
*/
void SetRaw(Network network, const uint8_t *data);
- public:
bool SetInternal(const std::string& name);
bool SetSpecial(const std::string &strName); // for Tor addresses
@@ -67,6 +65,7 @@ class CNetAddr
bool IsRFC4862() const; // IPv6 autoconfig (FE80::/64)
bool IsRFC6052() const; // IPv6 well-known prefix for IPv4-embedded address (64:FF9B::/96)
bool IsRFC6145() const; // IPv6 IPv4-translated address (::FFFF:0:0:0/96) (actually defined in RFC2765)
+ bool IsHeNet() const; // IPv6 Hurricane Electric - https://he.net (2001:0470::/36)
bool IsTor() const;
bool IsLocal() const;
bool IsRoutable() const;
@@ -78,7 +77,19 @@ class CNetAddr
unsigned int GetByte(int n) const;
uint64_t GetHash() const;
bool GetInAddr(struct in_addr* pipv4Addr) const;
- std::vector<unsigned char> GetGroup() const;
+ uint32_t GetNetClass() const;
+
+ //! For IPv4, mapped IPv4, SIIT translated IPv4, Teredo, 6to4 tunneled addresses, return the relevant IPv4 address as a uint32.
+ uint32_t GetLinkedIPv4() const;
+ //! Whether this address has a linked IPv4 address (see GetLinkedIPv4()).
+ bool HasLinkedIPv4() const;
+
+ // The AS (autonomous system) on the BGP path to the node, used to diversify
+ // peers in AddrMan bucketing based on the AS infrastructure.
+ // The ip->AS mapping depends on how the asmap is constructed.
+ uint32_t GetMappedAS(const std::vector<bool> &asmap) const;
+
+ std::vector<unsigned char> GetGroup(const std::vector<bool> &asmap) const;
int GetReachabilityFrom(const CNetAddr *paddrPartner = nullptr) const;
explicit CNetAddr(const struct in6_addr& pipv6Addr, const uint32_t scope = 0);
diff --git a/src/netbase.cpp b/src/netbase.cpp
index 735003cb06..a70179cb16 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -7,8 +7,9 @@
#include <sync.h>
#include <tinyformat.h>
-#include <util/system.h>
#include <util/strencodings.h>
+#include <util/string.h>
+#include <util/system.h>
#include <atomic>
@@ -27,7 +28,7 @@
#endif
// Settings
-static CCriticalSection cs_proxyInfos;
+static RecursiveMutex cs_proxyInfos;
static proxyType proxyInfo[NET_MAX] GUARDED_BY(cs_proxyInfos);
static proxyType nameProxy GUARDED_BY(cs_proxyInfos);
int nConnectTimeout = DEFAULT_CONNECT_TIMEOUT;
@@ -59,10 +60,14 @@ std::string GetNetworkName(enum Network net) {
}
}
-bool static LookupIntern(const char *pszName, std::vector<CNetAddr>& vIP, unsigned int nMaxSolutions, bool fAllowLookup)
+bool static LookupIntern(const std::string& name, std::vector<CNetAddr>& vIP, unsigned int nMaxSolutions, bool fAllowLookup)
{
vIP.clear();
+ if (!ValidAsCString(name)) {
+ return false;
+ }
+
{
CNetAddr addr;
// From our perspective, onion addresses are not hostnames but rather
@@ -71,7 +76,7 @@ bool static LookupIntern(const char *pszName, std::vector<CNetAddr>& vIP, unsign
// getaddrinfo to decode them and it wouldn't make sense to resolve
// them, we return a network address representing it instead. See
// CNetAddr::SetSpecial(const std::string&) for more details.
- if (addr.SetSpecial(std::string(pszName))) {
+ if (addr.SetSpecial(name)) {
vIP.push_back(addr);
return true;
}
@@ -93,7 +98,7 @@ bool static LookupIntern(const char *pszName, std::vector<CNetAddr>& vIP, unsign
// hostname lookups.
aiHint.ai_flags = fAllowLookup ? AI_ADDRCONFIG : AI_NUMERICHOST;
struct addrinfo *aiRes = nullptr;
- int nErr = getaddrinfo(pszName, nullptr, &aiHint, &aiRes);
+ int nErr = getaddrinfo(name.c_str(), nullptr, &aiHint, &aiRes);
if (nErr)
return false;
@@ -131,7 +136,7 @@ bool static LookupIntern(const char *pszName, std::vector<CNetAddr>& vIP, unsign
/**
* Resolve a host string to its corresponding network addresses.
*
- * @param pszName The string representing a host. Could be a name or a numerical
+ * @param name The string representing a host. Could be a name or a numerical
* IP address (IPv6 addresses in their bracketed form are
* allowed).
* @param[out] vIP The resulting network addresses to which the specified host
@@ -143,28 +148,34 @@ bool static LookupIntern(const char *pszName, std::vector<CNetAddr>& vIP, unsign
* @see Lookup(const char *, std::vector<CService>&, int, bool, unsigned int)
* for additional parameter descriptions.
*/
-bool LookupHost(const char *pszName, std::vector<CNetAddr>& vIP, unsigned int nMaxSolutions, bool fAllowLookup)
+bool LookupHost(const std::string& name, std::vector<CNetAddr>& vIP, unsigned int nMaxSolutions, bool fAllowLookup)
{
- std::string strHost(pszName);
+ if (!ValidAsCString(name)) {
+ return false;
+ }
+ std::string strHost = name;
if (strHost.empty())
return false;
if (strHost.front() == '[' && strHost.back() == ']') {
strHost = strHost.substr(1, strHost.size() - 2);
}
- return LookupIntern(strHost.c_str(), vIP, nMaxSolutions, fAllowLookup);
+ return LookupIntern(strHost, vIP, nMaxSolutions, fAllowLookup);
}
/**
* Resolve a host string to its first corresponding network address.
*
- * @see LookupHost(const char *, std::vector<CNetAddr>&, unsigned int, bool) for
+ * @see LookupHost(const std::string&, std::vector<CNetAddr>&, unsigned int, bool) for
* additional parameter descriptions.
*/
-bool LookupHost(const char *pszName, CNetAddr& addr, bool fAllowLookup)
+bool LookupHost(const std::string& name, CNetAddr& addr, bool fAllowLookup)
{
+ if (!ValidAsCString(name)) {
+ return false;
+ }
std::vector<CNetAddr> vIP;
- LookupHost(pszName, vIP, 1, fAllowLookup);
+ LookupHost(name, vIP, 1, fAllowLookup);
if(vIP.empty())
return false;
addr = vIP.front();
@@ -174,7 +185,7 @@ bool LookupHost(const char *pszName, CNetAddr& addr, bool fAllowLookup)
/**
* Resolve a service string to its corresponding service.
*
- * @param pszName The string representing a service. Could be a name or a
+ * @param name The string representing a service. Could be a name or a
* numerical IP address (IPv6 addresses should be in their
* disambiguated bracketed form), optionally followed by a port
* number. (e.g. example.com:8333 or
@@ -191,16 +202,17 @@ bool LookupHost(const char *pszName, CNetAddr& addr, bool fAllowLookup)
* @returns Whether or not the service string successfully resolved to any
* resulting services.
*/
-bool Lookup(const char *pszName, std::vector<CService>& vAddr, int portDefault, bool fAllowLookup, unsigned int nMaxSolutions)
+bool Lookup(const std::string& name, std::vector<CService>& vAddr, int portDefault, bool fAllowLookup, unsigned int nMaxSolutions)
{
- if (pszName[0] == 0)
+ if (name.empty() || !ValidAsCString(name)) {
return false;
+ }
int port = portDefault;
std::string hostname;
- SplitHostPort(std::string(pszName), port, hostname);
+ SplitHostPort(name, port, hostname);
std::vector<CNetAddr> vIP;
- bool fRet = LookupIntern(hostname.c_str(), vIP, nMaxSolutions, fAllowLookup);
+ bool fRet = LookupIntern(hostname, vIP, nMaxSolutions, fAllowLookup);
if (!fRet)
return false;
vAddr.resize(vIP.size());
@@ -215,10 +227,13 @@ bool Lookup(const char *pszName, std::vector<CService>& vAddr, int portDefault,
* @see Lookup(const char *, std::vector<CService>&, int, bool, unsigned int)
* for additional parameter descriptions.
*/
-bool Lookup(const char *pszName, CService& addr, int portDefault, bool fAllowLookup)
+bool Lookup(const std::string& name, CService& addr, int portDefault, bool fAllowLookup)
{
+ if (!ValidAsCString(name)) {
+ return false;
+ }
std::vector<CService> vService;
- bool fRet = Lookup(pszName, vService, portDefault, fAllowLookup, 1);
+ bool fRet = Lookup(name, vService, portDefault, fAllowLookup, 1);
if (!fRet)
return false;
addr = vService[0];
@@ -235,12 +250,15 @@ bool Lookup(const char *pszName, CService& addr, int portDefault, bool fAllowLoo
* @see Lookup(const char *, CService&, int, bool) for additional parameter
* descriptions.
*/
-CService LookupNumeric(const char *pszName, int portDefault)
+CService LookupNumeric(const std::string& name, int portDefault)
{
+ if (!ValidAsCString(name)) {
+ return {};
+ }
CService addr;
// "1.2:345" will fail to resolve the ip, but will still set the port.
// If the ip fails to resolve, re-init the result.
- if(!Lookup(pszName, addr, portDefault, false))
+ if(!Lookup(name, addr, portDefault, false))
addr = CService();
return addr;
}
@@ -763,17 +781,16 @@ bool IsProxy(const CNetAddr &addr) {
* @param hSocket The socket on which to connect to the SOCKS5 proxy.
* @param nTimeout Wait this many milliseconds for the connection to the SOCKS5
* proxy to be established.
- * @param outProxyConnectionFailed[out] Whether or not the connection to the
+ * @param[out] outProxyConnectionFailed Whether or not the connection to the
* SOCKS5 proxy failed.
*
* @returns Whether or not the operation succeeded.
*/
-bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int port, const SOCKET& hSocket, int nTimeout, bool *outProxyConnectionFailed)
+bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int port, const SOCKET& hSocket, int nTimeout, bool& outProxyConnectionFailed)
{
// first connect to proxy server
if (!ConnectSocketDirectly(proxy.proxy, hSocket, nTimeout, true)) {
- if (outProxyConnectionFailed)
- *outProxyConnectionFailed = true;
+ outProxyConnectionFailed = true;
return false;
}
// do socks negotiation
@@ -796,23 +813,25 @@ bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int
* Parse and resolve a specified subnet string into the appropriate internal
* representation.
*
- * @param pszName A string representation of a subnet of the form `network
+ * @param strSubnet A string representation of a subnet of the form `network
* address [ "/", ( CIDR-style suffix | netmask ) ]`(e.g.
* `2001:db8::/32`, `192.0.2.0/255.255.255.0`, or `8.8.8.8`).
* @param ret The resulting internal representation of a subnet.
*
* @returns Whether the operation succeeded or not.
*/
-bool LookupSubNet(const char* pszName, CSubNet& ret)
+bool LookupSubNet(const std::string& strSubnet, CSubNet& ret)
{
- std::string strSubnet(pszName);
+ if (!ValidAsCString(strSubnet)) {
+ return false;
+ }
size_t slash = strSubnet.find_last_of('/');
std::vector<CNetAddr> vIP;
std::string strAddress = strSubnet.substr(0, slash);
- // TODO: Use LookupHost(const char *, CNetAddr&, bool) instead to just get
+ // TODO: Use LookupHost(const std::string&, CNetAddr&, bool) instead to just get
// one CNetAddr.
- if (LookupHost(strAddress.c_str(), vIP, 1, false))
+ if (LookupHost(strAddress, vIP, 1, false))
{
CNetAddr network = vIP[0];
if (slash != strSubnet.npos)
@@ -827,7 +846,7 @@ bool LookupSubNet(const char* pszName, CSubNet& ret)
else // If not a valid number, try full netmask syntax
{
// Never allow lookup for netmask
- if (LookupHost(strNetmask.c_str(), vIP, 1, false)) {
+ if (LookupHost(strNetmask, vIP, 1, false)) {
ret = CSubNet(network, vIP[0]);
return ret.IsValid();
}
diff --git a/src/netbase.h b/src/netbase.h
index 8f9d65bf3a..ac4cd97673 100644
--- a/src/netbase.h
+++ b/src/netbase.h
@@ -45,15 +45,15 @@ bool IsProxy(const CNetAddr &addr);
bool SetNameProxy(const proxyType &addrProxy);
bool HaveNameProxy();
bool GetNameProxy(proxyType &nameProxyOut);
-bool LookupHost(const char *pszName, std::vector<CNetAddr>& vIP, unsigned int nMaxSolutions, bool fAllowLookup);
-bool LookupHost(const char *pszName, CNetAddr& addr, bool fAllowLookup);
-bool Lookup(const char *pszName, CService& addr, int portDefault, bool fAllowLookup);
-bool Lookup(const char *pszName, std::vector<CService>& vAddr, int portDefault, bool fAllowLookup, unsigned int nMaxSolutions);
-CService LookupNumeric(const char *pszName, int portDefault = 0);
-bool LookupSubNet(const char *pszName, CSubNet& subnet);
+bool LookupHost(const std::string& name, std::vector<CNetAddr>& vIP, unsigned int nMaxSolutions, bool fAllowLookup);
+bool LookupHost(const std::string& name, CNetAddr& addr, bool fAllowLookup);
+bool Lookup(const std::string& name, CService& addr, int portDefault, bool fAllowLookup);
+bool Lookup(const std::string& name, std::vector<CService>& vAddr, int portDefault, bool fAllowLookup, unsigned int nMaxSolutions);
+CService LookupNumeric(const std::string& name, int portDefault = 0);
+bool LookupSubNet(const std::string& strSubnet, CSubNet& subnet);
SOCKET CreateSocket(const CService &addrConnect);
bool ConnectSocketDirectly(const CService &addrConnect, const SOCKET& hSocketRet, int nTimeout, bool manual_connection);
-bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int port, const SOCKET& hSocketRet, int nTimeout, bool *outProxyConnectionFailed);
+bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int port, const SOCKET& hSocketRet, int nTimeout, bool& outProxyConnectionFailed);
/** Return readable error string for a network error code */
std::string NetworkErrorString(int err);
/** Close socket and set hSocket to INVALID_SOCKET */
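For orientation, a minimal caller-side sketch of the std::string-based lookup interface declared above. The ResolveSeed helper is hypothetical and not part of this patch; it only assumes the Lookup() overload shown in netbase.h.

    #include <netbase.h>
    #include <string>
    #include <vector>

    // Hypothetical helper: resolve a "host[:port]" string to candidate services.
    // Embedded NUL bytes are now rejected inside Lookup() via ValidAsCString(),
    // so the caller no longer has to hand over a NUL-terminated C string.
    bool ResolveSeed(const std::string& host_port, std::vector<CService>& out)
    {
        return Lookup(host_port, out, 8333 /* portDefault */,
                      true /* fAllowLookup */, 16 /* nMaxSolutions */);
    }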
diff --git a/src/node/coinstats.cpp b/src/node/coinstats.cpp
index a818f06d51..641b2a5d9c 100644
--- a/src/node/coinstats.cpp
+++ b/src/node/coinstats.cpp
@@ -23,7 +23,7 @@ static void ApplyStats(CCoinsStats &stats, CHashWriter& ss, const uint256& hash,
for (const auto& output : outputs) {
ss << VARINT(output.first + 1);
ss << output.second.out.scriptPubKey;
- ss << VARINT(output.second.out.nValue, VarIntMode::NONNEGATIVE_SIGNED);
+ ss << VARINT_MODE(output.second.out.nValue, VarIntMode::NONNEGATIVE_SIGNED);
stats.nTransactionOutputs++;
stats.nTotalAmount += output.second.out.nValue;
stats.nBogoSize += 32 /* txid */ + 4 /* vout index */ + 4 /* height + coinbase */ + 8 /* amount */ +
diff --git a/src/node/context.cpp b/src/node/context.cpp
index 26a01420c8..5b19a41bd4 100644
--- a/src/node/context.cpp
+++ b/src/node/context.cpp
@@ -8,6 +8,7 @@
#include <interfaces/chain.h>
#include <net.h>
#include <net_processing.h>
+#include <scheduler.h>
NodeContext::NodeContext() {}
NodeContext::~NodeContext() {}
diff --git a/src/node/context.h b/src/node/context.h
index dab5b5d048..1c592b456b 100644
--- a/src/node/context.h
+++ b/src/node/context.h
@@ -10,6 +10,7 @@
class BanMan;
class CConnman;
+class CScheduler;
class CTxMemPool;
class PeerLogicValidation;
namespace interfaces {
@@ -34,6 +35,7 @@ struct NodeContext {
std::unique_ptr<BanMan> banman;
std::unique_ptr<interfaces::Chain> chain;
std::vector<std::unique_ptr<interfaces::ChainClient>> chain_clients;
+ std::unique_ptr<CScheduler> scheduler;
//! Declare default constructor and destructor that are not inline, so code
//! instantiating the NodeContext struct doesn't need to #include class
diff --git a/src/node/psbt.cpp b/src/node/psbt.cpp
index 34c6866a5c..5b16035f7d 100644
--- a/src/node/psbt.cpp
+++ b/src/node/psbt.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <amount.h>
#include <coins.h>
#include <consensus/tx_verify.h>
#include <node/psbt.h>
@@ -17,9 +18,7 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx)
PSBTAnalysis result;
bool calc_fee = true;
- bool all_final = true;
- bool only_missing_sigs = true;
- bool only_missing_final = false;
+
CAmount in_amt = 0;
result.inputs.resize(psbtx.tx->vin.size());
@@ -28,12 +27,23 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx)
PSBTInput& input = psbtx.inputs[i];
PSBTInputAnalysis& input_analysis = result.inputs[i];
+ // We set the next role here and ratchet backwards as required
+ input_analysis.next = PSBTRole::EXTRACTOR;
+
// Check for a UTXO
CTxOut utxo;
if (psbtx.GetInputUTXO(utxo, i)) {
+ if (!MoneyRange(utxo.nValue) || !MoneyRange(in_amt + utxo.nValue)) {
+ result.SetInvalid(strprintf("PSBT is not valid. Input %u has invalid value", i));
+ return result;
+ }
in_amt += utxo.nValue;
input_analysis.has_utxo = true;
} else {
+ if (input.non_witness_utxo && psbtx.tx->vin[i].prevout.n >= input.non_witness_utxo->vout.size()) {
+ result.SetInvalid(strprintf("PSBT is not valid. Input %u specifies invalid prevout", i));
+ return result;
+ }
input_analysis.has_utxo = false;
input_analysis.is_final = false;
input_analysis.next = PSBTRole::UPDATER;
@@ -48,7 +58,6 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx)
// Check if it is final
if (!utxo.IsNull() && !PSBTInputSigned(input)) {
input_analysis.is_final = false;
- all_final = false;
// Figure out what is missing
SignatureData outdata;
@@ -65,11 +74,9 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx)
if (outdata.missing_pubkeys.empty() && outdata.missing_redeem_script.IsNull() && outdata.missing_witness_script.IsNull() && !outdata.missing_sigs.empty()) {
input_analysis.next = PSBTRole::SIGNER;
} else {
- only_missing_sigs = false;
input_analysis.next = PSBTRole::UPDATER;
}
} else {
- only_missing_final = true;
input_analysis.next = PSBTRole::FINALIZER;
}
} else if (!utxo.IsNull()){
@@ -77,17 +84,28 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx)
}
}
- if (all_final) {
- only_missing_sigs = false;
- result.next = PSBTRole::EXTRACTOR;
+ // Calculate the next role for the PSBT by taking the "minimum" next role across all inputs
+ result.next = PSBTRole::EXTRACTOR;
+ for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) {
+ PSBTInputAnalysis& input_analysis = result.inputs[i];
+ result.next = std::min(result.next, input_analysis.next);
}
+ assert(result.next > PSBTRole::CREATOR);
+
if (calc_fee) {
// Get the output amount
CAmount out_amt = std::accumulate(psbtx.tx->vout.begin(), psbtx.tx->vout.end(), CAmount(0),
[](CAmount a, const CTxOut& b) {
+ if (!MoneyRange(a) || !MoneyRange(b.nValue) || !MoneyRange(a + b.nValue)) {
+ return CAmount(-1);
+ }
return a += b.nValue;
}
);
+ if (!MoneyRange(out_amt)) {
+ result.SetInvalid(strprintf("PSBT is not valid. Output amount invalid"));
+ return result;
+ }
// Get the fee
CAmount fee = in_amt - out_amt;
@@ -123,17 +141,6 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx)
result.estimated_feerate = feerate;
}
- if (only_missing_sigs) {
- result.next = PSBTRole::SIGNER;
- } else if (only_missing_final) {
- result.next = PSBTRole::FINALIZER;
- } else if (all_final) {
- result.next = PSBTRole::EXTRACTOR;
- } else {
- result.next = PSBTRole::UPDATER;
- }
- } else {
- result.next = PSBTRole::UPDATER;
}
return result;
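A condensed restatement of the "minimum role" idea used above. NextRole is a hypothetical helper; it assumes the PSBTRole enumerators are declared in workflow order (CREATOR < UPDATER < SIGNER < FINALIZER < EXTRACTOR), which is what makes std::min meaningful here.

    #include <node/psbt.h>
    #include <algorithm>

    // The PSBT as a whole is only as far along as its least-finished input.
    PSBTRole NextRole(const PSBTAnalysis& analysis)
    {
        PSBTRole next = PSBTRole::EXTRACTOR;                 // most-finished state
        for (const PSBTInputAnalysis& in : analysis.inputs) {
            next = std::min(next, in.next);                  // scoped enums compare by underlying value
        }
        return next;
    }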
diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp
index 1bb9b88d00..201406ce3b 100644
--- a/src/node/transaction.cpp
+++ b/src/node/transaction.cpp
@@ -7,7 +7,6 @@
#include <net.h>
#include <net_processing.h>
#include <node/context.h>
-#include <util/validation.h>
#include <validation.h>
#include <validationinterface.h>
#include <node/transaction.h>
@@ -41,7 +40,7 @@ TransactionError BroadcastTransaction(NodeContext& node, const CTransactionRef t
TxValidationState state;
if (!AcceptToMemoryPool(*node.mempool, state, std::move(tx),
nullptr /* plTxnReplaced */, false /* bypass_limits */, max_tx_fee)) {
- err_string = FormatStateMessage(state);
+ err_string = state.ToString();
if (state.IsInvalid()) {
if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
return TransactionError::MISSING_INPUTS;
diff --git a/src/node/transaction.h b/src/node/transaction.h
index 35873d8376..a85dfb8ace 100644
--- a/src/node/transaction.h
+++ b/src/node/transaction.h
@@ -22,10 +22,10 @@ struct NodeContext;
*
* @param[in] node reference to node context
* @param[in] tx the transaction to broadcast
- * @param[out] &err_string reference to std::string to fill with error string if available
+ * @param[out] err_string reference to std::string to fill with error string if available
* @param[in] max_tx_fee reject txs with fees higher than this (if 0, accept any fee)
* @param[in] relay flag if both mempool insertion and p2p relay are requested
- * @param[in] wait_callback, wait until callbacks have been processed to avoid stale result due to a sequentially RPC.
+ * @param[in] wait_callback wait until callbacks have been processed to avoid a stale result due to a sequential RPC.
* return error
*/
NODISCARD TransactionError BroadcastTransaction(NodeContext& node, CTransactionRef tx, std::string& err_string, const CAmount& max_tx_fee, bool relay, bool wait_callback);
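A caller-side sketch of the interface documented above. SendRawTx is a hypothetical wrapper, shown only to illustrate how err_string and the returned TransactionError are meant to be consumed.

    #include <node/transaction.h>
    #include <string>

    // Hypothetical wrapper around BroadcastTransaction() as declared above.
    TransactionError SendRawTx(NodeContext& node, CTransactionRef tx, std::string& err)
    {
        // Accept any fee (max_tx_fee = 0), request p2p relay, and wait for
        // callbacks so a subsequent sequential RPC sees fresh mempool state.
        return BroadcastTransaction(node, tx, err, /*max_tx_fee=*/0,
                                    /*relay=*/true, /*wait_callback=*/true);
    }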
diff --git a/src/outputtype.cpp b/src/outputtype.cpp
index 85ceb03aa6..71b5cba01c 100644
--- a/src/outputtype.cpp
+++ b/src/outputtype.cpp
@@ -19,6 +19,8 @@ static const std::string OUTPUT_TYPE_STRING_LEGACY = "legacy";
static const std::string OUTPUT_TYPE_STRING_P2SH_SEGWIT = "p2sh-segwit";
static const std::string OUTPUT_TYPE_STRING_BECH32 = "bech32";
+const std::array<OutputType, 3> OUTPUT_TYPES = {OutputType::LEGACY, OutputType::P2SH_SEGWIT, OutputType::BECH32};
+
bool ParseOutputType(const std::string& type, OutputType& output_type)
{
if (type == OUTPUT_TYPE_STRING_LEGACY) {
diff --git a/src/outputtype.h b/src/outputtype.h
index b91082ddc0..1438f65844 100644
--- a/src/outputtype.h
+++ b/src/outputtype.h
@@ -10,6 +10,7 @@
#include <script/signingprovider.h>
#include <script/standard.h>
+#include <array>
#include <string>
#include <vector>
@@ -27,6 +28,8 @@ enum class OutputType {
CHANGE_AUTO,
};
+extern const std::array<OutputType, 3> OUTPUT_TYPES;
+
NODISCARD bool ParseOutputType(const std::string& str, OutputType& output_type);
const std::string& FormatOutputType(OutputType type);
@@ -47,4 +50,3 @@ std::vector<CTxDestination> GetAllDestinationsForKey(const CPubKey& key);
CTxDestination AddAndGetDestinationForScript(FillableSigningProvider& keystore, const CScript& script, OutputType);
#endif // BITCOIN_OUTPUTTYPE_H
-
diff --git a/src/policy/fees.h b/src/policy/fees.h
index 5d86417524..6ee6e0d547 100644
--- a/src/policy/fees.h
+++ b/src/policy/fees.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_POLICY_FEES_H
@@ -223,7 +223,7 @@ public:
unsigned int HighestTargetTracked(FeeEstimateHorizon horizon) const;
private:
- mutable CCriticalSection m_cs_fee_estimator;
+ mutable RecursiveMutex m_cs_fee_estimator;
unsigned int nBestSeenHeight GUARDED_BY(m_cs_fee_estimator);
unsigned int firstRecordedHeight GUARDED_BY(m_cs_fee_estimator);
diff --git a/src/prevector.h b/src/prevector.h
index f4ece738a8..6d690e7f96 100644
--- a/src/prevector.h
+++ b/src/prevector.h
@@ -15,7 +15,6 @@
#include <type_traits>
#include <utility>
-#pragma pack(push, 1)
/** Implements a drop-in replacement for std::vector<T> which stores up to N
* elements directly (without heap allocation). The types Size and Diff are
* used to store element counts, and can be any unsigned + signed type.
@@ -147,14 +146,20 @@ public:
};
private:
- size_type _size = 0;
+#pragma pack(push, 1)
union direct_or_indirect {
char direct[sizeof(T) * N];
struct {
- size_type capacity;
char* indirect;
+ size_type capacity;
};
- } _union = {};
+ };
+#pragma pack(pop)
+ alignas(char*) direct_or_indirect _union = {};
+ size_type _size = 0;
+
+ static_assert(alignof(char*) % alignof(size_type) == 0 && sizeof(char*) % alignof(size_type) == 0, "size_type cannot have more restrictive alignment requirement than pointer");
+ static_assert(alignof(char*) % alignof(T) == 0, "value_type T cannot have more restrictive alignment requirement than pointer");
T* direct_ptr(difference_type pos) { return reinterpret_cast<T*>(_union.direct) + pos; }
const T* direct_ptr(difference_type pos) const { return reinterpret_cast<const T*>(_union.direct) + pos; }
@@ -419,15 +424,20 @@ public:
return first;
}
- void push_back(const T& value) {
+ template<typename... Args>
+ void emplace_back(Args&&... args) {
size_type new_size = size() + 1;
if (capacity() < new_size) {
change_capacity(new_size + (new_size >> 1));
}
- new(item_ptr(size())) T(value);
+ new(item_ptr(size())) T(std::forward<Args>(args)...);
_size++;
}
+ void push_back(const T& value) {
+ emplace_back(value);
+ }
+
void pop_back() {
erase(end() - 1, end());
}
@@ -523,6 +533,5 @@ public:
return item_ptr(0);
}
};
-#pragma pack(pop)
#endif // BITCOIN_PREVECTOR_H
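A short usage sketch of the new emplace_back(). The instantiation is illustrative only; prevector<28, unsigned char> is the common script-sized use.

    #include <prevector.h>

    void PrevectorEmplaceExample()
    {
        prevector<28, unsigned char> v;
        v.emplace_back('a'); // constructs the element in place in the inline buffer
        v.push_back('b');    // push_back now simply forwards to emplace_back(value)
    }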
diff --git a/src/psbt.cpp b/src/psbt.cpp
index c23b78b3ee..e6b6285652 100644
--- a/src/psbt.cpp
+++ b/src/psbt.cpp
@@ -66,8 +66,11 @@ bool PartiallySignedTransaction::AddOutput(const CTxOut& txout, const PSBTOutput
bool PartiallySignedTransaction::GetInputUTXO(CTxOut& utxo, int input_index) const
{
PSBTInput input = inputs[input_index];
- int prevout_index = tx->vin[input_index].prevout.n;
+ uint32_t prevout_index = tx->vin[input_index].prevout.n;
if (input.non_witness_utxo) {
+ if (prevout_index >= input.non_witness_utxo->vout.size()) {
+ return false;
+ }
utxo = input.non_witness_utxo->vout[prevout_index];
} else if (!input.witness_utxo.IsNull()) {
utxo = input.witness_utxo;
@@ -255,6 +258,9 @@ bool SignPSBTInput(const SigningProvider& provider, PartiallySignedTransaction&
if (input.non_witness_utxo) {
// If we're taking our information from a non-witness UTXO, verify that it matches the prevout.
COutPoint prevout = tx.vin[index].prevout;
+ if (prevout.n >= input.non_witness_utxo->vout.size()) {
+ return false;
+ }
if (input.non_witness_utxo->GetHash() != prevout.hash) {
return false;
}
diff --git a/src/psbt.h b/src/psbt.h
index d507d5b6b7..dfba261961 100644
--- a/src/psbt.h
+++ b/src/psbt.h
@@ -584,7 +584,7 @@ void UpdatePSBTOutput(const SigningProvider& provider, PartiallySignedTransactio
/**
* Finalizes a PSBT if possible, combining partial signatures.
*
- * @param[in,out] &psbtx reference to PartiallySignedTransaction to finalize
+ * @param[in,out] psbtx PartiallySignedTransaction to finalize
* return True if the PSBT is now complete, false otherwise
*/
bool FinalizePSBT(PartiallySignedTransaction& psbtx);
@@ -592,7 +592,7 @@ bool FinalizePSBT(PartiallySignedTransaction& psbtx);
/**
* Finalizes a PSBT if possible, and extracts it to a CMutableTransaction if it could be finalized.
*
- * @param[in] &psbtx reference to PartiallySignedTransaction
+ * @param[in] psbtx PartiallySignedTransaction
* @param[out] result CMutableTransaction representing the complete transaction, if successful
* @return True if we successfully extracted the transaction, false otherwise
*/
@@ -601,7 +601,7 @@ bool FinalizeAndExtractPSBT(PartiallySignedTransaction& psbtx, CMutableTransacti
/**
* Combines PSBTs with the same underlying transaction, resulting in a single PSBT with all partial signatures from each input.
*
- * @param[out] &out the combined PSBT, if successful
+ * @param[out] out the combined PSBT, if successful
* @param[in] psbtxs the PSBTs to combine
* @return error (OK if we successfully combined the transactions, other error if they were not compatible)
*/
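A minimal sketch of how the combine/finalize/extract trio documented above is typically driven. CombineAndExtract is hypothetical, and it assumes CombinePSBTs returns a TransactionError as the @return note indicates.

    #include <psbt.h>
    #include <vector>

    // Hypothetical helper: merge several PSBTs for the same tx, then try to
    // finalize and extract a broadcastable transaction.
    bool CombineAndExtract(const std::vector<PartiallySignedTransaction>& psbtxs, CMutableTransaction& mtx)
    {
        PartiallySignedTransaction combined;
        if (CombinePSBTs(combined, psbtxs) != TransactionError::OK) return false;
        return FinalizeAndExtractPSBT(combined, mtx); // true only if fully signed
    }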
diff --git a/src/qt/bantablemodel.cpp b/src/qt/bantablemodel.cpp
index d1ee7fac6a..72f16bb09f 100644
--- a/src/qt/bantablemodel.cpp
+++ b/src/qt/bantablemodel.cpp
@@ -6,12 +6,13 @@
#include <interfaces/node.h>
#include <net_types.h> // For banmap_t
-#include <qt/clientmodel.h>
#include <utility>
-#include <QDebug>
+#include <QDateTime>
#include <QList>
+#include <QModelIndex>
+#include <QVariant>
bool BannedNodeLessThan::operator()(const CCombinedBan& left, const CCombinedBan& right) const
{
@@ -78,10 +79,9 @@ public:
}
};
-BanTableModel::BanTableModel(interfaces::Node& node, ClientModel *parent) :
+BanTableModel::BanTableModel(interfaces::Node& node, QObject* parent) :
QAbstractTableModel(parent),
- m_node(node),
- clientModel(parent)
+ m_node(node)
{
columns << tr("IP/Netmask") << tr("Banned Until");
priv.reset(new BanTablePriv());
diff --git a/src/qt/bantablemodel.h b/src/qt/bantablemodel.h
index 9dec5fa6a9..f01c506a1e 100644
--- a/src/qt/bantablemodel.h
+++ b/src/qt/bantablemodel.h
@@ -12,7 +12,6 @@
#include <QAbstractTableModel>
#include <QStringList>
-class ClientModel;
class BanTablePriv;
namespace interfaces {
@@ -45,7 +44,7 @@ class BanTableModel : public QAbstractTableModel
Q_OBJECT
public:
- explicit BanTableModel(interfaces::Node& node, ClientModel *parent = nullptr);
+ explicit BanTableModel(interfaces::Node& node, QObject* parent);
~BanTableModel();
void startAutoRefresh();
void stopAutoRefresh();
@@ -72,7 +71,6 @@ public Q_SLOTS:
private:
interfaces::Node& m_node;
- ClientModel *clientModel;
QStringList columns;
std::unique_ptr<BanTablePriv> priv;
};
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index 0021c3dbc7..4313d6ee7f 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2019 The Bitcoin Core developers
+// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -281,8 +281,11 @@ void BitcoinApplication::parameterSetup()
m_node.initParameterInteraction();
}
-void BitcoinApplication::SetPrune(bool prune, bool force) {
- optionsModel->SetPrune(prune, force);
+void BitcoinApplication::InitializePruneSetting(bool prune)
+{
+ // If prune is set, intentionally override existing prune size with
+ // the default size since this is called when choosing a new datadir.
+ optionsModel->SetPruneTargetGB(prune ? DEFAULT_PRUNE_TARGET_GB : 0, true);
}
void BitcoinApplication::requestInitialize()
@@ -556,12 +559,13 @@ int GuiMain(int argc, char* argv[])
qInstallMessageHandler(DebugMessageHandler);
// Allow parameter interaction before we create the options model
app.parameterSetup();
+ GUIUtil::LogQtInfo();
// Load GUI settings from QSettings
app.createOptionsModel(gArgs.GetBoolArg("-resetguisettings", false));
if (did_show_intro) {
// Store intro dialog settings other than datadir (network specific)
- app.SetPrune(prune, true);
+ app.InitializePruneSetting(prune);
}
if (gArgs.GetBoolArg("-splash", DEFAULT_SPLASHSCREEN) && !gArgs.GetBoolArg("-min", false))
diff --git a/src/qt/bitcoin.h b/src/qt/bitcoin.h
index 2d2d43a4bb..077a37fde5 100644
--- a/src/qt/bitcoin.h
+++ b/src/qt/bitcoin.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2019 The Bitcoin Core developers
+// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -67,8 +67,8 @@ public:
void parameterSetup();
/// Create options model
void createOptionsModel(bool resetSettings);
- /// Update prune value
- void SetPrune(bool prune, bool force = false);
+ /// Initialize prune setting
+ void InitializePruneSetting(bool prune);
/// Create main window
void createWindow(const NetworkStyle *networkStyle);
/// Create splash screen
diff --git a/src/qt/bitcoin_locale.qrc b/src/qt/bitcoin_locale.qrc
index dec3670536..c781072e9b 100644
--- a/src/qt/bitcoin_locale.qrc
+++ b/src/qt/bitcoin_locale.qrc
@@ -11,6 +11,7 @@
<file alias="de_DE">locale/bitcoin_de_DE.qm</file>
<file alias="el">locale/bitcoin_el.qm</file>
<file alias="el_GR">locale/bitcoin_el_GR.qm</file>
+ <file alias="en">locale/bitcoin_en.qm</file>
<file alias="en_AU">locale/bitcoin_en_AU.qm</file>
<file alias="en_GB">locale/bitcoin_en_GB.qm</file>
<file alias="eo">locale/bitcoin_eo.qm</file>
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index 8444984b27..5fab267610 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2019 The Bitcoin Core developers
+// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -318,8 +318,8 @@ void BitcoinGUI::createActions()
verifyMessageAction = new QAction(tr("&Verify message..."), this);
verifyMessageAction->setStatusTip(tr("Verify messages to ensure they were signed with specified Bitcoin addresses"));
- openRPCConsoleAction = new QAction(tr("&Debug window"), this);
- openRPCConsoleAction->setStatusTip(tr("Open debugging and diagnostic console"));
+ openRPCConsoleAction = new QAction(tr("Node window"), this);
+ openRPCConsoleAction->setStatusTip(tr("Open node debugging and diagnostic console"));
// initially disable the debug window menu item
openRPCConsoleAction->setEnabled(false);
openRPCConsoleAction->setObjectName("openRPCConsoleAction");
@@ -634,10 +634,10 @@ void BitcoinGUI::setWalletController(WalletController* wallet_controller)
void BitcoinGUI::addWallet(WalletModel* walletModel)
{
if (!walletFrame) return;
+ if (!walletFrame->addWallet(walletModel)) return;
const QString display_name = walletModel->getDisplayName();
setWalletActionsEnabled(true);
rpcConsole->addWallet(walletModel);
- walletFrame->addWallet(walletModel);
m_wallet_selector->addItem(display_name, QVariant::fromValue(walletModel));
if (m_wallet_selector->count() == 2) {
m_wallet_selector_label_action->setVisible(true);
@@ -648,6 +648,10 @@ void BitcoinGUI::addWallet(WalletModel* walletModel)
void BitcoinGUI::removeWallet(WalletModel* walletModel)
{
if (!walletFrame) return;
+
+ labelWalletHDStatusIcon->hide();
+ labelWalletEncryptionIcon->hide();
+
int index = m_wallet_selector->findData(QVariant::fromValue(walletModel));
m_wallet_selector->removeItem(index);
if (m_wallet_selector->count() == 0) {
@@ -1209,7 +1213,7 @@ void BitcoinGUI::setHDStatus(bool privkeyDisabled, int hdEnabled)
{
labelWalletHDStatusIcon->setPixmap(platformStyle->SingleColorIcon(privkeyDisabled ? ":/icons/eye" : hdEnabled ? ":/icons/hd_enabled" : ":/icons/hd_disabled").pixmap(STATUSBAR_ICONSIZE,STATUSBAR_ICONSIZE));
labelWalletHDStatusIcon->setToolTip(privkeyDisabled ? tr("Private key <b>disabled</b>") : hdEnabled ? tr("HD key generation is <b>enabled</b>") : tr("HD key generation is <b>disabled</b>"));
-
+ labelWalletHDStatusIcon->show();
// eventually disable the QLabel to set its opacity to 50%
labelWalletHDStatusIcon->setEnabled(hdEnabled);
}
diff --git a/src/qt/bitcoinunits.h b/src/qt/bitcoinunits.h
index 7c82febf65..4c8a889965 100644
--- a/src/qt/bitcoinunits.h
+++ b/src/qt/bitcoinunits.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2018 The Bitcoin Core developers
+// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp
index e8146982f9..a1ec3eaab1 100644
--- a/src/qt/clientmodel.cpp
+++ b/src/qt/clientmodel.cpp
@@ -242,8 +242,9 @@ static void BlockTipChanged(ClientModel *clientmodel, bool initialSync, int heig
clientmodel->cachedBestHeaderHeight = height;
clientmodel->cachedBestHeaderTime = blockTime;
}
- // if we are in-sync or if we notify a header update, update the UI regardless of last update time
- if (fHeader || !initialSync || now - nLastUpdateNotification > MODEL_UPDATE_DELAY) {
+
+ // During initial sync, block notifications and header notifications from reindexing are both throttled.
+ if (!initialSync || (fHeader && !clientmodel->node().getReindex()) || now - nLastUpdateNotification > MODEL_UPDATE_DELAY) {
//pass an async signal to the UI thread
bool invoked = QMetaObject::invokeMethod(clientmodel, "numBlocksChanged", Qt::QueuedConnection,
Q_ARG(int, height),
diff --git a/src/qt/coincontroltreewidget.h b/src/qt/coincontroltreewidget.h
index 65f94644e8..39dc9a5e9e 100644
--- a/src/qt/coincontroltreewidget.h
+++ b/src/qt/coincontroltreewidget.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2018 The Bitcoin Core developers
+// Copyright (c) 2011-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/qt/csvmodelwriter.h b/src/qt/csvmodelwriter.h
index ab27fef99e..e443529335 100644
--- a/src/qt/csvmodelwriter.h
+++ b/src/qt/csvmodelwriter.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2018 The Bitcoin Core developers
+// Copyright (c) 2011-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/qt/forms/debugwindow.ui b/src/qt/forms/debugwindow.ui
index be807b20c0..ebb6bbd4f5 100644
--- a/src/qt/forms/debugwindow.ui
+++ b/src/qt/forms/debugwindow.ui
@@ -11,7 +11,7 @@
</rect>
</property>
<property name="windowTitle">
- <string>Debug window</string>
+ <string>Node window</string>
</property>
<layout class="QVBoxLayout" name="verticalLayout_2">
<item>
diff --git a/src/qt/forms/modaloverlay.ui b/src/qt/forms/modaloverlay.ui
index da19a6fa2e..d2e7ca8f06 100644
--- a/src/qt/forms/modaloverlay.ui
+++ b/src/qt/forms/modaloverlay.ui
@@ -351,6 +351,9 @@ QLabel { color: rgb(40,40,40); }</string>
<property name="text">
<string>Hide</string>
</property>
+ <property name="shortcut">
+ <string>Esc</string>
+ </property>
<property name="focusPolicy">
<enum>Qt::StrongFocus</enum>
</property>
diff --git a/src/qt/guiconstants.h b/src/qt/guiconstants.h
index f0a6ac2273..9457ea37d6 100644
--- a/src/qt/guiconstants.h
+++ b/src/qt/guiconstants.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2019 The Bitcoin Core developers
+// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -51,4 +51,7 @@ static const int TOOLTIP_WRAP_THRESHOLD = 80;
/* One gigabyte (GB) in bytes */
static constexpr uint64_t GB_BYTES{1000000000};
+// Default prune target displayed in GUI.
+static constexpr int DEFAULT_PRUNE_TARGET_GB{2};
+
#endif // BITCOIN_QT_GUICONSTANTS_H
diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp
index 843de651e5..98dde1656a 100644
--- a/src/qt/guiutil.cpp
+++ b/src/qt/guiutil.cpp
@@ -44,14 +44,20 @@
#include <QFont>
#include <QFontDatabase>
#include <QFontMetrics>
+#include <QGuiApplication>
#include <QKeyEvent>
#include <QLineEdit>
+#include <QList>
#include <QMouseEvent>
#include <QProgressDialog>
+#include <QScreen>
#include <QSettings>
+#include <QSize>
+#include <QString>
#include <QTextDocument> // for Qt::mightBeRichText
#include <QThread>
#include <QUrlQuery>
+#include <QtGlobal>
#if defined(Q_OS_MAC)
@@ -767,9 +773,9 @@ QString formatServicesStr(quint64 mask)
return QObject::tr("None");
}
-QString formatPingTime(double dPingTime)
+QString formatPingTime(int64_t ping_usec)
{
- return (dPingTime == std::numeric_limits<int64_t>::max()/1e6 || dPingTime == 0) ? QObject::tr("N/A") : QString(QObject::tr("%1 ms")).arg(QString::number((int)(dPingTime * 1000), 10));
+ return (ping_usec == std::numeric_limits<int64_t>::max() || ping_usec == 0) ? QObject::tr("N/A") : QString(QObject::tr("%1 ms")).arg(QString::number((int)(ping_usec / 1000), 10));
}
QString formatTimeOffset(int64_t nTimeOffset)
@@ -879,4 +885,23 @@ int TextWidth(const QFontMetrics& fm, const QString& text)
#endif
}
+void LogQtInfo()
+{
+#ifdef QT_STATIC
+ const std::string qt_link{"static"};
+#else
+ const std::string qt_link{"dynamic"};
+#endif
+#ifdef QT_STATICPLUGIN
+ const std::string plugin_link{"static"};
+#else
+ const std::string plugin_link{"dynamic"};
+#endif
+ LogPrintf("Qt %s (%s), plugin=%s (%s)\n", qVersion(), qt_link, QGuiApplication::platformName().toStdString(), plugin_link);
+ LogPrintf("System: %s, %s\n", QSysInfo::prettyProductName().toStdString(), QSysInfo::buildAbi().toStdString());
+ for (const QScreen* s : QGuiApplication::screens()) {
+ LogPrintf("Screen: %s %dx%d, pixel ratio=%.1f\n", s->name().toStdString(), s->size().width(), s->size().height(), s->devicePixelRatio());
+ }
+}
+
} // namespace GUIUtil
diff --git a/src/qt/guiutil.h b/src/qt/guiutil.h
index d7877d697e..e571262443 100644
--- a/src/qt/guiutil.h
+++ b/src/qt/guiutil.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2019 The Bitcoin Core developers
+// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -202,8 +202,8 @@ namespace GUIUtil
/* Format CNodeStats.nServices bitmask into a user-readable string */
QString formatServicesStr(quint64 mask);
- /* Format a CNodeCombinedStats.dPingTime into a user-readable string or display N/A, if 0*/
- QString formatPingTime(double dPingTime);
+ /* Format a CNodeStats.m_ping_usec into a user-readable string, or display N/A if 0 */
+ QString formatPingTime(int64_t ping_usec);
/* Format a CNodeCombinedStats.nTimeOffset into a user-readable string. */
QString formatTimeOffset(int64_t nTimeOffset);
@@ -265,6 +265,11 @@ namespace GUIUtil
* In Qt 5.11 the QFontMetrics::horizontalAdvance() was introduced.
*/
int TextWidth(const QFontMetrics& fm, const QString& text);
+
+ /**
+ * Writes short info about the Qt build in use and the host system to debug.log.
+ */
+ void LogQtInfo();
} // namespace GUIUtil
#endif // BITCOIN_QT_GUIUTIL_H
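Quick reference values for the microsecond-based formatPingTime() above (illustrative inputs, derived from the formula in the hunk):

    // formatPingTime(0)                                    -> "N/A"    (no ping measured)
    // formatPingTime(std::numeric_limits<int64_t>::max())  -> "N/A"    (ping still outstanding)
    // formatPingTime(123456)                               -> "123 ms" (integer usec / 1000)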
diff --git a/src/qt/intro.cpp b/src/qt/intro.cpp
index 5c02b07777..ad21dfc3ef 100644
--- a/src/qt/intro.cpp
+++ b/src/qt/intro.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2019 The Bitcoin Core developers
+// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -12,6 +12,7 @@
#include <qt/guiconstants.h>
#include <qt/guiutil.h>
+#include <qt/optionsmodel.h>
#include <interfaces/node.h>
#include <util/system.h>
@@ -22,9 +23,6 @@
#include <cmath>
-/* Total required space (in GB) depending on user choice (prune, not prune) */
-static uint64_t requiredSpace;
-
/* Check free space asynchronously to prevent hanging the UI thread.
Up to one request to check a path is in flight to this thread; when the check()
@@ -109,14 +107,24 @@ void FreespaceChecker::check()
Q_EMIT reply(replyStatus, replyMessage, freeBytesAvailable);
}
+namespace {
+//! Return pruning size that will be used if automatic pruning is enabled.
+int GetPruneTargetGB()
+{
+ int64_t prune_target_mib = gArgs.GetArg("-prune", 0);
+ // >1 means automatic pruning is enabled by config, 1 means manual pruning, 0 means no pruning.
+ return prune_target_mib > 1 ? PruneMiBtoGB(prune_target_mib) : DEFAULT_PRUNE_TARGET_GB;
+}
+} // namespace
-Intro::Intro(QWidget *parent, uint64_t blockchain_size, uint64_t chain_state_size) :
+Intro::Intro(QWidget *parent, int64_t blockchain_size_gb, int64_t chain_state_size_gb) :
QDialog(parent),
ui(new Ui::Intro),
thread(nullptr),
signalled(false),
- m_blockchain_size(blockchain_size),
- m_chain_state_size(chain_state_size)
+ m_blockchain_size_gb(blockchain_size_gb),
+ m_chain_state_size_gb(chain_state_size_gb),
+ m_prune_target_gb{GetPruneTargetGB()}
{
ui->setupUi(this);
ui->welcomeLabel->setText(ui->welcomeLabel->text().arg(PACKAGE_NAME));
@@ -124,37 +132,24 @@ Intro::Intro(QWidget *parent, uint64_t blockchain_size, uint64_t chain_state_siz
ui->lblExplanation1->setText(ui->lblExplanation1->text()
.arg(PACKAGE_NAME)
- .arg(m_blockchain_size)
+ .arg(m_blockchain_size_gb)
.arg(2009)
.arg(tr("Bitcoin"))
);
ui->lblExplanation2->setText(ui->lblExplanation2->text().arg(PACKAGE_NAME));
- uint64_t pruneTarget = std::max<int64_t>(0, gArgs.GetArg("-prune", 0));
- if (pruneTarget > 1) { // -prune=1 means enabled, above that it's a size in MB
+ if (gArgs.GetArg("-prune", 0) > 1) { // -prune=1 means enabled, above that it's a size in MiB
ui->prune->setChecked(true);
ui->prune->setEnabled(false);
}
- ui->prune->setText(tr("Discard blocks after verification, except most recent %1 GB (prune)").arg(pruneTarget ? pruneTarget / 1000 : 2));
- requiredSpace = m_blockchain_size;
- QString storageRequiresMsg = tr("At least %1 GB of data will be stored in this directory, and it will grow over time.");
- if (pruneTarget) {
- uint64_t prunedGBs = std::ceil(pruneTarget * 1024 * 1024.0 / GB_BYTES);
- if (prunedGBs <= requiredSpace) {
- requiredSpace = prunedGBs;
- storageRequiresMsg = tr("Approximately %1 GB of data will be stored in this directory.");
- }
- ui->lblExplanation3->setVisible(true);
- } else {
- ui->lblExplanation3->setVisible(false);
- }
- requiredSpace += m_chain_state_size;
- ui->sizeWarningLabel->setText(
- tr("%1 will download and store a copy of the Bitcoin block chain.").arg(PACKAGE_NAME) + " " +
- storageRequiresMsg.arg(requiredSpace) + " " +
- tr("The wallet will also be stored in this directory.")
- );
- this->adjustSize();
+ ui->prune->setText(tr("Discard blocks after verification, except most recent %1 GB (prune)").arg(m_prune_target_gb));
+ UpdatePruneLabels(ui->prune->isChecked());
+
+ connect(ui->prune, &QCheckBox::toggled, [this](bool prune_checked) {
+ UpdatePruneLabels(prune_checked);
+ UpdateFreeSpaceLabel();
+ });
+
startThread();
}
@@ -270,25 +265,31 @@ void Intro::setStatus(int status, const QString &message, quint64 bytesAvailable
{
ui->freeSpace->setText("");
} else {
- QString freeString = tr("%n GB of free space available", "", bytesAvailable/GB_BYTES);
- if(bytesAvailable < requiredSpace * GB_BYTES)
- {
- freeString += " " + tr("(of %n GB needed)", "", requiredSpace);
- ui->freeSpace->setStyleSheet("QLabel { color: #800000 }");
- ui->prune->setChecked(true);
- } else if (bytesAvailable / GB_BYTES - requiredSpace < 10) {
- freeString += " " + tr("(%n GB needed for full chain)", "", requiredSpace);
- ui->freeSpace->setStyleSheet("QLabel { color: #999900 }");
- ui->prune->setChecked(true);
- } else {
- ui->freeSpace->setStyleSheet("");
+ m_bytes_available = bytesAvailable;
+ if (ui->prune->isEnabled()) {
+ ui->prune->setChecked(m_bytes_available < (m_blockchain_size_gb + m_chain_state_size_gb + 10) * GB_BYTES);
}
- ui->freeSpace->setText(freeString + ".");
+ UpdateFreeSpaceLabel();
}
/* Don't allow confirm in ERROR state */
ui->buttonBox->button(QDialogButtonBox::Ok)->setEnabled(status != FreespaceChecker::ST_ERROR);
}
+void Intro::UpdateFreeSpaceLabel()
+{
+ QString freeString = tr("%n GB of free space available", "", m_bytes_available / GB_BYTES);
+ if (m_bytes_available < m_required_space_gb * GB_BYTES) {
+ freeString += " " + tr("(of %n GB needed)", "", m_required_space_gb);
+ ui->freeSpace->setStyleSheet("QLabel { color: #800000 }");
+ } else if (m_bytes_available / GB_BYTES - m_required_space_gb < 10) {
+ freeString += " " + tr("(%n GB needed for full chain)", "", m_required_space_gb);
+ ui->freeSpace->setStyleSheet("QLabel { color: #999900 }");
+ } else {
+ ui->freeSpace->setStyleSheet("");
+ }
+ ui->freeSpace->setText(freeString + ".");
+}
+
void Intro::on_dataDirectory_textChanged(const QString &dataDirStr)
{
/* Disable OK button until check result comes in */
@@ -349,3 +350,20 @@ QString Intro::getPathToCheck()
mutex.unlock();
return retval;
}
+
+void Intro::UpdatePruneLabels(bool prune_checked)
+{
+ m_required_space_gb = m_blockchain_size_gb + m_chain_state_size_gb;
+ QString storageRequiresMsg = tr("At least %1 GB of data will be stored in this directory, and it will grow over time.");
+ if (prune_checked && m_prune_target_gb <= m_blockchain_size_gb) {
+ m_required_space_gb = m_prune_target_gb + m_chain_state_size_gb;
+ storageRequiresMsg = tr("Approximately %1 GB of data will be stored in this directory.");
+ }
+ ui->lblExplanation3->setVisible(prune_checked);
+ ui->sizeWarningLabel->setText(
+ tr("%1 will download and store a copy of the Bitcoin block chain.").arg(PACKAGE_NAME) + " " +
+ storageRequiresMsg.arg(m_required_space_gb) + " " +
+ tr("The wallet will also be stored in this directory.")
+ );
+ this->adjustSize();
+}
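A worked example of the new prune plumbing in Intro, assuming bitcoin-qt is started with -prune=550 and that the prune target is no larger than the assumed blockchain size (values follow the code above plus PruneMiBtoGB from optionsmodel.h):

    // gArgs.GetArg("-prune", 0) == 550          // > 1, so automatic pruning is forced on
    // GetPruneTargetGB() == PruneMiBtoGB(550)   // == 1 GB (550 MiB rounded up to whole GB)
    // ui->prune is checked and disabled          // the user cannot un-prune a config-pruned node
    // UpdatePruneLabels(true):
    //   m_required_space_gb = 1 + m_chain_state_size_gb   // prune target + chain state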
diff --git a/src/qt/intro.h b/src/qt/intro.h
index 41da06141f..732393246e 100644
--- a/src/qt/intro.h
+++ b/src/qt/intro.h
@@ -31,7 +31,7 @@ class Intro : public QDialog
public:
explicit Intro(QWidget *parent = nullptr,
- uint64_t blockchain_size = 0, uint64_t chain_state_size = 0);
+ int64_t blockchain_size_gb = 0, int64_t chain_state_size_gb = 0);
~Intro();
QString getDataDirectory();
@@ -67,12 +67,18 @@ private:
QMutex mutex;
bool signalled;
QString pathToCheck;
- uint64_t m_blockchain_size;
- uint64_t m_chain_state_size;
+ const int64_t m_blockchain_size_gb;
+ const int64_t m_chain_state_size_gb;
+ //! Total required space (in GB) depending on the user's choice to prune or not.
+ int64_t m_required_space_gb{0};
+ uint64_t m_bytes_available{0};
+ const int64_t m_prune_target_gb;
void startThread();
void checkPath(const QString &dataDir);
QString getPathToCheck();
+ void UpdatePruneLabels(bool prune_checked);
+ void UpdateFreeSpaceLabel();
friend class FreespaceChecker;
};
diff --git a/src/qt/macnotificationhandler.mm b/src/qt/macnotificationhandler.mm
index 7c094134cd..b16042e946 100644
--- a/src/qt/macnotificationhandler.mm
+++ b/src/qt/macnotificationhandler.mm
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2018 The Bitcoin Core developers
+// Copyright (c) 2011-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/qt/modaloverlay.cpp b/src/qt/modaloverlay.cpp
index f2cc7ee4b6..6243a71c7d 100644
--- a/src/qt/modaloverlay.cpp
+++ b/src/qt/modaloverlay.cpp
@@ -31,7 +31,7 @@ userClosed(false)
setVisible(false);
if (!enable_wallet) {
ui->infoText->setVisible(false);
- ui->infoTextStrong->setText(tr("Bitcoin Core is currently syncing. It will download headers and blocks from peers and validate them until reaching the tip of the block chain."));
+ ui->infoTextStrong->setText(tr("%1 is currently syncing. It will download headers and blocks from peers and validate them until reaching the tip of the block chain.").arg(PACKAGE_NAME));
}
}
diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp
index 2f612664df..8ee6c947e6 100644
--- a/src/qt/optionsdialog.cpp
+++ b/src/qt/optionsdialog.cpp
@@ -375,7 +375,7 @@ QValidator::State ProxyAddressValidator::validate(QString &input, int &pos) cons
{
Q_UNUSED(pos);
// Validate the proxy
- CService serv(LookupNumeric(input.toStdString().c_str(), DEFAULT_GUI_PROXY_PORT));
+ CService serv(LookupNumeric(input.toStdString(), DEFAULT_GUI_PROXY_PORT));
proxyType addrProxy = proxyType(serv, true);
if (addrProxy.IsValid())
return QValidator::Acceptable;
diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp
index ec18cdf3f8..977076c4c2 100644
--- a/src/qt/optionsmodel.cpp
+++ b/src/qt/optionsmodel.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2019 The Bitcoin Core developers
+// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -91,8 +91,8 @@ void OptionsModel::Init(bool resetSettings)
if (!settings.contains("bPrune"))
settings.setValue("bPrune", false);
if (!settings.contains("nPruneSize"))
- settings.setValue("nPruneSize", 2);
- SetPrune(settings.value("bPrune").toBool());
+ settings.setValue("nPruneSize", DEFAULT_PRUNE_TARGET_GB);
+ SetPruneEnabled(settings.value("bPrune").toBool());
if (!settings.contains("nDatabaseCache"))
settings.setValue("nDatabaseCache", (qint64)nDefaultDbCache);
@@ -236,13 +236,12 @@ static const QString GetDefaultProxyAddress()
return QString("%1:%2").arg(DEFAULT_GUI_PROXY_HOST).arg(DEFAULT_GUI_PROXY_PORT);
}
-void OptionsModel::SetPrune(bool prune, bool force)
+void OptionsModel::SetPruneEnabled(bool prune, bool force)
{
QSettings settings;
settings.setValue("bPrune", prune);
- // Convert prune size from GB to MiB:
- const uint64_t nPruneSizeMiB = (settings.value("nPruneSize").toInt() * GB_BYTES) >> 20;
- std::string prune_val = prune ? std::to_string(nPruneSizeMiB) : "0";
+ const int64_t prune_target_mib = PruneGBtoMiB(settings.value("nPruneSize").toInt());
+ std::string prune_val = prune ? std::to_string(prune_target_mib) : "0";
if (force) {
m_node.forceSetArg("-prune", prune_val);
return;
@@ -252,6 +251,16 @@ void OptionsModel::SetPrune(bool prune, bool force)
}
}
+void OptionsModel::SetPruneTargetGB(int prune_target_gb, bool force)
+{
+ const bool prune = prune_target_gb > 0;
+ if (prune) {
+ QSettings settings;
+ settings.setValue("nPruneSize", prune_target_gb);
+ }
+ SetPruneEnabled(prune, force);
+}
+
// read QSettings values and return them
QVariant OptionsModel::data(const QModelIndex & index, int role) const
{
diff --git a/src/qt/optionsmodel.h b/src/qt/optionsmodel.h
index 7ed9fbe440..b3260349e7 100644
--- a/src/qt/optionsmodel.h
+++ b/src/qt/optionsmodel.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2019 The Bitcoin Core developers
+// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -6,6 +6,7 @@
#define BITCOIN_QT_OPTIONSMODEL_H
#include <amount.h>
+#include <qt/guiconstants.h>
#include <QAbstractListModel>
@@ -16,6 +17,16 @@ class Node;
extern const char *DEFAULT_GUI_PROXY_HOST;
static constexpr unsigned short DEFAULT_GUI_PROXY_PORT = 9050;
+/**
+ * Convert configured prune target MiB to displayed GB. Round up to avoid underestimating max disk usage.
+ */
+static inline int PruneMiBtoGB(int64_t mib) { return (mib * 1024 * 1024 + GB_BYTES - 1) / GB_BYTES; }
+
+/**
+ * Convert displayed prune target GB to configured MiB. Round down so roundtrip GB -> MiB -> GB conversion is stable.
+ */
+static inline int64_t PruneGBtoMiB(int gb) { return gb * GB_BYTES / 1024 / 1024; }
+
/** Interface from Qt to configuration data structure for Bitcoin client.
To Qt, the options are presented as a list with the different options
laid out vertically.
@@ -73,7 +84,8 @@ public:
const QString& getOverriddenByCommandLine() { return strOverriddenByCommandLine; }
/* Explicit setters */
- void SetPrune(bool prune, bool force = false);
+ void SetPruneEnabled(bool prune, bool force = false);
+ void SetPruneTargetGB(int prune_target_gb, bool force = false);
/* Restart flag helper */
void setRestartRequired(bool fRequired);
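Worked numbers for the two conversion helpers declared above (GB_BYTES = 1,000,000,000):

    // PruneGBtoMiB(2)    ==  2 * 1000000000 / 1024 / 1024               == 1907 MiB (rounded down)
    // PruneMiBtoGB(1907) == (1907 * 1048576 + 999999999) / 1000000000   == 2 GB     (rounded up)
    // So the GB value shown in the GUI survives the GB -> MiB -> GB round trip unchanged.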
diff --git a/src/qt/peertablemodel.cpp b/src/qt/peertablemodel.cpp
index 631c66e745..a1fc791536 100644
--- a/src/qt/peertablemodel.cpp
+++ b/src/qt/peertablemodel.cpp
@@ -4,7 +4,6 @@
#include <qt/peertablemodel.h>
-#include <qt/clientmodel.h>
#include <qt/guiconstants.h>
#include <qt/guiutil.h>
@@ -33,7 +32,7 @@ bool NodeLessThan::operator()(const CNodeCombinedStats &left, const CNodeCombine
case PeerTableModel::Subversion:
return pLeft->cleanSubVer.compare(pRight->cleanSubVer) < 0;
case PeerTableModel::Ping:
- return pLeft->dMinPing < pRight->dMinPing;
+ return pLeft->m_min_ping_usec < pRight->m_min_ping_usec;
case PeerTableModel::Sent:
return pLeft->nSendBytes < pRight->nSendBytes;
case PeerTableModel::Received:
@@ -100,10 +99,9 @@ public:
}
};
-PeerTableModel::PeerTableModel(interfaces::Node& node, ClientModel *parent) :
+PeerTableModel::PeerTableModel(interfaces::Node& node, QObject* parent) :
QAbstractTableModel(parent),
m_node(node),
- clientModel(parent),
timer(nullptr)
{
columns << tr("NodeId") << tr("Node/Service") << tr("Ping") << tr("Sent") << tr("Received") << tr("User Agent");
@@ -163,7 +161,7 @@ QVariant PeerTableModel::data(const QModelIndex &index, int role) const
case Subversion:
return QString::fromStdString(rec->nodeStats.cleanSubVer);
case Ping:
- return GUIUtil::formatPingTime(rec->nodeStats.dMinPing);
+ return GUIUtil::formatPingTime(rec->nodeStats.m_min_ping_usec);
case Sent:
return GUIUtil::formatBytes(rec->nodeStats.nSendBytes);
case Received:
diff --git a/src/qt/peertablemodel.h b/src/qt/peertablemodel.h
index b3f5dd7dbe..cf45c5a08f 100644
--- a/src/qt/peertablemodel.h
+++ b/src/qt/peertablemodel.h
@@ -13,7 +13,6 @@
#include <QAbstractTableModel>
#include <QStringList>
-class ClientModel;
class PeerTablePriv;
namespace interfaces {
@@ -51,7 +50,7 @@ class PeerTableModel : public QAbstractTableModel
Q_OBJECT
public:
- explicit PeerTableModel(interfaces::Node& node, ClientModel *parent = nullptr);
+ explicit PeerTableModel(interfaces::Node& node, QObject* parent);
~PeerTableModel();
const CNodeCombinedStats *getNodeStats(int idx);
int getRowByNodeId(NodeId nodeid);
@@ -83,7 +82,6 @@ public Q_SLOTS:
private:
interfaces::Node& m_node;
- ClientModel *clientModel;
QStringList columns;
std::unique_ptr<PeerTablePriv> priv;
QTimer *timer;
diff --git a/src/qt/qvaluecombobox.h b/src/qt/qvaluecombobox.h
index d873322405..5cca515079 100644
--- a/src/qt/qvaluecombobox.h
+++ b/src/qt/qvaluecombobox.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2018 The Bitcoin Core developers
+// Copyright (c) 2011-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index 3dd64c5273..b82ab9ffe8 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -905,12 +905,8 @@ void RPCConsole::on_lineEdit_returnPressed()
cmdBeforeBrowsing = QString();
- WalletModel* wallet_model{nullptr};
#ifdef ENABLE_WALLET
- const int wallet_index = ui->WalletSelector->currentIndex();
- if (wallet_index > 0) {
- wallet_model = ui->WalletSelector->itemData(wallet_index).value<WalletModel*>();
- }
+ WalletModel* wallet_model = ui->WalletSelector->currentData().value<WalletModel*>();
if (m_last_wallet_model != wallet_model) {
if (wallet_model) {
@@ -1113,9 +1109,9 @@ void RPCConsole::updateNodeDetail(const CNodeCombinedStats *stats)
ui->peerBytesSent->setText(GUIUtil::formatBytes(stats->nodeStats.nSendBytes));
ui->peerBytesRecv->setText(GUIUtil::formatBytes(stats->nodeStats.nRecvBytes));
ui->peerConnTime->setText(GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nTimeConnected));
- ui->peerPingTime->setText(GUIUtil::formatPingTime(stats->nodeStats.dPingTime));
- ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStats.dPingWait));
- ui->peerMinPing->setText(GUIUtil::formatPingTime(stats->nodeStats.dMinPing));
+ ui->peerPingTime->setText(GUIUtil::formatPingTime(stats->nodeStats.m_ping_usec));
+ ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStats.m_ping_wait_usec));
+ ui->peerMinPing->setText(GUIUtil::formatPingTime(stats->nodeStats.m_min_ping_usec));
ui->timeoffset->setText(GUIUtil::formatTimeOffset(stats->nodeStats.nTimeOffset));
ui->peerVersion->setText(QString("%1").arg(QString::number(stats->nodeStats.nVersion)));
ui->peerSubversion->setText(QString::fromStdString(stats->nodeStats.cleanSubVer));
@@ -1240,7 +1236,7 @@ void RPCConsole::unbanSelectedNode()
QString strNode = nodes.at(i).data().toString();
CSubNet possibleSubnet;
- LookupSubNet(strNode.toStdString().c_str(), possibleSubnet);
+ LookupSubNet(strNode.toStdString(), possibleSubnet);
if (possibleSubnet.IsValid() && m_node.unban(possibleSubnet))
{
clientModel->getBanTableModel()->refresh();
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index cc01aafb23..4ddee513a1 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -26,7 +26,6 @@
#include <ui_interface.h>
#include <wallet/coincontrol.h>
#include <wallet/fees.h>
-#include <wallet/psbtwallet.h>
#include <wallet/wallet.h>
#include <QFontMetrics>
diff --git a/src/qt/signverifymessagedialog.cpp b/src/qt/signverifymessagedialog.cpp
index 5f2836cc75..4552753bf6 100644
--- a/src/qt/signverifymessagedialog.cpp
+++ b/src/qt/signverifymessagedialog.cpp
@@ -11,7 +11,7 @@
#include <qt/walletmodel.h>
#include <key_io.h>
-#include <util/validation.h> // For strMessageMagic
+#include <util/message.h> // For MessageSign(), MessageVerify()
#include <wallet/wallet.h>
#include <vector>
@@ -133,30 +133,34 @@ void SignVerifyMessageDialog::on_signMessageButton_SM_clicked()
return;
}
- CKey key;
- if (!model->wallet().getPrivKey(GetScriptForDestination(destination), CKeyID(*pkhash), key))
- {
- ui->statusLabel_SM->setStyleSheet("QLabel { color: red; }");
- ui->statusLabel_SM->setText(tr("Private key for the entered address is not available."));
- return;
+ const std::string& message = ui->messageIn_SM->document()->toPlainText().toStdString();
+ std::string signature;
+ SigningResult res = model->wallet().signMessage(message, *pkhash, signature);
+
+ QString error;
+ switch (res) {
+ case SigningResult::OK:
+ error = tr("No error");
+ break;
+ case SigningResult::PRIVATE_KEY_NOT_AVAILABLE:
+ error = tr("Private key for the entered address is not available.");
+ break;
+ case SigningResult::SIGNING_FAILED:
+ error = tr("Message signing failed.");
+ break;
+ // no default case, so the compiler can warn about missing cases
}
- CHashWriter ss(SER_GETHASH, 0);
- ss << strMessageMagic;
- ss << ui->messageIn_SM->document()->toPlainText().toStdString();
-
- std::vector<unsigned char> vchSig;
- if (!key.SignCompact(ss.GetHash(), vchSig))
- {
+ if (res != SigningResult::OK) {
ui->statusLabel_SM->setStyleSheet("QLabel { color: red; }");
- ui->statusLabel_SM->setText(QString("<nobr>") + tr("Message signing failed.") + QString("</nobr>"));
+ ui->statusLabel_SM->setText(QString("<nobr>") + error + QString("</nobr>"));
return;
}
ui->statusLabel_SM->setStyleSheet("QLabel { color: green; }");
ui->statusLabel_SM->setText(QString("<nobr>") + tr("Message signed.") + QString("</nobr>"));
- ui->signatureOut_SM->setText(QString::fromStdString(EncodeBase64(vchSig.data(), vchSig.size())));
+ ui->signatureOut_SM->setText(QString::fromStdString(signature));
}
void SignVerifyMessageDialog::on_copySignatureButton_SM_clicked()
@@ -189,51 +193,57 @@ void SignVerifyMessageDialog::on_addressBookButton_VM_clicked()
void SignVerifyMessageDialog::on_verifyMessageButton_VM_clicked()
{
- CTxDestination destination = DecodeDestination(ui->addressIn_VM->text().toStdString());
- if (!IsValidDestination(destination)) {
+ const std::string& address = ui->addressIn_VM->text().toStdString();
+ const std::string& signature = ui->signatureIn_VM->text().toStdString();
+ const std::string& message = ui->messageIn_VM->document()->toPlainText().toStdString();
+
+ const auto result = MessageVerify(address, signature, message);
+
+ if (result == MessageVerificationResult::OK) {
+ ui->statusLabel_VM->setStyleSheet("QLabel { color: green; }");
+ } else {
ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
- ui->statusLabel_VM->setText(tr("The entered address is invalid.") + QString(" ") + tr("Please check the address and try again."));
- return;
}
- if (!boost::get<PKHash>(&destination)) {
+
+ switch (result) {
+ case MessageVerificationResult::OK:
+ ui->statusLabel_VM->setText(
+ QString("<nobr>") + tr("Message verified.") + QString("</nobr>")
+ );
+ return;
+ case MessageVerificationResult::ERR_INVALID_ADDRESS:
+ ui->statusLabel_VM->setText(
+ tr("The entered address is invalid.") + QString(" ") +
+ tr("Please check the address and try again.")
+ );
+ return;
+ case MessageVerificationResult::ERR_ADDRESS_NO_KEY:
ui->addressIn_VM->setValid(false);
- ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
- ui->statusLabel_VM->setText(tr("The entered address does not refer to a key.") + QString(" ") + tr("Please check the address and try again."));
+ ui->statusLabel_VM->setText(
+ tr("The entered address does not refer to a key.") + QString(" ") +
+ tr("Please check the address and try again.")
+ );
return;
- }
-
- bool fInvalid = false;
- std::vector<unsigned char> vchSig = DecodeBase64(ui->signatureIn_VM->text().toStdString().c_str(), &fInvalid);
-
- if (fInvalid)
- {
+ case MessageVerificationResult::ERR_MALFORMED_SIGNATURE:
ui->signatureIn_VM->setValid(false);
- ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
- ui->statusLabel_VM->setText(tr("The signature could not be decoded.") + QString(" ") + tr("Please check the signature and try again."));
+ ui->statusLabel_VM->setText(
+ tr("The signature could not be decoded.") + QString(" ") +
+ tr("Please check the signature and try again.")
+ );
return;
- }
-
- CHashWriter ss(SER_GETHASH, 0);
- ss << strMessageMagic;
- ss << ui->messageIn_VM->document()->toPlainText().toStdString();
-
- CPubKey pubkey;
- if (!pubkey.RecoverCompact(ss.GetHash(), vchSig))
- {
+ case MessageVerificationResult::ERR_PUBKEY_NOT_RECOVERED:
ui->signatureIn_VM->setValid(false);
- ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
- ui->statusLabel_VM->setText(tr("The signature did not match the message digest.") + QString(" ") + tr("Please check the signature and try again."));
+ ui->statusLabel_VM->setText(
+ tr("The signature did not match the message digest.") + QString(" ") +
+ tr("Please check the signature and try again.")
+ );
return;
- }
-
- if (!(CTxDestination(PKHash(pubkey)) == destination)) {
- ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
- ui->statusLabel_VM->setText(QString("<nobr>") + tr("Message verification failed.") + QString("</nobr>"));
+ case MessageVerificationResult::ERR_NOT_SIGNED:
+ ui->statusLabel_VM->setText(
+ QString("<nobr>") + tr("Message verification failed.") + QString("</nobr>")
+ );
return;
}
-
- ui->statusLabel_VM->setStyleSheet("QLabel { color: green; }");
- ui->statusLabel_VM->setText(QString("<nobr>") + tr("Message verified.") + QString("</nobr>"));
}
void SignVerifyMessageDialog::on_clearButton_VM_clicked()
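[editor's note] The dialog above now delegates all cryptographic work to MessageVerify() and only maps the returned enum onto UI strings. For reference, a minimal non-GUI sketch of the same mapping, assuming the helper and MessageVerificationResult enum are declared in util/message.h as in the tree this diff targets:

    // Sketch only: mirrors the switch in the hunk above without any Qt code.
    #include <util/message.h> // assumed location of MessageVerify()/MessageVerificationResult
    #include <string>

    std::string DescribeVerification(const std::string& address,
                                     const std::string& signature,
                                     const std::string& message)
    {
        switch (MessageVerify(address, signature, message)) {
        case MessageVerificationResult::OK:
            return "Message verified.";
        case MessageVerificationResult::ERR_INVALID_ADDRESS:
            return "The entered address is invalid.";
        case MessageVerificationResult::ERR_ADDRESS_NO_KEY:
            return "The entered address does not refer to a key.";
        case MessageVerificationResult::ERR_MALFORMED_SIGNATURE:
            return "The signature could not be decoded.";
        case MessageVerificationResult::ERR_PUBKEY_NOT_RECOVERED:
            return "The signature did not match the message digest.";
        case MessageVerificationResult::ERR_NOT_SIGNED:
            return "Message verification failed.";
        }
        return "Unknown result"; // unreachable with the enum values shown above
    }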
diff --git a/src/qt/splashscreen.cpp b/src/qt/splashscreen.cpp
index e19833019d..e4ffa6cd9a 100644
--- a/src/qt/splashscreen.cpp
+++ b/src/qt/splashscreen.cpp
@@ -137,7 +137,7 @@ SplashScreen::~SplashScreen()
bool SplashScreen::eventFilter(QObject * obj, QEvent * ev) {
if (ev->type() == QEvent::KeyPress) {
QKeyEvent *keyEvent = static_cast<QKeyEvent *>(ev);
- if(keyEvent->text()[0] == 'q') {
+ if (keyEvent->key() == Qt::Key_Q) {
m_node.startShutdown();
}
}
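[editor's note] Comparing against Qt::Key_Q via QKeyEvent::key() is case- and modifier-independent, and it avoids indexing QKeyEvent::text(), which can be empty for some key events. A hedged stand-alone sketch of the same kind of filter (class name and the shutdown hook are illustrative, not part of this diff):

    #include <QEvent>
    #include <QKeyEvent>
    #include <QObject>

    class QuitKeyFilter : public QObject
    {
    protected:
        bool eventFilter(QObject* obj, QEvent* ev) override
        {
            if (ev->type() == QEvent::KeyPress) {
                auto* keyEvent = static_cast<QKeyEvent*>(ev);
                if (keyEvent->key() == Qt::Key_Q) {
                    // request shutdown here, e.g. m_node.startShutdown() as above
                }
            }
            return QObject::eventFilter(obj, ev); // keep default processing
        }
    };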
diff --git a/src/qt/test/addressbooktests.cpp b/src/qt/test/addressbooktests.cpp
index 6e8d383847..0f082802cc 100644
--- a/src/qt/test/addressbooktests.cpp
+++ b/src/qt/test/addressbooktests.cpp
@@ -1,3 +1,7 @@
+// Copyright (c) 2017-2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
#include <qt/test/addressbooktests.h>
#include <qt/test/util.h>
#include <test/util/setup_common.h>
@@ -55,6 +59,7 @@ void TestAddAddressesToSendBook(interfaces::Node& node)
{
TestChain100Setup test;
std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), WalletLocation(), WalletDatabase::CreateMock());
+ wallet->SetupLegacyScriptPubKeyMan();
bool firstRun;
wallet->LoadWallet(firstRun);
diff --git a/src/qt/test/addressbooktests.h b/src/qt/test/addressbooktests.h
index 9944750ec8..5de89c7592 100644
--- a/src/qt/test/addressbooktests.h
+++ b/src/qt/test/addressbooktests.h
@@ -1,3 +1,7 @@
+// Copyright (c) 2018-2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
#ifndef BITCOIN_QT_TEST_ADDRESSBOOKTESTS_H
#define BITCOIN_QT_TEST_ADDRESSBOOKTESTS_H
diff --git a/src/qt/test/apptests.h b/src/qt/test/apptests.h
index a428728db8..d16c9fe487 100644
--- a/src/qt/test/apptests.h
+++ b/src/qt/test/apptests.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018-2017 The Bitcoin Core developers
+// Copyright (c) 2018-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/qt/test/test_main.cpp b/src/qt/test/test_main.cpp
index f8c0379ea8..aefdcd2716 100644
--- a/src/qt/test/test_main.cpp
+++ b/src/qt/test/test_main.cpp
@@ -37,6 +37,8 @@ Q_IMPORT_PLUGIN(QCocoaIntegrationPlugin);
#endif
#endif
+const std::function<void(const std::string&)> G_TEST_LOG_FUN{};
+
// This is all you need to run all the tests
int main(int argc, char *argv[])
{
diff --git a/src/qt/test/util.cpp b/src/qt/test/util.cpp
index ae2fb93bf7..e09f0ad77d 100644
--- a/src/qt/test/util.cpp
+++ b/src/qt/test/util.cpp
@@ -1,3 +1,7 @@
+// Copyright (c) 2018 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
#include <QApplication>
#include <QMessageBox>
#include <QPushButton>
diff --git a/src/qt/test/util.h b/src/qt/test/util.h
index 377f07dcba..763847606a 100644
--- a/src/qt/test/util.h
+++ b/src/qt/test/util.h
@@ -1,3 +1,7 @@
+// Copyright (c) 2018 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
#ifndef BITCOIN_QT_TEST_UTIL_H
#define BITCOIN_QT_TEST_UTIL_H
diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp
index f6d2816ff8..c1a0f63f73 100644
--- a/src/qt/test/wallettests.cpp
+++ b/src/qt/test/wallettests.cpp
@@ -1,3 +1,7 @@
+// Copyright (c) 2015-2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
#include <qt/test/wallettests.h>
#include <qt/test/util.h>
@@ -139,10 +143,9 @@ void TestGUI(interfaces::Node& node)
bool firstRun;
wallet->LoadWallet(firstRun);
{
- auto spk_man = wallet->GetLegacyScriptPubKeyMan();
+ auto spk_man = wallet->GetOrCreateLegacyScriptPubKeyMan();
auto locked_chain = wallet->chain().lock();
- LOCK(wallet->cs_wallet);
- AssertLockHeld(spk_man->cs_wallet);
+ LOCK2(wallet->cs_wallet, spk_man->cs_KeyStore);
wallet->SetAddressBook(GetDestinationForKey(test.coinbaseKey.GetPubKey(), wallet->m_default_address_type), "", "receive");
spk_man->AddKeyPubKey(test.coinbaseKey, test.coinbaseKey.GetPubKey());
wallet->SetLastBlockProcessed(105, ::ChainActive().Tip()->GetBlockHash());
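[editor's note] The test now takes the wallet lock and the key store lock together with LOCK2 instead of asserting a lock it never held on the script pubkey manager. A brief sketch of the pattern, using the same names as the hunk above:

    // Sketch: guard both critical sections for the duration of one scope.
    // LOCK2 is the sync.h macro used above; wallet/spk_man stand in for the
    // CWallet and LegacyScriptPubKeyMan objects the test already has.
    {
        LOCK2(wallet->cs_wallet, spk_man->cs_KeyStore);
        // ... operate on the wallet and its key store under both locks ...
    } // both locks released here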
diff --git a/src/qt/test/wallettests.h b/src/qt/test/wallettests.h
index 0a7b57a678..8ee40bf07f 100644
--- a/src/qt/test/wallettests.h
+++ b/src/qt/test/wallettests.h
@@ -1,3 +1,7 @@
+// Copyright (c) 2017-2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
#ifndef BITCOIN_QT_TEST_WALLETTESTS_H
#define BITCOIN_QT_TEST_WALLETTESTS_H
diff --git a/src/qt/trafficgraphwidget.h b/src/qt/trafficgraphwidget.h
index a0b83e27fe..af5890ba24 100644
--- a/src/qt/trafficgraphwidget.h
+++ b/src/qt/trafficgraphwidget.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2018 The Bitcoin Core developers
+// Copyright (c) 2011-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/qt/transactiondescdialog.h b/src/qt/transactiondescdialog.h
index 20b2092ed7..74e34cde87 100644
--- a/src/qt/transactiondescdialog.h
+++ b/src/qt/transactiondescdialog.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2018 The Bitcoin Core developers
+// Copyright (c) 2011-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/qt/utilitydialog.cpp b/src/qt/utilitydialog.cpp
index efe213902e..3101fb01c3 100644
--- a/src/qt/utilitydialog.cpp
+++ b/src/qt/utilitydialog.cpp
@@ -10,8 +10,6 @@
#include <qt/forms/ui_helpmessagedialog.h>
-#include <qt/bitcoingui.h>
-
#include <clientversion.h>
#include <init.h>
#include <util/system.h>
@@ -21,9 +19,10 @@
#include <QCloseEvent>
#include <QLabel>
+#include <QMainWindow>
#include <QRegExp>
-#include <QTextTable>
#include <QTextCursor>
+#include <QTextTable>
#include <QVBoxLayout>
/** "Help message" or "About" dialog box */
@@ -144,10 +143,9 @@ ShutdownWindow::ShutdownWindow(QWidget *parent, Qt::WindowFlags f):
setLayout(layout);
}
-QWidget *ShutdownWindow::showShutdownWindow(BitcoinGUI *window)
+QWidget* ShutdownWindow::showShutdownWindow(QMainWindow* window)
{
- if (!window)
- return nullptr;
+ assert(window != nullptr);
// Show a simple window indicating shutdown status
QWidget *shutdownWindow = new ShutdownWindow();
diff --git a/src/qt/utilitydialog.h b/src/qt/utilitydialog.h
index f1cedff282..833b86fd3e 100644
--- a/src/qt/utilitydialog.h
+++ b/src/qt/utilitydialog.h
@@ -6,9 +6,11 @@
#define BITCOIN_QT_UTILITYDIALOG_H
#include <QDialog>
-#include <QObject>
+#include <QWidget>
-class BitcoinGUI;
+QT_BEGIN_NAMESPACE
+class QMainWindow;
+QT_END_NAMESPACE
namespace interfaces {
class Node;
@@ -46,7 +48,7 @@ class ShutdownWindow : public QWidget
public:
explicit ShutdownWindow(QWidget *parent=nullptr, Qt::WindowFlags f=Qt::Widget);
- static QWidget *showShutdownWindow(BitcoinGUI *window);
+ static QWidget* showShutdownWindow(QMainWindow* window);
protected:
void closeEvent(QCloseEvent *event);
diff --git a/src/qt/walletframe.cpp b/src/qt/walletframe.cpp
index 4b2b475883..dac3326cc4 100644
--- a/src/qt/walletframe.cpp
+++ b/src/qt/walletframe.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2019 The Bitcoin Core developers
+// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -37,16 +37,19 @@ WalletFrame::~WalletFrame()
void WalletFrame::setClientModel(ClientModel *_clientModel)
{
this->clientModel = _clientModel;
+
+ for (auto i = mapWalletViews.constBegin(); i != mapWalletViews.constEnd(); ++i) {
+ i.value()->setClientModel(_clientModel);
+ }
}
-void WalletFrame::addWallet(WalletModel *walletModel)
+bool WalletFrame::addWallet(WalletModel *walletModel)
{
- if (!gui || !clientModel || !walletModel) return;
+ if (!gui || !clientModel || !walletModel) return false;
- if (mapWalletViews.count(walletModel) > 0) return;
+ if (mapWalletViews.count(walletModel) > 0) return false;
WalletView *walletView = new WalletView(platformStyle, this);
- walletView->setBitcoinGUI(gui);
walletView->setClientModel(clientModel);
walletView->setWalletModel(walletModel);
walletView->showOutOfSyncWarning(bOutOfSync);
@@ -62,6 +65,16 @@ void WalletFrame::addWallet(WalletModel *walletModel)
mapWalletViews[walletModel] = walletView;
connect(walletView, &WalletView::outOfSyncWarningClicked, this, &WalletFrame::outOfSyncWarningClicked);
+ connect(walletView, &WalletView::transactionClicked, gui, &BitcoinGUI::gotoHistoryPage);
+ connect(walletView, &WalletView::coinsSent, gui, &BitcoinGUI::gotoHistoryPage);
+ connect(walletView, &WalletView::message, [this](const QString& title, const QString& message, unsigned int style) {
+ gui->message(title, message, style);
+ });
+ connect(walletView, &WalletView::encryptionStatusChanged, gui, &BitcoinGUI::updateWalletStatus);
+ connect(walletView, &WalletView::incomingTransaction, gui, &BitcoinGUI::incomingTransaction);
+ connect(walletView, &WalletView::hdEnabledStatusChanged, gui, &BitcoinGUI::updateWalletStatus);
+
+ return true;
}
void WalletFrame::setCurrentWallet(WalletModel* wallet_model)
diff --git a/src/qt/walletframe.h b/src/qt/walletframe.h
index 156653f47d..20fad08b0e 100644
--- a/src/qt/walletframe.h
+++ b/src/qt/walletframe.h
@@ -36,7 +36,7 @@ public:
void setClientModel(ClientModel *clientModel);
- void addWallet(WalletModel *walletModel);
+ bool addWallet(WalletModel *walletModel);
void setCurrentWallet(WalletModel* wallet_model);
void removeWallet(WalletModel* wallet_model);
void removeAllWallets();
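[editor's note] With addWallet() now reporting whether a view was actually created, callers can detect duplicate or premature registrations instead of having them ignored silently. An illustrative usage sketch (the pointer names are placeholders for whatever the GUI wires up):

    #include <QDebug>

    // Illustrative only: react to a failed registration.
    if (!walletFrame->addWallet(walletModel)) {
        qWarning() << "wallet view not added: missing gui/client model, or already registered";
    }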
diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp
index fb92e29f21..8a84a8c168 100644
--- a/src/qt/walletmodel.cpp
+++ b/src/qt/walletmodel.cpp
@@ -10,6 +10,7 @@
#include <qt/addresstablemodel.h>
#include <qt/guiconstants.h>
+#include <qt/guiutil.h>
#include <qt/optionsmodel.h>
#include <qt/paymentserver.h>
#include <qt/recentrequeststablemodel.h>
@@ -81,12 +82,12 @@ void WalletModel::pollBalanceChanged()
return;
}
- if(fForceCheckBalanceChanged || m_node.getNumBlocks() != cachedNumBlocks)
+ if(fForceCheckBalanceChanged || numBlocks != cachedNumBlocks)
{
fForceCheckBalanceChanged = false;
// Balance and number of transactions might have changed
- cachedNumBlocks = m_node.getNumBlocks();
+ cachedNumBlocks = numBlocks;
checkBalanceChanged(new_balances);
if(transactionTableModel)
@@ -487,8 +488,10 @@ bool WalletModel::bumpFee(uint256 hash, uint256& new_hash)
return false;
}
+ const bool create_psbt = privateKeysDisabled();
+
// allow a user based fee verification
- QString questionString = tr("Do you want to increase the fee?");
+ QString questionString = create_psbt ? tr("Do you want to draft a transaction with fee increase?") : tr("Do you want to increase the fee?");
questionString.append("<br />");
questionString.append("<table style=\"text-align: left;\">");
questionString.append("<tr><td>");
@@ -519,6 +522,23 @@ bool WalletModel::bumpFee(uint256 hash, uint256& new_hash)
return false;
}
+ // Short-circuit if we are returning a bumped transaction PSBT to clipboard
+ if (create_psbt) {
+ PartiallySignedTransaction psbtx(mtx);
+ bool complete = false;
+ const TransactionError err = wallet().fillPSBT(psbtx, complete, SIGHASH_ALL, false /* sign */, true /* bip32derivs */);
+ if (err != TransactionError::OK || complete) {
+ QMessageBox::critical(nullptr, tr("Fee bump error"), tr("Can't draft transaction."));
+ return false;
+ }
+ // Serialize the PSBT
+ CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
+ ssTx << psbtx;
+ GUIUtil::setClipboard(EncodeBase64(ssTx.str()).c_str());
+ Q_EMIT message(tr("PSBT copied"), "Copied to clipboard", CClientUIInterface::MSG_INFORMATION);
+ return true;
+ }
+
// sign bumped transaction
if (!m_wallet->signBumpTransaction(mtx)) {
QMessageBox::critical(nullptr, tr("Fee bump error"), tr("Can't sign transaction."));
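[editor's note] The new PSBT branch reuses the standard serialization path: the bumped transaction is filled via fillPSBT() without signing, streamed into a CDataStream, base64-encoded and handed to the clipboard. A condensed sketch of just the encoding step, using the same types and helpers as the hunk above:

    // Sketch: encode an unsigned PSBT for the clipboard, mirroring the code above.
    PartiallySignedTransaction psbtx(mtx);           // mtx: the bumped CMutableTransaction
    CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); // network serialization of the PSBT
    ssTx << psbtx;
    const std::string base64_psbt = EncodeBase64(ssTx.str());
    GUIUtil::setClipboard(base64_psbt.c_str());      // hand the string to Qt's clipboard helper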
diff --git a/src/qt/walletview.cpp b/src/qt/walletview.cpp
index c777d633be..bdcb82e06b 100644
--- a/src/qt/walletview.cpp
+++ b/src/qt/walletview.cpp
@@ -6,7 +6,6 @@
#include <qt/addressbookpage.h>
#include <qt/askpassphrasedialog.h>
-#include <qt/bitcoingui.h>
#include <qt/clientmodel.h>
#include <qt/guiutil.h>
#include <qt/optionsmodel.h>
@@ -65,11 +64,13 @@ WalletView::WalletView(const PlatformStyle *_platformStyle, QWidget *parent):
addWidget(receiveCoinsPage);
addWidget(sendCoinsPage);
+ connect(overviewPage, &OverviewPage::transactionClicked, this, &WalletView::transactionClicked);
// Clicking on a transaction on the overview pre-selects the transaction on the transaction history page
connect(overviewPage, &OverviewPage::transactionClicked, transactionView, static_cast<void (TransactionView::*)(const QModelIndex&)>(&TransactionView::focusTransaction));
connect(overviewPage, &OverviewPage::outOfSyncWarningClicked, this, &WalletView::requestedSyncWarningInfo);
+ connect(sendCoinsPage, &SendCoinsDialog::coinsSent, this, &WalletView::coinsSent);
// Highlight transaction after send
connect(sendCoinsPage, &SendCoinsDialog::coinsSent, transactionView, static_cast<void (TransactionView::*)(const uint256&)>(&TransactionView::focusTransaction));
@@ -86,32 +87,6 @@ WalletView::~WalletView()
{
}
-void WalletView::setBitcoinGUI(BitcoinGUI *gui)
-{
- if (gui)
- {
- // Clicking on a transaction on the overview page simply sends you to transaction history page
- connect(overviewPage, &OverviewPage::transactionClicked, gui, &BitcoinGUI::gotoHistoryPage);
-
- // Navigate to transaction history page after send
- connect(sendCoinsPage, &SendCoinsDialog::coinsSent, gui, &BitcoinGUI::gotoHistoryPage);
-
- // Receive and report messages
- connect(this, &WalletView::message, [gui](const QString &title, const QString &message, unsigned int style) {
- gui->message(title, message, style);
- });
-
- // Pass through encryption status changed signals
- connect(this, &WalletView::encryptionStatusChanged, gui, &BitcoinGUI::updateWalletStatus);
-
- // Pass through transaction notifications
- connect(this, &WalletView::incomingTransaction, gui, &BitcoinGUI::incomingTransaction);
-
- // Connect HD enabled state signal
- connect(this, &WalletView::hdEnabledStatusChanged, gui, &BitcoinGUI::updateWalletStatus);
- }
-}
-
void WalletView::setClientModel(ClientModel *_clientModel)
{
this->clientModel = _clientModel;
diff --git a/src/qt/walletview.h b/src/qt/walletview.h
index 4313f0bfa2..78d870f59f 100644
--- a/src/qt/walletview.h
+++ b/src/qt/walletview.h
@@ -9,7 +9,6 @@
#include <QStackedWidget>
-class BitcoinGUI;
class ClientModel;
class OverviewPage;
class PlatformStyle;
@@ -39,7 +38,6 @@ public:
explicit WalletView(const PlatformStyle *platformStyle, QWidget *parent);
~WalletView();
- void setBitcoinGUI(BitcoinGUI *gui);
/** Set the client model.
The client model represents the part of the core that communicates with the P2P network, and is wallet-agnostic.
*/
@@ -68,7 +66,7 @@ private:
TransactionView *transactionView;
- QProgressDialog *progressDialog;
+ QProgressDialog* progressDialog{nullptr};
const PlatformStyle *platformStyle;
public Q_SLOTS:
@@ -115,6 +113,8 @@ public Q_SLOTS:
void requestedSyncWarningInfo();
Q_SIGNALS:
+ void transactionClicked();
+ void coinsSent();
/** Fired when a message should be reported to the user */
void message(const QString &title, const QString &message, unsigned int style);
/** Encryption status of wallet changed */
diff --git a/src/randomenv.cpp b/src/randomenv.cpp
index 6992c720ff..8b3d478529 100644
--- a/src/randomenv.cpp
+++ b/src/randomenv.cpp
@@ -38,11 +38,6 @@
#include <sys/utsname.h>
#include <unistd.h>
#endif
-#ifdef __MACH__
-#include <mach/clock.h>
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-#endif
#if HAVE_DECL_GETIFADDRS
#include <ifaddrs.h>
#endif
@@ -237,8 +232,6 @@ void RandAddDynamicEnv(CSHA512& hasher)
GetSystemTimeAsFileTime(&ftime);
hasher << ftime;
#else
-# ifndef __MACH__
- // On non-MacOS systems, use various clock_gettime() calls.
struct timespec ts = {};
# ifdef CLOCK_MONOTONIC
clock_gettime(CLOCK_MONOTONIC, &ts);
@@ -252,18 +245,6 @@ void RandAddDynamicEnv(CSHA512& hasher)
clock_gettime(CLOCK_BOOTTIME, &ts);
hasher << ts;
# endif
-# else
- // On MacOS use mach_absolute_time (number of CPU ticks since boot) as a replacement for CLOCK_MONOTONIC,
- // and clock_get_time for CALENDAR_CLOCK as a replacement for CLOCK_REALTIME.
- hasher << mach_absolute_time();
- // From https://gist.github.com/jbenet/1087739
- clock_serv_t cclock;
- mach_timespec_t mts = {};
- if (host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock) == KERN_SUCCESS && clock_get_time(cclock, &mts) == KERN_SUCCESS) {
- hasher << mts;
- mach_port_deallocate(mach_task_self(), cclock);
- }
-# endif
// gettimeofday is available on all UNIX systems, but only has microsecond precision.
struct timeval tv = {};
gettimeofday(&tv, nullptr);
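[editor's note] The Mach-specific branch appears to be removable because macOS 10.12 and later ship clock_gettime(), so the generic POSIX path retained above also covers Apple platforms. A hedged sketch of that path (it relies on the raw-struct << overloads defined locally in randomenv.cpp):

    #include <crypto/sha512.h>
    #include <ctime>
    #include <sys/time.h>

    // Sketch only: feed monotonic and wall-clock timestamps into the hasher
    // using portable POSIX calls, as the remaining code path does.
    void AddClockEntropy(CSHA512& hasher)
    {
    #ifdef CLOCK_MONOTONIC
        struct timespec ts = {};
        clock_gettime(CLOCK_MONOTONIC, &ts);
        hasher << ts; // uses the local serialization helper from randomenv.cpp
    #endif
        struct timeval tv = {};
        gettimeofday(&tv, nullptr); // microsecond precision, available on all UNIX systems
        hasher << tv;
    }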
diff --git a/src/reverselock.h b/src/reverselock.h
deleted file mode 100644
index 9d9cc9fd77..0000000000
--- a/src/reverselock.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2015-2016 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef BITCOIN_REVERSELOCK_H
-#define BITCOIN_REVERSELOCK_H
-
-/**
- * An RAII-style reverse lock. Unlocks on construction and locks on destruction.
- */
-template<typename Lock>
-class reverse_lock
-{
-public:
-
- explicit reverse_lock(Lock& _lock) : lock(_lock) {
- _lock.unlock();
- _lock.swap(templock);
- }
-
- ~reverse_lock() {
- templock.lock();
- templock.swap(lock);
- }
-
-private:
- reverse_lock(reverse_lock const&);
- reverse_lock& operator=(reverse_lock const&);
-
- Lock& lock;
- Lock templock;
-};
-
-#endif // BITCOIN_REVERSELOCK_H
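[editor's note] The deleted header provided an RAII helper that temporarily released a held lock for the lifetime of a scope. For reference, the same semantics can be sketched against std::unique_lock; this is an illustration of what the removed class did (simplified: it omits the swap-based protection of the original), not the replacement the codebase adopted:

    #include <mutex>

    // Illustrative reimplementation of the deleted reverse_lock behaviour:
    // unlock on construction, re-lock on destruction.
    template <typename Lock>
    class ReverseLockSketch
    {
    public:
        explicit ReverseLockSketch(Lock& lock) : m_lock(lock) { m_lock.unlock(); }
        ~ReverseLockSketch() { m_lock.lock(); }
        ReverseLockSketch(const ReverseLockSketch&) = delete;
        ReverseLockSketch& operator=(const ReverseLockSketch&) = delete;

    private:
        Lock& m_lock;
    };

    // Usage: with std::unique_lock<std::mutex> lock(mutex) held,
    // { ReverseLockSketch<std::unique_lock<std::mutex>> rev(lock); /* unlocked work */ }
    // re-acquires the mutex when rev goes out of scope.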
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index eb5148eebd..133d1b809f 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -31,7 +31,6 @@
#include <undo.h>
#include <util/strencodings.h>
#include <util/system.h>
-#include <util/validation.h>
#include <validation.h>
#include <validationinterface.h>
#include <warnings.h>
@@ -178,8 +177,7 @@ static UniValue getblockcount(const JSONRPCRequest& request)
"The genesis block has height 0.\n",
{},
RPCResult{
- "n (numeric) The current block count\n"
- },
+ RPCResult::Type::NUM, "", "The current block count"},
RPCExamples{
HelpExampleCli("getblockcount", "")
+ HelpExampleRpc("getblockcount", "")
@@ -196,8 +194,7 @@ static UniValue getbestblockhash(const JSONRPCRequest& request)
"\nReturns the hash of the best (tip) block in the most-work fully-validated chain.\n",
{},
RPCResult{
- "\"hex\" (string) the block hash, hex-encoded\n"
- },
+ RPCResult::Type::STR_HEX, "", "the block hash, hex-encoded"},
RPCExamples{
HelpExampleCli("getbestblockhash", "")
+ HelpExampleRpc("getbestblockhash", "")
@@ -227,11 +224,11 @@ static UniValue waitfornewblock(const JSONRPCRequest& request)
{"timeout", RPCArg::Type::NUM, /* default */ "0", "Time in milliseconds to wait for a response. 0 indicates no timeout."},
},
RPCResult{
- "{ (json object)\n"
- " \"hash\" : { (string) The blockhash\n"
- " \"height\" : { (int) Block height\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hash", "The blockhash"},
+ {RPCResult::Type::NUM, "height", "Block height"},
+ }},
RPCExamples{
HelpExampleCli("waitfornewblock", "1000")
+ HelpExampleRpc("waitfornewblock", "1000")
@@ -267,11 +264,11 @@ static UniValue waitforblock(const JSONRPCRequest& request)
{"timeout", RPCArg::Type::NUM, /* default */ "0", "Time in milliseconds to wait for a response. 0 indicates no timeout."},
},
RPCResult{
- "{ (json object)\n"
- " \"hash\" : { (string) The blockhash\n"
- " \"height\" : { (int) Block height\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hash", "The blockhash"},
+ {RPCResult::Type::NUM, "height", "Block height"},
+ }},
RPCExamples{
HelpExampleCli("waitforblock", "\"0000000000079f8ef3d2c688c244eb7a4570b24c9ed7b4a8c619eb02596f8862\", 1000")
+ HelpExampleRpc("waitforblock", "\"0000000000079f8ef3d2c688c244eb7a4570b24c9ed7b4a8c619eb02596f8862\", 1000")
@@ -311,11 +308,11 @@ static UniValue waitforblockheight(const JSONRPCRequest& request)
{"timeout", RPCArg::Type::NUM, /* default */ "0", "Time in milliseconds to wait for a response. 0 indicates no timeout."},
},
RPCResult{
- "{ (json object)\n"
- " \"hash\" : { (string) The blockhash\n"
- " \"height\" : { (int) Block height\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hash", "The blockhash"},
+ {RPCResult::Type::NUM, "height", "Block height"},
+ }},
RPCExamples{
HelpExampleCli("waitforblockheight", "\"100\", 1000")
+ HelpExampleRpc("waitforblockheight", "\"100\", 1000")
@@ -365,8 +362,7 @@ static UniValue getdifficulty(const JSONRPCRequest& request)
"\nReturns the proof-of-work difficulty as a multiple of the minimum difficulty.\n",
{},
RPCResult{
- "n.nnn (numeric) the proof-of-work difficulty as a multiple of the minimum difficulty.\n"
- },
+ RPCResult::Type::NUM, "", "the proof-of-work difficulty as a multiple of the minimum difficulty."},
RPCExamples{
HelpExampleCli("getdifficulty", "")
+ HelpExampleRpc("getdifficulty", "")
@@ -377,37 +373,35 @@ static UniValue getdifficulty(const JSONRPCRequest& request)
return GetDifficulty(::ChainActive().Tip());
}
-static std::string EntryDescriptionString()
-{
- return " \"vsize\" : n, (numeric) virtual transaction size as defined in BIP 141. This is different from actual serialized size for witness transactions as witness data is discounted.\n"
- " \"size\" : n, (numeric) (DEPRECATED) same as vsize. Only returned if bitcoind is started with -deprecatedrpc=size\n"
- " size will be completely removed in v0.20.\n"
- " \"weight\" : n, (numeric) transaction weight as defined in BIP 141.\n"
- " \"fee\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + " (DEPRECATED)\n"
- " \"modifiedfee\" : n, (numeric) transaction fee with fee deltas used for mining priority (DEPRECATED)\n"
- " \"time\" : n, (numeric) local time transaction entered pool in seconds since 1 Jan 1970 GMT\n"
- " \"height\" : n, (numeric) block height when transaction entered pool\n"
- " \"descendantcount\" : n, (numeric) number of in-mempool descendant transactions (including this one)\n"
- " \"descendantsize\" : n, (numeric) virtual transaction size of in-mempool descendants (including this one)\n"
- " \"descendantfees\" : n, (numeric) modified fees (see above) of in-mempool descendants (including this one) (DEPRECATED)\n"
- " \"ancestorcount\" : n, (numeric) number of in-mempool ancestor transactions (including this one)\n"
- " \"ancestorsize\" : n, (numeric) virtual transaction size of in-mempool ancestors (including this one)\n"
- " \"ancestorfees\" : n, (numeric) modified fees (see above) of in-mempool ancestors (including this one) (DEPRECATED)\n"
- " \"wtxid\" : hash, (string) hash of serialized transaction, including witness data\n"
- " \"fees\" : {\n"
- " \"base\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + "\n"
- " \"modified\" : n, (numeric) transaction fee with fee deltas used for mining priority in " + CURRENCY_UNIT + "\n"
- " \"ancestor\" : n, (numeric) modified fees (see above) of in-mempool ancestors (including this one) in " + CURRENCY_UNIT + "\n"
- " \"descendant\" : n, (numeric) modified fees (see above) of in-mempool descendants (including this one) in " + CURRENCY_UNIT + "\n"
- " }\n"
- " \"depends\" : [ (array) unconfirmed transactions used as inputs for this transaction\n"
- " \"transactionid\", (string) parent transaction id\n"
- " ... ]\n"
- " \"spentby\" : [ (array) unconfirmed transactions spending outputs from this transaction\n"
- " \"transactionid\", (string) child transaction id\n"
- " ... ]\n"
- " \"bip125-replaceable\" : true|false, (boolean) Whether this transaction could be replaced due to BIP125 (replace-by-fee)\n";
-}
+static std::vector<RPCResult> MempoolEntryDescription() { return {
+ RPCResult{RPCResult::Type::NUM, "vsize", "virtual transaction size as defined in BIP 141. This is different from actual serialized size for witness transactions as witness data is discounted."},
+ RPCResult{RPCResult::Type::NUM, "size", "(DEPRECATED) same as vsize. Only returned if bitcoind is started with -deprecatedrpc=size\n"
+ "size will be completely removed in v0.20."},
+ RPCResult{RPCResult::Type::NUM, "weight", "transaction weight as defined in BIP 141."},
+ RPCResult{RPCResult::Type::STR_AMOUNT, "fee", "transaction fee in " + CURRENCY_UNIT + " (DEPRECATED)"},
+ RPCResult{RPCResult::Type::STR_AMOUNT, "modifiedfee", "transaction fee with fee deltas used for mining priority (DEPRECATED)"},
+ RPCResult{RPCResult::Type::NUM_TIME, "time", "local time transaction entered pool in seconds since 1 Jan 1970 GMT"},
+ RPCResult{RPCResult::Type::NUM, "height", "block height when transaction entered pool"},
+ RPCResult{RPCResult::Type::NUM, "descendantcount", "number of in-mempool descendant transactions (including this one)"},
+ RPCResult{RPCResult::Type::NUM, "descendantsize", "virtual transaction size of in-mempool descendants (including this one)"},
+ RPCResult{RPCResult::Type::STR_AMOUNT, "descendantfees", "modified fees (see above) of in-mempool descendants (including this one) (DEPRECATED)"},
+ RPCResult{RPCResult::Type::NUM, "ancestorcount", "number of in-mempool ancestor transactions (including this one)"},
+ RPCResult{RPCResult::Type::NUM, "ancestorsize", "virtual transaction size of in-mempool ancestors (including this one)"},
+ RPCResult{RPCResult::Type::STR_AMOUNT, "ancestorfees", "modified fees (see above) of in-mempool ancestors (including this one) (DEPRECATED)"},
+ RPCResult{RPCResult::Type::STR_HEX, "wtxid", "hash of serialized transaction, including witness data"},
+ RPCResult{RPCResult::Type::OBJ, "fees", "",
+ {
+ RPCResult{RPCResult::Type::STR_AMOUNT, "base", "transaction fee in " + CURRENCY_UNIT},
+ RPCResult{RPCResult::Type::STR_AMOUNT, "modified", "transaction fee with fee deltas used for mining priority in " + CURRENCY_UNIT},
+ RPCResult{RPCResult::Type::STR_AMOUNT, "ancestor", "modified fees (see above) of in-mempool ancestors (including this one) in " + CURRENCY_UNIT},
+ RPCResult{RPCResult::Type::STR_AMOUNT, "descendant", "modified fees (see above) of in-mempool descendants (including this one) in " + CURRENCY_UNIT},
+ }},
+ RPCResult{RPCResult::Type::ARR, "depends", "unconfirmed transactions used as inputs for this transaction",
+ {RPCResult{RPCResult::Type::STR_HEX, "transactionid", "parent transaction id"}}},
+ RPCResult{RPCResult::Type::ARR, "spentby", "unconfirmed transactions spending outputs from this transaction",
+ {RPCResult{RPCResult::Type::STR_HEX, "transactionid", "child transaction id"}}},
+ RPCResult{RPCResult::Type::BOOL, "bip125-replaceable", "Whether this transaction could be replaced due to BIP125 (replace-by-fee)"},
+};}
static void entryToJSON(const CTxMemPool& pool, UniValue& info, const CTxMemPoolEntry& e) EXCLUSIVE_LOCKS_REQUIRED(pool.cs)
{
@@ -506,17 +500,17 @@ static UniValue getrawmempool(const JSONRPCRequest& request)
{
{"verbose", RPCArg::Type::BOOL, /* default */ "false", "True for a json object, false for array of transaction ids"},
},
- RPCResult{"for verbose = false",
- "[ (json array of string)\n"
- " \"transactionid\" (string) The transaction id\n"
- " ,...\n"
- "]\n"
- "\nResult: (for verbose = true):\n"
- "{ (json object)\n"
- " \"transactionid\" : { (json object)\n"
- + EntryDescriptionString()
- + " }, ...\n"
- "}\n"
+ {
+ RPCResult{"for verbose = false",
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "", "The transaction id"},
+ }},
+ RPCResult{"for verbose = true",
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()},
+ }},
},
RPCExamples{
HelpExampleCli("getrawmempool", "true")
@@ -541,18 +535,10 @@ static UniValue getmempoolancestors(const JSONRPCRequest& request)
},
{
RPCResult{"for verbose = false",
- "[ (json array of strings)\n"
- " \"transactionid\" (string) The transaction id of an in-mempool ancestor transaction\n"
- " ,...\n"
- "]\n"
- },
+ RPCResult::Type::ARR, "", "",
+ {{RPCResult::Type::STR_HEX, "", "The transaction id of an in-mempool ancestor transaction"}}},
RPCResult{"for verbose = true",
- "{ (json object)\n"
- " \"transactionid\" : { (json object)\n"
- + EntryDescriptionString()
- + " }, ...\n"
- "}\n"
- },
+ RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()},
},
RPCExamples{
HelpExampleCli("getmempoolancestors", "\"mytxid\"")
@@ -609,18 +595,13 @@ static UniValue getmempooldescendants(const JSONRPCRequest& request)
},
{
RPCResult{"for verbose = false",
- "[ (json array of strings)\n"
- " \"transactionid\" (string) The transaction id of an in-mempool descendant transaction\n"
- " ,...\n"
- "]\n"
- },
+ RPCResult::Type::ARR, "", "",
+ {{RPCResult::Type::STR_HEX, "", "The transaction id of an in-mempool descendant transaction"}}},
RPCResult{"for verbose = true",
- "{ (json object)\n"
- " \"transactionid\" : { (json object)\n"
- + EntryDescriptionString()
- + " }, ...\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()},
+ }},
},
RPCExamples{
HelpExampleCli("getmempooldescendants", "\"mytxid\"")
@@ -675,10 +656,7 @@ static UniValue getmempoolentry(const JSONRPCRequest& request)
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id (must be in mempool)"},
},
RPCResult{
- "{ (json object)\n"
- + EntryDescriptionString()
- + "}\n"
- },
+ RPCResult::Type::OBJ_DYN, "", "", MempoolEntryDescription()},
RPCExamples{
HelpExampleCli("getmempoolentry", "\"mytxid\"")
+ HelpExampleRpc("getmempoolentry", "\"mytxid\"")
@@ -709,8 +687,7 @@ static UniValue getblockhash(const JSONRPCRequest& request)
{"height", RPCArg::Type::NUM, RPCArg::Optional::NO, "The height index"},
},
RPCResult{
- "\"hash\" (string) The block hash\n"
- },
+ RPCResult::Type::STR_HEX, "", "The block hash"},
RPCExamples{
HelpExampleCli("getblockhash", "1000")
+ HelpExampleRpc("getblockhash", "1000")
@@ -738,27 +715,26 @@ static UniValue getblockheader(const JSONRPCRequest& request)
},
{
RPCResult{"for verbose = true",
- "{\n"
- " \"hash\" : \"hash\", (string) the block hash (same as provided)\n"
- " \"confirmations\" : n, (numeric) The number of confirmations, or -1 if the block is not on the main chain\n"
- " \"height\" : n, (numeric) The block height or index\n"
- " \"version\" : n, (numeric) The block version\n"
- " \"versionHex\" : \"00000000\", (string) The block version formatted in hexadecimal\n"
- " \"merkleroot\" : \"xxxx\", (string) The merkle root\n"
- " \"time\" : ttt, (numeric) The block time expressed in " + UNIX_EPOCH_TIME + "\n"
- " \"mediantime\" : ttt, (numeric) The median block time expressed in " + UNIX_EPOCH_TIME + "\n"
- " \"nonce\" : n, (numeric) The nonce\n"
- " \"bits\" : \"1d00ffff\", (string) The bits\n"
- " \"difficulty\" : x.xxx, (numeric) The difficulty\n"
- " \"chainwork\" : \"0000...1f3\" (string) Expected number of hashes required to produce the current chain (in hex)\n"
- " \"nTx\" : n, (numeric) The number of transactions in the block.\n"
- " \"previousblockhash\" : \"hash\", (string) The hash of the previous block\n"
- " \"nextblockhash\" : \"hash\", (string) The hash of the next block\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hash", "the block hash (same as provided)"},
+ {RPCResult::Type::NUM, "confirmations", "The number of confirmations, or -1 if the block is not on the main chain"},
+ {RPCResult::Type::NUM, "height", "The block height or index"},
+ {RPCResult::Type::NUM, "version", "The block version"},
+ {RPCResult::Type::STR_HEX, "versionHex", "The block version formatted in hexadecimal"},
+ {RPCResult::Type::STR_HEX, "merkleroot", "The merkle root"},
+ {RPCResult::Type::NUM_TIME, "time", "The block time expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::NUM_TIME, "mediantime", "The median block time expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::NUM, "nonce", "The nonce"},
+ {RPCResult::Type::STR_HEX, "bits", "The bits"},
+ {RPCResult::Type::NUM, "difficulty", "The difficulty"},
+ {RPCResult::Type::STR_HEX, "chainwork", "Expected number of hashes required to produce the current chain"},
+ {RPCResult::Type::NUM, "nTx", "The number of transactions in the block"},
+ {RPCResult::Type::STR_HEX, "previousblockhash", "The hash of the previous block"},
+ {RPCResult::Type::STR_HEX, "nextblockhash", "The hash of the next block"},
+ }},
RPCResult{"for verbose=false",
- "\"data\" (string) A string that is serialized, hex-encoded data for block 'hash'.\n"
- },
+ RPCResult::Type::STR_HEX, "", "A string that is serialized, hex-encoded data for block 'hash'"},
},
RPCExamples{
HelpExampleCli("getblockheader", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\"")
@@ -840,44 +816,45 @@ static UniValue getblock(const JSONRPCRequest& request)
},
{
RPCResult{"for verbosity = 0",
- "\"data\" (string) A string that is serialized, hex-encoded data for block 'hash'.\n"
- },
+ RPCResult::Type::STR_HEX, "", "A string that is serialized, hex-encoded data for block 'hash'"},
RPCResult{"for verbosity = 1",
- "{\n"
- " \"hash\" : \"hash\", (string) the block hash (same as provided)\n"
- " \"confirmations\" : n, (numeric) The number of confirmations, or -1 if the block is not on the main chain\n"
- " \"size\" : n, (numeric) The block size\n"
- " \"strippedsize\" : n, (numeric) The block size excluding witness data\n"
- " \"weight\" : n (numeric) The block weight as defined in BIP 141\n"
- " \"height\" : n, (numeric) The block height or index\n"
- " \"version\" : n, (numeric) The block version\n"
- " \"versionHex\" : \"00000000\", (string) The block version formatted in hexadecimal\n"
- " \"merkleroot\" : \"xxxx\", (string) The merkle root\n"
- " \"tx\" : [ (array of string) The transaction ids\n"
- " \"transactionid\" (string) The transaction id\n"
- " ,...\n"
- " ],\n"
- " \"time\" : ttt, (numeric) The block time expressed in " + UNIX_EPOCH_TIME + "\n"
- " \"mediantime\" : ttt, (numeric) The median block time expressed in " + UNIX_EPOCH_TIME + "\n"
- " \"nonce\" : n, (numeric) The nonce\n"
- " \"bits\" : \"1d00ffff\", (string) The bits\n"
- " \"difficulty\" : x.xxx, (numeric) The difficulty\n"
- " \"chainwork\" : \"xxxx\", (string) Expected number of hashes required to produce the chain up to this block (in hex)\n"
- " \"nTx\" : n, (numeric) The number of transactions in the block.\n"
- " \"previousblockhash\" : \"hash\", (string) The hash of the previous block\n"
- " \"nextblockhash\" : \"hash\" (string) The hash of the next block\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hash", "the block hash (same as provided)"},
+ {RPCResult::Type::NUM, "confirmations", "The number of confirmations, or -1 if the block is not on the main chain"},
+ {RPCResult::Type::NUM, "size", "The block size"},
+ {RPCResult::Type::NUM, "strippedsize", "The block size excluding witness data"},
+ {RPCResult::Type::NUM, "weight", "The block weight as defined in BIP 141"},
+ {RPCResult::Type::NUM, "height", "The block height or index"},
+ {RPCResult::Type::NUM, "version", "The block version"},
+ {RPCResult::Type::STR_HEX, "versionHex", "The block version formatted in hexadecimal"},
+ {RPCResult::Type::STR_HEX, "merkleroot", "The merkle root"},
+ {RPCResult::Type::ARR, "tx", "The transaction ids",
+ {{RPCResult::Type::STR_HEX, "", "The transaction id"}}},
+ {RPCResult::Type::NUM_TIME, "time", "The block time expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::NUM_TIME, "mediantime", "The median block time expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::NUM, "nonce", "The nonce"},
+ {RPCResult::Type::STR_HEX, "bits", "The bits"},
+ {RPCResult::Type::NUM, "difficulty", "The difficulty"},
+ {RPCResult::Type::STR_HEX, "chainwork", "Expected number of hashes required to produce the chain up to this block (in hex)"},
+ {RPCResult::Type::NUM, "nTx", "The number of transactions in the block"},
+ {RPCResult::Type::STR_HEX, "previousblockhash", "The hash of the previous block"},
+ {RPCResult::Type::STR_HEX, "nextblockhash", "The hash of the next block"},
+ }},
RPCResult{"for verbosity = 2",
- "{\n"
- " ..., Same output as verbosity = 1.\n"
- " \"tx\" : [ (array of Objects) The transactions in the format of the getrawtransaction RPC. Different from verbosity = 1 \"tx\" result.\n"
- " ,...\n"
- " ],\n"
- " ,... Same output as verbosity = 1.\n"
- "}\n"
- },
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ELISION, "", "Same output as verbosity = 1"},
+ {RPCResult::Type::ARR, "tx", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ELISION, "", "The transactions in the format of the getrawtransaction RPC. Different from verbosity = 1 \"tx\" result"},
+ }},
+ }},
+ {RPCResult::Type::ELISION, "", "Same output as verbosity = 1"},
+ }},
+ },
RPCExamples{
HelpExampleCli("getblock", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\"")
+ HelpExampleRpc("getblock", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\"")
@@ -928,8 +905,7 @@ static UniValue pruneblockchain(const JSONRPCRequest& request)
" to prune blocks whose block time is at least 2 hours older than the provided timestamp."},
},
RPCResult{
- "n (numeric) Height of the last block pruned.\n"
- },
+ RPCResult::Type::NUM, "", "Height of the last block pruned"},
RPCExamples{
HelpExampleCli("pruneblockchain", "1000")
+ HelpExampleRpc("pruneblockchain", "1000")
@@ -983,17 +959,17 @@ static UniValue gettxoutsetinfo(const JSONRPCRequest& request)
"Note this call may take some time.\n",
{},
RPCResult{
- "{\n"
- " \"height\":n, (numeric) The current block height (index)\n"
- " \"bestblock\": \"hex\", (string) The hash of the block at the tip of the chain\n"
- " \"transactions\": n, (numeric) The number of transactions with unspent outputs\n"
- " \"txouts\": n, (numeric) The number of unspent transaction outputs\n"
- " \"bogosize\": n, (numeric) A meaningless metric for UTXO set size\n"
- " \"hash_serialized_2\": \"hash\", (string) The serialized hash\n"
- " \"disk_size\": n, (numeric) The estimated size of the chainstate on disk\n"
- " \"total_amount\": x.xxx (numeric) The total amount\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "height", "The current block height (index)"},
+ {RPCResult::Type::STR_HEX, "bestblock", "The hash of the block at the tip of the chain"},
+ {RPCResult::Type::NUM, "transactions", "The number of transactions with unspent outputs"},
+ {RPCResult::Type::NUM, "txouts", "The number of unspent transaction outputs"},
+ {RPCResult::Type::NUM, "bogosize", "A meaningless metric for UTXO set size"},
+ {RPCResult::Type::STR_HEX, "hash_serialized_2", "The serialized hash"},
+ {RPCResult::Type::NUM, "disk_size", "The estimated size of the chainstate on disk"},
+ {RPCResult::Type::STR_AMOUNT, "total_amount", "The total amount"},
+ }},
RPCExamples{
HelpExampleCli("gettxoutsetinfo", "")
+ HelpExampleRpc("gettxoutsetinfo", "")
@@ -1031,23 +1007,22 @@ UniValue gettxout(const JSONRPCRequest& request)
{"include_mempool", RPCArg::Type::BOOL, /* default */ "true", "Whether to include the mempool. Note that an unspent output that is spent in the mempool won't appear."},
},
RPCResult{
- "{\n"
- " \"bestblock\": \"hash\", (string) The hash of the block at the tip of the chain\n"
- " \"confirmations\" : n, (numeric) The number of confirmations\n"
- " \"value\" : x.xxx, (numeric) The transaction value in " + CURRENCY_UNIT + "\n"
- " \"scriptPubKey\" : { (json object)\n"
- " \"asm\" : \"code\", (string) \n"
- " \"hex\" : \"hex\", (string) \n"
- " \"reqSigs\" : n, (numeric) Number of required signatures\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg pubkeyhash\n"
- " \"addresses\" : [ (array of string) array of bitcoin addresses\n"
- " \"address\" (string) bitcoin address\n"
- " ,...\n"
- " ]\n"
- " },\n"
- " \"coinbase\" : true|false (boolean) Coinbase or not\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "bestblock", "The hash of the block at the tip of the chain"},
+ {RPCResult::Type::NUM, "confirmations", "The number of confirmations"},
+ {RPCResult::Type::STR_AMOUNT, "value", "The transaction value in " + CURRENCY_UNIT},
+ {RPCResult::Type::OBJ, "scriptPubKey", "",
+ {
+ {RPCResult::Type::STR_HEX, "asm", ""},
+ {RPCResult::Type::STR_HEX, "hex", ""},
+ {RPCResult::Type::NUM, "reqSigs", "Number of required signatures"},
+ {RPCResult::Type::STR_HEX, "type", "The type, eg pubkeyhash"},
+ {RPCResult::Type::ARR, "addresses", "array of bitcoin addresses",
+ {{RPCResult::Type::STR, "address", "bitcoin address"}}},
+ }},
+ {RPCResult::Type::BOOL, "coinbase", "Coinbase or not"},
+ }},
RPCExamples{
"\nGet unspent transactions\n"
+ HelpExampleCli("listunspent", "") +
@@ -1112,8 +1087,7 @@ static UniValue verifychain(const JSONRPCRequest& request)
{"nblocks", RPCArg::Type::NUM, /* default */ strprintf("%d, 0=all", nCheckDepth), "The number of blocks to check."},
},
RPCResult{
- "true|false (boolean) Verified or not\n"
- },
+ RPCResult::Type::BOOL, "", "Verified or not"},
RPCExamples{
HelpExampleCli("verifychain", "")
+ HelpExampleRpc("verifychain", "")
@@ -1203,45 +1177,49 @@ UniValue getblockchaininfo(const JSONRPCRequest& request)
"Returns an object containing various state info regarding blockchain processing.\n",
{},
RPCResult{
- "{\n"
- " \"chain\": \"xxxx\", (string) current network name (main, test, regtest)\n"
- " \"blocks\": xxxxxx, (numeric) the height of the most-work fully-validated chain. The genesis block has height 0\n"
- " \"headers\": xxxxxx, (numeric) the current number of headers we have validated\n"
- " \"bestblockhash\": \"...\", (string) the hash of the currently best block\n"
- " \"difficulty\": xxxxxx, (numeric) the current difficulty\n"
- " \"mediantime\": xxxxxx, (numeric) median time for the current best block\n"
- " \"verificationprogress\": xxxx, (numeric) estimate of verification progress [0..1]\n"
- " \"initialblockdownload\": xxxx, (bool) (debug information) estimate of whether this node is in Initial Block Download mode.\n"
- " \"chainwork\": \"xxxx\" (string) total amount of work in active chain, in hexadecimal\n"
- " \"size_on_disk\": xxxxxx, (numeric) the estimated size of the block and undo files on disk\n"
- " \"pruned\": xx, (boolean) if the blocks are subject to pruning\n"
- " \"pruneheight\": xxxxxx, (numeric) lowest-height complete block stored (only present if pruning is enabled)\n"
- " \"automatic_pruning\": xx, (boolean) whether automatic pruning is enabled (only present if pruning is enabled)\n"
- " \"prune_target_size\": xxxxxx, (numeric) the target size used by pruning (only present if automatic pruning is enabled)\n"
- " \"softforks\": { (object) status of softforks\n"
- " \"xxxx\" : { (string) name of the softfork\n"
- " \"type\": \"xxxx\", (string) one of \"buried\", \"bip9\"\n"
- " \"bip9\": { (object) status of bip9 softforks (only for \"bip9\" type)\n"
- " \"status\": \"xxxx\", (string) one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\"\n"
- " \"bit\": xx, (numeric) the bit (0-28) in the block version field used to signal this softfork (only for \"started\" status)\n"
- " \"start_time\": xx, (numeric) the minimum median time past of a block at which the bit gains its meaning\n"
- " \"timeout\": xx, (numeric) the median time past of a block at which the deployment is considered failed if not yet locked in\n"
- " \"since\": xx, (numeric) height of the first block to which the status applies\n"
- " \"statistics\": { (object) numeric statistics about BIP9 signalling for a softfork\n"
- " \"period\": xx, (numeric) the length in blocks of the BIP9 signalling period \n"
- " \"threshold\": xx, (numeric) the number of blocks with the version bit set required to activate the feature \n"
- " \"elapsed\": xx, (numeric) the number of blocks elapsed since the beginning of the current period \n"
- " \"count\": xx, (numeric) the number of blocks with the version bit set in the current period \n"
- " \"possible\": xx (boolean) returns false if there are not enough blocks left in this period to pass activation threshold \n"
- " }\n"
- " },\n"
- " \"height\": \"xxxxxx\", (numeric) height of the first block which the rules are or will be enforced (only for \"buried\" type, or \"bip9\" type with \"active\" status)\n"
- " \"active\": xx, (boolean) true if the rules are enforced for the mempool and the next block\n"
- " }\n"
- " }\n"
- " \"warnings\" : \"...\", (string) any network and blockchain warnings.\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "chain", "current network name (main, test, regtest)"},
+ {RPCResult::Type::NUM, "blocks", "the height of the most-work fully-validated chain. The genesis block has height 0"},
+ {RPCResult::Type::NUM, "headers", "the current number of headers we have validated"},
+ {RPCResult::Type::STR, "bestblockhash", "the hash of the currently best block"},
+ {RPCResult::Type::NUM, "difficulty", "the current difficulty"},
+ {RPCResult::Type::NUM, "mediantime", "median time for the current best block"},
+ {RPCResult::Type::NUM, "verificationprogress", "estimate of verification progress [0..1]"},
+ {RPCResult::Type::BOOL, "initialblockdownload", "(debug information) estimate of whether this node is in Initial Block Download mode"},
+ {RPCResult::Type::STR_HEX, "chainwork", "total amount of work in active chain, in hexadecimal"},
+ {RPCResult::Type::NUM, "size_on_disk", "the estimated size of the block and undo files on disk"},
+ {RPCResult::Type::BOOL, "pruned", "if the blocks are subject to pruning"},
+ {RPCResult::Type::NUM, "pruneheight", "lowest-height complete block stored (only present if pruning is enabled)"},
+ {RPCResult::Type::BOOL, "automatic_pruning", "whether automatic pruning is enabled (only present if pruning is enabled)"},
+ {RPCResult::Type::NUM, "prune_target_size", "the target size used by pruning (only present if automatic pruning is enabled)"},
+ {RPCResult::Type::OBJ_DYN, "softforks", "status of softforks",
+ {
+ {RPCResult::Type::OBJ, "xxxx", "name of the softfork",
+ {
+ {RPCResult::Type::STR, "type", "one of \"buried\", \"bip9\""},
+ {RPCResult::Type::OBJ, "bip9", "status of bip9 softforks (only for \"bip9\" type)",
+ {
+ {RPCResult::Type::STR, "status", "one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\""},
+ {RPCResult::Type::NUM, "bit", "the bit (0-28) in the block version field used to signal this softfork (only for \"started\" status)"},
+ {RPCResult::Type::NUM_TIME, "start_time", "the minimum median time past of a block at which the bit gains its meaning"},
+ {RPCResult::Type::NUM_TIME, "timeout", "the median time past of a block at which the deployment is considered failed if not yet locked in"},
+ {RPCResult::Type::NUM, "since", "height of the first block to which the status applies"},
+ {RPCResult::Type::OBJ, "statistics", "numeric statistics about BIP9 signalling for a softfork",
+ {
+ {RPCResult::Type::NUM, "period", "the length in blocks of the BIP9 signalling period"},
+ {RPCResult::Type::NUM, "threshold", "the number of blocks with the version bit set required to activate the feature"},
+ {RPCResult::Type::NUM, "elapsed", "the number of blocks elapsed since the beginning of the current period"},
+ {RPCResult::Type::NUM, "count", "the number of blocks with the version bit set in the current period"},
+ {RPCResult::Type::BOOL, "possible", "returns false if there are not enough blocks left in this period to pass activation threshold"},
+ }},
+ }},
+ {RPCResult::Type::NUM, "height", "height of the first block which the rules are or will be enforced (only for \"buried\" type, or \"bip9\" type with \"active\" status)"},
+ {RPCResult::Type::BOOL, "active", "true if the rules are enforced for the mempool and the next block"},
+ }},
+ }},
+ {RPCResult::Type::STR, "warnings", "any network and blockchain warnings"},
+ }},
RPCExamples{
HelpExampleCli("getblockchaininfo", "")
+ HelpExampleRpc("getblockchaininfo", "")
@@ -1316,27 +1294,20 @@ static UniValue getchaintips(const JSONRPCRequest& request)
" including the main chain as well as orphaned branches.\n",
{},
RPCResult{
- "[\n"
- " {\n"
- " \"height\": xxxx, (numeric) height of the chain tip\n"
- " \"hash\": \"xxxx\", (string) block hash of the tip\n"
- " \"branchlen\": 0 (numeric) zero for main chain\n"
- " \"status\": \"active\" (string) \"active\" for the main chain\n"
- " },\n"
- " {\n"
- " \"height\": xxxx,\n"
- " \"hash\": \"xxxx\",\n"
- " \"branchlen\": 1 (numeric) length of branch connecting the tip to the main chain\n"
- " \"status\": \"xxxx\" (string) status of the chain (active, valid-fork, valid-headers, headers-only, invalid)\n"
- " }\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {{RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "height", "height of the chain tip"},
+ {RPCResult::Type::STR_HEX, "hash", "block hash of the tip"},
+ {RPCResult::Type::NUM, "branchlen", "zero for main chain, otherwise length of branch connecting the tip to the main chain"},
+ {RPCResult::Type::STR, "status", "status of the chain, \"active\" for the main chain\n"
"Possible values for status:\n"
"1. \"invalid\" This branch contains at least one invalid block\n"
"2. \"headers-only\" Not all blocks for this branch are available, but the headers are valid\n"
"3. \"valid-headers\" All blocks are available for this branch, but they were never fully validated\n"
"4. \"valid-fork\" This branch is not part of the active chain, but is fully validated\n"
- "5. \"active\" This is the tip of the active main chain, which is certainly valid\n"
- },
+ "5. \"active\" This is the tip of the active main chain, which is certainly valid"},
+ }}}},
RPCExamples{
HelpExampleCli("getchaintips", "")
+ HelpExampleRpc("getchaintips", "")
@@ -1436,16 +1407,16 @@ static UniValue getmempoolinfo(const JSONRPCRequest& request)
"\nReturns details on the active state of the TX memory pool.\n",
{},
RPCResult{
- "{\n"
- " \"loaded\": true|false (boolean) True if the mempool is fully loaded\n"
- " \"size\": xxxxx, (numeric) Current tx count\n"
- " \"bytes\": xxxxx, (numeric) Sum of all virtual transaction sizes as defined in BIP 141. Differs from actual serialized size because witness data is discounted\n"
- " \"usage\": xxxxx, (numeric) Total memory usage for the mempool\n"
- " \"maxmempool\": xxxxx, (numeric) Maximum memory usage for the mempool\n"
- " \"mempoolminfee\": xxxxx (numeric) Minimum fee rate in " + CURRENCY_UNIT + "/kB for tx to be accepted. Is the maximum of minrelaytxfee and minimum mempool fee\n"
- " \"minrelaytxfee\": xxxxx (numeric) Current minimum relay fee for transactions\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "loaded", "True if the mempool is fully loaded"},
+ {RPCResult::Type::NUM, "size", "Current tx count"},
+ {RPCResult::Type::NUM, "bytes", "Sum of all virtual transaction sizes as defined in BIP 141. Differs from actual serialized size because witness data is discounted"},
+ {RPCResult::Type::NUM, "usage", "Total memory usage for the mempool"},
+ {RPCResult::Type::NUM, "maxmempool", "Maximum memory usage for the mempool"},
+ {RPCResult::Type::STR_AMOUNT, "mempoolminfee", "Minimum fee rate in " + CURRENCY_UNIT + "/kB for tx to be accepted. Is the maximum of minrelaytxfee and minimum mempool fee"},
+ {RPCResult::Type::STR_AMOUNT, "minrelaytxfee", "Current minimum relay fee for transactions"},
+ }},
RPCExamples{
HelpExampleCli("getmempoolinfo", "")
+ HelpExampleRpc("getmempoolinfo", "")
@@ -1486,7 +1457,7 @@ static UniValue preciousblock(const JSONRPCRequest& request)
PreciousBlock(state, Params(), pblockindex);
if (!state.IsValid()) {
- throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state));
+ throw JSONRPCError(RPC_DATABASE_ERROR, state.ToString());
}
return NullUniValue;
@@ -1524,7 +1495,7 @@ static UniValue invalidateblock(const JSONRPCRequest& request)
}
if (!state.IsValid()) {
- throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state));
+ throw JSONRPCError(RPC_DATABASE_ERROR, state.ToString());
}
return NullUniValue;
@@ -1561,7 +1532,7 @@ static UniValue reconsiderblock(const JSONRPCRequest& request)
ActivateBestChain(state, Params());
if (!state.IsValid()) {
- throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state));
+ throw JSONRPCError(RPC_DATABASE_ERROR, state.ToString());
}
return NullUniValue;
@@ -1576,17 +1547,17 @@ static UniValue getchaintxstats(const JSONRPCRequest& request)
{"blockhash", RPCArg::Type::STR_HEX, /* default */ "chain tip", "The hash of the block that ends the window."},
},
RPCResult{
- "{\n"
- " \"time\": xxxxx, (numeric) The timestamp for the final block in the window, expressed in " + UNIX_EPOCH_TIME + ".\n"
- " \"txcount\": xxxxx, (numeric) The total number of transactions in the chain up to that point.\n"
- " \"window_final_block_hash\": \"...\", (string) The hash of the final block in the window.\n"
- " \"window_final_block_height\": xxxxx, (numeric) The height of the final block in the window.\n"
- " \"window_block_count\": xxxxx, (numeric) Size of the window in number of blocks.\n"
- " \"window_tx_count\": xxxxx, (numeric) The number of transactions in the window. Only returned if \"window_block_count\" is > 0.\n"
- " \"window_interval\": xxxxx, (numeric) The elapsed time in the window in seconds. Only returned if \"window_block_count\" is > 0.\n"
- " \"txrate\": x.xx, (numeric) The average rate of transactions per second in the window. Only returned if \"window_interval\" is > 0.\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM_TIME, "time", "The timestamp for the final block in the window, expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::NUM, "txcount", "The total number of transactions in the chain up to that point"},
+ {RPCResult::Type::STR_HEX, "window_final_block_hash", "The hash of the final block in the window"},
+ {RPCResult::Type::NUM, "window_final_block_height", "The height of the final block in the window."},
+ {RPCResult::Type::NUM, "window_block_count", "Size of the window in number of blocks"},
+ {RPCResult::Type::NUM, "window_tx_count", "The number of transactions in the window. Only returned if \"window_block_count\" is > 0"},
+ {RPCResult::Type::NUM, "window_interval", "The elapsed time in the window in seconds. Only returned if \"window_block_count\" is > 0"},
+ {RPCResult::Type::NUM, "txrate", "The average rate of transactions per second in the window. Only returned if \"window_interval\" is > 0"},
+ }},
RPCExamples{
HelpExampleCli("getchaintxstats", "")
+ HelpExampleRpc("getchaintxstats", "2016")
@@ -1715,44 +1686,45 @@ static UniValue getblockstats(const JSONRPCRequest& request)
"stats"},
},
RPCResult{
- "{ (json object)\n"
- " \"avgfee\": xxxxx, (numeric) Average fee in the block\n"
- " \"avgfeerate\": xxxxx, (numeric) Average feerate (in satoshis per virtual byte)\n"
- " \"avgtxsize\": xxxxx, (numeric) Average transaction size\n"
- " \"blockhash\": xxxxx, (string) The block hash (to check for potential reorgs)\n"
- " \"feerate_percentiles\": [ (array of numeric) Feerates at the 10th, 25th, 50th, 75th, and 90th percentile weight unit (in satoshis per virtual byte)\n"
- " \"10th_percentile_feerate\", (numeric) The 10th percentile feerate\n"
- " \"25th_percentile_feerate\", (numeric) The 25th percentile feerate\n"
- " \"50th_percentile_feerate\", (numeric) The 50th percentile feerate\n"
- " \"75th_percentile_feerate\", (numeric) The 75th percentile feerate\n"
- " \"90th_percentile_feerate\", (numeric) The 90th percentile feerate\n"
- " ],\n"
- " \"height\": xxxxx, (numeric) The height of the block\n"
- " \"ins\": xxxxx, (numeric) The number of inputs (excluding coinbase)\n"
- " \"maxfee\": xxxxx, (numeric) Maximum fee in the block\n"
- " \"maxfeerate\": xxxxx, (numeric) Maximum feerate (in satoshis per virtual byte)\n"
- " \"maxtxsize\": xxxxx, (numeric) Maximum transaction size\n"
- " \"medianfee\": xxxxx, (numeric) Truncated median fee in the block\n"
- " \"mediantime\": xxxxx, (numeric) The block median time past\n"
- " \"mediantxsize\": xxxxx, (numeric) Truncated median transaction size\n"
- " \"minfee\": xxxxx, (numeric) Minimum fee in the block\n"
- " \"minfeerate\": xxxxx, (numeric) Minimum feerate (in satoshis per virtual byte)\n"
- " \"mintxsize\": xxxxx, (numeric) Minimum transaction size\n"
- " \"outs\": xxxxx, (numeric) The number of outputs\n"
- " \"subsidy\": xxxxx, (numeric) The block subsidy\n"
- " \"swtotal_size\": xxxxx, (numeric) Total size of all segwit transactions\n"
- " \"swtotal_weight\": xxxxx, (numeric) Total weight of all segwit transactions divided by segwit scale factor (4)\n"
- " \"swtxs\": xxxxx, (numeric) The number of segwit transactions\n"
- " \"time\": xxxxx, (numeric) The block time\n"
- " \"total_out\": xxxxx, (numeric) Total amount in all outputs (excluding coinbase and thus reward [ie subsidy + totalfee])\n"
- " \"total_size\": xxxxx, (numeric) Total size of all non-coinbase transactions\n"
- " \"total_weight\": xxxxx, (numeric) Total weight of all non-coinbase transactions divided by segwit scale factor (4)\n"
- " \"totalfee\": xxxxx, (numeric) The fee total\n"
- " \"txs\": xxxxx, (numeric) The number of transactions (excluding coinbase)\n"
- " \"utxo_increase\": xxxxx, (numeric) The increase/decrease in the number of unspent outputs\n"
- " \"utxo_size_inc\": xxxxx, (numeric) The increase/decrease in size for the utxo index (not discounting op_return and similar)\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "avgfee", "Average fee in the block"},
+ {RPCResult::Type::NUM, "avgfeerate", "Average feerate (in satoshis per virtual byte)"},
+ {RPCResult::Type::NUM, "avgtxsize", "Average transaction size"},
+ {RPCResult::Type::STR_HEX, "blockhash", "The block hash (to check for potential reorgs)"},
+ {RPCResult::Type::ARR_FIXED, "feerate_percentiles", "Feerates at the 10th, 25th, 50th, 75th, and 90th percentile weight unit (in satoshis per virtual byte)",
+ {
+ {RPCResult::Type::NUM, "10th_percentile_feerate", "The 10th percentile feerate"},
+ {RPCResult::Type::NUM, "25th_percentile_feerate", "The 25th percentile feerate"},
+ {RPCResult::Type::NUM, "50th_percentile_feerate", "The 50th percentile feerate"},
+ {RPCResult::Type::NUM, "75th_percentile_feerate", "The 75th percentile feerate"},
+ {RPCResult::Type::NUM, "90th_percentile_feerate", "The 90th percentile feerate"},
+ }},
+ {RPCResult::Type::NUM, "height", "The height of the block"},
+ {RPCResult::Type::NUM, "ins", "The number of inputs (excluding coinbase)"},
+ {RPCResult::Type::NUM, "maxfee", "Maximum fee in the block"},
+ {RPCResult::Type::NUM, "maxfeerate", "Maximum feerate (in satoshis per virtual byte)"},
+ {RPCResult::Type::NUM, "maxtxsize", "Maximum transaction size"},
+ {RPCResult::Type::NUM, "medianfee", "Truncated median fee in the block"},
+ {RPCResult::Type::NUM, "mediantime", "The block median time past"},
+ {RPCResult::Type::NUM, "mediantxsize", "Truncated median transaction size"},
+ {RPCResult::Type::NUM, "minfee", "Minimum fee in the block"},
+ {RPCResult::Type::NUM, "minfeerate", "Minimum feerate (in satoshis per virtual byte)"},
+ {RPCResult::Type::NUM, "mintxsize", "Minimum transaction size"},
+ {RPCResult::Type::NUM, "outs", "The number of outputs"},
+ {RPCResult::Type::NUM, "subsidy", "The block subsidy"},
+ {RPCResult::Type::NUM, "swtotal_size", "Total size of all segwit transactions"},
+ {RPCResult::Type::NUM, "swtotal_weight", "Total weight of all segwit transactions divided by segwit scale factor (4)"},
+ {RPCResult::Type::NUM, "swtxs", "The number of segwit transactions"},
+ {RPCResult::Type::NUM, "time", "The block time"},
+ {RPCResult::Type::NUM, "total_out", "Total amount in all outputs (excluding coinbase and thus reward [ie subsidy + totalfee])"},
+ {RPCResult::Type::NUM, "total_size", "Total size of all non-coinbase transactions"},
+ {RPCResult::Type::NUM, "total_weight", "Total weight of all non-coinbase transactions divided by segwit scale factor (4)"},
+ {RPCResult::Type::NUM, "totalfee", "The fee total"},
+ {RPCResult::Type::NUM, "txs", "The number of transactions (excluding coinbase)"},
+ {RPCResult::Type::NUM, "utxo_increase", "The increase/decrease in the number of unspent outputs"},
+ {RPCResult::Type::NUM, "utxo_size_inc", "The increase/decrease in size for the utxo index (not discounting op_return and similar)"},
+ }},
RPCExamples{
HelpExampleCli("getblockstats", "1000 '[\"minfeerate\",\"avgfeerate\"]'")
+ HelpExampleRpc("getblockstats", "1000 '[\"minfeerate\",\"avgfeerate\"]'")
@@ -2075,24 +2047,26 @@ UniValue scantxoutset(const JSONRPCRequest& request)
"[scanobjects,...]"},
},
RPCResult{
- "{\n"
- " \"success\": true|false, (boolean) Whether the scan was completed\n"
- " \"txouts\": n, (numeric) The number of unspent transaction outputs scanned\n"
- " \"height\": n, (numeric) The current block height (index)\n"
- " \"bestblock\": \"hex\", (string) The hash of the block at the tip of the chain\n"
- " \"unspents\": [\n"
- " {\n"
- " \"txid\": \"hash\", (string) The transaction id\n"
- " \"vout\": n, (numeric) The vout value\n"
- " \"scriptPubKey\": \"script\", (string) The script key\n"
- " \"desc\": \"descriptor\", (string) A specialized descriptor for the matched scriptPubKey\n"
- " \"amount\": x.xxx, (numeric) The total amount in " + CURRENCY_UNIT + " of the unspent output\n"
- " \"height\": n, (numeric) Height of the unspent transaction output\n"
- " }\n"
- " ,...],\n"
- " \"total_amount\": x.xxx, (numeric) The total amount of all found unspent outputs in " + CURRENCY_UNIT + "\n"
- "]\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "success", "Whether the scan was completed"},
+ {RPCResult::Type::NUM, "txouts", "The number of unspent transaction outputs scanned"},
+ {RPCResult::Type::NUM, "height", "The current block height (index)"},
+ {RPCResult::Type::STR_HEX, "bestblock", "The hash of the block at the tip of the chain"},
+ {RPCResult::Type::ARR, "unspents", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id"},
+ {RPCResult::Type::NUM, "vout", "The vout value"},
+ {RPCResult::Type::STR_HEX, "scriptPubKey", "The script key"},
+ {RPCResult::Type::STR, "desc", "A specialized descriptor for the matched scriptPubKey"},
+ {RPCResult::Type::STR_AMOUNT, "amount", "The total amount in " + CURRENCY_UNIT + " of the unspent output"},
+ {RPCResult::Type::NUM, "height", "Height of the unspent transaction output"},
+ }},
+ }},
+ {RPCResult::Type::STR_AMOUNT, "total_amount", "The total amount of all found unspent outputs in " + CURRENCY_UNIT},
+ }},
RPCExamples{""},
}.Check(request);
@@ -2198,11 +2172,11 @@ static UniValue getblockfilter(const JSONRPCRequest& request)
{"filtertype", RPCArg::Type::STR, /*default*/ "basic", "The type name of the filter"},
},
RPCResult{
- "{\n"
- " \"filter\" : (string) the hex-encoded filter data\n"
- " \"header\" : (string) the hex-encoded filter header\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "filter", "the hex-encoded filter data"},
+ {RPCResult::Type::STR_HEX, "header", "the hex-encoded filter header"},
+ }},
RPCExamples{
HelpExampleCli("getblockfilter", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\" \"basic\"")
}
@@ -2283,12 +2257,13 @@ UniValue dumptxoutset(const JSONRPCRequest& request)
"path to the output file. If relative, will be prefixed by datadir."},
},
RPCResult{
- "{\n"
- " \"coins_written\": n, (numeric) the number of coins written in the snapshot\n"
- " \"base_hash\": \"...\", (string) the hash of the base of the snapshot\n"
- " \"base_height\": n, (string) the height of the base of the snapshot\n"
- " \"path\": \"...\" (string) the absolute path that the snapshot was written to\n"
- "]\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "coins_written", "the number of coins written in the snapshot"},
+ {RPCResult::Type::STR_HEX, "base_hash", "the hash of the base of the snapshot"},
+ {RPCResult::Type::NUM, "base_height", "the height of the base of the snapshot"},
+ {RPCResult::Type::STR, "path", "the absolute path that the snapshot was written to"},
+ }
},
RPCExamples{
HelpExampleCli("dumptxoutset", "utxo.dat")
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index 2eaa3427eb..c1762483e9 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -27,6 +27,7 @@ public:
static const CRPCConvertParam vRPCConvertParams[] =
{
{ "setmocktime", 0, "timestamp" },
+ { "mockscheduler", 0, "delta_time" },
{ "utxoupdatepsbt", 1, "descriptors" },
{ "generatetoaddress", 0, "nblocks" },
{ "generatetoaddress", 2, "maxtries" },
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 81da053b4e..9245d09b89 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -28,7 +28,6 @@
#include <util/fees.h>
#include <util/strencodings.h>
#include <util/system.h>
-#include <util/validation.h>
#include <validation.h>
#include <validationinterface.h>
#include <versionbitsinfo.h>
@@ -90,8 +89,7 @@ static UniValue getnetworkhashps(const JSONRPCRequest& request)
{"height", RPCArg::Type::NUM, /* default */ "-1", "To estimate at the time of the given height."},
},
RPCResult{
- "x (numeric) Hashes per second estimated\n"
- },
+ RPCResult::Type::NUM, "", "Hashes per second estimated"},
RPCExamples{
HelpExampleCli("getnetworkhashps", "")
+ HelpExampleRpc("getnetworkhashps", "")
@@ -154,7 +152,11 @@ static UniValue generatetodescriptor(const JSONRPCRequest& request)
{"maxtries", RPCArg::Type::NUM, /* default */ "1000000", "How many iterations to try."},
},
RPCResult{
- "[ blockhashes ] (array) hashes of blocks generated\n"},
+ RPCResult::Type::ARR, "", "hashes of blocks generated",
+ {
+ {RPCResult::Type::STR_HEX, "", "blockhash"},
+ }
+ },
RPCExamples{
"\nGenerate 11 blocks to mydesc\n" + HelpExampleCli("generatetodescriptor", "11 \"mydesc\"")},
}
@@ -196,8 +198,10 @@ static UniValue generatetoaddress(const JSONRPCRequest& request)
{"maxtries", RPCArg::Type::NUM, /* default */ "1000000", "How many iterations to try."},
},
RPCResult{
- "[ blockhashes ] (array) hashes of blocks generated\n"
- },
+ RPCResult::Type::ARR, "", "hashes of blocks generated",
+ {
+ {RPCResult::Type::STR_HEX, "", "blockhash"},
+ }},
RPCExamples{
"\nGenerate 11 blocks to myaddress\n"
+ HelpExampleCli("generatetoaddress", "11 \"myaddress\"")
@@ -230,17 +234,17 @@ static UniValue getmininginfo(const JSONRPCRequest& request)
"\nReturns a json object containing mining-related information.",
{},
RPCResult{
- "{\n"
- " \"blocks\": nnn, (numeric) The current block\n"
- " \"currentblockweight\": nnn, (numeric, optional) The block weight of the last assembled block (only present if a block was ever assembled)\n"
- " \"currentblocktx\": nnn, (numeric, optional) The number of block transactions of the last assembled block (only present if a block was ever assembled)\n"
- " \"difficulty\": xxx.xxxxx (numeric) The current difficulty\n"
- " \"networkhashps\": nnn, (numeric) The network hashes per second\n"
- " \"pooledtx\": n (numeric) The size of the mempool\n"
- " \"chain\": \"xxxx\", (string) current network name (main, test, regtest)\n"
- " \"warnings\": \"...\" (string) any network and blockchain warnings\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "blocks", "The current block"},
+ {RPCResult::Type::NUM, "currentblockweight", /* optional */ true, "The block weight of the last assembled block (only present if a block was ever assembled)"},
+ {RPCResult::Type::NUM, "currentblocktx", /* optional */ true, "The number of block transactions of the last assembled block (only present if a block was ever assembled)"},
+ {RPCResult::Type::NUM, "difficulty", "The current difficulty"},
+ {RPCResult::Type::NUM, "networkhashps", "The network hashes per second"},
+ {RPCResult::Type::NUM, "pooledtx", "The size of the mempool"},
+ {RPCResult::Type::STR, "chain", "current network name (main, test, regtest)"},
+ {RPCResult::Type::STR, "warnings", "any network and blockchain warnings"},
+ }},
RPCExamples{
HelpExampleCli("getmininginfo", "")
+ HelpExampleRpc("getmininginfo", "")
@@ -278,8 +282,7 @@ static UniValue prioritisetransaction(const JSONRPCRequest& request)
" considers the transaction as it would have paid a higher (or lower) fee."},
},
RPCResult{
- "true (boolean) Returns true\n"
- },
+ RPCResult::Type::BOOL, "", "Returns true"},
RPCExamples{
HelpExampleCli("prioritisetransaction", "\"txid\" 0.0 10000")
+ HelpExampleRpc("prioritisetransaction", "\"txid\", 0.0, 10000")
@@ -307,7 +310,7 @@ static UniValue BIP22ValidationResult(const BlockValidationState& state)
return NullUniValue;
if (state.IsError())
- throw JSONRPCError(RPC_VERIFY_ERROR, FormatStateMessage(state));
+ throw JSONRPCError(RPC_VERIFY_ERROR, state.ToString());
if (state.IsInvalid())
{
std::string strRejectReason = state.GetRejectReason();
@@ -356,48 +359,58 @@ static UniValue getblocktemplate(const JSONRPCRequest& request)
"\"template_request\""},
},
RPCResult{
- "{\n"
- " \"version\" : n, (numeric) The preferred block version\n"
- " \"rules\" : [ \"rulename\", ... ], (array of strings) specific block rules that are to be enforced\n"
- " \"vbavailable\" : { (json object) set of pending, supported versionbit (BIP 9) softfork deployments\n"
- " \"rulename\" : bitnumber (numeric) identifies the bit number as indicating acceptance and readiness for the named softfork rule\n"
- " ,...\n"
- " },\n"
- " \"vbrequired\" : n, (numeric) bit mask of versionbits the server requires set in submissions\n"
- " \"previousblockhash\" : \"xxxx\", (string) The hash of current highest block\n"
- " \"transactions\" : [ (array) contents of non-coinbase transactions that should be included in the next block\n"
- " {\n"
- " \"data\" : \"xxxx\", (string) transaction data encoded in hexadecimal (byte-for-byte)\n"
- " \"txid\" : \"xxxx\", (string) transaction id encoded in little-endian hexadecimal\n"
- " \"hash\" : \"xxxx\", (string) hash encoded in little-endian hexadecimal (including witness data)\n"
- " \"depends\" : [ (array) array of numbers \n"
- " n (numeric) transactions before this one (by 1-based index in 'transactions' list) that must be present in the final block if this one is\n"
- " ,...\n"
- " ],\n"
- " \"fee\": n, (numeric) difference in value between transaction inputs and outputs (in satoshis); for coinbase transactions, this is a negative Number of the total collected block fees (ie, not including the block subsidy); if key is not present, fee is unknown and clients MUST NOT assume there isn't one\n"
- " \"sigops\" : n, (numeric) total SigOps cost, as counted for purposes of block limits; if key is not present, sigop cost is unknown and clients MUST NOT assume it is zero\n"
- " \"weight\" : n, (numeric) total transaction weight, as counted for purposes of block limits\n"
- " }\n"
- " ,...\n"
- " ],\n"
- " \"coinbaseaux\" : { ... }, (json object) data that should be included in the coinbase's scriptSig content\n"
- " \"coinbasevalue\" : n, (numeric) maximum allowable input to coinbase transaction, including the generation award and transaction fees (in satoshis)\n"
- " \"coinbasetxn\" : { ... }, (json object) information for coinbase transaction\n"
- " \"target\" : \"xxxx\", (string) The hash target\n"
- " \"mintime\" : xxx, (numeric) The minimum timestamp appropriate for the next block time, expressed in " + UNIX_EPOCH_TIME + "\n"
- " \"mutable\" : [ (array of string) list of ways the block template may be changed \n"
- " \"value\" (string) A way the block template may be changed, e.g. 'time', 'transactions', 'prevblock'\n"
- " ,...\n"
- " ],\n"
- " \"noncerange\" : \"00000000ffffffff\",(string) A range of valid nonces\n"
- " \"sigoplimit\" : n, (numeric) limit of sigops in blocks\n"
- " \"sizelimit\" : n, (numeric) limit of block size\n"
- " \"weightlimit\" : n, (numeric) limit of block weight\n"
- " \"curtime\" : ttt, (numeric) current timestamp in " + UNIX_EPOCH_TIME + "\n"
- " \"bits\" : \"xxxxxxxx\", (string) compressed target of next block\n"
- " \"height\" : n (numeric) The height of the next block\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "version", "The preferred block version"},
+ {RPCResult::Type::ARR, "rules", "specific block rules that are to be enforced",
+ {
+ {RPCResult::Type::STR, "", "rulename"},
+ }},
+ {RPCResult::Type::OBJ_DYN, "vbavailable", "set of pending, supported versionbit (BIP 9) softfork deployments",
+ {
+ {RPCResult::Type::NUM, "rulename", "identifies the bit number as indicating acceptance and readiness for the named softfork rule"},
+ }},
+ {RPCResult::Type::NUM, "vbrequired", "bit mask of versionbits the server requires set in submissions"},
+ {RPCResult::Type::STR, "previousblockhash", "The hash of current highest block"},
+                    {RPCResult::Type::ARR, "transactions", "contents of non-coinbase transactions that should be included in the next block",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "data", "transaction data encoded in hexadecimal (byte-for-byte)"},
+ {RPCResult::Type::STR_HEX, "txid", "transaction id encoded in little-endian hexadecimal"},
+ {RPCResult::Type::STR_HEX, "hash", "hash encoded in little-endian hexadecimal (including witness data)"},
+ {RPCResult::Type::ARR, "depends", "array of numbers",
+ {
+ {RPCResult::Type::NUM, "", "transactions before this one (by 1-based index in 'transactions' list) that must be present in the final block if this one is"},
+ }},
+ {RPCResult::Type::NUM, "fee", "difference in value between transaction inputs and outputs (in satoshis); for coinbase transactions, this is a negative Number of the total collected block fees (ie, not including the block subsidy); if key is not present, fee is unknown and clients MUST NOT assume there isn't one"},
+ {RPCResult::Type::NUM, "sigops", "total SigOps cost, as counted for purposes of block limits; if key is not present, sigop cost is unknown and clients MUST NOT assume it is zero"},
+ {RPCResult::Type::NUM, "weight", "total transaction weight, as counted for purposes of block limits"},
+ }},
+ }},
+ {RPCResult::Type::OBJ, "coinbaseaux", "data that should be included in the coinbase's scriptSig content",
+ {
+ {RPCResult::Type::ELISION, "", ""},
+ }},
+ {RPCResult::Type::NUM, "coinbasevalue", "maximum allowable input to coinbase transaction, including the generation award and transaction fees (in satoshis)"},
+ {RPCResult::Type::OBJ, "coinbasetxn", "information for coinbase transaction",
+ {
+ {RPCResult::Type::ELISION, "", ""},
+ }},
+ {RPCResult::Type::STR, "target", "The hash target"},
+ {RPCResult::Type::NUM_TIME, "mintime", "The minimum timestamp appropriate for the next block time, expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::ARR, "mutable", "list of ways the block template may be changed",
+ {
+ {RPCResult::Type::STR, "value", "A way the block template may be changed, e.g. 'time', 'transactions', 'prevblock'"},
+ }},
+ {RPCResult::Type::STR_HEX, "noncerange", "A range of valid nonces"},
+ {RPCResult::Type::NUM, "sigoplimit", "limit of sigops in blocks"},
+ {RPCResult::Type::NUM, "sizelimit", "limit of block size"},
+ {RPCResult::Type::NUM, "weightlimit", "limit of block weight"},
+ {RPCResult::Type::NUM_TIME, "curtime", "current timestamp in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::STR, "bits", "compressed target of next block"},
+ {RPCResult::Type::NUM, "height", "The height of the next block"},
+ }},
RPCExamples{
HelpExampleCli("getblocktemplate", "'{\"rules\": [\"segwit\"]}'")
+ HelpExampleRpc("getblocktemplate", "{\"rules\": [\"segwit\"]}")
@@ -800,8 +813,7 @@ static UniValue submitheader(const JSONRPCRequest& request)
{"hexdata", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "the hex-encoded block header data"},
},
RPCResult{
- "None"
- },
+ RPCResult::Type::NONE, "", "None"},
RPCExamples{
HelpExampleCli("submitheader", "\"aabbcc\"") +
HelpExampleRpc("submitheader", "\"aabbcc\"")
@@ -823,7 +835,7 @@ static UniValue submitheader(const JSONRPCRequest& request)
ProcessNewBlockHeaders({h}, state, Params());
if (state.IsValid()) return NullUniValue;
if (state.IsError()) {
- throw JSONRPCError(RPC_VERIFY_ERROR, FormatStateMessage(state));
+ throw JSONRPCError(RPC_VERIFY_ERROR, state.ToString());
}
throw JSONRPCError(RPC_VERIFY_ERROR, state.GetRejectReason());
}
@@ -848,17 +860,19 @@ static UniValue estimatesmartfee(const JSONRPCRequest& request)
" \"CONSERVATIVE\""},
},
RPCResult{
- "{\n"
- " \"feerate\" : x.x, (numeric, optional) estimate fee rate in " + CURRENCY_UNIT + "/kB\n"
- " \"errors\": [ str... ] (json array of strings, optional) Errors encountered during processing\n"
- " \"blocks\" : n (numeric) block number where estimate was found\n"
- "}\n"
- "\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "feerate", /* optional */ true, "estimate fee rate in " + CURRENCY_UNIT + "/kB (only present if no errors were encountered)"},
+ {RPCResult::Type::ARR, "errors", "Errors encountered during processing",
+ {
+ {RPCResult::Type::STR, "", "error"},
+ }},
+ {RPCResult::Type::NUM, "blocks", "block number where estimate was found\n"
"The request target will be clamped between 2 and the highest target\n"
"fee estimation is able to return based on how long it has been running.\n"
"An error is returned if not enough transactions and blocks\n"
- "have been observed to make an estimate for any number of blocks.\n"
- },
+ "have been observed to make an estimate for any number of blocks."},
+ }},
RPCExamples{
HelpExampleCli("estimatesmartfee", "6")
},
@@ -908,28 +922,40 @@ static UniValue estimaterawfee(const JSONRPCRequest& request)
" lower buckets."},
},
RPCResult{
- "{\n"
- " \"short\" : { (json object, optional) estimate for short time horizon\n"
- " \"feerate\" : x.x, (numeric, optional) estimate fee rate in " + CURRENCY_UNIT + "/kB\n"
- " \"decay\" : x.x, (numeric) exponential decay (per block) for historical moving average of confirmation data\n"
- " \"scale\" : x, (numeric) The resolution of confirmation targets at this time horizon\n"
- " \"pass\" : { (json object, optional) information about the lowest range of feerates to succeed in meeting the threshold\n"
- " \"startrange\" : x.x, (numeric) start of feerate range\n"
- " \"endrange\" : x.x, (numeric) end of feerate range\n"
- " \"withintarget\" : x.x, (numeric) number of txs over history horizon in the feerate range that were confirmed within target\n"
- " \"totalconfirmed\" : x.x, (numeric) number of txs over history horizon in the feerate range that were confirmed at any point\n"
- " \"inmempool\" : x.x, (numeric) current number of txs in mempool in the feerate range unconfirmed for at least target blocks\n"
- " \"leftmempool\" : x.x, (numeric) number of txs over history horizon in the feerate range that left mempool unconfirmed after target\n"
- " },\n"
- " \"fail\" : { ... }, (json object, optional) information about the highest range of feerates to fail to meet the threshold\n"
- " \"errors\": [ str... ] (json array of strings, optional) Errors encountered during processing\n"
- " },\n"
- " \"medium\" : { ... }, (json object, optional) estimate for medium time horizon\n"
- " \"long\" : { ... } (json object) estimate for long time horizon\n"
- "}\n"
- "\n"
- "Results are returned for any horizon which tracks blocks up to the confirmation target.\n"
- },
+ RPCResult::Type::OBJ, "", "Results are returned for any horizon which tracks blocks up to the confirmation target",
+ {
+ {RPCResult::Type::OBJ, "short", /* optional */ true, "estimate for short time horizon",
+ {
+ {RPCResult::Type::NUM, "feerate", /* optional */ true, "estimate fee rate in " + CURRENCY_UNIT + "/kB"},
+ {RPCResult::Type::NUM, "decay", "exponential decay (per block) for historical moving average of confirmation data"},
+ {RPCResult::Type::NUM, "scale", "The resolution of confirmation targets at this time horizon"},
+ {RPCResult::Type::OBJ, "pass", /* optional */ true, "information about the lowest range of feerates to succeed in meeting the threshold",
+ {
+ {RPCResult::Type::NUM, "startrange", "start of feerate range"},
+ {RPCResult::Type::NUM, "endrange", "end of feerate range"},
+ {RPCResult::Type::NUM, "withintarget", "number of txs over history horizon in the feerate range that were confirmed within target"},
+ {RPCResult::Type::NUM, "totalconfirmed", "number of txs over history horizon in the feerate range that were confirmed at any point"},
+ {RPCResult::Type::NUM, "inmempool", "current number of txs in mempool in the feerate range unconfirmed for at least target blocks"},
+ {RPCResult::Type::NUM, "leftmempool", "number of txs over history horizon in the feerate range that left mempool unconfirmed after target"},
+ }},
+ {RPCResult::Type::OBJ, "fail", /* optional */ true, "information about the highest range of feerates to fail to meet the threshold",
+ {
+ {RPCResult::Type::ELISION, "", ""},
+ }},
+ {RPCResult::Type::ARR, "errors", /* optional */ true, "Errors encountered during processing",
+ {
+ {RPCResult::Type::STR, "error", ""},
+ }},
+ }},
+ {RPCResult::Type::OBJ, "medium", /* optional */ true, "estimate for medium time horizon",
+ {
+ {RPCResult::Type::ELISION, "", ""},
+ }},
+ {RPCResult::Type::OBJ, "long", /* optional */ true, "estimate for long time horizon",
+ {
+ {RPCResult::Type::ELISION, "", ""},
+ }},
+ }},
RPCExamples{
HelpExampleCli("estimaterawfee", "6 0.9")
},
diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp
index 56bd33b0ec..8357183934 100644
--- a/src/rpc/misc.cpp
+++ b/src/rpc/misc.cpp
@@ -5,15 +5,17 @@
#include <httpserver.h>
#include <key_io.h>
+#include <node/context.h>
#include <outputtype.h>
#include <rpc/blockchain.h>
#include <rpc/server.h>
#include <rpc/util.h>
+#include <scheduler.h>
#include <script/descriptor.h>
#include <util/check.h>
+#include <util/message.h> // For MessageSign(), MessageVerify()
#include <util/strencodings.h>
#include <util/system.h>
-#include <util/validation.h>
#include <stdint.h>
#include <tuple>
@@ -31,19 +33,20 @@ static UniValue validateaddress(const JSONRPCRequest& request)
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The bitcoin address to validate"},
},
RPCResult{
- "{\n"
- " \"isvalid\" : true|false, (boolean) If the address is valid or not. If not, this is the only property returned.\n"
- " \"address\" : \"address\", (string) The bitcoin address validated\n"
- " \"scriptPubKey\" : \"hex\", (string) The hex-encoded scriptPubKey generated by the address\n"
- " \"isscript\" : true|false, (boolean) If the key is a script\n"
- " \"iswitness\" : true|false, (boolean) If the address is a witness address\n"
- " \"witness_version\" : version (numeric, optional) The version number of the witness program\n"
- " \"witness_program\" : \"hex\" (string, optional) The hex value of the witness program\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "isvalid", "If the address is valid or not. If not, this is the only property returned."},
+ {RPCResult::Type::STR, "address", "The bitcoin address validated"},
+ {RPCResult::Type::STR_HEX, "scriptPubKey", "The hex-encoded scriptPubKey generated by the address"},
+ {RPCResult::Type::BOOL, "isscript", "If the key is a script"},
+ {RPCResult::Type::BOOL, "iswitness", "If the address is a witness address"},
+ {RPCResult::Type::NUM, "witness_version", /* optional */ true, "The version number of the witness program"},
+ {RPCResult::Type::STR_HEX, "witness_program", /* optional */ true, "The hex value of the witness program"},
+ }
},
RPCExamples{
- HelpExampleCli("validateaddress", "\"1PSSGeFHDnKNxiEyFrD1wcEaHr9hrQDDWc\"")
- + HelpExampleRpc("validateaddress", "\"1PSSGeFHDnKNxiEyFrD1wcEaHr9hrQDDWc\"")
+ HelpExampleCli("validateaddress", EXAMPLE_ADDRESS) +
+ HelpExampleRpc("validateaddress", EXAMPLE_ADDRESS)
},
}.Check(request);
@@ -80,10 +83,12 @@ static UniValue createmultisig(const JSONRPCRequest& request)
{"address_type", RPCArg::Type::STR, /* default */ "legacy", "The address type to use. Options are \"legacy\", \"p2sh-segwit\", and \"bech32\"."},
},
RPCResult{
- "{\n"
- " \"address\":\"multisigaddress\", (string) The value of the new multisig address.\n"
- " \"redeemScript\":\"script\" (string) The string value of the hex-encoded redemption script.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "address", "The value of the new multisig address."},
+ {RPCResult::Type::STR_HEX, "redeemScript", "The string value of the hex-encoded redemption script."},
+ {RPCResult::Type::STR, "descriptor", "The descriptor for this multisig"},
+ }
},
RPCExamples{
"\nCreate a multisig address from 2 public keys\n"
@@ -119,9 +124,13 @@ static UniValue createmultisig(const JSONRPCRequest& request)
CScript inner;
const CTxDestination dest = AddAndGetMultisigDestination(required, pubkeys, output_type, keystore, inner);
+ // Make the descriptor
+ std::unique_ptr<Descriptor> descriptor = InferDescriptor(GetScriptForDestination(dest), keystore);
+
UniValue result(UniValue::VOBJ);
result.pushKV("address", EncodeDestination(dest));
result.pushKV("redeemScript", HexStr(inner.begin(), inner.end()));
+ result.pushKV("descriptor", descriptor->ToString());
return result;
}
@@ -134,13 +143,14 @@ UniValue getdescriptorinfo(const JSONRPCRequest& request)
{"descriptor", RPCArg::Type::STR, RPCArg::Optional::NO, "The descriptor."},
},
RPCResult{
- "{\n"
- " \"descriptor\" : \"desc\", (string) The descriptor in canonical form, without private keys\n"
- " \"checksum\" : \"chksum\", (string) The checksum for the input descriptor\n"
- " \"isrange\" : true|false, (boolean) Whether the descriptor is ranged\n"
- " \"issolvable\" : true|false, (boolean) Whether the descriptor is solvable\n"
- " \"hasprivatekeys\" : true|false, (boolean) Whether the input descriptor contained at least one private key\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "descriptor", "The descriptor in canonical form, without private keys"},
+ {RPCResult::Type::STR, "checksum", "The checksum for the input descriptor"},
+ {RPCResult::Type::BOOL, "isrange", "Whether the descriptor is ranged"},
+ {RPCResult::Type::BOOL, "issolvable", "Whether the descriptor is solvable"},
+ {RPCResult::Type::BOOL, "hasprivatekeys", "Whether the input descriptor contained at least one private key"},
+ }
},
RPCExamples{
"Analyse a descriptor\n" +
@@ -182,7 +192,10 @@ UniValue deriveaddresses(const JSONRPCRequest& request)
{"range", RPCArg::Type::RANGE, RPCArg::Optional::OMITTED_NAMED_ARG, "If a ranged descriptor is used, this specifies the end or the range (in [begin,end] notation) to derive."},
},
RPCResult{
- "[ address ] (array) the derived addresses\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::STR, "address", "the derived addresses"},
+ }
},
RPCExamples{
"First three native segwit receive addresses\n" +
@@ -251,7 +264,7 @@ static UniValue verifymessage(const JSONRPCRequest& request)
{"message", RPCArg::Type::STR, RPCArg::Optional::NO, "The message that was signed."},
},
RPCResult{
- "true|false (boolean) If the signature is verified or not.\n"
+ RPCResult::Type::BOOL, "", "If the signature is verified or not."
},
RPCExamples{
"\nUnlock the wallet for 30 seconds\n"
@@ -271,31 +284,21 @@ static UniValue verifymessage(const JSONRPCRequest& request)
std::string strSign = request.params[1].get_str();
std::string strMessage = request.params[2].get_str();
- CTxDestination destination = DecodeDestination(strAddress);
- if (!IsValidDestination(destination)) {
+ switch (MessageVerify(strAddress, strSign, strMessage)) {
+ case MessageVerificationResult::ERR_INVALID_ADDRESS:
throw JSONRPCError(RPC_TYPE_ERROR, "Invalid address");
- }
-
- const PKHash *pkhash = boost::get<PKHash>(&destination);
- if (!pkhash) {
+ case MessageVerificationResult::ERR_ADDRESS_NO_KEY:
throw JSONRPCError(RPC_TYPE_ERROR, "Address does not refer to key");
- }
-
- bool fInvalid = false;
- std::vector<unsigned char> vchSig = DecodeBase64(strSign.c_str(), &fInvalid);
-
- if (fInvalid)
+ case MessageVerificationResult::ERR_MALFORMED_SIGNATURE:
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Malformed base64 encoding");
-
- CHashWriter ss(SER_GETHASH, 0);
- ss << strMessageMagic;
- ss << strMessage;
-
- CPubKey pubkey;
- if (!pubkey.RecoverCompact(ss.GetHash(), vchSig))
+ case MessageVerificationResult::ERR_PUBKEY_NOT_RECOVERED:
+ case MessageVerificationResult::ERR_NOT_SIGNED:
return false;
+ case MessageVerificationResult::OK:
+ return true;
+ }
- return (pubkey.GetID() == *pkhash);
+ return false;
}
static UniValue signmessagewithprivkey(const JSONRPCRequest& request)
@@ -307,7 +310,7 @@ static UniValue signmessagewithprivkey(const JSONRPCRequest& request)
{"message", RPCArg::Type::STR, RPCArg::Optional::NO, "The message to create a signature of."},
},
RPCResult{
- "\"signature\" (string) The signature of the message encoded in base 64\n"
+ RPCResult::Type::STR, "signature", "The signature of the message encoded in base 64"
},
RPCExamples{
"\nCreate the signature\n"
@@ -327,15 +330,13 @@ static UniValue signmessagewithprivkey(const JSONRPCRequest& request)
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid private key");
}
- CHashWriter ss(SER_GETHASH, 0);
- ss << strMessageMagic;
- ss << strMessage;
+ std::string signature;
- std::vector<unsigned char> vchSig;
- if (!key.SignCompact(ss.GetHash(), vchSig))
+ if (!MessageSign(key, strMessage, signature)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Sign failed");
+ }
- return EncodeBase64(vchSig.data(), vchSig.size());
+ return signature;
}
static UniValue setmocktime(const JSONRPCRequest& request)
@@ -350,8 +351,9 @@ static UniValue setmocktime(const JSONRPCRequest& request)
RPCExamples{""},
}.Check(request);
- if (!Params().MineBlocksOnDemand())
- throw std::runtime_error("setmocktime for regression testing (-regtest mode) only");
+ if (!Params().IsMockableChain()) {
+ throw std::runtime_error("setmocktime is for regression testing (-regtest mode) only");
+ }
// For now, don't change mocktime if we're in the middle of validation, as
// this could have an effect on mempool time-based eviction, as well as
@@ -366,6 +368,36 @@ static UniValue setmocktime(const JSONRPCRequest& request)
return NullUniValue;
}
+static UniValue mockscheduler(const JSONRPCRequest& request)
+{
+ RPCHelpMan{"mockscheduler",
+ "\nBump the scheduler into the future (-regtest only)\n",
+ {
+ {"delta_time", RPCArg::Type::NUM, RPCArg::Optional::NO, "Number of seconds to forward the scheduler into the future." },
+ },
+ RPCResults{},
+ RPCExamples{""},
+ }.Check(request);
+
+ if (!Params().IsMockableChain()) {
+ throw std::runtime_error("mockscheduler is for regression testing (-regtest mode) only");
+ }
+
+ // check params are valid values
+ RPCTypeCheck(request.params, {UniValue::VNUM});
+ int64_t delta_seconds = request.params[0].get_int64();
+ if ((delta_seconds <= 0) || (delta_seconds > 3600)) {
+ throw std::runtime_error("delta_time must be between 1 and 3600 seconds (1 hr)");
+ }
+
+ // protect against null pointer dereference
+ CHECK_NONFATAL(g_rpc_node);
+ CHECK_NONFATAL(g_rpc_node->scheduler);
+ g_rpc_node->scheduler->MockForward(std::chrono::seconds(delta_seconds));
+
+ return NullUniValue;
+}
+
static UniValue RPCLockedMemoryInfo()
{
LockedPool::Stats stats = LockedPoolManager::Instance().stats();
@@ -412,19 +444,21 @@ static UniValue getmemoryinfo(const JSONRPCRequest& request)
},
{
RPCResult{"mode \"stats\"",
- "{\n"
- " \"locked\": { (json object) Information about locked memory manager\n"
- " \"used\": xxxxx, (numeric) Number of bytes used\n"
- " \"free\": xxxxx, (numeric) Number of bytes available in current arenas\n"
- " \"total\": xxxxxxx, (numeric) Total number of bytes managed\n"
- " \"locked\": xxxxxx, (numeric) Amount of bytes that succeeded locking. If this number is smaller than total, locking pages failed at some point and key data could be swapped to disk.\n"
- " \"chunks_used\": xxxxx, (numeric) Number allocated chunks\n"
- " \"chunks_free\": xxxxx, (numeric) Number unused chunks\n"
- " }\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ, "locked", "Information about locked memory manager",
+ {
+ {RPCResult::Type::NUM, "used", "Number of bytes used"},
+ {RPCResult::Type::NUM, "free", "Number of bytes available in current arenas"},
+ {RPCResult::Type::NUM, "total", "Total number of bytes managed"},
+ {RPCResult::Type::NUM, "locked", "Amount of bytes that succeeded locking. If this number is smaller than total, locking pages failed at some point and key data could be swapped to disk."},
+ {RPCResult::Type::NUM, "chunks_used", "Number allocated chunks"},
+ {RPCResult::Type::NUM, "chunks_free", "Number unused chunks"},
+ }},
+ }
},
RPCResult{"mode \"mallocinfo\"",
- "\"<malloc version=\"1\">...\"\n"
+ RPCResult::Type::STR, "", "\"<malloc version=\"1\">...\""
},
},
RPCExamples{
@@ -491,10 +525,10 @@ UniValue logging(const JSONRPCRequest& request)
}},
},
RPCResult{
- "{ (json object where keys are the logging categories, and values indicates its status\n"
- " \"category\": true|false, (bool) if being debug logged or not. false:inactive, true:active\n"
- " ...\n"
- "}\n"
+ RPCResult::Type::OBJ_DYN, "", "keys are the logging categories, and values indicates its status",
+ {
+ {RPCResult::Type::BOOL, "category", "if being debug logged or not. false:inactive, true:active"},
+ }
},
RPCExamples{
HelpExampleCli("logging", "\"[\\\"all\\\"]\" \"[\\\"http\\\"]\"")
@@ -570,6 +604,7 @@ static const CRPCCommand commands[] =
/* Not shown in help */
{ "hidden", "setmocktime", &setmocktime, {"timestamp"}},
+ { "hidden", "mockscheduler", &mockscheduler, {"delta_time"}},
{ "hidden", "echo", &echo, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}},
{ "hidden", "echojson", &echo, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}},
};
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 5e53fa5f5d..eeb617f849 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -33,7 +33,7 @@ static UniValue getconnectioncount(const JSONRPCRequest& request)
"\nReturns the number of connections to other nodes.\n",
{},
RPCResult{
- "n (numeric) The connection count\n"
+ RPCResult::Type::NUM, "", "The connection count"
},
RPCExamples{
HelpExampleCli("getconnectioncount", "")
@@ -77,56 +77,60 @@ static UniValue getpeerinfo(const JSONRPCRequest& request)
"\nReturns data about each connected network node as a json array of objects.\n",
{},
RPCResult{
- "[\n"
- " {\n"
- " \"id\": n, (numeric) Peer index\n"
- " \"addr\":\"host:port\", (string) The IP address and port of the peer\n"
- " \"addrbind\":\"ip:port\", (string) Bind address of the connection to the peer\n"
- " \"addrlocal\":\"ip:port\", (string) Local address as reported by the peer\n"
- " \"services\":\"xxxxxxxxxxxxxxxx\", (string) The services offered\n"
- " \"servicesnames\":[ (array) the services offered, in human-readable form\n"
- " \"SERVICE_NAME\", (string) the service name if it is recognised\n"
- " ...\n"
- " ],\n"
- " \"relaytxes\":true|false, (boolean) Whether peer has asked us to relay transactions to it\n"
- " \"lastsend\": ttt, (numeric) The " + UNIX_EPOCH_TIME + " of the last send\n"
- " \"lastrecv\": ttt, (numeric) The " + UNIX_EPOCH_TIME + " of the last receive\n"
- " \"bytessent\": n, (numeric) The total bytes sent\n"
- " \"bytesrecv\": n, (numeric) The total bytes received\n"
- " \"conntime\": ttt, (numeric) The " + UNIX_EPOCH_TIME + " of the connection\n"
- " \"timeoffset\": ttt, (numeric) The time offset in seconds\n"
- " \"pingtime\": n, (numeric) ping time (if available)\n"
- " \"minping\": n, (numeric) minimum observed ping time (if any at all)\n"
- " \"pingwait\": n, (numeric) ping wait (if non-zero)\n"
- " \"version\": v, (numeric) The peer version, such as 70001\n"
- " \"subver\": \"/Satoshi:0.8.5/\", (string) The string version\n"
- " \"inbound\": true|false, (boolean) Inbound (true) or Outbound (false)\n"
- " \"addnode\": true|false, (boolean) Whether connection was due to addnode/-connect or if it was an automatic/inbound connection\n"
- " \"startingheight\": n, (numeric) The starting height (block) of the peer\n"
- " \"banscore\": n, (numeric) The ban score\n"
- " \"synced_headers\": n, (numeric) The last header we have in common with this peer\n"
- " \"synced_blocks\": n, (numeric) The last block we have in common with this peer\n"
- " \"inflight\": [\n"
- " n, (numeric) The heights of blocks we're currently asking from this peer\n"
- " ...\n"
- " ],\n"
- " \"whitelisted\": true|false, (boolean) Whether the peer is whitelisted\n"
- " \"minfeefilter\": n, (numeric) The minimum fee rate for transactions this peer accepts\n"
- " \"bytessent_per_msg\": {\n"
- " \"msg\": n, (numeric) The total bytes sent aggregated by message type\n"
- " When a message type is not listed in this json object, the bytes sent are 0.\n"
- " Only known message types can appear as keys in the object.\n"
- " ...\n"
- " },\n"
- " \"bytesrecv_per_msg\": {\n"
- " \"msg\": n, (numeric) The total bytes received aggregated by message type\n"
- " When a message type is not listed in this json object, the bytes received are 0.\n"
- " Only known message types can appear as keys in the object and all bytes received of unknown message types are listed under '"+NET_MESSAGE_COMMAND_OTHER+"'.\n"
- " ...\n"
- " }\n"
- " }\n"
- " ,...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {
+ {RPCResult::Type::NUM, "id", "Peer index"},
+ {RPCResult::Type::STR, "addr", "(host:port) The IP address and port of the peer"},
+ {RPCResult::Type::STR, "addrbind", "(ip:port) Bind address of the connection to the peer"},
+ {RPCResult::Type::STR, "addrlocal", "(ip:port) Local address as reported by the peer"},
+ {RPCResult::Type::NUM, "mapped_as", "The AS in the BGP route to the peer used for diversifying\n"
+ "peer selection (only available if the asmap config flag is set)"},
+ {RPCResult::Type::STR_HEX, "services", "The services offered"},
+ {RPCResult::Type::ARR, "servicesnames", "the services offered, in human-readable form",
+ {
+ {RPCResult::Type::STR, "SERVICE_NAME", "the service name if it is recognised"}
+ }},
+ {RPCResult::Type::BOOL, "relaytxes", "Whether peer has asked us to relay transactions to it"},
+ {RPCResult::Type::NUM_TIME, "lastsend", "The " + UNIX_EPOCH_TIME + " of the last send"},
+ {RPCResult::Type::NUM_TIME, "lastrecv", "The " + UNIX_EPOCH_TIME + " of the last receive"},
+ {RPCResult::Type::NUM, "bytessent", "The total bytes sent"},
+ {RPCResult::Type::NUM, "bytesrecv", "The total bytes received"},
+ {RPCResult::Type::NUM_TIME, "conntime", "The " + UNIX_EPOCH_TIME + " of the connection"},
+ {RPCResult::Type::NUM, "timeoffset", "The time offset in seconds"},
+ {RPCResult::Type::NUM, "pingtime", "ping time (if available)"},
+ {RPCResult::Type::NUM, "minping", "minimum observed ping time (if any at all)"},
+ {RPCResult::Type::NUM, "pingwait", "ping wait (if non-zero)"},
+ {RPCResult::Type::NUM, "version", "The peer version, such as 70001"},
+ {RPCResult::Type::STR, "subver", "The string version"},
+ {RPCResult::Type::BOOL, "inbound", "Inbound (true) or Outbound (false)"},
+ {RPCResult::Type::BOOL, "addnode", "Whether connection was due to addnode/-connect or if it was an automatic/inbound connection"},
+ {RPCResult::Type::NUM, "startingheight", "The starting height (block) of the peer"},
+ {RPCResult::Type::NUM, "banscore", "The ban score"},
+ {RPCResult::Type::NUM, "synced_headers", "The last header we have in common with this peer"},
+ {RPCResult::Type::NUM, "synced_blocks", "The last block we have in common with this peer"},
+ {RPCResult::Type::ARR, "inflight", "",
+ {
+ {RPCResult::Type::NUM, "n", "The heights of blocks we're currently asking from this peer"},
+ }},
+ {RPCResult::Type::BOOL, "whitelisted", "Whether the peer is whitelisted"},
+ {RPCResult::Type::NUM, "minfeefilter", "The minimum fee rate for transactions this peer accepts"},
+ {RPCResult::Type::OBJ_DYN, "bytessent_per_msg", "",
+ {
+ {RPCResult::Type::NUM, "msg", "The total bytes sent aggregated by message type\n"
+ "When a message type is not listed in this json object, the bytes sent are 0.\n"
+ "Only known message types can appear as keys in the object."}
+ }},
+            {RPCResult::Type::OBJ_DYN, "bytesrecv_per_msg", "",
+ {
+ {RPCResult::Type::NUM, "msg", "The total bytes received aggregated by message type\n"
+ "When a message type is not listed in this json object, the bytes received are 0.\n"
+ "Only known message types can appear as keys in the object and all bytes received of unknown message types are listed under '"+NET_MESSAGE_COMMAND_OTHER+"'."}
+ }},
+ }},
+ }},
},
RPCExamples{
HelpExampleCli("getpeerinfo", "")
@@ -152,6 +156,9 @@ static UniValue getpeerinfo(const JSONRPCRequest& request)
obj.pushKV("addrlocal", stats.addrLocal);
if (stats.addrBind.IsValid())
obj.pushKV("addrbind", stats.addrBind.ToString());
+ if (stats.m_mapped_as != 0) {
+ obj.pushKV("mapped_as", uint64_t(stats.m_mapped_as));
+ }
obj.pushKV("services", strprintf("%016x", stats.nServices));
obj.pushKV("servicesnames", GetServicesNames(stats.nServices));
obj.pushKV("relaytxes", stats.fRelayTxes);
@@ -161,12 +168,15 @@ static UniValue getpeerinfo(const JSONRPCRequest& request)
obj.pushKV("bytesrecv", stats.nRecvBytes);
obj.pushKV("conntime", stats.nTimeConnected);
obj.pushKV("timeoffset", stats.nTimeOffset);
- if (stats.dPingTime > 0.0)
- obj.pushKV("pingtime", stats.dPingTime);
- if (stats.dMinPing < static_cast<double>(std::numeric_limits<int64_t>::max())/1e6)
- obj.pushKV("minping", stats.dMinPing);
- if (stats.dPingWait > 0.0)
- obj.pushKV("pingwait", stats.dPingWait);
+ if (stats.m_ping_usec > 0) {
+ obj.pushKV("pingtime", ((double)stats.m_ping_usec) / 1e6);
+ }
+ if (stats.m_min_ping_usec < std::numeric_limits<int64_t>::max()) {
+ obj.pushKV("minping", ((double)stats.m_min_ping_usec) / 1e6);
+ }
+ if (stats.m_ping_wait_usec > 0) {
+ obj.pushKV("pingwait", ((double)stats.m_ping_wait_usec) / 1e6);
+ }
obj.pushKV("version", stats.nVersion);
// Use the sanitized form of subver here, to avoid tricksy remote peers from
// corrupting or modifying the JSON output by putting special characters in
@@ -316,19 +326,22 @@ static UniValue getaddednodeinfo(const JSONRPCRequest& request)
{"node", RPCArg::Type::STR, /* default */ "all nodes", "If provided, return information about this specific node, otherwise all nodes are returned."},
},
RPCResult{
- "[\n"
- " {\n"
- " \"addednode\" : \"192.168.0.201\", (string) The node IP address or name (as provided to addnode)\n"
- " \"connected\" : true|false, (boolean) If connected\n"
- " \"addresses\" : [ (list of objects) Only when connected = true\n"
- " {\n"
- " \"address\" : \"192.168.0.201:8333\", (string) The bitcoin server IP and port we're connected to\n"
- " \"connected\" : \"outbound\" (string) connection, inbound or outbound\n"
- " }\n"
- " ]\n"
- " }\n"
- " ,...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "addednode", "The node IP address or name (as provided to addnode)"},
+ {RPCResult::Type::BOOL, "connected", "If connected"},
+ {RPCResult::Type::ARR, "addresses", "Only when connected = true",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "address", "The bitcoin server IP and port we're connected to"},
+ {RPCResult::Type::STR, "connected", "connection, inbound or outbound"},
+ }},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("getaddednodeinfo", "\"192.168.0.201\"")
@@ -382,20 +395,21 @@ static UniValue getnettotals(const JSONRPCRequest& request)
"and current time.\n",
{},
RPCResult{
- "{\n"
- " \"totalbytesrecv\": n, (numeric) Total bytes received\n"
- " \"totalbytessent\": n, (numeric) Total bytes sent\n"
- " \"timemillis\": t, (numeric) Current UNIX time in milliseconds\n"
- " \"uploadtarget\":\n"
- " {\n"
- " \"timeframe\": n, (numeric) Length of the measuring timeframe in seconds\n"
- " \"target\": n, (numeric) Target in bytes\n"
- " \"target_reached\": true|false, (boolean) True if target is reached\n"
- " \"serve_historical_blocks\": true|false, (boolean) True if serving historical blocks\n"
- " \"bytes_left_in_cycle\": t, (numeric) Bytes left in current time cycle\n"
- " \"time_left_in_cycle\": t (numeric) Seconds left in current time cycle\n"
- " }\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "totalbytesrecv", "Total bytes received"},
+ {RPCResult::Type::NUM, "totalbytessent", "Total bytes sent"},
+ {RPCResult::Type::NUM_TIME, "timemillis", "Current UNIX time in milliseconds"},
+ {RPCResult::Type::OBJ, "uploadtarget", "",
+ {
+ {RPCResult::Type::NUM, "timeframe", "Length of the measuring timeframe in seconds"},
+ {RPCResult::Type::NUM, "target", "Target in bytes"},
+ {RPCResult::Type::BOOL, "target_reached", "True if target is reached"},
+ {RPCResult::Type::BOOL, "serve_historical_blocks", "True if serving historical blocks"},
+ {RPCResult::Type::NUM, "bytes_left_in_cycle", "Bytes left in current time cycle"},
+ {RPCResult::Type::NUM, "time_left_in_cycle", "Seconds left in current time cycle"},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("getnettotals", "")
@@ -448,41 +462,44 @@ static UniValue getnetworkinfo(const JSONRPCRequest& request)
"Returns an object containing various state info regarding P2P networking.\n",
{},
RPCResult{
- "{\n"
- " \"version\": xxxxx, (numeric) the server version\n"
- " \"subversion\": \"/Satoshi:x.x.x/\", (string) the server subversion string\n"
- " \"protocolversion\": xxxxx, (numeric) the protocol version\n"
- " \"localservices\": \"xxxxxxxxxxxxxxxx\", (string) the services we offer to the network\n"
- " \"localservicesnames\": [ (array) the services we offer to the network, in human-readable form\n"
- " \"SERVICE_NAME\", (string) the service name\n"
- " ...\n"
- " ],\n"
- " \"localrelay\": true|false, (bool) true if transaction relay is requested from peers\n"
- " \"timeoffset\": xxxxx, (numeric) the time offset\n"
- " \"connections\": xxxxx, (numeric) the number of connections\n"
- " \"networkactive\": true|false, (bool) whether p2p networking is enabled\n"
- " \"networks\": [ (array) information per network\n"
- " {\n"
- " \"name\": \"xxx\", (string) network (ipv4, ipv6 or onion)\n"
- " \"limited\": true|false, (boolean) is the network limited using -onlynet?\n"
- " \"reachable\": true|false, (boolean) is the network reachable?\n"
- " \"proxy\": \"host:port\" (string) the proxy that is used for this network, or empty if none\n"
- " \"proxy_randomize_credentials\": true|false, (string) Whether randomized credentials are used\n"
- " }\n"
- " ,...\n"
- " ],\n"
- " \"relayfee\": x.xxxxxxxx, (numeric) minimum relay fee for transactions in " + CURRENCY_UNIT + "/kB\n"
- " \"incrementalfee\": x.xxxxxxxx, (numeric) minimum fee increment for mempool limiting or BIP 125 replacement in " + CURRENCY_UNIT + "/kB\n"
- " \"localaddresses\": [ (array) list of local addresses\n"
- " {\n"
- " \"address\": \"xxxx\", (string) network address\n"
- " \"port\": xxx, (numeric) network port\n"
- " \"score\": xxx (numeric) relative score\n"
- " }\n"
- " ,...\n"
- " ]\n"
- " \"warnings\": \"...\" (string) any network and blockchain warnings\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "version", "the server version"},
+ {RPCResult::Type::STR, "subversion", "the server subversion string"},
+ {RPCResult::Type::NUM, "protocolversion", "the protocol version"},
+ {RPCResult::Type::STR_HEX, "localservices", "the services we offer to the network"},
+ {RPCResult::Type::ARR, "localservicesnames", "the services we offer to the network, in human-readable form",
+ {
+ {RPCResult::Type::STR, "SERVICE_NAME", "the service name"},
+ }},
+ {RPCResult::Type::BOOL, "localrelay", "true if transaction relay is requested from peers"},
+ {RPCResult::Type::NUM, "timeoffset", "the time offset"},
+ {RPCResult::Type::NUM, "connections", "the number of connections"},
+ {RPCResult::Type::BOOL, "networkactive", "whether p2p networking is enabled"},
+ {RPCResult::Type::ARR, "networks", "information per network",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "name", "network (ipv4, ipv6 or onion)"},
+ {RPCResult::Type::BOOL, "limited", "is the network limited using -onlynet?"},
+ {RPCResult::Type::BOOL, "reachable", "is the network reachable?"},
+ {RPCResult::Type::STR, "proxy", "(\"host:port\") the proxy that is used for this network, or empty if none"},
+ {RPCResult::Type::BOOL, "proxy_randomize_credentials", "Whether randomized credentials are used"},
+ }},
+ }},
+ {RPCResult::Type::NUM, "relayfee", "minimum relay fee for transactions in " + CURRENCY_UNIT + "/kB"},
+ {RPCResult::Type::NUM, "incrementalfee", "minimum fee increment for mempool limiting or BIP 125 replacement in " + CURRENCY_UNIT + "/kB"},
+ {RPCResult::Type::ARR, "localaddresses", "list of local addresses",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "address", "network address"},
+ {RPCResult::Type::NUM, "port", "network port"},
+ {RPCResult::Type::NUM, "score", "relative score"},
+ }},
+ }},
+ {RPCResult::Type::STR, "warnings", "any network and blockchain warnings"},
+ }
},
RPCExamples{
HelpExampleCli("getnetworkinfo", "")
@@ -562,11 +579,11 @@ static UniValue setban(const JSONRPCRequest& request)
if (!isSubnet) {
CNetAddr resolved;
- LookupHost(request.params[0].get_str().c_str(), resolved, false);
+ LookupHost(request.params[0].get_str(), resolved, false);
netAddr = resolved;
}
else
- LookupSubNet(request.params[0].get_str().c_str(), subNet);
+ LookupSubNet(request.params[0].get_str(), subNet);
if (! (isSubnet ? subNet.IsValid() : netAddr.IsValid()) )
throw JSONRPCError(RPC_CLIENT_INVALID_IP_OR_SUBNET, "Error: Invalid IP/Subnet");
@@ -689,15 +706,16 @@ static UniValue getnodeaddresses(const JSONRPCRequest& request)
{"count", RPCArg::Type::NUM, /* default */ "1", "How many addresses to return. Limited to the smaller of " + std::to_string(ADDRMAN_GETADDR_MAX) + " or " + std::to_string(ADDRMAN_GETADDR_MAX_PCT) + "% of all known addresses."},
},
RPCResult{
- "[\n"
- " {\n"
- " \"time\": ttt, (numeric) The " + UNIX_EPOCH_TIME + " of when the node was last seen\n"
- " \"services\": n, (numeric) The services offered\n"
- " \"address\": \"host\", (string) The address of the node\n"
- " \"port\": n (numeric) The port of the node\n"
- " }\n"
- " ,....\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM_TIME, "time", "The " + UNIX_EPOCH_TIME + " of when the node was last seen"},
+ {RPCResult::Type::NUM, "services", "The services offered"},
+ {RPCResult::Type::STR, "address", "The address of the node"},
+ {RPCResult::Type::NUM, "port", "The port of the node"},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("getnodeaddresses", "8")
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 5be7acce1c..361cf08086 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -94,54 +94,62 @@ static UniValue getrawtransaction(const JSONRPCRequest& request)
},
{
RPCResult{"if verbose is not set or set to false",
- "\"data\" (string) The serialized, hex-encoded data for 'txid'\n"
+ RPCResult::Type::STR, "data", "The serialized, hex-encoded data for 'txid'"
},
RPCResult{"if verbose is set to true",
- "{\n"
- " \"in_active_chain\": b, (bool) Whether specified block is in the active chain or not (only present with explicit \"blockhash\" argument)\n"
- " \"hex\" : \"data\", (string) The serialized, hex-encoded data for 'txid'\n"
- " \"txid\" : \"id\", (string) The transaction id (same as provided)\n"
- " \"hash\" : \"id\", (string) The transaction hash (differs from txid for witness transactions)\n"
- " \"size\" : n, (numeric) The serialized transaction size\n"
- " \"vsize\" : n, (numeric) The virtual transaction size (differs from size for witness transactions)\n"
- " \"weight\" : n, (numeric) The transaction's weight (between vsize*4-3 and vsize*4)\n"
- " \"version\" : n, (numeric) The version\n"
- " \"locktime\" : ttt, (numeric) The lock time\n"
- " \"vin\" : [ (array of json objects)\n"
- " {\n"
- " \"txid\": \"id\", (string) The transaction id\n"
- " \"vout\": n, (numeric) \n"
- " \"scriptSig\": { (json object) The script\n"
- " \"asm\": \"asm\", (string) asm\n"
- " \"hex\": \"hex\" (string) hex\n"
- " },\n"
- " \"sequence\": n (numeric) The script sequence number\n"
- " \"txinwitness\": [\"hex\", ...] (array of string) hex-encoded witness data (if any)\n"
- " }\n"
- " ,...\n"
- " ],\n"
- " \"vout\" : [ (array of json objects)\n"
- " {\n"
- " \"value\" : x.xxx, (numeric) The value in " + CURRENCY_UNIT + "\n"
- " \"n\" : n, (numeric) index\n"
- " \"scriptPubKey\" : { (json object)\n"
- " \"asm\" : \"asm\", (string) the asm\n"
- " \"hex\" : \"hex\", (string) the hex\n"
- " \"reqSigs\" : n, (numeric) The required sigs\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
- " \"addresses\" : [ (json array of string)\n"
- " \"address\" (string) bitcoin address\n"
- " ,...\n"
- " ]\n"
- " }\n"
- " }\n"
- " ,...\n"
- " ],\n"
- " \"blockhash\" : \"hash\", (string) the block hash\n"
- " \"confirmations\" : n, (numeric) The confirmations\n"
- " \"blocktime\" : ttt (numeric) The block time expressed in " + UNIX_EPOCH_TIME + "\n"
- " \"time\" : ttt, (numeric) Same as \"blocktime\"\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "in_active_chain", "Whether specified block is in the active chain or not (only present with explicit \"blockhash\" argument)"},
+ {RPCResult::Type::STR_HEX, "hex", "The serialized, hex-encoded data for 'txid'"},
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id (same as provided)"},
+ {RPCResult::Type::STR_HEX, "hash", "The transaction hash (differs from txid for witness transactions)"},
+ {RPCResult::Type::NUM, "size", "The serialized transaction size"},
+ {RPCResult::Type::NUM, "vsize", "The virtual transaction size (differs from size for witness transactions)"},
+ {RPCResult::Type::NUM, "weight", "The transaction's weight (between vsize*4-3 and vsize*4)"},
+ {RPCResult::Type::NUM, "version", "The version"},
+ {RPCResult::Type::NUM_TIME, "locktime", "The lock time"},
+ {RPCResult::Type::ARR, "vin", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id"},
+ {RPCResult::Type::STR, "vout", ""},
+ {RPCResult::Type::OBJ, "scriptSig", "The script",
+ {
+ {RPCResult::Type::STR, "asm", "asm"},
+ {RPCResult::Type::STR_HEX, "hex", "hex"},
+ }},
+ {RPCResult::Type::NUM, "sequence", "The script sequence number"},
+ {RPCResult::Type::ARR, "txinwitness", "",
+ {
+ {RPCResult::Type::STR_HEX, "hex", "hex-encoded witness data (if any)"},
+ }},
+ }},
+ }},
+ {RPCResult::Type::ARR, "vout", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "value", "The value in " + CURRENCY_UNIT},
+ {RPCResult::Type::NUM, "n", "index"},
+ {RPCResult::Type::OBJ, "scriptPubKey", "",
+ {
+ {RPCResult::Type::STR, "asm", "the asm"},
+ {RPCResult::Type::STR, "hex", "the hex"},
+ {RPCResult::Type::NUM, "reqSigs", "The required sigs"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ {RPCResult::Type::ARR, "addresses", "",
+ {
+ {RPCResult::Type::STR, "address", "bitcoin address"},
+ }},
+ }},
+ }},
+ }},
+ {RPCResult::Type::STR_HEX, "blockhash", "the block hash"},
+ {RPCResult::Type::NUM, "confirmations", "The confirmations"},
+ {RPCResult::Type::NUM_TIME, "blocktime", "The block time expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::NUM, "time", "Same as \"blocktime\""},
+ }
},
},
RPCExamples{
@@ -230,7 +238,7 @@ static UniValue gettxoutproof(const JSONRPCRequest& request)
{"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED_NAMED_ARG, "If specified, looks for txid in the block with this hash"},
},
RPCResult{
- "\"data\" (string) A string that is a serialized, hex-encoded data for the proof.\n"
+ RPCResult::Type::STR, "data", "A string that is a serialized, hex-encoded data for the proof."
},
RPCExamples{""},
}.Check(request);
@@ -315,7 +323,10 @@ static UniValue verifytxoutproof(const JSONRPCRequest& request)
{"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The hex-encoded proof generated by gettxoutproof"},
},
RPCResult{
- "[\"txid\"] (array, strings) The txid(s) which the proof commits to, or empty array if the proof can not be validated.\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The txid(s) which the proof commits to, or empty array if the proof can not be validated."},
+ }
},
RPCExamples{""},
}.Check(request);
@@ -390,7 +401,7 @@ static UniValue createrawtransaction(const JSONRPCRequest& request)
" Allows this transaction to be replaced by a transaction with higher fees. If provided, it is an error if explicit sequence numbers are incompatible."},
},
RPCResult{
- "\"transaction\" (string) hex string of the transaction\n"
+ RPCResult::Type::STR_HEX, "transaction", "hex string of the transaction"
},
RPCExamples{
HelpExampleCli("createrawtransaction", "\"[{\\\"txid\\\":\\\"myid\\\",\\\"vout\\\":0}]\" \"[{\\\"address\\\":0.01}]\"")
@@ -432,45 +443,53 @@ static UniValue decoderawtransaction(const JSONRPCRequest& request)
},
},
RPCResult{
- "{\n"
- " \"txid\" : \"id\", (string) The transaction id\n"
- " \"hash\" : \"id\", (string) The transaction hash (differs from txid for witness transactions)\n"
- " \"size\" : n, (numeric) The transaction size\n"
- " \"vsize\" : n, (numeric) The virtual transaction size (differs from size for witness transactions)\n"
- " \"weight\" : n, (numeric) The transaction's weight (between vsize*4 - 3 and vsize*4)\n"
- " \"version\" : n, (numeric) The version\n"
- " \"locktime\" : ttt, (numeric) The lock time\n"
- " \"vin\" : [ (array of json objects)\n"
- " {\n"
- " \"txid\": \"id\", (string) The transaction id\n"
- " \"vout\": n, (numeric) The output number\n"
- " \"scriptSig\": { (json object) The script\n"
- " \"asm\": \"asm\", (string) asm\n"
- " \"hex\": \"hex\" (string) hex\n"
- " },\n"
- " \"txinwitness\": [\"hex\", ...] (array of string) hex-encoded witness data (if any)\n"
- " \"sequence\": n (numeric) The script sequence number\n"
- " }\n"
- " ,...\n"
- " ],\n"
- " \"vout\" : [ (array of json objects)\n"
- " {\n"
- " \"value\" : x.xxx, (numeric) The value in " + CURRENCY_UNIT + "\n"
- " \"n\" : n, (numeric) index\n"
- " \"scriptPubKey\" : { (json object)\n"
- " \"asm\" : \"asm\", (string) the asm\n"
- " \"hex\" : \"hex\", (string) the hex\n"
- " \"reqSigs\" : n, (numeric) The required sigs\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
- " \"addresses\" : [ (json array of string)\n"
- " \"12tvKAXCxZjSmdNbao16dKXC8tRWfcF5oc\" (string) bitcoin address\n"
- " ,...\n"
- " ]\n"
- " }\n"
- " }\n"
- " ,...\n"
- " ],\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id"},
+ {RPCResult::Type::STR_HEX, "hash", "The transaction hash (differs from txid for witness transactions)"},
+ {RPCResult::Type::NUM, "size", "The transaction size"},
+ {RPCResult::Type::NUM, "vsize", "The virtual transaction size (differs from size for witness transactions)"},
+ {RPCResult::Type::NUM, "weight", "The transaction's weight (between vsize*4 - 3 and vsize*4)"},
+ {RPCResult::Type::NUM, "version", "The version"},
+ {RPCResult::Type::NUM_TIME, "locktime", "The lock time"},
+ {RPCResult::Type::ARR, "vin", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id"},
+ {RPCResult::Type::NUM, "vout", "The output number"},
+ {RPCResult::Type::OBJ, "scriptSig", "The script",
+ {
+ {RPCResult::Type::STR, "asm", "asm"},
+ {RPCResult::Type::STR_HEX, "hex", "hex"},
+ }},
+ {RPCResult::Type::ARR, "txinwitness", "",
+ {
+ {RPCResult::Type::STR_HEX, "hex", "hex-encoded witness data (if any)"},
+ }},
+ {RPCResult::Type::NUM, "sequence", "The script sequence number"},
+ }},
+ }},
+ {RPCResult::Type::ARR, "vout", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "value", "The value in " + CURRENCY_UNIT},
+ {RPCResult::Type::NUM, "n", "index"},
+ {RPCResult::Type::OBJ, "scriptPubKey", "",
+ {
+ {RPCResult::Type::STR, "asm", "the asm"},
+ {RPCResult::Type::STR_HEX, "hex", "the hex"},
+ {RPCResult::Type::NUM, "reqSigs", "The required sigs"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ {RPCResult::Type::ARR, "addresses", "",
+ {
+ {RPCResult::Type::STR, "address", "bitcoin address"},
+ }},
+ }},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("decoderawtransaction", "\"hexstring\"")
@@ -513,26 +532,29 @@ static UniValue decodescript(const JSONRPCRequest& request)
{"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "the hex-encoded script"},
},
RPCResult{
- "{\n"
- " \"asm\":\"asm\", (string) Script public key\n"
- " \"type\":\"type\", (string) The output type (e.g. "+GetAllOutputTypes()+")\n"
- " \"reqSigs\": n, (numeric) The required signatures\n"
- " \"addresses\": [ (json array of string)\n"
- " \"address\" (string) bitcoin address\n"
- " ,...\n"
- " ],\n"
- " \"p2sh\":\"str\" (string) address of P2SH script wrapping this redeem script (not returned if the script is already a P2SH).\n"
- " \"segwit\": { (json object) Result of a witness script public key wrapping this redeem script (not returned if the script is a P2SH or witness).\n"
- " \"asm\":\"str\", (string) String representation of the script public key\n"
- " \"hex\":\"hexstr\", (string) Hex string of the script public key\n"
- " \"type\":\"str\", (string) The type of the script public key (e.g. witness_v0_keyhash or witness_v0_scripthash)\n"
- " \"reqSigs\": n, (numeric) The required signatures (always 1)\n"
- " \"addresses\": [ (json array of string) (always length 1)\n"
- " \"address\" (string) segwit address\n"
- " ,...\n"
- " ],\n"
- " \"p2sh-segwit\":\"str\" (string) address of the P2SH script wrapping this witness redeem script.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "asm", "Script public key"},
+ {RPCResult::Type::STR, "type", "The output type (e.g. "+GetAllOutputTypes()+")"},
+ {RPCResult::Type::NUM, "reqSigs", "The required signatures"},
+ {RPCResult::Type::ARR, "addresses", "",
+ {
+ {RPCResult::Type::STR, "address", "bitcoin address"},
+ }},
+ {RPCResult::Type::STR, "p2sh", "address of P2SH script wrapping this redeem script (not returned if the script is already a P2SH)"},
+ {RPCResult::Type::OBJ, "segwit", "Result of a witness script public key wrapping this redeem script (not returned if the script is a P2SH or witness)",
+ {
+ {RPCResult::Type::STR, "asm", "String representation of the script public key"},
+ {RPCResult::Type::STR_HEX, "hex", "Hex string of the script public key"},
+ {RPCResult::Type::STR, "type", "The type of the script public key (e.g. witness_v0_keyhash or witness_v0_scripthash)"},
+ {RPCResult::Type::NUM, "reqSigs", "The required signatures (always 1)"},
+ {RPCResult::Type::ARR, "addresses", "(always length 1)",
+ {
+ {RPCResult::Type::STR, "address", "segwit address"},
+ }},
+ {RPCResult::Type::STR, "p2sh-segwit", "address of the P2SH script wrapping this witness redeem script"},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("decodescript", "\"hexstring\"")
@@ -607,7 +629,7 @@ static UniValue combinerawtransaction(const JSONRPCRequest& request)
},
},
RPCResult{
- "\"hex\" (string) The hex-encoded raw transaction with signature(s)\n"
+ RPCResult::Type::STR, "", "The hex-encoded raw transaction with signature(s)"
},
RPCExamples{
HelpExampleCli("combinerawtransaction", R"('["myhex1", "myhex2", "myhex3"]')")
@@ -715,20 +737,22 @@ static UniValue signrawtransactionwithkey(const JSONRPCRequest& request)
},
},
RPCResult{
- "{\n"
- " \"hex\" : \"value\", (string) The hex-encoded raw transaction with signature(s)\n"
- " \"complete\" : true|false, (boolean) If the transaction has a complete set of signatures\n"
- " \"errors\" : [ (json array of objects) Script verification errors (if there are any)\n"
- " {\n"
- " \"txid\" : \"hash\", (string) The hash of the referenced, previous transaction\n"
- " \"vout\" : n, (numeric) The index of the output to spent and used as input\n"
- " \"scriptSig\" : \"hex\", (string) The hex-encoded signature script\n"
- " \"sequence\" : n, (numeric) Script sequence number\n"
- " \"error\" : \"text\" (string) Verification or signing error related to the input\n"
- " }\n"
- " ,...\n"
- " ]\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hex", "The hex-encoded raw transaction with signature(s)"},
+ {RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"},
+ {RPCResult::Type::ARR, "errors", "Script verification errors (if there are any)",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The hash of the referenced, previous transaction"},
+ {RPCResult::Type::NUM, "vout", "The index of the output to spent and used as input"},
+ {RPCResult::Type::STR_HEX, "scriptSig", "The hex-encoded signature script"},
+ {RPCResult::Type::NUM, "sequence", "Script sequence number"},
+ {RPCResult::Type::STR, "error", "Verification or signing error related to the input"},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("signrawtransactionwithkey", "\"myhex\" \"[\\\"key1\\\",\\\"key2\\\"]\"")
@@ -784,7 +808,7 @@ static UniValue sendrawtransaction(const JSONRPCRequest& request)
"/kB.\nSet to 0 to accept any fee rate.\n"},
},
RPCResult{
- "\"hex\" (string) The transaction hash in hex\n"
+ RPCResult::Type::STR_HEX, "", "The transaction hash in hex"
},
RPCExamples{
"\nCreate a transaction\n"
@@ -846,14 +870,16 @@ static UniValue testmempoolaccept(const JSONRPCRequest& request)
{"maxfeerate", RPCArg::Type::AMOUNT, /* default */ FormatMoney(DEFAULT_MAX_RAW_TX_FEE_RATE.GetFeePerK()), "Reject transactions whose fee rate is higher than the specified value, expressed in " + CURRENCY_UNIT + "/kB\n"},
},
RPCResult{
- "[ (array) The result of the mempool acceptance test for each raw transaction in the input array.\n"
- " Length is exactly one for now.\n"
- " {\n"
- " \"txid\" (string) The transaction hash in hex\n"
- " \"allowed\" (boolean) If the mempool allows this tx to be inserted\n"
- " \"reject-reason\" (string) Rejection string (only present when 'allowed' is false)\n"
- " }\n"
- "]\n"
+ RPCResult::Type::ARR, "", "The result of the mempool acceptance test for each raw transaction in the input array.\n"
+ "Length is exactly one for now.",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction hash in hex"},
+ {RPCResult::Type::BOOL, "allowed", "If the mempool allows this tx to be inserted"},
+ {RPCResult::Type::STR, "reject-reason", "Rejection string (only present when 'allowed' is false)"},
+ }},
+ }
},
RPCExamples{
"\nCreate a transaction\n"
@@ -950,92 +976,108 @@ UniValue decodepsbt(const JSONRPCRequest& request)
{"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "The PSBT base64 string"},
},
RPCResult{
- "{\n"
- " \"tx\" : { (json object) The decoded network-serialized unsigned transaction.\n"
- " ... The layout is the same as the output of decoderawtransaction.\n"
- " },\n"
- " \"unknown\" : { (json object) The unknown global fields\n"
- " \"key\" : \"value\" (key-value pair) An unknown key-value pair\n"
- " ...\n"
- " },\n"
- " \"inputs\" : [ (array of json objects)\n"
- " {\n"
- " \"non_witness_utxo\" : { (json object, optional) Decoded network transaction for non-witness UTXOs\n"
- " ...\n"
- " },\n"
- " \"witness_utxo\" : { (json object, optional) Transaction output for witness UTXOs\n"
- " \"amount\" : x.xxx, (numeric) The value in " + CURRENCY_UNIT + "\n"
- " \"scriptPubKey\" : { (json object)\n"
- " \"asm\" : \"asm\", (string) The asm\n"
- " \"hex\" : \"hex\", (string) The hex\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
- " \"address\" : \"address\" (string) Bitcoin address if there is one\n"
- " }\n"
- " },\n"
- " \"partial_signatures\" : { (json object, optional)\n"
- " \"pubkey\" : \"signature\", (string) The public key and signature that corresponds to it.\n"
- " ,...\n"
- " }\n"
- " \"sighash\" : \"type\", (string, optional) The sighash type to be used\n"
- " \"redeem_script\" : { (json object, optional)\n"
- " \"asm\" : \"asm\", (string) The asm\n"
- " \"hex\" : \"hex\", (string) The hex\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
- " }\n"
- " \"witness_script\" : { (json object, optional)\n"
- " \"asm\" : \"asm\", (string) The asm\n"
- " \"hex\" : \"hex\", (string) The hex\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
- " }\n"
- " \"bip32_derivs\" : { (json object, optional)\n"
- " \"pubkey\" : { (json object, optional) The public key with the derivation path as the value.\n"
- " \"master_fingerprint\" : \"fingerprint\" (string) The fingerprint of the master key\n"
- " \"path\" : \"path\", (string) The path\n"
- " }\n"
- " ,...\n"
- " }\n"
- " \"final_scriptsig\" : { (json object, optional)\n"
- " \"asm\" : \"asm\", (string) The asm\n"
- " \"hex\" : \"hex\", (string) The hex\n"
- " }\n"
- " \"final_scriptwitness\": [\"hex\", ...] (array of string) hex-encoded witness data (if any)\n"
- " \"unknown\" : { (json object) The unknown global fields\n"
- " \"key\" : \"value\" (key-value pair) An unknown key-value pair\n"
- " ...\n"
- " },\n"
- " }\n"
- " ,...\n"
- " ]\n"
- " \"outputs\" : [ (array of json objects)\n"
- " {\n"
- " \"redeem_script\" : { (json object, optional)\n"
- " \"asm\" : \"asm\", (string) The asm\n"
- " \"hex\" : \"hex\", (string) The hex\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
- " }\n"
- " \"witness_script\" : { (json object, optional)\n"
- " \"asm\" : \"asm\", (string) The asm\n"
- " \"hex\" : \"hex\", (string) The hex\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
- " }\n"
- " \"bip32_derivs\" : [ (array of json objects, optional)\n"
- " {\n"
- " \"pubkey\" : \"pubkey\", (string) The public key this path corresponds to\n"
- " \"master_fingerprint\" : \"fingerprint\" (string) The fingerprint of the master key\n"
- " \"path\" : \"path\", (string) The path\n"
- " }\n"
- " }\n"
- " ,...\n"
- " ],\n"
- " \"unknown\" : { (json object) The unknown global fields\n"
- " \"key\" : \"value\" (key-value pair) An unknown key-value pair\n"
- " ...\n"
- " },\n"
- " }\n"
- " ,...\n"
- " ]\n"
- " \"fee\" : fee (numeric, optional) The transaction fee paid if all UTXOs slots in the PSBT have been filled.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ, "tx", "The decoded network-serialized unsigned transaction.",
+ {
+ {RPCResult::Type::ELISION, "", "The layout is the same as the output of decoderawtransaction."},
+ }},
+ {RPCResult::Type::OBJ_DYN, "unknown", "The unknown global fields",
+ {
+ {RPCResult::Type::STR_HEX, "key", "(key-value pair) An unknown key-value pair"},
+ }},
+ {RPCResult::Type::ARR, "inputs", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ, "non_witness_utxo", /* optional */ true, "Decoded network transaction for non-witness UTXOs",
+ {
+ {RPCResult::Type::ELISION, "", ""},
+ }},
+ {RPCResult::Type::OBJ, "witness_utxo", /* optional */ true, "Transaction output for witness UTXOs",
+ {
+ {RPCResult::Type::NUM, "amount", "The value in " + CURRENCY_UNIT},
+ {RPCResult::Type::OBJ, "scriptPubKey", "",
+ {
+ {RPCResult::Type::STR, "asm", "The asm"},
+ {RPCResult::Type::STR_HEX, "hex", "The hex"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ {RPCResult::Type::STR, "address"," Bitcoin address if there is one"},
+ }},
+ }},
+ {RPCResult::Type::OBJ_DYN, "partial_signatures", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR, "pubkey", "The public key and signature that corresponds to it."},
+ }},
+ {RPCResult::Type::STR, "sighash", /* optional */ true, "The sighash type to be used"},
+ {RPCResult::Type::OBJ, "redeem_script", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR, "asm", "The asm"},
+ {RPCResult::Type::STR_HEX, "hex", "The hex"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ }},
+ {RPCResult::Type::OBJ, "witness_script", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR, "asm", "The asm"},
+ {RPCResult::Type::STR_HEX, "hex", "The hex"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ }},
+ {RPCResult::Type::ARR, "bip32_derivs", /* optional */ true, "",
+ {
+ {RPCResult::Type::OBJ, "pubkey", /* optional */ true, "The public key with the derivation path as the value.",
+ {
+ {RPCResult::Type::STR, "master_fingerprint", "The fingerprint of the master key"},
+ {RPCResult::Type::STR, "path", "The path"},
+ }},
+ }},
+ {RPCResult::Type::OBJ, "final_scriptsig", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR, "asm", "The asm"},
+ {RPCResult::Type::STR, "hex", "The hex"},
+ }},
+ {RPCResult::Type::ARR, "final_scriptwitness", "",
+ {
+ {RPCResult::Type::STR_HEX, "", "hex-encoded witness data (if any)"},
+ }},
+ {RPCResult::Type::OBJ_DYN, "unknown", "The unknown global fields",
+ {
+ {RPCResult::Type::STR_HEX, "key", "(key-value pair) An unknown key-value pair"},
+ }},
+ }},
+ }},
+ {RPCResult::Type::ARR, "outputs", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ, "redeem_script", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR, "asm", "The asm"},
+ {RPCResult::Type::STR_HEX, "hex", "The hex"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ }},
+ {RPCResult::Type::OBJ, "witness_script", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR, "asm", "The asm"},
+ {RPCResult::Type::STR_HEX, "hex", "The hex"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ }},
+ {RPCResult::Type::ARR, "bip32_derivs", /* optional */ true, "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "pubkey", "The public key this path corresponds to"},
+ {RPCResult::Type::STR, "master_fingerprint", "The fingerprint of the master key"},
+ {RPCResult::Type::STR, "path", "The path"},
+ }},
+ }},
+ {RPCResult::Type::OBJ_DYN, "unknown", "The unknown global fields",
+ {
+ {RPCResult::Type::STR_HEX, "key", "(key-value pair) An unknown key-value pair"},
+ }},
+ }},
+ }},
+ {RPCResult::Type::STR_AMOUNT, "fee", /* optional */ true, "The transaction fee paid if all UTXOs slots in the PSBT have been filled."},
+ }
},
RPCExamples{
HelpExampleCli("decodepsbt", "\"psbt\"")
@@ -1079,7 +1121,12 @@ UniValue decodepsbt(const JSONRPCRequest& request)
UniValue out(UniValue::VOBJ);
out.pushKV("amount", ValueFromAmount(txout.nValue));
- total_in += txout.nValue;
+ if (MoneyRange(txout.nValue) && MoneyRange(total_in + txout.nValue)) {
+ total_in += txout.nValue;
+ } else {
+ // Out-of-range value: treat the UTXO set as incomplete so no fee is shown later
+ have_all_utxos = false;
+ }
UniValue o(UniValue::VOBJ);
ScriptToUniv(txout.scriptPubKey, o, true);
@@ -1089,7 +1136,13 @@ UniValue decodepsbt(const JSONRPCRequest& request)
UniValue non_wit(UniValue::VOBJ);
TxToUniv(*input.non_witness_utxo, uint256(), non_wit, false);
in.pushKV("non_witness_utxo", non_wit);
- total_in += input.non_witness_utxo->vout[psbtx.tx->vin[i].prevout.n].nValue;
+ CAmount utxo_val = input.non_witness_utxo->vout[psbtx.tx->vin[i].prevout.n].nValue;
+ if (MoneyRange(utxo_val) && MoneyRange(total_in + utxo_val)) {
+ total_in += utxo_val;
+ } else {
+ // Out-of-range value: treat the UTXO set as incomplete so no fee is shown later
+ have_all_utxos = false;
+ }
} else {
have_all_utxos = false;
}
@@ -1205,7 +1258,12 @@ UniValue decodepsbt(const JSONRPCRequest& request)
outputs.push_back(out);
// Fee calculation
- output_value += psbtx.tx->vout[i].nValue;
+ if (MoneyRange(psbtx.tx->vout[i].nValue) && MoneyRange(output_value + psbtx.tx->vout[i].nValue)) {
+ output_value += psbtx.tx->vout[i].nValue;
+ } else {
+ // Out-of-range value: treat the UTXO set as incomplete so no fee is shown later
+ have_all_utxos = false;
+ }
}
result.pushKV("outputs", outputs);
if (have_all_utxos) {
@@ -1228,7 +1286,7 @@ UniValue combinepsbt(const JSONRPCRequest& request)
},
},
RPCResult{
- " \"psbt\" (string) The base64-encoded partially signed transaction\n"
+ RPCResult::Type::STR, "", "The base64-encoded partially signed transaction"
},
RPCExamples{
HelpExampleCli("combinepsbt", R"('["mybase64_1", "mybase64_2", "mybase64_3"]')")
@@ -1276,12 +1334,12 @@ UniValue finalizepsbt(const JSONRPCRequest& request)
" extract and return the complete transaction in normal network serialization instead of the PSBT."},
},
RPCResult{
- "{\n"
- " \"psbt\" : \"value\", (string) The base64-encoded partially signed transaction if not extracted\n"
- " \"hex\" : \"value\", (string) The hex-encoded network transaction if extracted\n"
- " \"complete\" : true|false, (boolean) If the transaction has a complete set of signatures\n"
- " ]\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "psbt", "The base64-encoded partially signed transaction if not extracted"},
+ {RPCResult::Type::STR_HEX, "hex", "The hex-encoded network transaction if extracted"},
+ {RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"},
+ }
},
RPCExamples{
HelpExampleCli("finalizepsbt", "\"psbt\"")
@@ -1359,7 +1417,7 @@ UniValue createpsbt(const JSONRPCRequest& request)
" Allows this transaction to be replaced by a transaction with higher fees. If provided, it is an error if explicit sequence numbers are incompatible."},
},
RPCResult{
- " \"psbt\" (string) The resulting raw transaction (base64-encoded string)\n"
+ RPCResult::Type::STR, "", "The resulting raw transaction (base64-encoded string)"
},
RPCExamples{
HelpExampleCli("createpsbt", "\"[{\\\"txid\\\":\\\"myid\\\",\\\"vout\\\":0}]\" \"[{\\\"data\\\":\\\"00010203\\\"}]\"")
@@ -1416,7 +1474,7 @@ UniValue converttopsbt(const JSONRPCRequest& request)
},
},
RPCResult{
- " \"psbt\" (string) The resulting raw transaction (base64-encoded string)\n"
+ RPCResult::Type::STR, "", "The resulting raw transaction (base64-encoded string)"
},
RPCExamples{
"\nCreate a transaction\n"
@@ -1480,7 +1538,7 @@ UniValue utxoupdatepsbt(const JSONRPCRequest& request)
}},
},
RPCResult {
- " \"psbt\" (string) The base64-encoded partially signed transaction with inputs updated\n"
+ RPCResult::Type::STR, "", "The base64-encoded partially signed transaction with inputs updated"
},
RPCExamples {
HelpExampleCli("utxoupdatepsbt", "\"psbt\"")
@@ -1565,7 +1623,7 @@ UniValue joinpsbts(const JSONRPCRequest& request)
}}
},
RPCResult {
- " \"psbt\" (string) The base64-encoded partially signed transaction\n"
+ RPCResult::Type::STR, "", "The base64-encoded partially signed transaction"
},
RPCExamples {
HelpExampleCli("joinpsbts", "\"psbt\"")
@@ -1654,31 +1712,36 @@ UniValue analyzepsbt(const JSONRPCRequest& request)
{"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "A base64 string of a PSBT"}
},
RPCResult {
- "{\n"
- " \"inputs\" : [ (array of json objects)\n"
- " {\n"
- " \"has_utxo\" : true|false (boolean) Whether a UTXO is provided\n"
- " \"is_final\" : true|false (boolean) Whether the input is finalized\n"
- " \"missing\" : { (json object, optional) Things that are missing that are required to complete this input\n"
- " \"pubkeys\" : [ (array, optional)\n"
- " \"keyid\" (string) Public key ID, hash160 of the public key, of a public key whose BIP 32 derivation path is missing\n"
- " ]\n"
- " \"signatures\" : [ (array, optional)\n"
- " \"keyid\" (string) Public key ID, hash160 of the public key, of a public key whose signature is missing\n"
- " ]\n"
- " \"redeemscript\" : \"hash\" (string, optional) Hash160 of the redeemScript that is missing\n"
- " \"witnessscript\" : \"hash\" (string, optional) SHA256 of the witnessScript that is missing\n"
- " }\n"
- " \"next\" : \"role\" (string, optional) Role of the next person that this input needs to go to\n"
- " }\n"
- " ,...\n"
- " ]\n"
- " \"estimated_vsize\" : vsize (numeric, optional) Estimated vsize of the final signed transaction\n"
- " \"estimated_feerate\" : feerate (numeric, optional) Estimated feerate of the final signed transaction in " + CURRENCY_UNIT + "/kB. Shown only if all UTXO slots in the PSBT have been filled.\n"
- " \"fee\" : fee (numeric, optional) The transaction fee paid. Shown only if all UTXO slots in the PSBT have been filled.\n"
- " \"next\" : \"role\" (string) Role of the next person that this psbt needs to go to\n"
- " \"error\" : \"error\" (string) Error message if there is one\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ARR, "inputs", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "has_utxo", "Whether a UTXO is provided"},
+ {RPCResult::Type::BOOL, "is_final", "Whether the input is finalized"},
+ {RPCResult::Type::OBJ, "missing", /* optional */ true, "Things that are missing that are required to complete this input",
+ {
+ {RPCResult::Type::ARR, "pubkeys", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR_HEX, "keyid", "Public key ID, hash160 of the public key, of a public key whose BIP 32 derivation path is missing"},
+ }},
+ {RPCResult::Type::ARR, "signatures", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR_HEX, "keyid", "Public key ID, hash160 of the public key, of a public key whose signature is missing"},
+ }},
+ {RPCResult::Type::STR_HEX, "redeemscript", /* optional */ true, "Hash160 of the redeemScript that is missing"},
+ {RPCResult::Type::STR_HEX, "witnessscript", /* optional */ true, "SHA256 of the witnessScript that is missing"},
+ }},
+ {RPCResult::Type::STR, "next", /* optional */ true, "Role of the next person that this input needs to go to"},
+ }},
+ }},
+ {RPCResult::Type::NUM, "estimated_vsize", /* optional */ true, "Estimated vsize of the final signed transaction"},
+ {RPCResult::Type::STR_AMOUNT, "estimated_feerate", /* optional */ true, "Estimated feerate of the final signed transaction in " + CURRENCY_UNIT + "/kB. Shown only if all UTXO slots in the PSBT have been filled"},
+ {RPCResult::Type::STR_AMOUNT, "fee", /* optional */ true, "The transaction fee paid. Shown only if all UTXO slots in the PSBT have been filled"},
+ {RPCResult::Type::STR, "next", "Role of the next person that this psbt needs to go to"},
+ {RPCResult::Type::STR, "error", "Error message if there is one"},
+ }
},
RPCExamples {
HelpExampleCli("analyzepsbt", "\"psbt\"")
diff --git a/src/rpc/rawtransaction_util.cpp b/src/rpc/rawtransaction_util.cpp
index 40334883c5..54baec6c6f 100644
--- a/src/rpc/rawtransaction_util.cpp
+++ b/src/rpc/rawtransaction_util.cpp
@@ -272,55 +272,27 @@ void SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore,
{
int nHashType = ParseSighashString(hashType);
- bool fHashSingle = ((nHashType & ~SIGHASH_ANYONECANPAY) == SIGHASH_SINGLE);
-
// Script verification errors
- UniValue vErrors(UniValue::VARR);
-
- // Use CTransaction for the constant parts of the
- // transaction to avoid rehashing.
- const CTransaction txConst(mtx);
- // Sign what we can:
- for (unsigned int i = 0; i < mtx.vin.size(); i++) {
- CTxIn& txin = mtx.vin[i];
- auto coin = coins.find(txin.prevout);
- if (coin == coins.end() || coin->second.IsSpent()) {
- TxInErrorToJSON(txin, vErrors, "Input not found or already spent");
- continue;
- }
- const CScript& prevPubKey = coin->second.out.scriptPubKey;
- const CAmount& amount = coin->second.out.nValue;
-
- SignatureData sigdata = DataFromTransaction(mtx, i, coin->second.out);
- // Only sign SIGHASH_SINGLE if there's a corresponding output:
- if (!fHashSingle || (i < mtx.vout.size())) {
- ProduceSignature(*keystore, MutableTransactionSignatureCreator(&mtx, i, amount, nHashType), prevPubKey, sigdata);
- }
-
- UpdateInput(txin, sigdata);
+ std::map<int, std::string> input_errors;
- // amount must be specified for valid segwit signature
- if (amount == MAX_MONEY && !txin.scriptWitness.IsNull()) {
- throw JSONRPCError(RPC_TYPE_ERROR, strprintf("Missing amount for %s", coin->second.out.ToString()));
- }
+ bool complete = SignTransaction(mtx, keystore, coins, nHashType, input_errors);
+ SignTransactionResultToJSON(mtx, complete, coins, input_errors, result);
+}
- ScriptError serror = SCRIPT_ERR_OK;
- if (!VerifyScript(txin.scriptSig, prevPubKey, &txin.scriptWitness, STANDARD_SCRIPT_VERIFY_FLAGS, TransactionSignatureChecker(&txConst, i, amount), &serror)) {
- if (serror == SCRIPT_ERR_INVALID_STACK_OPERATION) {
- // Unable to sign input and verification failed (possible attempt to partially sign).
- TxInErrorToJSON(txin, vErrors, "Unable to sign input, invalid stack size (possibly missing key)");
- } else if (serror == SCRIPT_ERR_SIG_NULLFAIL) {
- // Verification failed (possibly due to insufficient signatures).
- TxInErrorToJSON(txin, vErrors, "CHECK(MULTI)SIG failing with non-zero signature (possibly need more signatures)");
- } else {
- TxInErrorToJSON(txin, vErrors, ScriptErrorString(serror));
- }
+void SignTransactionResultToJSON(CMutableTransaction& mtx, bool complete, const std::map<COutPoint, Coin>& coins, std::map<int, std::string>& input_errors, UniValue& result)
+{
+ // Make errors UniValue
+ UniValue vErrors(UniValue::VARR);
+ for (const auto& err_pair : input_errors) {
+ if (err_pair.second == "Missing amount") {
+ // Keep reporting this particular error as an exception to preserve the previous behaviour
+ throw JSONRPCError(RPC_TYPE_ERROR, strprintf("Missing amount for %s", coins.at(mtx.vin.at(err_pair.first).prevout).out.ToString()));
}
+ TxInErrorToJSON(mtx.vin.at(err_pair.first), vErrors, err_pair.second);
}
- bool fComplete = vErrors.empty();
result.pushKV("hex", EncodeHexTx(CTransaction(mtx)));
- result.pushKV("complete", fComplete);
+ result.pushKV("complete", complete);
if (!vErrors.empty()) {
if (result.exists("errors")) {
vErrors.push_backV(result["errors"].getValues());
diff --git a/src/rpc/rawtransaction_util.h b/src/rpc/rawtransaction_util.h
index e70446d7f1..436db5dc60 100644
--- a/src/rpc/rawtransaction_util.h
+++ b/src/rpc/rawtransaction_util.h
@@ -6,6 +6,7 @@
#define BITCOIN_RPC_RAWTRANSACTION_UTIL_H
#include <map>
+#include <string>
class FillableSigningProvider;
class UniValue;
@@ -24,11 +25,12 @@ class SigningProvider;
* @param result JSON object where signed transaction results accumulate
*/
void SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore, const std::map<COutPoint, Coin>& coins, const UniValue& hashType, UniValue& result);
+void SignTransactionResultToJSON(CMutableTransaction& mtx, bool complete, const std::map<COutPoint, Coin>& coins, std::map<int, std::string>& input_errors, UniValue& result);
/**
* Parse a prevtxs UniValue array and get the map of coins from it
*
- * @param prevTxs Array of previous txns outputs that tx depends on but may not yet be in the block chain
+ * @param prevTxsUnival Array of previous txns outputs that tx depends on but may not yet be in the block chain
* @param keystore A pointer to the temporary keystore if there is one
* @param coins Map of unspent outputs - coins in mempool and current chain UTXO set, may be extended by previous txns outputs after call
*/
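// Editor's note: the rawtransaction_util.cpp change above splits the old
// monolithic loop into a signing step that collects per-input failures in a
// std::map<int, std::string>, and SignTransactionResultToJSON(), which turns
// that map into the "hex"/"complete"/"errors" fields of the reply. The call
// sequence, as it appears in the patch:
//
//     std::map<int, std::string> input_errors;
//     bool complete = SignTransaction(mtx, keystore, coins, nHashType, input_errors);
//     SignTransactionResultToJSON(mtx, complete, coins, input_errors, result);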
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index 91d3e1fca4..2893aa0e60 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -18,7 +18,7 @@
#include <memory> // for unique_ptr
#include <unordered_map>
-static CCriticalSection cs_rpcWarmup;
+static RecursiveMutex cs_rpcWarmup;
static std::atomic<bool> g_rpc_running{false};
static bool fRPCInWarmup GUARDED_BY(cs_rpcWarmup) = true;
static std::string rpcWarmupStatus GUARDED_BY(cs_rpcWarmup) = "RPC server started";
@@ -137,7 +137,7 @@ UniValue help(const JSONRPCRequest& jsonRequest)
{"command", RPCArg::Type::STR, /* default */ "all commands", "The command to get help on"},
},
RPCResult{
- "\"text\" (string) The help text\n"
+ RPCResult::Type::STR, "", "The help text"
},
RPCExamples{""},
}.ToString()
@@ -169,7 +169,7 @@ UniValue stop(const JSONRPCRequest& jsonRequest)
// this reply will get back to the client.
StartShutdown();
if (jsonRequest.params[0].isNum()) {
- MilliSleep(jsonRequest.params[0].get_int());
+ UninterruptibleSleep(std::chrono::milliseconds{jsonRequest.params[0].get_int()});
}
return PACKAGE_NAME " stopping";
}
@@ -180,7 +180,7 @@ static UniValue uptime(const JSONRPCRequest& jsonRequest)
"\nReturns the total uptime of the server.\n",
{},
RPCResult{
- "ttt (numeric) The number of seconds that the server has been running\n"
+ RPCResult::Type::NUM, "", "The number of seconds that the server has been running"
},
RPCExamples{
HelpExampleCli("uptime", "")
@@ -197,16 +197,18 @@ static UniValue getrpcinfo(const JSONRPCRequest& request)
"\nReturns details of the RPC server.\n",
{},
RPCResult{
- "{\n"
- " \"active_commands\" (array) All active commands\n"
- " [\n"
- " { (object) Information about an active command\n"
- " \"method\" (string) The name of the RPC command \n"
- " \"duration\" (numeric) The running time in microseconds\n"
- " },...\n"
- " ],\n"
- " \"logpath\": \"xxx\" (string) The complete file path to the debug log\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ARR, "active_commands", "All active commands",
+ {
+ {RPCResult::Type::OBJ, "", "Information about an active command",
+ {
+ {RPCResult::Type::STR, "method", "The name of the RPC command"},
+ {RPCResult::Type::NUM, "duration", "The running time in microseconds"},
+ }},
+ }},
+ {RPCResult::Type::STR, "logpath", "The complete file path to the debug log"},
+ }
},
RPCExamples{
HelpExampleCli("getrpcinfo", "")
diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp
index 78586c22f9..a4e67c8da8 100644
--- a/src/rpc/util.cpp
+++ b/src/rpc/util.cpp
@@ -14,6 +14,7 @@
#include <tuple>
const std::string UNIX_EPOCH_TIME = "UNIX epoch time";
+const std::string EXAMPLE_ADDRESS = "\"bc1q09vm5lfy0j5reeulh4x5752q25uqqvz34hufdl\"";
void RPCTypeCheck(const UniValue& params,
const std::list<UniValueType>& typesExpected,
@@ -311,20 +312,9 @@ struct Sections {
}
/**
- * Serializing RPCArgs depends on the outer type. Only arrays and
- * dictionaries can be nested in json. The top-level outer type is "named
- * arguments", a mix between a dictionary and arrays.
- */
- enum class OuterType {
- ARR,
- OBJ,
- NAMED_ARG, // Only set on first recursion
- };
-
- /**
* Recursive helper to translate an RPCArg into sections
*/
- void Push(const RPCArg& arg, const size_t current_indent = 5, const OuterType outer_type = OuterType::NAMED_ARG)
+ void Push(const RPCArg& arg, const size_t current_indent = 5, const OuterType outer_type = OuterType::NONE)
{
const auto indent = std::string(current_indent, ' ');
const auto indent_next = std::string(current_indent + 2, ' ');
@@ -337,7 +327,7 @@ struct Sections {
case RPCArg::Type::AMOUNT:
case RPCArg::Type::RANGE:
case RPCArg::Type::BOOL: {
- if (outer_type == OuterType::NAMED_ARG) return; // Nothing more to do for non-recursive types on first recursion
+ if (outer_type == OuterType::NONE) return; // Nothing more to do for non-recursive types on first recursion
auto left = indent;
if (arg.m_type_str.size() != 0 && push_name) {
left += "\"" + arg.m_name + "\": " + arg.m_type_str.at(0);
@@ -350,7 +340,7 @@ struct Sections {
}
case RPCArg::Type::OBJ:
case RPCArg::Type::OBJ_USER_KEYS: {
- const auto right = outer_type == OuterType::NAMED_ARG ? "" : arg.ToDescriptionString();
+ const auto right = outer_type == OuterType::NONE ? "" : arg.ToDescriptionString();
PushSection({indent + (push_name ? "\"" + arg.m_name + "\": " : "") + "{", right});
for (const auto& arg_inner : arg.m_inner) {
Push(arg_inner, current_indent + 2, OuterType::OBJ);
@@ -358,20 +348,20 @@ struct Sections {
if (arg.m_type != RPCArg::Type::OBJ) {
PushSection({indent_next + "...", ""});
}
- PushSection({indent + "}" + (outer_type != OuterType::NAMED_ARG ? "," : ""), ""});
+ PushSection({indent + "}" + (outer_type != OuterType::NONE ? "," : ""), ""});
break;
}
case RPCArg::Type::ARR: {
auto left = indent;
left += push_name ? "\"" + arg.m_name + "\": " : "";
left += "[";
- const auto right = outer_type == OuterType::NAMED_ARG ? "" : arg.ToDescriptionString();
+ const auto right = outer_type == OuterType::NONE ? "" : arg.ToDescriptionString();
PushSection({left, right});
for (const auto& arg_inner : arg.m_inner) {
Push(arg_inner, current_indent + 2, OuterType::ARR);
}
PushSection({indent_next + "...", ""});
- PushSection({indent + "]" + (outer_type != OuterType::NAMED_ARG ? "," : ""), ""});
+ PushSection({indent + "]" + (outer_type != OuterType::NONE ? "," : ""), ""});
break;
}
@@ -443,7 +433,9 @@ std::string RPCResults::ToDescriptionString() const
} else {
result += "\nResult (" + r.m_cond + "):\n";
}
- result += r.m_result;
+ Sections sections;
+ r.ToSections(sections);
+ result += sections.ToString();
}
return result;
}
@@ -590,6 +582,93 @@ std::string RPCArg::ToDescriptionString() const
return ret;
}
+void RPCResult::ToSections(Sections& sections, const OuterType outer_type, const int current_indent) const
+{
+ // Indentation
+ const std::string indent(current_indent, ' ');
+ const std::string indent_next(current_indent + 2, ' ');
+
+ // Elements in a JSON structure (dictionary or array) are separated by a comma
+ const std::string maybe_separator{outer_type != OuterType::NONE ? "," : ""};
+
+ // The key name if recursed into a dictionary
+ const std::string maybe_key{
+ outer_type == OuterType::OBJ ?
+ "\"" + this->m_key_name + "\" : " :
+ ""};
+
+ // Format description with type
+ const auto Description = [&](const std::string& type) {
+ return "(" + type + (this->m_optional ? ", optional" : "") + ")" +
+ (this->m_description.empty() ? "" : " " + this->m_description);
+ };
+
+ switch (m_type) {
+ case Type::ELISION: {
+ // If the inner result is empty, use three dots for elision
+ sections.PushSection({indent_next + "...", m_description});
+ return;
+ }
+ case Type::NONE: {
+ sections.PushSection({indent + "None", Description("json null")});
+ return;
+ }
+ case Type::STR: {
+ sections.PushSection({indent + maybe_key + "\"str\"" + maybe_separator, Description("string")});
+ return;
+ }
+ case Type::STR_AMOUNT: {
+ sections.PushSection({indent + maybe_key + "n" + maybe_separator, Description("numeric")});
+ return;
+ }
+ case Type::STR_HEX: {
+ sections.PushSection({indent + maybe_key + "\"hex\"" + maybe_separator, Description("string")});
+ return;
+ }
+ case Type::NUM: {
+ sections.PushSection({indent + maybe_key + "n" + maybe_separator, Description("numeric")});
+ return;
+ }
+ case Type::NUM_TIME: {
+ sections.PushSection({indent + maybe_key + "xxx" + maybe_separator, Description("numeric")});
+ return;
+ }
+ case Type::BOOL: {
+ sections.PushSection({indent + maybe_key + "true|false" + maybe_separator, Description("boolean")});
+ return;
+ }
+ case Type::ARR_FIXED:
+ case Type::ARR: {
+ sections.PushSection({indent + maybe_key + "[", Description("json array")});
+ for (const auto& i : m_inner) {
+ i.ToSections(sections, OuterType::ARR, current_indent + 2);
+ }
+ if (m_type == Type::ARR) {
+ sections.PushSection({indent_next + "...", ""});
+ }
+ sections.PushSection({indent + "]" + maybe_separator, ""});
+ return;
+ }
+ case Type::OBJ_DYN:
+ case Type::OBJ: {
+ sections.PushSection({indent + maybe_key + "{", Description("json object")});
+ for (const auto& i : m_inner) {
+ i.ToSections(sections, OuterType::OBJ, current_indent + 2);
+ }
+ if (m_type == Type::OBJ_DYN) {
+ // If the dictionary keys are dynamic, use three dots for continuation
+ sections.PushSection({indent_next + "...", ""});
+ }
+ sections.PushSection({indent + "}" + maybe_separator, ""});
+ return;
+ }
+
+ // no default case, so the compiler can warn about missing cases
+ }
+
+ CHECK_NONFATAL(false);
+}
+
std::string RPCArg::ToStringObj(const bool oneline) const
{
std::string res;
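// Editor's note: with ToSections() above, a structured result such as
//
//     RPCResult{RPCResult::Type::OBJ, "", "",
//         {
//             {RPCResult::Type::STR_HEX, "txid", "The transaction id"},
//             {RPCResult::Type::NUM, "vout", "The output number"},
//         }},
//
// is rendered into help text roughly like the following (column alignment is
// handled by the Sections helper, so exact spacing may differ):
//
//     {                     (json object)
//       "txid" : "hex",     (string) The transaction id
//       "vout" : n,         (numeric) The output number
//     }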
diff --git a/src/rpc/util.h b/src/rpc/util.h
index 065a992a88..b5eebf0915 100644
--- a/src/rpc/util.h
+++ b/src/rpc/util.h
@@ -28,9 +28,16 @@
*/
extern const std::string UNIX_EPOCH_TIME;
+/**
+ * Example bech32 address used in multiple RPCExamples. The address is intentionally
+ * invalid to prevent accidental transactions by users.
+ */
+extern const std::string EXAMPLE_ADDRESS;
+
class FillableSigningProvider;
class CPubKey;
class CScript;
+struct Sections;
/** Wrapper for UniValue::VType, which includes typeAny:
* Used to denote don't care type. */
@@ -95,6 +102,16 @@ std::vector<CScript> EvalDescriptorStringOrObject(const UniValue& scanobject, Fl
/** Returns, given services flags, a list of humanly readable (known) network services */
UniValue GetServicesNames(ServiceFlags services);
+/**
+ * Serializing JSON objects depends on the outer type. Only arrays and
+ * dictionaries can be nested in json. The top-level outer type is "NONE".
+ */
+enum class OuterType {
+ ARR,
+ OBJ,
+ NONE, // Only set on first recursion
+};
+
struct RPCArg {
enum class Type {
OBJ,
@@ -134,37 +151,37 @@ struct RPCArg {
const std::vector<std::string> m_type_str; //!< Should be empty unless it is supposed to override the auto-generated type strings. Vector length is either 0 or 2, m_type_str.at(0) will override the type of the value in a key-value pair, m_type_str.at(1) will override the type in the argument description.
RPCArg(
- const std::string& name,
- const Type& type,
- const Fallback& fallback,
- const std::string& description,
- const std::string& oneline_description = "",
- const std::vector<std::string>& type_str = {})
- : m_name{name},
- m_type{type},
- m_fallback{fallback},
- m_description{description},
- m_oneline_description{oneline_description},
- m_type_str{type_str}
+ const std::string name,
+ const Type type,
+ const Fallback fallback,
+ const std::string description,
+ const std::string oneline_description = "",
+ const std::vector<std::string> type_str = {})
+ : m_name{std::move(name)},
+ m_type{std::move(type)},
+ m_fallback{std::move(fallback)},
+ m_description{std::move(description)},
+ m_oneline_description{std::move(oneline_description)},
+ m_type_str{std::move(type_str)}
{
CHECK_NONFATAL(type != Type::ARR && type != Type::OBJ);
}
RPCArg(
- const std::string& name,
- const Type& type,
- const Fallback& fallback,
- const std::string& description,
- const std::vector<RPCArg>& inner,
- const std::string& oneline_description = "",
- const std::vector<std::string>& type_str = {})
- : m_name{name},
- m_type{type},
- m_inner{inner},
- m_fallback{fallback},
- m_description{description},
- m_oneline_description{oneline_description},
- m_type_str{type_str}
+ const std::string name,
+ const Type type,
+ const Fallback fallback,
+ const std::string description,
+ const std::vector<RPCArg> inner,
+ const std::string oneline_description = "",
+ const std::vector<std::string> type_str = {})
+ : m_name{std::move(name)},
+ m_type{std::move(type)},
+ m_inner{std::move(inner)},
+ m_fallback{std::move(fallback)},
+ m_description{std::move(description)},
+ m_oneline_description{std::move(oneline_description)},
+ m_type_str{std::move(type_str)}
{
CHECK_NONFATAL(type == Type::ARR || type == Type::OBJ);
}
@@ -189,21 +206,85 @@ struct RPCArg {
};
struct RPCResult {
+ enum class Type {
+ OBJ,
+ ARR,
+ STR,
+ NUM,
+ BOOL,
+ NONE,
+ STR_AMOUNT, //!< Special string to represent a floating point amount
+ STR_HEX, //!< Special string with only hex chars
+ OBJ_DYN, //!< Special dictionary with keys that are not literals
+ ARR_FIXED, //!< Special array that has a fixed number of entries
+ NUM_TIME, //!< Special numeric to denote unix epoch time
+ ELISION, //!< Special type to denote elision (...)
+ };
+
+ const Type m_type;
+ const std::string m_key_name; //!< Only used for dicts
+ const std::vector<RPCResult> m_inner; //!< Only used for arrays or dicts
+ const bool m_optional;
+ const std::string m_description;
const std::string m_cond;
- const std::string m_result;
- explicit RPCResult(std::string result)
- : m_cond{}, m_result{std::move(result)}
+ RPCResult(
+ const std::string cond,
+ const Type type,
+ const std::string m_key_name,
+ const bool optional,
+ const std::string description,
+ const std::vector<RPCResult> inner = {})
+ : m_type{std::move(type)},
+ m_key_name{std::move(m_key_name)},
+ m_inner{std::move(inner)},
+ m_optional{optional},
+ m_description{std::move(description)},
+ m_cond{std::move(cond)}
{
- CHECK_NONFATAL(!m_result.empty());
+ CHECK_NONFATAL(!m_cond.empty());
+ const bool inner_needed{type == Type::ARR || type == Type::ARR_FIXED || type == Type::OBJ || type == Type::OBJ_DYN};
+ CHECK_NONFATAL(inner_needed != inner.empty());
}
- RPCResult(std::string cond, std::string result)
- : m_cond{std::move(cond)}, m_result{std::move(result)}
+ RPCResult(
+ const std::string cond,
+ const Type type,
+ const std::string m_key_name,
+ const std::string description,
+ const std::vector<RPCResult> inner = {})
+ : RPCResult{cond, type, m_key_name, false, description, inner} {}
+
+ RPCResult(
+ const Type type,
+ const std::string m_key_name,
+ const bool optional,
+ const std::string description,
+ const std::vector<RPCResult> inner = {})
+ : m_type{std::move(type)},
+ m_key_name{std::move(m_key_name)},
+ m_inner{std::move(inner)},
+ m_optional{optional},
+ m_description{std::move(description)},
+ m_cond{}
{
- CHECK_NONFATAL(!m_cond.empty());
- CHECK_NONFATAL(!m_result.empty());
+ const bool inner_needed{type == Type::ARR || type == Type::ARR_FIXED || type == Type::OBJ || type == Type::OBJ_DYN};
+ CHECK_NONFATAL(inner_needed != inner.empty());
}
+
+ RPCResult(
+ const Type type,
+ const std::string m_key_name,
+ const std::string description,
+ const std::vector<RPCResult> inner = {})
+ : RPCResult{type, m_key_name, false, description, inner} {}
+
+ /** Append the sections of the result. */
+ void ToSections(Sections& sections, OuterType outer_type = OuterType::NONE, const int current_indent = 0) const;
+ /** Return the type string of the result when it is in an object (dict). */
+ std::string ToStringObj() const;
+ /** Return the description string, including the result type. */
+ std::string ToDescriptionString() const;
};
struct RPCResults {
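// Editor's note: the RPCResult constructors above enforce, via
// CHECK_NONFATAL(inner_needed != inner.empty()), that container types
// (OBJ, OBJ_DYN, ARR, ARR_FIXED) always come with inner entries, while scalar
// types never do. Illustrative (hypothetical) declarations:
//
//     RPCResult{RPCResult::Type::STR_HEX, "data", "the proof"};                                // OK: scalar, no inner
//     RPCResult{RPCResult::Type::ARR, "txids", "", {{RPCResult::Type::STR_HEX, "txid", ""}}};  // OK: array with inner
//     RPCResult{RPCResult::Type::OBJ, "tx", "decoded tx", {}};                                 // fails the check: object without inner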
diff --git a/src/scheduler.cpp b/src/scheduler.cpp
index 927a3f3820..7cb7754fde 100644
--- a/src/scheduler.cpp
+++ b/src/scheduler.cpp
@@ -5,7 +5,6 @@
#include <scheduler.h>
#include <random.h>
-#include <reverselock.h>
#include <assert.h>
#include <utility>
@@ -20,18 +19,9 @@ CScheduler::~CScheduler()
}
-#if BOOST_VERSION < 105000
-static boost::system_time toPosixTime(const boost::chrono::system_clock::time_point& t)
-{
- // Creating the posix_time using from_time_t loses sub-second precision. So rather than exporting the time_point to time_t,
- // start with a posix_time at the epoch (0) and add the milliseconds that have passed since then.
- return boost::posix_time::from_time_t(0) + boost::posix_time::milliseconds(boost::chrono::duration_cast<boost::chrono::milliseconds>(t.time_since_epoch()).count());
-}
-#endif
-
void CScheduler::serviceQueue()
{
- boost::unique_lock<boost::mutex> lock(newTaskMutex);
+ WAIT_LOCK(newTaskMutex, lock);
++nThreadsServicingQueue;
// newTaskMutex is locked throughout this loop EXCEPT
@@ -40,7 +30,7 @@ void CScheduler::serviceQueue()
while (!shouldStop()) {
try {
if (!shouldStop() && taskQueue.empty()) {
- reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
+ REVERSE_LOCK(lock);
}
while (!shouldStop() && taskQueue.empty()) {
// Wait until there is something to do.
@@ -50,21 +40,13 @@ void CScheduler::serviceQueue()
// Wait until either there is a new task, or until
// the time of the first item on the queue:
-// wait_until needs boost 1.50 or later; older versions have timed_wait:
-#if BOOST_VERSION < 105000
- while (!shouldStop() && !taskQueue.empty() &&
- newTaskScheduled.timed_wait(lock, toPosixTime(taskQueue.begin()->first))) {
- // Keep waiting until timeout
- }
-#else
- // Some boost versions have a conflicting overload of wait_until that returns void.
- // Explicitly use a template here to avoid hitting that overload.
while (!shouldStop() && !taskQueue.empty()) {
- boost::chrono::system_clock::time_point timeToWaitFor = taskQueue.begin()->first;
- if (newTaskScheduled.wait_until<>(lock, timeToWaitFor) == boost::cv_status::timeout)
+ std::chrono::system_clock::time_point timeToWaitFor = taskQueue.begin()->first;
+ if (newTaskScheduled.wait_until(lock, timeToWaitFor) == std::cv_status::timeout) {
break; // Exit loop after timeout, it means we reached the time of the event
+ }
}
-#endif
+
// If there are multiple threads, the queue can empty while we're waiting (another
// thread may service the task we were waiting on).
if (shouldStop() || taskQueue.empty())
@@ -76,7 +58,7 @@ void CScheduler::serviceQueue()
{
// Unlock before calling f, so it can reschedule itself or another task
// without deadlocking:
- reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
+ REVERSE_LOCK(lock);
f();
}
} catch (...) {
@@ -91,7 +73,7 @@ void CScheduler::serviceQueue()
void CScheduler::stop(bool drain)
{
{
- boost::unique_lock<boost::mutex> lock(newTaskMutex);
+ LOCK(newTaskMutex);
if (drain)
stopWhenEmpty = true;
else
@@ -100,10 +82,10 @@ void CScheduler::stop(bool drain)
newTaskScheduled.notify_all();
}
-void CScheduler::schedule(CScheduler::Function f, boost::chrono::system_clock::time_point t)
+void CScheduler::schedule(CScheduler::Function f, std::chrono::system_clock::time_point t)
{
{
- boost::unique_lock<boost::mutex> lock(newTaskMutex);
+ LOCK(newTaskMutex);
taskQueue.insert(std::make_pair(t, f));
}
newTaskScheduled.notify_one();
@@ -111,7 +93,29 @@ void CScheduler::schedule(CScheduler::Function f, boost::chrono::system_clock::t
void CScheduler::scheduleFromNow(CScheduler::Function f, int64_t deltaMilliSeconds)
{
- schedule(f, boost::chrono::system_clock::now() + boost::chrono::milliseconds(deltaMilliSeconds));
+ schedule(f, std::chrono::system_clock::now() + std::chrono::milliseconds(deltaMilliSeconds));
+}
+
+void CScheduler::MockForward(std::chrono::seconds delta_seconds)
+{
+ assert(delta_seconds.count() > 0 && delta_seconds < std::chrono::hours{1});
+
+ {
+ LOCK(newTaskMutex);
+
+ // use temp_queue to maintain updated schedule
+ std::multimap<std::chrono::system_clock::time_point, Function> temp_queue;
+
+ for (const auto& element : taskQueue) {
+ temp_queue.emplace_hint(temp_queue.cend(), element.first - delta_seconds, element.second);
+ }
+
+ // point taskQueue to temp_queue
+ taskQueue = std::move(temp_queue);
+ }
+
+ // notify that the taskQueue needs to be processed
+ newTaskScheduled.notify_one();
}
static void Repeat(CScheduler* s, CScheduler::Function f, int64_t deltaMilliSeconds)
@@ -125,10 +129,10 @@ void CScheduler::scheduleEvery(CScheduler::Function f, int64_t deltaMilliSeconds
scheduleFromNow(std::bind(&Repeat, this, f, deltaMilliSeconds), deltaMilliSeconds);
}
-size_t CScheduler::getQueueInfo(boost::chrono::system_clock::time_point &first,
- boost::chrono::system_clock::time_point &last) const
+size_t CScheduler::getQueueInfo(std::chrono::system_clock::time_point &first,
+ std::chrono::system_clock::time_point &last) const
{
- boost::unique_lock<boost::mutex> lock(newTaskMutex);
+ LOCK(newTaskMutex);
size_t result = taskQueue.size();
if (!taskQueue.empty()) {
first = taskQueue.begin()->first;
@@ -138,7 +142,7 @@ size_t CScheduler::getQueueInfo(boost::chrono::system_clock::time_point &first,
}
bool CScheduler::AreThreadsServicingQueue() const {
- boost::unique_lock<boost::mutex> lock(newTaskMutex);
+ LOCK(newTaskMutex);
return nThreadsServicingQueue;
}
@@ -152,7 +156,7 @@ void SingleThreadedSchedulerClient::MaybeScheduleProcessQueue() {
if (m_are_callbacks_running) return;
if (m_callbacks_pending.empty()) return;
}
- m_pscheduler->schedule(std::bind(&SingleThreadedSchedulerClient::ProcessQueue, this));
+ m_pscheduler->schedule(std::bind(&SingleThreadedSchedulerClient::ProcessQueue, this), std::chrono::system_clock::now());
}
void SingleThreadedSchedulerClient::ProcessQueue() {
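// Editor's note: a hedged usage sketch (not from the patch) of the scheduler API
// above, now based on std::chrono and testable via MockForward().
// "DumpSomething" is a placeholder callback; assumes <scheduler.h> and <thread>.
//
//     CScheduler scheduler;
//     std::thread t([&scheduler] { scheduler.serviceQueue(); });
//     scheduler.scheduleFromNow([] { DumpSomething(); }, 60 * 1000);  // run in 60s
//     scheduler.MockForward(std::chrono::seconds{55});                // now due in ~5s
//     scheduler.stop(true /* drain remaining tasks */);
//     t.join();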
diff --git a/src/scheduler.h b/src/scheduler.h
index b8597b6f5b..4d5aa3068e 100644
--- a/src/scheduler.h
+++ b/src/scheduler.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2015-2019 The Bitcoin Core developers
+// Copyright (c) 2015-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -7,11 +7,12 @@
//
// NOTE:
-// boost::thread / boost::chrono should be ported to std::thread / std::chrono
+// boost::thread should be ported to std::thread
// when we support C++11.
//
-#include <boost/chrono/chrono.hpp>
-#include <boost/thread.hpp>
+#include <condition_variable>
+#include <functional>
+#include <list>
#include <map>
#include <sync.h>
@@ -27,8 +28,8 @@
// s->scheduleFromNow(std::bind(Class::func, this, argument), 3);
// boost::thread* t = new boost::thread(std::bind(CScheduler::serviceQueue, s));
//
-// ... then at program shutdown, clean up the thread running serviceQueue:
-// t->interrupt();
+// ... then at program shutdown, make sure to call stop() to clean up the thread(s) running serviceQueue:
+// s->stop();
// t->join();
// delete t;
// delete s; // Must be done after thread is interrupted/joined.
@@ -43,7 +44,7 @@ public:
typedef std::function<void()> Function;
// Call func at/after time t
- void schedule(Function f, boost::chrono::system_clock::time_point t=boost::chrono::system_clock::now());
+ void schedule(Function f, std::chrono::system_clock::time_point t);
// Convenience method: call f once deltaMilliSeconds from now
void scheduleFromNow(Function f, int64_t deltaMilliSeconds);
@@ -55,6 +56,13 @@ public:
// need more accurate scheduling, don't use this method.
void scheduleEvery(Function f, int64_t deltaMilliSeconds);
+ /**
+ * Mock the scheduler to fast forward in time.
+ * Iterates through items on taskQueue and reschedules them
+ * to be delta_seconds sooner.
+ */
+ void MockForward(std::chrono::seconds delta_seconds);
+
// To keep things as simple as possible, there is no unschedule.
// Services the queue 'forever'. Should be run in a thread,
@@ -68,20 +76,20 @@ public:
// Returns number of tasks waiting to be serviced,
// and first and last task times
- size_t getQueueInfo(boost::chrono::system_clock::time_point &first,
- boost::chrono::system_clock::time_point &last) const;
+ size_t getQueueInfo(std::chrono::system_clock::time_point &first,
+ std::chrono::system_clock::time_point &last) const;
// Returns true if there are threads actively running in serviceQueue()
bool AreThreadsServicingQueue() const;
private:
- std::multimap<boost::chrono::system_clock::time_point, Function> taskQueue;
- boost::condition_variable newTaskScheduled;
- mutable boost::mutex newTaskMutex;
- int nThreadsServicingQueue;
- bool stopRequested;
- bool stopWhenEmpty;
- bool shouldStop() const { return stopRequested || (stopWhenEmpty && taskQueue.empty()); }
+ mutable Mutex newTaskMutex;
+ std::condition_variable newTaskScheduled;
+ std::multimap<std::chrono::system_clock::time_point, Function> taskQueue GUARDED_BY(newTaskMutex);
+ int nThreadsServicingQueue GUARDED_BY(newTaskMutex);
+ bool stopRequested GUARDED_BY(newTaskMutex);
+ bool stopWhenEmpty GUARDED_BY(newTaskMutex);
+ bool shouldStop() const EXCLUSIVE_LOCKS_REQUIRED(newTaskMutex) { return stopRequested || (stopWhenEmpty && taskQueue.empty()); }
};
/**
@@ -98,7 +106,7 @@ class SingleThreadedSchedulerClient {
private:
CScheduler *m_pscheduler;
- CCriticalSection m_cs_callbacks_pending;
+ RecursiveMutex m_cs_callbacks_pending;
std::list<std::function<void ()>> m_callbacks_pending GUARDED_BY(m_cs_callbacks_pending);
bool m_are_callbacks_running GUARDED_BY(m_cs_callbacks_pending) = false;
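For reviewers, a minimal sketch of how the reworked std::chrono interface and the new MockForward() are meant to be driven (editor's illustration, not part of the patch; the helper name and timings are made up, and a thread is assumed to already be running serviceQueue()):

void MockForwardExample(CScheduler& scheduler)
{
    int counter{0};
    // Queue a callback ten minutes out; it should not fire immediately.
    scheduler.scheduleFromNow([&counter] { ++counter; }, 10 * 60 * 1000);
    // Pull every queued task ten minutes closer. MockForward asserts the
    // delta is positive and below one hour.
    scheduler.MockForward(std::chrono::seconds{600});
    // The service thread should now run the task almost immediately,
    // after which counter == 1.
}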
diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp
index 773d6a55c4..b1d9a5bda7 100644
--- a/src/script/descriptor.cpp
+++ b/src/script/descriptor.cpp
@@ -339,14 +339,15 @@ class DescriptorImpl : public Descriptor
{
//! Public key arguments for this descriptor (size 1 for PK, PKH, WPKH; any size for Multisig).
const std::vector<std::unique_ptr<PubkeyProvider>> m_pubkey_args;
+ //! The string name of the descriptor function.
+ const std::string m_name;
+
+protected:
//! The sub-descriptor argument (nullptr for everything but SH and WSH).
//! In doc/descriptors.md this is referred to as SCRIPT expressions sh(SCRIPT)
//! and wsh(SCRIPT), and distinct from KEY expressions and ADDR expressions.
const std::unique_ptr<DescriptorImpl> m_subdescriptor_arg;
- //! The string name of the descriptor function.
- const std::string m_name;
-protected:
//! Return a serialization of anything except pubkey and script arguments, to be prepended to those.
virtual std::string ToStringExtra() const { return ""; }
@@ -364,7 +365,7 @@ protected:
virtual std::vector<CScript> MakeScripts(const std::vector<CPubKey>& pubkeys, const CScript* script, FlatSigningProvider& out) const = 0;
public:
- DescriptorImpl(std::vector<std::unique_ptr<PubkeyProvider>> pubkeys, std::unique_ptr<DescriptorImpl> script, const std::string& name) : m_pubkey_args(std::move(pubkeys)), m_subdescriptor_arg(std::move(script)), m_name(name) {}
+ DescriptorImpl(std::vector<std::unique_ptr<PubkeyProvider>> pubkeys, std::unique_ptr<DescriptorImpl> script, const std::string& name) : m_pubkey_args(std::move(pubkeys)), m_name(name), m_subdescriptor_arg(std::move(script)) {}
bool IsSolvable() const override
{
@@ -500,6 +501,8 @@ public:
out = Merge(out, subprovider);
}
}
+
+ Optional<OutputType> GetOutputType() const override { return nullopt; }
};
/** A parsed addr(A) descriptor. */
@@ -512,6 +515,19 @@ protected:
public:
AddressDescriptor(CTxDestination destination) : DescriptorImpl({}, {}, "addr"), m_destination(std::move(destination)) {}
bool IsSolvable() const final { return false; }
+
+ Optional<OutputType> GetOutputType() const override
+ {
+ switch (m_destination.which()) {
+ case 1 /* PKHash */:
+ case 2 /* ScriptHash */: return OutputType::LEGACY;
+ case 3 /* WitnessV0ScriptHash */:
+ case 4 /* WitnessV0KeyHash */:
+ case 5 /* WitnessUnknown */: return OutputType::BECH32;
+ case 0 /* CNoDestination */:
+ default: return nullopt;
+ }
+ }
};
/** A parsed raw(H) descriptor. */
@@ -524,6 +540,21 @@ protected:
public:
RawDescriptor(CScript script) : DescriptorImpl({}, {}, "raw"), m_script(std::move(script)) {}
bool IsSolvable() const final { return false; }
+
+ Optional<OutputType> GetOutputType() const override
+ {
+ CTxDestination dest;
+ ExtractDestination(m_script, dest);
+ switch (dest.which()) {
+ case 1 /* PKHash */:
+ case 2 /* ScriptHash */: return OutputType::LEGACY;
+ case 3 /* WitnessV0ScriptHash */:
+ case 4 /* WitnessV0KeyHash */:
+ case 5 /* WitnessUnknown */: return OutputType::BECH32;
+ case 0 /* CNoDestination */:
+ default: return nullopt;
+ }
+ }
};
/** A parsed pk(P) descriptor. */
@@ -547,6 +578,7 @@ protected:
}
public:
PKHDescriptor(std::unique_ptr<PubkeyProvider> prov) : DescriptorImpl(Vector(std::move(prov)), {}, "pkh") {}
+ Optional<OutputType> GetOutputType() const override { return OutputType::LEGACY; }
};
/** A parsed wpkh(P) descriptor. */
@@ -561,6 +593,7 @@ protected:
}
public:
WPKHDescriptor(std::unique_ptr<PubkeyProvider> prov) : DescriptorImpl(Vector(std::move(prov)), {}, "wpkh") {}
+ Optional<OutputType> GetOutputType() const override { return OutputType::BECH32; }
};
/** A parsed combo(P) descriptor. */
@@ -612,6 +645,13 @@ protected:
std::vector<CScript> MakeScripts(const std::vector<CPubKey>&, const CScript* script, FlatSigningProvider&) const override { return Vector(GetScriptForDestination(ScriptHash(*script))); }
public:
SHDescriptor(std::unique_ptr<DescriptorImpl> desc) : DescriptorImpl({}, std::move(desc), "sh") {}
+
+ Optional<OutputType> GetOutputType() const override
+ {
+ assert(m_subdescriptor_arg);
+ if (m_subdescriptor_arg->GetOutputType() == OutputType::BECH32) return OutputType::P2SH_SEGWIT;
+ return OutputType::LEGACY;
+ }
};
/** A parsed wsh(...) descriptor. */
@@ -621,6 +661,7 @@ protected:
std::vector<CScript> MakeScripts(const std::vector<CPubKey>&, const CScript* script, FlatSigningProvider&) const override { return Vector(GetScriptForDestination(WitnessV0ScriptHash(*script))); }
public:
WSHDescriptor(std::unique_ptr<DescriptorImpl> desc) : DescriptorImpl({}, std::move(desc), "wsh") {}
+ Optional<OutputType> GetOutputType() const override { return OutputType::BECH32; }
};
////////////////////////////////////////////////////////////////////////////
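To show how the new per-descriptor GetOutputType() overrides are meant to be consumed (editor's sketch, not part of the patch; the helper and its fallback parameter are hypothetical):

// wpkh()/wsh() report BECH32, pkh() and non-segwit sh() report LEGACY,
// sh(wpkh()) reports P2SH_SEGWIT; descriptors without a single well-defined
// script type return nullopt.
OutputType GuessOutputType(const Descriptor& desc, OutputType fallback)
{
    const Optional<OutputType> type = desc.GetOutputType();
    return type ? *type : fallback;
}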
diff --git a/src/script/descriptor.h b/src/script/descriptor.h
index cf5428c213..58b920c681 100644
--- a/src/script/descriptor.h
+++ b/src/script/descriptor.h
@@ -5,6 +5,8 @@
#ifndef BITCOIN_SCRIPT_DESCRIPTOR_H
#define BITCOIN_SCRIPT_DESCRIPTOR_H
+#include <optional.h>
+#include <outputtype.h>
#include <script/script.h>
#include <script/sign.h>
#include <script/signingprovider.h>
@@ -47,30 +49,33 @@ struct Descriptor {
/** Expand a descriptor at a specified position.
*
- * @param[in] pos: The position at which to expand the descriptor. If IsRange() is false, this is ignored.
- * @param[in] provider: The provider to query for private keys in case of hardened derivation.
- * @param[out] output_scripts: The expanded scriptPubKeys.
- * @param[out] out: Scripts and public keys necessary for solving the expanded scriptPubKeys (may be equal to `provider`).
- * @param[out] cache: Cache data necessary to evaluate the descriptor at this point without access to private keys.
+ * @param[in] pos The position at which to expand the descriptor. If IsRange() is false, this is ignored.
+ * @param[in] provider The provider to query for private keys in case of hardened derivation.
+ * @param[out] output_scripts The expanded scriptPubKeys.
+ * @param[out] out Scripts and public keys necessary for solving the expanded scriptPubKeys (may be equal to `provider`).
+ * @param[out] cache Cache data necessary to evaluate the descriptor at this point without access to private keys.
*/
virtual bool Expand(int pos, const SigningProvider& provider, std::vector<CScript>& output_scripts, FlatSigningProvider& out, std::vector<unsigned char>* cache = nullptr) const = 0;
/** Expand a descriptor at a specified position using cached expansion data.
*
- * @param[in] pos: The position at which to expand the descriptor. If IsRange() is false, this is ignored.
- * @param[in] cache: Cached expansion data.
- * @param[out] output_scripts: The expanded scriptPubKeys.
- * @param[out] out: Scripts and public keys necessary for solving the expanded scriptPubKeys (may be equal to `provider`).
+ * @param[in] pos The position at which to expand the descriptor. If IsRange() is false, this is ignored.
+ * @param[in] cache Cached expansion data.
+ * @param[out] output_scripts The expanded scriptPubKeys.
+ * @param[out] out Scripts and public keys necessary for solving the expanded scriptPubKeys (may be equal to `provider`).
*/
virtual bool ExpandFromCache(int pos, const std::vector<unsigned char>& cache, std::vector<CScript>& output_scripts, FlatSigningProvider& out) const = 0;
/** Expand the private key for a descriptor at a specified position, if possible.
*
- * @param[in] pos: The position at which to expand the descriptor. If IsRange() is false, this is ignored.
- * @param[in] provider: The provider to query for the private keys.
- * @param[out] out: Any private keys available for the specified `pos`.
+ * @param[in] pos The position at which to expand the descriptor. If IsRange() is false, this is ignored.
+ * @param[in] provider The provider to query for the private keys.
+ * @param[out] out Any private keys available for the specified `pos`.
*/
virtual void ExpandPrivate(int pos, const SigningProvider& provider, FlatSigningProvider& out) const = 0;
+
+ /** @return The OutputType of the scriptPubKey(s) produced by this descriptor. Or nullopt if indeterminate (multiple or none) */
+ virtual Optional<OutputType> GetOutputType() const = 0;
};
/** Parse a `descriptor` string. Included private keys are put in `out`.
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index b919046ab6..d0865d2793 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -1281,13 +1281,11 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn
return ss.GetHash();
}
- static const uint256 one(uint256S("0000000000000000000000000000000000000000000000000000000000000001"));
-
// Check for invalid use of SIGHASH_SINGLE
if ((nHashType & 0x1f) == SIGHASH_SINGLE) {
if (nIn >= txTo.vout.size()) {
// nOut out of range
- return one;
+ return UINT256_ONE();
}
}
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index 8791d1542a..fe8292fe57 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -144,8 +144,13 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator
ret.push_back(valtype()); // workaround CHECKMULTISIG bug
for (size_t i = 1; i < vSolutions.size() - 1; ++i) {
CPubKey pubkey = CPubKey(vSolutions[i]);
- if (ret.size() < required + 1 && CreateSig(creator, sigdata, provider, sig, pubkey, scriptPubKey, sigversion)) {
- ret.push_back(std::move(sig));
+ // We need to always call CreateSig in order to fill sigdata with all
+ // possible signatures that we can create. This will allow further PSBT
+ // processing to work, as it needs all possible signature and pubkey pairs.
+ if (CreateSig(creator, sigdata, provider, sig, pubkey, scriptPubKey, sigversion)) {
+ if (ret.size() < required + 1) {
+ ret.push_back(std::move(sig));
+ }
}
}
bool ok = ret.size() == required + 1;
@@ -460,3 +465,54 @@ bool IsSegWitOutput(const SigningProvider& provider, const CScript& script)
}
return false;
}
+
+bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore, const std::map<COutPoint, Coin>& coins, int nHashType, std::map<int, std::string>& input_errors)
+{
+ bool fHashSingle = ((nHashType & ~SIGHASH_ANYONECANPAY) == SIGHASH_SINGLE);
+
+ // Use CTransaction for the constant parts of the
+ // transaction to avoid rehashing.
+ const CTransaction txConst(mtx);
+ // Sign what we can:
+ for (unsigned int i = 0; i < mtx.vin.size(); i++) {
+ CTxIn& txin = mtx.vin[i];
+ auto coin = coins.find(txin.prevout);
+ if (coin == coins.end() || coin->second.IsSpent()) {
+ input_errors[i] = "Input not found or already spent";
+ continue;
+ }
+ const CScript& prevPubKey = coin->second.out.scriptPubKey;
+ const CAmount& amount = coin->second.out.nValue;
+
+ SignatureData sigdata = DataFromTransaction(mtx, i, coin->second.out);
+ // Only sign SIGHASH_SINGLE if there's a corresponding output:
+ if (!fHashSingle || (i < mtx.vout.size())) {
+ ProduceSignature(*keystore, MutableTransactionSignatureCreator(&mtx, i, amount, nHashType), prevPubKey, sigdata);
+ }
+
+ UpdateInput(txin, sigdata);
+
+ // amount must be specified for valid segwit signature
+ if (amount == MAX_MONEY && !txin.scriptWitness.IsNull()) {
+ input_errors[i] = "Missing amount";
+ continue;
+ }
+
+ ScriptError serror = SCRIPT_ERR_OK;
+ if (!VerifyScript(txin.scriptSig, prevPubKey, &txin.scriptWitness, STANDARD_SCRIPT_VERIFY_FLAGS, TransactionSignatureChecker(&txConst, i, amount), &serror)) {
+ if (serror == SCRIPT_ERR_INVALID_STACK_OPERATION) {
+ // Unable to sign input and verification failed (possible attempt to partially sign).
+ input_errors[i] = "Unable to sign input, invalid stack size (possibly missing key)";
+ } else if (serror == SCRIPT_ERR_SIG_NULLFAIL) {
+ // Verification failed (possibly due to insufficient signatures).
+ input_errors[i] = "CHECK(MULTI)SIG failing with non-zero signature (possibly need more signatures)";
+ } else {
+ input_errors[i] = ScriptErrorString(serror);
+ }
+ } else {
+ // If this input succeeds, make sure there is no error set for it
+ input_errors.erase(i);
+ }
+ }
+ return input_errors.empty();
+}
diff --git a/src/script/sign.h b/src/script/sign.h
index 033c9ba19e..f03af0713f 100644
--- a/src/script/sign.h
+++ b/src/script/sign.h
@@ -6,6 +6,7 @@
#ifndef BITCOIN_SCRIPT_SIGN_H
#define BITCOIN_SCRIPT_SIGN_H
+#include <coins.h>
#include <hash.h>
#include <pubkey.h>
#include <script/interpreter.h>
@@ -168,4 +169,7 @@ bool IsSolvable(const SigningProvider& provider, const CScript& script);
/** Check whether a scriptPubKey is known to be segwit. */
bool IsSegWitOutput(const SigningProvider& provider, const CScript& script);
+/** Sign the CMutableTransaction */
+bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* provider, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors);
+
#endif // BITCOIN_SCRIPT_SIGN_H
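A sketch of how a caller might drive the new standalone SignTransaction() helper (editor's illustration, not part of the patch; the wrapper function, its logging and variable names are assumptions):

bool SignAllInputs(CMutableTransaction& mtx, const SigningProvider& keystore,
                   const std::map<COutPoint, Coin>& coins)
{
    std::map<int, std::string> input_errors;
    const bool complete = SignTransaction(mtx, &keystore, coins, SIGHASH_ALL, input_errors);
    for (const auto& err : input_errors) {
        // err.first is the input index, err.second the reason, e.g.
        // "Input not found or already spent" or "Missing amount".
        LogPrintf("input %d: %s\n", err.first, err.second);
    }
    return complete; // true only when every input was signed and verified
}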
diff --git a/src/script/signingprovider.h b/src/script/signingprovider.h
index c40fecac5c..76f31d2f6f 100644
--- a/src/script/signingprovider.h
+++ b/src/script/signingprovider.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -66,13 +66,59 @@ protected:
using KeyMap = std::map<CKeyID, CKey>;
using ScriptMap = std::map<CScriptID, CScript>;
+ /**
+ * Map of key id to unencrypted private keys known by the signing provider.
+ * Map may be empty if the provider has another source of keys, like an
+ * encrypted store.
+ */
KeyMap mapKeys GUARDED_BY(cs_KeyStore);
+
+ /**
+ * Map of script id to scripts known by the signing provider.
+ *
+ * This map originally just held P2SH redeemScripts, and was used by wallet
+ * code to look up script ids referenced in "OP_HASH160 <script id>
+ * OP_EQUAL" P2SH outputs. Later in 605e8473a7d it was extended to hold
+ * P2WSH witnessScripts as well, and used to look up nested scripts
+ * referenced in "OP_0 <script hash>" P2WSH outputs. Later in commits
+ * f4691ab3a9d and 248f3a76a82, it was extended once again to hold segwit
+ * "OP_0 <key or script hash>" scriptPubKeys, in order to give the wallet a
+ * way to distinguish between segwit outputs that it generated addresses for
+ * and wanted to receive payments from, and segwit outputs that it never
+ * generated addresses for, but it could spend just because of having keys.
+ * (Before segwit activation it was also important to not treat segwit
+ * outputs to arbitrary wallet keys as payments, because these could be
+ * spent by anyone without even needing to sign with the keys.)
+ *
+ * Some of the scripts stored in mapScripts are memory-only and
+ * intentionally not saved to disk. Specifically, scripts added by
+ * ImplicitlyLearnRelatedKeyScripts(pubkey) calls are not written to disk so
+ * future wallet code can have flexibility to be more selective about what
+ * transaction outputs it recognizes as payments, instead of having to treat
+ * all outputs spending to keys it knows as payments. By contrast,
+ * mapScripts entries added by AddCScript(script),
+ * LearnRelatedScripts(pubkey, type), and LearnAllRelatedScripts(pubkey)
+ * calls are saved because they are all intentionally used to receive
+ * payments.
+ *
+ * The FillableSigningProvider::mapScripts script map should not be confused
+ * with LegacyScriptPubKeyMan::setWatchOnly script set. The two collections
+ * can hold the same scripts, but they serve different purposes. The
+ * setWatchOnly script set is intended to expand the set of outputs the
+ * wallet considers payments. Every output with a script it contains is
+ * considered to belong to the wallet, regardless of whether the script is
+ * solvable or signable. By contrast, the scripts in mapScripts are only
+ * used for solving, and to restrict which outputs are considered payments
+ * by the wallet. An output with a script in mapScripts, unlike
+ * setWatchOnly, is not automatically considered to belong to the wallet if
+ * it can't be solved and signed for.
+ */
ScriptMap mapScripts GUARDED_BY(cs_KeyStore);
void ImplicitlyLearnRelatedKeyScripts(const CPubKey& pubkey) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
public:
- mutable CCriticalSection cs_KeyStore;
+ mutable RecursiveMutex cs_KeyStore;
virtual bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey);
virtual bool AddKey(const CKey &key) { return AddKeyPubKey(key, key.GetPubKey()); }
diff --git a/src/script/standard.h b/src/script/standard.h
index 350a5603d5..49a45f3eba 100644
--- a/src/script/standard.h
+++ b/src/script/standard.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -80,9 +80,14 @@ struct PKHash : public uint160
using uint160::uint160;
};
+struct WitnessV0KeyHash;
struct ScriptHash : public uint160
{
ScriptHash() : uint160() {}
+ // These don't do what you'd expect.
+ // Use ScriptHash(GetScriptForDestination(...)) instead.
+ explicit ScriptHash(const WitnessV0KeyHash& hash) = delete;
+ explicit ScriptHash(const PKHash& hash) = delete;
explicit ScriptHash(const uint160& hash) : uint160(hash) {}
explicit ScriptHash(const CScript& script);
using uint160::uint160;
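A brief sketch of the pattern the deleted overloads push callers toward: hash the serialized script, never another destination's hash (editor's illustration, not part of the patch; the function is hypothetical):

CScript NestedSegwitExample(const WitnessV0KeyHash& witness_key_hash)
{
    CScript p2wpkh = GetScriptForDestination(witness_key_hash);
    ScriptHash nested{p2wpkh};              // OK: hashes the serialized script
    // ScriptHash wrong{witness_key_hash};  // no longer compiles: deleted overload
    return GetScriptForDestination(nested); // the P2SH-P2WPKH scriptPubKey
}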
diff --git a/src/serialize.h b/src/serialize.h
index c9e994f844..5045cb3c7f 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -9,6 +9,7 @@
#include <compat/endian.h>
#include <algorithm>
+#include <cstring>
#include <ios>
#include <limits>
#include <map>
@@ -25,6 +26,9 @@
static const unsigned int MAX_SIZE = 0x02000000;
+/** Maximum amount of memory (in bytes) to allocate at once when deserializing vectors. */
+static const unsigned int MAX_VECTOR_ALLOCATE = 5000000;
+
/**
* Dummy data type to identify deserializing constructors.
*
@@ -136,27 +140,31 @@ template<typename Stream> inline uint64_t ser_readdata64(Stream &s)
}
inline uint64_t ser_double_to_uint64(double x)
{
- union { double x; uint64_t y; } tmp;
- tmp.x = x;
- return tmp.y;
+ uint64_t tmp;
+ std::memcpy(&tmp, &x, sizeof(x));
+ static_assert(sizeof(tmp) == sizeof(x), "double and uint64_t assumed to have the same size");
+ return tmp;
}
inline uint32_t ser_float_to_uint32(float x)
{
- union { float x; uint32_t y; } tmp;
- tmp.x = x;
- return tmp.y;
+ uint32_t tmp;
+ std::memcpy(&tmp, &x, sizeof(x));
+ static_assert(sizeof(tmp) == sizeof(x), "float and uint32_t assumed to have the same size");
+ return tmp;
}
inline double ser_uint64_to_double(uint64_t y)
{
- union { double x; uint64_t y; } tmp;
- tmp.y = y;
- return tmp.x;
+ double tmp;
+ std::memcpy(&tmp, &y, sizeof(y));
+ static_assert(sizeof(tmp) == sizeof(y), "double and uint64_t assumed to have the same size");
+ return tmp;
}
inline float ser_uint32_to_float(uint32_t y)
{
- union { float x; uint32_t y; } tmp;
- tmp.y = y;
- return tmp.x;
+ float tmp;
+ std::memcpy(&tmp, &y, sizeof(y));
+ static_assert(sizeof(tmp) == sizeof(y), "float and uint32_t assumed to have the same size");
+ return tmp;
}
@@ -200,6 +208,30 @@ template<typename X> const X& ReadWriteAsHelper(const X& x) { return x; }
}
/**
+ * Implement the Ser and Unser methods needed for implementing a formatter (see Using below).
+ *
+ * Both Ser and Unser are delegated to a single static method SerializationOps, which is polymorphic
+ * in the serialized/deserialized type (allowing it to be const when serializing, and non-const when
+ * deserializing).
+ *
+ * Example use:
+ * struct FooFormatter {
+ * FORMATTER_METHODS(Class, obj) { READWRITE(obj.val1, VARINT(obj.val2)); }
+ * }
+ * would define a class FooFormatter that defines a serialization of Class objects consisting
+ * of serializing its val1 member using the default serialization, and its val2 member using
+ * VARINT serialization. That FooFormatter can then be used in statements like
+ * READWRITE(Using<FooFormatter>(obj.bla)).
+ */
+#define FORMATTER_METHODS(cls, obj) \
+ template<typename Stream> \
+ static void Ser(Stream& s, const cls& obj) { SerializationOps(obj, s, CSerActionSerialize()); } \
+ template<typename Stream> \
+ static void Unser(Stream& s, cls& obj) { SerializationOps(obj, s, CSerActionUnserialize()); } \
+ template<typename Stream, typename Type, typename Operation> \
+ static inline void SerializationOps(Type& obj, Stream& s, Operation ser_action) \
+
+/**
* Implement the Serialize and Unserialize methods by delegating to a single templated
* static method that takes the to-be-(de)serialized object as a parameter. This approach
* has the advantage that the constness of the object becomes a template parameter, and
@@ -211,17 +243,15 @@ template<typename X> const X& ReadWriteAsHelper(const X& x) { return x; }
void Serialize(Stream& s) const \
{ \
static_assert(std::is_same<const cls&, decltype(*this)>::value, "Serialize type mismatch"); \
- SerializationOps(*this, s, CSerActionSerialize()); \
+ Ser(s, *this); \
} \
template<typename Stream> \
void Unserialize(Stream& s) \
{ \
static_assert(std::is_same<cls&, decltype(*this)>::value, "Unserialize type mismatch"); \
- SerializationOps(*this, s, CSerActionUnserialize()); \
+ Unser(s, *this); \
} \
- template<typename Stream, typename Type, typename Operation> \
- static inline void SerializationOps(Type& obj, Stream& s, Operation ser_action) \
-
+ FORMATTER_METHODS(cls, obj)
#ifndef CHAR_EQUALS_INT8
template<typename Stream> inline void Serialize(Stream& s, char a ) { ser_writedata8(s, a); } // TODO Get rid of bare char
@@ -442,26 +472,71 @@ I ReadVarInt(Stream& is)
}
}
-#define VARINT(obj, ...) WrapVarInt<__VA_ARGS__>(REF(obj))
-#define COMPACTSIZE(obj) CCompactSize(REF(obj))
-#define LIMITED_STRING(obj,n) LimitedString< n >(REF(obj))
-
-template<VarIntMode Mode, typename I>
-class CVarInt
+/** Simple wrapper class to serialize objects using a formatter; used by Using(). */
+template<typename Formatter, typename T>
+class Wrapper
{
+ static_assert(std::is_lvalue_reference<T>::value, "Wrapper needs an lvalue reference type T");
protected:
- I &n;
+ T m_object;
public:
- explicit CVarInt(I& nIn) : n(nIn) { }
+ explicit Wrapper(T obj) : m_object(obj) {}
+ template<typename Stream> void Serialize(Stream &s) const { Formatter().Ser(s, m_object); }
+ template<typename Stream> void Unserialize(Stream &s) { Formatter().Unser(s, m_object); }
+};
- template<typename Stream>
- void Serialize(Stream &s) const {
- WriteVarInt<Stream,Mode,I>(s, n);
+/** Cause serialization/deserialization of an object to be done using a specified formatter class.
+ *
+ * To use this, you need a class Formatter that has public functions Ser(stream, const object&) for
+ * serialization, and Unser(stream, object&) for deserialization. Serialization routines (inside
+ * READWRITE, or directly with << and >> operators), can then use Using<Formatter>(object).
+ *
+ * This works by constructing a Wrapper<Formatter, T>-wrapped version of object, where T is
+ * const during serialization, and non-const during deserialization, which maintains const
+ * correctness.
+ */
+template<typename Formatter, typename T>
+static inline Wrapper<Formatter, T&> Using(T&& t) { return Wrapper<Formatter, T&>(t); }
+
+#define VARINT_MODE(obj, mode) Using<VarIntFormatter<mode>>(obj)
+#define VARINT(obj) Using<VarIntFormatter<VarIntMode::DEFAULT>>(obj)
+#define COMPACTSIZE(obj) Using<CompactSizeFormatter>(obj)
+#define LIMITED_STRING(obj,n) LimitedString< n >(REF(obj))
+
+/** Serialization wrapper class for integers in VarInt format. */
+template<VarIntMode Mode>
+struct VarIntFormatter
+{
+ template<typename Stream, typename I> void Ser(Stream &s, I v)
+ {
+ WriteVarInt<Stream,Mode,typename std::remove_cv<I>::type>(s, v);
}
- template<typename Stream>
- void Unserialize(Stream& s) {
- n = ReadVarInt<Stream,Mode,I>(s);
+ template<typename Stream, typename I> void Unser(Stream& s, I& v)
+ {
+ v = ReadVarInt<Stream,Mode,typename std::remove_cv<I>::type>(s);
+ }
+};
+
+template<int Bytes>
+struct CustomUintFormatter
+{
+ static_assert(Bytes > 0 && Bytes <= 8, "CustomUintFormatter Bytes out of range");
+ static constexpr uint64_t MAX = 0xffffffffffffffff >> (8 * (8 - Bytes));
+
+ template <typename Stream, typename I> void Ser(Stream& s, I v)
+ {
+ if (v < 0 || v > MAX) throw std::ios_base::failure("CustomUintFormatter value out of range");
+ uint64_t raw = htole64(v);
+ s.write((const char*)&raw, Bytes);
+ }
+
+ template <typename Stream, typename I> void Unser(Stream& s, I& v)
+ {
+ static_assert(std::numeric_limits<I>::max() >= MAX && std::numeric_limits<I>::min() <= 0, "CustomUintFormatter type too small");
+ uint64_t raw = 0;
+ s.read((char*)&raw, Bytes);
+ v = le64toh(raw);
}
};
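For context, a formatter like this is meant to be combined with Using() inside serialization code (editor's sketch, not part of the patch; the struct and field are hypothetical, and it assumes the SERIALIZE_METHODS(cls, obj) macro referenced in the VectorFormatter example further down):

struct CompactHeightExample
{
    uint32_t height = 0;
    SERIALIZE_METHODS(CompactHeightExample, obj)
    {
        // Stored as a fixed 3-byte little-endian integer (max 0xFFFFFF).
        READWRITE(Using<CustomUintFormatter<3>>(obj.height));
    }
};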
@@ -499,21 +574,26 @@ public:
}
};
-class CCompactSize
+/** Formatter for integers in CompactSize format. */
+struct CompactSizeFormatter
{
-protected:
- uint64_t &n;
-public:
- explicit CCompactSize(uint64_t& nIn) : n(nIn) { }
-
- template<typename Stream>
- void Serialize(Stream &s) const {
- WriteCompactSize<Stream>(s, n);
+ template<typename Stream, typename I>
+ void Unser(Stream& s, I& v)
+ {
+ uint64_t n = ReadCompactSize<Stream>(s);
+ if (n < std::numeric_limits<I>::min() || n > std::numeric_limits<I>::max()) {
+ throw std::ios_base::failure("CompactSize exceeds limit of type");
+ }
+ v = n;
}
- template<typename Stream>
- void Unserialize(Stream& s) {
- n = ReadCompactSize<Stream>(s);
+ template<typename Stream, typename I>
+ void Ser(Stream& s, I v)
+ {
+ static_assert(std::is_unsigned<I>::value, "CompactSize only supported for unsigned integers");
+ static_assert(std::numeric_limits<I>::max() <= std::numeric_limits<uint64_t>::max(), "CompactSize only supports 64-bit integers and below");
+
+ WriteCompactSize<Stream>(s, v);
}
};
@@ -546,12 +626,57 @@ public:
}
};
-template<VarIntMode Mode=VarIntMode::DEFAULT, typename I>
-CVarInt<Mode, I> WrapVarInt(I& n) { return CVarInt<Mode, I>{n}; }
-
template<typename I>
BigEndian<I> WrapBigEndian(I& n) { return BigEndian<I>(n); }
+/** Formatter to serialize/deserialize vector elements using another formatter
+ *
+ * Example:
+ * struct X {
+ * std::vector<uint64_t> v;
+ * SERIALIZE_METHODS(X, obj) { READWRITE(Using<VectorFormatter<VarInt>>(obj.v)); }
+ * };
+ * will define a struct that contains a vector of uint64_t, which is serialized
+ * as a vector of VarInt-encoded integers.
+ *
+ * V is not required to be an std::vector type. It works for any class that
+ * exposes a value_type, size, reserve, emplace_back, back, and const iterators.
+ */
+template<class Formatter>
+struct VectorFormatter
+{
+ template<typename Stream, typename V>
+ void Ser(Stream& s, const V& v)
+ {
+ Formatter formatter;
+ WriteCompactSize(s, v.size());
+ for (const typename V::value_type& elem : v) {
+ formatter.Ser(s, elem);
+ }
+ }
+
+ template<typename Stream, typename V>
+ void Unser(Stream& s, V& v)
+ {
+ Formatter formatter;
+ v.clear();
+ size_t size = ReadCompactSize(s);
+ size_t allocated = 0;
+ while (allocated < size) {
+ // For DoS prevention, do not blindly allocate as much as the stream claims to contain.
+ // Instead, allocate in 5MiB batches, so that an attacker actually needs to provide
+ // X MiB of data to make us allocate X+5 MiB.
+ static_assert(sizeof(typename V::value_type) <= MAX_VECTOR_ALLOCATE, "Vector element size too large");
+ allocated = std::min(size, allocated + MAX_VECTOR_ALLOCATE / sizeof(typename V::value_type));
+ v.reserve(allocated);
+ while (v.size() < allocated) {
+ v.emplace_back();
+ formatter.Unser(s, v.back());
+ }
+ }
+ }
+};
+
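Completing the comment's example with the concrete formatter name (editor's sketch, not part of the patch; the struct is hypothetical, and VarIntFormatter is the formatter behind the VARINT macro):

struct VarIntVectorExample
{
    std::vector<uint64_t> v;
    SERIALIZE_METHODS(VarIntVectorExample, obj)
    {
        // Serialized as a CompactSize count followed by VarInt-encoded elements.
        READWRITE(Using<VectorFormatter<VarIntFormatter<VarIntMode::DEFAULT>>>(obj.v));
    }
};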
/**
* Forward declarations
*/
@@ -632,6 +757,20 @@ inline void Unserialize(Stream& is, T&& a)
a.Unserialize(is);
}
+/** Default formatter. Serializes objects as themselves.
+ *
+ * The vector/prevector serialization code passes this to VectorFormatter
+ * to enable reusing that logic. It shouldn't be needed elsewhere.
+ */
+struct DefaultFormatter
+{
+ template<typename Stream, typename T>
+ static void Ser(Stream& s, const T& t) { Serialize(s, t); }
+
+ template<typename Stream, typename T>
+ static void Unser(Stream& s, T& t) { Unserialize(s, t); }
+};
+
@@ -672,9 +811,7 @@ void Serialize_impl(Stream& os, const prevector<N, T>& v, const unsigned char&)
template<typename Stream, unsigned int N, typename T, typename V>
void Serialize_impl(Stream& os, const prevector<N, T>& v, const V&)
{
- WriteCompactSize(os, v.size());
- for (typename prevector<N, T>::const_iterator vi = v.begin(); vi != v.end(); ++vi)
- ::Serialize(os, (*vi));
+ Serialize(os, Using<VectorFormatter<DefaultFormatter>>(v));
}
template<typename Stream, unsigned int N, typename T>
@@ -703,19 +840,7 @@ void Unserialize_impl(Stream& is, prevector<N, T>& v, const unsigned char&)
template<typename Stream, unsigned int N, typename T, typename V>
void Unserialize_impl(Stream& is, prevector<N, T>& v, const V&)
{
- v.clear();
- unsigned int nSize = ReadCompactSize(is);
- unsigned int i = 0;
- unsigned int nMid = 0;
- while (nMid < nSize)
- {
- nMid += 5000000 / sizeof(T);
- if (nMid > nSize)
- nMid = nSize;
- v.resize_uninitialized(nMid);
- for (; i < nMid; ++i)
- Unserialize(is, v[i]);
- }
+ Unserialize(is, Using<VectorFormatter<DefaultFormatter>>(v));
}
template<typename Stream, unsigned int N, typename T>
@@ -752,9 +877,7 @@ void Serialize_impl(Stream& os, const std::vector<T, A>& v, const bool&)
template<typename Stream, typename T, typename A, typename V>
void Serialize_impl(Stream& os, const std::vector<T, A>& v, const V&)
{
- WriteCompactSize(os, v.size());
- for (typename std::vector<T, A>::const_iterator vi = v.begin(); vi != v.end(); ++vi)
- ::Serialize(os, (*vi));
+ Serialize(os, Using<VectorFormatter<DefaultFormatter>>(v));
}
template<typename Stream, typename T, typename A>
@@ -783,19 +906,7 @@ void Unserialize_impl(Stream& is, std::vector<T, A>& v, const unsigned char&)
template<typename Stream, typename T, typename A, typename V>
void Unserialize_impl(Stream& is, std::vector<T, A>& v, const V&)
{
- v.clear();
- unsigned int nSize = ReadCompactSize(is);
- unsigned int i = 0;
- unsigned int nMid = 0;
- while (nMid < nSize)
- {
- nMid += 5000000 / sizeof(T);
- if (nMid > nSize)
- nMid = nSize;
- v.resize(nMid);
- for (; i < nMid; i++)
- Unserialize(is, v[i]);
- }
+ Unserialize(is, Using<VectorFormatter<DefaultFormatter>>(v));
}
template<typename Stream, typename T, typename A>
diff --git a/src/sync.cpp b/src/sync.cpp
index 3c88becb6a..71657a7439 100644
--- a/src/sync.cpp
+++ b/src/sync.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2019 The Bitcoin Core developers
+// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -13,7 +13,7 @@
#include <util/strencodings.h>
#include <util/threadnames.h>
-
+#include <system_error>
#include <map>
#include <set>
@@ -60,6 +60,11 @@ struct CLockLocation {
mutexName, sourceFile, itostr(sourceLine), (fTry ? " (TRY)" : ""), m_thread_name);
}
+ std::string Name() const
+ {
+ return mutexName;
+ }
+
private:
bool fTry;
std::string mutexName;
@@ -75,7 +80,7 @@ typedef std::set<std::pair<void*, void*> > InvLockOrders;
struct LockData {
// Very ugly hack: as the global constructs and destructors run single
// threaded, we use this boolean to know whether LockData still exists,
- // as DeleteLock can get called by global CCriticalSection destructors
+ // as DeleteLock can get called by global RecursiveMutex destructors
// after LockData disappears.
bool available;
LockData() : available(true) {}
@@ -155,6 +160,18 @@ void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs
push_lock(cs, CLockLocation(pszName, pszFile, nLine, fTry, util::ThreadGetInternalName()));
}
+void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line)
+{
+ if (!g_lockstack.empty()) {
+ const auto& lastlock = g_lockstack.back();
+ if (lastlock.first == cs) {
+ lockname = lastlock.second.Name();
+ return;
+ }
+ }
+ throw std::system_error(EPERM, std::generic_category(), strprintf("%s:%s %s was not most recent critical section locked", file, line, guardname));
+}
+
void LeaveCritical()
{
pop_lock();
diff --git a/src/sync.h b/src/sync.h
index bd32a8bb61..204734c273 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -50,6 +50,7 @@ LEAVE_CRITICAL_SECTION(mutex); // no RAII
#ifdef DEBUG_LOCKORDER
void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false);
void LeaveCritical();
+void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line);
std::string LocksHeld();
void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) ASSERT_EXCLUSIVE_LOCK(cs);
void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs);
@@ -64,6 +65,7 @@ extern bool g_debug_lockorder_abort;
#else
void static inline EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
void static inline LeaveCritical() {}
+void static inline CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {}
void static inline AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) ASSERT_EXCLUSIVE_LOCK(cs) {}
void static inline AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
void static inline DeleteLock(void* cs) {}
@@ -106,7 +108,6 @@ public:
* TODO: We should move away from using the recursive lock by default.
*/
using RecursiveMutex = AnnotatedMixin<std::recursive_mutex>;
-typedef AnnotatedMixin<std::recursive_mutex> CCriticalSection;
/** Wrapped mutex: supports waiting but not recursive locking */
typedef AnnotatedMixin<std::mutex> Mutex;
@@ -172,8 +173,45 @@ public:
{
return Base::owns_lock();
}
+
+protected:
+ // needed for reverse_lock
+ UniqueLock() { }
+
+public:
+ /**
+ * An RAII-style reverse lock. Unlocks on construction and locks on destruction.
+ */
+ class reverse_lock {
+ public:
+ explicit reverse_lock(UniqueLock& _lock, const char* _guardname, const char* _file, int _line) : lock(_lock), file(_file), line(_line) {
+ CheckLastCritical((void*)lock.mutex(), lockname, _guardname, _file, _line);
+ lock.unlock();
+ LeaveCritical();
+ lock.swap(templock);
+ }
+
+ ~reverse_lock() {
+ templock.swap(lock);
+ EnterCritical(lockname.c_str(), file.c_str(), line, (void*)lock.mutex());
+ lock.lock();
+ }
+
+ private:
+ reverse_lock(reverse_lock const&);
+ reverse_lock& operator=(reverse_lock const&);
+
+ UniqueLock& lock;
+ UniqueLock templock;
+ std::string lockname;
+ const std::string file;
+ const int line;
+ };
+ friend class reverse_lock;
};
+#define REVERSE_LOCK(g) decltype(g)::reverse_lock PASTE2(revlock, __COUNTER__)(g, #g, __FILE__, __LINE__)
+
template<typename MutexArg>
using DebugLock = UniqueLock<typename std::remove_reference<typename std::remove_pointer<MutexArg>::type>::type>;
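A sketch of what the new REVERSE_LOCK helper enables (editor's illustration, not part of the patch; it assumes the existing WAIT_LOCK macro so the guard has a name, and the mutex and function are hypothetical):

Mutex g_example_mutex;

void CallWithoutLockHeld()
{
    WAIT_LOCK(g_example_mutex, lock); // named guard, unlike LOCK()
    // ... state protected by g_example_mutex ...
    {
        REVERSE_LOCK(lock); // unlocks here, re-locks when this scope ends
        // Safe place for a blocking or re-entrant call that must not hold
        // g_example_mutex; CheckLastCritical() enforces that `lock` was the
        // most recently acquired critical section.
    }
    // g_example_mutex is held again here.
}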
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index c034216bc1..07cebeb35a 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -5,6 +5,8 @@
#include <test/util/setup_common.h>
#include <string>
#include <boost/test/unit_test.hpp>
+#include <util/asmap.h>
+#include <test/data/asmap.raw.h>
#include <hash.h>
#include <netbase.h>
@@ -12,13 +14,18 @@
class CAddrManTest : public CAddrMan
{
+private:
+ bool deterministic;
public:
- explicit CAddrManTest(bool makeDeterministic = true)
+ explicit CAddrManTest(bool makeDeterministic = true,
+ std::vector<bool> asmap = std::vector<bool>())
{
if (makeDeterministic) {
// Set addrman addr placement to be deterministic.
MakeDeterministic();
}
+ deterministic = makeDeterministic;
+ m_asmap = asmap;
}
//! Ensure that bucket placement is always the same for testing purposes.
@@ -46,6 +53,21 @@ public:
CAddrMan::Delete(nId);
}
+ // Used to test deserialization
+ std::pair<int, int> GetBucketAndEntry(const CAddress& addr)
+ {
+ LOCK(cs);
+ int nId = mapAddr[addr];
+ for (int bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; ++bucket) {
+ for (int entry = 0; entry < ADDRMAN_BUCKET_SIZE; ++entry) {
+ if (nId == vvNew[bucket][entry]) {
+ return std::pair<int, int>(bucket, entry);
+ }
+ }
+ }
+ return std::pair<int, int>(-1, -1);
+ }
+
// Simulates connection failure so that we can test eviction of offline nodes
void SimConnFail(CService& addr)
{
@@ -57,32 +79,45 @@ public:
int64_t nLastTry = GetAdjustedTime()-61;
Attempt(addr, count_failure, nLastTry);
}
+
+ void Clear()
+ {
+ CAddrMan::Clear();
+ if (deterministic) {
+ nKey.SetNull();
+ insecure_rand = FastRandomContext(true);
+ }
+ }
+
};
-static CNetAddr ResolveIP(const char* ip)
+static CNetAddr ResolveIP(const std::string& ip)
{
CNetAddr addr;
BOOST_CHECK_MESSAGE(LookupHost(ip, addr, false), strprintf("failed to resolve: %s", ip));
return addr;
}
-static CNetAddr ResolveIP(std::string ip)
-{
- return ResolveIP(ip.c_str());
-}
-
-static CService ResolveService(const char* ip, int port = 0)
+static CService ResolveService(const std::string& ip, const int port = 0)
{
CService serv;
BOOST_CHECK_MESSAGE(Lookup(ip, serv, port, false), strprintf("failed to resolve: %s:%i", ip, port));
return serv;
}
-static CService ResolveService(std::string ip, int port = 0)
-{
- return ResolveService(ip.c_str(), port);
+
+static std::vector<bool> FromBytes(const unsigned char* source, int vector_size) {
+ std::vector<bool> result(vector_size);
+ for (int byte_i = 0; byte_i < vector_size / 8; ++byte_i) {
+ unsigned char cur_byte = source[byte_i];
+ for (int bit_i = 0; bit_i < 8; ++bit_i) {
+ result[byte_i * 8 + bit_i] = (cur_byte >> bit_i) & 1;
+ }
+ }
+ return result;
}
+
BOOST_FIXTURE_TEST_SUITE(addrman_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(addrman_simple)
@@ -409,7 +444,7 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
}
-BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
+BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket_legacy)
{
CAddrManTest addrman;
@@ -424,30 +459,31 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
uint256 nKey1 = (uint256)(CHashWriter(SER_GETHASH, 0) << 1).GetHash();
uint256 nKey2 = (uint256)(CHashWriter(SER_GETHASH, 0) << 2).GetHash();
+ std::vector<bool> asmap; // empty asmap: fall back to legacy /16 grouping
- BOOST_CHECK_EQUAL(info1.GetTriedBucket(nKey1), 40);
+ BOOST_CHECK_EQUAL(info1.GetTriedBucket(nKey1, asmap), 40);
// Test: Make sure key actually randomizes bucket placement. A fail on
// this test could be a security issue.
- BOOST_CHECK(info1.GetTriedBucket(nKey1) != info1.GetTriedBucket(nKey2));
+ BOOST_CHECK(info1.GetTriedBucket(nKey1, asmap) != info1.GetTriedBucket(nKey2, asmap));
// Test: Two addresses with same IP but different ports can map to
// different buckets because they have different keys.
CAddrInfo info2 = CAddrInfo(addr2, source1);
BOOST_CHECK(info1.GetKey() != info2.GetKey());
- BOOST_CHECK(info1.GetTriedBucket(nKey1) != info2.GetTriedBucket(nKey1));
+ BOOST_CHECK(info1.GetTriedBucket(nKey1, asmap) != info2.GetTriedBucket(nKey1, asmap));
std::set<int> buckets;
for (int i = 0; i < 255; i++) {
CAddrInfo infoi = CAddrInfo(
CAddress(ResolveService("250.1.1." + std::to_string(i)), NODE_NONE),
ResolveIP("250.1.1." + std::to_string(i)));
- int bucket = infoi.GetTriedBucket(nKey1);
+ int bucket = infoi.GetTriedBucket(nKey1, asmap);
buckets.insert(bucket);
}
- // Test: IP addresses in the same group (\16 prefix for IPv4) should
- // never get more than 8 buckets
+ // Test: IP addresses in the same /16 prefix should
+ // never get more than 8 buckets with legacy grouping
BOOST_CHECK_EQUAL(buckets.size(), 8U);
buckets.clear();
@@ -455,15 +491,15 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
CAddrInfo infoj = CAddrInfo(
CAddress(ResolveService("250." + std::to_string(j) + ".1.1"), NODE_NONE),
ResolveIP("250." + std::to_string(j) + ".1.1"));
- int bucket = infoj.GetTriedBucket(nKey1);
+ int bucket = infoj.GetTriedBucket(nKey1, asmap);
buckets.insert(bucket);
}
- // Test: IP addresses in the different groups should map to more than
- // 8 buckets.
+ // Test: IP addresses in different /16 prefixes should map to more than
+ // 8 buckets with legacy grouping
BOOST_CHECK_EQUAL(buckets.size(), 160U);
}
-BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
+BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket_legacy)
{
CAddrManTest addrman;
@@ -477,25 +513,27 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
uint256 nKey1 = (uint256)(CHashWriter(SER_GETHASH, 0) << 1).GetHash();
uint256 nKey2 = (uint256)(CHashWriter(SER_GETHASH, 0) << 2).GetHash();
+ std::vector<bool> asmap; // empty asmap: fall back to legacy /16 grouping
+
// Test: Make sure the buckets are what we expect
- BOOST_CHECK_EQUAL(info1.GetNewBucket(nKey1), 786);
- BOOST_CHECK_EQUAL(info1.GetNewBucket(nKey1, source1), 786);
+ BOOST_CHECK_EQUAL(info1.GetNewBucket(nKey1, asmap), 786);
+ BOOST_CHECK_EQUAL(info1.GetNewBucket(nKey1, source1, asmap), 786);
// Test: Make sure key actually randomizes bucket placement. A fail on
// this test could be a security issue.
- BOOST_CHECK(info1.GetNewBucket(nKey1) != info1.GetNewBucket(nKey2));
+ BOOST_CHECK(info1.GetNewBucket(nKey1, asmap) != info1.GetNewBucket(nKey2, asmap));
// Test: Ports should not affect bucket placement in the addr
CAddrInfo info2 = CAddrInfo(addr2, source1);
BOOST_CHECK(info1.GetKey() != info2.GetKey());
- BOOST_CHECK_EQUAL(info1.GetNewBucket(nKey1), info2.GetNewBucket(nKey1));
+ BOOST_CHECK_EQUAL(info1.GetNewBucket(nKey1, asmap), info2.GetNewBucket(nKey1, asmap));
std::set<int> buckets;
for (int i = 0; i < 255; i++) {
CAddrInfo infoi = CAddrInfo(
CAddress(ResolveService("250.1.1." + std::to_string(i)), NODE_NONE),
ResolveIP("250.1.1." + std::to_string(i)));
- int bucket = infoi.GetNewBucket(nKey1);
+ int bucket = infoi.GetNewBucket(nKey1, asmap);
buckets.insert(bucket);
}
// Test: IP addresses in the same group (\16 prefix for IPv4) should
@@ -508,10 +546,10 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
ResolveService(
std::to_string(250 + (j / 255)) + "." + std::to_string(j % 256) + ".1.1"), NODE_NONE),
ResolveIP("251.4.1.1"));
- int bucket = infoj.GetNewBucket(nKey1);
+ int bucket = infoj.GetNewBucket(nKey1, asmap);
buckets.insert(bucket);
}
- // Test: IP addresses in the same source groups should map to no more
+ // Test: IP addresses in the same source groups should map to NO MORE
// than 64 buckets.
BOOST_CHECK(buckets.size() <= 64);
@@ -520,14 +558,226 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
CAddrInfo infoj = CAddrInfo(
CAddress(ResolveService("250.1.1.1"), NODE_NONE),
ResolveIP("250." + std::to_string(p) + ".1.1"));
- int bucket = infoj.GetNewBucket(nKey1);
+ int bucket = infoj.GetNewBucket(nKey1, asmap);
buckets.insert(bucket);
}
- // Test: IP addresses in the different source groups should map to more
+ // Test: IP addresses in the different source groups should map to MORE
// than 64 buckets.
BOOST_CHECK(buckets.size() > 64);
}
+// The following three test cases use asmap.raw
+// We use an artificial minimal mock mapping
+// 250.0.0.0/8 AS1000
+// 101.1.0.0/16 AS1
+// 101.2.0.0/16 AS2
+// 101.3.0.0/16 AS3
+// 101.4.0.0/16 AS4
+// 101.5.0.0/16 AS5
+// 101.6.0.0/16 AS6
+// 101.7.0.0/16 AS7
+// 101.8.0.0/16 AS8
+BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
+{
+ CAddrManTest addrman;
+
+ CAddress addr1 = CAddress(ResolveService("250.1.1.1", 8333), NODE_NONE);
+ CAddress addr2 = CAddress(ResolveService("250.1.1.1", 9999), NODE_NONE);
+
+ CNetAddr source1 = ResolveIP("250.1.1.1");
+
+
+ CAddrInfo info1 = CAddrInfo(addr1, source1);
+
+ uint256 nKey1 = (uint256)(CHashWriter(SER_GETHASH, 0) << 1).GetHash();
+ uint256 nKey2 = (uint256)(CHashWriter(SER_GETHASH, 0) << 2).GetHash();
+
+ std::vector<bool> asmap = FromBytes(asmap_raw, sizeof(asmap_raw) * 8);
+
+ BOOST_CHECK_EQUAL(info1.GetTriedBucket(nKey1, asmap), 236);
+
+ // Test: Make sure key actually randomizes bucket placement. A fail on
+ // this test could be a security issue.
+ BOOST_CHECK(info1.GetTriedBucket(nKey1, asmap) != info1.GetTriedBucket(nKey2, asmap));
+
+ // Test: Two addresses with same IP but different ports can map to
+ // different buckets because they have different keys.
+ CAddrInfo info2 = CAddrInfo(addr2, source1);
+
+ BOOST_CHECK(info1.GetKey() != info2.GetKey());
+ BOOST_CHECK(info1.GetTriedBucket(nKey1, asmap) != info2.GetTriedBucket(nKey1, asmap));
+
+ std::set<int> buckets;
+ for (int j = 0; j < 255; j++) {
+ CAddrInfo infoj = CAddrInfo(
+ CAddress(ResolveService("101." + std::to_string(j) + ".1.1"), NODE_NONE),
+ ResolveIP("101." + std::to_string(j) + ".1.1"));
+ int bucket = infoj.GetTriedBucket(nKey1, asmap);
+ buckets.insert(bucket);
+ }
+ // Test: IP addresses in different /16 prefixes MAY map to more than
+ // 8 buckets.
+ BOOST_CHECK(buckets.size() > 8);
+
+ buckets.clear();
+ for (int j = 0; j < 255; j++) {
+ CAddrInfo infoj = CAddrInfo(
+ CAddress(ResolveService("250." + std::to_string(j) + ".1.1"), NODE_NONE),
+ ResolveIP("250." + std::to_string(j) + ".1.1"));
+ int bucket = infoj.GetTriedBucket(nKey1, asmap);
+ buckets.insert(bucket);
+ }
+ // Test: IP addresses in different /16 prefixes MAY NOT map to more than
+ // 8 buckets (here they all fall within the same AS).
+ BOOST_CHECK(buckets.size() == 8);
+}
+
+BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
+{
+ CAddrManTest addrman;
+
+ CAddress addr1 = CAddress(ResolveService("250.1.2.1", 8333), NODE_NONE);
+ CAddress addr2 = CAddress(ResolveService("250.1.2.1", 9999), NODE_NONE);
+
+ CNetAddr source1 = ResolveIP("250.1.2.1");
+
+ CAddrInfo info1 = CAddrInfo(addr1, source1);
+
+ uint256 nKey1 = (uint256)(CHashWriter(SER_GETHASH, 0) << 1).GetHash();
+ uint256 nKey2 = (uint256)(CHashWriter(SER_GETHASH, 0) << 2).GetHash();
+
+ std::vector<bool> asmap = FromBytes(asmap_raw, sizeof(asmap_raw) * 8);
+
+ // Test: Make sure the buckets are what we expect
+ BOOST_CHECK_EQUAL(info1.GetNewBucket(nKey1, asmap), 795);
+ BOOST_CHECK_EQUAL(info1.GetNewBucket(nKey1, source1, asmap), 795);
+
+ // Test: Make sure key actually randomizes bucket placement. A fail on
+ // this test could be a security issue.
+ BOOST_CHECK(info1.GetNewBucket(nKey1, asmap) != info1.GetNewBucket(nKey2, asmap));
+
+ // Test: Ports should not affect bucket placement in the addr
+ CAddrInfo info2 = CAddrInfo(addr2, source1);
+ BOOST_CHECK(info1.GetKey() != info2.GetKey());
+ BOOST_CHECK_EQUAL(info1.GetNewBucket(nKey1, asmap), info2.GetNewBucket(nKey1, asmap));
+
+ std::set<int> buckets;
+ for (int i = 0; i < 255; i++) {
+ CAddrInfo infoi = CAddrInfo(
+ CAddress(ResolveService("250.1.1." + std::to_string(i)), NODE_NONE),
+ ResolveIP("250.1.1." + std::to_string(i)));
+ int bucket = infoi.GetNewBucket(nKey1, asmap);
+ buckets.insert(bucket);
+ }
+ // Test: IP addresses in the same /16 prefix
+ // usually map to the same bucket.
+ BOOST_CHECK_EQUAL(buckets.size(), 1U);
+
+ buckets.clear();
+ for (int j = 0; j < 4 * 255; j++) {
+ CAddrInfo infoj = CAddrInfo(CAddress(
+ ResolveService(
+ std::to_string(250 + (j / 255)) + "." + std::to_string(j % 256) + ".1.1"), NODE_NONE),
+ ResolveIP("251.4.1.1"));
+ int bucket = infoj.GetNewBucket(nKey1, asmap);
+ buckets.insert(bucket);
+ }
+ // Test: IP addresses in the same source /16 prefix should not map to more
+ // than 64 buckets.
+ BOOST_CHECK(buckets.size() <= 64);
+
+ buckets.clear();
+ for (int p = 0; p < 255; p++) {
+ CAddrInfo infoj = CAddrInfo(
+ CAddress(ResolveService("250.1.1.1"), NODE_NONE),
+ ResolveIP("101." + std::to_string(p) + ".1.1"));
+ int bucket = infoj.GetNewBucket(nKey1, asmap);
+ buckets.insert(bucket);
+ }
+ // Test: IP addresses in different source /16 prefixes usually map to MORE
+ // than 1 bucket.
+ BOOST_CHECK(buckets.size() > 1);
+
+ buckets.clear();
+ for (int p = 0; p < 255; p++) {
+ CAddrInfo infoj = CAddrInfo(
+ CAddress(ResolveService("250.1.1.1"), NODE_NONE),
+ ResolveIP("250." + std::to_string(p) + ".1.1"));
+ int bucket = infoj.GetNewBucket(nKey1, asmap);
+ buckets.insert(bucket);
+ }
+ // Test: IP addresses in different source /16 prefixes sometimes map to NO MORE
+ // than 1 bucket.
+ BOOST_CHECK(buckets.size() == 1);
+
+}
+
+BOOST_AUTO_TEST_CASE(addrman_serialization)
+{
+ std::vector<bool> asmap1 = FromBytes(asmap_raw, sizeof(asmap_raw) * 8);
+
+ CAddrManTest addrman_asmap1(true, asmap1);
+ CAddrManTest addrman_asmap1_dup(true, asmap1);
+ CAddrManTest addrman_noasmap;
+ CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
+
+ CAddress addr = CAddress(ResolveService("250.1.1.1"), NODE_NONE);
+ CNetAddr default_source;
+
+
+ addrman_asmap1.Add(addr, default_source);
+
+ stream << addrman_asmap1;
+ // serializing/deserializing addrman with the same asmap
+ stream >> addrman_asmap1_dup;
+
+ std::pair<int, int> bucketAndEntry_asmap1 = addrman_asmap1.GetBucketAndEntry(addr);
+ std::pair<int, int> bucketAndEntry_asmap1_dup = addrman_asmap1_dup.GetBucketAndEntry(addr);
+ BOOST_CHECK(bucketAndEntry_asmap1.second != -1);
+ BOOST_CHECK(bucketAndEntry_asmap1_dup.second != -1);
+
+ BOOST_CHECK(bucketAndEntry_asmap1.first == bucketAndEntry_asmap1_dup.first);
+ BOOST_CHECK(bucketAndEntry_asmap1.second == bucketAndEntry_asmap1_dup.second);
+
+ // deserializing asmaped peers.dat to non-asmaped addrman
+ stream << addrman_asmap1;
+ stream >> addrman_noasmap;
+ std::pair<int, int> bucketAndEntry_noasmap = addrman_noasmap.GetBucketAndEntry(addr);
+ BOOST_CHECK(bucketAndEntry_noasmap.second != -1);
+ BOOST_CHECK(bucketAndEntry_asmap1.first != bucketAndEntry_noasmap.first);
+ BOOST_CHECK(bucketAndEntry_asmap1.second != bucketAndEntry_noasmap.second);
+
+ // deserializing non-asmaped peers.dat to asmaped addrman
+ addrman_asmap1.Clear();
+ addrman_noasmap.Clear();
+ addrman_noasmap.Add(addr, default_source);
+ stream << addrman_noasmap;
+ stream >> addrman_asmap1;
+ std::pair<int, int> bucketAndEntry_asmap1_deser = addrman_asmap1.GetBucketAndEntry(addr);
+ BOOST_CHECK(bucketAndEntry_asmap1_deser.second != -1);
+ BOOST_CHECK(bucketAndEntry_asmap1_deser.first != bucketAndEntry_noasmap.first);
+ BOOST_CHECK(bucketAndEntry_asmap1_deser.first == bucketAndEntry_asmap1_dup.first);
+ BOOST_CHECK(bucketAndEntry_asmap1_deser.second == bucketAndEntry_asmap1_dup.second);
+
+ // Addresses that used to map to different buckets now map to the same bucket under asmap.
+ addrman_asmap1.Clear();
+ addrman_noasmap.Clear();
+ CAddress addr1 = CAddress(ResolveService("250.1.1.1"), NODE_NONE);
+ CAddress addr2 = CAddress(ResolveService("250.2.1.1"), NODE_NONE);
+ addrman_noasmap.Add(addr, default_source);
+ addrman_noasmap.Add(addr2, default_source);
+ std::pair<int, int> bucketAndEntry_noasmap_addr1 = addrman_noasmap.GetBucketAndEntry(addr1);
+ std::pair<int, int> bucketAndEntry_noasmap_addr2 = addrman_noasmap.GetBucketAndEntry(addr2);
+ BOOST_CHECK(bucketAndEntry_noasmap_addr1.first != bucketAndEntry_noasmap_addr2.first);
+ BOOST_CHECK(bucketAndEntry_noasmap_addr1.second != bucketAndEntry_noasmap_addr2.second);
+ stream << addrman_noasmap;
+ stream >> addrman_asmap1;
+ std::pair<int, int> bucketAndEntry_asmap1_deser_addr1 = addrman_asmap1.GetBucketAndEntry(addr1);
+ std::pair<int, int> bucketAndEntry_asmap1_deser_addr2 = addrman_asmap1.GetBucketAndEntry(addr2);
+ BOOST_CHECK(bucketAndEntry_asmap1_deser_addr1.first == bucketAndEntry_asmap1_deser_addr2.first);
+ BOOST_CHECK(bucketAndEntry_asmap1_deser_addr1.second != bucketAndEntry_asmap1_deser_addr2.second);
+}
+
BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
{
diff --git a/src/test/blockchain_tests.cpp b/src/test/blockchain_tests.cpp
index 29935c09f8..3b4c480f72 100644
--- a/src/test/blockchain_tests.cpp
+++ b/src/test/blockchain_tests.cpp
@@ -1,3 +1,7 @@
+// Copyright (c) 2017-2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
#include <boost/test/unit_test.hpp>
#include <stdlib.h>
diff --git a/src/test/blockfilter_index_tests.cpp b/src/test/blockfilter_index_tests.cpp
index 79e18cd2c0..5e52dc268f 100644
--- a/src/test/blockfilter_index_tests.cpp
+++ b/src/test/blockfilter_index_tests.cpp
@@ -138,7 +138,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup)
int64_t time_start = GetTimeMillis();
while (!filter_index.BlockUntilSyncedToCurrentChain()) {
BOOST_REQUIRE(time_start + timeout_ms > GetTimeMillis());
- MilliSleep(100);
+ UninterruptibleSleep(std::chrono::milliseconds{100});
}
// Check that filter index has all blocks that were in the chain before it started.
diff --git a/src/test/bswap_tests.cpp b/src/test/bswap_tests.cpp
index d5e2344a8b..0b4bfdb019 100644
--- a/src/test/bswap_tests.cpp
+++ b/src/test/bswap_tests.cpp
@@ -11,16 +11,16 @@ BOOST_FIXTURE_TEST_SUITE(bswap_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(bswap_tests)
{
- // Sibling in bitcoin/src/qt/test/compattests.cpp
- uint16_t u1 = 0x1234;
- uint32_t u2 = 0x56789abc;
- uint64_t u3 = 0xdef0123456789abc;
- uint16_t e1 = 0x3412;
- uint32_t e2 = 0xbc9a7856;
- uint64_t e3 = 0xbc9a78563412f0de;
- BOOST_CHECK(bswap_16(u1) == e1);
- BOOST_CHECK(bswap_32(u2) == e2);
- BOOST_CHECK(bswap_64(u3) == e3);
+ // Sibling in bitcoin/src/qt/test/compattests.cpp
+ uint16_t u1 = 0x1234;
+ uint32_t u2 = 0x56789abc;
+ uint64_t u3 = 0xdef0123456789abc;
+ uint16_t e1 = 0x3412;
+ uint32_t e2 = 0xbc9a7856;
+ uint64_t e3 = 0xbc9a78563412f0de;
+ BOOST_CHECK(bswap_16(u1) == e1);
+ BOOST_CHECK(bswap_32(u2) == e2);
+ BOOST_CHECK(bswap_64(u3) == e3);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/checkqueue_tests.cpp b/src/test/checkqueue_tests.cpp
index 482fe3772c..a9628e85f9 100644
--- a/src/test/checkqueue_tests.cpp
+++ b/src/test/checkqueue_tests.cpp
@@ -393,7 +393,7 @@ BOOST_AUTO_TEST_CASE(test_CheckQueueControl_Locks)
CCheckQueueControl<FakeCheck> control(queue.get());
// While sleeping, no other thread should execute to this point
auto observed = ++nThreads;
- MilliSleep(10);
+ UninterruptibleSleep(std::chrono::milliseconds{10});
fails += observed != nThreads;
});
}
diff --git a/src/test/cuckoocache_tests.cpp b/src/test/cuckoocache_tests.cpp
index 119d4b3295..6be24c0845 100644
--- a/src/test/cuckoocache_tests.cpp
+++ b/src/test/cuckoocache_tests.cpp
@@ -7,6 +7,7 @@
#include <test/util/setup_common.h>
#include <random.h>
#include <thread>
+#include <deque>
/** Test Suite for CuckooCache
*
diff --git a/src/test/data/asmap.raw b/src/test/data/asmap.raw
new file mode 100644
index 0000000000..3dcf1f3940
--- /dev/null
+++ b/src/test/data/asmap.raw
Binary files differ
diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp
index 632151793e..e5d51ab83b 100644
--- a/src/test/denialofservice_tests.cpp
+++ b/src/test/denialofservice_tests.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2019 The Bitcoin Core developers
+// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -51,7 +51,7 @@ struct COrphanTx {
NodeId fromPeer;
int64_t nTimeExpire;
};
-extern CCriticalSection g_cs_orphans;
+extern RecursiveMutex g_cs_orphans;
extern std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
static CService ip(uint32_t i)
@@ -78,7 +78,7 @@ BOOST_FIXTURE_TEST_SUITE(denialofservice_tests, TestingSetup)
BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
{
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), nullptr, scheduler);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), nullptr, *m_node.scheduler);
// Mock an outbound peer
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
@@ -148,7 +148,7 @@ static void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerLogicValidat
BOOST_AUTO_TEST_CASE(stale_tip_peer_management)
{
auto connman = MakeUnique<CConnmanTest>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), nullptr, scheduler);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), nullptr, *m_node.scheduler);
const Consensus::Params& consensusParams = Params().GetConsensus();
constexpr int max_outbound_full_relay = 8;
@@ -221,7 +221,7 @@ BOOST_AUTO_TEST_CASE(DoS_banning)
{
auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), scheduler);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler);
banman->ClearBanned();
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
@@ -276,7 +276,7 @@ BOOST_AUTO_TEST_CASE(DoS_banscore)
{
auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), scheduler);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler);
banman->ClearBanned();
gArgs.ForceSetArg("-banscore", "111"); // because 11 is my favorite number
@@ -323,7 +323,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
{
auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), scheduler);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler);
banman->ClearBanned();
int64_t nStartTime = GetTime();
diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp
index bcce8854e3..e99aee724e 100644
--- a/src/test/descriptor_tests.cpp
+++ b/src/test/descriptor_tests.cpp
@@ -62,7 +62,7 @@ std::string UseHInsteadOfApostrophe(const std::string& desc)
const std::set<std::vector<uint32_t>> ONLY_EMPTY{{}};
-void DoCheck(const std::string& prv, const std::string& pub, int flags, const std::vector<std::vector<std::string>>& scripts, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY,
+void DoCheck(const std::string& prv, const std::string& pub, int flags, const std::vector<std::vector<std::string>>& scripts, const Optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY,
bool replace_apostrophe_with_h_in_prv=false, bool replace_apostrophe_with_h_in_pub=false)
{
FlatSigningProvider keys_priv, keys_pub;
@@ -86,6 +86,10 @@ void DoCheck(const std::string& prv, const std::string& pub, int flags, const st
BOOST_CHECK(parse_priv);
BOOST_CHECK(parse_pub);
+ // Check that the correct OutputType is inferred
+ BOOST_CHECK(parse_priv->GetOutputType() == type);
+ BOOST_CHECK(parse_pub->GetOutputType() == type);
+
// Check private keys are extracted from the private version but not the public one.
BOOST_CHECK(keys_priv.keys.size());
BOOST_CHECK(!keys_pub.keys.size());
@@ -181,29 +185,29 @@ void DoCheck(const std::string& prv, const std::string& pub, int flags, const st
BOOST_CHECK_MESSAGE(left_paths.empty(), "Not all expected key paths found: " + prv);
}
-void Check(const std::string& prv, const std::string& pub, int flags, const std::vector<std::vector<std::string>>& scripts, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY)
+void Check(const std::string& prv, const std::string& pub, int flags, const std::vector<std::vector<std::string>>& scripts, const Optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY)
{
bool found_apostrophes_in_prv = false;
bool found_apostrophes_in_pub = false;
// Do not replace apostrophes with 'h' in prv and pub
- DoCheck(prv, pub, flags, scripts, paths);
+ DoCheck(prv, pub, flags, scripts, type, paths);
// Replace apostrophes with 'h' in prv but not in pub, if apostrophes are found in prv
if (prv.find('\'') != std::string::npos) {
found_apostrophes_in_prv = true;
- DoCheck(prv, pub, flags, scripts, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */false);
+ DoCheck(prv, pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */false);
}
// Replace apostrophes with 'h' in pub but not in prv, if apostrophes are found in pub
if (pub.find('\'') != std::string::npos) {
found_apostrophes_in_pub = true;
- DoCheck(prv, pub, flags, scripts, paths, /* replace_apostrophe_with_h_in_prv = */false, /*replace_apostrophe_with_h_in_pub = */true);
+ DoCheck(prv, pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */false, /*replace_apostrophe_with_h_in_pub = */true);
}
// Replace apostrophes with 'h' both in prv and in pub, if apostrophes are found in both
if (found_apostrophes_in_prv && found_apostrophes_in_pub) {
- DoCheck(prv, pub, flags, scripts, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */true);
+ DoCheck(prv, pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */true);
}
}
@@ -214,50 +218,50 @@ BOOST_FIXTURE_TEST_SUITE(descriptor_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(descriptor_test)
{
// Basic single-key compressed
- Check("combo(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "combo(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac","76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac","00149a1c78a507689f6f54b847ad1cef1e614ee23f1e","a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}});
- Check("pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac"}});
- Check("pkh([deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh([deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac"}}, {{1,0x80000002UL,3,0x80000004UL}});
- Check("wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"00149a1c78a507689f6f54b847ad1cef1e614ee23f1e"}});
- Check("sh(wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}});
+ Check("combo(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "combo(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac","76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac","00149a1c78a507689f6f54b847ad1cef1e614ee23f1e","a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}}, nullopt);
+ Check("pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac"}}, nullopt);
+ Check("pkh([deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh([deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac"}}, OutputType::LEGACY, {{1,0x80000002UL,3,0x80000004UL}});
+ Check("wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"00149a1c78a507689f6f54b847ad1cef1e614ee23f1e"}}, OutputType::BECH32);
+ Check("sh(wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}}, OutputType::P2SH_SEGWIT);
CheckUnparsable("sh(wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY2))", "sh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5))", "Pubkey '03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5' is invalid"); // Invalid pubkey
CheckUnparsable("pkh(deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh(deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "Key origin start '[ character expected but not found, got 'd' instead"); // Missing start bracket in key origin
CheckUnparsable("pkh([deadbeef]/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh([deadbeef]/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "Multiple ']' characters found for a single pubkey"); // Multiple end brackets in key origin
// Basic single-key uncompressed
- Check("combo(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "combo(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac","76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}});
- Check("pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac"}});
- Check("pkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}});
+ Check("combo(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "combo(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac","76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}}, nullopt);
+ Check("pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac"}}, nullopt);
+ Check("pkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}}, OutputType::LEGACY);
CheckUnparsable("wpkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "wpkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "Uncompressed keys are not allowed"); // No uncompressed keys in witness
CheckUnparsable("wsh(pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss))", "wsh(pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235))", "Uncompressed keys are not allowed"); // No uncompressed keys in witness
CheckUnparsable("sh(wpkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss))", "sh(wpkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235))", "Uncompressed keys are not allowed"); // No uncompressed keys in witness
// Some unconventional single-key constructions
- Check("sh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141857af51a5e516552b3086430fd8ce55f7c1a52487"}});
- Check("sh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141a31ad23bf49c247dd531a623c2ef57da3c400c587"}});
- Check("wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"00202e271faa2325c199d25d22e1ead982e45b64eeb4f31e73dbdf41bd4b5fec23fa"}});
- Check("wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"0020338e023079b91c58571b20e602d7805fb808c22473cbc391a41b1bd3a192e75b"}});
- Check("sh(wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a91472d0c5a3bfad8c3e7bd5303a72b94240e80b6f1787"}});
- Check("sh(wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a914b61b92e2ca21bac1e72a3ab859a742982bea960a87"}});
+ Check("sh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141857af51a5e516552b3086430fd8ce55f7c1a52487"}}, OutputType::LEGACY);
+ Check("sh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141a31ad23bf49c247dd531a623c2ef57da3c400c587"}}, OutputType::LEGACY);
+ Check("wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"00202e271faa2325c199d25d22e1ead982e45b64eeb4f31e73dbdf41bd4b5fec23fa"}}, OutputType::BECH32);
+ Check("wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"0020338e023079b91c58571b20e602d7805fb808c22473cbc391a41b1bd3a192e75b"}}, OutputType::BECH32);
+ Check("sh(wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a91472d0c5a3bfad8c3e7bd5303a72b94240e80b6f1787"}}, OutputType::P2SH_SEGWIT);
+ Check("sh(wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a914b61b92e2ca21bac1e72a3ab859a742982bea960a87"}}, OutputType::P2SH_SEGWIT);
// Versions with BIP32 derivations
- Check("combo([01234567]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([01234567]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", SIGNABLE, {{"2102d2b36900396c9282fa14628566582f206a5dd0bcc8d5e892611806cafb0301f0ac","76a91431a507b815593dfc51ffc7245ae7e5aee304246e88ac","001431a507b815593dfc51ffc7245ae7e5aee304246e","a9142aafb926eb247cb18240a7f4c07983ad1f37922687"}});
- Check("pk(xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)", "pk(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)", DEFAULT, {{"210379e45b3cf75f9c5f9befd8e9506fb962f6a9d185ac87001ec44a8d3df8d4a9e3ac"}}, {{0}});
- Check("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0)", HARDENED, {{"76a914ebdc90806a9c4356c1c88e42216611e1cb4c1c1788ac"}}, {{0xFFFFFFFFUL,0}});
- Check("wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*)", "wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)", RANGE, {{"0014326b2249e3a25d5dc60935f044ee835d090ba859"},{"0014af0bd98abc2f2cae66e36896a39ffe2d32984fb7"},{"00141fa798efd1cbf95cebf912c031b8a4a6e9fb9f27"}}, {{0x8000000DUL, 1, 2, 0}, {0x8000000DUL, 1, 2, 1}, {0x8000000DUL, 1, 2, 2}});
- Check("sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", RANGE | HARDENED, {{"a9149a4d9901d6af519b2a23d4a2f51650fcba87ce7b87"},{"a914bed59fc0024fae941d6e20a3b44a109ae740129287"},{"a9148483aa1116eb9c05c482a72bada4b1db24af654387"}}, {{10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}});
- Check("combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)", "combo(xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*)", RANGE, {{"2102df12b7035bdac8e3bab862a3a83d06ea6b17b6753d52edecba9be46f5d09e076ac","76a914f90e3178ca25f2c808dc76624032d352fdbdfaf288ac","0014f90e3178ca25f2c808dc76624032d352fdbdfaf2","a91408f3ea8c68d4a7585bf9e8bda226723f70e445f087"},{"21032869a233c9adff9a994e4966e5b821fd5bac066da6c3112488dc52383b4a98ecac","76a914a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b788ac","0014a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b7","a91473e39884cb71ae4e5ac9739e9225026c99763e6687"}}, {{0}, {1}});
+ Check("combo([01234567]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([01234567]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", SIGNABLE, {{"2102d2b36900396c9282fa14628566582f206a5dd0bcc8d5e892611806cafb0301f0ac","76a91431a507b815593dfc51ffc7245ae7e5aee304246e88ac","001431a507b815593dfc51ffc7245ae7e5aee304246e","a9142aafb926eb247cb18240a7f4c07983ad1f37922687"}}, nullopt);
+ Check("pk(xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)", "pk(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)", DEFAULT, {{"210379e45b3cf75f9c5f9befd8e9506fb962f6a9d185ac87001ec44a8d3df8d4a9e3ac"}}, nullopt, {{0}});
+ Check("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0)", HARDENED, {{"76a914ebdc90806a9c4356c1c88e42216611e1cb4c1c1788ac"}}, OutputType::LEGACY, {{0xFFFFFFFFUL,0}});
+ Check("wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*)", "wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)", RANGE, {{"0014326b2249e3a25d5dc60935f044ee835d090ba859"},{"0014af0bd98abc2f2cae66e36896a39ffe2d32984fb7"},{"00141fa798efd1cbf95cebf912c031b8a4a6e9fb9f27"}}, OutputType::BECH32, {{0x8000000DUL, 1, 2, 0}, {0x8000000DUL, 1, 2, 1}, {0x8000000DUL, 1, 2, 2}});
+ Check("sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", RANGE | HARDENED, {{"a9149a4d9901d6af519b2a23d4a2f51650fcba87ce7b87"},{"a914bed59fc0024fae941d6e20a3b44a109ae740129287"},{"a9148483aa1116eb9c05c482a72bada4b1db24af654387"}}, OutputType::P2SH_SEGWIT, {{10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}});
+ Check("combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)", "combo(xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*)", RANGE, {{"2102df12b7035bdac8e3bab862a3a83d06ea6b17b6753d52edecba9be46f5d09e076ac","76a914f90e3178ca25f2c808dc76624032d352fdbdfaf288ac","0014f90e3178ca25f2c808dc76624032d352fdbdfaf2","a91408f3ea8c68d4a7585bf9e8bda226723f70e445f087"},{"21032869a233c9adff9a994e4966e5b821fd5bac066da6c3112488dc52383b4a98ecac","76a914a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b788ac","0014a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b7","a91473e39884cb71ae4e5ac9739e9225026c99763e6687"}}, nullopt, {{0}, {1}});
CheckUnparsable("combo([012345678]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([012345678]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", "Fingerprint is not 4 bytes (9 characters instead of 8 characters)"); // Too long key fingerprint
CheckUnparsable("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483648)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483648)", "Key path value 2147483648 is out of range"); // BIP 32 path element overflow
CheckUnparsable("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/1aa)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/1aa)", "Key path value '1aa' is not a valid uint32"); // Path is not valid uint
// Multisig constructions
- Check("multi(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "multi(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}});
- Check("sortedmulti(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "sortedmulti(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}});
- Check("sortedmulti(1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "sortedmulti(1,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}});
- Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, {{0x8000006FUL,222},{0}});
- Check("sortedmulti(2,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/*,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0/0/*)", "sortedmulti(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/0/*)", RANGE, {{"5221025d5fc65ebb8d44a5274b53bac21ff8307fec2334a32df05553459f8b1f7fe1b62102fbd47cc8034098f0e6a94c6aeee8528abf0a2153a5d8e46d325b7284c046784652ae"}, {"52210264fd4d1f5dea8ded94c61e9641309349b62f27fbffe807291f664e286bfbe6472103f4ece6dfccfa37b211eb3d0af4d0c61dba9ef698622dc17eecdf764beeb005a652ae"}, {"5221022ccabda84c30bad578b13c89eb3b9544ce149787e5b538175b1d1ba259cbb83321024d902e1a2fc7a8755ab5b694c575fce742c48d9ff192e63df5193e4c7afe1f9c52ae"}}, {{0}, {1}, {2}, {0, 0, 0}, {0, 0, 1}, {0, 0, 2}});
- Check("wsh(multi(2,xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", HARDENED | RANGE, {{"0020b92623201f3bb7c3771d45b2ad1d0351ea8fbf8cfe0a0e570264e1075fa1948f"},{"002036a08bbe4923af41cf4316817c93b8d37e2f635dd25cfff06bd50df6ae7ea203"},{"0020a96e7ab4607ca6b261bfe3245ffda9c746b28d3f59e83d34820ec0e2b36c139c"}}, {{0xFFFFFFFFUL,0}, {1,2,0}, {1,2,1}, {1,2,2}, {10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}});
- Check("sh(wsh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9)))","sh(wsh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232)))", SIGNABLE, {{"a9147fc63e13dc25e8a95a3cee3d9a714ac3afd96f1e87"}});
+ Check("multi(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "multi(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt);
+ Check("sortedmulti(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "sortedmulti(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt);
+ Check("sortedmulti(1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "sortedmulti(1,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt);
+ Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}});
+ Check("sortedmulti(2,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/*,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0/0/*)", "sortedmulti(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/0/*)", RANGE, {{"5221025d5fc65ebb8d44a5274b53bac21ff8307fec2334a32df05553459f8b1f7fe1b62102fbd47cc8034098f0e6a94c6aeee8528abf0a2153a5d8e46d325b7284c046784652ae"}, {"52210264fd4d1f5dea8ded94c61e9641309349b62f27fbffe807291f664e286bfbe6472103f4ece6dfccfa37b211eb3d0af4d0c61dba9ef698622dc17eecdf764beeb005a652ae"}, {"5221022ccabda84c30bad578b13c89eb3b9544ce149787e5b538175b1d1ba259cbb83321024d902e1a2fc7a8755ab5b694c575fce742c48d9ff192e63df5193e4c7afe1f9c52ae"}}, nullopt, {{0}, {1}, {2}, {0, 0, 0}, {0, 0, 1}, {0, 0, 2}});
+ Check("wsh(multi(2,xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", HARDENED | RANGE, {{"0020b92623201f3bb7c3771d45b2ad1d0351ea8fbf8cfe0a0e570264e1075fa1948f"},{"002036a08bbe4923af41cf4316817c93b8d37e2f635dd25cfff06bd50df6ae7ea203"},{"0020a96e7ab4607ca6b261bfe3245ffda9c746b28d3f59e83d34820ec0e2b36c139c"}}, OutputType::BECH32, {{0xFFFFFFFFUL,0}, {1,2,0}, {1,2,1}, {1,2,2}, {10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}});
+ Check("sh(wsh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9)))","sh(wsh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232)))", SIGNABLE, {{"a9147fc63e13dc25e8a95a3cee3d9a714ac3afd96f1e87"}}, OutputType::P2SH_SEGWIT);
CheckUnparsable("sh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9))","sh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232))", "P2SH script is too large, 547 bytes is larger than 520 bytes"); // P2SH does not fit 16 compressed pubkeys in a redeemscript
CheckUnparsable("wsh(multi(2,[aaaaaaaa][aaaaaaaa]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,[aaaaaaaa][aaaaaaaa]xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", "Multiple ']' characters found for a single pubkey"); // Double key origin descriptor
CheckUnparsable("wsh(multi(2,[aaaagaaa]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,[aaagaaaa]xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", "Fingerprint 'aaagaaaa' is not hex"); // Non hex fingerprint
@@ -280,8 +284,8 @@ BOOST_AUTO_TEST_CASE(descriptor_test)
CheckUnparsable("wsh(wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "wsh(wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", "Cannot have wsh within wsh"); // Cannot embed P2WSH inside P2WSH
// Checksums
- Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxfy", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5t", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, {{0x8000006FUL,222},{0}});
- Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, {{0x8000006FUL,222},{0}});
+ Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxfy", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5t", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}});
+ Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}});
CheckUnparsable("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#", "Expected 8 character checksum, not 0 characters"); // Empty checksum
CheckUnparsable("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxfyq", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5tq", "Expected 8 character checksum, not 9 characters"); // Too long checksum
CheckUnparsable("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxf", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5", "Expected 8 character checksum, not 7 characters"); // Too short checksum
diff --git a/src/test/fuzz/FuzzedDataProvider.h b/src/test/fuzz/FuzzedDataProvider.h
index 1b5b4bb012..3e069eba69 100644
--- a/src/test/fuzz/FuzzedDataProvider.h
+++ b/src/test/fuzz/FuzzedDataProvider.h
@@ -13,11 +13,10 @@
#ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
#define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
-#include <limits.h>
-#include <stddef.h>
-#include <stdint.h>
-
#include <algorithm>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
#include <cstring>
#include <initializer_list>
#include <string>
@@ -25,8 +24,10 @@
#include <utility>
#include <vector>
+// In addition to the comments below, the API is also briefly documented at
+// https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider
class FuzzedDataProvider {
-public:
+ public:
// |data| is an array of length |size| that the FuzzedDataProvider wraps to
// provide more granular access. |data| must outlive the FuzzedDataProvider.
FuzzedDataProvider(const uint8_t *data, size_t size)
@@ -143,9 +144,9 @@ public:
return ConsumeBytes<T>(remaining_bytes_);
}
+ // Returns a std::string containing all remaining bytes of the input data.
// Prefer using |ConsumeRemainingBytes| unless you actually need a std::string
// object.
- // Returns a std::vector containing all remaining bytes of the input data.
std::string ConsumeRemainingBytesAsString() {
return ConsumeBytesAsString(remaining_bytes_);
}
@@ -161,7 +162,7 @@ public:
// Reads one byte and returns a bool, or false when no data remains.
bool ConsumeBool() { return 1 & ConsumeIntegral<uint8_t>(); }
- // Returns a copy of a value selected from a fixed-size |array|.
+ // Returns a copy of the value selected from the given fixed-size |array|.
template <typename T, size_t size>
T PickValueInArray(const T (&array)[size]) {
static_assert(size > 0, "The array must be non empty.");
@@ -170,11 +171,14 @@ public:
template <typename T>
T PickValueInArray(std::initializer_list<const T> list) {
- // static_assert(list.size() > 0, "The array must be non empty.");
+ // TODO(Dor1s): switch to static_assert once C++14 is allowed.
+ if (!list.size())
+ abort();
+
return *(list.begin() + ConsumeIntegralInRange<size_t>(0, list.size() - 1));
}
- // Return an enum value. The enum must start at 0 and be contiguous. It must
+ // Returns an enum value. The enum must start at 0 and be contiguous. It must
// also contain |kMaxValue| aliased to its largest (inclusive) value. Such as:
// enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue };
template <typename T> T ConsumeEnum() {
@@ -183,10 +187,60 @@ public:
0, static_cast<uint32_t>(T::kMaxValue)));
}
+ // Returns a floating point number in the range [0.0, 1.0]. If there's no
+ // input data left, always returns 0.
+ template <typename T> T ConsumeProbability() {
+ static_assert(std::is_floating_point<T>::value,
+ "A floating point type is required.");
+
+ // Use different integral types for different floating point types in order
+ // to provide better density of the resulting values.
+ using IntegralType =
+ typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t,
+ uint64_t>::type;
+
+ T result = static_cast<T>(ConsumeIntegral<IntegralType>());
+ result /= static_cast<T>(std::numeric_limits<IntegralType>::max());
+ return result;
+ }
+
+ // Returns a floating point value in the range [Type's lowest, Type's max] by
+ // consuming bytes from the input data. If there's no input data left, always
+ // returns approximately 0.
+ template <typename T> T ConsumeFloatingPoint() {
+ return ConsumeFloatingPointInRange<T>(std::numeric_limits<T>::lowest(),
+ std::numeric_limits<T>::max());
+ }
+
+ // Returns a floating point value in the given range by consuming bytes from
+ // the input data. If there's no input data left, returns |min|. Note that
+ // |min| must be less than or equal to |max|.
+ template <typename T> T ConsumeFloatingPointInRange(T min, T max) {
+ if (min > max)
+ abort();
+
+ T range = .0;
+ T result = min;
+ constexpr T zero(.0);
+ if (max > zero && min < zero && max > min + std::numeric_limits<T>::max()) {
+ // The diff |max - min| would overflow the given floating point type. Use
+ // the half of the diff as the range and consume a bool to decide whether
+ // the result is in the first or the second part of the diff.
+ range = (max / 2.0) - (min / 2.0);
+ if (ConsumeBool()) {
+ result += range;
+ }
+ } else {
+ range = max - min;
+ }
+
+ return result + range * ConsumeProbability<T>();
+ }
+
// Reports the remaining bytes available for fuzzed input.
size_t remaining_bytes() { return remaining_bytes_; }
-private:
+ private:
FuzzedDataProvider(const FuzzedDataProvider &) = delete;
FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete;
@@ -209,6 +263,12 @@ private:
// which seems to be a natural choice for other implementations as well.
// To increase the odds even more, we also call |shrink_to_fit| below.
std::vector<T> result(size);
+ if (size == 0) {
+ if (num_bytes_to_consume != 0)
+ abort();
+ return result;
+ }
+
std::memcpy(result.data(), data_ptr_, num_bytes_to_consume);
Advance(num_bytes_to_consume);
@@ -230,9 +290,9 @@ private:
// Avoid using implementation-defined unsigned to signed conversions.
// To learn more, see https://stackoverflow.com/questions/13150449.
- if (value <= std::numeric_limits<TS>::max())
+ if (value <= std::numeric_limits<TS>::max()) {
return static_cast<TS>(value);
- else {
+ } else {
constexpr auto TS_min = std::numeric_limits<TS>::min();
return TS_min + static_cast<char>(value - TS_min);
}
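The new floating-point helpers documented above (ConsumeProbability, ConsumeFloatingPoint, ConsumeFloatingPointInRange) slot into a harness roughly as in this minimal sketch; the chosen ranges are illustrative only and not part of this patch:

#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>

#include <cstdint>
#include <vector>

void test_one_input(const std::vector<uint8_t>& buffer)
{
    FuzzedDataProvider provider(buffer.data(), buffer.size());
    // Probability in [0.0, 1.0]; returns 0 once the input is exhausted.
    const double p = provider.ConsumeProbability<double>();
    // Anywhere in [lowest, max] for the type.
    const double d = provider.ConsumeFloatingPoint<double>();
    // Caller-bounded range; returns the lower bound once the input is exhausted.
    const float f = provider.ConsumeFloatingPointInRange<float>(-1.0f, 1.0f);
    (void)p;
    (void)d;
    (void)f;
}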
diff --git a/src/test/fuzz/asmap.cpp b/src/test/fuzz/asmap.cpp
new file mode 100644
index 0000000000..7f3eef79a1
--- /dev/null
+++ b/src/test/fuzz/asmap.cpp
@@ -0,0 +1,28 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <netaddress.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+
+#include <cstdint>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const Network network = fuzzed_data_provider.PickValueInArray({NET_IPV4, NET_IPV6});
+ if (fuzzed_data_provider.remaining_bytes() < 16) {
+ return;
+ }
+ CNetAddr net_addr;
+ net_addr.SetRaw(network, fuzzed_data_provider.ConsumeBytes<uint8_t>(16).data());
+ std::vector<bool> asmap;
+ for (const char cur_byte : fuzzed_data_provider.ConsumeRemainingBytes<char>()) {
+ for (int bit = 0; bit < 8; ++bit) {
+ asmap.push_back((cur_byte >> bit) & 1);
+ }
+ }
+ (void)net_addr.GetMappedAS(asmap);
+}
diff --git a/src/test/fuzz/bloom_filter.cpp b/src/test/fuzz/bloom_filter.cpp
new file mode 100644
index 0000000000..d1112f8e62
--- /dev/null
+++ b/src/test/fuzz/bloom_filter.cpp
@@ -0,0 +1,80 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <bloom.h>
+#include <optional.h>
+#include <primitives/transaction.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+#include <uint256.h>
+
+#include <cassert>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
+ CBloomFilter bloom_filter{
+ fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(1, 10000000),
+ 1.0 / fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(1, std::numeric_limits<unsigned int>::max()),
+ fuzzed_data_provider.ConsumeIntegral<unsigned int>(),
+ static_cast<unsigned char>(fuzzed_data_provider.PickValueInArray({BLOOM_UPDATE_NONE, BLOOM_UPDATE_ALL, BLOOM_UPDATE_P2PUBKEY_ONLY, BLOOM_UPDATE_MASK}))};
+ while (fuzzed_data_provider.remaining_bytes() > 0) {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 6)) {
+ case 0: {
+ const std::vector<unsigned char> b = ConsumeRandomLengthByteVector(fuzzed_data_provider);
+ (void)bloom_filter.contains(b);
+ bloom_filter.insert(b);
+ const bool present = bloom_filter.contains(b);
+ assert(present);
+ break;
+ }
+ case 1: {
+ const Optional<COutPoint> out_point = ConsumeDeserializable<COutPoint>(fuzzed_data_provider);
+ if (!out_point) {
+ break;
+ }
+ (void)bloom_filter.contains(*out_point);
+ bloom_filter.insert(*out_point);
+ const bool present = bloom_filter.contains(*out_point);
+ assert(present);
+ break;
+ }
+ case 2: {
+ const Optional<uint256> u256 = ConsumeDeserializable<uint256>(fuzzed_data_provider);
+ if (!u256) {
+ break;
+ }
+ (void)bloom_filter.contains(*u256);
+ bloom_filter.insert(*u256);
+ const bool present = bloom_filter.contains(*u256);
+ assert(present);
+ break;
+ }
+ case 3:
+ bloom_filter.clear();
+ break;
+ case 4:
+ bloom_filter.reset(fuzzed_data_provider.ConsumeIntegral<unsigned int>());
+ break;
+ case 5: {
+ const Optional<CMutableTransaction> mut_tx = ConsumeDeserializable<CMutableTransaction>(fuzzed_data_provider);
+ if (!mut_tx) {
+ break;
+ }
+ const CTransaction tx{*mut_tx};
+ (void)bloom_filter.IsRelevantAndUpdate(tx);
+ break;
+ }
+ case 6:
+ bloom_filter.UpdateEmptyFull();
+ break;
+ }
+ (void)bloom_filter.IsWithinSizeConstraints();
+ }
+}
diff --git a/src/test/fuzz/decode_tx.cpp b/src/test/fuzz/decode_tx.cpp
new file mode 100644
index 0000000000..09c4ff05df
--- /dev/null
+++ b/src/test/fuzz/decode_tx.cpp
@@ -0,0 +1,31 @@
+// Copyright (c) 2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <core_io.h>
+#include <primitives/transaction.h>
+#include <test/fuzz/fuzz.h>
+#include <util/strencodings.h>
+
+#include <cassert>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ const std::string tx_hex = HexStr(std::string{buffer.begin(), buffer.end()});
+ CMutableTransaction mtx;
+ const bool result_none = DecodeHexTx(mtx, tx_hex, false, false);
+ const bool result_try_witness = DecodeHexTx(mtx, tx_hex, false, true);
+ const bool result_try_witness_and_maybe_no_witness = DecodeHexTx(mtx, tx_hex, true, true);
+ const bool result_try_no_witness = DecodeHexTx(mtx, tx_hex, true, false);
+ assert(!result_none);
+ if (result_try_witness_and_maybe_no_witness) {
+ assert(result_try_no_witness || result_try_witness);
+ }
+ // if (result_try_no_witness) { // Uncomment when https://github.com/bitcoin/bitcoin/pull/17775 is merged
+ if (result_try_witness) { // Remove stop-gap when https://github.com/bitcoin/bitcoin/pull/17775 is merged
+ assert(result_try_witness_and_maybe_no_witness);
+ }
+}
diff --git a/src/test/fuzz/deserialize.cpp b/src/test/fuzz/deserialize.cpp
index bd05283b78..f06f339b9d 100644
--- a/src/test/fuzz/deserialize.cpp
+++ b/src/test/fuzz/deserialize.cpp
@@ -206,7 +206,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
DeserializeFromFuzzingInput(buffer, dbi);
#elif TXOUTCOMPRESSOR_DESERIALIZE
CTxOut to;
- CTxOutCompressor toc(to);
+ auto toc = Using<TxOutCompression>(to);
DeserializeFromFuzzingInput(buffer, toc);
#elif BLOCKTRANSACTIONS_DESERIALIZE
BlockTransactions bt;
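For context on the CTxOutCompressor -> Using<TxOutCompression> change above: the compression formatter is now applied through the generic Using<> wrapper from serialize.h rather than a dedicated wrapper class. A minimal round-trip sketch under that assumption; the stream type, version constant, and include list are illustrative:

#include <compressor.h>
#include <primitives/transaction.h>
#include <serialize.h>
#include <streams.h>
#include <version.h>

void roundtrip_compressed_txout()
{
    CTxOut out;
    out.nValue = 1000;

    CDataStream ss(SER_DISK, PROTOCOL_VERSION);
    ss << Using<TxOutCompression>(out);      // serialize via the compression formatter
    CTxOut decoded;
    ss >> Using<TxOutCompression>(decoded);  // deserialize through the same formatter
}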
diff --git a/src/test/fuzz/float.cpp b/src/test/fuzz/float.cpp
new file mode 100644
index 0000000000..a24bae5b35
--- /dev/null
+++ b/src/test/fuzz/float.cpp
@@ -0,0 +1,42 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <memusage.h>
+#include <serialize.h>
+#include <streams.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <version.h>
+
+#include <cassert>
+#include <cstdint>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
+ {
+ const double d = fuzzed_data_provider.ConsumeFloatingPoint<double>();
+ (void)memusage::DynamicUsage(d);
+ assert(ser_uint64_to_double(ser_double_to_uint64(d)) == d);
+
+ CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+ stream << d;
+ double d_deserialized;
+ stream >> d_deserialized;
+ assert(d == d_deserialized);
+ }
+
+ {
+ const float f = fuzzed_data_provider.ConsumeFloatingPoint<float>();
+ (void)memusage::DynamicUsage(f);
+ assert(ser_uint32_to_float(ser_float_to_uint32(f)) == f);
+
+ CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+ stream << f;
+ float f_deserialized;
+ stream >> f_deserialized;
+ assert(f == f_deserialized);
+ }
+}
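float.cpp asserts that the bit-level encode/decode helpers and stream serialization round-trip every fuzzed value exactly. The same round-trip idea in a standalone form, assuming only that the encoding is a plain byte copy of the IEEE-754 representation (DoubleToBits/BitsToDouble are made-up names, not the helpers from serialize.h):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Illustrative bit-level encode/decode pair.
    uint64_t DoubleToBits(double d)
    {
        uint64_t u;
        static_assert(sizeof(u) == sizeof(d), "expect 64-bit double");
        std::memcpy(&u, &d, sizeof(d));
        return u;
    }

    double BitsToDouble(uint64_t u)
    {
        double d;
        std::memcpy(&d, &u, sizeof(d));
        return d;
    }

    int main()
    {
        // Round-trip is exact for any non-NaN value (NaN != NaN would break the comparison).
        for (double d : {0.0, -0.0, 1.5, -2.25e10, 1e-300}) {
            assert(BitsToDouble(DoubleToBits(d)) == d);
        }
        return 0;
    }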
diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp
index da4e623e98..a085e36911 100644
--- a/src/test/fuzz/fuzz.cpp
+++ b/src/test/fuzz/fuzz.cpp
@@ -4,10 +4,15 @@
#include <test/fuzz/fuzz.h>
+#include <test/util/setup_common.h>
+
#include <cstdint>
#include <unistd.h>
#include <vector>
+const std::function<void(const std::string&)> G_TEST_LOG_FUN{};
+
+#if defined(__AFL_COMPILER)
static bool read_stdin(std::vector<uint8_t>& data)
{
uint8_t buffer[1024];
@@ -19,6 +24,7 @@ static bool read_stdin(std::vector<uint8_t>& data)
}
return length == 0;
}
+#endif
// Default initialization: Override using a non-weak initialize().
__attribute__((weak)) void initialize()
@@ -40,9 +46,9 @@ extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv)
return 0;
}
-// Declare main(...) "weak" to allow for libFuzzer linking. libFuzzer provides
-// the main(...) function.
-__attribute__((weak)) int main(int argc, char** argv)
+// Generally, the fuzzer will provide main(), except for AFL
+#if defined(__AFL_COMPILER)
+int main(int argc, char** argv)
{
initialize();
#ifdef __AFL_INIT
@@ -70,3 +76,4 @@ __attribute__((weak)) int main(int argc, char** argv)
#endif
return 0;
}
+#endif
diff --git a/src/test/fuzz/hex.cpp b/src/test/fuzz/hex.cpp
index 54693180be..2de6100d7b 100644
--- a/src/test/fuzz/hex.cpp
+++ b/src/test/fuzz/hex.cpp
@@ -2,8 +2,12 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <core_io.h>
+#include <primitives/block.h>
+#include <rpc/util.h>
#include <test/fuzz/fuzz.h>
-
+#include <uint256.h>
+#include <univalue.h>
#include <util/strencodings.h>
#include <cassert>
@@ -19,4 +23,14 @@ void test_one_input(const std::vector<uint8_t>& buffer)
if (IsHex(random_hex_string)) {
assert(ToLower(random_hex_string) == hex_data);
}
+ (void)IsHexNumber(random_hex_string);
+ uint256 result;
+ (void)ParseHashStr(random_hex_string, result);
+ (void)uint256S(random_hex_string);
+ try {
+ (void)HexToPubKey(random_hex_string);
+ } catch (const UniValue&) {
+ }
+ CBlockHeader block_header;
+ (void)DecodeHexBlockHeader(block_header, random_hex_string);
}
diff --git a/src/test/fuzz/integer.cpp b/src/test/fuzz/integer.cpp
index 723938bcdb..350d3d3331 100644
--- a/src/test/fuzz/integer.cpp
+++ b/src/test/fuzz/integer.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <amount.h>
#include <arith_uint256.h>
#include <compressor.h>
#include <consensus/merkle.h>
@@ -18,12 +19,15 @@
#include <script/signingprovider.h>
#include <script/standard.h>
#include <serialize.h>
+#include <streams.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <uint256.h>
+#include <util/moneystr.h>
#include <util/strencodings.h>
#include <util/system.h>
#include <util/time.h>
+#include <version.h>
#include <cassert>
#include <limits>
@@ -53,10 +57,18 @@ void test_one_input(const std::vector<uint8_t>& buffer)
// We cannot assume a specific value of std::is_signed<char>::value:
// ConsumeIntegral<char>() instead of casting from {u,}int8_t.
const char ch = fuzzed_data_provider.ConsumeIntegral<char>();
+ const bool b = fuzzed_data_provider.ConsumeBool();
const Consensus::Params& consensus_params = Params().GetConsensus();
(void)CheckProofOfWork(u256, u32, consensus_params);
- (void)CompressAmount(u64);
+ if (u64 <= MAX_MONEY) {
+ const uint64_t compressed_money_amount = CompressAmount(u64);
+ assert(u64 == DecompressAmount(compressed_money_amount));
+ static const uint64_t compressed_money_amount_max = CompressAmount(MAX_MONEY - 1);
+ assert(compressed_money_amount <= compressed_money_amount_max);
+ } else {
+ (void)CompressAmount(u64);
+ }
static const uint256 u256_min(uint256S("0000000000000000000000000000000000000000000000000000000000000000"));
static const uint256 u256_max(uint256S("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
const std::vector<uint256> v256{u256, u256_min, u256_max};
@@ -65,11 +77,19 @@ void test_one_input(const std::vector<uint8_t>& buffer)
(void)DecompressAmount(u64);
(void)FormatISO8601Date(i64);
(void)FormatISO8601DateTime(i64);
+ // FormatMoney(i) not defined when i == std::numeric_limits<int64_t>::min()
+ if (i64 != std::numeric_limits<int64_t>::min()) {
+ int64_t parsed_money;
+ if (ParseMoney(FormatMoney(i64), parsed_money)) {
+ assert(parsed_money == i64);
+ }
+ }
(void)GetSizeOfCompactSize(u64);
(void)GetSpecialScriptSize(u32);
// (void)GetVirtualTransactionSize(i64, i64); // function defined only for a subset of int64_t inputs
// (void)GetVirtualTransactionSize(i64, i64, u32); // function defined only for a subset of int64_t/uint32_t inputs
(void)HexDigit(ch);
+ (void)MoneyRange(i64);
(void)i64tostr(i64);
(void)IsDigit(ch);
(void)IsSpace(ch);
@@ -95,6 +115,14 @@ void test_one_input(const std::vector<uint8_t>& buffer)
(void)SipHashUint256(u64, u64, u256);
(void)SipHashUint256Extra(u64, u64, u256, u32);
(void)ToLower(ch);
+ (void)ToUpper(ch);
+ // ValueFromAmount(i) not defined when i == std::numeric_limits<int64_t>::min()
+ if (i64 != std::numeric_limits<int64_t>::min()) {
+ int64_t parsed_money;
+ if (ParseMoney(ValueFromAmount(i64).getValStr(), parsed_money)) {
+ assert(parsed_money == i64);
+ }
+ }
const arith_uint256 au256 = UintToArith256(u256);
assert(ArithToUint256(au256) == u256);
@@ -124,4 +152,68 @@ void test_one_input(const std::vector<uint8_t>& buffer)
(void)GetScriptForDestination(destination);
(void)IsValidDestination(destination);
}
+
+ {
+ CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+
+ uint256 deserialized_u256;
+ stream << u256;
+ stream >> deserialized_u256;
+ assert(u256 == deserialized_u256 && stream.empty());
+
+ uint160 deserialized_u160;
+ stream << u160;
+ stream >> deserialized_u160;
+ assert(u160 == deserialized_u160 && stream.empty());
+
+ uint64_t deserialized_u64;
+ stream << u64;
+ stream >> deserialized_u64;
+ assert(u64 == deserialized_u64 && stream.empty());
+
+ int64_t deserialized_i64;
+ stream << i64;
+ stream >> deserialized_i64;
+ assert(i64 == deserialized_i64 && stream.empty());
+
+ uint32_t deserialized_u32;
+ stream << u32;
+ stream >> deserialized_u32;
+ assert(u32 == deserialized_u32 && stream.empty());
+
+ int32_t deserialized_i32;
+ stream << i32;
+ stream >> deserialized_i32;
+ assert(i32 == deserialized_i32 && stream.empty());
+
+ uint16_t deserialized_u16;
+ stream << u16;
+ stream >> deserialized_u16;
+ assert(u16 == deserialized_u16 && stream.empty());
+
+ int16_t deserialized_i16;
+ stream << i16;
+ stream >> deserialized_i16;
+ assert(i16 == deserialized_i16 && stream.empty());
+
+ uint8_t deserialized_u8;
+ stream << u8;
+ stream >> deserialized_u8;
+ assert(u8 == deserialized_u8 && stream.empty());
+
+ int8_t deserialized_i8;
+ stream << i8;
+ stream >> deserialized_i8;
+ assert(i8 == deserialized_i8 && stream.empty());
+
+ char deserialized_ch;
+ stream << ch;
+ stream >> deserialized_ch;
+ assert(ch == deserialized_ch && stream.empty());
+
+ bool deserialized_b;
+ stream << b;
+ stream >> deserialized_b;
+ assert(b == deserialized_b && stream.empty());
+ }
}
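The block added above applies one pattern to every integer width: write the value to a stream, read it back, and assert equality plus an empty stream. A standalone sketch of that round-trip check for a single width, with a plain byte vector standing in for CDataStream (EncodeLE64/DecodeLE64 are illustrative helpers, not tree code):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Little-endian encode/decode for uint64_t, standing in for stream (de)serialization.
    std::vector<uint8_t> EncodeLE64(uint64_t v)
    {
        std::vector<uint8_t> out(8);
        for (int i = 0; i < 8; ++i) out[i] = static_cast<uint8_t>(v >> (8 * i));
        return out;
    }

    uint64_t DecodeLE64(const std::vector<uint8_t>& in)
    {
        assert(in.size() == 8);
        uint64_t v = 0;
        for (int i = 0; i < 8; ++i) v |= static_cast<uint64_t>(in[i]) << (8 * i);
        return v;
    }

    int main()
    {
        for (uint64_t v : {0ULL, 1ULL, 0xdeadbeefULL, ~0ULL}) {
            assert(DecodeLE64(EncodeLE64(v)) == v); // round trip must be lossless
        }
        return 0;
    }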
diff --git a/src/test/fuzz/key.cpp b/src/test/fuzz/key.cpp
new file mode 100644
index 0000000000..1919a5f881
--- /dev/null
+++ b/src/test/fuzz/key.cpp
@@ -0,0 +1,309 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <chainparams.h>
+#include <chainparamsbase.h>
+#include <key.h>
+#include <key_io.h>
+#include <outputtype.h>
+#include <policy/policy.h>
+#include <pubkey.h>
+#include <rpc/util.h>
+#include <script/keyorigin.h>
+#include <script/script.h>
+#include <script/sign.h>
+#include <script/signingprovider.h>
+#include <script/standard.h>
+#include <streams.h>
+#include <test/fuzz/fuzz.h>
+#include <util/memory.h>
+#include <util/strencodings.h>
+
+#include <cassert>
+#include <cstdint>
+#include <numeric>
+#include <string>
+#include <vector>
+
+void initialize()
+{
+ static const ECCVerifyHandle ecc_verify_handle;
+ ECC_Start();
+ SelectParams(CBaseChainParams::REGTEST);
+}
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ const CKey key = [&] {
+ CKey k;
+ k.Set(buffer.begin(), buffer.end(), true);
+ return k;
+ }();
+ if (!key.IsValid()) {
+ return;
+ }
+
+ {
+ assert(key.begin() + key.size() == key.end());
+ assert(key.IsCompressed());
+ assert(key.size() == 32);
+ assert(DecodeSecret(EncodeSecret(key)) == key);
+ }
+
+ {
+ CKey invalid_key;
+ assert(!(invalid_key == key));
+ assert(!invalid_key.IsCompressed());
+ assert(!invalid_key.IsValid());
+ assert(invalid_key.size() == 0);
+ }
+
+ {
+ CKey uncompressed_key;
+ uncompressed_key.Set(buffer.begin(), buffer.end(), false);
+ assert(!(uncompressed_key == key));
+ assert(!uncompressed_key.IsCompressed());
+ assert(key.size() == 32);
+ assert(uncompressed_key.begin() + uncompressed_key.size() == uncompressed_key.end());
+ assert(uncompressed_key.IsValid());
+ }
+
+ {
+ CKey copied_key;
+ copied_key.Set(key.begin(), key.end(), key.IsCompressed());
+ assert(copied_key == key);
+ }
+
+ {
+ CKey negated_key = key;
+ negated_key.Negate();
+ assert(negated_key.IsValid());
+ assert(!(negated_key == key));
+
+ negated_key.Negate();
+ assert(negated_key == key);
+ }
+
+ const uint256 random_uint256 = Hash(buffer.begin(), buffer.end());
+
+ {
+ CKey child_key;
+ ChainCode child_chaincode;
+ const bool ok = key.Derive(child_key, child_chaincode, 0, random_uint256);
+ assert(ok);
+ assert(child_key.IsValid());
+ assert(!(child_key == key));
+ assert(child_chaincode != random_uint256);
+ }
+
+ const CPubKey pubkey = key.GetPubKey();
+
+ {
+ assert(pubkey.size() == 33);
+ assert(key.VerifyPubKey(pubkey));
+ assert(pubkey.GetHash() != random_uint256);
+ assert(pubkey.begin() + pubkey.size() == pubkey.end());
+ assert(pubkey.data() == pubkey.begin());
+ assert(pubkey.IsCompressed());
+ assert(pubkey.IsValid());
+ assert(pubkey.IsFullyValid());
+ assert(HexToPubKey(HexStr(pubkey.begin(), pubkey.end())) == pubkey);
+ assert(GetAllDestinationsForKey(pubkey).size() == 3);
+ }
+
+ {
+ CDataStream data_stream{SER_NETWORK, INIT_PROTO_VERSION};
+ pubkey.Serialize(data_stream);
+
+ CPubKey pubkey_deserialized;
+ pubkey_deserialized.Unserialize(data_stream);
+ assert(pubkey_deserialized == pubkey);
+ }
+
+ {
+ const CScript tx_pubkey_script = GetScriptForRawPubKey(pubkey);
+ assert(!tx_pubkey_script.IsPayToScriptHash());
+ assert(!tx_pubkey_script.IsPayToWitnessScriptHash());
+ assert(!tx_pubkey_script.IsPushOnly());
+ assert(!tx_pubkey_script.IsUnspendable());
+ assert(tx_pubkey_script.HasValidOps());
+ assert(tx_pubkey_script.size() == 35);
+
+ const CScript tx_multisig_script = GetScriptForMultisig(1, {pubkey});
+ assert(!tx_multisig_script.IsPayToScriptHash());
+ assert(!tx_multisig_script.IsPayToWitnessScriptHash());
+ assert(!tx_multisig_script.IsPushOnly());
+ assert(!tx_multisig_script.IsUnspendable());
+ assert(tx_multisig_script.HasValidOps());
+ assert(tx_multisig_script.size() == 37);
+
+ FillableSigningProvider fillable_signing_provider;
+ assert(IsSolvable(fillable_signing_provider, tx_pubkey_script));
+ assert(IsSolvable(fillable_signing_provider, tx_multisig_script));
+ assert(!IsSegWitOutput(fillable_signing_provider, tx_pubkey_script));
+ assert(!IsSegWitOutput(fillable_signing_provider, tx_multisig_script));
+ assert(fillable_signing_provider.GetKeys().size() == 0);
+ assert(!fillable_signing_provider.HaveKey(pubkey.GetID()));
+
+ const bool ok_add_key = fillable_signing_provider.AddKey(key);
+ assert(ok_add_key);
+ assert(fillable_signing_provider.HaveKey(pubkey.GetID()));
+
+ FillableSigningProvider fillable_signing_provider_pub;
+ assert(!fillable_signing_provider_pub.HaveKey(pubkey.GetID()));
+
+ const bool ok_add_key_pubkey = fillable_signing_provider_pub.AddKeyPubKey(key, pubkey);
+ assert(ok_add_key_pubkey);
+ assert(fillable_signing_provider_pub.HaveKey(pubkey.GetID()));
+
+ txnouttype which_type_tx_pubkey;
+ const bool is_standard_tx_pubkey = IsStandard(tx_pubkey_script, which_type_tx_pubkey);
+ assert(is_standard_tx_pubkey);
+ assert(which_type_tx_pubkey == txnouttype::TX_PUBKEY);
+
+ txnouttype which_type_tx_multisig;
+ const bool is_standard_tx_multisig = IsStandard(tx_multisig_script, which_type_tx_multisig);
+ assert(is_standard_tx_multisig);
+ assert(which_type_tx_multisig == txnouttype::TX_MULTISIG);
+
+ std::vector<std::vector<unsigned char>> v_solutions_ret_tx_pubkey;
+ const txnouttype outtype_tx_pubkey = Solver(tx_pubkey_script, v_solutions_ret_tx_pubkey);
+ assert(outtype_tx_pubkey == txnouttype::TX_PUBKEY);
+ assert(v_solutions_ret_tx_pubkey.size() == 1);
+ assert(v_solutions_ret_tx_pubkey[0].size() == 33);
+
+ std::vector<std::vector<unsigned char>> v_solutions_ret_tx_multisig;
+ const txnouttype outtype_tx_multisig = Solver(tx_multisig_script, v_solutions_ret_tx_multisig);
+ assert(outtype_tx_multisig == txnouttype::TX_MULTISIG);
+ assert(v_solutions_ret_tx_multisig.size() == 3);
+ assert(v_solutions_ret_tx_multisig[0].size() == 1);
+ assert(v_solutions_ret_tx_multisig[1].size() == 33);
+ assert(v_solutions_ret_tx_multisig[2].size() == 1);
+
+ OutputType output_type{};
+ const CTxDestination tx_destination = GetDestinationForKey(pubkey, output_type);
+ assert(output_type == OutputType::LEGACY);
+ assert(IsValidDestination(tx_destination));
+ assert(CTxDestination{PKHash{pubkey}} == tx_destination);
+
+ const CScript script_for_destination = GetScriptForDestination(tx_destination);
+ assert(script_for_destination.size() == 25);
+
+ const std::string destination_address = EncodeDestination(tx_destination);
+ assert(DecodeDestination(destination_address) == tx_destination);
+
+ const CPubKey pubkey_from_address_string = AddrToPubKey(fillable_signing_provider, destination_address);
+ assert(pubkey_from_address_string == pubkey);
+
+ CKeyID key_id = pubkey.GetID();
+ assert(!key_id.IsNull());
+ assert(key_id == CKeyID{key_id});
+ assert(key_id == GetKeyForDestination(fillable_signing_provider, tx_destination));
+
+ CPubKey pubkey_out;
+ const bool ok_get_pubkey = fillable_signing_provider.GetPubKey(key_id, pubkey_out);
+ assert(ok_get_pubkey);
+
+ CKey key_out;
+ const bool ok_get_key = fillable_signing_provider.GetKey(key_id, key_out);
+ assert(ok_get_key);
+ assert(fillable_signing_provider.GetKeys().size() == 1);
+ assert(fillable_signing_provider.HaveKey(key_id));
+
+ KeyOriginInfo key_origin_info;
+ const bool ok_get_key_origin = fillable_signing_provider.GetKeyOrigin(key_id, key_origin_info);
+ assert(!ok_get_key_origin);
+ }
+
+ {
+ const std::vector<unsigned char> vch_pubkey{pubkey.begin(), pubkey.end()};
+ assert(CPubKey::ValidSize(vch_pubkey));
+ assert(!CPubKey::ValidSize({pubkey.begin(), pubkey.begin() + pubkey.size() - 1}));
+
+ const CPubKey pubkey_ctor_1{vch_pubkey};
+ assert(pubkey == pubkey_ctor_1);
+
+ const CPubKey pubkey_ctor_2{vch_pubkey.begin(), vch_pubkey.end()};
+ assert(pubkey == pubkey_ctor_2);
+
+ CPubKey pubkey_set;
+ pubkey_set.Set(vch_pubkey.begin(), vch_pubkey.end());
+ assert(pubkey == pubkey_set);
+ }
+
+ {
+ const CPubKey invalid_pubkey{};
+ assert(!invalid_pubkey.IsValid());
+ assert(!invalid_pubkey.IsFullyValid());
+ assert(!(pubkey == invalid_pubkey));
+ assert(pubkey != invalid_pubkey);
+ assert(pubkey < invalid_pubkey);
+ }
+
+ {
+ // Cover CPubKey's operator[](unsigned int pos)
+ unsigned int sum = 0;
+ for (size_t i = 0; i < pubkey.size(); ++i) {
+ sum += pubkey[i];
+ }
+ assert(std::accumulate(pubkey.begin(), pubkey.end(), 0U) == sum);
+ }
+
+ {
+ CPubKey decompressed_pubkey = pubkey;
+ assert(decompressed_pubkey.IsCompressed());
+
+ const bool ok = decompressed_pubkey.Decompress();
+ assert(ok);
+ assert(!decompressed_pubkey.IsCompressed());
+ assert(decompressed_pubkey.size() == 65);
+ }
+
+ {
+ std::vector<unsigned char> vch_sig;
+ const bool ok = key.Sign(random_uint256, vch_sig, false);
+ assert(ok);
+ assert(pubkey.Verify(random_uint256, vch_sig));
+ assert(CPubKey::CheckLowS(vch_sig));
+
+ const std::vector<unsigned char> vch_invalid_sig{vch_sig.begin(), vch_sig.begin() + vch_sig.size() - 1};
+ assert(!pubkey.Verify(random_uint256, vch_invalid_sig));
+ assert(!CPubKey::CheckLowS(vch_invalid_sig));
+ }
+
+ {
+ std::vector<unsigned char> vch_compact_sig;
+ const bool ok_sign_compact = key.SignCompact(random_uint256, vch_compact_sig);
+ assert(ok_sign_compact);
+
+ CPubKey recover_pubkey;
+ const bool ok_recover_compact = recover_pubkey.RecoverCompact(random_uint256, vch_compact_sig);
+ assert(ok_recover_compact);
+ assert(recover_pubkey == pubkey);
+ }
+
+ {
+ CPubKey child_pubkey;
+ ChainCode child_chaincode;
+ const bool ok = pubkey.Derive(child_pubkey, child_chaincode, 0, random_uint256);
+ assert(ok);
+ assert(child_pubkey != pubkey);
+ assert(child_pubkey.IsCompressed());
+ assert(child_pubkey.IsFullyValid());
+ assert(child_pubkey.IsValid());
+ assert(child_pubkey.size() == 33);
+ assert(child_chaincode != random_uint256);
+ }
+
+ const CPrivKey priv_key = key.GetPrivKey();
+
+ {
+ for (const bool skip_check : {true, false}) {
+ CKey loaded_key;
+ const bool ok = loaded_key.Load(priv_key, pubkey, skip_check);
+ assert(ok);
+ assert(key == loaded_key);
+ }
+ }
+}
diff --git a/src/test/fuzz/key_io.cpp b/src/test/fuzz/key_io.cpp
new file mode 100644
index 0000000000..f44628b0f8
--- /dev/null
+++ b/src/test/fuzz/key_io.cpp
@@ -0,0 +1,48 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <chainparams.h>
+#include <key_io.h>
+#include <rpc/util.h>
+#include <script/signingprovider.h>
+#include <script/standard.h>
+#include <test/fuzz/fuzz.h>
+
+#include <cassert>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void initialize()
+{
+ SelectParams(CBaseChainParams::REGTEST);
+}
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ const std::string random_string(buffer.begin(), buffer.end());
+
+ const CKey key = DecodeSecret(random_string);
+ if (key.IsValid()) {
+ assert(key == DecodeSecret(EncodeSecret(key)));
+ }
+
+ const CExtKey ext_key = DecodeExtKey(random_string);
+ if (ext_key.key.size() == 32) {
+ assert(ext_key == DecodeExtKey(EncodeExtKey(ext_key)));
+ }
+
+ const CExtPubKey ext_pub_key = DecodeExtPubKey(random_string);
+ if (ext_pub_key.pubkey.size() == CPubKey::COMPRESSED_SIZE) {
+ assert(ext_pub_key == DecodeExtPubKey(EncodeExtPubKey(ext_pub_key)));
+ }
+
+ const CTxDestination tx_destination = DecodeDestination(random_string);
+ (void)DescribeAddress(tx_destination);
+ (void)GetKeyForDestination(/* store */ {}, tx_destination);
+ (void)GetScriptForDestination(tx_destination);
+ (void)IsValidDestination(tx_destination);
+
+ (void)IsValidDestinationString(random_string);
+}
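key_io.cpp uses a decode-then-re-encode property: whenever the fuzzed string decodes to a valid object, encoding it and decoding the result must reproduce the same object. A standalone sketch of that property with a toy hex codec (nothing below comes from the tree):

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Toy hex encoder/decoder used only to illustrate the decode/encode round-trip property.
    std::string HexEncode(const std::vector<uint8_t>& in)
    {
        static const char* digits = "0123456789abcdef";
        std::string out;
        for (uint8_t b : in) {
            out.push_back(digits[b >> 4]);
            out.push_back(digits[b & 0x0f]);
        }
        return out;
    }

    bool HexDecode(const std::string& in, std::vector<uint8_t>& out)
    {
        const auto nibble = [](char c) -> int {
            if (c >= '0' && c <= '9') return c - '0';
            if (c >= 'a' && c <= 'f') return c - 'a' + 10;
            return -1;
        };
        if (in.size() % 2 != 0) return false;
        out.clear();
        for (size_t i = 0; i < in.size(); i += 2) {
            const int hi = nibble(in[i]);
            const int lo = nibble(in[i + 1]);
            if (hi < 0 || lo < 0) return false;
            out.push_back(static_cast<uint8_t>((hi << 4) | lo));
        }
        return true;
    }

    int main()
    {
        std::vector<uint8_t> decoded;
        if (HexDecode("00ffa5", decoded)) {
            // If decoding succeeded, re-encoding and decoding again must round-trip.
            std::vector<uint8_t> decoded_again;
            assert(HexDecode(HexEncode(decoded), decoded_again) && decoded_again == decoded);
        }
        return 0;
    }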
diff --git a/src/test/fuzz/locale.cpp b/src/test/fuzz/locale.cpp
new file mode 100644
index 0000000000..c8288123e8
--- /dev/null
+++ b/src/test/fuzz/locale.cpp
@@ -0,0 +1,96 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <tinyformat.h>
+#include <util/strencodings.h>
+
+#include <cassert>
+#include <clocale>
+#include <cstdint>
+#include <locale>
+#include <string>
+#include <vector>
+
+namespace {
+const std::string locale_identifiers[] = {
+ "C", "C.UTF-8", "aa_DJ", "aa_DJ.ISO-8859-1", "aa_DJ.UTF-8", "aa_ER", "aa_ER.UTF-8", "aa_ET", "aa_ET.UTF-8", "af_ZA", "af_ZA.ISO-8859-1", "af_ZA.UTF-8", "agr_PE", "agr_PE.UTF-8", "ak_GH", "ak_GH.UTF-8", "am_ET", "am_ET.UTF-8", "an_ES", "an_ES.ISO-8859-15", "an_ES.UTF-8", "anp_IN", "anp_IN.UTF-8", "ar_AE", "ar_AE.ISO-8859-6", "ar_AE.UTF-8", "ar_BH", "ar_BH.ISO-8859-6", "ar_BH.UTF-8", "ar_DZ", "ar_DZ.ISO-8859-6", "ar_DZ.UTF-8", "ar_EG", "ar_EG.ISO-8859-6", "ar_EG.UTF-8", "ar_IN", "ar_IN.UTF-8", "ar_IQ", "ar_IQ.ISO-8859-6", "ar_IQ.UTF-8", "ar_JO", "ar_JO.ISO-8859-6", "ar_JO.UTF-8", "ar_KW", "ar_KW.ISO-8859-6", "ar_KW.UTF-8", "ar_LB", "ar_LB.ISO-8859-6", "ar_LB.UTF-8", "ar_LY", "ar_LY.ISO-8859-6", "ar_LY.UTF-8", "ar_MA", "ar_MA.ISO-8859-6", "ar_MA.UTF-8", "ar_OM", "ar_OM.ISO-8859-6", "ar_OM.UTF-8", "ar_QA", "ar_QA.ISO-8859-6", "ar_QA.UTF-8", "ar_SA", "ar_SA.ISO-8859-6", "ar_SA.UTF-8", "ar_SD", "ar_SD.ISO-8859-6", "ar_SD.UTF-8", "ar_SS", "ar_SS.UTF-8", "ar_SY", "ar_SY.ISO-8859-6", "ar_SY.UTF-8", "ar_TN", "ar_TN.ISO-8859-6", "ar_TN.UTF-8", "ar_YE", "ar_YE.ISO-8859-6", "ar_YE.UTF-8", "as_IN", "as_IN.UTF-8", "ast_ES", "ast_ES.ISO-8859-15", "ast_ES.UTF-8", "ayc_PE", "ayc_PE.UTF-8", "az_AZ", "az_AZ.UTF-8", "az_IR", "az_IR.UTF-8", "be_BY", "be_BY.CP1251", "be_BY.UTF-8", "bem_ZM", "bem_ZM.UTF-8", "ber_DZ", "ber_DZ.UTF-8", "ber_MA", "ber_MA.UTF-8", "bg_BG", "bg_BG.CP1251", "bg_BG.UTF-8", "bho_IN", "bho_IN.UTF-8", "bho_NP", "bho_NP.UTF-8", "bi_VU", "bi_VU.UTF-8", "bn_BD", "bn_BD.UTF-8", "bn_IN", "bn_IN.UTF-8", "bo_CN", "bo_CN.UTF-8", "bo_IN", "bo_IN.UTF-8", "br_FR", "br_FR.ISO-8859-1", "br_FR.UTF-8", "brx_IN", "brx_IN.UTF-8", "bs_BA", "bs_BA.ISO-8859-2", "bs_BA.UTF-8", "byn_ER", "byn_ER.UTF-8", "ca_AD", "ca_AD.ISO-8859-15", "ca_AD.UTF-8", "ca_ES", "ca_ES.ISO-8859-1", "ca_ES.UTF-8", "ca_FR", "ca_FR.ISO-8859-15", "ca_FR.UTF-8", "ca_IT", "ca_IT.ISO-8859-15", "ca_IT.UTF-8", "ce_RU", "ce_RU.UTF-8", "chr_US", "chr_US.UTF-8", "ckb_IQ", "ckb_IQ.UTF-8", "cmn_TW", "cmn_TW.UTF-8", "crh_UA", "crh_UA.UTF-8", "csb_PL", "csb_PL.UTF-8", "cs_CZ", "cs_CZ.ISO-8859-2", "cs_CZ.UTF-8", "cv_RU", "cv_RU.UTF-8", "cy_GB", "cy_GB.ISO-8859-14", "cy_GB.UTF-8", "da_DK", "da_DK.ISO-8859-1", "da_DK.UTF-8", "de_AT", "de_AT.ISO-8859-1", "de_AT.UTF-8", "de_BE", "de_BE.ISO-8859-1", "de_BE.UTF-8", "de_CH", "de_CH.ISO-8859-1", "de_CH.UTF-8", "de_DE", "de_DE.ISO-8859-1", "de_DE.UTF-8", "de_IT", "de_IT.ISO-8859-1", "de_IT.UTF-8", "de_LU", "de_LU.ISO-8859-1", "de_LU.UTF-8", "doi_IN", "doi_IN.UTF-8", "dv_MV", "dv_MV.UTF-8", "dz_BT", "dz_BT.UTF-8", "el_CY", "el_CY.ISO-8859-7", "el_CY.UTF-8", "el_GR", "el_GR.ISO-8859-7", "el_GR.UTF-8", "en_AG", "en_AG.UTF-8", "en_AU", "en_AU.ISO-8859-1", "en_AU.UTF-8", "en_BW", "en_BW.ISO-8859-1", "en_BW.UTF-8", "en_CA", "en_CA.ISO-8859-1", "en_CA.UTF-8", "en_DK", "en_DK.ISO-8859-1", "en_DK.ISO-8859-15", "en_DK.UTF-8", "en_GB", "en_GB.ISO-8859-1", "en_GB.ISO-8859-15", "en_GB.UTF-8", "en_HK", "en_HK.ISO-8859-1", "en_HK.UTF-8", "en_IE", "en_IE.ISO-8859-1", "en_IE.UTF-8", "en_IL", "en_IL.UTF-8", "en_IN", "en_IN.UTF-8", "en_NG", "en_NG.UTF-8", "en_NZ", "en_NZ.ISO-8859-1", "en_NZ.UTF-8", "en_PH", "en_PH.ISO-8859-1", "en_PH.UTF-8", "en_SG", "en_SG.ISO-8859-1", "en_SG.UTF-8", "en_US", "en_US.ISO-8859-1", "en_US.ISO-8859-15", "en_US.UTF-8", "en_ZA", "en_ZA.ISO-8859-1", "en_ZA.UTF-8", "en_ZM", "en_ZM.UTF-8", "en_ZW", "en_ZW.ISO-8859-1", "en_ZW.UTF-8", "es_AR", "es_AR.ISO-8859-1", "es_AR.UTF-8", "es_BO", "es_BO.ISO-8859-1", "es_BO.UTF-8", "es_CL", "es_CL.ISO-8859-1", "es_CL.UTF-8", "es_CO", "es_CO.ISO-8859-1", 
"es_CO.UTF-8", "es_CR", "es_CR.ISO-8859-1", "es_CR.UTF-8", "es_CU", "es_CU.UTF-8", "es_DO", "es_DO.ISO-8859-1", "es_DO.UTF-8", "es_EC", "es_EC.ISO-8859-1", "es_EC.UTF-8", "es_ES", "es_ES.ISO-8859-1", "es_ES.UTF-8", "es_GT", "es_GT.ISO-8859-1", "es_GT.UTF-8", "es_HN", "es_HN.ISO-8859-1", "es_HN.UTF-8", "es_MX", "es_MX.ISO-8859-1", "es_MX.UTF-8", "es_NI", "es_NI.ISO-8859-1", "es_NI.UTF-8", "es_PA", "es_PA.ISO-8859-1", "es_PA.UTF-8", "es_PE", "es_PE.ISO-8859-1", "es_PE.UTF-8", "es_PR", "es_PR.ISO-8859-1", "es_PR.UTF-8", "es_PY", "es_PY.ISO-8859-1", "es_PY.UTF-8", "es_SV", "es_SV.ISO-8859-1", "es_SV.UTF-8", "es_US", "es_US.ISO-8859-1", "es_US.UTF-8", "es_UY", "es_UY.ISO-8859-1", "es_UY.UTF-8", "es_VE", "es_VE.ISO-8859-1", "es_VE.UTF-8", "et_EE", "et_EE.ISO-8859-1", "et_EE.ISO-8859-15", "et_EE.UTF-8", "eu_ES", "eu_ES.ISO-8859-1", "eu_ES.UTF-8", "eu_FR", "eu_FR.ISO-8859-1", "eu_FR.UTF-8", "fa_IR", "fa_IR.UTF-8", "ff_SN", "ff_SN.UTF-8", "fi_FI", "fi_FI.ISO-8859-1", "fi_FI.UTF-8", "fil_PH", "fil_PH.UTF-8", "fo_FO", "fo_FO.ISO-8859-1", "fo_FO.UTF-8", "fr_BE", "fr_BE.ISO-8859-1", "fr_BE.UTF-8", "fr_CA", "fr_CA.ISO-8859-1", "fr_CA.UTF-8", "fr_CH", "fr_CH.ISO-8859-1", "fr_CH.UTF-8", "fr_FR", "fr_FR.ISO-8859-1", "fr_FR.UTF-8", "fr_LU", "fr_LU.ISO-8859-1", "fr_LU.UTF-8", "fur_IT", "fur_IT.UTF-8", "fy_DE", "fy_DE.UTF-8", "fy_NL", "fy_NL.UTF-8", "ga_IE", "ga_IE.ISO-8859-1", "ga_IE.UTF-8", "gd_GB", "gd_GB.ISO-8859-15", "gd_GB.UTF-8", "gez_ER", "gez_ER.UTF-8", "gez_ET", "gez_ET.UTF-8", "gl_ES", "gl_ES.ISO-8859-1", "gl_ES.UTF-8", "gu_IN", "gu_IN.UTF-8", "gv_GB", "gv_GB.ISO-8859-1", "gv_GB.UTF-8", "hak_TW", "hak_TW.UTF-8", "ha_NG", "ha_NG.UTF-8", "he_IL", "he_IL.ISO-8859-8", "he_IL.UTF-8", "hif_FJ", "hif_FJ.UTF-8", "hi_IN", "hi_IN.UTF-8", "hne_IN", "hne_IN.UTF-8", "hr_HR", "hr_HR.ISO-8859-2", "hr_HR.UTF-8", "hsb_DE", "hsb_DE.ISO-8859-2", "hsb_DE.UTF-8", "ht_HT", "ht_HT.UTF-8", "hu_HU", "hu_HU.ISO-8859-2", "hu_HU.UTF-8", "hy_AM", "hy_AM.ARMSCII-8", "hy_AM.UTF-8", "ia_FR", "ia_FR.UTF-8", "id_ID", "id_ID.ISO-8859-1", "id_ID.UTF-8", "ig_NG", "ig_NG.UTF-8", "ik_CA", "ik_CA.UTF-8", "is_IS", "is_IS.ISO-8859-1", "is_IS.UTF-8", "it_CH", "it_CH.ISO-8859-1", "it_CH.UTF-8", "it_IT", "it_IT.ISO-8859-1", "it_IT.UTF-8", "iu_CA", "iu_CA.UTF-8", "kab_DZ", "kab_DZ.UTF-8", "ka_GE", "ka_GE.GEORGIAN-PS", "ka_GE.UTF-8", "kk_KZ", "kk_KZ.PT154", "kk_KZ.RK1048", "kk_KZ.UTF-8", "kl_GL", "kl_GL.ISO-8859-1", "kl_GL.UTF-8", "km_KH", "km_KH.UTF-8", "kn_IN", "kn_IN.UTF-8", "kok_IN", "kok_IN.UTF-8", "ks_IN", "ks_IN.UTF-8", "ku_TR", "ku_TR.ISO-8859-9", "ku_TR.UTF-8", "kw_GB", "kw_GB.ISO-8859-1", "kw_GB.UTF-8", "ky_KG", "ky_KG.UTF-8", "lb_LU", "lb_LU.UTF-8", "lg_UG", "lg_UG.ISO-8859-10", "lg_UG.UTF-8", "li_BE", "li_BE.UTF-8", "lij_IT", "lij_IT.UTF-8", "li_NL", "li_NL.UTF-8", "ln_CD", "ln_CD.UTF-8", "lo_LA", "lo_LA.UTF-8", "lt_LT", "lt_LT.ISO-8859-13", "lt_LT.UTF-8", "lv_LV", "lv_LV.ISO-8859-13", "lv_LV.UTF-8", "lzh_TW", "lzh_TW.UTF-8", "mag_IN", "mag_IN.UTF-8", "mai_IN", "mai_IN.UTF-8", "mai_NP", "mai_NP.UTF-8", "mfe_MU", "mfe_MU.UTF-8", "mg_MG", "mg_MG.ISO-8859-15", "mg_MG.UTF-8", "mhr_RU", "mhr_RU.UTF-8", "mi_NZ", "mi_NZ.ISO-8859-13", "mi_NZ.UTF-8", "miq_NI", "miq_NI.UTF-8", "mjw_IN", "mjw_IN.UTF-8", "mk_MK", "mk_MK.ISO-8859-5", "mk_MK.UTF-8", "ml_IN", "ml_IN.UTF-8", "mni_IN", "mni_IN.UTF-8", "mn_MN", "mn_MN.UTF-8", "mr_IN", "mr_IN.UTF-8", "ms_MY", "ms_MY.ISO-8859-1", "ms_MY.UTF-8", "mt_MT", "mt_MT.ISO-8859-3", "mt_MT.UTF-8", "my_MM", "my_MM.UTF-8", "nan_TW", "nan_TW.UTF-8", "nb_NO", "nb_NO.ISO-8859-1", "nb_NO.UTF-8", "nds_DE", 
"nds_DE.UTF-8", "nds_NL", "nds_NL.UTF-8", "ne_NP", "ne_NP.UTF-8", "nhn_MX", "nhn_MX.UTF-8", "niu_NU", "niu_NU.UTF-8", "niu_NZ", "niu_NZ.UTF-8", "nl_AW", "nl_AW.UTF-8", "nl_BE", "nl_BE.ISO-8859-1", "nl_BE.UTF-8", "nl_NL", "nl_NL.ISO-8859-1", "nl_NL.UTF-8", "nn_NO", "nn_NO.ISO-8859-1", "nn_NO.UTF-8", "nr_ZA", "nr_ZA.UTF-8", "nso_ZA", "nso_ZA.UTF-8", "oc_FR", "oc_FR.ISO-8859-1", "oc_FR.UTF-8", "om_ET", "om_ET.UTF-8", "om_KE", "om_KE.ISO-8859-1", "om_KE.UTF-8", "or_IN", "or_IN.UTF-8", "os_RU", "os_RU.UTF-8", "pa_IN", "pa_IN.UTF-8", "pap_AW", "pap_AW.UTF-8", "pap_CW", "pap_CW.UTF-8", "pa_PK", "pa_PK.UTF-8", "pl_PL", "pl_PL.ISO-8859-2", "pl_PL.UTF-8", "ps_AF", "ps_AF.UTF-8", "pt_BR", "pt_BR.ISO-8859-1", "pt_BR.UTF-8", "pt_PT", "pt_PT.ISO-8859-1", "pt_PT.UTF-8", "quz_PE", "quz_PE.UTF-8", "raj_IN", "raj_IN.UTF-8", "ro_RO", "ro_RO.ISO-8859-2", "ro_RO.UTF-8", "ru_RU", "ru_RU.CP1251", "ru_RU.ISO-8859-5", "ru_RU.KOI8-R", "ru_RU.UTF-8", "ru_UA", "ru_UA.KOI8-U", "ru_UA.UTF-8", "rw_RW", "rw_RW.UTF-8", "sa_IN", "sa_IN.UTF-8", "sat_IN", "sat_IN.UTF-8", "sc_IT", "sc_IT.UTF-8", "sd_IN", "sd_IN.UTF-8", "sd_PK", "sd_PK.UTF-8", "se_NO", "se_NO.UTF-8", "sgs_LT", "sgs_LT.UTF-8", "shn_MM", "shn_MM.UTF-8", "shs_CA", "shs_CA.UTF-8", "sid_ET", "sid_ET.UTF-8", "si_LK", "si_LK.UTF-8", "sk_SK", "sk_SK.ISO-8859-2", "sk_SK.UTF-8", "sl_SI", "sl_SI.ISO-8859-2", "sl_SI.UTF-8", "sm_WS", "sm_WS.UTF-8", "so_DJ", "so_DJ.ISO-8859-1", "so_DJ.UTF-8", "so_ET", "so_ET.UTF-8", "so_KE", "so_KE.ISO-8859-1", "so_KE.UTF-8", "so_SO", "so_SO.ISO-8859-1", "so_SO.UTF-8", "sq_AL", "sq_AL.ISO-8859-1", "sq_AL.UTF-8", "sq_MK", "sq_MK.UTF-8", "sr_ME", "sr_ME.UTF-8", "sr_RS", "sr_RS.UTF-8", "ss_ZA", "ss_ZA.UTF-8", "st_ZA", "st_ZA.ISO-8859-1", "st_ZA.UTF-8", "sv_FI", "sv_FI.ISO-8859-1", "sv_FI.UTF-8", "sv_SE", "sv_SE.ISO-8859-1", "sv_SE.ISO-8859-15", "sv_SE.UTF-8", "sw_KE", "sw_KE.UTF-8", "sw_TZ", "sw_TZ.UTF-8", "szl_PL", "szl_PL.UTF-8", "ta_IN", "ta_IN.UTF-8", "ta_LK", "ta_LK.UTF-8", "te_IN", "te_IN.UTF-8", "tg_TJ", "tg_TJ.KOI8-T", "tg_TJ.UTF-8", "the_NP", "the_NP.UTF-8", "th_TH", "th_TH.TIS-620", "th_TH.UTF-8", "ti_ER", "ti_ER.UTF-8", "ti_ET", "ti_ET.UTF-8", "tig_ER", "tig_ER.UTF-8", "tk_TM", "tk_TM.UTF-8", "tl_PH", "tl_PH.ISO-8859-1", "tl_PH.UTF-8", "tn_ZA", "tn_ZA.UTF-8", "to_TO", "to_TO.UTF-8", "tpi_PG", "tpi_PG.UTF-8", "tr_CY", "tr_CY.ISO-8859-9", "tr_CY.UTF-8", "tr_TR", "tr_TR.ISO-8859-9", "tr_TR.UTF-8", "ts_ZA", "ts_ZA.UTF-8", "tt_RU", "tt_RU.UTF-8", "ug_CN", "ug_CN.UTF-8", "uk_UA", "uk_UA.KOI8-U", "uk_UA.UTF-8", "unm_US", "unm_US.UTF-8", "ur_IN", "ur_IN.UTF-8", "ur_PK", "ur_PK.UTF-8", "uz_UZ", "uz_UZ.ISO-8859-1", "uz_UZ.UTF-8", "ve_ZA", "ve_ZA.UTF-8", "vi_VN", "vi_VN.UTF-8", "wa_BE", "wa_BE.ISO-8859-1", "wa_BE.UTF-8", "wae_CH", "wae_CH.UTF-8", "wal_ET", "wal_ET.UTF-8", "wo_SN", "wo_SN.UTF-8", "xh_ZA", "xh_ZA.ISO-8859-1", "xh_ZA.UTF-8", "yi_US", "yi_US.CP1255", "yi_US.UTF-8", "yo_NG", "yo_NG.UTF-8", "yue_HK", "yue_HK.UTF-8", "yuw_PG", "yuw_PG.UTF-8", "zh_CN", "zh_CN.GB18030", "zh_CN.GB2312", "zh_CN.GBK", "zh_CN.UTF-8", "zh_HK", "zh_HK.BIG5-HKSCS", "zh_HK.UTF-8", "zh_SG", "zh_SG.GB2312", "zh_SG.GBK", "zh_SG.UTF-8", "zh_TW", "zh_TW.BIG5", "zh_TW.EUC-TW", "zh_TW.UTF-8", "zu_ZA", "zu_ZA.ISO-8859-1", "zu_ZA.UTF-8"
+};
+
+std::string ConsumeLocaleIdentifier(FuzzedDataProvider& fuzzed_data_provider)
+{
+ return fuzzed_data_provider.PickValueInArray<std::string>(locale_identifiers);
+}
+
+bool IsAvailableLocale(const std::string& locale_identifier)
+{
+ try {
+ (void)std::locale(locale_identifier);
+ } catch (const std::runtime_error&) {
+ return false;
+ }
+ return true;
+}
+} // namespace
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const std::string locale_identifier = ConsumeLocaleIdentifier(fuzzed_data_provider);
+ if (!IsAvailableLocale(locale_identifier)) {
+ return;
+ }
+ const char* c_locale = std::setlocale(LC_ALL, "C");
+ assert(c_locale != nullptr);
+
+ const std::string random_string = fuzzed_data_provider.ConsumeRandomLengthString(5);
+ int32_t parseint32_out_without_locale;
+ const bool parseint32_without_locale = ParseInt32(random_string, &parseint32_out_without_locale);
+ int64_t parseint64_out_without_locale;
+ const bool parseint64_without_locale = ParseInt64(random_string, &parseint64_out_without_locale);
+ const int64_t atoi64_without_locale = atoi64(random_string);
+ const int atoi_without_locale = atoi(random_string);
+ const int64_t atoi64c_without_locale = atoi64(random_string.c_str());
+ const int64_t random_int64 = fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ const std::string i64tostr_without_locale = i64tostr(random_int64);
+ const int32_t random_int32 = fuzzed_data_provider.ConsumeIntegral<int32_t>();
+ const std::string itostr_without_locale = itostr(random_int32);
+ const std::string strprintf_int_without_locale = strprintf("%d", random_int64);
+ const double random_double = fuzzed_data_provider.ConsumeFloatingPoint<double>();
+ const std::string strprintf_double_without_locale = strprintf("%f", random_double);
+
+ const char* new_locale = std::setlocale(LC_ALL, locale_identifier.c_str());
+ assert(new_locale != nullptr);
+
+ int32_t parseint32_out_with_locale;
+ const bool parseint32_with_locale = ParseInt32(random_string, &parseint32_out_with_locale);
+ assert(parseint32_without_locale == parseint32_with_locale);
+ if (parseint32_without_locale) {
+ assert(parseint32_out_without_locale == parseint32_out_with_locale);
+ }
+ int64_t parseint64_out_with_locale;
+ const bool parseint64_with_locale = ParseInt64(random_string, &parseint64_out_with_locale);
+ assert(parseint64_without_locale == parseint64_with_locale);
+ if (parseint64_without_locale) {
+ assert(parseint64_out_without_locale == parseint64_out_with_locale);
+ }
+ const int64_t atoi64_with_locale = atoi64(random_string);
+ assert(atoi64_without_locale == atoi64_with_locale);
+ const int64_t atoi64c_with_locale = atoi64(random_string.c_str());
+ assert(atoi64c_without_locale == atoi64c_with_locale);
+ const int atoi_with_locale = atoi(random_string);
+ assert(atoi_without_locale == atoi_with_locale);
+ const std::string i64tostr_with_locale = i64tostr(random_int64);
+ assert(i64tostr_without_locale == i64tostr_with_locale);
+ const std::string itostr_with_locale = itostr(random_int32);
+ assert(itostr_without_locale == itostr_with_locale);
+ const std::string strprintf_int_with_locale = strprintf("%d", random_int64);
+ assert(strprintf_int_without_locale == strprintf_int_with_locale);
+ const std::string strprintf_double_with_locale = strprintf("%f", random_double);
+ assert(strprintf_double_without_locale == strprintf_double_with_locale);
+
+ const std::locale current_cpp_locale;
+ assert(current_cpp_locale == std::locale::classic());
+}
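locale.cpp records formatting and parsing results under the "C" locale, switches LC_ALL to a fuzzer-chosen locale, repeats the calls, and asserts the outputs are unchanged, i.e. the helpers are locale-independent. The same check in miniature; de_DE.UTF-8 is only an example locale and may not be installed, in which case the sketch exits early:

    #include <cassert>
    #include <clocale>
    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Format an integer the same way twice, under "C" and under another locale,
    // and require identical output.
    std::string FormatInt(int64_t v)
    {
        char buf[32];
        std::snprintf(buf, sizeof(buf), "%lld", static_cast<long long>(v));
        return buf;
    }

    int main()
    {
        std::setlocale(LC_ALL, "C");
        const std::string under_c = FormatInt(1234567);

        if (std::setlocale(LC_ALL, "de_DE.UTF-8") == nullptr) {
            return 0; // locale not installed; nothing to compare
        }
        const std::string under_de = FormatInt(1234567);
        assert(under_c == under_de); // %lld applies no locale-specific grouping

        std::setlocale(LC_ALL, "C"); // restore
        return 0;
    }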
diff --git a/src/test/fuzz/netaddress.cpp b/src/test/fuzz/netaddress.cpp
new file mode 100644
index 0000000000..7d87ebc214
--- /dev/null
+++ b/src/test/fuzz/netaddress.cpp
@@ -0,0 +1,123 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <netaddress.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+
+#include <cassert>
+#include <cstdint>
+#include <netinet/in.h>
+#include <vector>
+
+namespace {
+CNetAddr ConsumeNetAddr(FuzzedDataProvider& fuzzed_data_provider) noexcept
+{
+ const Network network = fuzzed_data_provider.PickValueInArray({Network::NET_IPV4, Network::NET_IPV6, Network::NET_INTERNAL, Network::NET_ONION});
+ if (network == Network::NET_IPV4) {
+ const in_addr v4_addr = {
+ .s_addr = fuzzed_data_provider.ConsumeIntegral<uint32_t>()};
+ return CNetAddr{v4_addr};
+ } else if (network == Network::NET_IPV6) {
+ if (fuzzed_data_provider.remaining_bytes() < 16) {
+ return CNetAddr{};
+ }
+ in6_addr v6_addr = {};
+ memcpy(v6_addr.s6_addr, fuzzed_data_provider.ConsumeBytes<uint8_t>(16).data(), 16);
+ return CNetAddr{v6_addr, fuzzed_data_provider.ConsumeIntegral<uint32_t>()};
+ } else if (network == Network::NET_INTERNAL) {
+ CNetAddr net_addr;
+ net_addr.SetInternal(fuzzed_data_provider.ConsumeBytesAsString(32));
+ return net_addr;
+ } else if (network == Network::NET_ONION) {
+ CNetAddr net_addr;
+ net_addr.SetSpecial(fuzzed_data_provider.ConsumeBytesAsString(32));
+ return net_addr;
+ } else {
+ assert(false);
+ }
+}
+}; // namespace
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
+ const CNetAddr net_addr = ConsumeNetAddr(fuzzed_data_provider);
+ for (int i = 0; i < 15; ++i) {
+ (void)net_addr.GetByte(i);
+ }
+ (void)net_addr.GetHash();
+ (void)net_addr.GetNetClass();
+ if (net_addr.GetNetwork() == Network::NET_IPV4) {
+ assert(net_addr.IsIPv4());
+ }
+ if (net_addr.GetNetwork() == Network::NET_IPV6) {
+ assert(net_addr.IsIPv6());
+ }
+ if (net_addr.GetNetwork() == Network::NET_ONION) {
+ assert(net_addr.IsTor());
+ }
+ if (net_addr.GetNetwork() == Network::NET_INTERNAL) {
+ assert(net_addr.IsInternal());
+ }
+ if (net_addr.GetNetwork() == Network::NET_UNROUTABLE) {
+ assert(!net_addr.IsRoutable());
+ }
+ (void)net_addr.IsBindAny();
+ if (net_addr.IsInternal()) {
+ assert(net_addr.GetNetwork() == Network::NET_INTERNAL);
+ }
+ if (net_addr.IsIPv4()) {
+ assert(net_addr.GetNetwork() == Network::NET_IPV4 || net_addr.GetNetwork() == Network::NET_UNROUTABLE);
+ }
+ if (net_addr.IsIPv6()) {
+ assert(net_addr.GetNetwork() == Network::NET_IPV6 || net_addr.GetNetwork() == Network::NET_UNROUTABLE);
+ }
+ (void)net_addr.IsLocal();
+ if (net_addr.IsRFC1918() || net_addr.IsRFC2544() || net_addr.IsRFC6598() || net_addr.IsRFC5737() || net_addr.IsRFC3927()) {
+ assert(net_addr.IsIPv4());
+ }
+ (void)net_addr.IsRFC2544();
+ if (net_addr.IsRFC3849() || net_addr.IsRFC3964() || net_addr.IsRFC4380() || net_addr.IsRFC4843() || net_addr.IsRFC7343() || net_addr.IsRFC4862() || net_addr.IsRFC6052() || net_addr.IsRFC6145()) {
+ assert(net_addr.IsIPv6());
+ }
+ (void)net_addr.IsRFC3927();
+ (void)net_addr.IsRFC3964();
+ if (net_addr.IsRFC4193()) {
+ assert(net_addr.GetNetwork() == Network::NET_ONION || net_addr.GetNetwork() == Network::NET_INTERNAL || net_addr.GetNetwork() == Network::NET_UNROUTABLE);
+ }
+ (void)net_addr.IsRFC4380();
+ (void)net_addr.IsRFC4843();
+ (void)net_addr.IsRFC4862();
+ (void)net_addr.IsRFC5737();
+ (void)net_addr.IsRFC6052();
+ (void)net_addr.IsRFC6145();
+ (void)net_addr.IsRFC6598();
+ (void)net_addr.IsRFC7343();
+ if (!net_addr.IsRoutable()) {
+ assert(net_addr.GetNetwork() == Network::NET_UNROUTABLE || net_addr.GetNetwork() == Network::NET_INTERNAL);
+ }
+ if (net_addr.IsTor()) {
+ assert(net_addr.GetNetwork() == Network::NET_ONION);
+ }
+ (void)net_addr.IsValid();
+ (void)net_addr.ToString();
+ (void)net_addr.ToStringIP();
+
+ const CSubNet sub_net{net_addr, fuzzed_data_provider.ConsumeIntegral<int32_t>()};
+ (void)sub_net.IsValid();
+ (void)sub_net.ToString();
+
+ const CService service{net_addr, fuzzed_data_provider.ConsumeIntegral<uint16_t>()};
+ (void)service.GetKey();
+ (void)service.GetPort();
+ (void)service.ToString();
+ (void)service.ToStringIPPort();
+ (void)service.ToStringPort();
+
+ const CNetAddr other_net_addr = ConsumeNetAddr(fuzzed_data_provider);
+ (void)net_addr.GetReachabilityFrom(&other_net_addr);
+ (void)sub_net.Match(other_net_addr);
+}
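The assertions above cross-check classification predicates, for example that the RFC1918/RFC2544/RFC5737 checks imply IsIPv4(). A standalone sketch of such an address classifier, with spot checks of the ranges it is supposed to cover (IsRFC1918 below is an illustrative reimplementation, not CNetAddr code):

    #include <cassert>
    #include <cstdint>

    // Illustrative classifier for RFC1918 private IPv4 addresses, given in host byte order.
    bool IsRFC1918(uint32_t ip)
    {
        const uint8_t a = ip >> 24;
        const uint8_t b = (ip >> 16) & 0xff;
        return a == 10 ||                          // 10.0.0.0/8
               (a == 172 && b >= 16 && b <= 31) || // 172.16.0.0/12
               (a == 192 && b == 168);             // 192.168.0.0/16
    }

    int main()
    {
        assert(IsRFC1918(0x0A000001));  // 10.0.0.1
        assert(IsRFC1918(0xC0A80101));  // 192.168.1.1
        assert(!IsRFC1918(0x08080808)); // 8.8.8.8
        return 0;
    }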
diff --git a/src/test/fuzz/p2p_transport_deserializer.cpp b/src/test/fuzz/p2p_transport_deserializer.cpp
new file mode 100644
index 0000000000..57393fed45
--- /dev/null
+++ b/src/test/fuzz/p2p_transport_deserializer.cpp
@@ -0,0 +1,47 @@
+// Copyright (c) 2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <chainparams.h>
+#include <net.h>
+#include <protocol.h>
+#include <test/fuzz/fuzz.h>
+
+#include <cassert>
+#include <cstdint>
+#include <limits>
+#include <vector>
+
+void initialize()
+{
+ SelectParams(CBaseChainParams::REGTEST);
+}
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ V1TransportDeserializer deserializer{Params().MessageStart(), SER_NETWORK, INIT_PROTO_VERSION};
+ const char* pch = (const char*)buffer.data();
+ size_t n_bytes = buffer.size();
+ while (n_bytes > 0) {
+ const int handled = deserializer.Read(pch, n_bytes);
+ if (handled < 0) {
+ break;
+ }
+ pch += handled;
+ n_bytes -= handled;
+ if (deserializer.Complete()) {
+ const int64_t m_time = std::numeric_limits<int64_t>::max();
+ const CNetMessage msg = deserializer.GetMessage(Params().MessageStart(), m_time);
+ assert(msg.m_command.size() <= CMessageHeader::COMMAND_SIZE);
+ assert(msg.m_raw_message_size <= buffer.size());
+ assert(msg.m_raw_message_size == CMessageHeader::HEADER_SIZE + msg.m_message_size);
+ assert(msg.m_time == m_time);
+ if (msg.m_valid_header) {
+ assert(msg.m_valid_netmagic);
+ }
+ if (!msg.m_valid_netmagic) {
+ assert(!msg.m_valid_header);
+ }
+ }
+ }
+}
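The deserializer harness feeds the whole buffer through Read() and, whenever Complete() reports a full message, checks size and header invariants against the input consumed so far. A minimal standalone framer showing the same feed-and-check loop, using a 1-byte length prefix instead of the P2P header format (ToyDeserializer is purely illustrative):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Toy framer: messages are a 1-byte length followed by that many payload bytes.
    class ToyDeserializer
    {
    public:
        // Consume up to n bytes, return how many were used.
        size_t Read(const uint8_t* data, size_t n)
        {
            size_t used = 0;
            while (used < n && !Complete()) {
                m_buf.push_back(data[used++]);
            }
            return used;
        }
        bool Complete() const
        {
            return !m_buf.empty() && m_buf.size() == 1u + m_buf[0];
        }
        std::vector<uint8_t> GetMessage()
        {
            assert(Complete());
            std::vector<uint8_t> payload(m_buf.begin() + 1, m_buf.end());
            m_buf.clear();
            return payload;
        }

    private:
        std::vector<uint8_t> m_buf;
    };

    int main()
    {
        const std::vector<uint8_t> wire{3, 'a', 'b', 'c', 1, 'x'};
        ToyDeserializer deserializer;
        size_t offset = 0;
        while (offset < wire.size()) {
            offset += deserializer.Read(wire.data() + offset, wire.size() - offset);
            if (deserializer.Complete()) {
                const std::vector<uint8_t> msg = deserializer.GetMessage();
                assert(msg.size() <= wire.size()); // parsed payload can never exceed the input fed in
            }
        }
        return 0;
    }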
diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp
new file mode 100644
index 0000000000..934f741068
--- /dev/null
+++ b/src/test/fuzz/process_message.cpp
@@ -0,0 +1,98 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <banman.h>
+#include <chainparams.h>
+#include <consensus/consensus.h>
+#include <net.h>
+#include <net_processing.h>
+#include <protocol.h>
+#include <scheduler.h>
+#include <script/script.h>
+#include <streams.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/util/mining.h>
+#include <test/util/setup_common.h>
+#include <util/memory.h>
+#include <validationinterface.h>
+#include <version.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cassert>
+#include <chrono>
+#include <cstdint>
+#include <iosfwd>
+#include <iostream>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+bool ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman* connman, BanMan* banman, const std::atomic<bool>& interruptMsgProc);
+
+namespace {
+
+#ifdef MESSAGE_TYPE
+#define TO_STRING_(s) #s
+#define TO_STRING(s) TO_STRING_(s)
+const std::string LIMIT_TO_MESSAGE_TYPE{TO_STRING(MESSAGE_TYPE)};
+#else
+const std::string LIMIT_TO_MESSAGE_TYPE;
+#endif
+
+const std::map<std::string, std::set<std::string>> EXPECTED_DESERIALIZATION_EXCEPTIONS = {
+ {"CDataStream::read(): end of data: iostream error", {"addr", "block", "blocktxn", "cmpctblock", "feefilter", "filteradd", "filterload", "getblocks", "getblocktxn", "getdata", "getheaders", "headers", "inv", "notfound", "ping", "sendcmpct", "tx"}},
+ {"CompactSize exceeds limit of type: iostream error", {"cmpctblock"}},
+ {"differential value overflow: iostream error", {"getblocktxn"}},
+ {"index overflowed 16 bits: iostream error", {"getblocktxn"}},
+ {"index overflowed 16-bits: iostream error", {"cmpctblock"}},
+ {"indexes overflowed 16 bits: iostream error", {"getblocktxn"}},
+ {"non-canonical ReadCompactSize(): iostream error", {"addr", "block", "blocktxn", "cmpctblock", "filteradd", "filterload", "getblocks", "getblocktxn", "getdata", "getheaders", "headers", "inv", "notfound", "tx"}},
+ {"ReadCompactSize(): size too large: iostream error", {"addr", "block", "blocktxn", "cmpctblock", "filteradd", "filterload", "getblocks", "getblocktxn", "getdata", "getheaders", "headers", "inv", "notfound", "tx"}},
+ {"Superfluous witness record: iostream error", {"block", "blocktxn", "cmpctblock", "tx"}},
+ {"Unknown transaction optional data: iostream error", {"block", "blocktxn", "cmpctblock", "tx"}},
+};
+
+const RegTestingSetup* g_setup;
+} // namespace
+
+void initialize()
+{
+ static RegTestingSetup setup{};
+ g_setup = &setup;
+
+ for (int i = 0; i < 2 * COINBASE_MATURITY; i++) {
+ MineBlock(g_setup->m_node, CScript() << OP_TRUE);
+ }
+ SyncWithValidationInterfaceQueue();
+}
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const std::string random_message_type{fuzzed_data_provider.ConsumeBytesAsString(CMessageHeader::COMMAND_SIZE).c_str()};
+ if (!LIMIT_TO_MESSAGE_TYPE.empty() && random_message_type != LIMIT_TO_MESSAGE_TYPE) {
+ return;
+ }
+ CDataStream random_bytes_data_stream{fuzzed_data_provider.ConsumeRemainingBytes<unsigned char>(), SER_NETWORK, PROTOCOL_VERSION};
+ CNode p2p_node{0, ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_BLOOM), 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, false};
+ p2p_node.fSuccessfullyConnected = true;
+ p2p_node.nVersion = PROTOCOL_VERSION;
+ p2p_node.SetSendVersion(PROTOCOL_VERSION);
+ g_setup->m_node.peer_logic->InitializeNode(&p2p_node);
+ try {
+ (void)ProcessMessage(&p2p_node, random_message_type, random_bytes_data_stream, GetTimeMillis(), Params(), g_setup->m_node.connman.get(), g_setup->m_node.banman.get(), std::atomic<bool>{false});
+ } catch (const std::ios_base::failure& e) {
+ const std::string exception_message{e.what()};
+ const auto p = EXPECTED_DESERIALIZATION_EXCEPTIONS.find(exception_message);
+ if (p == EXPECTED_DESERIALIZATION_EXCEPTIONS.cend() || p->second.count(random_message_type) == 0) {
+ std::cout << "Unexpected exception when processing message type \"" << random_message_type << "\": " << exception_message << std::endl;
+ assert(false);
+ }
+ }
+ SyncWithValidationInterfaceQueue();
+}
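Unexpected (message type, exception text) pairs are treated as findings: the catch block only tolerates exceptions listed for that message type in EXPECTED_DESERIALIZATION_EXCEPTIONS. A standalone sketch of that whitelist lookup (the table contents below are made up):

    #include <cassert>
    #include <map>
    #include <set>
    #include <string>

    // Whitelist lookup: an exception is "expected" only if it is listed for that message type.
    bool IsExpected(const std::map<std::string, std::set<std::string>>& expected,
                    const std::string& exception_message, const std::string& message_type)
    {
        const auto it = expected.find(exception_message);
        return it != expected.end() && it->second.count(message_type) > 0;
    }

    int main()
    {
        const std::map<std::string, std::set<std::string>> expected{
            {"end of data", {"tx", "block"}},
        };
        assert(IsExpected(expected, "end of data", "tx"));
        assert(!IsExpected(expected, "end of data", "ping"));   // known text, wrong message type
        assert(!IsExpected(expected, "unknown failure", "tx")); // unknown exception text
        return 0;
    }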
diff --git a/src/test/fuzz/rolling_bloom_filter.cpp b/src/test/fuzz/rolling_bloom_filter.cpp
new file mode 100644
index 0000000000..3b37321977
--- /dev/null
+++ b/src/test/fuzz/rolling_bloom_filter.cpp
@@ -0,0 +1,50 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <bloom.h>
+#include <optional.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+#include <uint256.h>
+
+#include <cassert>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
+ CRollingBloomFilter rolling_bloom_filter{
+ fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(1, 1000),
+ 0.999 / fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(1, std::numeric_limits<unsigned int>::max())};
+ while (fuzzed_data_provider.remaining_bytes() > 0) {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 2)) {
+ case 0: {
+ const std::vector<unsigned char> b = ConsumeRandomLengthByteVector(fuzzed_data_provider);
+ (void)rolling_bloom_filter.contains(b);
+ rolling_bloom_filter.insert(b);
+ const bool present = rolling_bloom_filter.contains(b);
+ assert(present);
+ break;
+ }
+ case 1: {
+ const Optional<uint256> u256 = ConsumeDeserializable<uint256>(fuzzed_data_provider);
+ if (!u256) {
+ break;
+ }
+ (void)rolling_bloom_filter.contains(*u256);
+ rolling_bloom_filter.insert(*u256);
+ const bool present = rolling_bloom_filter.contains(*u256);
+ assert(present);
+ break;
+ }
+ case 2:
+ rolling_bloom_filter.reset();
+ break;
+ }
+ }
+}
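Both Bloom filter harnesses rely on the defining property that insertion can never produce a false negative: after insert(x), contains(x) must return true, while false positives remain allowed. A toy Bloom filter illustrating that property, written against the standard library only (ToyBloom is not a type from this patch):

    #include <bitset>
    #include <cassert>
    #include <cstdint>
    #include <functional>
    #include <string>

    // Toy Bloom filter: two hash-derived bit positions per element, 256-bit table.
    class ToyBloom
    {
    public:
        void insert(const std::string& s)
        {
            m_bits.set(Pos(s, 0));
            m_bits.set(Pos(s, 1));
        }
        bool contains(const std::string& s) const
        {
            // May return true for elements never inserted (false positive),
            // but never false for an inserted element (no false negatives).
            return m_bits.test(Pos(s, 0)) && m_bits.test(Pos(s, 1));
        }

    private:
        static size_t Pos(const std::string& s, uint8_t salt)
        {
            return std::hash<std::string>{}(s + static_cast<char>('A' + salt)) % 256;
        }
        std::bitset<256> m_bits;
    };

    int main()
    {
        ToyBloom filter;
        filter.insert("hello");
        assert(filter.contains("hello")); // the invariant both fuzz targets assert after every insert
        return 0;
    }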
diff --git a/src/test/fuzz/script.cpp b/src/test/fuzz/script.cpp
index 0469e87de6..0d18784302 100644
--- a/src/test/fuzz/script.cpp
+++ b/src/test/fuzz/script.cpp
@@ -15,12 +15,15 @@
#include <script/standard.h>
#include <streams.h>
#include <test/fuzz/fuzz.h>
+#include <univalue.h>
#include <util/memory.h>
void initialize()
{
// Fuzzers using pubkey must hold an ECCVerifyHandle.
static const auto verify_handle = MakeUnique<ECCVerifyHandle>();
+
+ SelectParams(CBaseChainParams::REGTEST);
}
void test_one_input(const std::vector<uint8_t>& buffer)
@@ -28,7 +31,22 @@ void test_one_input(const std::vector<uint8_t>& buffer)
const CScript script(buffer.begin(), buffer.end());
std::vector<unsigned char> compressed;
- (void)CompressScript(script, compressed);
+ if (CompressScript(script, compressed)) {
+ const unsigned int size = compressed[0];
+ compressed.erase(compressed.begin());
+ assert(size >= 0 && size <= 5);
+ CScript decompressed_script;
+ const bool ok = DecompressScript(decompressed_script, size, compressed);
+ assert(ok);
+ assert(script == decompressed_script);
+ }
+
+ for (unsigned int size = 0; size < 6; ++size) {
+ std::vector<unsigned char> vch(GetSpecialScriptSize(size), 0x00);
+ vch.insert(vch.end(), buffer.begin(), buffer.end());
+ CScript decompressed_script;
+ (void)DecompressScript(decompressed_script, size, vch);
+ }
CTxDestination address;
(void)ExtractDestination(script, address);
@@ -61,4 +79,17 @@ void test_one_input(const std::vector<uint8_t>& buffer)
(void)script.IsPushOnly();
(void)script.IsUnspendable();
(void)script.GetSigOpCount(/* fAccurate= */ false);
+
+ (void)FormatScript(script);
+ (void)ScriptToAsmStr(script, false);
+ (void)ScriptToAsmStr(script, true);
+
+ UniValue o1(UniValue::VOBJ);
+ ScriptPubKeyToUniv(script, o1, true);
+ UniValue o2(UniValue::VOBJ);
+ ScriptPubKeyToUniv(script, o2, false);
+ UniValue o3(UniValue::VOBJ);
+ ScriptToUniv(script, o3, true);
+ UniValue o4(UniValue::VOBJ);
+ ScriptToUniv(script, o4, false);
}
diff --git a/src/test/fuzz/script_ops.cpp b/src/test/fuzz/script_ops.cpp
new file mode 100644
index 0000000000..0cd129ba7a
--- /dev/null
+++ b/src/test/fuzz/script_ops.cpp
@@ -0,0 +1,67 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <script/script.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ CScript script = ConsumeScript(fuzzed_data_provider);
+ while (fuzzed_data_provider.remaining_bytes() > 0) {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 7)) {
+ case 0:
+ script += ConsumeScript(fuzzed_data_provider);
+ break;
+ case 1:
+ script = script + ConsumeScript(fuzzed_data_provider);
+ break;
+ case 2:
+ script << fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ break;
+ case 3:
+ script << ConsumeOpcodeType(fuzzed_data_provider);
+ break;
+ case 4:
+ script << ConsumeScriptNum(fuzzed_data_provider);
+ break;
+ case 5:
+ script << ConsumeRandomLengthByteVector(fuzzed_data_provider);
+ break;
+ case 6:
+ script.clear();
+ break;
+ case 7: {
+ (void)script.GetSigOpCount(false);
+ (void)script.GetSigOpCount(true);
+ (void)script.GetSigOpCount(script);
+ (void)script.HasValidOps();
+ (void)script.IsPayToScriptHash();
+ (void)script.IsPayToWitnessScriptHash();
+ (void)script.IsPushOnly();
+ (void)script.IsUnspendable();
+ {
+ CScript::const_iterator pc = script.begin();
+ opcodetype opcode;
+ (void)script.GetOp(pc, opcode);
+ std::vector<uint8_t> data;
+ (void)script.GetOp(pc, opcode, data);
+ (void)script.IsPushOnly(pc);
+ }
+ {
+ int version;
+ std::vector<uint8_t> program;
+ (void)script.IsWitnessProgram(version, program);
+ }
+ break;
+ }
+ }
+ }
+}
diff --git a/src/test/fuzz/scriptnum_ops.cpp b/src/test/fuzz/scriptnum_ops.cpp
new file mode 100644
index 0000000000..db44bb9e19
--- /dev/null
+++ b/src/test/fuzz/scriptnum_ops.cpp
@@ -0,0 +1,137 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <script/script.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+
+#include <cassert>
+#include <cstdint>
+#include <limits>
+#include <vector>
+
+namespace {
+bool IsValidAddition(const CScriptNum& lhs, const CScriptNum& rhs)
+{
+ return rhs == 0 || (rhs > 0 && lhs <= CScriptNum{std::numeric_limits<int64_t>::max()} - rhs) || (rhs < 0 && lhs >= CScriptNum{std::numeric_limits<int64_t>::min()} - rhs);
+}
+
+bool IsValidSubtraction(const CScriptNum& lhs, const CScriptNum& rhs)
+{
+ return rhs == 0 || (rhs > 0 && lhs >= CScriptNum{std::numeric_limits<int64_t>::min()} + rhs) || (rhs < 0 && lhs <= CScriptNum{std::numeric_limits<int64_t>::max()} + rhs);
+}
+} // namespace
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ CScriptNum script_num = ConsumeScriptNum(fuzzed_data_provider);
+ while (fuzzed_data_provider.remaining_bytes() > 0) {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 11)) {
+ case 0: {
+ const int64_t i = fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ assert((script_num == i) != (script_num != i));
+ assert((script_num <= i) != (script_num > i));
+ assert((script_num >= i) != (script_num < i));
+ // Avoid signed integer overflow:
+ // script/script.h:264:93: runtime error: signed integer overflow: -2261405121394637306 + -9223372036854775802 cannot be represented in type 'long'
+ if (IsValidAddition(script_num, CScriptNum{i})) {
+ assert((script_num + i) - i == script_num);
+ }
+ // Avoid signed integer overflow:
+ // script/script.h:265:93: runtime error: signed integer overflow: 9223371895120855039 - -9223372036854710486 cannot be represented in type 'long'
+ if (IsValidSubtraction(script_num, CScriptNum{i})) {
+ assert((script_num - i) + i == script_num);
+ }
+ break;
+ }
+ case 1: {
+ const CScriptNum random_script_num = ConsumeScriptNum(fuzzed_data_provider);
+ assert((script_num == random_script_num) != (script_num != random_script_num));
+ assert((script_num <= random_script_num) != (script_num > random_script_num));
+ assert((script_num >= random_script_num) != (script_num < random_script_num));
+ // Avoid signed integer overflow:
+ // script/script.h:264:93: runtime error: signed integer overflow: -9223126527765971126 + -9223372036854756825 cannot be represented in type 'long'
+ if (IsValidAddition(script_num, random_script_num)) {
+ assert((script_num + random_script_num) - random_script_num == script_num);
+ }
+ // Avoid signed integer overflow:
+ // script/script.h:265:93: runtime error: signed integer overflow: 6052837899185946624 - -9223372036854775808 cannot be represented in type 'long'
+ if (IsValidSubtraction(script_num, random_script_num)) {
+ assert((script_num - random_script_num) + random_script_num == script_num);
+ }
+ break;
+ }
+ case 2: {
+ const CScriptNum random_script_num = ConsumeScriptNum(fuzzed_data_provider);
+ if (!IsValidAddition(script_num, random_script_num)) {
+ // Avoid assertion failure:
+ // ./script/script.h:292: CScriptNum &CScriptNum::operator+=(const int64_t &): Assertion `rhs == 0 || (rhs > 0 && m_value <= std::numeric_limits<int64_t>::max() - rhs) || (rhs < 0 && m_value >= std::numeric_limits<int64_t>::min() - rhs)' failed.
+ break;
+ }
+ script_num += random_script_num;
+ break;
+ }
+ case 3: {
+ const CScriptNum random_script_num = ConsumeScriptNum(fuzzed_data_provider);
+ if (!IsValidSubtraction(script_num, random_script_num)) {
+ // Avoid assertion failure:
+ // ./script/script.h:300: CScriptNum &CScriptNum::operator-=(const int64_t &): Assertion `rhs == 0 || (rhs > 0 && m_value >= std::numeric_limits<int64_t>::min() + rhs) || (rhs < 0 && m_value <= std::numeric_limits<int64_t>::max() + rhs)' failed.
+ break;
+ }
+ script_num -= random_script_num;
+ break;
+ }
+ case 4:
+ script_num = script_num & fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ break;
+ case 5:
+ script_num = script_num & ConsumeScriptNum(fuzzed_data_provider);
+ break;
+ case 6:
+ script_num &= ConsumeScriptNum(fuzzed_data_provider);
+ break;
+ case 7:
+ if (script_num == CScriptNum{std::numeric_limits<int64_t>::min()}) {
+ // Avoid assertion failure:
+ // ./script/script.h:279: CScriptNum CScriptNum::operator-() const: Assertion `m_value != std::numeric_limits<int64_t>::min()' failed.
+ break;
+ }
+ script_num = -script_num;
+ break;
+ case 8:
+ script_num = fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ break;
+ case 9: {
+ const int64_t random_integer = fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ if (!IsValidAddition(script_num, CScriptNum{random_integer})) {
+ // Avoid assertion failure:
+ // ./script/script.h:292: CScriptNum &CScriptNum::operator+=(const int64_t &): Assertion `rhs == 0 || (rhs > 0 && m_value <= std::numeric_limits<int64_t>::max() - rhs) || (rhs < 0 && m_value >= std::numeric_limits<int64_t>::min() - rhs)' failed.
+ break;
+ }
+ script_num += random_integer;
+ break;
+ }
+ case 10: {
+ const int64_t random_integer = fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ if (!IsValidSubtraction(script_num, CScriptNum{random_integer})) {
+ // Avoid assertion failure:
+ // ./script/script.h:300: CScriptNum &CScriptNum::operator-=(const int64_t &): Assertion `rhs == 0 || (rhs > 0 && m_value >= std::numeric_limits<int64_t>::min() + rhs) || (rhs < 0 && m_value <= std::numeric_limits<int64_t>::max() + rhs)' failed.
+ break;
+ }
+ script_num -= random_integer;
+ break;
+ }
+ case 11:
+ script_num &= fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ break;
+ }
+ // Avoid negation failure:
+ // script/script.h:332:35: runtime error: negation of -9223372036854775808 cannot be represented in type 'int64_t' (aka 'long'); cast to an unsigned type to negate this value to itself
+ if (script_num != CScriptNum{std::numeric_limits<int64_t>::min()}) {
+ (void)script_num.getvch();
+ }
+ }
+}
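
The round-trip assertions in the CScriptNum target above only run once the matching validity predicate passes, so the fuzzer never performs an addition or subtraction that UBSan would flag as signed overflow. A minimal standalone sketch of such guards over plain int64_t follows; the helper names are illustrative stand-ins, not the target's IsValidAddition/IsValidSubtraction.

#include <cassert>
#include <cstdint>
#include <limits>

// True iff a + b is representable in int64_t (mirrors the operator+= assertion quoted above).
static bool AdditionFits(int64_t a, int64_t b)
{
    return b == 0 ||
           (b > 0 && a <= std::numeric_limits<int64_t>::max() - b) ||
           (b < 0 && a >= std::numeric_limits<int64_t>::min() - b);
}

// True iff a - b is representable in int64_t (mirrors the operator-= assertion quoted above).
static bool SubtractionFits(int64_t a, int64_t b)
{
    return b == 0 ||
           (b > 0 && a >= std::numeric_limits<int64_t>::min() + b) ||
           (b < 0 && a <= std::numeric_limits<int64_t>::max() + b);
}

int main()
{
    const int64_t x = std::numeric_limits<int64_t>::max();
    assert(!AdditionFits(x, 1));                          // would overflow, so the fuzz target skips it
    if (SubtractionFits(x, 1)) assert((x - 1) + 1 == x);  // the round-trip identity checked above
}
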
diff --git a/src/test/fuzz/strprintf.cpp b/src/test/fuzz/strprintf.cpp
new file mode 100644
index 0000000000..d5be1070bd
--- /dev/null
+++ b/src/test/fuzz/strprintf.cpp
@@ -0,0 +1,166 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <tinyformat.h>
+#include <util/strencodings.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const std::string format_string = fuzzed_data_provider.ConsumeRandomLengthString(64);
+
+ const int digits_in_format_specifier = std::count_if(format_string.begin(), format_string.end(), IsDigit);
+
+ // Avoid triggering the following crash bug:
+ // * strprintf("%987654321000000:", 1);
+ //
+ // Avoid triggering the following OOM bug:
+ // * strprintf("%.222222200000000$", 1.1);
+ //
+ // Upstream bug report: https://github.com/c42f/tinyformat/issues/70
+ if (format_string.find('%') != std::string::npos && digits_in_format_specifier >= 7) {
+ return;
+ }
+
+ // Avoid triggering the following crash bug:
+ // * strprintf("%1$*1$*", -11111111);
+ //
+ // Upstream bug report: https://github.com/c42f/tinyformat/issues/70
+ if (format_string.find('%') != std::string::npos && format_string.find('$') != std::string::npos && format_string.find('*') != std::string::npos && digits_in_format_specifier > 0) {
+ return;
+ }
+
+ // Avoid triggering the following crash bug:
+ // * strprintf("%.1s", (char*)nullptr);
+ //
+ // (void)strprintf(format_string, (char*)nullptr);
+ //
+ // Upstream bug report: https://github.com/c42f/tinyformat/issues/70
+
+ try {
+ (void)strprintf(format_string, (signed char*)nullptr);
+ } catch (const tinyformat::format_error&) {
+ }
+ try {
+ (void)strprintf(format_string, (unsigned char*)nullptr);
+ } catch (const tinyformat::format_error&) {
+ }
+ try {
+ (void)strprintf(format_string, (void*)nullptr);
+ } catch (const tinyformat::format_error&) {
+ }
+ try {
+ (void)strprintf(format_string, (bool*)nullptr);
+ } catch (const tinyformat::format_error&) {
+ }
+ try {
+ (void)strprintf(format_string, (float*)nullptr);
+ } catch (const tinyformat::format_error&) {
+ }
+ try {
+ (void)strprintf(format_string, (double*)nullptr);
+ } catch (const tinyformat::format_error&) {
+ }
+ try {
+ (void)strprintf(format_string, (int16_t*)nullptr);
+ } catch (const tinyformat::format_error&) {
+ }
+ try {
+ (void)strprintf(format_string, (uint16_t*)nullptr);
+ } catch (const tinyformat::format_error&) {
+ }
+ try {
+ (void)strprintf(format_string, (int32_t*)nullptr);
+ } catch (const tinyformat::format_error&) {
+ }
+ try {
+ (void)strprintf(format_string, (uint32_t*)nullptr);
+ } catch (const tinyformat::format_error&) {
+ }
+ try {
+ (void)strprintf(format_string, (int64_t*)nullptr);
+ } catch (const tinyformat::format_error&) {
+ }
+ try {
+ (void)strprintf(format_string, (uint64_t*)nullptr);
+ } catch (const tinyformat::format_error&) {
+ }
+
+ try {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 5)) {
+ case 0:
+ (void)strprintf(format_string, fuzzed_data_provider.ConsumeRandomLengthString(32));
+ break;
+ case 1:
+ (void)strprintf(format_string, fuzzed_data_provider.ConsumeRandomLengthString(32).c_str());
+ break;
+ case 2:
+ (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<signed char>());
+ break;
+ case 3:
+ (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<unsigned char>());
+ break;
+ case 4:
+ (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<char>());
+ break;
+ case 5:
+ (void)strprintf(format_string, fuzzed_data_provider.ConsumeBool());
+ break;
+ }
+ } catch (const tinyformat::format_error&) {
+ }
+
+ if (format_string.find('%') != std::string::npos && format_string.find('c') != std::string::npos) {
+ // Avoid triggering the following:
+ // * strprintf("%c", 1.31783e+38);
+ // tinyformat.h:244:36: runtime error: 1.31783e+38 is outside the range of representable values of type 'char'
+ return;
+ }
+
+ if (format_string.find('%') != std::string::npos && format_string.find('*') != std::string::npos) {
+ // Avoid triggering the following:
+ // * strprintf("%*", -2.33527e+38);
+ // tinyformat.h:283:65: runtime error: -2.33527e+38 is outside the range of representable values of type 'int'
+ // * strprintf("%*", -2147483648);
+ // tinyformat.h:763:25: runtime error: negation of -2147483648 cannot be represented in type 'int'; cast to an unsigned type to negate this value to itself
+ return;
+ }
+
+ try {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 7)) {
+ case 0:
+ (void)strprintf(format_string, fuzzed_data_provider.ConsumeFloatingPoint<float>());
+ break;
+ case 1:
+ (void)strprintf(format_string, fuzzed_data_provider.ConsumeFloatingPoint<double>());
+ break;
+ case 2:
+ (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<int16_t>());
+ break;
+ case 3:
+ (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<uint16_t>());
+ break;
+ case 4:
+ (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<int32_t>());
+ break;
+ case 5:
+ (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<uint32_t>());
+ break;
+ case 6:
+ (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<int64_t>());
+ break;
+ case 7:
+ (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<uint64_t>());
+ break;
+ }
+ } catch (const tinyformat::format_error&) {
+ }
+}
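
The filters at the top of the strprintf target boil down to one heuristic: a '%' combined with a long run of digits (or a digit plus '$' and '*') points at the known tinyformat width/precision bugs, so the input is discarded before formatting. A standalone sketch of that digit-count check, assuming only the standard library; the threshold of 7 is the one used above.

#include <algorithm>
#include <cassert>
#include <cctype>
#include <string>

// Reject format strings whose specifiers carry enough digits to hit the
// upstream tinyformat crash/OOM reports referenced above.
static bool LooksRiskyFormat(const std::string& fmt)
{
    const auto digits = std::count_if(fmt.begin(), fmt.end(),
                                      [](unsigned char c) { return std::isdigit(c) != 0; });
    return fmt.find('%') != std::string::npos && digits >= 7;
}

int main()
{
    assert(LooksRiskyFormat("%987654321000000:")); // the crashing example from the comment above
    assert(!LooksRiskyFormat("%d"));               // ordinary specifiers pass through
}
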
diff --git a/src/test/fuzz/transaction.cpp b/src/test/fuzz/transaction.cpp
index fefafda36b..1ec69cc23d 100644
--- a/src/test/fuzz/transaction.cpp
+++ b/src/test/fuzz/transaction.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <chainparams.h>
#include <coins.h>
#include <consensus/tx_check.h>
#include <consensus/tx_verify.h>
@@ -13,12 +14,18 @@
#include <primitives/transaction.h>
#include <streams.h>
#include <test/fuzz/fuzz.h>
+#include <univalue.h>
#include <util/rbf.h>
#include <validation.h>
#include <version.h>
#include <cassert>
+void initialize()
+{
+ SelectParams(CBaseChainParams::REGTEST);
+}
+
void test_one_input(const std::vector<uint8_t>& buffer)
{
CDataStream ds(buffer, SER_NETWORK, INIT_PROTO_VERSION);
@@ -85,4 +92,21 @@ void test_one_input(const std::vector<uint8_t>& buffer)
(void)IsStandardTx(tx, reason);
(void)RecursiveDynamicUsage(tx);
(void)SignalsOptInRBF(tx);
+
+ CCoinsView coins_view;
+ const CCoinsViewCache coins_view_cache(&coins_view);
+ (void)AreInputsStandard(tx, coins_view_cache);
+ (void)IsWitnessStandard(tx, coins_view_cache);
+
+ UniValue u(UniValue::VOBJ);
+ // ValueFromAmount(i) is not defined when i == std::numeric_limits<int64_t>::min()
+ bool skip_tx_to_univ = false;
+ for (const CTxOut& txout : tx.vout) {
+ if (txout.nValue == std::numeric_limits<int64_t>::min()) {
+ skip_tx_to_univ = true;
+ }
+ }
+ if (!skip_tx_to_univ) {
+ TxToUniv(tx, /* hashBlock */ {}, u);
+ }
}
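
The skip_tx_to_univ loop above exists because ValueFromAmount cannot represent the single value INT64_MIN, so TxToUniv must not see it. The same scan can be expressed with std::any_of; a sketch over plain int64_t amounts, separate from the patch itself:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <vector>

// True if any amount is the one value the JSON conversion above has to avoid.
static bool HasUnrepresentableAmount(const std::vector<int64_t>& amounts)
{
    return std::any_of(amounts.begin(), amounts.end(), [](int64_t v) {
        return v == std::numeric_limits<int64_t>::min();
    });
}

int main()
{
    assert(HasUnrepresentableAmount({0, std::numeric_limits<int64_t>::min()}));
    assert(!HasUnrepresentableAmount({0, 1, 50000000}));
}
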
diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h
new file mode 100644
index 0000000000..2f4aa9ad2b
--- /dev/null
+++ b/src/test/fuzz/util.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_TEST_FUZZ_UTIL_H
+#define BITCOIN_TEST_FUZZ_UTIL_H
+
+#include <attributes.h>
+#include <optional.h>
+#include <script/script.h>
+#include <serialize.h>
+#include <streams.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <version.h>
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+NODISCARD inline std::vector<uint8_t> ConsumeRandomLengthByteVector(FuzzedDataProvider& fuzzed_data_provider, size_t max_length = 4096) noexcept
+{
+ const std::string s = fuzzed_data_provider.ConsumeRandomLengthString(max_length);
+ return {s.begin(), s.end()};
+}
+
+template <typename T>
+NODISCARD inline Optional<T> ConsumeDeserializable(FuzzedDataProvider& fuzzed_data_provider, size_t max_length = 4096) noexcept
+{
+ const std::vector<uint8_t> buffer = ConsumeRandomLengthByteVector(fuzzed_data_provider, max_length);
+ CDataStream ds{buffer, SER_NETWORK, INIT_PROTO_VERSION};
+ T obj;
+ try {
+ ds >> obj;
+ } catch (const std::ios_base::failure&) {
+ return nullopt;
+ }
+ return obj;
+}
+
+NODISCARD inline opcodetype ConsumeOpcodeType(FuzzedDataProvider& fuzzed_data_provider) noexcept
+{
+ return static_cast<opcodetype>(fuzzed_data_provider.ConsumeIntegralInRange<uint32_t>(0, MAX_OPCODE));
+}
+
+NODISCARD inline CScript ConsumeScript(FuzzedDataProvider& fuzzed_data_provider) noexcept
+{
+ const std::vector<uint8_t> b = ConsumeRandomLengthByteVector(fuzzed_data_provider);
+ return {b.begin(), b.end()};
+}
+
+NODISCARD inline CScriptNum ConsumeScriptNum(FuzzedDataProvider& fuzzed_data_provider) noexcept
+{
+ return CScriptNum{fuzzed_data_provider.ConsumeIntegral<int64_t>()};
+}
+
+#endif // BITCOIN_TEST_FUZZ_UTIL_H
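
ConsumeDeserializable above captures a recurring fuzzing pattern: feed attacker-controlled bytes to the deserializer and turn any parse failure into "no object" rather than a crash. A toy standalone analogue using std::optional and a fixed-size payload, with none of the project's serialization machinery:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <optional>
#include <vector>

struct ToyHeader {
    uint32_t version;
    uint32_t time;
};

// Parse-or-reject: short or malformed input yields std::nullopt instead of throwing.
static std::optional<ToyHeader> TryParse(const std::vector<uint8_t>& buf)
{
    if (buf.size() < 8) return std::nullopt;
    ToyHeader out;
    std::memcpy(&out.version, buf.data(), 4);
    std::memcpy(&out.time, buf.data() + 4, 4);
    return out;
}

int main()
{
    assert(!TryParse({0x01, 0x02}));              // too short: rejected, not crashed
    assert(TryParse(std::vector<uint8_t>(8, 0))); // well-formed enough to decode
}
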
diff --git a/src/test/getarg_tests.cpp b/src/test/getarg_tests.cpp
index 4c64d8c833..10fb05ca8a 100644
--- a/src/test/getarg_tests.cpp
+++ b/src/test/getarg_tests.cpp
@@ -183,4 +183,32 @@ BOOST_AUTO_TEST_CASE(boolargno)
BOOST_CHECK(gArgs.GetBoolArg("-foo", false));
}
+BOOST_AUTO_TEST_CASE(logargs)
+{
+ const auto okaylog_bool = std::make_pair("-okaylog-bool", ArgsManager::ALLOW_BOOL);
+ const auto okaylog_negbool = std::make_pair("-okaylog-negbool", ArgsManager::ALLOW_BOOL);
+ const auto okaylog = std::make_pair("-okaylog", ArgsManager::ALLOW_ANY);
+ const auto dontlog = std::make_pair("-dontlog", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE);
+ SetupArgs({okaylog_bool, okaylog_negbool, okaylog, dontlog});
+ ResetArgs("-okaylog-bool -nookaylog-negbool -okaylog=public -dontlog=private");
+
+ // Everything logged to debug.log will also be appended to str
+ std::string str;
+ auto print_connection = LogInstance().PushBackCallback(
+ [&str](const std::string& s) {
+ str += s;
+ });
+
+ // Log the arguments
+ gArgs.LogArgs();
+
+ LogInstance().DeleteCallback(print_connection);
+ // Check that what should appear does, and what shouldn't doesn't.
+ BOOST_CHECK(str.find("Command-line arg: okaylog-bool=\"\"") != std::string::npos);
+ BOOST_CHECK(str.find("Command-line arg: okaylog-negbool=false") != std::string::npos);
+ BOOST_CHECK(str.find("Command-line arg: okaylog=\"public\"") != std::string::npos);
+ BOOST_CHECK(str.find("dontlog=****") != std::string::npos);
+ BOOST_CHECK(str.find("private") == std::string::npos);
+}
+
BOOST_AUTO_TEST_SUITE_END()
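
The logargs test above leans on a simple capture trick: push a callback onto the logger, run the code under test, then grep the captured text. A toy logger showing the same register/notify/remove lifecycle, purely illustrative rather than Bitcoin's BCLog::Logger:

#include <cassert>
#include <functional>
#include <list>
#include <string>

class ToyLogger {
public:
    using Callback = std::function<void(const std::string&)>;
    using Handle = std::list<Callback>::iterator;

    Handle PushBackCallback(Callback f)
    {
        m_callbacks.push_back(std::move(f));
        return --m_callbacks.end();
    }
    void DeleteCallback(Handle h) { m_callbacks.erase(h); }
    void Log(const std::string& s)
    {
        for (const auto& cb : m_callbacks) cb(s);
    }

private:
    std::list<Callback> m_callbacks;
};

int main()
{
    ToyLogger log;
    std::string captured;
    const auto handle = log.PushBackCallback([&captured](const std::string& s) { captured += s; });
    log.Log("Command-line arg: okaylog=\"public\"\n");
    log.DeleteCallback(handle);
    assert(captured.find("okaylog=\"public\"") != std::string::npos);
}
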
diff --git a/src/test/main.cpp b/src/test/main.cpp
index ff3f36b561..e6529949e2 100644
--- a/src/test/main.cpp
+++ b/src/test/main.cpp
@@ -2,6 +2,21 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+/**
+ * See https://www.boost.org/doc/libs/1_71_0/libs/test/doc/html/boost_test/utf_reference/link_references/link_boost_test_module_macro.html
+ */
#define BOOST_TEST_MODULE Bitcoin Core Test Suite
#include <boost/test/unit_test.hpp>
+
+#include <test/util/setup_common.h>
+
+/** Redirect debug log to boost log */
+const std::function<void(const std::string&)> G_TEST_LOG_FUN = [](const std::string& s) {
+ if (s.back() == '\n') {
+ // boost will insert the new line
+ BOOST_TEST_MESSAGE(s.substr(0, s.size() - 1));
+ } else {
+ BOOST_TEST_MESSAGE(s);
+ }
+};
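
G_TEST_LOG_FUN above trims a single trailing newline because BOOST_TEST_MESSAGE adds its own. The same handling isolated into a plain helper, as a sketch:

#include <cassert>
#include <string>

// Drop at most one trailing '\n'; the logging sink re-adds its own line break.
static std::string StripOneTrailingNewline(const std::string& s)
{
    if (!s.empty() && s.back() == '\n') return s.substr(0, s.size() - 1);
    return s;
}

int main()
{
    assert(StripOneTrailingNewline("hello\n") == "hello");
    assert(StripOneTrailingNewline("hello") == "hello");
}
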
diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp
index daf7fea6ad..cb1ef5dcf3 100644
--- a/src/test/net_tests.cpp
+++ b/src/test/net_tests.cpp
@@ -99,6 +99,8 @@ BOOST_AUTO_TEST_CASE(caddrdb_read)
BOOST_CHECK(Lookup("250.7.1.1", addr1, 8333, false));
BOOST_CHECK(Lookup("250.7.2.2", addr2, 9999, false));
BOOST_CHECK(Lookup("250.7.3.3", addr3, 9999, false));
+ BOOST_CHECK(Lookup(std::string("250.7.3.3", 9), addr3, 9999, false));
+ BOOST_CHECK(!Lookup(std::string("250.7.3.3\0example.com", 21), addr3, 9999, false));
// Add three addresses to new table.
CService source;
diff --git a/src/test/netbase_tests.cpp b/src/test/netbase_tests.cpp
index 481dedc356..9730b40580 100644
--- a/src/test/netbase_tests.cpp
+++ b/src/test/netbase_tests.cpp
@@ -13,21 +13,21 @@
BOOST_FIXTURE_TEST_SUITE(netbase_tests, BasicTestingSetup)
-static CNetAddr ResolveIP(const char* ip)
+static CNetAddr ResolveIP(const std::string& ip)
{
CNetAddr addr;
LookupHost(ip, addr, false);
return addr;
}
-static CSubNet ResolveSubNet(const char* subnet)
+static CSubNet ResolveSubNet(const std::string& subnet)
{
CSubNet ret;
LookupSubNet(subnet, ret);
return ret;
}
-static CNetAddr CreateInternal(const char* host)
+static CNetAddr CreateInternal(const std::string& host)
{
CNetAddr addr;
addr.SetInternal(host);
@@ -105,7 +105,7 @@ BOOST_AUTO_TEST_CASE(netbase_splithost)
bool static TestParse(std::string src, std::string canon)
{
- CService addr(LookupNumeric(src.c_str(), 65535));
+ CService addr(LookupNumeric(src, 65535));
return canon == addr.ToString();
}
@@ -127,7 +127,6 @@ BOOST_AUTO_TEST_CASE(netbase_lookupnumeric)
BOOST_AUTO_TEST_CASE(onioncat_test)
{
-
// values from https://web.archive.org/web/20121122003543/http://www.cypherpunk.at/onioncat/wiki/OnionCat
CNetAddr addr1(ResolveIP("5wyqrzbvrdsumnok.onion"));
CNetAddr addr2(ResolveIP("FD87:D87E:EB43:edb1:8e4:3588:e546:35ca"));
@@ -287,23 +286,23 @@ BOOST_AUTO_TEST_CASE(subnet_test)
BOOST_AUTO_TEST_CASE(netbase_getgroup)
{
-
- BOOST_CHECK(ResolveIP("127.0.0.1").GetGroup() == std::vector<unsigned char>({0})); // Local -> !Routable()
- BOOST_CHECK(ResolveIP("257.0.0.1").GetGroup() == std::vector<unsigned char>({0})); // !Valid -> !Routable()
- BOOST_CHECK(ResolveIP("10.0.0.1").GetGroup() == std::vector<unsigned char>({0})); // RFC1918 -> !Routable()
- BOOST_CHECK(ResolveIP("169.254.1.1").GetGroup() == std::vector<unsigned char>({0})); // RFC3927 -> !Routable()
- BOOST_CHECK(ResolveIP("1.2.3.4").GetGroup() == std::vector<unsigned char>({(unsigned char)NET_IPV4, 1, 2})); // IPv4
- BOOST_CHECK(ResolveIP("::FFFF:0:102:304").GetGroup() == std::vector<unsigned char>({(unsigned char)NET_IPV4, 1, 2})); // RFC6145
- BOOST_CHECK(ResolveIP("64:FF9B::102:304").GetGroup() == std::vector<unsigned char>({(unsigned char)NET_IPV4, 1, 2})); // RFC6052
- BOOST_CHECK(ResolveIP("2002:102:304:9999:9999:9999:9999:9999").GetGroup() == std::vector<unsigned char>({(unsigned char)NET_IPV4, 1, 2})); // RFC3964
- BOOST_CHECK(ResolveIP("2001:0:9999:9999:9999:9999:FEFD:FCFB").GetGroup() == std::vector<unsigned char>({(unsigned char)NET_IPV4, 1, 2})); // RFC4380
- BOOST_CHECK(ResolveIP("FD87:D87E:EB43:edb1:8e4:3588:e546:35ca").GetGroup() == std::vector<unsigned char>({(unsigned char)NET_ONION, 239})); // Tor
- BOOST_CHECK(ResolveIP("2001:470:abcd:9999:9999:9999:9999:9999").GetGroup() == std::vector<unsigned char>({(unsigned char)NET_IPV6, 32, 1, 4, 112, 175})); //he.net
- BOOST_CHECK(ResolveIP("2001:2001:9999:9999:9999:9999:9999:9999").GetGroup() == std::vector<unsigned char>({(unsigned char)NET_IPV6, 32, 1, 32, 1})); //IPv6
+ std::vector<bool> asmap; // use /16
+ BOOST_CHECK(ResolveIP("127.0.0.1").GetGroup(asmap) == std::vector<unsigned char>({0})); // Local -> !Routable()
+ BOOST_CHECK(ResolveIP("257.0.0.1").GetGroup(asmap) == std::vector<unsigned char>({0})); // !Valid -> !Routable()
+ BOOST_CHECK(ResolveIP("10.0.0.1").GetGroup(asmap) == std::vector<unsigned char>({0})); // RFC1918 -> !Routable()
+ BOOST_CHECK(ResolveIP("169.254.1.1").GetGroup(asmap) == std::vector<unsigned char>({0})); // RFC3927 -> !Routable()
+ BOOST_CHECK(ResolveIP("1.2.3.4").GetGroup(asmap) == std::vector<unsigned char>({(unsigned char)NET_IPV4, 1, 2})); // IPv4
+ BOOST_CHECK(ResolveIP("::FFFF:0:102:304").GetGroup(asmap) == std::vector<unsigned char>({(unsigned char)NET_IPV4, 1, 2})); // RFC6145
+ BOOST_CHECK(ResolveIP("64:FF9B::102:304").GetGroup(asmap) == std::vector<unsigned char>({(unsigned char)NET_IPV4, 1, 2})); // RFC6052
+ BOOST_CHECK(ResolveIP("2002:102:304:9999:9999:9999:9999:9999").GetGroup(asmap) == std::vector<unsigned char>({(unsigned char)NET_IPV4, 1, 2})); // RFC3964
+ BOOST_CHECK(ResolveIP("2001:0:9999:9999:9999:9999:FEFD:FCFB").GetGroup(asmap) == std::vector<unsigned char>({(unsigned char)NET_IPV4, 1, 2})); // RFC4380
+ BOOST_CHECK(ResolveIP("FD87:D87E:EB43:edb1:8e4:3588:e546:35ca").GetGroup(asmap) == std::vector<unsigned char>({(unsigned char)NET_ONION, 239})); // Tor
+ BOOST_CHECK(ResolveIP("2001:470:abcd:9999:9999:9999:9999:9999").GetGroup(asmap) == std::vector<unsigned char>({(unsigned char)NET_IPV6, 32, 1, 4, 112, 175})); //he.net
+ BOOST_CHECK(ResolveIP("2001:2001:9999:9999:9999:9999:9999:9999").GetGroup(asmap) == std::vector<unsigned char>({(unsigned char)NET_IPV6, 32, 1, 32, 1})); //IPv6
// baz.net sha256 hash: 12929400eb4607c4ac075f087167e75286b179c693eb059a01774b864e8fe505
std::vector<unsigned char> internal_group = {NET_INTERNAL, 0x12, 0x92, 0x94, 0x00, 0xeb, 0x46, 0x07, 0xc4, 0xac, 0x07};
- BOOST_CHECK(CreateInternal("baz.net").GetGroup() == internal_group);
+ BOOST_CHECK(CreateInternal("baz.net").GetGroup(asmap) == internal_group);
}
BOOST_AUTO_TEST_CASE(netbase_parsenetwork)
@@ -402,4 +401,22 @@ BOOST_AUTO_TEST_CASE(netpermissions_test)
BOOST_CHECK(std::find(strings.begin(), strings.end(), "mempool") != strings.end());
}
+BOOST_AUTO_TEST_CASE(netbase_dont_resolve_strings_with_embedded_nul_characters)
+{
+ CNetAddr addr;
+ BOOST_CHECK(LookupHost(std::string("127.0.0.1", 9), addr, false));
+ BOOST_CHECK(!LookupHost(std::string("127.0.0.1\0", 10), addr, false));
+ BOOST_CHECK(!LookupHost(std::string("127.0.0.1\0example.com", 21), addr, false));
+ BOOST_CHECK(!LookupHost(std::string("127.0.0.1\0example.com\0", 22), addr, false));
+ CSubNet ret;
+ BOOST_CHECK(LookupSubNet(std::string("1.2.3.0/24", 10), ret));
+ BOOST_CHECK(!LookupSubNet(std::string("1.2.3.0/24\0", 11), ret));
+ BOOST_CHECK(!LookupSubNet(std::string("1.2.3.0/24\0example.com", 22), ret));
+ BOOST_CHECK(!LookupSubNet(std::string("1.2.3.0/24\0example.com\0", 23), ret));
+ BOOST_CHECK(LookupSubNet(std::string("5wyqrzbvrdsumnok.onion", 22), ret));
+ BOOST_CHECK(!LookupSubNet(std::string("5wyqrzbvrdsumnok.onion\0", 23), ret));
+ BOOST_CHECK(!LookupSubNet(std::string("5wyqrzbvrdsumnok.onion\0example.com", 34), ret));
+ BOOST_CHECK(!LookupSubNet(std::string("5wyqrzbvrdsumnok.onion\0example.com\0", 35), ret));
+}
+
BOOST_AUTO_TEST_SUITE_END()
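
The new netbase cases hinge on the fact that a std::string may carry an embedded '\0': any resolver that falls back to c_str() would silently truncate "127.0.0.1\0example.com" to "127.0.0.1" and resolve the wrong name. A minimal check in the same spirit; the project keeps its own helper for this in its string utilities, so the function below is only an illustration.

#include <cassert>
#include <string>

// True when the string contains no interior NUL byte and is therefore safe to
// hand to C-string based resolver APIs without truncation.
static bool HasNoEmbeddedNul(const std::string& s)
{
    return s.find('\0') == std::string::npos;
}

int main()
{
    assert(HasNoEmbeddedNul(std::string("127.0.0.1", 9)));
    assert(!HasNoEmbeddedNul(std::string("127.0.0.1\0example.com", 21)));
}
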
diff --git a/src/test/reverselock_tests.cpp b/src/test/reverselock_tests.cpp
index 532fe143ae..4e51b8c02a 100644
--- a/src/test/reverselock_tests.cpp
+++ b/src/test/reverselock_tests.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <reverselock.h>
+#include <sync.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
@@ -11,21 +11,50 @@ BOOST_FIXTURE_TEST_SUITE(reverselock_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(reverselock_basics)
{
- boost::mutex mutex;
- boost::unique_lock<boost::mutex> lock(mutex);
+ Mutex mutex;
+ WAIT_LOCK(mutex, lock);
BOOST_CHECK(lock.owns_lock());
{
- reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
+ REVERSE_LOCK(lock);
BOOST_CHECK(!lock.owns_lock());
}
BOOST_CHECK(lock.owns_lock());
}
+BOOST_AUTO_TEST_CASE(reverselock_multiple)
+{
+ Mutex mutex2;
+ Mutex mutex;
+ WAIT_LOCK(mutex2, lock2);
+ WAIT_LOCK(mutex, lock);
+
+ // Make sure undoing two locks succeeds
+ {
+ REVERSE_LOCK(lock);
+ BOOST_CHECK(!lock.owns_lock());
+ REVERSE_LOCK(lock2);
+ BOOST_CHECK(!lock2.owns_lock());
+ }
+ BOOST_CHECK(lock.owns_lock());
+ BOOST_CHECK(lock2.owns_lock());
+}
+
BOOST_AUTO_TEST_CASE(reverselock_errors)
{
- boost::mutex mutex;
- boost::unique_lock<boost::mutex> lock(mutex);
+ Mutex mutex2;
+ Mutex mutex;
+ WAIT_LOCK(mutex2, lock2);
+ WAIT_LOCK(mutex, lock);
+
+#ifdef DEBUG_LOCKORDER
+ // Make sure trying to reverse lock a previous lock fails
+ try {
+ REVERSE_LOCK(lock2);
+ BOOST_CHECK(false); // REVERSE_LOCK(lock2) succeeded
+ } catch(...) { }
+ BOOST_CHECK(lock2.owns_lock());
+#endif
// Make sure trying to reverse lock an unlocked lock fails
lock.unlock();
@@ -34,7 +63,7 @@ BOOST_AUTO_TEST_CASE(reverselock_errors)
bool failed = false;
try {
- reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
+ REVERSE_LOCK(lock);
} catch(...) {
failed = true;
}
@@ -49,7 +78,7 @@ BOOST_AUTO_TEST_CASE(reverselock_errors)
lock.lock();
BOOST_CHECK(lock.owns_lock());
{
- reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
+ REVERSE_LOCK(lock);
BOOST_CHECK(!lock.owns_lock());
}
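
REVERSE_LOCK temporarily releases a held lock for the enclosing scope and re-acquires it on exit. A bare-bones RAII sketch over std::unique_lock conveys the idea; the real macro additionally hooks into the DEBUG_LOCKORDER machinery, which is what the reverselock_errors test exercises.

#include <cassert>
#include <mutex>

// Unlock on construction, re-lock on destruction.
template <typename Lock>
class ReverseLock {
    Lock& m_lock;
public:
    explicit ReverseLock(Lock& lock) : m_lock(lock) { m_lock.unlock(); }
    ~ReverseLock() { m_lock.lock(); }
    ReverseLock(const ReverseLock&) = delete;
    ReverseLock& operator=(const ReverseLock&) = delete;
};

int main()
{
    std::mutex m;
    std::unique_lock<std::mutex> lock(m);
    assert(lock.owns_lock());
    {
        ReverseLock<std::unique_lock<std::mutex>> rev(lock);
        assert(!lock.owns_lock()); // temporarily released
    }
    assert(lock.owns_lock());      // re-acquired on scope exit
}
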
diff --git a/src/test/scheduler_tests.cpp b/src/test/scheduler_tests.cpp
index b292d5b0d0..7d26840b73 100644
--- a/src/test/scheduler_tests.cpp
+++ b/src/test/scheduler_tests.cpp
@@ -4,39 +4,26 @@
#include <random.h>
#include <scheduler.h>
-
-#include <test/util/setup_common.h>
+#include <util/time.h>
#include <boost/thread.hpp>
#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_SUITE(scheduler_tests)
-static void microTask(CScheduler& s, boost::mutex& mutex, int& counter, int delta, boost::chrono::system_clock::time_point rescheduleTime)
+static void microTask(CScheduler& s, boost::mutex& mutex, int& counter, int delta, std::chrono::system_clock::time_point rescheduleTime)
{
{
boost::unique_lock<boost::mutex> lock(mutex);
counter += delta;
}
- boost::chrono::system_clock::time_point noTime = boost::chrono::system_clock::time_point::min();
+ std::chrono::system_clock::time_point noTime = std::chrono::system_clock::time_point::min();
if (rescheduleTime != noTime) {
CScheduler::Function f = std::bind(&microTask, std::ref(s), std::ref(mutex), std::ref(counter), -delta + 1, noTime);
s.schedule(f, rescheduleTime);
}
}
-static void MicroSleep(uint64_t n)
-{
-#if defined(HAVE_WORKING_BOOST_SLEEP_FOR)
- boost::this_thread::sleep_for(boost::chrono::microseconds(n));
-#elif defined(HAVE_WORKING_BOOST_SLEEP)
- boost::this_thread::sleep(boost::posix_time::microseconds(n));
-#else
- //should never get here
- #error missing boost sleep implementation
-#endif
-}
-
BOOST_AUTO_TEST_CASE(manythreads)
{
// Stress test: hundreds of microsecond-scheduled tasks,
@@ -58,15 +45,15 @@ BOOST_AUTO_TEST_CASE(manythreads)
auto randomMsec = [](FastRandomContext& rc) -> int { return -11 + (int)rc.randrange(1012); }; // [-11, 1000]
auto randomDelta = [](FastRandomContext& rc) -> int { return -1000 + (int)rc.randrange(2001); }; // [-1000, 1000]
- boost::chrono::system_clock::time_point start = boost::chrono::system_clock::now();
- boost::chrono::system_clock::time_point now = start;
- boost::chrono::system_clock::time_point first, last;
+ std::chrono::system_clock::time_point start = std::chrono::system_clock::now();
+ std::chrono::system_clock::time_point now = start;
+ std::chrono::system_clock::time_point first, last;
size_t nTasks = microTasks.getQueueInfo(first, last);
BOOST_CHECK(nTasks == 0);
for (int i = 0; i < 100; ++i) {
- boost::chrono::system_clock::time_point t = now + boost::chrono::microseconds(randomMsec(rng));
- boost::chrono::system_clock::time_point tReschedule = now + boost::chrono::microseconds(500 + randomMsec(rng));
+ std::chrono::system_clock::time_point t = now + std::chrono::microseconds(randomMsec(rng));
+ std::chrono::system_clock::time_point tReschedule = now + std::chrono::microseconds(500 + randomMsec(rng));
int whichCounter = zeroToNine(rng);
CScheduler::Function f = std::bind(&microTask, std::ref(microTasks),
std::ref(counterMutex[whichCounter]), std::ref(counter[whichCounter]),
@@ -83,15 +70,15 @@ BOOST_AUTO_TEST_CASE(manythreads)
for (int i = 0; i < 5; i++)
microThreads.create_thread(std::bind(&CScheduler::serviceQueue, &microTasks));
- MicroSleep(600);
- now = boost::chrono::system_clock::now();
+ UninterruptibleSleep(std::chrono::microseconds{600});
+ now = std::chrono::system_clock::now();
// More threads and more tasks:
for (int i = 0; i < 5; i++)
microThreads.create_thread(std::bind(&CScheduler::serviceQueue, &microTasks));
for (int i = 0; i < 100; i++) {
- boost::chrono::system_clock::time_point t = now + boost::chrono::microseconds(randomMsec(rng));
- boost::chrono::system_clock::time_point tReschedule = now + boost::chrono::microseconds(500 + randomMsec(rng));
+ std::chrono::system_clock::time_point t = now + std::chrono::microseconds(randomMsec(rng));
+ std::chrono::system_clock::time_point tReschedule = now + std::chrono::microseconds(500 + randomMsec(rng));
int whichCounter = zeroToNine(rng);
CScheduler::Function f = std::bind(&microTask, std::ref(microTasks),
std::ref(counterMutex[whichCounter]), std::ref(counter[whichCounter]),
@@ -155,4 +142,45 @@ BOOST_AUTO_TEST_CASE(singlethreadedscheduler_ordered)
BOOST_CHECK_EQUAL(counter2, 100);
}
+BOOST_AUTO_TEST_CASE(mockforward)
+{
+ CScheduler scheduler;
+
+ int counter{0};
+ CScheduler::Function dummy = [&counter]{counter++;};
+
+ // schedule jobs for 2, 5 & 8 minutes into the future
+ int64_t min_in_milli = 60*1000;
+ scheduler.scheduleFromNow(dummy, 2*min_in_milli);
+ scheduler.scheduleFromNow(dummy, 5*min_in_milli);
+ scheduler.scheduleFromNow(dummy, 8*min_in_milli);
+
+ // check taskQueue
+ std::chrono::system_clock::time_point first, last;
+ size_t num_tasks = scheduler.getQueueInfo(first, last);
+ BOOST_CHECK_EQUAL(num_tasks, 3ul);
+
+ std::thread scheduler_thread([&]() { scheduler.serviceQueue(); });
+
+ // bump the scheduler forward 5 minutes
+ scheduler.MockForward(std::chrono::seconds(5*60));
+
+ // ensure the scheduler has a chance to process all tasks queued to run before 1 ms from now.
+ scheduler.scheduleFromNow([&scheduler]{ scheduler.stop(false); }, 1);
+ scheduler_thread.join();
+
+ // check that the queue only has one job remaining
+ num_tasks = scheduler.getQueueInfo(first, last);
+ BOOST_CHECK_EQUAL(num_tasks, 1ul);
+
+ // check that the dummy function actually ran
+ BOOST_CHECK_EQUAL(counter, 2);
+
+ // check that the time of the remaining job has been updated
+ std::chrono::system_clock::time_point now = std::chrono::system_clock::now();
+ int delta = std::chrono::duration_cast<std::chrono::seconds>(first - now).count();
+ // should be between 2 & 3 minutes from now
+ BOOST_CHECK(delta > 2*60 && delta < 3*60);
+}
+
BOOST_AUTO_TEST_SUITE_END()
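
The mockforward test relies on MockForward pulling every queued task's scheduled time earlier by the given delta, so jobs that were minutes away become due immediately without any real waiting. A toy scheduler illustrating that shift (not the project's CScheduler):

#include <cassert>
#include <chrono>
#include <functional>
#include <map>

using Clock = std::chrono::system_clock;

struct ToyScheduler {
    std::multimap<Clock::time_point, std::function<void()>> queue;

    void ScheduleFromNow(std::function<void()> f, std::chrono::milliseconds delta)
    {
        queue.emplace(Clock::now() + delta, std::move(f));
    }

    // Shift every queued task earlier by delta instead of waiting for real time to pass.
    void MockForward(std::chrono::seconds delta)
    {
        std::multimap<Clock::time_point, std::function<void()>> shifted;
        for (auto& [when, fn] : queue) shifted.emplace(when - delta, std::move(fn));
        queue = std::move(shifted);
    }

    // Run everything that is due by now; returns how many tasks ran.
    int RunDue()
    {
        int ran = 0;
        const auto now = Clock::now();
        while (!queue.empty() && queue.begin()->first <= now) {
            queue.begin()->second();
            queue.erase(queue.begin());
            ++ran;
        }
        return ran;
    }
};

int main()
{
    ToyScheduler s;
    int counter = 0;
    auto bump = [&counter] { ++counter; };
    s.ScheduleFromNow(bump, std::chrono::minutes(2));
    s.ScheduleFromNow(bump, std::chrono::minutes(5));
    s.ScheduleFromNow(bump, std::chrono::minutes(8));

    s.MockForward(std::chrono::minutes(5)); // as in the test: jump 5 minutes ahead
    assert(s.RunDue() == 2);                // the 2- and 5-minute jobs fire
    assert(s.queue.size() == 1);            // the 8-minute job remains, now ~3 minutes out
    assert(counter == 2);
}
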
diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp
index 303bb9b88c..ea600499ca 100644
--- a/src/test/serialize_tests.cpp
+++ b/src/test/serialize_tests.cpp
@@ -182,8 +182,8 @@ BOOST_AUTO_TEST_CASE(varints)
CDataStream ss(SER_DISK, 0);
CDataStream::size_type size = 0;
for (int i = 0; i < 100000; i++) {
- ss << VARINT(i, VarIntMode::NONNEGATIVE_SIGNED);
- size += ::GetSerializeSize(VARINT(i, VarIntMode::NONNEGATIVE_SIGNED), 0);
+ ss << VARINT_MODE(i, VarIntMode::NONNEGATIVE_SIGNED);
+ size += ::GetSerializeSize(VARINT_MODE(i, VarIntMode::NONNEGATIVE_SIGNED), 0);
BOOST_CHECK(size == ss.size());
}
@@ -196,7 +196,7 @@ BOOST_AUTO_TEST_CASE(varints)
// decode
for (int i = 0; i < 100000; i++) {
int j = -1;
- ss >> VARINT(j, VarIntMode::NONNEGATIVE_SIGNED);
+ ss >> VARINT_MODE(j, VarIntMode::NONNEGATIVE_SIGNED);
BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i);
}
@@ -210,21 +210,21 @@ BOOST_AUTO_TEST_CASE(varints)
BOOST_AUTO_TEST_CASE(varints_bitpatterns)
{
CDataStream ss(SER_DISK, 0);
- ss << VARINT(0, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "00"); ss.clear();
- ss << VARINT(0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear();
- ss << VARINT((int8_t)0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear();
- ss << VARINT(0x80, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "8000"); ss.clear();
+ ss << VARINT_MODE(0, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "00"); ss.clear();
+ ss << VARINT_MODE(0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear();
+ ss << VARINT_MODE((int8_t)0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear();
+ ss << VARINT_MODE(0x80, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "8000"); ss.clear();
ss << VARINT((uint8_t)0x80); BOOST_CHECK_EQUAL(HexStr(ss), "8000"); ss.clear();
- ss << VARINT(0x1234, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "a334"); ss.clear();
- ss << VARINT((int16_t)0x1234, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "a334"); ss.clear();
- ss << VARINT(0xffff, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "82fe7f"); ss.clear();
+ ss << VARINT_MODE(0x1234, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "a334"); ss.clear();
+ ss << VARINT_MODE((int16_t)0x1234, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "a334"); ss.clear();
+ ss << VARINT_MODE(0xffff, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "82fe7f"); ss.clear();
ss << VARINT((uint16_t)0xffff); BOOST_CHECK_EQUAL(HexStr(ss), "82fe7f"); ss.clear();
- ss << VARINT(0x123456, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "c7e756"); ss.clear();
- ss << VARINT((int32_t)0x123456, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "c7e756"); ss.clear();
+ ss << VARINT_MODE(0x123456, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "c7e756"); ss.clear();
+ ss << VARINT_MODE((int32_t)0x123456, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "c7e756"); ss.clear();
ss << VARINT(0x80123456U); BOOST_CHECK_EQUAL(HexStr(ss), "86ffc7e756"); ss.clear();
ss << VARINT((uint32_t)0x80123456U); BOOST_CHECK_EQUAL(HexStr(ss), "86ffc7e756"); ss.clear();
ss << VARINT(0xffffffff); BOOST_CHECK_EQUAL(HexStr(ss), "8efefefe7f"); ss.clear();
- ss << VARINT(0x7fffffffffffffffLL, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "fefefefefefefefe7f"); ss.clear();
+ ss << VARINT_MODE(0x7fffffffffffffffLL, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "fefefefefefefefe7f"); ss.clear();
ss << VARINT(0xffffffffffffffffULL); BOOST_CHECK_EQUAL(HexStr(ss), "80fefefefefefefefe7f"); ss.clear();
}
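
The VARINT byte patterns asserted above come from a 7-bits-per-byte, most-significant-group-first encoding in which every byte except the last sets its high bit and (n >> 7) - 1 is applied between groups, so no value has two encodings. A standalone encoder reproducing the hex strings from the test; this is a sketch written from those vectors, not the serializer itself.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// Encode n as a hex string using the variable-length scheme exercised above.
static std::string EncodeVarIntHex(uint64_t n)
{
    std::vector<uint8_t> tmp;
    while (true) {
        // Last-emitted group (first pushed) has no continuation bit; all others do.
        tmp.push_back((n & 0x7F) | (tmp.empty() ? 0x00 : 0x80));
        if (n <= 0x7F) break;
        n = (n >> 7) - 1;
    }
    std::string hex;
    for (auto it = tmp.rbegin(); it != tmp.rend(); ++it) {
        char buf[3];
        std::snprintf(buf, sizeof(buf), "%02x", static_cast<unsigned>(*it));
        hex += buf;
    }
    return hex;
}

int main()
{
    assert(EncodeVarIntHex(0x7f) == "7f");
    assert(EncodeVarIntHex(0x80) == "8000");
    assert(EncodeVarIntHex(0x1234) == "a334");
    assert(EncodeVarIntHex(0xffff) == "82fe7f");
}
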
diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp
index 2c56bbdbb0..bcc4a46873 100644
--- a/src/test/sighash_tests.cpp
+++ b/src/test/sighash_tests.cpp
@@ -26,10 +26,9 @@ extern UniValue read_json(const std::string& jsondata);
// Old script.cpp SignatureHash function
uint256 static SignatureHashOld(CScript scriptCode, const CTransaction& txTo, unsigned int nIn, int nHashType)
{
- static const uint256 one(uint256S("0000000000000000000000000000000000000000000000000000000000000001"));
if (nIn >= txTo.vin.size())
{
- return one;
+ return UINT256_ONE();
}
CMutableTransaction txTmp(txTo);
@@ -59,7 +58,7 @@ uint256 static SignatureHashOld(CScript scriptCode, const CTransaction& txTo, un
unsigned int nOut = nIn;
if (nOut >= txTmp.vout.size())
{
- return one;
+ return UINT256_ONE();
}
txTmp.vout.resize(nOut+1);
for (unsigned int i = 0; i < nOut; i++)
diff --git a/src/test/sync_tests.cpp b/src/test/sync_tests.cpp
index 188e9986ff..5c6c2ee38e 100644
--- a/src/test/sync_tests.cpp
+++ b/src/test/sync_tests.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2019 The Bitcoin Core developers
+// Copyright (c) 2012-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -38,7 +38,7 @@ BOOST_AUTO_TEST_CASE(potential_deadlock_detected)
g_debug_lockorder_abort = false;
#endif
- CCriticalSection rmutex1, rmutex2;
+ RecursiveMutex rmutex1, rmutex2;
TestPotentialDeadLockDetected(rmutex1, rmutex2);
Mutex mutex1, mutex2;
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index 0939803953..44d4e0890a 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -22,6 +22,7 @@
#include <script/standard.h>
#include <streams.h>
#include <util/strencodings.h>
+#include <test/util/transaction_utils.h>
#include <map>
#include <string>
@@ -122,10 +123,9 @@ BOOST_AUTO_TEST_CASE(tx_valid)
std::map<COutPoint, int64_t> mapprevOutValues;
UniValue inputs = test[0].get_array();
bool fValid = true;
- for (unsigned int inpIdx = 0; inpIdx < inputs.size(); inpIdx++) {
- const UniValue& input = inputs[inpIdx];
- if (!input.isArray())
- {
+ for (unsigned int inpIdx = 0; inpIdx < inputs.size(); inpIdx++) {
+ const UniValue& input = inputs[inpIdx];
+ if (!input.isArray()) {
fValid = false;
break;
}
@@ -209,10 +209,9 @@ BOOST_AUTO_TEST_CASE(tx_invalid)
std::map<COutPoint, int64_t> mapprevOutValues;
UniValue inputs = test[0].get_array();
bool fValid = true;
- for (unsigned int inpIdx = 0; inpIdx < inputs.size(); inpIdx++) {
- const UniValue& input = inputs[inpIdx];
- if (!input.isArray())
- {
+ for (unsigned int inpIdx = 0; inpIdx < inputs.size(); inpIdx++) {
+ const UniValue& input = inputs[inpIdx];
+ if (!input.isArray()) {
fValid = false;
break;
}
@@ -282,50 +281,13 @@ BOOST_AUTO_TEST_CASE(basic_transaction_tests)
BOOST_CHECK_MESSAGE(!CheckTransaction(CTransaction(tx), state) || !state.IsValid(), "Transaction with duplicate txins should be invalid.");
}
-//
-// Helper: create two dummy transactions, each with
-// two outputs. The first has 11 and 50 CENT outputs
-// paid to a TX_PUBKEY, the second 21 and 22 CENT outputs
-// paid to a TX_PUBKEYHASH.
-//
-static std::vector<CMutableTransaction>
-SetupDummyInputs(FillableSigningProvider& keystoreRet, CCoinsViewCache& coinsRet)
-{
- std::vector<CMutableTransaction> dummyTransactions;
- dummyTransactions.resize(2);
-
- // Add some keys to the keystore:
- CKey key[4];
- for (int i = 0; i < 4; i++)
- {
- key[i].MakeNewKey(i % 2);
- keystoreRet.AddKey(key[i]);
- }
-
- // Create some dummy input transactions
- dummyTransactions[0].vout.resize(2);
- dummyTransactions[0].vout[0].nValue = 11*CENT;
- dummyTransactions[0].vout[0].scriptPubKey << ToByteVector(key[0].GetPubKey()) << OP_CHECKSIG;
- dummyTransactions[0].vout[1].nValue = 50*CENT;
- dummyTransactions[0].vout[1].scriptPubKey << ToByteVector(key[1].GetPubKey()) << OP_CHECKSIG;
- AddCoins(coinsRet, CTransaction(dummyTransactions[0]), 0);
-
- dummyTransactions[1].vout.resize(2);
- dummyTransactions[1].vout[0].nValue = 21*CENT;
- dummyTransactions[1].vout[0].scriptPubKey = GetScriptForDestination(PKHash(key[2].GetPubKey()));
- dummyTransactions[1].vout[1].nValue = 22*CENT;
- dummyTransactions[1].vout[1].scriptPubKey = GetScriptForDestination(PKHash(key[3].GetPubKey()));
- AddCoins(coinsRet, CTransaction(dummyTransactions[1]), 0);
-
- return dummyTransactions;
-}
-
BOOST_AUTO_TEST_CASE(test_Get)
{
FillableSigningProvider keystore;
CCoinsView coinsDummy;
CCoinsViewCache coins(&coinsDummy);
- std::vector<CMutableTransaction> dummyTransactions = SetupDummyInputs(keystore, coins);
+ std::vector<CMutableTransaction> dummyTransactions =
+ SetupDummyInputs(keystore, coins, {11*CENT, 50*CENT, 21*CENT, 22*CENT});
CMutableTransaction t1;
t1.vin.resize(3);
@@ -685,7 +647,8 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
FillableSigningProvider keystore;
CCoinsView coinsDummy;
CCoinsViewCache coins(&coinsDummy);
- std::vector<CMutableTransaction> dummyTransactions = SetupDummyInputs(keystore, coins);
+ std::vector<CMutableTransaction> dummyTransactions =
+ SetupDummyInputs(keystore, coins, {11*CENT, 50*CENT, 21*CENT, 22*CENT});
CMutableTransaction t;
t.vin.resize(1);
@@ -821,9 +784,29 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
BOOST_CHECK_EQUAL(reason, "scriptsig-size");
+ // Check tx-size (non-standard if transaction weight is > MAX_STANDARD_TX_WEIGHT)
+ t.vin.clear();
+ t.vin.resize(2438); // size per input (empty scriptSig): 41 bytes
+ t.vout[0].scriptPubKey = CScript() << OP_RETURN << std::vector<unsigned char>(19, 0); // output size: 30 bytes
+ // tx header: 12 bytes => 48 vbytes
+ // 2438 inputs: 2438*41 = 99958 bytes => 399832 vbytes
+ // 1 output: 30 bytes => 120 vbytes
+ // ===============================
+ // total: 400000 vbytes
+ BOOST_CHECK_EQUAL(GetTransactionWeight(CTransaction(t)), 400000);
+ BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+
+ // increase output size by one byte, so we end up with 400004 vbytes
+ t.vout[0].scriptPubKey = CScript() << OP_RETURN << std::vector<unsigned char>(20, 0); // output size: 31 bytes
+ BOOST_CHECK_EQUAL(GetTransactionWeight(CTransaction(t)), 400004);
+ reason.clear();
+ BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
+ BOOST_CHECK_EQUAL(reason, "tx-size");
+
// Check bare multisig (standard if policy flag fIsBareMultisigStd is set)
fIsBareMultisigStd = true;
t.vout[0].scriptPubKey = GetScriptForMultisig(1, {key.GetPubKey()}); // simple 1-of-1
+ t.vin.resize(1);
t.vin[0].scriptSig = CScript() << std::vector<unsigned char>(65, 0);
BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
@@ -831,6 +814,7 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
reason.clear();
BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
BOOST_CHECK_EQUAL(reason, "bare-multisig");
+ fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
}
BOOST_AUTO_TEST_SUITE_END()
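
The 400000 figure in the tx-size check above is exact, which is why adding a single byte to the output tips the transaction over the limit. Spelling out the arithmetic, with the per-item sizes taken from the test's own comments and MAX_STANDARD_TX_WEIGHT equal to 400000:

#include <cassert>

int main()
{
    const int header_bytes = 12; // version + input/output counts + locktime, per the test comment
    const int input_bytes = 41;  // outpoint + empty scriptSig + sequence
    const int output_bytes = 30; // 8-byte value + OP_RETURN script with 19 data bytes
    const int num_inputs = 2438;

    const int total_bytes = header_bytes + num_inputs * input_bytes + output_bytes;
    const int weight = 4 * total_bytes; // no witness data, so weight = 4 * serialized size

    assert(total_bytes == 100000);
    assert(weight == 400000); // exactly the standardness limit, so the tx is still standard
}
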
diff --git a/src/test/txindex_tests.cpp b/src/test/txindex_tests.cpp
index 4b0214a15a..3550a02316 100644
--- a/src/test/txindex_tests.cpp
+++ b/src/test/txindex_tests.cpp
@@ -34,7 +34,7 @@ BOOST_FIXTURE_TEST_CASE(txindex_initial_sync, TestChain100Setup)
int64_t time_start = GetTimeMillis();
while (!txindex.BlockUntilSyncedToCurrentChain()) {
BOOST_REQUIRE(time_start + timeout_ms > GetTimeMillis());
- MilliSleep(100);
+ UninterruptibleSleep(std::chrono::milliseconds{100});
}
// Check that txindex excludes genesis block transactions.
@@ -70,6 +70,8 @@ BOOST_FIXTURE_TEST_CASE(txindex_initial_sync, TestChain100Setup)
// shutdown sequence (c.f. Shutdown() in init.cpp)
txindex.Stop();
+ // a txindex job may still be scheduled, so stop the scheduler before it is destroyed
+ m_node.scheduler->stop();
threadGroup.interrupt_all();
threadGroup.join_all();
diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp
index 77b4ba62be..7842594b80 100644
--- a/src/test/txvalidationcache_tests.cpp
+++ b/src/test/txvalidationcache_tests.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2019 The Bitcoin Core developers
+// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -15,7 +15,7 @@
bool CheckInputScripts(const CTransaction& tx, TxValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks);
-BOOST_AUTO_TEST_SUITE(tx_validationcache_tests)
+BOOST_AUTO_TEST_SUITE(txvalidationcache_tests)
BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup)
{
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 3bdf3485fa..ad3ff48860 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -13,6 +13,7 @@
#include <init.h>
#include <miner.h>
#include <net.h>
+#include <net_processing.h>
#include <noui.h>
#include <pow.h>
#include <rpc/blockchain.h>
@@ -25,7 +26,6 @@
#include <util/strencodings.h>
#include <util/time.h>
#include <util/translation.h>
-#include <util/validation.h>
#include <validation.h>
#include <validationinterface.h>
@@ -63,7 +63,7 @@ std::ostream& operator<<(std::ostream& os, const uint256& num)
}
BasicTestingSetup::BasicTestingSetup(const std::string& chainName)
- : m_path_root{fs::temp_directory_path() / "test_common_" PACKAGE_NAME / std::to_string(g_insecure_rand_ctx_temp_path.rand32())}
+ : m_path_root{fs::temp_directory_path() / "test_common_" PACKAGE_NAME / g_insecure_rand_ctx_temp_path.rand256().ToString()}
{
fs::create_directories(m_path_root);
gArgs.ForceSetArg("-datadir", m_path_root.string());
@@ -71,6 +71,7 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName)
SelectParams(chainName);
SeedInsecureRand();
gArgs.ForceSetArg("-printtoconsole", "0");
+ if (G_TEST_LOG_FUN) LogInstance().PushBackCallback(G_TEST_LOG_FUN);
InitLogging();
LogInstance().StartLogging();
SHA256AutoDetect();
@@ -102,10 +103,12 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha
g_rpc_node = &m_node;
RegisterAllCoreRPCCommands(tableRPC);
+ m_node.scheduler = MakeUnique<CScheduler>();
+
// We have to run a scheduler thread to prevent ActivateBestChain
// from blocking due to queue overrun.
- threadGroup.create_thread(std::bind(&CScheduler::serviceQueue, &scheduler));
- GetMainSignals().RegisterBackgroundSignalScheduler(scheduler);
+ threadGroup.create_thread([&]{ m_node.scheduler->serviceQueue(); });
+ GetMainSignals().RegisterBackgroundSignalScheduler(*g_rpc_node->scheduler);
pblocktree.reset(new CBlockTreeDB(1 << 20, true));
g_chainstate = MakeUnique<CChainState>();
@@ -120,7 +123,7 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha
BlockValidationState state;
if (!ActivateBestChain(state, chainparams)) {
- throw std::runtime_error(strprintf("ActivateBestChain failed. (%s)", FormatStateMessage(state)));
+ throw std::runtime_error(strprintf("ActivateBestChain failed. (%s)", state.ToString()));
}
// Start script-checking threads. Set g_parallel_script_checks to true so they are used.
@@ -134,10 +137,12 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha
m_node.mempool->setSanityCheck(1.0);
m_node.banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
m_node.connman = MakeUnique<CConnman>(0x1337, 0x1337); // Deterministic randomness for tests.
+ m_node.peer_logic = MakeUnique<PeerLogicValidation>(m_node.connman.get(), m_node.banman.get(), *m_node.scheduler);
}
TestingSetup::~TestingSetup()
{
+ if (m_node.scheduler) m_node.scheduler->stop();
threadGroup.interrupt_all();
threadGroup.join_all();
GetMainSignals().FlushBackgroundCallbacks();
@@ -146,6 +151,7 @@ TestingSetup::~TestingSetup()
m_node.connman.reset();
m_node.banman.reset();
m_node.mempool = nullptr;
+ m_node.scheduler.reset();
UnloadBlockIndex();
g_chainstate.reset();
pblocktree.reset();
diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h
index 1e2e059a56..56ad62eb24 100644
--- a/src/test/util/setup_common.h
+++ b/src/test/util/setup_common.h
@@ -18,6 +18,9 @@
#include <boost/thread.hpp>
+/** This is connected to the logger. Can be used to redirect logs to any other log */
+extern const std::function<void(const std::string&)> G_TEST_LOG_FUN;
+
// Enable BOOST_CHECK_EQUAL for enum class types
template <typename T>
std::ostream& operator<<(typename std::enable_if<std::is_enum<T>::value, std::ostream>::type& stream, const T& e)
@@ -82,7 +85,6 @@ private:
struct TestingSetup : public BasicTestingSetup {
NodeContext m_node;
boost::thread_group threadGroup;
- CScheduler scheduler;
explicit TestingSetup(const std::string& chainName = CBaseChainParams::MAIN);
~TestingSetup();
diff --git a/src/test/util/transaction_utils.cpp b/src/test/util/transaction_utils.cpp
index 90b78effb0..999b803a8d 100644
--- a/src/test/util/transaction_utils.cpp
+++ b/src/test/util/transaction_utils.cpp
@@ -3,6 +3,8 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <test/util/transaction_utils.h>
+#include <coins.h>
+#include <script/signingprovider.h>
CMutableTransaction BuildCreditingTransaction(const CScript& scriptPubKey, int nValue)
{
@@ -37,3 +39,33 @@ CMutableTransaction BuildSpendingTransaction(const CScript& scriptSig, const CSc
return txSpend;
}
+
+std::vector<CMutableTransaction> SetupDummyInputs(FillableSigningProvider& keystoreRet, CCoinsViewCache& coinsRet, const std::array<CAmount,4>& nValues)
+{
+ std::vector<CMutableTransaction> dummyTransactions;
+ dummyTransactions.resize(2);
+
+ // Add some keys to the keystore:
+ CKey key[4];
+ for (int i = 0; i < 4; i++) {
+ key[i].MakeNewKey(i % 2);
+ keystoreRet.AddKey(key[i]);
+ }
+
+ // Create some dummy input transactions
+ dummyTransactions[0].vout.resize(2);
+ dummyTransactions[0].vout[0].nValue = nValues[0];
+ dummyTransactions[0].vout[0].scriptPubKey << ToByteVector(key[0].GetPubKey()) << OP_CHECKSIG;
+ dummyTransactions[0].vout[1].nValue = nValues[1];
+ dummyTransactions[0].vout[1].scriptPubKey << ToByteVector(key[1].GetPubKey()) << OP_CHECKSIG;
+ AddCoins(coinsRet, CTransaction(dummyTransactions[0]), 0);
+
+ dummyTransactions[1].vout.resize(2);
+ dummyTransactions[1].vout[0].nValue = nValues[2];
+ dummyTransactions[1].vout[0].scriptPubKey = GetScriptForDestination(PKHash(key[2].GetPubKey()));
+ dummyTransactions[1].vout[1].nValue = nValues[3];
+ dummyTransactions[1].vout[1].scriptPubKey = GetScriptForDestination(PKHash(key[3].GetPubKey()));
+ AddCoins(coinsRet, CTransaction(dummyTransactions[1]), 0);
+
+ return dummyTransactions;
+}
diff --git a/src/test/util/transaction_utils.h b/src/test/util/transaction_utils.h
index 57604646e7..f843928a5f 100644
--- a/src/test/util/transaction_utils.h
+++ b/src/test/util/transaction_utils.h
@@ -7,6 +7,11 @@
#include <primitives/transaction.h>
+#include <array>
+
+class FillableSigningProvider;
+class CCoinsViewCache;
+
// create crediting transaction
// [1 coinbase input => 1 output with given scriptPubkey and value]
CMutableTransaction BuildCreditingTransaction(const CScript& scriptPubKey, int nValue = 0);
@@ -16,4 +21,9 @@ CMutableTransaction BuildCreditingTransaction(const CScript& scriptPubKey, int n
// 1 output with empty scriptPubKey, full value of referenced transaction]
CMutableTransaction BuildSpendingTransaction(const CScript& scriptSig, const CScriptWitness& scriptWitness, const CTransaction& txCredit);
+// Helper: create two dummy transactions, each with two outputs.
+// The first has nValues[0] and nValues[1] outputs paid to a TX_PUBKEY,
+// the second nValues[2] and nValues[3] outputs paid to a TX_PUBKEYHASH.
+std::vector<CMutableTransaction> SetupDummyInputs(FillableSigningProvider& keystoreRet, CCoinsViewCache& coinsRet, const std::array<CAmount,4>& nValues);
+
#endif // BITCOIN_TEST_UTIL_TRANSACTION_UTILS_H
diff --git a/src/test/util/wallet.cpp b/src/test/util/wallet.cpp
index 226d2df6e4..fd6012e9fe 100644
--- a/src/test/util/wallet.cpp
+++ b/src/test/util/wallet.cpp
@@ -27,8 +27,7 @@ std::string getnewaddress(CWallet& w)
void importaddress(CWallet& wallet, const std::string& address)
{
auto spk_man = wallet.GetLegacyScriptPubKeyMan();
- LOCK(wallet.cs_wallet);
- AssertLockHeld(spk_man->cs_wallet);
+ LOCK2(wallet.cs_wallet, spk_man->cs_KeyStore);
const auto dest = DecodeDestination(address);
assert(IsValidDestination(dest));
const auto script = GetScriptForDestination(dest);
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index 86ea56ff36..eb0d31fbdc 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -1,14 +1,18 @@
-// Copyright (c) 2011-2019 The Bitcoin Core developers
+// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <util/system.h>
#include <clientversion.h>
+#include <hash.h> // For Hash()
+#include <key.h> // For CKey
#include <optional.h>
#include <sync.h>
#include <test/util/setup_common.h>
#include <test/util/str.h>
+#include <uint256.h>
+#include <util/message.h> // For MessageSign(), MessageVerify(), MESSAGE_MAGIC
#include <util/moneystr.h>
#include <util/strencodings.h>
#include <util/string.h>
@@ -16,6 +20,7 @@
#include <util/spanparsing.h>
#include <util/vector.h>
+#include <array>
#include <stdint.h>
#include <thread>
#include <univalue.h>
@@ -38,7 +43,7 @@ BOOST_FIXTURE_TEST_SUITE(util_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(util_criticalsection)
{
- CCriticalSection cs;
+ RecursiveMutex cs;
do {
LOCK(cs);
@@ -340,6 +345,27 @@ BOOST_AUTO_TEST_CASE(util_ParseParameters)
BOOST_CHECK(testArgs.GetArgs("-ccc").size() == 2);
}
+BOOST_AUTO_TEST_CASE(util_ParseInvalidParameters)
+{
+ TestArgsManager test;
+ test.SetupArgs({{"-registered", ArgsManager::ALLOW_ANY}});
+
+ const char* argv[] = {"ignored", "-registered"};
+ std::string error;
+ BOOST_CHECK(test.ParseParameters(2, (char**)argv, error));
+ BOOST_CHECK_EQUAL(error, "");
+
+ argv[1] = "-unregistered";
+ BOOST_CHECK(!test.ParseParameters(2, (char**)argv, error));
+ BOOST_CHECK_EQUAL(error, "Invalid parameter -unregistered");
+
+ // Make sure registered parameters prefixed with a chain name trigger errors.
+ // (Previously, they were accepted and ignored.)
+ argv[1] = "-test.registered";
+ BOOST_CHECK(!test.ParseParameters(2, (char**)argv, error));
+ BOOST_CHECK_EQUAL(error, "Invalid parameter -test.registered");
+}
+
static void TestParse(const std::string& str, bool expected_bool, int64_t expected_int)
{
TestArgsManager test;
@@ -835,7 +861,8 @@ struct ArgsMergeTestingSetup : public BasicTestingSetup {
void ForEachMergeSetup(Fn&& fn)
{
ActionList arg_actions = {};
- ForEachNoDup(arg_actions, SET, SECTION_NEGATE, [&] {
+ // command_line_options do not have sections. Only iterate over SET and NEGATE
+ ForEachNoDup(arg_actions, SET, NEGATE, [&] {
ActionList conf_actions = {};
ForEachNoDup(conf_actions, SET, SECTION_NEGATE, [&] {
for (bool soft_set : {false, true}) {
@@ -995,7 +1022,7 @@ BOOST_FIXTURE_TEST_CASE(util_ArgsMerge, ArgsMergeTestingSetup)
// Results file is formatted like:
//
// <input> || <IsArgSet/IsArgNegated/GetArg output> | <GetArgs output> | <GetUnsuitable output>
- BOOST_CHECK_EQUAL(out_sha_hex, "b835eef5977d69114eb039a976201f8c7121f34fe2b7ea2b73cafb516e5c9dc8");
+ BOOST_CHECK_EQUAL(out_sha_hex, "8fd4877bb8bf337badca950ede6c917441901962f160e52514e06a60dea46cde");
}
// Similar test as above, but for ArgsManager::GetChainName function.
@@ -1172,6 +1199,12 @@ BOOST_AUTO_TEST_CASE(util_ParseMoney)
BOOST_CHECK(ParseMoney("0.00000001", ret));
BOOST_CHECK_EQUAL(ret, COIN/100000000);
+ // Parsing amount that can not be represented in ret should fail
+ BOOST_CHECK(!ParseMoney("0.000000001", ret));
+
+ // Parsing empty string should fail
+ BOOST_CHECK(!ParseMoney("", ret));
+
// Attempted 63 bit overflow should fail
BOOST_CHECK(!ParseMoney("92233720368.54775808", ret));
@@ -1300,7 +1333,7 @@ BOOST_AUTO_TEST_CASE(util_time_GetTime)
SetMockTime(111);
// Check that mock time does not change after a sleep
for (const auto& num_sleep : {0, 1}) {
- MilliSleep(num_sleep);
+ UninterruptibleSleep(std::chrono::milliseconds{num_sleep});
BOOST_CHECK_EQUAL(111, GetTime()); // Deprecated time getter
BOOST_CHECK_EQUAL(111, GetTime<std::chrono::seconds>().count());
BOOST_CHECK_EQUAL(111000, GetTime<std::chrono::milliseconds>().count());
@@ -1311,7 +1344,7 @@ BOOST_AUTO_TEST_CASE(util_time_GetTime)
// Check that system time changes after a sleep
const auto ms_0 = GetTime<std::chrono::milliseconds>();
const auto us_0 = GetTime<std::chrono::microseconds>();
- MilliSleep(1);
+ UninterruptibleSleep(std::chrono::milliseconds{1});
BOOST_CHECK(ms_0 < GetTime<std::chrono::milliseconds>());
BOOST_CHECK(us_0 < GetTime<std::chrono::microseconds>());
}
@@ -2003,4 +2036,109 @@ BOOST_AUTO_TEST_CASE(test_tracked_vector)
BOOST_CHECK_EQUAL(v8[2].copies, 0);
}
+BOOST_AUTO_TEST_CASE(message_sign)
+{
+ const std::array<unsigned char, 32> privkey_bytes = {
+ // just some random data
+ // derived address from this private key: 15CRxFdyRpGZLW9w8HnHvVduizdL5jKNbs
+ 0xD9, 0x7F, 0x51, 0x08, 0xF1, 0x1C, 0xDA, 0x6E,
+ 0xEE, 0xBA, 0xAA, 0x42, 0x0F, 0xEF, 0x07, 0x26,
+ 0xB1, 0xF8, 0x98, 0x06, 0x0B, 0x98, 0x48, 0x9F,
+ 0xA3, 0x09, 0x84, 0x63, 0xC0, 0x03, 0x28, 0x66
+ };
+
+ const std::string message = "Trust no one";
+
+ const std::string expected_signature =
+ "IPojfrX2dfPnH26UegfbGQQLrdK844DlHq5157/P6h57WyuS/Qsl+h/WSVGDF4MUi4rWSswW38oimDYfNNUBUOk=";
+
+ CKey privkey;
+ std::string generated_signature;
+
+ BOOST_REQUIRE_MESSAGE(!privkey.IsValid(),
+ "Confirm the private key is invalid");
+
+ BOOST_CHECK_MESSAGE(!MessageSign(privkey, message, generated_signature),
+ "Sign with an invalid private key");
+
+ privkey.Set(privkey_bytes.begin(), privkey_bytes.end(), true);
+
+ BOOST_REQUIRE_MESSAGE(privkey.IsValid(),
+ "Confirm the private key is valid");
+
+ BOOST_CHECK_MESSAGE(MessageSign(privkey, message, generated_signature),
+ "Sign with a valid private key");
+
+ BOOST_CHECK_EQUAL(expected_signature, generated_signature);
+}
+
+BOOST_AUTO_TEST_CASE(message_verify)
+{
+ BOOST_CHECK_EQUAL(
+ MessageVerify(
+ "invalid address",
+ "signature should be irrelevant",
+ "message too"),
+ MessageVerificationResult::ERR_INVALID_ADDRESS);
+
+ BOOST_CHECK_EQUAL(
+ MessageVerify(
+ "3B5fQsEXEaV8v6U3ejYc8XaKXAkyQj2MjV",
+ "signature should be irrelevant",
+ "message too"),
+ MessageVerificationResult::ERR_ADDRESS_NO_KEY);
+
+ BOOST_CHECK_EQUAL(
+ MessageVerify(
+ "1KqbBpLy5FARmTPD4VZnDDpYjkUvkr82Pm",
+ "invalid signature, not in base64 encoding",
+ "message should be irrelevant"),
+ MessageVerificationResult::ERR_MALFORMED_SIGNATURE);
+
+ BOOST_CHECK_EQUAL(
+ MessageVerify(
+ "1KqbBpLy5FARmTPD4VZnDDpYjkUvkr82Pm",
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=",
+ "message should be irrelevant"),
+ MessageVerificationResult::ERR_PUBKEY_NOT_RECOVERED);
+
+ BOOST_CHECK_EQUAL(
+ MessageVerify(
+ "15CRxFdyRpGZLW9w8HnHvVduizdL5jKNbs",
+ "IPojfrX2dfPnH26UegfbGQQLrdK844DlHq5157/P6h57WyuS/Qsl+h/WSVGDF4MUi4rWSswW38oimDYfNNUBUOk=",
+ "I never signed this"),
+ MessageVerificationResult::ERR_NOT_SIGNED);
+
+ BOOST_CHECK_EQUAL(
+ MessageVerify(
+ "15CRxFdyRpGZLW9w8HnHvVduizdL5jKNbs",
+ "IPojfrX2dfPnH26UegfbGQQLrdK844DlHq5157/P6h57WyuS/Qsl+h/WSVGDF4MUi4rWSswW38oimDYfNNUBUOk=",
+ "Trust no one"),
+ MessageVerificationResult::OK);
+
+ BOOST_CHECK_EQUAL(
+ MessageVerify(
+ "11canuhp9X2NocwCq7xNrQYTmUgZAnLK3",
+ "IIcaIENoYW5jZWxsb3Igb24gYnJpbmsgb2Ygc2Vjb25kIGJhaWxvdXQgZm9yIGJhbmtzIAaHRtbCeDZINyavx14=",
+ "Trust me"),
+ MessageVerificationResult::OK);
+}
+
+BOOST_AUTO_TEST_CASE(message_hash)
+{
+ const std::string unsigned_tx = "...";
+ const std::string prefixed_message =
+ std::string(1, (char)MESSAGE_MAGIC.length()) +
+ MESSAGE_MAGIC +
+ std::string(1, (char)unsigned_tx.length()) +
+ unsigned_tx;
+
+ const uint256 signature_hash = Hash(unsigned_tx.begin(), unsigned_tx.end());
+ const uint256 message_hash1 = Hash(prefixed_message.begin(), prefixed_message.end());
+ const uint256 message_hash2 = MessageHash(unsigned_tx);
+
+ BOOST_CHECK_EQUAL(message_hash1, message_hash2);
+ BOOST_CHECK_NE(message_hash1, signature_hash);
+}
+
BOOST_AUTO_TEST_SUITE_END()
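
message_hash checks that signed-message hashing covers a framed payload rather than the raw text: a length byte, the magic string, another length byte, then the message, all fed through the double-SHA256 hash. A sketch of just the framing step, with hashing omitted to stay standalone; the magic below is the conventional Bitcoin signed-message prefix.

#include <cassert>
#include <string>

int main()
{
    const std::string MESSAGE_MAGIC = "Bitcoin Signed Message:\n";
    const std::string msg = "...";

    // One length byte, then the magic; one length byte, then the message.
    // Both lengths fit in a single compact-size byte here.
    const std::string prefixed =
        std::string(1, static_cast<char>(MESSAGE_MAGIC.size())) + MESSAGE_MAGIC +
        std::string(1, static_cast<char>(msg.size())) + msg;

    assert(prefixed.size() == 1 + MESSAGE_MAGIC.size() + 1 + msg.size());
    assert(prefixed[0] == static_cast<char>(24)); // length of "Bitcoin Signed Message:\n"
}
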
diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp
index dae389a167..f2c862011d 100644
--- a/src/test/validation_block_tests.cpp
+++ b/src/test/validation_block_tests.cpp
@@ -205,7 +205,7 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering)
t.join();
}
while (GetMainSignals().CallbacksPending() > 0) {
- MilliSleep(100);
+ UninterruptibleSleep(std::chrono::milliseconds{100});
}
UnregisterValidationInterface(&sub);
diff --git a/src/test/validation_flush_tests.cpp b/src/test/validation_flush_tests.cpp
new file mode 100644
index 0000000000..c24164528f
--- /dev/null
+++ b/src/test/validation_flush_tests.cpp
@@ -0,0 +1,166 @@
+// Copyright (c) 2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+//
+#include <sync.h>
+#include <test/util/setup_common.h>
+#include <txmempool.h>
+#include <validation.h>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(validation_flush_tests, BasicTestingSetup)
+
+//! Test utilities for detecting when we need to flush the coins cache based
+//! on estimated memory usage.
+//!
+//! @sa CChainState::GetCoinsCacheSizeState()
+//!
+BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
+{
+ BlockManager blockman{};
+ CChainState chainstate{blockman};
+ chainstate.InitCoinsDB(/*cache_size_bytes*/ 1 << 10, /*in_memory*/ true, /*should_wipe*/ false);
+ WITH_LOCK(::cs_main, chainstate.InitCoinsCache());
+ CTxMemPool tx_pool{};
+
+ constexpr bool is_64_bit = sizeof(void*) == 8;
+
+ LOCK(::cs_main);
+ auto& view = chainstate.CoinsTip();
+
+ //! Create and add a Coin with DynamicMemoryUsage of 80 bytes to the given view.
+ auto add_coin = [](CCoinsViewCache& coins_view) -> COutPoint {
+ Coin newcoin;
+ uint256 txid = InsecureRand256();
+ COutPoint outp{txid, 0};
+ newcoin.nHeight = 1;
+ newcoin.out.nValue = InsecureRand32();
+ newcoin.out.scriptPubKey.assign((uint32_t)56, 1);
+ coins_view.AddCoin(outp, std::move(newcoin), false);
+
+ return outp;
+ };
+
+ // The number of bytes consumed by a coin's heap data, i.e. the CScript
+ // (prevector<28, unsigned char>) when assigned 56 bytes of data, as above.
+ //
+ // See also: Coin::DynamicMemoryUsage().
+ constexpr int COIN_SIZE = is_64_bit ? 80 : 64;
+
+ auto print_view_mem_usage = [](CCoinsViewCache& view) {
+ BOOST_TEST_MESSAGE("CCoinsViewCache memory usage: " << view.DynamicMemoryUsage());
+ };
+
+ constexpr size_t MAX_COINS_CACHE_BYTES = 1024;
+
+ // Without any coins in the cache, we shouldn't need to flush.
+ BOOST_CHECK_EQUAL(
+ chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ CoinsCacheSizeState::OK);
+
+ // If the initial memory allocations of cacheCoins don't match these common
+ // cases, we can't really continue to make assertions about memory usage.
+ // End the test early.
+ if (view.DynamicMemoryUsage() != 32 && view.DynamicMemoryUsage() != 16) {
+ // Add a bunch of coins to see that we at least flip over to CRITICAL.
+
+ for (int i{0}; i < 1000; ++i) {
+ COutPoint res = add_coin(view);
+ BOOST_CHECK_EQUAL(view.AccessCoin(res).DynamicMemoryUsage(), COIN_SIZE);
+ }
+
+ BOOST_CHECK_EQUAL(
+ chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ CoinsCacheSizeState::CRITICAL);
+
+ BOOST_TEST_MESSAGE("Exiting cache flush tests early due to unsupported arch");
+ return;
+ }
+
+ print_view_mem_usage(view);
+ BOOST_CHECK_EQUAL(view.DynamicMemoryUsage(), is_64_bit ? 32 : 16);
+
+ // We should be able to add COINS_UNTIL_CRITICAL coins to the cache before going CRITICAL.
+ // This is contingent not only on the dynamic memory usage of the Coins
+ // that we're adding (COIN_SIZE bytes per), but also on how much memory the
+ // cacheCoins (unordered_map) preallocates.
+ constexpr int COINS_UNTIL_CRITICAL{3};
+
+ for (int i{0}; i < COINS_UNTIL_CRITICAL; ++i) {
+ COutPoint res = add_coin(view);
+ print_view_mem_usage(view);
+ BOOST_CHECK_EQUAL(view.AccessCoin(res).DynamicMemoryUsage(), COIN_SIZE);
+ BOOST_CHECK_EQUAL(
+ chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ CoinsCacheSizeState::OK);
+ }
+
+ // Adding some additional coins will push us over the edge to CRITICAL.
+ for (int i{0}; i < 4; ++i) {
+ add_coin(view);
+ print_view_mem_usage(view);
+ if (chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0) ==
+ CoinsCacheSizeState::CRITICAL) {
+ break;
+ }
+ }
+
+ BOOST_CHECK_EQUAL(
+ chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0),
+ CoinsCacheSizeState::CRITICAL);
+
+ // Passing non-zero max mempool usage should allow us more headroom.
+ BOOST_CHECK_EQUAL(
+ chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10),
+ CoinsCacheSizeState::OK);
+
+ for (int i{0}; i < 3; ++i) {
+ add_coin(view);
+ print_view_mem_usage(view);
+ BOOST_CHECK_EQUAL(
+ chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 1 << 10),
+ CoinsCacheSizeState::OK);
+ }
+
+ // Adding another coin with the additional mempool room will put us >90%
+ // but not yet critical.
+ add_coin(view);
+ print_view_mem_usage(view);
+
+ // Only perform these checks on 64 bit hosts; I haven't done the math for 32.
+ if (is_64_bit) {
+ float usage_percentage = (float)view.DynamicMemoryUsage() / (MAX_COINS_CACHE_BYTES + (1 << 10));
+ BOOST_TEST_MESSAGE("CoinsTip usage percentage: " << usage_percentage);
+ BOOST_CHECK(usage_percentage >= 0.9);
+ BOOST_CHECK(usage_percentage < 1);
+ BOOST_CHECK_EQUAL(
+ chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, 1 << 10),
+ CoinsCacheSizeState::LARGE);
+ }
+
+ // Using the default max_* values permits way more coins to be added.
+ for (int i{0}; i < 1000; ++i) {
+ add_coin(view);
+ BOOST_CHECK_EQUAL(
+ chainstate.GetCoinsCacheSizeState(tx_pool),
+ CoinsCacheSizeState::OK);
+ }
+
+ // Flushing the view doesn't take us back to OK because cacheCoins has
+ // preallocated memory that doesn't get reclaimed even after flush.
+
+ BOOST_CHECK_EQUAL(
+ chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, 0),
+ CoinsCacheSizeState::CRITICAL);
+
+ view.SetBestBlock(InsecureRand256());
+ BOOST_CHECK(view.Flush());
+ print_view_mem_usage(view);
+
+ BOOST_CHECK_EQUAL(
+ chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, 0),
+ CoinsCacheSizeState::CRITICAL);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/timedata.cpp b/src/timedata.cpp
index b6163631fd..942b3cb919 100644
--- a/src/timedata.cpp
+++ b/src/timedata.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2014-2019 The Bitcoin Core developers
+// Copyright (c) 2014-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -16,7 +16,7 @@
#include <warnings.h>
-static CCriticalSection cs_nTimeOffset;
+static RecursiveMutex cs_nTimeOffset;
static int64_t nTimeOffset GUARDED_BY(cs_nTimeOffset) = 0;
/**
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index d06b3cd20d..84118b36ef 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -501,7 +501,7 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe
}
return;
}
- service = LookupNumeric(std::string(service_id+".onion").c_str(), Params().GetDefaultPort());
+ service = LookupNumeric(std::string(service_id+".onion"), Params().GetDefaultPort());
LogPrintf("tor: Got service ID %s, advertising service %s\n", service_id, service.ToString());
if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) {
LogPrint(BCLog::TOR, "tor: Cached service private key to %s\n", GetPrivateKeyFile().string());
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 9568251149..acc47ab45e 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -336,10 +336,10 @@ public:
vout.assign(vAvail.size(), CTxOut());
for (unsigned int i = 0; i < vAvail.size(); i++) {
if (vAvail[i])
- ::Unserialize(s, CTxOutCompressor(vout[i]));
+ ::Unserialize(s, Using<TxOutCompression>(vout[i]));
}
// coinbase height
- ::Unserialize(s, VARINT(nHeight, VarIntMode::NONNEGATIVE_SIGNED));
+ ::Unserialize(s, VARINT_MODE(nHeight, VarIntMode::NONNEGATIVE_SIGNED));
}
};
diff --git a/src/txdb.h b/src/txdb.h
index f71b4e737d..488c24f935 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -20,8 +20,6 @@ class CBlockIndex;
class CCoinsViewDBCursor;
class uint256;
-//! No need to periodic flush if at least this much space still available.
-static constexpr int MAX_BLOCK_COINSDB_USAGE = 10;
//! -dbcache default (MiB)
static const int64_t nDefaultDbCache = 450;
//! -dbbatchsize default (bytes)
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 441255182e..5768219f3a 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -23,7 +23,7 @@ CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef& _tx, const CAmount& _nFe
int64_t _nTime, unsigned int _entryHeight,
bool _spendsCoinbase, int64_t _sigOpsCost, LockPoints lp)
: tx(_tx), nFee(_nFee), nTxWeight(GetTransactionWeight(*tx)), nUsageSize(RecursiveDynamicUsage(tx)), nTime(_nTime), entryHeight(_entryHeight),
- spendsCoinbase(_spendsCoinbase), sigOpCost(_sigOpsCost), lockPoints(lp)
+ spendsCoinbase(_spendsCoinbase), sigOpCost(_sigOpsCost), lockPoints(lp), m_epoch(0)
{
nCountWithDescendants = 1;
nSizeWithDescendants = GetTxSize();
@@ -122,8 +122,6 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes
// setMemPoolChildren will be updated, an assumption made in
// UpdateForDescendants.
for (const uint256 &hash : reverse_iterate(vHashesToUpdate)) {
- // we cache the in-mempool children to avoid duplicate updates
- setEntries setChildren;
// calculate children from mapNextTx
txiter it = mapTx.find(hash);
if (it == mapTx.end()) {
@@ -132,17 +130,21 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes
auto iter = mapNextTx.lower_bound(COutPoint(hash, 0));
// First calculate the children, and update setMemPoolChildren to
// include them, and update their setMemPoolParents to include this tx.
- for (; iter != mapNextTx.end() && iter->first->hash == hash; ++iter) {
- const uint256 &childHash = iter->second->GetHash();
- txiter childIter = mapTx.find(childHash);
- assert(childIter != mapTx.end());
- // We can skip updating entries we've encountered before or that
- // are in the block (which are already accounted for).
- if (setChildren.insert(childIter).second && !setAlreadyIncluded.count(childHash)) {
- UpdateChild(it, childIter, true);
- UpdateParent(childIter, it, true);
+ // we cache the in-mempool children to avoid duplicate updates
+ {
+ const auto epoch = GetFreshEpoch();
+ for (; iter != mapNextTx.end() && iter->first->hash == hash; ++iter) {
+ const uint256 &childHash = iter->second->GetHash();
+ txiter childIter = mapTx.find(childHash);
+ assert(childIter != mapTx.end());
+ // We can skip updating entries we've encountered before or that
+ // are in the block (which are already accounted for).
+ if (!visited(childIter) && !setAlreadyIncluded.count(childHash)) {
+ UpdateChild(it, childIter, true);
+ UpdateParent(childIter, it, true);
+ }
}
- }
+ } // release epoch guard for UpdateForDescendants
UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded);
}
}
@@ -325,7 +327,7 @@ void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, CAmount modifyFee,
}
CTxMemPool::CTxMemPool(CBlockPolicyEstimator* estimator)
- : nTransactionsUpdated(0), minerPolicyEstimator(estimator)
+ : nTransactionsUpdated(0), minerPolicyEstimator(estimator), m_epoch(0), m_has_epoch_guard(false)
{
_clear(); //lock free clear
@@ -1105,4 +1107,23 @@ void CTxMemPool::SetIsLoaded(bool loaded)
m_is_loaded = loaded;
}
+
+CTxMemPool::EpochGuard CTxMemPool::GetFreshEpoch() const
+{
+ return EpochGuard(*this);
+}
+CTxMemPool::EpochGuard::EpochGuard(const CTxMemPool& in) : pool(in)
+{
+ assert(!pool.m_has_epoch_guard);
+ ++pool.m_epoch;
+ pool.m_has_epoch_guard = true;
+}
+
+CTxMemPool::EpochGuard::~EpochGuard()
+{
+ // prevents stale results from being used
+ ++pool.m_epoch;
+ pool.m_has_epoch_guard = false;
+}
+
SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand(std::numeric_limits<uint64_t>::max())), k1(GetRand(std::numeric_limits<uint64_t>::max())) {}
diff --git a/src/txmempool.h b/src/txmempool.h
index e9f1f1c8a1..0de6f28777 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -30,7 +30,7 @@
#include <boost/signals2/signal.hpp>
class CBlockIndex;
-extern CCriticalSection cs_main;
+extern RecursiveMutex cs_main;
/** Fake height value used in Coin to signify they are only in the memory pool (since 0.8) */
static const uint32_t MEMPOOL_HEIGHT = 0x7FFFFFFF;
@@ -129,6 +129,7 @@ public:
int64_t GetSigOpCostWithAncestors() const { return nSigOpCostWithAncestors; }
mutable size_t vTxHashesIdx; //!< Index in mempool's vTxHashes
+ mutable uint64_t m_epoch; //!< epoch when last touched, useful for graph algorithms
};
// Helpers for modifying CTxMemPool::mapTx, which is a boost multi_index.
@@ -453,6 +454,8 @@ private:
mutable int64_t lastRollingFeeUpdate;
mutable bool blockSinceLastRollingFeeBump;
mutable double rollingMinimumFeeRate; //!< minimum fee to get into the pool, decreases exponentially
+ mutable uint64_t m_epoch;
+ mutable bool m_has_epoch_guard;
void trackPackageRemoved(const CFeeRate& rate) EXCLUSIVE_LOCKS_REQUIRED(cs);
@@ -736,6 +739,55 @@ private:
* removal.
*/
void removeUnchecked(txiter entry, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs);
+public:
+ /** EpochGuard: RAII-style guard for using epoch-based graph traversal algorithms.
+ * When walking ancestors or descendants, we generally want to avoid
+ * visiting the same transactions twice. Some traversal algorithms use
+ * std::set (or setEntries) to deduplicate the transactions we visit.
+ * However, use of std::set is algorithmically undesirable because it both
+ * adds an asymptotic factor of O(log n) to traversal cost and triggers O(n)
+ * more dynamic memory allocations.
+ * In many algorithms we can replace std::set with an internal mempool
+ * counter to track the time (or, "epoch") that we began a traversal, and
+ * check + update a per-transaction epoch for each transaction we look at to
+ * determine if that transaction has not yet been visited during the current
+ * traversal's epoch.
+ * Algorithms using std::set can be replaced on a one-by-one basis, as the
+ * two techniques are not fundamentally incompatible across the codebase.
+ * Generally speaking, however, the remaining use of std::set for mempool
+ * traversal should be viewed as a TODO for replacement with an epoch based
+ * traversal, rather than a preference for std::set over epochs in that
+ * algorithm.
+ */
+ class EpochGuard {
+ const CTxMemPool& pool;
+ public:
+ EpochGuard(const CTxMemPool& in);
+ ~EpochGuard();
+ };
+ // N.B. GetFreshEpoch modifies mutable state via the EpochGuard construction
+ // (and later destruction)
+ EpochGuard GetFreshEpoch() const EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ /** visited marks a CTxMemPoolEntry as having been traversed
+ * during the lifetime of the most recently created EpochGuard
+ * and returns false if we are the first visitor, true otherwise.
+ *
+ * An EpochGuard must be held when visited is called or an assert will be
+ * triggered.
+ *
+ */
+ bool visited(txiter it) const EXCLUSIVE_LOCKS_REQUIRED(cs) {
+ assert(m_has_epoch_guard);
+ bool ret = it->m_epoch >= m_epoch;
+ it->m_epoch = std::max(it->m_epoch, m_epoch);
+ return ret;
+ }
+
+ bool visited(Optional<txiter> it) const EXCLUSIVE_LOCKS_REQUIRED(cs) {
+ assert(m_has_epoch_guard);
+ return !it || visited(*it);
+ }
};
/**
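As a rough illustration of the epoch pattern described above (not part of this patch; the function name and traversal shape below are hypothetical), a descendant walk could replace its local setEntries bookkeeping with GetFreshEpoch() and visited():

#include <sync.h>
#include <txmempool.h>

#include <vector>

// Hypothetical sketch only: a depth-first walk over in-mempool descendants
// that deduplicates with the epoch machinery instead of a std::set.
void WalkDescendantsSketch(const CTxMemPool& pool, CTxMemPool::txiter root)
{
    LOCK(pool.cs); // GetFreshEpoch() and visited() require cs to be held
    const auto epoch = pool.GetFreshEpoch(); // bumps m_epoch, sets m_has_epoch_guard
    std::vector<CTxMemPool::txiter> work{root};
    while (!work.empty()) {
        const CTxMemPool::txiter it = work.back();
        work.pop_back();
        if (pool.visited(it)) continue; // O(1) check instead of a std::set insert
        // ... process 'it' here ...
        for (const CTxMemPool::txiter child : pool.GetMemPoolChildren(it)) {
            work.push_back(child);
        }
    }
} // ~EpochGuard bumps m_epoch again so stale per-entry marks cannot be reused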
diff --git a/src/ui_interface.cpp b/src/ui_interface.cpp
index c51f89d0dc..85bb746d19 100644
--- a/src/ui_interface.cpp
+++ b/src/ui_interface.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2010-2019 The Bitcoin Core developers
+// Copyright (c) 2010-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -16,7 +16,6 @@ struct UISignals {
boost::signals2::signal<CClientUIInterface::NotifyNumConnectionsChangedSig> NotifyNumConnectionsChanged;
boost::signals2::signal<CClientUIInterface::NotifyNetworkActiveChangedSig> NotifyNetworkActiveChanged;
boost::signals2::signal<CClientUIInterface::NotifyAlertChangedSig> NotifyAlertChanged;
- boost::signals2::signal<CClientUIInterface::LoadWalletSig> LoadWallet;
boost::signals2::signal<CClientUIInterface::ShowProgressSig> ShowProgress;
boost::signals2::signal<CClientUIInterface::NotifyBlockTipSig> NotifyBlockTip;
boost::signals2::signal<CClientUIInterface::NotifyHeaderTipSig> NotifyHeaderTip;
@@ -36,7 +35,6 @@ ADD_SIGNALS_IMPL_WRAPPER(InitMessage);
ADD_SIGNALS_IMPL_WRAPPER(NotifyNumConnectionsChanged);
ADD_SIGNALS_IMPL_WRAPPER(NotifyNetworkActiveChanged);
ADD_SIGNALS_IMPL_WRAPPER(NotifyAlertChanged);
-ADD_SIGNALS_IMPL_WRAPPER(LoadWallet);
ADD_SIGNALS_IMPL_WRAPPER(ShowProgress);
ADD_SIGNALS_IMPL_WRAPPER(NotifyBlockTip);
ADD_SIGNALS_IMPL_WRAPPER(NotifyHeaderTip);
@@ -48,7 +46,6 @@ void CClientUIInterface::InitMessage(const std::string& message) { return g_ui_s
void CClientUIInterface::NotifyNumConnectionsChanged(int newNumConnections) { return g_ui_signals.NotifyNumConnectionsChanged(newNumConnections); }
void CClientUIInterface::NotifyNetworkActiveChanged(bool networkActive) { return g_ui_signals.NotifyNetworkActiveChanged(networkActive); }
void CClientUIInterface::NotifyAlertChanged() { return g_ui_signals.NotifyAlertChanged(); }
-void CClientUIInterface::LoadWallet(std::unique_ptr<interfaces::Wallet>& wallet) { return g_ui_signals.LoadWallet(wallet); }
void CClientUIInterface::ShowProgress(const std::string& title, int nProgress, bool resume_possible) { return g_ui_signals.ShowProgress(title, nProgress, resume_possible); }
void CClientUIInterface::NotifyBlockTip(bool b, const CBlockIndex* i) { return g_ui_signals.NotifyBlockTip(b, i); }
void CClientUIInterface::NotifyHeaderTip(bool b, const CBlockIndex* i) { return g_ui_signals.NotifyHeaderTip(b, i); }
diff --git a/src/ui_interface.h b/src/ui_interface.h
index 650a29ae3d..b402177b85 100644
--- a/src/ui_interface.h
+++ b/src/ui_interface.h
@@ -1,5 +1,5 @@
// Copyright (c) 2010 Satoshi Nakamoto
-// Copyright (c) 2012-2019 The Bitcoin Core developers
+// Copyright (c) 2012-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -17,10 +17,6 @@ class connection;
}
} // namespace boost
-namespace interfaces {
-class Wallet;
-} // namespace interfaces
-
/** General change type (added, updated, removed). */
enum ChangeType
{
@@ -105,9 +101,6 @@ public:
*/
ADD_SIGNALS_DECL_WRAPPER(NotifyAlertChanged, void, );
- /** A wallet has been loaded. */
- ADD_SIGNALS_DECL_WRAPPER(LoadWallet, void, std::unique_ptr<interfaces::Wallet>& wallet);
-
/**
* Show progress e.g. for verifychain.
* resume_possible indicates shutting down now will result in the current progress action resuming upon restart.
diff --git a/src/uint256.cpp b/src/uint256.cpp
index 6398d6326f..a943e71062 100644
--- a/src/uint256.cpp
+++ b/src/uint256.cpp
@@ -75,3 +75,8 @@ template std::string base_blob<256>::GetHex() const;
template std::string base_blob<256>::ToString() const;
template void base_blob<256>::SetHex(const char*);
template void base_blob<256>::SetHex(const std::string&);
+
+uint256& UINT256_ONE() {
+ static uint256* one = new uint256(uint256S("0000000000000000000000000000000000000000000000000000000000000001"));
+ return *one;
+}
diff --git a/src/uint256.h b/src/uint256.h
index ff0b74e117..b36598f572 100644
--- a/src/uint256.h
+++ b/src/uint256.h
@@ -144,4 +144,6 @@ inline uint256 uint256S(const std::string& str)
return rv;
}
+uint256& UINT256_ONE();
+
#endif // BITCOIN_UINT256_H
diff --git a/src/undo.h b/src/undo.h
index 3f50f4caad..47f132c7d8 100644
--- a/src/undo.h
+++ b/src/undo.h
@@ -13,58 +13,42 @@
#include <serialize.h>
#include <version.h>
-/** Undo information for a CTxIn
+/** Formatter for undo information for a CTxIn
*
* Contains the prevout's CTxOut being spent, and its metadata as well
* (coinbase or not, height). The serialization contains a dummy value of
* zero. This is compatible with older versions which expect to see
* the transaction version there.
*/
-class TxInUndoSerializer
+struct TxInUndoFormatter
{
- const Coin* txout;
-
-public:
template<typename Stream>
- void Serialize(Stream &s) const {
- ::Serialize(s, VARINT(txout->nHeight * 2 + (txout->fCoinBase ? 1u : 0u)));
- if (txout->nHeight > 0) {
+ void Ser(Stream &s, const Coin& txout) {
+ ::Serialize(s, VARINT(txout.nHeight * 2 + (txout.fCoinBase ? 1u : 0u)));
+ if (txout.nHeight > 0) {
// Required to maintain compatibility with older undo format.
::Serialize(s, (unsigned char)0);
}
- ::Serialize(s, CTxOutCompressor(REF(txout->out)));
+ ::Serialize(s, Using<TxOutCompression>(txout.out));
}
- explicit TxInUndoSerializer(const Coin* coin) : txout(coin) {}
-};
-
-class TxInUndoDeserializer
-{
- Coin* txout;
-
-public:
template<typename Stream>
- void Unserialize(Stream &s) {
+ void Unser(Stream &s, Coin& txout) {
unsigned int nCode = 0;
::Unserialize(s, VARINT(nCode));
- txout->nHeight = nCode / 2;
- txout->fCoinBase = nCode & 1;
- if (txout->nHeight > 0) {
+ txout.nHeight = nCode / 2;
+ txout.fCoinBase = nCode & 1;
+ if (txout.nHeight > 0) {
// Old versions stored the version number for the last spend of
// a transaction's outputs. Non-final spends were indicated with
// height = 0.
unsigned int nVersionDummy;
::Unserialize(s, VARINT(nVersionDummy));
}
- ::Unserialize(s, CTxOutCompressor(REF(txout->out)));
+ ::Unserialize(s, Using<TxOutCompression>(txout.out));
}
-
- explicit TxInUndoDeserializer(Coin* coin) : txout(coin) {}
};
-static const size_t MIN_TRANSACTION_INPUT_WEIGHT = WITNESS_SCALE_FACTOR * ::GetSerializeSize(CTxIn(), PROTOCOL_VERSION);
-static const size_t MAX_INPUTS_PER_BLOCK = MAX_BLOCK_WEIGHT / MIN_TRANSACTION_INPUT_WEIGHT;
-
/** Undo information for a CTransaction */
class CTxUndo
{
@@ -72,29 +56,7 @@ public:
// undo information for all txins
std::vector<Coin> vprevout;
- template <typename Stream>
- void Serialize(Stream& s) const {
- // TODO: avoid reimplementing vector serializer
- uint64_t count = vprevout.size();
- ::Serialize(s, COMPACTSIZE(REF(count)));
- for (const auto& prevout : vprevout) {
- ::Serialize(s, TxInUndoSerializer(&prevout));
- }
- }
-
- template <typename Stream>
- void Unserialize(Stream& s) {
- // TODO: avoid reimplementing vector deserializer
- uint64_t count = 0;
- ::Unserialize(s, COMPACTSIZE(count));
- if (count > MAX_INPUTS_PER_BLOCK) {
- throw std::ios_base::failure("Too many input undo records");
- }
- vprevout.resize(count);
- for (auto& prevout : vprevout) {
- ::Unserialize(s, TxInUndoDeserializer(&prevout));
- }
- }
+ SERIALIZE_METHODS(CTxUndo, obj) { READWRITE(Using<VectorFormatter<TxInUndoFormatter>>(obj.vprevout)); }
};
/** Undo information for a CBlock */
@@ -103,12 +65,7 @@ class CBlockUndo
public:
std::vector<CTxUndo> vtxundo; // for all but the coinbase
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(vtxundo);
- }
+ SERIALIZE_METHODS(CBlockUndo, obj) { READWRITE(obj.vtxundo); }
};
#endif // BITCOIN_UNDO_H
diff --git a/src/univalue/Makefile.am b/src/univalue/Makefile.am
index e283fc890e..0f5ba59954 100644
--- a/src/univalue/Makefile.am
+++ b/src/univalue/Makefile.am
@@ -95,6 +95,7 @@ TEST_FILES = \
$(TEST_DATA_DIR)/fail41.json \
$(TEST_DATA_DIR)/fail42.json \
$(TEST_DATA_DIR)/fail44.json \
+ $(TEST_DATA_DIR)/fail45.json \
$(TEST_DATA_DIR)/fail3.json \
$(TEST_DATA_DIR)/fail4.json \
$(TEST_DATA_DIR)/fail5.json \
@@ -105,6 +106,7 @@ TEST_FILES = \
$(TEST_DATA_DIR)/pass1.json \
$(TEST_DATA_DIR)/pass2.json \
$(TEST_DATA_DIR)/pass3.json \
+ $(TEST_DATA_DIR)/pass4.json \
$(TEST_DATA_DIR)/round1.json \
$(TEST_DATA_DIR)/round2.json \
$(TEST_DATA_DIR)/round3.json \
diff --git a/src/univalue/lib/univalue_read.cpp b/src/univalue/lib/univalue_read.cpp
index 14834db24d..5c6a1acf75 100644
--- a/src/univalue/lib/univalue_read.cpp
+++ b/src/univalue/lib/univalue_read.cpp
@@ -8,6 +8,14 @@
#include "univalue.h"
#include "univalue_utffilter.h"
+/*
+ * According to stackexchange, the original json test suite wanted
+ * to limit depth to 22. Widely-deployed PHP bails at depth 512,
+ * so we will follow PHP's lead, which should be more than sufficient
+ * (further stackexchange comments indicate depth > 32 rarely occurs).
+ */
+static const size_t MAX_JSON_DEPTH = 512;
+
static bool json_isdigit(int ch)
{
return ((ch >= '0') && (ch <= '9'));
@@ -323,6 +331,9 @@ bool UniValue::read(const char *raw, size_t size)
stack.push_back(newTop);
}
+ if (stack.size() > MAX_JSON_DEPTH)
+ return false;
+
if (utyp == VOBJ)
setExpect(OBJ_NAME);
else
diff --git a/src/univalue/test/fail45.json b/src/univalue/test/fail45.json
new file mode 100644
index 0000000000..03a30d8800
--- /dev/null
+++ b/src/univalue/test/fail45.json
@@ -0,0 +1 @@
+[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
diff --git a/src/univalue/test/pass4.json b/src/univalue/test/pass4.json
new file mode 100644
index 0000000000..f5a680b31c
--- /dev/null
+++ b/src/univalue/test/pass4.json
@@ -0,0 +1 @@
+[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
diff --git a/src/univalue/test/unitester.cpp b/src/univalue/test/unitester.cpp
index 75c0dc225a..2308afbcdf 100644
--- a/src/univalue/test/unitester.cpp
+++ b/src/univalue/test/unitester.cpp
@@ -114,6 +114,7 @@ static const char *filenames[] = {
"fail41.json", // invalid unicode: unfinished UTF-8
"fail42.json", // valid json with garbage following a nul byte
"fail44.json", // unterminated string
+ "fail45.json", // nested beyond max depth
"fail3.json",
"fail4.json", // extra comma
"fail5.json",
@@ -124,6 +125,7 @@ static const char *filenames[] = {
"pass1.json",
"pass2.json",
"pass3.json",
+ "pass4.json",
"round1.json", // round-trip test
"round2.json", // unicode
"round3.json", // bare string
diff --git a/src/util/asmap.cpp b/src/util/asmap.cpp
new file mode 100644
index 0000000000..60bd27bf90
--- /dev/null
+++ b/src/util/asmap.cpp
@@ -0,0 +1,103 @@
+// Copyright (c) 2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <vector>
+#include <assert.h>
+#include <crypto/common.h>
+
+namespace {
+
+uint32_t DecodeBits(std::vector<bool>::const_iterator& bitpos, const std::vector<bool>::const_iterator& endpos, uint8_t minval, const std::vector<uint8_t> &bit_sizes)
+{
+ uint32_t val = minval;
+ bool bit;
+ for (std::vector<uint8_t>::const_iterator bit_sizes_it = bit_sizes.begin();
+ bit_sizes_it != bit_sizes.end(); ++bit_sizes_it) {
+ if (bit_sizes_it + 1 != bit_sizes.end()) {
+ if (bitpos == endpos) break;
+ bit = *bitpos;
+ bitpos++;
+ } else {
+ bit = 0;
+ }
+ if (bit) {
+ val += (1 << *bit_sizes_it);
+ } else {
+ for (int b = 0; b < *bit_sizes_it; b++) {
+ if (bitpos == endpos) break;
+ bit = *bitpos;
+ bitpos++;
+ val += bit << (*bit_sizes_it - 1 - b);
+ }
+ return val;
+ }
+ }
+ return -1;
+}
+
+const std::vector<uint8_t> TYPE_BIT_SIZES{0, 0, 1};
+uint32_t DecodeType(std::vector<bool>::const_iterator& bitpos, const std::vector<bool>::const_iterator& endpos)
+{
+ return DecodeBits(bitpos, endpos, 0, TYPE_BIT_SIZES);
+}
+
+const std::vector<uint8_t> ASN_BIT_SIZES{15, 16, 17, 18, 19, 20, 21, 22, 23, 24};
+uint32_t DecodeASN(std::vector<bool>::const_iterator& bitpos, const std::vector<bool>::const_iterator& endpos)
+{
+ return DecodeBits(bitpos, endpos, 1, ASN_BIT_SIZES);
+}
+
+
+const std::vector<uint8_t> MATCH_BIT_SIZES{1, 2, 3, 4, 5, 6, 7, 8};
+uint32_t DecodeMatch(std::vector<bool>::const_iterator& bitpos, const std::vector<bool>::const_iterator& endpos)
+{
+ return DecodeBits(bitpos, endpos, 2, MATCH_BIT_SIZES);
+}
+
+
+const std::vector<uint8_t> JUMP_BIT_SIZES{5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30};
+uint32_t DecodeJump(std::vector<bool>::const_iterator& bitpos, const std::vector<bool>::const_iterator& endpos)
+{
+ return DecodeBits(bitpos, endpos, 17, JUMP_BIT_SIZES);
+}
+
+}
+
+uint32_t Interpret(const std::vector<bool> &asmap, const std::vector<bool> &ip)
+{
+ std::vector<bool>::const_iterator pos = asmap.begin();
+ const std::vector<bool>::const_iterator endpos = asmap.end();
+ uint8_t bits = ip.size();
+ uint32_t default_asn = 0;
+ uint32_t opcode, jump, match, matchlen;
+ while (pos != endpos) {
+ opcode = DecodeType(pos, endpos);
+ if (opcode == 0) {
+ return DecodeASN(pos, endpos);
+ } else if (opcode == 1) {
+ jump = DecodeJump(pos, endpos);
+ if (bits == 0) break;
+ if (ip[ip.size() - bits]) {
+ if (jump >= endpos - pos) break;
+ pos += jump;
+ }
+ bits--;
+ } else if (opcode == 2) {
+ match = DecodeMatch(pos, endpos);
+ matchlen = CountBits(match) - 1;
+ for (uint32_t bit = 0; bit < matchlen; bit++) {
+ if (bits == 0) break;
+ if ((ip[ip.size() - bits]) != ((match >> (matchlen - 1 - bit)) & 1)) {
+ return default_asn;
+ }
+ bits--;
+ }
+ } else if (opcode == 3) {
+ default_asn = DecodeASN(pos, endpos);
+ } else {
+ break;
+ }
+ }
+ return 0; // 0 is not a valid ASN
+}
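For reference, a caller-side sketch (the helper below is hypothetical and not part of this patch) of how raw address bytes would be flattened into the bit vector Interpret() consumes, most-significant bit first; Interpret() returns 0 when the map has no entry for the address:

// <util/asmap.h> uses std::vector and uint32_t but has no includes of its
// own, so pull in the standard headers first.
#include <cstdint>
#include <vector>

#include <util/asmap.h>

// Hypothetical helper: expand address bytes into bits (MSB first) and map
// them to an ASN.
uint32_t LookupASNSketch(const std::vector<bool>& asmap, const std::vector<uint8_t>& addr_bytes)
{
    std::vector<bool> ip_bits;
    ip_bits.reserve(addr_bytes.size() * 8);
    for (const uint8_t byte : addr_bytes) {
        for (int bit = 7; bit >= 0; --bit) {
            ip_bits.push_back((byte >> bit) & 1);
        }
    }
    return Interpret(asmap, ip_bits); // 0 means "no ASN found"
}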
diff --git a/src/util/asmap.h b/src/util/asmap.h
new file mode 100644
index 0000000000..a0e14013c5
--- /dev/null
+++ b/src/util/asmap.h
@@ -0,0 +1,10 @@
+// Copyright (c) 2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_UTIL_ASMAP_H
+#define BITCOIN_UTIL_ASMAP_H
+
+uint32_t Interpret(const std::vector<bool> &asmap, const std::vector<bool> &ip);
+
+#endif // BITCOIN_UTIL_ASMAP_H
diff --git a/src/util/bip32.cpp b/src/util/bip32.cpp
index 571c6ec5e1..6f176dd5ec 100644
--- a/src/util/bip32.cpp
+++ b/src/util/bip32.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2019-2018 The Bitcoin Core developers
+// Copyright (c) 2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/util/bip32.h b/src/util/bip32.h
index c93940e4eb..7e58b79f38 100644
--- a/src/util/bip32.h
+++ b/src/util/bip32.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2019-2018 The Bitcoin Core developers
+// Copyright (c) 2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/util/message.cpp b/src/util/message.cpp
new file mode 100644
index 0000000000..1e7128d225
--- /dev/null
+++ b/src/util/message.cpp
@@ -0,0 +1,92 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <hash.h> // For CHashWriter
+#include <key.h> // For CKey
+#include <key_io.h> // For DecodeDestination()
+#include <pubkey.h> // For CPubKey
+#include <script/standard.h> // For CTxDestination, IsValidDestination(), PKHash
+#include <serialize.h> // For SER_GETHASH
+#include <util/message.h>
+#include <util/strencodings.h> // For DecodeBase64()
+
+#include <string>
+#include <vector>
+
+/**
+ * Text used to signify that a signed message follows and to prevent
+ * inadvertently signing a transaction.
+ */
+const std::string MESSAGE_MAGIC = "Bitcoin Signed Message:\n";
+
+MessageVerificationResult MessageVerify(
+ const std::string& address,
+ const std::string& signature,
+ const std::string& message)
+{
+ CTxDestination destination = DecodeDestination(address);
+ if (!IsValidDestination(destination)) {
+ return MessageVerificationResult::ERR_INVALID_ADDRESS;
+ }
+
+ if (boost::get<PKHash>(&destination) == nullptr) {
+ return MessageVerificationResult::ERR_ADDRESS_NO_KEY;
+ }
+
+ bool invalid = false;
+ std::vector<unsigned char> signature_bytes = DecodeBase64(signature.c_str(), &invalid);
+ if (invalid) {
+ return MessageVerificationResult::ERR_MALFORMED_SIGNATURE;
+ }
+
+ CPubKey pubkey;
+ if (!pubkey.RecoverCompact(MessageHash(message), signature_bytes)) {
+ return MessageVerificationResult::ERR_PUBKEY_NOT_RECOVERED;
+ }
+
+ if (!(CTxDestination(PKHash(pubkey)) == destination)) {
+ return MessageVerificationResult::ERR_NOT_SIGNED;
+ }
+
+ return MessageVerificationResult::OK;
+}
+
+bool MessageSign(
+ const CKey& privkey,
+ const std::string& message,
+ std::string& signature)
+{
+ std::vector<unsigned char> signature_bytes;
+
+ if (!privkey.SignCompact(MessageHash(message), signature_bytes)) {
+ return false;
+ }
+
+ signature = EncodeBase64(signature_bytes.data(), signature_bytes.size());
+
+ return true;
+}
+
+uint256 MessageHash(const std::string& message)
+{
+ CHashWriter hasher(SER_GETHASH, 0);
+ hasher << MESSAGE_MAGIC << message;
+
+ return hasher.GetHash();
+}
+
+std::string SigningResultString(const SigningResult res)
+{
+ switch (res) {
+ case SigningResult::OK:
+ return "No error";
+ case SigningResult::PRIVATE_KEY_NOT_AVAILABLE:
+ return "Private key not available";
+ case SigningResult::SIGNING_FAILED:
+ return "Sign failed";
+ // no default case, so the compiler can warn about missing cases
+ }
+ assert(false);
+}
diff --git a/src/util/message.h b/src/util/message.h
new file mode 100644
index 0000000000..b31c5f5761
--- /dev/null
+++ b/src/util/message.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_UTIL_MESSAGE_H
+#define BITCOIN_UTIL_MESSAGE_H
+
+#include <key.h> // For CKey
+#include <uint256.h>
+
+#include <string>
+
+extern const std::string MESSAGE_MAGIC;
+
+/** The result of a signed message verification.
+ * Message verification takes as an input:
+ * - address (with whose private key the message is supposed to have been signed)
+ * - signature
+ * - message
+ */
+enum class MessageVerificationResult {
+ //! The provided address is invalid.
+ ERR_INVALID_ADDRESS,
+
+ //! The provided address is valid but does not refer to a public key.
+ ERR_ADDRESS_NO_KEY,
+
+ //! The provided signature couldn't be parsed (maybe invalid base64).
+ ERR_MALFORMED_SIGNATURE,
+
+ //! A public key could not be recovered from the provided signature and message.
+ ERR_PUBKEY_NOT_RECOVERED,
+
+ //! The message was not signed with the private key of the provided address.
+ ERR_NOT_SIGNED,
+
+ //! The message verification was successful.
+ OK
+};
+
+enum class SigningResult {
+ OK, //!< No error
+ PRIVATE_KEY_NOT_AVAILABLE,
+ SIGNING_FAILED,
+};
+
+/** Verify a signed message.
+ * @param[in] address Signer's bitcoin address, it must refer to a public key.
+ * @param[in] signature The signature in base64 format.
+ * @param[in] message The message that was signed.
+ * @return result code */
+MessageVerificationResult MessageVerify(
+ const std::string& address,
+ const std::string& signature,
+ const std::string& message);
+
+/** Sign a message.
+ * @param[in] privkey Private key to sign with.
+ * @param[in] message The message to sign.
+ * @param[out] signature Signature, base64 encoded, only set if true is returned.
+ * @return true if signing was successful. */
+bool MessageSign(
+ const CKey& privkey,
+ const std::string& message,
+ std::string& signature);
+
+/**
+ * Hashes a message for signing and verification in a manner that prevents
+ * inadvertently signing a transaction.
+ */
+uint256 MessageHash(const std::string& message);
+
+std::string SigningResultString(const SigningResult res);
+
+#endif // BITCOIN_UTIL_MESSAGE_H
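A minimal round-trip sketch of the new API (illustrative only; the helper name is made up and error handling is elided):

#include <key.h>
#include <key_io.h>
#include <script/standard.h>
#include <util/message.h>

#include <string>

// Sign an arbitrary message with a freshly generated key, then verify it
// against the corresponding P2PKH address. Assumes the secp256k1 context
// has been initialized (e.g. via ECC_Start(), as the test setup does).
bool SignAndVerifySketch(const std::string& message)
{
    CKey key;
    key.MakeNewKey(/* fCompressed= */ true);

    std::string signature; // base64, only set on success
    if (!MessageSign(key, message, signature)) return false;

    const std::string address = EncodeDestination(PKHash(key.GetPubKey()));
    return MessageVerify(address, signature, message) == MessageVerificationResult::OK;
}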
diff --git a/src/util/moneystr.cpp b/src/util/moneystr.cpp
index 2797f450ca..40d8918dfc 100644
--- a/src/util/moneystr.cpp
+++ b/src/util/moneystr.cpp
@@ -36,14 +36,14 @@ bool ParseMoney(const std::string& str, CAmount& nRet)
if (!ValidAsCString(str)) {
return false;
}
- return ParseMoney(str.c_str(), nRet);
-}
-bool ParseMoney(const char* pszIn, CAmount& nRet)
-{
+ if (str.empty()) {
+ return false;
+ }
+
std::string strWhole;
int64_t nUnits = 0;
- const char* p = pszIn;
+ const char* p = str.c_str();
while (IsSpace(*p))
p++;
for (; *p; p++)
diff --git a/src/util/moneystr.h b/src/util/moneystr.h
index 027c7e2e53..d8b08adc24 100644
--- a/src/util/moneystr.h
+++ b/src/util/moneystr.h
@@ -18,7 +18,7 @@
* JSON but use AmountFromValue and ValueFromAmount for that.
*/
std::string FormatMoney(const CAmount& n);
+/** Parse an amount denoted in full coins. E.g. "0.0034" supplied on the command line. **/
NODISCARD bool ParseMoney(const std::string& str, CAmount& nRet);
-NODISCARD bool ParseMoney(const char* pszIn, CAmount& nRet);
#endif // BITCOIN_UTIL_MONEYSTR_H
diff --git a/src/util/settings.h b/src/util/settings.h
index 9ca581109d..1d03639fa2 100644
--- a/src/util/settings.h
+++ b/src/util/settings.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 The Bitcoin Core developers
+// Copyright (c) 2019-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -71,11 +71,11 @@ struct SettingsSpan {
explicit SettingsSpan(const SettingsValue& value) noexcept : SettingsSpan(&value, 1) {}
explicit SettingsSpan(const SettingsValue* data, size_t size) noexcept : data(data), size(size) {}
explicit SettingsSpan(const std::vector<SettingsValue>& vec) noexcept;
- const SettingsValue* begin() const; //<! Pointer to first non-negated value.
- const SettingsValue* end() const; //<! Pointer to end of values.
- bool empty() const; //<! True if there are any non-negated values.
- bool last_negated() const; //<! True if the last value is negated.
- size_t negated() const; //<! Number of negated values.
+ const SettingsValue* begin() const; //!< Pointer to first non-negated value.
+ const SettingsValue* end() const; //!< Pointer to end of values.
+ bool empty() const; //!< True if there are any non-negated values.
+ bool last_negated() const; //!< True if the last value is negated.
+ size_t negated() const; //!< Number of negated values.
const SettingsValue* data = nullptr;
size_t size = 0;
diff --git a/src/util/system.cpp b/src/util/system.cpp
index 8d1efc170a..b0a538b527 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -63,6 +63,7 @@
#include <malloc.h>
#endif
+#include <boost/algorithm/string/replace.hpp>
#include <thread>
#include <typeinfo>
#include <univalue.h>
@@ -312,21 +313,18 @@ bool ArgsManager::ParseParameters(int argc, const char* const argv[], std::strin
std::string section;
util::SettingsValue value = InterpretOption(section, key, val);
Optional<unsigned int> flags = GetArgFlags('-' + key);
- if (flags) {
- if (!CheckValid(key, value, *flags, error)) {
- return false;
- }
- // Weird behavior preserved for backwards compatibility: command
- // line options with section prefixes are allowed but ignored. It
- // would be better if these options triggered the Invalid parameter
- // error below.
- if (section.empty()) {
- m_settings.command_line_options[key].push_back(value);
- }
- } else {
- error = strprintf("Invalid parameter -%s", key);
+
+ // Unknown command line options and command line options with dot
+ // characters (which are returned from InterpretOption with nonempty
+ // section strings) are not valid.
+ if (!flags || !section.empty()) {
+ error = strprintf("Invalid parameter %s", argv[i]);
return false;
}
+
+ if (!CheckValid(key, value, *flags, error)) return false;
+
+ m_settings.command_line_options[key].push_back(value);
}
// we do not allow -includeconf from command line
@@ -864,6 +862,32 @@ std::vector<util::SettingsValue> ArgsManager::GetSettingsList(const std::string&
return util::GetSettingsList(m_settings, m_network, SettingName(arg), !UseDefaultSection(arg));
}
+void ArgsManager::logArgsPrefix(
+ const std::string& prefix,
+ const std::string& section,
+ const std::map<std::string, std::vector<util::SettingsValue>>& args) const
+{
+ std::string section_str = section.empty() ? "" : "[" + section + "] ";
+ for (const auto& arg : args) {
+ for (const auto& value : arg.second) {
+ Optional<unsigned int> flags = GetArgFlags('-' + arg.first);
+ if (flags) {
+ std::string value_str = (*flags & SENSITIVE) ? "****" : value.write();
+ LogPrintf("%s %s%s=%s\n", prefix, section_str, arg.first, value_str);
+ }
+ }
+ }
+}
+
+void ArgsManager::LogArgs() const
+{
+ LOCK(cs_args);
+ for (const auto& section : m_settings.ro_config) {
+ logArgsPrefix("Config file arg:", section.first, section.second);
+ }
+ logArgsPrefix("Command-line arg:", "", m_settings.command_line_options);
+}
+
bool RenameOver(fs::path src, fs::path dest)
{
#ifdef WIN32
@@ -974,17 +998,19 @@ void AllocateFileRange(FILE *file, unsigned int offset, unsigned int length) {
SetEndOfFile(hFile);
#elif defined(MAC_OSX)
// OSX specific version
+ // NOTE: Contrary to other OS versions, the OSX version assumes that
+ // NOTE: offset is the size of the file.
fstore_t fst;
fst.fst_flags = F_ALLOCATECONTIG;
fst.fst_posmode = F_PEOFPOSMODE;
fst.fst_offset = 0;
- fst.fst_length = (off_t)offset + length;
+ fst.fst_length = length; // macOS fst_length takes the number of free bytes to allocate, not the desired file size
fst.fst_bytesalloc = 0;
if (fcntl(fileno(file), F_PREALLOCATE, &fst) == -1) {
fst.fst_flags = F_ALLOCATEALL;
fcntl(fileno(file), F_PREALLOCATE, &fst);
}
- ftruncate(fileno(file), fst.fst_length);
+ ftruncate(fileno(file), static_cast<off_t>(offset) + length);
#else
#if defined(__linux__)
// Version using posix_fallocate
@@ -1022,6 +1048,15 @@ fs::path GetSpecialFolderPath(int nFolder, bool fCreate)
}
#endif
+#ifndef WIN32
+std::string ShellEscape(const std::string& arg)
+{
+ std::string escaped = arg;
+ boost::replace_all(escaped, "'", "'\"'\"'");
+ return "'" + escaped + "'";
+}
+#endif
+
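Illustration of the quoting behaviour (the example value is hand-computed, not taken from the patch): the embedded single quote is closed, emitted inside double quotes, and reopened, so the argument stays one shell word.

#include <util/system.h>

#include <cassert>
#include <string>

void ShellEscapeSketch()
{
    const std::string escaped = ShellEscape("block 0's data.dat");
    assert(escaped == "'block 0'\"'\"'s data.dat'");
}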
#if HAVE_SYSTEM
void runCommand(const std::string& strCommand)
{
diff --git a/src/util/system.h b/src/util/system.h
index 394adb9555..3138522b5c 100644
--- a/src/util/system.h
+++ b/src/util/system.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -81,6 +81,9 @@ fs::path GetConfigFile(const std::string& confPath);
#ifdef WIN32
fs::path GetSpecialFolderPath(int nFolder, bool fCreate = true);
#endif
+#ifndef WIN32
+std::string ShellEscape(const std::string& arg);
+#endif
#if HAVE_SYSTEM
void runCommand(const std::string& strCommand);
#endif
@@ -145,6 +148,8 @@ public:
* between mainnet and regtest/testnet won't cause problems due to these
* parameters by accident. */
NETWORK_ONLY = 0x200,
+ // This argument's value is sensitive (such as a password).
+ SENSITIVE = 0x400,
};
protected:
@@ -155,7 +160,7 @@ protected:
unsigned int m_flags;
};
- mutable CCriticalSection cs_args;
+ mutable RecursiveMutex cs_args;
util::Settings m_settings GUARDED_BY(cs_args);
std::string m_network GUARDED_BY(cs_args);
std::set<std::string> m_network_only_args GUARDED_BY(cs_args);
@@ -318,6 +323,19 @@ public:
* Return nullopt for unknown arg.
*/
Optional<unsigned int> GetArgFlags(const std::string& name) const;
+
+ /**
+ * Log the config file options and the command line arguments,
+ * useful for troubleshooting.
+ */
+ void LogArgs() const;
+
+private:
+ // Helper function for LogArgs().
+ void logArgsPrefix(
+ const std::string& prefix,
+ const std::string& section,
+ const std::map<std::string, std::vector<util::SettingsValue>>& args) const;
};
extern ArgsManager gArgs;
diff --git a/src/util/time.cpp b/src/util/time.cpp
index 2afff2626b..14937b985e 100644
--- a/src/util/time.cpp
+++ b/src/util/time.cpp
@@ -11,10 +11,13 @@
#include <atomic>
#include <boost/date_time/posix_time/posix_time.hpp>
-#include <boost/thread.hpp>
#include <ctime>
+#include <thread>
+
#include <tinyformat.h>
+void UninterruptibleSleep(const std::chrono::microseconds& n) { std::this_thread::sleep_for(n); }
+
static std::atomic<int64_t> nMockTime(0); //!< For unit testing
int64_t GetTime()
@@ -72,32 +75,16 @@ int64_t GetSystemTimeInSeconds()
return GetTimeMicros()/1000000;
}
-void MilliSleep(int64_t n)
-{
-
-/**
- * Boost's sleep_for was uninterruptible when backed by nanosleep from 1.50
- * until fixed in 1.52. Use the deprecated sleep method for the broken case.
- * See: https://svn.boost.org/trac/boost/ticket/7238
- */
-#if defined(HAVE_WORKING_BOOST_SLEEP_FOR)
- boost::this_thread::sleep_for(boost::chrono::milliseconds(n));
-#elif defined(HAVE_WORKING_BOOST_SLEEP)
- boost::this_thread::sleep(boost::posix_time::milliseconds(n));
-#else
-//should never get here
-#error missing boost sleep implementation
-#endif
-}
-
std::string FormatISO8601DateTime(int64_t nTime) {
struct tm ts;
time_t time_val = nTime;
#ifdef _MSC_VER
- gmtime_s(&ts, &time_val);
+ if (gmtime_s(&ts, &time_val) != 0) {
#else
- gmtime_r(&time_val, &ts);
+ if (gmtime_r(&time_val, &ts) == nullptr) {
#endif
+ return {};
+ }
return strprintf("%04i-%02i-%02iT%02i:%02i:%02iZ", ts.tm_year + 1900, ts.tm_mon + 1, ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec);
}
@@ -105,10 +92,12 @@ std::string FormatISO8601Date(int64_t nTime) {
struct tm ts;
time_t time_val = nTime;
#ifdef _MSC_VER
- gmtime_s(&ts, &time_val);
+ if (gmtime_s(&ts, &time_val) != 0) {
#else
- gmtime_r(&time_val, &ts);
+ if (gmtime_r(&time_val, &ts) == nullptr) {
#endif
+ return {};
+ }
return strprintf("%04i-%02i-%02i", ts.tm_year + 1900, ts.tm_mon + 1, ts.tm_mday);
}
@@ -124,4 +113,4 @@ int64_t ParseISO8601DateTime(const std::string& str)
if (ptime.is_not_a_date_time() || epoch > ptime)
return 0;
return (ptime - epoch).total_seconds();
-} \ No newline at end of file
+}
diff --git a/src/util/time.h b/src/util/time.h
index af4390aa1c..77de1e047d 100644
--- a/src/util/time.h
+++ b/src/util/time.h
@@ -10,6 +10,8 @@
#include <string>
#include <chrono>
+void UninterruptibleSleep(const std::chrono::microseconds& n);
+
/**
* Helper to count the seconds of a duration.
*
@@ -36,8 +38,6 @@ void SetMockTime(int64_t nMockTimeIn);
/** For testing */
int64_t GetMockTime();
-void MilliSleep(int64_t n);
-
/** Return system time (or mocked time, if set) */
template <typename T>
T GetTime();
diff --git a/src/util/validation.cpp b/src/util/validation.cpp
deleted file mode 100644
index bd52f57751..0000000000
--- a/src/util/validation.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include <util/validation.h>
-
-#include <consensus/validation.h>
-#include <tinyformat.h>
-
-/** Convert ValidationState to a human-readable message for logging */
-std::string FormatStateMessage(const ValidationState &state)
-{
- return strprintf("%s%s",
- state.GetRejectReason(),
- state.GetDebugMessage().empty() ? "" : ", "+state.GetDebugMessage());
-}
-
-const std::string strMessageMagic = "Bitcoin Signed Message:\n";
diff --git a/src/util/validation.h b/src/util/validation.h
deleted file mode 100644
index da2cf9f102..0000000000
--- a/src/util/validation.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef BITCOIN_UTIL_VALIDATION_H
-#define BITCOIN_UTIL_VALIDATION_H
-
-#include <string>
-
-class ValidationState;
-
-/** Convert ValidationState to a human-readable message for logging */
-std::string FormatStateMessage(const ValidationState &state);
-
-extern const std::string strMessageMagic;
-
-#endif // BITCOIN_UTIL_VALIDATION_H
diff --git a/src/validation.cpp b/src/validation.cpp
index cdffec48ab..c0327c39bc 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -43,7 +43,6 @@
#include <util/strencodings.h>
#include <util/system.h>
#include <util/translation.h>
-#include <util/validation.h>
#include <validationinterface.h>
#include <warnings.h>
@@ -133,7 +132,7 @@ CTxMemPool mempool(&feeEstimator);
namespace {
CBlockIndex* pindexBestInvalid = nullptr;
- CCriticalSection cs_LastBlockFile;
+ RecursiveMutex cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;
/** Global flag to indicate we should check to see if there are
@@ -662,7 +661,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
CAmount nFees = 0;
if (!Consensus::CheckTxInputs(tx, state, m_view, GetSpendHeight(m_view), nFees)) {
- return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
+ return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
}
// Check for non-standard pay-to-script-hash in inputs
@@ -951,7 +950,7 @@ bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs& args, Workspace& ws, Precomp
unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus());
if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata)) {
return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s",
- __func__, hash.ToString(), FormatStateMessage(state));
+ __func__, hash.ToString(), state.ToString());
}
return true;
@@ -1392,10 +1391,14 @@ static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip) E
CheckForkWarningConditions();
}
+// Called both upon regular invalid block discovery *and* InvalidateBlock
void static InvalidChainFound(CBlockIndex* pindexNew) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
pindexBestInvalid = pindexNew;
+ if (pindexBestHeader != nullptr && pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) {
+ pindexBestHeader = ::ChainActive().Tip();
+ }
LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
@@ -1408,6 +1411,8 @@ void static InvalidChainFound(CBlockIndex* pindexNew) EXCLUSIVE_LOCKS_REQUIRED(c
CheckForkWarningConditions();
}
+// Same as InvalidChainFound, above, except not called directly from InvalidateBlock,
+// which does its own setBlockIndexCandidates management.
void CChainState::InvalidBlockFound(CBlockIndex *pindex, const BlockValidationState &state) {
if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
pindex->nStatus |= BLOCK_FAILED_VALID;
@@ -1915,7 +1920,7 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
// problems.
return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down");
}
- return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
+ return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
}
// verify that the view's current state corresponds to the previous block
@@ -2093,7 +2098,7 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
// Any transaction validation failure in ConnectBlock is a block consensus failure
state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
tx_state.GetRejectReason(), tx_state.GetDebugMessage());
- return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
+ return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
}
nFees += txfee;
if (!MoneyRange(nFees)) {
@@ -2136,7 +2141,7 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
tx_state.GetRejectReason(), tx_state.GetDebugMessage());
return error("ConnectBlock(): CheckInputScripts on %s failed with %s",
- tx.GetHash().ToString(), FormatStateMessage(state));
+ tx.GetHash().ToString(), state.ToString());
}
control.Add(vChecks);
}
@@ -2187,13 +2192,44 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
return true;
}
+CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(const CTxMemPool& tx_pool)
+{
+ return this->GetCoinsCacheSizeState(
+ tx_pool,
+ nCoinCacheUsage,
+ gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
+}
+
+CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
+ const CTxMemPool& tx_pool,
+ size_t max_coins_cache_size_bytes,
+ size_t max_mempool_size_bytes)
+{
+ int64_t nMempoolUsage = tx_pool.DynamicMemoryUsage();
+ int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
+ int64_t nTotalSpace =
+ max_coins_cache_size_bytes + std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0);
+
+ //! No need to flush periodically if at least this much space is still available.
+ static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES = 10 * 1024 * 1024; // 10MB
+ int64_t large_threshold =
+ std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
+
+ if (cacheSize > nTotalSpace) {
+ LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize, nTotalSpace);
+ return CoinsCacheSizeState::CRITICAL;
+ } else if (cacheSize > large_threshold) {
+ return CoinsCacheSizeState::LARGE;
+ }
+ return CoinsCacheSizeState::OK;
+}
+
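For intuition, a worked example of the two thresholds (the -dbcache/-maxmempool figures below are illustrative, not asserted by this patch):

#include <cstdint>

// -dbcache=450 (MiB) and -maxmempool=300 (MB) with 100 MB currently in use.
constexpr int64_t dbcache_bytes = 450LL * 1024 * 1024;             // ~472 MB
constexpr int64_t mempool_headroom = 300000000LL - 100000000LL;    // 200 MB unused
constexpr int64_t total_space = dbcache_bytes + mempool_headroom;  // ~672 MB
constexpr int64_t ninety_pct = (9 * total_space) / 10;             // ~605 MB
constexpr int64_t minus_10mib = total_space - 10 * 1024 * 1024;    // ~661 MB
constexpr int64_t large_threshold = ninety_pct > minus_10mib ? ninety_pct : minus_10mib;
// usage >  total_space     -> CoinsCacheSizeState::CRITICAL (flushed at the next IF_NEEDED call)
// usage >  large_threshold -> CoinsCacheSizeState::LARGE    (flushed at the next PERIODIC call)
// otherwise                -> CoinsCacheSizeState::OK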
bool CChainState::FlushStateToDisk(
const CChainParams& chainparams,
BlockValidationState &state,
FlushStateMode mode,
int nManualPruneHeight)
{
- int64_t nMempoolUsage = mempool.DynamicMemoryUsage();
LOCK(cs_main);
assert(this->CanFlushToDisk());
static int64_t nLastWrite = 0;
@@ -2208,6 +2244,7 @@ bool CChainState::FlushStateToDisk(
{
bool fFlushForPrune = false;
bool fDoFullFlush = false;
+ CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(::mempool);
LOCK(cs_LastBlockFile);
if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
if (nManualPruneHeight > 0) {
@@ -2236,13 +2273,10 @@ bool CChainState::FlushStateToDisk(
if (nLastFlush == 0) {
nLastFlush = nNow;
}
- int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
- int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
- int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0);
// The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
- bool fCacheLarge = mode == FlushStateMode::PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024);
+ bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE;
// The cache is over the limit, we have to write now.
- bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cacheSize > nTotalSpace;
+ bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL;
// It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
// It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
@@ -2324,7 +2358,7 @@ void CChainState::ForceFlushStateToDisk() {
BlockValidationState state;
const CChainParams& chainparams = Params();
if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
- LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state));
+ LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
}
}
@@ -2334,7 +2368,7 @@ void CChainState::PruneAndFlush() {
const CChainParams& chainparams = Params();
if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) {
- LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state));
+ LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
}
}
@@ -2561,7 +2595,7 @@ bool CChainState::ConnectTip(BlockValidationState& state, const CChainParams& ch
if (!rv) {
if (state.IsInvalid())
InvalidBlockFound(pindexNew, state);
- return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), FormatStateMessage(state));
+ return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
}
nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
@@ -3566,7 +3600,7 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS
}
if (!CheckBlockHeader(block, state, chainparams.GetConsensus()))
- return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
+ return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString());
// Get prev block index
CBlockIndex* pindexPrev = nullptr;
@@ -3581,7 +3615,7 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS
return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
}
if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
- return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
+ return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString());
/* Determine if this block descends from any block which has been found
* invalid (m_failed_blocks), then mark pindexPrev and any blocks between
@@ -3731,7 +3765,7 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, Block
pindex->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(pindex);
}
- return error("%s: %s", __func__, FormatStateMessage(state));
+ return error("%s: %s", __func__, state.ToString());
}
// Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
@@ -3781,7 +3815,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
}
if (!ret) {
GetMainSignals().BlockChecked(*pblock, state);
- return error("%s: AcceptBlock FAILED (%s)", __func__, FormatStateMessage(state));
+ return error("%s: AcceptBlock FAILED (%s)", __func__, state.ToString());
}
}
@@ -3789,7 +3823,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
BlockValidationState state; // Only used to report errors, not invalidity - ignore it
if (!::ChainstateActive().ActivateBestChain(state, chainparams, pblock))
- return error("%s: ActivateBestChain failed (%s)", __func__, FormatStateMessage(state));
+ return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString());
return true;
}
@@ -3807,11 +3841,11 @@ bool TestBlockValidity(BlockValidationState& state, const CChainParams& chainpar
// NOTE: CheckBlockHeader is called by CheckBlock
if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
- return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state));
+ return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString());
if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
- return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
+ return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
- return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state));
+ return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString());
if (!::ChainstateActive().ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
return false;
assert(state.IsValid());
@@ -3909,7 +3943,7 @@ void PruneBlockFilesManual(int nManualPruneHeight)
const CChainParams& chainparams = Params();
if (!::ChainstateActive().FlushStateToDisk(
chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
- LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state));
+ LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
}
}
@@ -4227,7 +4261,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
// check level 1: verify block validity
if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
- pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
+ pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
// check level 2: verify undo validity
if (nCheckLevel >= 2 && pindex) {
CBlockUndo undo;
@@ -4276,7 +4310,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
if (!::ChainstateActive().ConnectBlock(block, state, pindex, coins, chainparams))
- return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
+ return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
}
}
@@ -4464,7 +4498,7 @@ bool CChainState::RewindBlockIndex(const CChainParams& params)
// Disconnect block
if (!DisconnectTip(state, params, nullptr)) {
- return error("RewindBlockIndex: unable to disconnect block at height %i (%s)", tip->nHeight, FormatStateMessage(state));
+ return error("RewindBlockIndex: unable to disconnect block at height %i (%s)", tip->nHeight, state.ToString());
}
// Reduce validity flag and have-data flags.
@@ -4484,7 +4518,7 @@ bool CChainState::RewindBlockIndex(const CChainParams& params)
// Occasionally flush state to disk.
if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) {
- LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", FormatStateMessage(state));
+ LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", state.ToString());
return false;
}
}
@@ -4515,7 +4549,7 @@ bool RewindBlockIndex(const CChainParams& params) {
// it'll get called a bunch real soon.
BlockValidationState state;
if (!::ChainstateActive().FlushStateToDisk(params, state, FlushStateMode::ALWAYS)) {
- LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", FormatStateMessage(state));
+ LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", state.ToString());
return false;
}
}
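
The long run of mechanical replacements above swaps the free helper FormatStateMessage(state) for a state.ToString() member. A minimal sketch of the shape of that change with a stand-in class (this is not the real ValidationState API):

    #include <iostream>
    #include <string>

    // Stand-in for a validation state object: the error formatting that
    // used to live in a free helper becomes a member, so call sites shrink
    // to state.ToString() instead of FormatStateMessage(state).
    class SimpleValidationState
    {
        std::string m_reject_reason;
        std::string m_debug_message;

    public:
        void Invalid(std::string reason, std::string debug = "")
        {
            m_reject_reason = std::move(reason);
            m_debug_message = std::move(debug);
        }
        std::string ToString() const
        {
            return m_debug_message.empty() ? m_reject_reason
                                           : m_reject_reason + ", " + m_debug_message;
        }
    };

    int main()
    {
        SimpleValidationState state;
        state.Invalid("bad-txns-inputs-missingorspent", "input 0 not found");
        std::cout << "CheckTxInputs failed: " << state.ToString() << "\n";
    }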
diff --git a/src/validation.h b/src/validation.h
index 54f97e7213..a5335edc43 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -136,7 +136,7 @@ struct BlockHasher
size_t operator()(const uint256& hash) const { return ReadLE64(hash.begin()); }
};
-extern CCriticalSection cs_main;
+extern RecursiveMutex cs_main;
extern CBlockPolicyEstimator feeEstimator;
extern CTxMemPool mempool;
typedef std::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
@@ -530,6 +530,15 @@ public:
void InitCache() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
};
+enum class CoinsCacheSizeState
+{
+ //! The coins cache is in immediate need of a flush.
+ CRITICAL = 2,
+ //! The cache is at >= 90% capacity.
+ LARGE = 1,
+ OK = 0
+};
+
/**
* CChainState stores and provides an API to update our local knowledge of the
* current best chain.
@@ -551,7 +560,7 @@ private:
* Every received block is assigned a unique and increasing identifier, so we
* know which one to give priority in case of a fork.
*/
- CCriticalSection cs_nBlockSequenceId;
+ RecursiveMutex cs_nBlockSequenceId;
/** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
int32_t nBlockSequenceId = 1;
/** Decreasing counter (used by subsequent preciousblock calls). */
@@ -563,7 +572,7 @@ private:
* the ChainState CriticalSection
* A lock that must be held when modifying this ChainState - held in ActivateBestChain()
*/
- CCriticalSection m_cs_chainstate;
+ RecursiveMutex m_cs_chainstate;
/**
* Whether this chainstate is undergoing initial block download.
@@ -721,6 +730,17 @@ public:
/** Update the chain tip based on database information, i.e. CoinsTip()'s best block. */
bool LoadChainTip(const CChainParams& chainparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ //! Determine whether we need to flush the coins cache to disk.
+ //!
+ //! @return the current size state of the coins cache.
+ CoinsCacheSizeState GetCoinsCacheSizeState(const CTxMemPool& tx_pool)
+ EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
+ CoinsCacheSizeState GetCoinsCacheSizeState(
+ const CTxMemPool& tx_pool,
+ size_t max_coins_cache_size_bytes,
+ size_t max_mempool_size_bytes) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
private:
bool ActivateBestChainStep(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) EXCLUSIVE_LOCKS_REQUIRED(cs_main, ::mempool.cs);
bool ConnectTip(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions& disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, ::mempool.cs);
diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp
index 7b20d26f33..1deb93c972 100644
--- a/src/validationinterface.cpp
+++ b/src/validationinterface.cpp
@@ -1,11 +1,15 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <validationinterface.h>
+#include <chain.h>
+#include <consensus/validation.h>
+#include <logging.h>
#include <primitives/block.h>
+#include <primitives/transaction.h>
#include <scheduler.h>
#include <future>
@@ -110,52 +114,89 @@ void SyncWithValidationInterfaceQueue() {
promise.get_future().wait();
}
+// Use a macro instead of a function for conditional logging to prevent
+// evaluating arguments when logging is not enabled.
+//
+// NOTE: The lambda captures all local variables by value.
+#define ENQUEUE_AND_LOG_EVENT(event, fmt, name, ...) \
+ do { \
+ auto local_name = (name); \
+ LOG_EVENT("Enqueuing " fmt, local_name, __VA_ARGS__); \
+ m_internals->m_schedulerClient.AddToProcessQueue([=] { \
+ LOG_EVENT(fmt, local_name, __VA_ARGS__); \
+ event(); \
+ }); \
+ } while (0)
+
+#define LOG_EVENT(fmt, ...) \
+ LogPrint(BCLog::VALIDATION, fmt "\n", __VA_ARGS__)
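
Spelled out without the macro, the pattern is: log once when the callback is queued, capture everything by value, and log again when the scheduler thread finally runs it. A standalone sketch with a plain queue standing in for the scheduler client (the names are illustrative):

    #include <functional>
    #include <iostream>
    #include <queue>
    #include <string>

    // Callbacks are logged at enqueue time and again at execution time;
    // the lambda captures its data by value so it stays valid until the
    // queue is drained, possibly much later and on another thread.
    std::queue<std::function<void()>> g_callbacks;

    void EnqueueAndLog(const std::string& what, std::function<void()> event)
    {
        std::cout << "Enqueuing " << what << "\n";
        g_callbacks.push([what, event] {
            std::cout << what << "\n";
            event();
        });
    }

    int main()
    {
        const std::string block_hash{"00000000000000000000aaaa-example"};
        EnqueueAndLog("BlockConnected: block hash=" + block_hash,
                      [block_hash] { /* notify subscribers about block_hash */ });

        // Later: the scheduler thread drains the queue.
        while (!g_callbacks.empty()) {
            g_callbacks.front()();
            g_callbacks.pop();
        }
    }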
void CMainSignals::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
// Dependencies exist that require UpdatedBlockTip events to be delivered in the order in which
// the chain actually updates. One way to ensure this is for the caller to invoke this signal
// in the same critical section where the chain is updated
- m_internals->m_schedulerClient.AddToProcessQueue([pindexNew, pindexFork, fInitialDownload, this] {
+ auto event = [pindexNew, pindexFork, fInitialDownload, this] {
m_internals->UpdatedBlockTip(pindexNew, pindexFork, fInitialDownload);
- });
+ };
+ ENQUEUE_AND_LOG_EVENT(event, "%s: new block hash=%s fork block hash=%s (in IBD=%s)", __func__,
+ pindexNew->GetBlockHash().ToString(),
+ pindexFork ? pindexFork->GetBlockHash().ToString() : "null",
+ fInitialDownload);
}
void CMainSignals::TransactionAddedToMempool(const CTransactionRef &ptx) {
- m_internals->m_schedulerClient.AddToProcessQueue([ptx, this] {
+ auto event = [ptx, this] {
m_internals->TransactionAddedToMempool(ptx);
- });
+ };
+ ENQUEUE_AND_LOG_EVENT(event, "%s: txid=%s wtxid=%s", __func__,
+ ptx->GetHash().ToString(),
+ ptx->GetWitnessHash().ToString());
}
void CMainSignals::TransactionRemovedFromMempool(const CTransactionRef &ptx) {
- m_internals->m_schedulerClient.AddToProcessQueue([ptx, this] {
+ auto event = [ptx, this] {
m_internals->TransactionRemovedFromMempool(ptx);
- });
+ };
+ ENQUEUE_AND_LOG_EVENT(event, "%s: txid=%s wtxid=%s", __func__,
+ ptx->GetHash().ToString(),
+ ptx->GetWitnessHash().ToString());
}
void CMainSignals::BlockConnected(const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex, const std::shared_ptr<const std::vector<CTransactionRef>>& pvtxConflicted) {
- m_internals->m_schedulerClient.AddToProcessQueue([pblock, pindex, pvtxConflicted, this] {
+ auto event = [pblock, pindex, pvtxConflicted, this] {
m_internals->BlockConnected(pblock, pindex, *pvtxConflicted);
- });
+ };
+ ENQUEUE_AND_LOG_EVENT(event, "%s: block hash=%s block height=%d", __func__,
+ pblock->GetHash().ToString(),
+ pindex->nHeight);
}
void CMainSignals::BlockDisconnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
{
- m_internals->m_schedulerClient.AddToProcessQueue([pblock, pindex, this] {
+ auto event = [pblock, pindex, this] {
m_internals->BlockDisconnected(pblock, pindex);
- });
+ };
+ ENQUEUE_AND_LOG_EVENT(event, "%s: block hash=%s block height=%d", __func__,
+ pblock->GetHash().ToString(),
+ pindex->nHeight);
}
void CMainSignals::ChainStateFlushed(const CBlockLocator &locator) {
- m_internals->m_schedulerClient.AddToProcessQueue([locator, this] {
+ auto event = [locator, this] {
m_internals->ChainStateFlushed(locator);
- });
+ };
+ ENQUEUE_AND_LOG_EVENT(event, "%s: block hash=%s", __func__,
+ locator.IsNull() ? "null" : locator.vHave.front().ToString());
}
void CMainSignals::BlockChecked(const CBlock& block, const BlockValidationState& state) {
+ LOG_EVENT("%s: block hash=%s state=%s", __func__,
+ block.GetHash().ToString(), state.ToString());
m_internals->BlockChecked(block, state);
}
void CMainSignals::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &block) {
+ LOG_EVENT("%s: block hash=%s", __func__, block->GetHash().ToString());
m_internals->NewPoWValidBlock(pindex, block);
}
diff --git a/src/validationinterface.h b/src/validationinterface.h
index 25e14b5d0e..ed6c560944 100644
--- a/src/validationinterface.h
+++ b/src/validationinterface.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -12,7 +12,7 @@
#include <functional>
#include <memory>
-extern CCriticalSection cs_main;
+extern RecursiveMutex cs_main;
class BlockValidationState;
class CBlock;
class CBlockIndex;
diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp
index 79825847c8..67be4d85d2 100644
--- a/src/wallet/db.cpp
+++ b/src/wallet/db.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -44,7 +44,7 @@ void CheckUniqueFileid(const BerkeleyEnvironment& env, const std::string& filena
}
}
-CCriticalSection cs_db;
+RecursiveMutex cs_db;
std::map<std::string, std::weak_ptr<BerkeleyEnvironment>> g_dbenvs GUARDED_BY(cs_db); //!< Map from directory name to db environment.
} // namespace
@@ -650,7 +650,7 @@ void BerkeleyEnvironment::ReloadDbEnv()
{
// Make sure that no Db's are in use
AssertLockNotHeld(cs_db);
- std::unique_lock<CCriticalSection> lock(cs_db);
+ std::unique_lock<RecursiveMutex> lock(cs_db);
m_db_in_use.wait(lock, [this](){
for (auto& count : mapFileUseCount) {
if (count.second > 0) return false;
@@ -756,7 +756,7 @@ bool BerkeleyBatch::Rewrite(BerkeleyDatabase& database, const char* pszSkip)
return fSuccess;
}
}
- MilliSleep(100);
+ UninterruptibleSleep(std::chrono::milliseconds{100});
}
}
@@ -850,7 +850,7 @@ bool BerkeleyDatabase::Rewrite(const char* pszSkip)
return BerkeleyBatch::Rewrite(*this, pszSkip);
}
-bool BerkeleyDatabase::Backup(const std::string& strDest)
+bool BerkeleyDatabase::Backup(const std::string& strDest) const
{
if (IsDummy()) {
return false;
@@ -887,7 +887,7 @@ bool BerkeleyDatabase::Backup(const std::string& strDest)
}
}
}
- MilliSleep(100);
+ UninterruptibleSleep(std::chrono::milliseconds{100});
}
}
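
The two MilliSleep(100) call sites above become UninterruptibleSleep(std::chrono::milliseconds{100}). A simplified sketch of why the typed-duration signature is preferable (this is only the call shape, not the util/time.h implementation):

    #include <chrono>
    #include <iostream>
    #include <thread>

    // Taking std::chrono::milliseconds instead of a bare integer makes the
    // unit explicit at every call site and lets coarser durations convert
    // implicitly and losslessly.
    void SleepFor(std::chrono::milliseconds duration)
    {
        std::this_thread::sleep_for(duration);
    }

    int main()
    {
        using namespace std::chrono_literals;
        std::cout << "retrying in 100ms\n";
        SleepFor(100ms);
        SleepFor(std::chrono::seconds{1}); // converts to milliseconds without loss
        std::cout << "done\n";
    }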
diff --git a/src/wallet/db.h b/src/wallet/db.h
index abec3ae4e2..bebaa55d05 100644
--- a/src/wallet/db.h
+++ b/src/wallet/db.h
@@ -157,7 +157,7 @@ public:
/** Back up the entire database to a file.
*/
- bool Backup(const std::string& strDest);
+ bool Backup(const std::string& strDest) const;
/** Make sure all changes are flushed to disk.
*/
@@ -193,7 +193,7 @@ private:
* Only to be used at a low level, application should ideally not care
* about this.
*/
- bool IsDummy() { return env == nullptr; }
+ bool IsDummy() const { return env == nullptr; }
};
/** RAII class that provides access to a Berkeley database */
diff --git a/src/wallet/feebumper.cpp b/src/wallet/feebumper.cpp
index 36588eb7d1..486a6b8bf1 100644
--- a/src/wallet/feebumper.cpp
+++ b/src/wallet/feebumper.cpp
@@ -12,7 +12,6 @@
#include <util/moneystr.h>
#include <util/rbf.h>
#include <util/system.h>
-#include <util/validation.h>
//! Check whether transaction has descendant in wallet or mempool, or has been
//! mined, or conflicts with a mined transaction. Return a feebumper::Result.
@@ -47,7 +46,8 @@ static feebumper::Result PreconditionChecks(const CWallet& wallet, const CWallet
// check that original tx consists entirely of our inputs
// if not, we can't bump the fee, because the wallet has no way of knowing the value of the other inputs (thus the fee)
- if (!wallet.IsAllFromMe(*wtx.tx, ISMINE_SPENDABLE)) {
+ isminefilter filter = wallet.GetLegacyScriptPubKeyMan() && wallet.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) ? ISMINE_WATCH_ONLY : ISMINE_SPENDABLE;
+ if (!wallet.IsAllFromMe(*wtx.tx, filter)) {
errors.push_back("Transaction contains inputs that don't belong to this wallet");
return feebumper::Result::WALLET_ERROR;
}
@@ -78,7 +78,8 @@ static feebumper::Result CheckFeeRate(const CWallet& wallet, const CWalletTx& wt
CFeeRate incrementalRelayFee = std::max(wallet.chain().relayIncrementalFee(), CFeeRate(WALLET_INCREMENTAL_RELAY_FEE));
// Given old total fee and transaction size, calculate the old feeRate
- CAmount old_fee = wtx.GetDebit(ISMINE_SPENDABLE) - wtx.tx->GetValueOut();
+ isminefilter filter = wallet.GetLegacyScriptPubKeyMan() && wallet.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) ? ISMINE_WATCH_ONLY : ISMINE_SPENDABLE;
+ CAmount old_fee = wtx.GetDebit(filter) - wtx.tx->GetValueOut();
const int64_t txSize = GetVirtualTransactionSize(*(wtx.tx));
CFeeRate nOldFeeRate(old_fee, txSize);
// Min total fee is old fee + relay fee
@@ -195,7 +196,8 @@ Result CreateTotalBumpTransaction(const CWallet* wallet, const uint256& txid, co
}
// calculate the old fee and fee-rate
- old_fee = wtx.GetDebit(ISMINE_SPENDABLE) - wtx.tx->GetValueOut();
+ isminefilter filter = wallet->GetLegacyScriptPubKeyMan() && wallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) ? ISMINE_WATCH_ONLY : ISMINE_SPENDABLE;
+ old_fee = wtx.GetDebit(filter) - wtx.tx->GetValueOut();
CFeeRate nOldFeeRate(old_fee, txSize);
// The wallet uses a conservative WALLET_INCREMENTAL_RELAY_FEE value to
// future proof against changes to network wide policy for incremental relay
@@ -308,7 +310,8 @@ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCo
}
}
- old_fee = wtx.GetDebit(ISMINE_SPENDABLE) - wtx.tx->GetValueOut();
+ isminefilter filter = wallet.GetLegacyScriptPubKeyMan() && wallet.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) ? ISMINE_WATCH_ONLY : ISMINE_SPENDABLE;
+ old_fee = wtx.GetDebit(filter) - wtx.tx->GetValueOut();
if (coin_control.m_feerate) {
// The user provided a feeRate argument.
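
The same ternary appears three times in this file: a wallet whose private keys are disabled can only see its coins as watch-only, so fee bumping has to measure debits with ISMINE_WATCH_ONLY rather than ISMINE_SPENDABLE. A standalone sketch of that decision with simplified stand-in types (not the wallet API):

    #include <cstdint>
    #include <iostream>

    using isminefilter = uint8_t;
    constexpr isminefilter ISMINE_WATCH_ONLY = 1 << 0;
    constexpr isminefilter ISMINE_SPENDABLE = 1 << 1;

    // Simplified stand-in for the wallet state consulted by the ternary above.
    struct WalletView {
        bool has_legacy_spkm = true;
        bool private_keys_disabled = false;
    };

    // A wallet without private keys can only ever watch its coins, so debit
    // and fee calculations must use the watch-only filter.
    isminefilter FeeBumpFilter(const WalletView& wallet)
    {
        const bool watch_only = wallet.has_legacy_spkm && wallet.private_keys_disabled;
        return watch_only ? ISMINE_WATCH_ONLY : ISMINE_SPENDABLE;
    }

    int main()
    {
        const WalletView watch_only_wallet{true, true};
        std::cout << (FeeBumpFilter(watch_only_wallet) == ISMINE_WATCH_ONLY
                          ? "use watch-only debits\n"
                          : "use spendable debits\n");
    }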
diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp
index dd0d2ffbd7..50f064b305 100644
--- a/src/wallet/init.cpp
+++ b/src/wallet/init.cpp
@@ -46,7 +46,7 @@ void WalletInit::AddWalletOptions() const
gArgs.AddArg("-fallbackfee=<amt>", strprintf("A fee rate (in %s/kB) that will be used when fee estimation has insufficient data. 0 to entirely disable the fallbackfee feature. (default: %s)",
CURRENCY_UNIT, FormatMoney(DEFAULT_FALLBACK_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
- gArgs.AddArg("-keypool=<n>", strprintf("Set key pool size to <n> (default: %u)", DEFAULT_KEYPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ gArgs.AddArg("-keypool=<n>", strprintf("Set key pool size to <n> (default: %u). Warning: Smaller sizes may increase the risk of losing funds when restoring from an old backup, if none of the addresses in the original keypool have been used.", DEFAULT_KEYPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
gArgs.AddArg("-maxtxfee=<amt>", strprintf("Maximum total fees (in %s) to use in a single wallet transaction; setting this too low may abort large transactions (default: %s)",
CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MAXFEE)), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
gArgs.AddArg("-mintxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for transaction creation (default: %s)",
@@ -62,7 +62,7 @@ void WalletInit::AddWalletOptions() const
gArgs.AddArg("-walletbroadcast", strprintf("Make the wallet broadcast transactions (default: %u)", DEFAULT_WALLETBROADCAST), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
gArgs.AddArg("-walletdir=<dir>", "Specify directory to hold wallets (default: <datadir>/wallets if it exists, otherwise <datadir>)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET);
#if HAVE_SYSTEM
- gArgs.AddArg("-walletnotify=<cmd>", "Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ gArgs.AddArg("-walletnotify=<cmd>", "Execute command when a wallet transaction changes. %s in cmd is replaced by TxID and %w is replaced by wallet name. %w is not currently implemented on windows. On systems where %w is supported, it should NOT be quoted because this would break shell escaping used to invoke the command.", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
#endif
gArgs.AddArg("-walletrbf", strprintf("Send transactions with full-RBF opt-in enabled (RPC only, default: %u)", DEFAULT_WALLET_RBF), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
gArgs.AddArg("-zapwallettxes=<mode>", "Delete all wallet transactions and only recover those parts of the blockchain through -rescan on startup"
diff --git a/src/wallet/psbtwallet.cpp b/src/wallet/psbtwallet.cpp
deleted file mode 100644
index 96c1ad8d3f..0000000000
--- a/src/wallet/psbtwallet.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (c) 2009-2019 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include <wallet/psbtwallet.h>
-
-TransactionError FillPSBT(const CWallet* pwallet, PartiallySignedTransaction& psbtx, bool& complete, int sighash_type, bool sign, bool bip32derivs)
-{
- LOCK(pwallet->cs_wallet);
- // Get all of the previous transactions
- complete = true;
- for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) {
- const CTxIn& txin = psbtx.tx->vin[i];
- PSBTInput& input = psbtx.inputs.at(i);
-
- if (PSBTInputSigned(input)) {
- continue;
- }
-
- // Verify input looks sane. This will check that we have at most one uxto, witness or non-witness.
- if (!input.IsSane()) {
- return TransactionError::INVALID_PSBT;
- }
-
- // If we have no utxo, grab it from the wallet.
- if (!input.non_witness_utxo && input.witness_utxo.IsNull()) {
- const uint256& txhash = txin.prevout.hash;
- const auto it = pwallet->mapWallet.find(txhash);
- if (it != pwallet->mapWallet.end()) {
- const CWalletTx& wtx = it->second;
- // We only need the non_witness_utxo, which is a superset of the witness_utxo.
- // The signing code will switch to the smaller witness_utxo if this is ok.
- input.non_witness_utxo = wtx.tx;
- }
- }
-
- // Get the Sighash type
- if (sign && input.sighash_type > 0 && input.sighash_type != sighash_type) {
- return TransactionError::SIGHASH_MISMATCH;
- }
-
- // Get the scriptPubKey to know which SigningProvider to use
- CScript script;
- if (!input.witness_utxo.IsNull()) {
- script = input.witness_utxo.scriptPubKey;
- } else if (input.non_witness_utxo) {
- script = input.non_witness_utxo->vout[txin.prevout.n].scriptPubKey;
- } else {
- // There's no UTXO so we can just skip this now
- complete = false;
- continue;
- }
- SignatureData sigdata;
- input.FillSignatureData(sigdata);
- const SigningProvider* provider = pwallet->GetSigningProvider(script, sigdata);
- if (!provider) {
- complete = false;
- continue;
- }
-
- complete &= SignPSBTInput(HidingSigningProvider(provider, !sign, !bip32derivs), psbtx, i, sighash_type);
- }
-
- // Fill in the bip32 keypaths and redeemscripts for the outputs so that hardware wallets can identify change
- for (unsigned int i = 0; i < psbtx.tx->vout.size(); ++i) {
- const CTxOut& out = psbtx.tx->vout.at(i);
- const SigningProvider* provider = pwallet->GetSigningProvider(out.scriptPubKey);
- if (provider) {
- UpdatePSBTOutput(HidingSigningProvider(provider, true, !bip32derivs), psbtx, i);
- }
- }
-
- return TransactionError::OK;
-}
diff --git a/src/wallet/psbtwallet.h b/src/wallet/psbtwallet.h
deleted file mode 100644
index a7e52df6d9..0000000000
--- a/src/wallet/psbtwallet.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2009-2019 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef BITCOIN_WALLET_PSBTWALLET_H
-#define BITCOIN_WALLET_PSBTWALLET_H
-
-#include <psbt.h>
-#include <wallet/wallet.h>
-
-/**
- * Fills out a PSBT with information from the wallet. Fills in UTXOs if we have
- * them. Tries to sign if sign=true. Sets `complete` if the PSBT is now complete
- * (i.e. has all required signatures or signature-parts, and is ready to
- * finalize.) Sets `error` and returns false if something goes wrong.
- *
- * @param[in] pwallet pointer to a wallet
- * @param[in] &psbtx reference to PartiallySignedTransaction to fill in
- * @param[out] &complete indicates whether the PSBT is now complete
- * @param[in] sighash_type the sighash type to use when signing (if PSBT does not specify)
- * @param[in] sign whether to sign or not
- * @param[in] bip32derivs whether to fill in bip32 derivation information if available
- * return error
- */
-NODISCARD TransactionError FillPSBT(const CWallet* pwallet,
- PartiallySignedTransaction& psbtx,
- bool& complete,
- int sighash_type = 1 /* SIGHASH_ALL */,
- bool sign = true,
- bool bip32derivs = false);
-
-#endif // BITCOIN_WALLET_PSBTWALLET_H
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index 633ac1b16d..8df3491060 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -54,7 +54,7 @@ static std::string DecodeDumpString(const std::string &str) {
return ret.str();
}
-static bool GetWalletAddressesForKey(LegacyScriptPubKeyMan* spk_man, CWallet* const pwallet, const CKeyID& keyid, std::string& strAddr, std::string& strLabel) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
+static bool GetWalletAddressesForKey(LegacyScriptPubKeyMan* spk_man, const CWallet* const pwallet, const CKeyID& keyid, std::string& strAddr, std::string& strLabel) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
{
bool fLabelFound = false;
CKey key;
@@ -65,7 +65,7 @@ static bool GetWalletAddressesForKey(LegacyScriptPubKeyMan* spk_man, CWallet* co
strAddr += ",";
}
strAddr += EncodeDestination(dest);
- strLabel = EncodeDumpString(pwallet->mapAddressBook[dest].name);
+ strLabel = EncodeDumpString(pwallet->mapAddressBook.at(dest).name);
fLabelFound = true;
}
}
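
The switch from mapAddressBook[dest] to mapAddressBook.at(dest) goes together with making pwallet a const pointer: std::map::operator[] may insert a default value and therefore has no const overload, while .at() never inserts. A small self-contained illustration:

    #include <iostream>
    #include <map>
    #include <string>

    // operator[] may insert a default-constructed value, so it is not usable
    // on a const map; .at() never inserts, has a const overload, and throws
    // std::out_of_range if the key is missing.
    std::string LookupLabel(const std::map<std::string, std::string>& address_book,
                            const std::string& address)
    {
        // address_book[address] would not compile here: the map is const.
        const auto it = address_book.find(address);
        return it != address_book.end() ? it->second : std::string{};
    }

    int main()
    {
        const std::map<std::string, std::string> book{{"bc1-example-address", "savings"}};
        std::cout << LookupLabel(book, "bc1-example-address") << "\n";
        std::cout << book.at("bc1-example-address") << "\n"; // ok on a const map
    }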
@@ -125,7 +125,7 @@ UniValue importprivkey(const JSONRPCRequest& request)
throw JSONRPCError(RPC_WALLET_ERROR, "Cannot import private keys to a wallet with private keys disabled");
}
- EnsureLegacyScriptPubKeyMan(*wallet);
+ EnsureLegacyScriptPubKeyMan(*wallet, true);
WalletRescanReserver reserver(pwallet);
bool fRescan = true;
@@ -253,7 +253,7 @@ UniValue importaddress(const JSONRPCRequest& request)
},
}.Check(request);
- EnsureLegacyScriptPubKeyMan(*pwallet);
+ EnsureLegacyScriptPubKeyMan(*pwallet, true);
std::string strLabel;
if (!request.params[1].isNull())
@@ -454,7 +454,7 @@ UniValue importpubkey(const JSONRPCRequest& request)
},
}.Check(request);
- EnsureLegacyScriptPubKeyMan(*wallet);
+ EnsureLegacyScriptPubKeyMan(*wallet, true);
std::string strLabel;
if (!request.params[1].isNull())
@@ -538,7 +538,7 @@ UniValue importwallet(const JSONRPCRequest& request)
},
}.Check(request);
- EnsureLegacyScriptPubKeyMan(*wallet);
+ EnsureLegacyScriptPubKeyMan(*wallet, true);
if (pwallet->chain().havePruned()) {
// Exit early and print an error.
@@ -676,7 +676,7 @@ UniValue importwallet(const JSONRPCRequest& request)
UniValue dumpprivkey(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
}
@@ -688,7 +688,7 @@ UniValue dumpprivkey(const JSONRPCRequest& request)
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The bitcoin address for the private key"},
},
RPCResult{
- "\"key\" (string) The private key\n"
+ RPCResult::Type::STR, "key", "The private key"
},
RPCExamples{
HelpExampleCli("dumpprivkey", "\"myaddress\"")
@@ -700,7 +700,7 @@ UniValue dumpprivkey(const JSONRPCRequest& request)
LegacyScriptPubKeyMan& spk_man = EnsureLegacyScriptPubKeyMan(*wallet);
auto locked_chain = pwallet->chain().lock();
- LOCK(pwallet->cs_wallet);
+ LOCK2(pwallet->cs_wallet, spk_man.cs_KeyStore);
EnsureWalletIsUnlocked(pwallet);
@@ -724,7 +724,7 @@ UniValue dumpprivkey(const JSONRPCRequest& request)
UniValue dumpwallet(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
}
@@ -738,9 +738,10 @@ UniValue dumpwallet(const JSONRPCRequest& request)
{"filename", RPCArg::Type::STR, RPCArg::Optional::NO, "The filename with path (either absolute or relative to bitcoind)"},
},
RPCResult{
- "{ (json object)\n"
- " \"filename\" : { (string) The filename with full absolute path\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "filename", "The filename with full absolute path"},
+ }
},
RPCExamples{
HelpExampleCli("dumpwallet", "\"test\"")
@@ -751,8 +752,7 @@ UniValue dumpwallet(const JSONRPCRequest& request)
LegacyScriptPubKeyMan& spk_man = EnsureLegacyScriptPubKeyMan(*wallet);
auto locked_chain = pwallet->chain().lock();
- LOCK(pwallet->cs_wallet);
- AssertLockHeld(spk_man.cs_wallet);
+ LOCK2(pwallet->cs_wallet, spk_man.cs_KeyStore);
EnsureWalletIsUnlocked(pwallet);
@@ -1322,8 +1322,21 @@ UniValue importmulti(const JSONRPCRequest& mainRequest)
"\"options\""},
},
RPCResult{
- "\nResponse is an array with the same size as the input that has the execution result :\n"
- " [{\"success\": true}, {\"success\": true, \"warnings\": [\"Ignoring irrelevant private key\"]}, {\"success\": false, \"error\": {\"code\": -1, \"message\": \"Internal Server Error\"}}, ...]\n"
+ RPCResult::Type::ARR, "", "Response is an array with the same size as the input that has the execution result",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "success", ""},
+ {RPCResult::Type::ARR, "warnings", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR, "", ""},
+ }},
+ {RPCResult::Type::OBJ, "error", /* optional */ true, "",
+ {
+ {RPCResult::Type::ELISION, "", "JSONRPC error"},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("importmulti", "'[{ \"scriptPubKey\": { \"address\": \"<my address>\" }, \"timestamp\":1455191478 }, "
@@ -1335,7 +1348,7 @@ UniValue importmulti(const JSONRPCRequest& mainRequest)
RPCTypeCheck(mainRequest.params, {UniValue::VARR, UniValue::VOBJ});
- EnsureLegacyScriptPubKeyMan(*wallet);
+ EnsureLegacyScriptPubKeyMan(*wallet, true);
const UniValue& requests = mainRequest.params[0];
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 63cbe02b17..44e580a52a 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -19,14 +19,14 @@
#include <script/sign.h>
#include <util/bip32.h>
#include <util/fees.h>
+#include <util/message.h> // For MessageSign()
#include <util/moneystr.h>
#include <util/string.h>
#include <util/system.h>
#include <util/url.h>
-#include <util/validation.h>
+#include <util/vector.h>
#include <wallet/coincontrol.h>
#include <wallet/feebumper.h>
-#include <wallet/psbtwallet.h>
#include <wallet/rpcwallet.h>
#include <wallet/wallet.h>
#include <wallet/walletdb.h>
@@ -39,7 +39,7 @@
static const std::string WALLET_ENDPOINT_BASE = "/wallet/";
-static inline bool GetAvoidReuseFlag(CWallet * const pwallet, const UniValue& param) {
+static inline bool GetAvoidReuseFlag(const CWallet* const pwallet, const UniValue& param) {
bool can_avoid_reuse = pwallet->IsWalletFlagSet(WALLET_FLAG_AVOID_REUSE);
bool avoid_reuse = param.isNull() ? can_avoid_reuse : param.get_bool();
@@ -124,9 +124,13 @@ void EnsureWalletIsUnlocked(const CWallet* pwallet)
}
}
-LegacyScriptPubKeyMan& EnsureLegacyScriptPubKeyMan(CWallet& wallet)
+// also_create should only be set to true when the RPC is expected to add things to a blank wallet and make it no longer blank
+LegacyScriptPubKeyMan& EnsureLegacyScriptPubKeyMan(CWallet& wallet, bool also_create)
{
LegacyScriptPubKeyMan* spk_man = wallet.GetLegacyScriptPubKeyMan();
+ if (!spk_man && also_create) {
+ spk_man = wallet.GetOrCreateLegacyScriptPubKeyMan();
+ }
if (!spk_man) {
throw JSONRPCError(RPC_WALLET_ERROR, "This type of wallet does not support this command");
}
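
EnsureLegacyScriptPubKeyMan now takes an also_create flag so that import-style RPCs can turn a blank wallet into a legacy one on demand, while read-only RPCs keep failing fast. A standalone sketch of the get-or-create-or-throw shape with simplified types (not the wallet API):

    #include <iostream>
    #include <memory>
    #include <stdexcept>

    struct KeyManager {};

    // Simplified stand-in for the wallet's scriptpubkeyman lookup.
    struct FakeWallet {
        std::unique_ptr<KeyManager> legacy;
        KeyManager* GetLegacy() { return legacy.get(); }
        KeyManager* GetOrCreateLegacy()
        {
            if (!legacy) legacy = std::make_unique<KeyManager>();
            return legacy.get();
        }
    };

    // Return the existing manager, create one if the caller is allowed to,
    // and otherwise report that the wallet does not support the command.
    KeyManager& EnsureLegacy(FakeWallet& wallet, bool also_create = false)
    {
        KeyManager* man = wallet.GetLegacy();
        if (!man && also_create) man = wallet.GetOrCreateLegacy();
        if (!man) throw std::runtime_error("This type of wallet does not support this command");
        return *man;
    }

    int main()
    {
        FakeWallet blank;
        try {
            EnsureLegacy(blank); // read-only caller: fails on a blank wallet
        } catch (const std::runtime_error& e) {
            std::cout << e.what() << "\n";
        }
        EnsureLegacy(blank, /*also_create=*/true); // import-style caller: creates the manager
        std::cout << std::boolalpha << (blank.GetLegacy() != nullptr) << "\n";
    }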
@@ -201,7 +205,7 @@ static UniValue getnewaddress(const JSONRPCRequest& request)
{"address_type", RPCArg::Type::STR, /* default */ "set by -addresstype", "The address type to use. Options are \"legacy\", \"p2sh-segwit\", and \"bech32\"."},
},
RPCResult{
- "\"address\" (string) The new bitcoin address\n"
+ RPCResult::Type::STR, "address", "The new bitcoin address"
},
RPCExamples{
HelpExampleCli("getnewaddress", "")
@@ -252,7 +256,7 @@ static UniValue getrawchangeaddress(const JSONRPCRequest& request)
{"address_type", RPCArg::Type::STR, /* default */ "set by -changetype", "The address type to use. Options are \"legacy\", \"p2sh-segwit\", and \"bech32\"."},
},
RPCResult{
- "\"address\" (string) The address\n"
+ RPCResult::Type::STR, "address", "The address"
},
RPCExamples{
HelpExampleCli("getrawchangeaddress", "")
@@ -338,7 +342,7 @@ static CTransactionRef SendMoney(interfaces::Chain::Lock& locked_chain, CWallet
CScript scriptPubKey = GetScriptForDestination(address);
// Create and send the transaction
- CAmount nFeeRequired;
+ CAmount nFeeRequired = 0;
std::string strError;
std::vector<CRecipient> vecSend;
int nChangePosRet = -1;
@@ -386,7 +390,7 @@ static UniValue sendtoaddress(const JSONRPCRequest& request)
" dirty if they have previously been used in a transaction."},
},
RPCResult{
- "\"txid\" (string) The transaction id.\n"
+ RPCResult::Type::STR_HEX, "txid", "The transaction id."
},
RPCExamples{
HelpExampleCli("sendtoaddress", "\"1M72Sfpbz1BPpXFHz9m3CdqATR44Jvaydd\" 0.1")
@@ -453,7 +457,7 @@ static UniValue sendtoaddress(const JSONRPCRequest& request)
static UniValue listaddressgroupings(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -465,17 +469,18 @@ static UniValue listaddressgroupings(const JSONRPCRequest& request)
"in past transactions\n",
{},
RPCResult{
- "[\n"
- " [\n"
- " [\n"
- " \"address\", (string) The bitcoin address\n"
- " amount, (numeric) The amount in " + CURRENCY_UNIT + "\n"
- " \"label\" (string, optional) The label\n"
- " ]\n"
- " ,...\n"
- " ]\n"
- " ,...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::STR, "address", "The bitcoin address"},
+ {RPCResult::Type::STR_AMOUNT, "amount", "The amount in " + CURRENCY_UNIT},
+ {RPCResult::Type::STR, "label", /* optional */ true, "The label"},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("listaddressgroupings", "")
@@ -514,7 +519,7 @@ static UniValue listaddressgroupings(const JSONRPCRequest& request)
static UniValue signmessage(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -528,7 +533,7 @@ static UniValue signmessage(const JSONRPCRequest& request)
{"message", RPCArg::Type::STR, RPCArg::Optional::NO, "The message to create a signature of."},
},
RPCResult{
- "\"signature\" (string) The signature of the message encoded in base 64\n"
+ RPCResult::Type::STR, "signature", "The signature of the message encoded in base 64"
},
RPCExamples{
"\nUnlock the wallet for 30 seconds\n"
@@ -560,33 +565,21 @@ static UniValue signmessage(const JSONRPCRequest& request)
throw JSONRPCError(RPC_TYPE_ERROR, "Address does not refer to key");
}
- CScript script_pub_key = GetScriptForDestination(*pkhash);
- const SigningProvider* provider = pwallet->GetSigningProvider(script_pub_key);
- if (!provider) {
- throw JSONRPCError(RPC_WALLET_ERROR, "Private key not available");
- }
-
- CKey key;
- CKeyID keyID(*pkhash);
- if (!provider->GetKey(keyID, key)) {
- throw JSONRPCError(RPC_WALLET_ERROR, "Private key not available");
+ std::string signature;
+ SigningResult err = pwallet->SignMessage(strMessage, *pkhash, signature);
+ if (err == SigningResult::SIGNING_FAILED) {
+ throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, SigningResultString(err));
+ } else if (err != SigningResult::OK) {
+ throw JSONRPCError(RPC_WALLET_ERROR, SigningResultString(err));
}
- CHashWriter ss(SER_GETHASH, 0);
- ss << strMessageMagic;
- ss << strMessage;
-
- std::vector<unsigned char> vchSig;
- if (!key.SignCompact(ss.GetHash(), vchSig))
- throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Sign failed");
-
- return EncodeBase64(vchSig.data(), vchSig.size());
+ return signature;
}
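
The signmessage refactor above pushes key lookup, message hashing, and base64 encoding behind a wallet-level SignMessage that reports a status code, leaving the RPC to map each failure class to an error. A sketch of that status-mapping shape (simplified names, not the wallet API):

    #include <iostream>
    #include <string>

    enum class SignStatus { OK, PRIVATE_KEY_NOT_AVAILABLE, SIGNING_FAILED };

    // Stand-in for a wallet-level signer: the caller only learns why signing
    // failed, not how signing is performed.
    SignStatus SignMessageStub(const std::string& message, bool have_key, std::string& signature)
    {
        if (!have_key) return SignStatus::PRIVATE_KEY_NOT_AVAILABLE;
        if (message.empty()) return SignStatus::SIGNING_FAILED;
        signature = "placeholder-base64-signature"; // not a real signature
        return SignStatus::OK;
    }

    int main()
    {
        std::string sig;
        switch (SignMessageStub("hello", /*have_key=*/true, sig)) {
        case SignStatus::OK: std::cout << sig << "\n"; break;
        case SignStatus::SIGNING_FAILED: std::cout << "error: sign failed\n"; break;
        case SignStatus::PRIVATE_KEY_NOT_AVAILABLE: std::cout << "error: key not available\n"; break;
        }
    }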
static UniValue getreceivedbyaddress(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -599,7 +592,7 @@ static UniValue getreceivedbyaddress(const JSONRPCRequest& request)
{"minconf", RPCArg::Type::NUM, /* default */ "1", "Only include transactions confirmed at least this many times."},
},
RPCResult{
- "amount (numeric) The total amount in " + CURRENCY_UNIT + " received at this address.\n"
+ RPCResult::Type::STR_AMOUNT, "amount", "The total amount in " + CURRENCY_UNIT + " received at this address."
},
RPCExamples{
"\nThe amount from transactions with at least 1 confirmation\n"
@@ -656,7 +649,7 @@ static UniValue getreceivedbyaddress(const JSONRPCRequest& request)
static UniValue getreceivedbylabel(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -669,7 +662,7 @@ static UniValue getreceivedbylabel(const JSONRPCRequest& request)
{"minconf", RPCArg::Type::NUM, /* default */ "1", "Only include transactions confirmed at least this many times."},
},
RPCResult{
- "amount (numeric) The total amount in " + CURRENCY_UNIT + " received for this label.\n"
+ RPCResult::Type::STR_AMOUNT, "amount", "The total amount in " + CURRENCY_UNIT + " received for this label."
},
RPCExamples{
"\nAmount received by the default label with at least 1 confirmation\n"
@@ -724,7 +717,7 @@ static UniValue getreceivedbylabel(const JSONRPCRequest& request)
static UniValue getbalance(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -741,7 +734,7 @@ static UniValue getbalance(const JSONRPCRequest& request)
{"avoid_reuse", RPCArg::Type::BOOL, /* default */ "true", "(only available if avoid_reuse wallet flag is set) Do not include balance in dirty outputs; addresses are considered dirty if they have previously been used in a transaction."},
},
RPCResult{
- "amount (numeric) The total amount in " + CURRENCY_UNIT + " received for this wallet.\n"
+ RPCResult::Type::STR_AMOUNT, "amount", "The total amount in " + CURRENCY_UNIT + " received for this wallet."
},
RPCExamples{
"\nThe total amount in the wallet with 1 or more confirmations\n"
@@ -782,7 +775,7 @@ static UniValue getbalance(const JSONRPCRequest& request)
static UniValue getunconfirmedbalance(const JSONRPCRequest &request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -843,8 +836,8 @@ static UniValue sendmany(const JSONRPCRequest& request)
" \"CONSERVATIVE\""},
},
RPCResult{
- "\"txid\" (string) The transaction id for the send. Only 1 transaction is created regardless of \n"
- " the number of addresses.\n"
+ RPCResult::Type::STR_HEX, "txid", "The transaction id for the send. Only 1 transaction is created regardless of\n"
+ "the number of addresses."
},
RPCExamples{
"\nSend two amounts to two different addresses:\n"
@@ -967,10 +960,12 @@ static UniValue addmultisigaddress(const JSONRPCRequest& request)
{"address_type", RPCArg::Type::STR, /* default */ "set by -addresstype", "The address type to use. Options are \"legacy\", \"p2sh-segwit\", and \"bech32\"."},
},
RPCResult{
- "{\n"
- " \"address\":\"multisigaddress\", (string) The value of the new multisig address.\n"
- " \"redeemScript\":\"script\" (string) The string value of the hex-encoded redemption script.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "address", "The value of the new multisig address"},
+ {RPCResult::Type::STR_HEX, "redeemScript", "The string value of the hex-encoded redemption script"},
+ {RPCResult::Type::STR, "descriptor", "The descriptor for this multisig"},
+ }
},
RPCExamples{
"\nAdd a multisig address from 2 addresses\n"
@@ -983,7 +978,7 @@ static UniValue addmultisigaddress(const JSONRPCRequest& request)
LegacyScriptPubKeyMan& spk_man = EnsureLegacyScriptPubKeyMan(*pwallet);
auto locked_chain = pwallet->chain().lock();
- LOCK(pwallet->cs_wallet);
+ LOCK2(pwallet->cs_wallet, spk_man.cs_KeyStore);
std::string label;
if (!request.params[2].isNull())
@@ -1014,9 +1009,13 @@ static UniValue addmultisigaddress(const JSONRPCRequest& request)
CTxDestination dest = AddAndGetMultisigDestination(required, pubkeys, output_type, spk_man, inner);
pwallet->SetAddressBook(dest, label, "send");
+ // Make the descriptor
+ std::unique_ptr<Descriptor> descriptor = InferDescriptor(GetScriptForDestination(dest), spk_man);
+
UniValue result(UniValue::VOBJ);
result.pushKV("address", EncodeDestination(dest));
result.pushKV("redeemScript", HexStr(inner.begin(), inner.end()));
+ result.pushKV("descriptor", descriptor->ToString());
return result;
}
@@ -1031,7 +1030,7 @@ struct tallyitem
}
};
-static UniValue ListReceived(interfaces::Chain::Lock& locked_chain, CWallet * const pwallet, const UniValue& params, bool by_label) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
+static UniValue ListReceived(interfaces::Chain::Lock& locked_chain, const CWallet* const pwallet, const UniValue& params, bool by_label) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
{
// Minimum confirmations
int nMinDepth = 1;
@@ -1180,7 +1179,7 @@ static UniValue ListReceived(interfaces::Chain::Lock& locked_chain, CWallet * co
static UniValue listreceivedbyaddress(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -1195,20 +1194,21 @@ static UniValue listreceivedbyaddress(const JSONRPCRequest& request)
{"address_filter", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "If present, only return information on this address."},
},
RPCResult{
- "[\n"
- " {\n"
- " \"involvesWatchonly\" : true, (bool) Only returns true if imported addresses were involved in transaction.\n"
- " \"address\" : \"receivingaddress\", (string) The receiving address\n"
- " \"amount\" : x.xxx, (numeric) The total amount in " + CURRENCY_UNIT + " received by the address\n"
- " \"confirmations\" : n, (numeric) The number of confirmations of the most recent transaction included\n"
- " \"label\" : \"label\", (string) The label of the receiving address. The default label is \"\".\n"
- " \"txids\": [\n"
- " \"txid\", (string) The ids of transactions received with the address \n"
- " ...\n"
- " ]\n"
- " }\n"
- " ,...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "involvesWatchonly", "Only returns true if imported addresses were involved in transaction"},
+ {RPCResult::Type::STR, "address", "The receiving address"},
+ {RPCResult::Type::STR_AMOUNT, "amount", "The total amount in " + CURRENCY_UNIT + " received by the address"},
+ {RPCResult::Type::NUM, "confirmations", "The number of confirmations of the most recent transaction included"},
+ {RPCResult::Type::STR, "label", "The label of the receiving address. The default label is \"\""},
+ {RPCResult::Type::ARR, "txids", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The ids of transactions received with the address"},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("listreceivedbyaddress", "")
@@ -1231,7 +1231,7 @@ static UniValue listreceivedbyaddress(const JSONRPCRequest& request)
static UniValue listreceivedbylabel(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -1245,15 +1245,16 @@ static UniValue listreceivedbylabel(const JSONRPCRequest& request)
{"include_watchonly", RPCArg::Type::BOOL, /* default */ "true for watch-only wallets, otherwise false", "Whether to include watch-only addresses (see 'importaddress')"},
},
RPCResult{
- "[\n"
- " {\n"
- " \"involvesWatchonly\" : true, (bool) Only returns true if imported addresses were involved in transaction.\n"
- " \"amount\" : x.xxx, (numeric) The total amount received by addresses with this label\n"
- " \"confirmations\" : n, (numeric) The number of confirmations of the most recent transaction included\n"
- " \"label\" : \"label\" (string) The label of the receiving address. The default label is \"\".\n"
- " }\n"
- " ,...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "involvesWatchonly", "Only returns true if imported addresses were involved in transaction"},
+ {RPCResult::Type::STR_AMOUNT, "amount", "The total amount received by addresses with this label"},
+ {RPCResult::Type::NUM, "confirmations", "The number of confirmations of the most recent transaction included"},
+ {RPCResult::Type::STR, "label", "The label of the receiving address. The default label is \"\""},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("listreceivedbylabel", "")
@@ -1290,7 +1291,7 @@ static void MaybePushAddress(UniValue & entry, const CTxDestination &dest)
* @param filter_ismine The "is mine" filter flags.
* @param filter_label Optional label string to filter incoming transactions.
*/
-static void ListTransactions(interfaces::Chain::Lock& locked_chain, CWallet* const pwallet, const CWalletTx& wtx, int nMinDepth, bool fLong, UniValue& ret, const isminefilter& filter_ismine, const std::string* filter_label) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
+static void ListTransactions(interfaces::Chain::Lock& locked_chain, const CWallet* const pwallet, const CWalletTx& wtx, int nMinDepth, bool fLong, UniValue& ret, const isminefilter& filter_ismine, const std::string* filter_label) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
{
CAmount nFee;
std::list<COutputEntry> listReceived;
@@ -1313,7 +1314,7 @@ static void ListTransactions(interfaces::Chain::Lock& locked_chain, CWallet* con
entry.pushKV("category", "send");
entry.pushKV("amount", ValueFromAmount(-s.amount));
if (pwallet->mapAddressBook.count(s.destination)) {
- entry.pushKV("label", pwallet->mapAddressBook[s.destination].name);
+ entry.pushKV("label", pwallet->mapAddressBook.at(s.destination).name);
}
entry.pushKV("vout", s.vout);
entry.pushKV("fee", ValueFromAmount(-nFee));
@@ -1330,7 +1331,7 @@ static void ListTransactions(interfaces::Chain::Lock& locked_chain, CWallet* con
{
std::string label;
if (pwallet->mapAddressBook.count(r.destination)) {
- label = pwallet->mapAddressBook[r.destination].name;
+ label = pwallet->mapAddressBook.at(r.destination).name;
}
if (filter_label && label != *filter_label) {
continue;
@@ -1365,32 +1366,32 @@ static void ListTransactions(interfaces::Chain::Lock& locked_chain, CWallet* con
}
}
-static const std::string TransactionDescriptionString()
+static const std::vector<RPCResult> TransactionDescriptionString()
{
- return " \"confirmations\": n, (numeric) The number of confirmations for the transaction. Negative confirmations means the\n"
- " transaction conflicted that many blocks ago.\n"
- " \"generated\": xxx, (bool) Only present if transaction only input is a coinbase one.\n"
- " \"trusted\": xxx, (bool) Only present if we consider transaction to be trusted and so safe to spend from.\n"
- " \"blockhash\": \"hashvalue\", (string) The block hash containing the transaction.\n"
- " \"blockheight\": n, (numeric) The block height containing the transaction.\n"
- " \"blockindex\": n, (numeric) The index of the transaction in the block that includes it.\n"
- " \"blocktime\": xxx, (numeric) The block time expressed in " + UNIX_EPOCH_TIME + ".\n"
- " \"txid\": \"transactionid\", (string) The transaction id.\n"
- " \"walletconflicts\": [ (array) Conflicting transaction ids.\n"
- " \"txid\", (string) The transaction id.\n"
- " ...\n"
- " ],\n"
- " \"time\": xxx, (numeric) The transaction time expressed in " + UNIX_EPOCH_TIME + ".\n"
- " \"timereceived\": xxx, (numeric) The time received expressed in " + UNIX_EPOCH_TIME + ".\n"
- " \"comment\": \"...\", (string) If a comment is associated with the transaction, only present if not empty.\n"
- " \"bip125-replaceable\": \"yes|no|unknown\", (string) Whether this transaction could be replaced due to BIP125 (replace-by-fee);\n"
- " may be unknown for unconfirmed transactions not in the mempool\n";
+ return{{RPCResult::Type::NUM, "confirmations", "The number of confirmations for the transaction. Negative confirmations means the\n"
+ "transaction conflicted that many blocks ago."},
+ {RPCResult::Type::BOOL, "generated", "Only present if transaction only input is a coinbase one."},
+ {RPCResult::Type::BOOL, "trusted", "Only present if we consider transaction to be trusted and so safe to spend from."},
+ {RPCResult::Type::STR_HEX, "blockhash", "The block hash containing the transaction."},
+ {RPCResult::Type::NUM, "blockheight", "The block height containing the transaction."},
+ {RPCResult::Type::NUM, "blockindex", "The index of the transaction in the block that includes it."},
+ {RPCResult::Type::NUM_TIME, "blocktime", "The block time expressed in " + UNIX_EPOCH_TIME + "."},
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id."},
+ {RPCResult::Type::ARR, "walletconflicts", "Conflicting transaction ids.",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id."},
+ }},
+ {RPCResult::Type::NUM_TIME, "time", "The transaction time expressed in " + UNIX_EPOCH_TIME + "."},
+ {RPCResult::Type::NUM_TIME, "timereceived", "The time received expressed in " + UNIX_EPOCH_TIME + "."},
+ {RPCResult::Type::STR, "comment", "If a comment is associated with the transaction, only present if not empty."},
+ {RPCResult::Type::STR, "bip125-replaceable", "(\"yes|no|unknown\") Whether this transaction could be replaced due to BIP125 (replace-by-fee);\n"
+ "may be unknown for unconfirmed transactions not in the mempool"}};
}
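
TransactionDescriptionString now returns a vector of typed RPCResult entries rather than a preformatted string, so individual RPCs can splice the shared fields between their own entries (the Cat calls used in listtransactions below). A minimal concatenation helper in the same spirit (illustrative only, not the util/vector.h implementation):

    #include <iostream>
    #include <string>
    #include <vector>

    // Append the contents of b to a and return the result, so shared entries
    // can be spliced between RPC-specific ones when help text is assembled.
    template <typename V>
    V Concat(V a, V&& b)
    {
        a.insert(a.end(), std::make_move_iterator(b.begin()), std::make_move_iterator(b.end()));
        return a;
    }

    int main()
    {
        std::vector<std::string> shared{"confirmations", "blockhash", "txid", "time"};
        const std::vector<std::string> fields =
            Concat(Concat(std::vector<std::string>{"address", "category", "amount"}, std::move(shared)),
                   std::vector<std::string>{"abandoned"});
        for (const auto& f : fields) std::cout << f << "\n";
    }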
UniValue listtransactions(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -1407,27 +1408,31 @@ UniValue listtransactions(const JSONRPCRequest& request)
{"include_watchonly", RPCArg::Type::BOOL, /* default */ "true for watch-only wallets, otherwise false", "Include transactions to watch-only addresses (see 'importaddress')"},
},
RPCResult{
- "[\n"
- " {\n"
- " \"involvesWatchonly\": xxx, (bool) Only returns true if imported addresses were involved in transaction.\n"
- " \"address\":\"address\", (string) The bitcoin address of the transaction.\n"
- " \"category\": (string) The transaction category.\n"
- " \"send\" Transactions sent.\n"
- " \"receive\" Non-coinbase transactions received.\n"
- " \"generate\" Coinbase transactions received with more than 100 confirmations.\n"
- " \"immature\" Coinbase transactions received with 100 or fewer confirmations.\n"
- " \"orphan\" Orphaned coinbase transactions received.\n"
- " \"amount\": x.xxx, (numeric) The amount in " + CURRENCY_UNIT + ". This is negative for the 'send' category, and is positive\n"
- " for all other categories\n"
- " \"label\": \"label\", (string) A comment for the address/transaction, if any\n"
- " \"vout\": n, (numeric) the vout value\n"
- " \"fee\": x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n"
- " 'send' category of transactions.\n"
- + TransactionDescriptionString()
- + " \"abandoned\": xxx (bool) 'true' if the transaction has been abandoned (inputs are respendable). Only available for the \n"
- " 'send' category of transactions.\n"
- " }\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "", Cat(Cat<std::vector<RPCResult>>(
+ {
+ {RPCResult::Type::BOOL, "involvesWatchonly", "Only returns true if imported addresses were involved in transaction."},
+ {RPCResult::Type::STR, "address", "The bitcoin address of the transaction."},
+ {RPCResult::Type::STR, "category", "The transaction category.\n"
+ "\"send\" Transactions sent.\n"
+ "\"receive\" Non-coinbase transactions received.\n"
+ "\"generate\" Coinbase transactions received with more than 100 confirmations.\n"
+ "\"immature\" Coinbase transactions received with 100 or fewer confirmations.\n"
+ "\"orphan\" Orphaned coinbase transactions received."},
+ {RPCResult::Type::STR_AMOUNT, "amount", "The amount in " + CURRENCY_UNIT + ". This is negative for the 'send' category, and is positive\n"
+ "for all other categories"},
+ {RPCResult::Type::STR, "label", "A comment for the address/transaction, if any"},
+ {RPCResult::Type::NUM, "vout", "the vout value"},
+ {RPCResult::Type::STR_AMOUNT, "fee", "The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the\n"
+ "'send' category of transactions."},
+ },
+ TransactionDescriptionString()),
+ {
+ {RPCResult::Type::BOOL, "abandoned", "'true' if the transaction has been abandoned (inputs are respendable). Only available for the \n"
+ "'send' category of transactions."},
+ })},
+ }
},
RPCExamples{
"\nList the most recent 10 transactions in the systems\n"
@@ -1491,29 +1496,16 @@ UniValue listtransactions(const JSONRPCRequest& request)
if ((nFrom + nCount) > (int)ret.size())
nCount = ret.size() - nFrom;
- std::vector<UniValue> arrTmp = ret.getValues();
-
- std::vector<UniValue>::iterator first = arrTmp.begin();
- std::advance(first, nFrom);
- std::vector<UniValue>::iterator last = arrTmp.begin();
- std::advance(last, nFrom+nCount);
-
- if (last != arrTmp.end()) arrTmp.erase(last, arrTmp.end());
- if (first != arrTmp.begin()) arrTmp.erase(arrTmp.begin(), first);
-
- std::reverse(arrTmp.begin(), arrTmp.end()); // Return oldest to newest
-
- ret.clear();
- ret.setArray();
- ret.push_backV(arrTmp);
-
- return ret;
+ const std::vector<UniValue>& txs = ret.getValues();
+ UniValue result{UniValue::VARR};
+ result.push_backV({ txs.rend() - nFrom - nCount, txs.rend() - nFrom }); // Return oldest to newest
+ return result;
}
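The paging change above replaces the copy/erase/reverse sequence with a single reverse-iterator range. A standalone sketch of the same index arithmetic, using int as a stand-in for UniValue and assuming the input is ordered newest-first, as ret is after the ListTransactions loop (PageOldestToNewest is a hypothetical helper, not part of the patch):

#include <cassert>
#include <vector>

// Skip the nFrom newest entries and return the next nCount, oldest first,
// without copying the whole container.
std::vector<int> PageOldestToNewest(const std::vector<int>& txs, int nFrom, int nCount)
{
    assert(nFrom >= 0 && nCount >= 0 && nFrom + nCount <= (int)txs.size());
    // rend() - k refers to index k - 1, so this reverse-iterator range walks
    // indices nFrom + nCount - 1 down to nFrom: the [nFrom, nFrom + nCount)
    // slice of the newest-first vector, emitted oldest-to-newest.
    return {txs.rend() - nFrom - nCount, txs.rend() - nFrom};
}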
static UniValue listsinceblock(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -1531,32 +1523,39 @@ static UniValue listsinceblock(const JSONRPCRequest& request)
" (not guaranteed to work on pruned nodes)"},
},
RPCResult{
- "{\n"
- " \"transactions\": [\n"
- " \"involvesWatchonly\": xxx, (bool) Only returns true if imported addresses were involved in transaction.\n"
- " \"address\":\"address\", (string) The bitcoin address of the transaction.\n"
- " \"category\": (string) The transaction category.\n"
- " \"send\" Transactions sent.\n"
- " \"receive\" Non-coinbase transactions received.\n"
- " \"generate\" Coinbase transactions received with more than 100 confirmations.\n"
- " \"immature\" Coinbase transactions received with 100 or fewer confirmations.\n"
- " \"orphan\" Orphaned coinbase transactions received.\n"
- " \"amount\": x.xxx, (numeric) The amount in " + CURRENCY_UNIT + ". This is negative for the 'send' category, and is positive\n"
- " for all other categories\n"
- " \"vout\" : n, (numeric) the vout value\n"
- " \"fee\": x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the 'send' category of transactions.\n"
- + TransactionDescriptionString()
- + " \"abandoned\": xxx, (bool) 'true' if the transaction has been abandoned (inputs are respendable). Only available for the 'send' category of transactions.\n"
- " \"comment\": \"...\", (string) If a comment is associated with the transaction.\n"
- " \"label\" : \"label\" (string) A comment for the address/transaction, if any\n"
- " \"to\": \"...\", (string) If a comment to is associated with the transaction.\n"
- " ],\n"
- " \"removed\": [\n"
- " <structure is the same as \"transactions\" above, only present if include_removed=true>\n"
- " Note: transactions that were re-added in the active chain will appear as-is in this array, and may thus have a positive confirmation count.\n"
- " ],\n"
- " \"lastblock\": \"lastblockhash\" (string) The hash of the block (target_confirmations-1) from the best block on the main chain. This is typically used to feed back into listsinceblock the next time you call it. So you would generally use a target_confirmations of say 6, so you will be continually re-notified of transactions until they've reached 6 confirmations plus any new ones\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ARR, "transactions", "",
+ {
+ {RPCResult::Type::OBJ, "", "", Cat(Cat<std::vector<RPCResult>>(
+ {
+ {RPCResult::Type::BOOL, "involvesWatchonly", "Only returns true if imported addresses were involved in transaction."},
+ {RPCResult::Type::STR, "address", "The bitcoin address of the transaction."},
+ {RPCResult::Type::STR, "category", "The transaction category.\n"
+ "\"send\" Transactions sent.\n"
+ "\"receive\" Non-coinbase transactions received.\n"
+ "\"generate\" Coinbase transactions received with more than 100 confirmations.\n"
+ "\"immature\" Coinbase transactions received with 100 or fewer confirmations.\n"
+ "\"orphan\" Orphaned coinbase transactions received."},
+ {RPCResult::Type::STR_AMOUNT, "amount", "The amount in " + CURRENCY_UNIT + ". This is negative for the 'send' category, and is positive\n"
+ "for all other categories"},
+ {RPCResult::Type::NUM, "vout", "the vout value"},
+ {RPCResult::Type::STR_AMOUNT, "fee", "The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the\n"
+ "'send' category of transactions."},
+ },
+ TransactionDescriptionString()),
+ {
+ {RPCResult::Type::BOOL, "abandoned", "'true' if the transaction has been abandoned (inputs are respendable). Only available for the \n"
+ "'send' category of transactions."},
+ {RPCResult::Type::STR, "label", "A comment for the address/transaction, if any"},
+ {RPCResult::Type::STR, "to", "If a comment to is associated with the transaction."},
+ })},
+ }},
+ {RPCResult::Type::ARR, "removed", "<structure is the same as \"transactions\" above, only present if include_removed=true>\n"
+ "Note: transactions that were re-added in the active chain will appear as-is in this array, and may thus have a positive confirmation count."
+ , {{RPCResult::Type::ELISION, "", ""},}},
+ {RPCResult::Type::STR_HEX, "lastblock", "The hash of the block (target_confirmations-1) from the best block on the main chain. This is typically used to feed back into listsinceblock the next time you call it. So you would generally use a target_confirmations of say 6, so you will be continually re-notified of transactions until they've reached 6 confirmations plus any new ones"},
+ }
},
RPCExamples{
HelpExampleCli("listsinceblock", "")
@@ -1648,7 +1647,7 @@ static UniValue listsinceblock(const JSONRPCRequest& request)
static UniValue gettransaction(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -1664,35 +1663,41 @@ static UniValue gettransaction(const JSONRPCRequest& request)
"Whether to include a `decoded` field containing the decoded transaction (equivalent to RPC decoderawtransaction)"},
},
RPCResult{
- "{\n"
- " \"amount\" : x.xxx, (numeric) The transaction amount in " + CURRENCY_UNIT + "\n"
- " \"fee\": x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n"
- " 'send' category of transactions.\n"
- + TransactionDescriptionString()
- + " \"details\" : [\n"
- " {\n"
- " \"involvesWatchonly\": xxx, (bool) Only returns true if imported addresses were involved in transaction.\n"
- " \"address\" : \"address\", (string) The bitcoin address involved in the transaction\n"
- " \"category\" : (string) The transaction category.\n"
- " \"send\" Transactions sent.\n"
- " \"receive\" Non-coinbase transactions received.\n"
- " \"generate\" Coinbase transactions received with more than 100 confirmations.\n"
- " \"immature\" Coinbase transactions received with 100 or fewer confirmations.\n"
- " \"orphan\" Orphaned coinbase transactions received.\n"
- " \"amount\" : x.xxx, (numeric) The amount in " + CURRENCY_UNIT + "\n"
- " \"label\" : \"label\", (string) A comment for the address/transaction, if any\n"
- " \"vout\" : n, (numeric) the vout value\n"
- " \"fee\": x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n"
- " 'send' category of transactions.\n"
- " \"abandoned\": xxx (bool) 'true' if the transaction has been abandoned (inputs are respendable). Only available for the \n"
- " 'send' category of transactions.\n"
- " }\n"
- " ,...\n"
- " ],\n"
- " \"hex\" : \"data\" (string) Raw data for transaction\n"
- " \"decoded\" : transaction (json object) Optional, the decoded transaction (only present when `verbose` is passed), equivalent to the\n"
- " RPC decoderawtransaction method, or the RPC getrawtransaction method when `verbose` is passed.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "", Cat(Cat<std::vector<RPCResult>>(
+ {
+ {RPCResult::Type::STR_AMOUNT, "amount", "The amount in " + CURRENCY_UNIT},
+ {RPCResult::Type::STR_AMOUNT, "fee", "The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the\n"
+ "'send' category of transactions."},
+ },
+ TransactionDescriptionString()),
+ {
+ {RPCResult::Type::ARR, "details", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "involvesWatchonly", "Only returns true if imported addresses were involved in transaction."},
+ {RPCResult::Type::STR, "address", "The bitcoin address involved in the transaction."},
+ {RPCResult::Type::STR, "category", "The transaction category.\n"
+ "\"send\" Transactions sent.\n"
+ "\"receive\" Non-coinbase transactions received.\n"
+ "\"generate\" Coinbase transactions received with more than 100 confirmations.\n"
+ "\"immature\" Coinbase transactions received with 100 or fewer confirmations.\n"
+ "\"orphan\" Orphaned coinbase transactions received."},
+ {RPCResult::Type::STR_AMOUNT, "amount", "The amount in " + CURRENCY_UNIT},
+ {RPCResult::Type::STR, "label", "A comment for the address/transaction, if any"},
+ {RPCResult::Type::NUM, "vout", "the vout value"},
+ {RPCResult::Type::STR_AMOUNT, "fee", "The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n"
+ "'send' category of transactions."},
+ {RPCResult::Type::BOOL, "abandoned", "'true' if the transaction has been abandoned (inputs are respendable). Only available for the \n"
+ "'send' category of transactions."},
+ }},
+ }},
+ {RPCResult::Type::STR_HEX, "hex", "Raw data for transaction"},
+ {RPCResult::Type::OBJ, "decoded", "Optional, the decoded transaction (only present when `verbose` is passed)",
+ {
+ {RPCResult::Type::ELISION, "", "Equivalent to the RPC decoderawtransaction method, or the RPC getrawtransaction method when `verbose` is passed."},
+ }},
+ })
},
RPCExamples{
HelpExampleCli("gettransaction", "\"1075db55d416d3ca199f55b6084e2115b9345e16c5cf302fc80e9d5fbf5d48d\"")
@@ -1801,7 +1806,7 @@ static UniValue abandontransaction(const JSONRPCRequest& request)
static UniValue backupwallet(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -2151,7 +2156,7 @@ static UniValue lockunspent(const JSONRPCRequest& request)
},
},
RPCResult{
- "true|false (boolean) Whether the command was successful or not\n"
+ RPCResult::Type::BOOL, "", "Whether the command was successful or not"
},
RPCExamples{
"\nList the unspent transactions\n"
@@ -2250,7 +2255,7 @@ static UniValue lockunspent(const JSONRPCRequest& request)
static UniValue listlockunspent(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -2261,13 +2266,14 @@ static UniValue listlockunspent(const JSONRPCRequest& request)
"See the lockunspent call to lock and unlock transactions for spending.\n",
{},
RPCResult{
- "[\n"
- " {\n"
- " \"txid\" : \"transactionid\", (string) The transaction id locked\n"
- " \"vout\" : n (numeric) The vout value\n"
- " }\n"
- " ,...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id locked"},
+ {RPCResult::Type::NUM, "vout", "The vout value"},
+ }},
+ }
},
RPCExamples{
"\nList the unspent transactions\n"
@@ -2317,7 +2323,7 @@ static UniValue settxfee(const JSONRPCRequest& request)
{"amount", RPCArg::Type::AMOUNT, RPCArg::Optional::NO, "The transaction fee in " + CURRENCY_UNIT + "/kB"},
},
RPCResult{
- "true|false (boolean) Returns true if successful\n"
+ RPCResult::Type::BOOL, "", "Returns true if successful"
},
RPCExamples{
HelpExampleCli("settxfee", "0.00001")
@@ -2355,19 +2361,23 @@ static UniValue getbalances(const JSONRPCRequest& request)
"Returns an object with all balances in " + CURRENCY_UNIT + ".\n",
{},
RPCResult{
- "{\n"
- " \"mine\": { (object) balances from outputs that the wallet can sign\n"
- " \"trusted\": xxx (numeric) trusted balance (outputs created by the wallet or confirmed outputs)\n"
- " \"untrusted_pending\": xxx (numeric) untrusted pending balance (outputs created by others that are in the mempool)\n"
- " \"immature\": xxx (numeric) balance from immature coinbase outputs\n"
- " \"used\": xxx (numeric) (only present if avoid_reuse is set) balance from coins sent to addresses that were previously spent from (potentially privacy violating)\n"
- " },\n"
- " \"watchonly\": { (object) watchonly balances (not present if wallet does not watch anything)\n"
- " \"trusted\": xxx (numeric) trusted balance (outputs created by the wallet or confirmed outputs)\n"
- " \"untrusted_pending\": xxx (numeric) untrusted pending balance (outputs created by others that are in the mempool)\n"
- " \"immature\": xxx (numeric) balance from immature coinbase outputs\n"
- " },\n"
- "}\n"},
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ, "mine", "balances from outputs that the wallet can sign",
+ {
+ {RPCResult::Type::STR_AMOUNT, "trusted", "trusted balance (outputs created by the wallet or confirmed outputs)"},
+ {RPCResult::Type::STR_AMOUNT, "untrusted_pending", "untrusted pending balance (outputs created by others that are in the mempool)"},
+ {RPCResult::Type::STR_AMOUNT, "immature", "balance from immature coinbase outputs"},
+ {RPCResult::Type::STR_AMOUNT, "used", "(only present if avoid_reuse is set) balance from coins sent to addresses that were previously spent from (potentially privacy violating)"},
+ }},
+ {RPCResult::Type::OBJ, "watchonly", "watchonly balances (not present if wallet does not watch anything)",
+ {
+ {RPCResult::Type::STR_AMOUNT, "trusted", "trusted balance (outputs created by the wallet or confirmed outputs)"},
+ {RPCResult::Type::STR_AMOUNT, "untrusted_pending", "untrusted pending balance (outputs created by others that are in the mempool)"},
+ {RPCResult::Type::STR_AMOUNT, "immature", "balance from immature coinbase outputs"},
+ }},
+ }
+ },
RPCExamples{
HelpExampleCli("getbalances", "") +
HelpExampleRpc("getbalances", "")},
@@ -2411,7 +2421,7 @@ static UniValue getbalances(const JSONRPCRequest& request)
static UniValue getwalletinfo(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -2421,27 +2431,29 @@ static UniValue getwalletinfo(const JSONRPCRequest& request)
"Returns an object containing various wallet state info.\n",
{},
RPCResult{
- "{\n"
- " \"walletname\": xxxxx, (string) the wallet name\n"
- " \"walletversion\": xxxxx, (numeric) the wallet version\n"
- " \"balance\": xxxxxxx, (numeric) DEPRECATED. Identical to getbalances().mine.trusted\n"
- " \"unconfirmed_balance\": xxx, (numeric) DEPRECATED. Identical to getbalances().mine.untrusted_pending\n"
- " \"immature_balance\": xxxxxx, (numeric) DEPRECATED. Identical to getbalances().mine.immature\n"
- " \"txcount\": xxxxxxx, (numeric) the total number of transactions in the wallet\n"
- " \"keypoololdest\": xxxxxx, (numeric) the " + UNIX_EPOCH_TIME + " of the oldest pre-generated key in the key pool\n"
- " \"keypoolsize\": xxxx, (numeric) how many new keys are pre-generated (only counts external keys)\n"
- " \"keypoolsize_hd_internal\": xxxx, (numeric) how many new keys are pre-generated for internal use (used for change outputs, only appears if the wallet is using this feature, otherwise external keys are used)\n"
- " \"unlocked_until\": ttt, (numeric) the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked\n"
- " \"paytxfee\": x.xxxx, (numeric) the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB\n"
- " \"hdseedid\": \"<hash160>\" (string, optional) the Hash160 of the HD seed (only present when HD is enabled)\n"
- " \"private_keys_enabled\": true|false (boolean) false if privatekeys are disabled for this wallet (enforced watch-only wallet)\n"
- " \"avoid_reuse\": true|false (boolean) whether this wallet tracks clean/dirty coins in terms of reuse\n"
- " \"scanning\": (json object) current scanning details, or false if no scan is in progress\n"
- " {\n"
- " \"duration\" : xxxx (numeric) elapsed seconds since scan start\n"
- " \"progress\" : x.xxxx, (numeric) scanning progress percentage [0.0, 1.0]\n"
- " }\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {
+ {RPCResult::Type::STR, "walletname", "the wallet name"},
+ {RPCResult::Type::NUM, "walletversion", "the wallet version"},
+ {RPCResult::Type::STR_AMOUNT, "balance", "DEPRECATED. Identical to getbalances().mine.trusted"},
+ {RPCResult::Type::STR_AMOUNT, "unconfirmed_balance", "DEPRECATED. Identical to getbalances().mine.untrusted_pending"},
+ {RPCResult::Type::STR_AMOUNT, "immature_balance", "DEPRECATED. Identical to getbalances().mine.immature"},
+ {RPCResult::Type::NUM, "txcount", "the total number of transactions in the wallet"},
+ {RPCResult::Type::NUM_TIME, "keypoololdest", "the " + UNIX_EPOCH_TIME + " of the oldest pre-generated key in the key pool"},
+ {RPCResult::Type::NUM, "keypoolsize", "how many new keys are pre-generated (only counts external keys)"},
+ {RPCResult::Type::NUM, "keypoolsize_hd_internal", "how many new keys are pre-generated for internal use (used for change outputs, only appears if the wallet is using this feature, otherwise external keys are used)"},
+ {RPCResult::Type::NUM_TIME, "unlocked_until", "the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked"},
+ {RPCResult::Type::STR_AMOUNT, "paytxfee", "the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB"},
+ {RPCResult::Type::STR_HEX, "hdseedid", /* optional */ true, "the Hash160 of the HD seed (only present when HD is enabled)"},
+ {RPCResult::Type::BOOL, "private_keys_enabled", "false if privatekeys are disabled for this wallet (enforced watch-only wallet)"},
+ {RPCResult::Type::BOOL, "avoid_reuse", "whether this wallet tracks clean/dirty coins in terms of reuse"},
+ {RPCResult::Type::OBJ, "scanning", "current scanning details, or false if no scan is in progress",
+ {
+ {RPCResult::Type::NUM, "duration", "elapsed seconds since scan start"},
+ {RPCResult::Type::NUM, "progress", "scanning progress percentage [0.0, 1.0]"},
+ }},
+ }},
},
RPCExamples{
HelpExampleCli("getwalletinfo", "")
@@ -2503,14 +2515,16 @@ static UniValue listwalletdir(const JSONRPCRequest& request)
"Returns a list of wallets in the wallet directory.\n",
{},
RPCResult{
- "{\n"
- " \"wallets\" : [ (json array of objects)\n"
- " {\n"
- " \"name\" : \"name\" (string) The wallet name\n"
- " }\n"
- " ,...\n"
- " ]\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ARR, "wallets", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "name", "The wallet name"},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("listwalletdir", "")
@@ -2537,10 +2551,10 @@ static UniValue listwallets(const JSONRPCRequest& request)
"For full information on the wallet, use \"getwalletinfo\"\n",
{},
RPCResult{
- "[ (json array of strings)\n"
- " \"walletname\" (string) the wallet name\n"
- " ...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::STR, "walletname", "the wallet name"},
+ }
},
RPCExamples{
HelpExampleCli("listwallets", "")
@@ -2573,10 +2587,11 @@ static UniValue loadwallet(const JSONRPCRequest& request)
{"filename", RPCArg::Type::STR, RPCArg::Optional::NO, "The wallet directory or .dat file."},
},
RPCResult{
- "{\n"
- " \"name\" : <wallet_name>, (string) The wallet name if loaded successfully.\n"
- " \"warning\" : <warning>, (string) Warning message if wallet was not loaded cleanly.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "name", "The wallet name if loaded successfully."},
+ {RPCResult::Type::STR, "warning", "Warning message if wallet was not loaded cleanly."},
+ }
},
RPCExamples{
HelpExampleCli("loadwallet", "\"test.dat\"")
@@ -2628,11 +2643,12 @@ static UniValue setwalletflag(const JSONRPCRequest& request)
{"value", RPCArg::Type::BOOL, /* default */ "true", "The new state."},
},
RPCResult{
- "{\n"
- " \"flag_name\": string (string) The name of the flag that was modified\n"
- " \"flag_state\": bool (bool) The new state of the flag\n"
- " \"warnings\": string (string) Any warnings associated with the change\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "flag_name", "The name of the flag that was modified"},
+ {RPCResult::Type::BOOL, "flag_state", "The new state of the flag"},
+ {RPCResult::Type::STR, "warnings", "Any warnings associated with the change"},
+ }
},
RPCExamples{
HelpExampleCli("setwalletflag", "avoid_reuse")
@@ -2688,10 +2704,11 @@ static UniValue createwallet(const JSONRPCRequest& request)
{"avoid_reuse", RPCArg::Type::BOOL, /* default */ "false", "Keep track of coin reuse, and treat dirty and clean coins differently with privacy considerations in mind."},
},
RPCResult{
- "{\n"
- " \"name\" : <wallet_name>, (string) The wallet name if created successfully. If the wallet was created using a full path, the wallet_name will be the full path.\n"
- " \"warning\" : <warning>, (string) Warning message if wallet was not loaded cleanly.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "name", "The wallet name if created successfully. If the wallet was created using a full path, the wallet_name will be the full path."},
+ {RPCResult::Type::STR, "warning", "Warning message if wallet was not loaded cleanly."},
+ }
},
RPCExamples{
HelpExampleCli("createwallet", "\"testwallet\"")
@@ -2786,7 +2803,7 @@ static UniValue unloadwallet(const JSONRPCRequest& request)
static UniValue listunspent(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -2817,27 +2834,28 @@ static UniValue listunspent(const JSONRPCRequest& request)
"query_options"},
},
RPCResult{
- "[ (array of json object)\n"
- " {\n"
- " \"txid\" : \"txid\", (string) the transaction id \n"
- " \"vout\" : n, (numeric) the vout value\n"
- " \"address\" : \"address\", (string) the bitcoin address\n"
- " \"label\" : \"label\", (string) The associated label, or \"\" for the default label\n"
- " \"scriptPubKey\" : \"key\", (string) the script key\n"
- " \"amount\" : x.xxx, (numeric) the transaction output amount in " + CURRENCY_UNIT + "\n"
- " \"confirmations\" : n, (numeric) The number of confirmations\n"
- " \"redeemScript\" : \"script\" (string) The redeemScript if scriptPubKey is P2SH\n"
- " \"witnessScript\" : \"script\" (string) witnessScript if the scriptPubKey is P2WSH or P2SH-P2WSH\n"
- " \"spendable\" : xxx, (bool) Whether we have the private keys to spend this output\n"
- " \"solvable\" : xxx, (bool) Whether we know how to spend this output, ignoring the lack of keys\n"
- " \"reused\" : xxx, (bool) (only present if avoid_reuse is set) Whether this output is reused/dirty (sent to an address that was previously spent from)\n"
- " \"desc\" : xxx, (string, only when solvable) A descriptor for spending this output\n"
- " \"safe\" : xxx (bool) Whether this output is considered safe to spend. Unconfirmed transactions\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "the transaction id"},
+ {RPCResult::Type::NUM, "vout", "the vout value"},
+ {RPCResult::Type::STR, "address", "the bitcoin address"},
+ {RPCResult::Type::STR, "label", "The associated label, or \"\" for the default label"},
+ {RPCResult::Type::STR, "scriptPubKey", "the script key"},
+ {RPCResult::Type::STR_AMOUNT, "amount", "the transaction output amount in " + CURRENCY_UNIT},
+ {RPCResult::Type::NUM, "confirmations", "The number of confirmations"},
+ {RPCResult::Type::STR_HEX, "redeemScript", "The redeemScript if scriptPubKey is P2SH"},
+ {RPCResult::Type::STR, "witnessScript", "witnessScript if the scriptPubKey is P2WSH or P2SH-P2WSH"},
+ {RPCResult::Type::BOOL, "spendable", "Whether we have the private keys to spend this output"},
+ {RPCResult::Type::BOOL, "solvable", "Whether we know how to spend this output, ignoring the lack of keys"},
+ {RPCResult::Type::BOOL, "reused", "(only present if avoid_reuse is set) Whether this output is reused/dirty (sent to an address that was previously spent from)"},
+ {RPCResult::Type::STR, "desc", "(only when solvable) A descriptor for spending this output"},
+ {RPCResult::Type::BOOL, "safe", "Whether this output is considered safe to spend. Unconfirmed transactions"
" from outside keys and unconfirmed replacement transactions are considered unsafe\n"
- " and are not eligible for spending by fundrawtransaction and sendtoaddress.\n"
- " }\n"
- " ,...\n"
- "]\n"
+ "and are not eligible for spending by fundrawtransaction and sendtoaddress."},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("listunspent", "")
@@ -2927,7 +2945,7 @@ static UniValue listunspent(const JSONRPCRequest& request)
CTxDestination address;
const CScript& scriptPubKey = out.tx->tx->vout[out.i].scriptPubKey;
bool fValidAddress = ExtractDestination(scriptPubKey, address);
- bool reused = avoid_reuse && pwallet->IsUsedDestination(address);
+ bool reused = avoid_reuse && pwallet->IsSpentKey(out.tx->GetHash(), out.i);
if (destinations.size() && (!fValidAddress || !destinations.count(address)))
continue;
@@ -2944,7 +2962,7 @@ static UniValue listunspent(const JSONRPCRequest& request)
entry.pushKV("label", i->second.name);
}
- const SigningProvider* provider = pwallet->GetSigningProvider(scriptPubKey);
+ std::unique_ptr<SigningProvider> provider = pwallet->GetSolvingProvider(scriptPubKey);
if (provider) {
if (scriptPubKey.IsPayToScriptHash()) {
const CScriptID& hash = CScriptID(boost::get<ScriptHash>(address));
@@ -2984,7 +3002,7 @@ static UniValue listunspent(const JSONRPCRequest& request)
entry.pushKV("spendable", out.fSpendable);
entry.pushKV("solvable", out.fSolvable);
if (out.fSolvable) {
- const SigningProvider* provider = pwallet->GetSigningProvider(scriptPubKey);
+ std::unique_ptr<SigningProvider> provider = pwallet->GetSolvingProvider(scriptPubKey);
if (provider) {
auto descriptor = InferDescriptor(scriptPubKey, *provider);
entry.pushKV("desc", descriptor->ToString());
@@ -3175,11 +3193,12 @@ static UniValue fundrawtransaction(const JSONRPCRequest& request)
},
},
RPCResult{
- "{\n"
- " \"hex\": \"value\", (string) The resulting raw transaction (hex-encoded string)\n"
- " \"fee\": n, (numeric) Fee in " + CURRENCY_UNIT + " the resulting transaction pays\n"
- " \"changepos\": n (numeric) The position of the added change output, or -1\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hex", "The resulting raw transaction (hex-encoded string)"},
+ {RPCResult::Type::STR_AMOUNT, "fee", "Fee in " + CURRENCY_UNIT + " the resulting transaction pays"},
+ {RPCResult::Type::NUM, "changepos", "The position of the added change output, or -1"},
+ }
},
RPCExamples{
"\nCreate a transaction with no inputs\n"
@@ -3218,7 +3237,7 @@ static UniValue fundrawtransaction(const JSONRPCRequest& request)
UniValue signrawtransactionwithwallet(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -3254,20 +3273,22 @@ UniValue signrawtransactionwithwallet(const JSONRPCRequest& request)
" \"SINGLE|ANYONECANPAY\""},
},
RPCResult{
- "{\n"
- " \"hex\" : \"value\", (string) The hex-encoded raw transaction with signature(s)\n"
- " \"complete\" : true|false, (boolean) If the transaction has a complete set of signatures\n"
- " \"errors\" : [ (json array of objects) Script verification errors (if there are any)\n"
- " {\n"
- " \"txid\" : \"hash\", (string) The hash of the referenced, previous transaction\n"
- " \"vout\" : n, (numeric) The index of the output to spent and used as input\n"
- " \"scriptSig\" : \"hex\", (string) The hex-encoded signature script\n"
- " \"sequence\" : n, (numeric) Script sequence number\n"
- " \"error\" : \"text\" (string) Verification or signing error related to the input\n"
- " }\n"
- " ,...\n"
- " ]\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hex", "The hex-encoded raw transaction with signature(s)"},
+ {RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"},
+ {RPCResult::Type::ARR, "errors", "Script verification errors (if there are any)",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The hash of the referenced, previous transaction"},
+ {RPCResult::Type::NUM, "vout", "The index of the output to spent and used as input"},
+ {RPCResult::Type::STR_HEX, "scriptSig", "The hex-encoded signature script"},
+ {RPCResult::Type::NUM, "sequence", "Script sequence number"},
+ {RPCResult::Type::STR, "error", "Verification or signing error related to the input"},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("signrawtransactionwithwallet", "\"myhex\"")
@@ -3297,23 +3318,15 @@ UniValue signrawtransactionwithwallet(const JSONRPCRequest& request)
// Parse the prevtxs array
ParsePrevouts(request.params[1], nullptr, coins);
- std::set<const SigningProvider*> providers;
- for (const std::pair<COutPoint, Coin> coin_pair : coins) {
- const SigningProvider* provider = pwallet->GetSigningProvider(coin_pair.second.out.scriptPubKey);
- if (provider) {
- providers.insert(std::move(provider));
- }
- }
- if (providers.size() == 0) {
- // When there are no available providers, use DUMMY_SIGNING_PROVIDER so we can check if the tx is complete
- providers.insert(&DUMMY_SIGNING_PROVIDER);
- }
+ int nHashType = ParseSighashString(request.params[2]);
+ // Script verification errors
+ std::map<int, std::string> input_errors;
+
+ bool complete = pwallet->SignTransaction(mtx, coins, nHashType, input_errors);
UniValue result(UniValue::VOBJ);
- for (const SigningProvider* provider : providers) {
- SignTransaction(mtx, provider, coins, request.params[2], result);
- }
- return result;
+ SignTransactionResultToJSON(mtx, complete, coins, input_errors, result);
+ return result;
}
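The rewrite above hands signing to the wallet via pwallet->SignTransaction() and collects per-input failures in input_errors, which SignTransactionResultToJSON() then renders. A simplified, hypothetical sketch of that rendering step (not the actual helper body; it assumes the includes already present in this file, and the field names follow the "errors" rows documented above):

// Build the documented "errors" array (txid, vout, scriptSig, sequence, error)
// from the per-input error map filled by SignTransaction().
UniValue InputErrorsToJSON(const CMutableTransaction& mtx, const std::map<int, std::string>& input_errors)
{
    UniValue errors(UniValue::VARR);
    for (const auto& err_pair : input_errors) {
        const CTxIn& txin = mtx.vin.at(err_pair.first);
        UniValue entry(UniValue::VOBJ);
        entry.pushKV("txid", txin.prevout.hash.GetHex());
        entry.pushKV("vout", (uint64_t)txin.prevout.n);
        entry.pushKV("scriptSig", HexStr(txin.scriptSig.begin(), txin.scriptSig.end()));
        entry.pushKV("sequence", (uint64_t)txin.nSequence);
        entry.pushKV("error", err_pair.second);
        errors.push_back(entry);
    }
    return errors;
}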
static UniValue bumpfee(const JSONRPCRequest& request)
@@ -3364,12 +3377,16 @@ static UniValue bumpfee(const JSONRPCRequest& request)
"options"},
},
RPCResult{
- "{\n"
- " \"txid\": \"value\", (string) The id of the new transaction\n"
- " \"origfee\": n, (numeric) Fee of the replaced transaction\n"
- " \"fee\": n, (numeric) Fee of the new transaction\n"
- " \"errors\": [ str... ] (json array of strings) Errors encountered during processing (may be empty)\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "", {
+ {RPCResult::Type::STR, "psbt", "The base64-encoded unsigned PSBT of the new transaction. Only returned when wallet private keys are disabled."},
+ {RPCResult::Type::STR_HEX, "txid", "The id of the new transaction. Only returned when wallet private keys are enabled."},
+ {RPCResult::Type::STR_AMOUNT, "origfee", "The fee of the replaced transaction."},
+ {RPCResult::Type::STR_AMOUNT, "fee", "The fee of the new transaction."},
+ {RPCResult::Type::ARR, "errors", "Errors encountered during processing (may be empty).",
+ {
+ {RPCResult::Type::STR, "", ""},
+ }},
+ }
},
RPCExamples{
"\nBump the fee, get the new transaction\'s txid\n" +
@@ -3380,10 +3397,12 @@ static UniValue bumpfee(const JSONRPCRequest& request)
RPCTypeCheck(request.params, {UniValue::VSTR, UniValue::VOBJ});
uint256 hash(ParseHashV(request.params[0], "txid"));
+ CCoinControl coin_control;
+ coin_control.fAllowWatchOnly = pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
// optional parameters
CAmount totalFee = 0;
- CCoinControl coin_control;
coin_control.m_signal_bip125_rbf = true;
+
if (!request.params[1].isNull()) {
UniValue options = request.params[1];
RPCTypeCheckObj(options,
@@ -3468,17 +3487,32 @@ static UniValue bumpfee(const JSONRPCRequest& request)
}
}
- // sign bumped transaction
- if (!feebumper::SignTransaction(*pwallet, mtx)) {
- throw JSONRPCError(RPC_WALLET_ERROR, "Can't sign transaction.");
- }
- // commit the bumped transaction
- uint256 txid;
- if (feebumper::CommitTransaction(*pwallet, hash, std::move(mtx), errors, txid) != feebumper::Result::OK) {
- throw JSONRPCError(RPC_WALLET_ERROR, errors[0]);
- }
UniValue result(UniValue::VOBJ);
- result.pushKV("txid", txid.GetHex());
+
+ // If wallet private keys are enabled, return the new transaction id,
+ // otherwise return the base64-encoded unsigned PSBT of the new transaction.
+ if (!pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) {
+ if (!feebumper::SignTransaction(*pwallet, mtx)) {
+ throw JSONRPCError(RPC_WALLET_ERROR, "Can't sign transaction.");
+ }
+
+ uint256 txid;
+ if (feebumper::CommitTransaction(*pwallet, hash, std::move(mtx), errors, txid) != feebumper::Result::OK) {
+ throw JSONRPCError(RPC_WALLET_ERROR, errors[0]);
+ }
+
+ result.pushKV("txid", txid.GetHex());
+ } else {
+ PartiallySignedTransaction psbtx(mtx);
+ bool complete = false;
+ const TransactionError err = pwallet->FillPSBT(psbtx, complete, SIGHASH_ALL, false /* sign */, true /* bip32derivs */);
+ CHECK_NONFATAL(err == TransactionError::OK);
+ CHECK_NONFATAL(!complete);
+ CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
+ ssTx << psbtx;
+ result.pushKV("psbt", EncodeBase64(ssTx.str()));
+ }
+
result.pushKV("origfee", ValueFromAmount(old_fee));
result.pushKV("fee", ValueFromAmount(new_fee));
UniValue result_errors(UniValue::VARR);
@@ -3507,10 +3541,11 @@ UniValue rescanblockchain(const JSONRPCRequest& request)
{"stop_height", RPCArg::Type::NUM, RPCArg::Optional::OMITTED_NAMED_ARG, "the last block height that should be scanned. If none is provided it will rescan up to the tip at return time of this call."},
},
RPCResult{
- "{\n"
- " \"start_height\" (numeric) The block height where the rescan started (the requested height or 0)\n"
- " \"stop_height\" (numeric) The height of the last rescanned block. May be null in rare cases if there was a reorg and the call didn't scan any blocks because they were already scanned in the background.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "start_height", "The block height where the rescan started (the requested height or 0)"},
+ {RPCResult::Type::NUM, "stop_height", "The height of the last rescanned block. May be null in rare cases if there was a reorg and the call didn't scan any blocks because they were already scanned in the background."},
+ }
},
RPCExamples{
HelpExampleCli("rescanblockchain", "100000 120000")
@@ -3674,17 +3709,17 @@ public:
UniValue operator()(const WitnessUnknown& id) const { return UniValue(UniValue::VOBJ); }
};
-static UniValue DescribeWalletAddress(CWallet* pwallet, const CTxDestination& dest)
+static UniValue DescribeWalletAddress(const CWallet* const pwallet, const CTxDestination& dest)
{
UniValue ret(UniValue::VOBJ);
UniValue detail = DescribeAddress(dest);
CScript script = GetScriptForDestination(dest);
- const SigningProvider* provider = nullptr;
+ std::unique_ptr<SigningProvider> provider = nullptr;
if (pwallet) {
- provider = pwallet->GetSigningProvider(script);
+ provider = pwallet->GetSolvingProvider(script);
}
ret.pushKVs(detail);
- ret.pushKVs(boost::apply_visitor(DescribeWalletAddressVisitor(provider), dest));
+ ret.pushKVs(boost::apply_visitor(DescribeWalletAddressVisitor(provider.get()), dest));
return ret;
}
@@ -3702,7 +3737,7 @@ static UniValue AddressBookDataToJSON(const CAddressBookData& data, const bool v
UniValue getaddressinfo(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -3715,51 +3750,56 @@ UniValue getaddressinfo(const JSONRPCRequest& request)
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The bitcoin address for which to get information."},
},
RPCResult{
- "{\n"
- " \"address\" : \"address\", (string) The bitcoin address validated.\n"
- " \"scriptPubKey\" : \"hex\", (string) The hex-encoded scriptPubKey generated by the address.\n"
- " \"ismine\" : true|false, (boolean) If the address is yours.\n"
- " \"iswatchonly\" : true|false, (boolean) If the address is watchonly.\n"
- " \"solvable\" : true|false, (boolean) If we know how to spend coins sent to this address, ignoring the possible lack of private keys.\n"
- " \"desc\" : \"desc\", (string, optional) A descriptor for spending coins sent to this address (only when solvable).\n"
- " \"isscript\" : true|false, (boolean) If the key is a script.\n"
- " \"ischange\" : true|false, (boolean) If the address was used for change output.\n"
- " \"iswitness\" : true|false, (boolean) If the address is a witness address.\n"
- " \"witness_version\" : version (numeric, optional) The version number of the witness program.\n"
- " \"witness_program\" : \"hex\" (string, optional) The hex value of the witness program.\n"
- " \"script\" : \"type\" (string, optional) The output script type. Only if isscript is true and the redeemscript is known. Possible\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "address", "The bitcoin address validated."},
+ {RPCResult::Type::STR_HEX, "scriptPubKey", "The hex-encoded scriptPubKey generated by the address."},
+ {RPCResult::Type::BOOL, "ismine", "If the address is yours."},
+ {RPCResult::Type::BOOL, "iswatchonly", "If the address is watchonly."},
+ {RPCResult::Type::BOOL, "solvable", "If we know how to spend coins sent to this address, ignoring the possible lack of private keys."},
+ {RPCResult::Type::STR, "desc", /* optional */ true, "A descriptor for spending coins sent to this address (only when solvable)."},
+ {RPCResult::Type::BOOL, "isscript", "If the key is a script."},
+ {RPCResult::Type::BOOL, "ischange", "If the address was used for change output."},
+ {RPCResult::Type::BOOL, "iswitness", "If the address is a witness address."},
+ {RPCResult::Type::NUM, "witness_version", /* optional */ true, "The version number of the witness program."},
+ {RPCResult::Type::STR_HEX, "witness_program", /* optional */ true, "The hex value of the witness program."},
+ {RPCResult::Type::STR, "script", /* optional */ true, "The output script type. Only if isscript is true and the redeemscript is known. Possible\n"
" types: nonstandard, pubkey, pubkeyhash, scripthash, multisig, nulldata, witness_v0_keyhash,\n"
- " witness_v0_scripthash, witness_unknown.\n"
- " \"hex\" : \"hex\", (string, optional) The redeemscript for the p2sh address.\n"
- " \"pubkeys\" (array, optional) Array of pubkeys associated with the known redeemscript (only if script is multisig).\n"
- " [\n"
- " \"pubkey\" (string)\n"
- " ,...\n"
- " ]\n"
- " \"sigsrequired\" : xxxxx (numeric, optional) The number of signatures required to spend multisig output (only if script is multisig).\n"
- " \"pubkey\" : \"publickeyhex\", (string, optional) The hex value of the raw public key for single-key addresses (possibly embedded in P2SH or P2WSH).\n"
- " \"embedded\" : {...}, (object, optional) Information about the address embedded in P2SH or P2WSH, if relevant and known. Includes all\n"
+ "witness_v0_scripthash, witness_unknown."},
+ {RPCResult::Type::STR_HEX, "hex", /* optional */ true, "The redeemscript for the p2sh address."},
+ {RPCResult::Type::ARR, "pubkeys", /* optional */ true, "Array of pubkeys associated with the known redeemscript (only if script is multisig).",
+ {
+ {RPCResult::Type::STR, "pubkey", ""},
+ }},
+ {RPCResult::Type::NUM, "sigsrequired", /* optional */ true, "The number of signatures required to spend multisig output (only if script is multisig)."},
+ {RPCResult::Type::STR_HEX, "pubkey", /* optional */ true, "The hex value of the raw public key for single-key addresses (possibly embedded in P2SH or P2WSH)."},
+ {RPCResult::Type::OBJ, "embedded", /* optional */ true, "Information about the address embedded in P2SH or P2WSH, if relevant and known.",
+ {
+ {RPCResult::Type::ELISION, "", "Includes all\n"
" getaddressinfo output fields for the embedded address, excluding metadata (timestamp, hdkeypath,\n"
- " hdseedid) and relation to the wallet (ismine, iswatchonly).\n"
- " \"iscompressed\" : true|false, (boolean, optional) If the pubkey is compressed.\n"
- " \"label\" : \"label\" (string) The label associated with the address. Defaults to \"\". Equivalent to the name field in the labels array.\n"
- " \"timestamp\" : timestamp, (number, optional) The creation time of the key, if available, expressed in " + UNIX_EPOCH_TIME + ".\n"
- " \"hdkeypath\" : \"keypath\" (string, optional) The HD keypath, if the key is HD and available.\n"
- " \"hdseedid\" : \"<hash160>\" (string, optional) The Hash160 of the HD seed.\n"
- " \"hdmasterfingerprint\" : \"<hash160>\" (string, optional) The fingerprint of the master key.\n"
- " \"labels\" (object) An array of labels associated with the address. Currently limited to one label but returned\n"
- " as an array to keep the API stable if multiple labels are enabled in the future.\n"
- " [\n"
- " { (json object of label data)\n"
- " \"name\": \"label name\" (string) The label name. Defaults to \"\". Equivalent to the label field above.\n"
- " \"purpose\": \"purpose\" (string) The purpose of the associated address (send or receive).\n"
- " },...\n"
- " ]\n"
- "}\n"
+ "hdseedid) and relation to the wallet (ismine, iswatchonly)."},
+ }},
+ {RPCResult::Type::BOOL, "iscompressed", /* optional */ true, "If the pubkey is compressed."},
+ {RPCResult::Type::STR, "label", "DEPRECATED. The label associated with the address. Defaults to \"\". Replaced by the labels array below."},
+ {RPCResult::Type::NUM_TIME, "timestamp", /* optional */ true, "The creation time of the key, if available, expressed in " + UNIX_EPOCH_TIME + "."},
+ {RPCResult::Type::STR, "hdkeypath", /* optional */ true, "The HD keypath, if the key is HD and available."},
+ {RPCResult::Type::STR_HEX, "hdseedid", /* optional */ true, "The Hash160 of the HD seed."},
+ {RPCResult::Type::STR_HEX, "hdmasterfingerprint", /* optional */ true, "The fingerprint of the master key."},
+ {RPCResult::Type::ARR, "labels", "Array of labels associated with the address. Currently limited to one label but returned\n"
+ "as an array to keep the API stable if multiple labels are enabled in the future.",
+ {
+ {RPCResult::Type::STR, "label name", "The label name. Defaults to \"\"."},
+ {RPCResult::Type::OBJ, "", "label data, DEPRECATED, will be removed in 0.21. To re-enable, launch bitcoind with `-deprecatedrpc=labelspurpose`",
+ {
+ {RPCResult::Type::STR, "name", "The label name. Defaults to \"\"."},
+ {RPCResult::Type::STR, "purpose", "The purpose of the associated address (send or receive)."},
+ }},
+ }},
+ }
},
RPCExamples{
- HelpExampleCli("getaddressinfo", "\"bc1q09vm5lfy0j5reeulh4x5752q25uqqvz34hufdl\"") +
- HelpExampleRpc("getaddressinfo", "\"bc1q09vm5lfy0j5reeulh4x5752q25uqqvz34hufdl\"")
+ HelpExampleCli("getaddressinfo", EXAMPLE_ADDRESS) +
+ HelpExampleRpc("getaddressinfo", EXAMPLE_ADDRESS)
},
}.Check(request);
@@ -3767,7 +3807,6 @@ UniValue getaddressinfo(const JSONRPCRequest& request)
UniValue ret(UniValue::VOBJ);
CTxDestination dest = DecodeDestination(request.params[0].get_str());
-
// Make sure the destination is valid
if (!IsValidDestination(dest)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address");
@@ -3779,7 +3818,7 @@ UniValue getaddressinfo(const JSONRPCRequest& request)
CScript scriptPubKey = GetScriptForDestination(dest);
ret.pushKV("scriptPubKey", HexStr(scriptPubKey.begin(), scriptPubKey.end()));
- const SigningProvider* provider = pwallet->GetSigningProvider(scriptPubKey);
+ std::unique_ptr<SigningProvider> provider = pwallet->GetSolvingProvider(scriptPubKey);
isminetype mine = pwallet->IsMine(dest);
ret.pushKV("ismine", bool(mine & ISMINE_SPENDABLE));
@@ -3793,24 +3832,18 @@ UniValue getaddressinfo(const JSONRPCRequest& request)
ret.pushKV("iswatchonly", bool(mine & ISMINE_WATCH_ONLY));
- // Return DescribeWalletAddress fields.
- // Always returned: isscript, ischange, iswitness.
- // Optional: witness_version, witness_program, script, hex, pubkeys (array),
- // sigsrequired, pubkey, embedded, iscompressed.
UniValue detail = DescribeWalletAddress(pwallet, dest);
ret.pushKVs(detail);
- // Return label field if existing. Currently only one label can be
- // associated with an address, so the label should be equivalent to the
- // value of the name key/value pair in the labels hash array below.
- if (pwallet->mapAddressBook.count(dest)) {
- ret.pushKV("label", pwallet->mapAddressBook[dest].name);
+ // DEPRECATED: Return label field if existing. Currently only one label can
+ // be associated with an address, so the label should be equivalent to the
+ // value of the name key/value pair in the labels array below.
+ if ((pwallet->chain().rpcEnableDeprecated("label")) && (pwallet->mapAddressBook.count(dest))) {
+ ret.pushKV("label", pwallet->mapAddressBook.at(dest).name);
}
ret.pushKV("ischange", pwallet->IsChange(scriptPubKey));
- // Fetch KeyMetadata, if present, for the timestamp, hdkeypath, hdseedid,
- // and hdmasterfingerprint fields.
ScriptPubKeyMan* spk_man = pwallet->GetScriptPubKeyMan(scriptPubKey);
if (spk_man) {
if (const CKeyMetadata* meta = spk_man->GetMetadata(dest)) {
@@ -3823,15 +3856,21 @@ UniValue getaddressinfo(const JSONRPCRequest& request)
}
}
- // Return a labels array containing a hash of key/value pairs for the label
- // name and address purpose. The name value is equivalent to the label field
- // above. Currently only one label can be associated with an address, but we
- // return an array so the API remains stable if we allow multiple labels to
- // be associated with an address in the future.
+ // Return a `labels` array containing the label associated with the address,
+ // equivalent to the `label` field above. Currently only one label can be
+ // associated with an address, but we return an array so the API remains
+ // stable if we allow multiple labels to be associated with an address in
+ // the future.
UniValue labels(UniValue::VARR);
- std::map<CTxDestination, CAddressBookData>::iterator mi = pwallet->mapAddressBook.find(dest);
+ std::map<CTxDestination, CAddressBookData>::const_iterator mi = pwallet->mapAddressBook.find(dest);
if (mi != pwallet->mapAddressBook.end()) {
- labels.push_back(AddressBookDataToJSON(mi->second, true));
+ // DEPRECATED: The previous behavior of returning an array containing a
+ // JSON object of `name` and `purpose` key/value pairs is deprecated.
+ if (pwallet->chain().rpcEnableDeprecated("labelspurpose")) {
+ labels.push_back(AddressBookDataToJSON(mi->second, true));
+ } else {
+ labels.push_back(mi->second.name);
+ }
}
ret.pushKV("labels", std::move(labels));
@@ -3841,7 +3880,7 @@ UniValue getaddressinfo(const JSONRPCRequest& request)
static UniValue getaddressesbylabel(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -3853,11 +3892,13 @@ static UniValue getaddressesbylabel(const JSONRPCRequest& request)
{"label", RPCArg::Type::STR, RPCArg::Optional::NO, "The label."},
},
RPCResult{
- "{ (json object with addresses as keys)\n"
- " \"address\": { (json object with information about address)\n"
- " \"purpose\": \"string\" (string) Purpose of address (\"send\" for sending address, \"receive\" for receiving address)\n"
- " },...\n"
- "}\n"
+ RPCResult::Type::OBJ_DYN, "", "json object with addresses as keys",
+ {
+ {RPCResult::Type::OBJ, "address", "json object with information about address",
+ {
+ {RPCResult::Type::STR, "purpose", "Purpose of address (\"send\" for sending address, \"receive\" for receiving address)"},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("getaddressesbylabel", "\"tabby\"")
@@ -3898,7 +3939,7 @@ static UniValue getaddressesbylabel(const JSONRPCRequest& request)
static UniValue listlabels(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -3910,10 +3951,10 @@ static UniValue listlabels(const JSONRPCRequest& request)
{"purpose", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "Address purpose to list labels for ('send','receive'). An empty string is the same as not providing this argument."},
},
RPCResult{
- "[ (json array of string)\n"
- " \"label\", (string) Label name\n"
- " ...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::STR, "label", "Label name"},
+ }
},
RPCExamples{
"\nList all labels\n"
@@ -3981,7 +4022,7 @@ UniValue sethdseed(const JSONRPCRequest& request)
},
}.Check(request);
- LegacyScriptPubKeyMan& spk_man = EnsureLegacyScriptPubKeyMan(*pwallet);
+ LegacyScriptPubKeyMan& spk_man = EnsureLegacyScriptPubKeyMan(*pwallet, true);
if (pwallet->chain().isInitialBlockDownload()) {
throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Cannot set a new HD seed while still in Initial Block Download");
@@ -3992,7 +4033,7 @@ UniValue sethdseed(const JSONRPCRequest& request)
}
auto locked_chain = pwallet->chain().lock();
- LOCK(pwallet->cs_wallet);
+ LOCK2(pwallet->cs_wallet, spk_man.cs_KeyStore);
// Do not do anything to non-HD wallets
if (!pwallet->CanSupportFeature(FEATURE_HD)) {
@@ -4031,7 +4072,7 @@ UniValue sethdseed(const JSONRPCRequest& request)
UniValue walletprocesspsbt(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -4051,14 +4092,14 @@ UniValue walletprocesspsbt(const JSONRPCRequest& request)
" \"ALL|ANYONECANPAY\"\n"
" \"NONE|ANYONECANPAY\"\n"
" \"SINGLE|ANYONECANPAY\""},
- {"bip32derivs", RPCArg::Type::BOOL, /* default */ "false", "If true, includes the BIP 32 derivation paths for public keys if we know them"},
+ {"bip32derivs", RPCArg::Type::BOOL, /* default */ "true", "Include BIP 32 derivation paths for public keys if we know them"},
},
RPCResult{
- "{\n"
- " \"psbt\" : \"value\", (string) The base64-encoded partially signed transaction\n"
- " \"complete\" : true|false, (boolean) If the transaction has a complete set of signatures\n"
- " ]\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "psbt", "The base64-encoded partially signed transaction"},
+ {RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"},
+ }
},
RPCExamples{
HelpExampleCli("walletprocesspsbt", "\"psbt\"")
@@ -4079,9 +4120,9 @@ UniValue walletprocesspsbt(const JSONRPCRequest& request)
// Fill transaction with our data and also sign
bool sign = request.params[1].isNull() ? true : request.params[1].get_bool();
- bool bip32derivs = request.params[3].isNull() ? false : request.params[3].get_bool();
+ bool bip32derivs = request.params[3].isNull() ? true : request.params[3].get_bool();
bool complete = true;
- const TransactionError err = FillPSBT(pwallet, psbtx, complete, nHashType, sign, bip32derivs);
+ const TransactionError err = pwallet->FillPSBT(psbtx, complete, nHashType, sign, bip32derivs);
if (err != TransactionError::OK) {
throw JSONRPCTransactionError(err);
}
@@ -4162,14 +4203,15 @@ UniValue walletcreatefundedpsbt(const JSONRPCRequest& request)
" \"CONSERVATIVE\""},
},
"options"},
- {"bip32derivs", RPCArg::Type::BOOL, /* default */ "false", "If true, includes the BIP 32 derivation paths for public keys if we know them"},
+ {"bip32derivs", RPCArg::Type::BOOL, /* default */ "true", "Include BIP 32 derivation paths for public keys if we know them"},
},
RPCResult{
- "{\n"
- " \"psbt\": \"value\", (string) The resulting raw transaction (base64-encoded string)\n"
- " \"fee\": n, (numeric) Fee in " + CURRENCY_UNIT + " the resulting transaction pays\n"
- " \"changepos\": n (numeric) The position of the added change output, or -1\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "psbt", "The resulting raw transaction (base64-encoded string)"},
+ {RPCResult::Type::STR_AMOUNT, "fee", "Fee in " + CURRENCY_UNIT + " the resulting transaction pays"},
+ {RPCResult::Type::NUM, "changepos", "The position of the added change output, or -1"},
+ }
},
RPCExamples{
"\nCreate a transaction with no inputs\n"
@@ -4201,9 +4243,9 @@ UniValue walletcreatefundedpsbt(const JSONRPCRequest& request)
PartiallySignedTransaction psbtx(rawTx);
// Fill transaction with out data but don't sign
- bool bip32derivs = request.params[4].isNull() ? false : request.params[4].get_bool();
+ bool bip32derivs = request.params[4].isNull() ? true : request.params[4].get_bool();
bool complete = true;
- const TransactionError err = FillPSBT(pwallet, psbtx, complete, 1, false, bip32derivs);
+ const TransactionError err = pwallet->FillPSBT(psbtx, complete, 1, false, bip32derivs);
if (err != TransactionError::OK) {
throw JSONRPCTransactionError(err);
}
diff --git a/src/wallet/rpcwallet.h b/src/wallet/rpcwallet.h
index becca455f6..2813fa2bfc 100644
--- a/src/wallet/rpcwallet.h
+++ b/src/wallet/rpcwallet.h
@@ -41,7 +41,7 @@ std::shared_ptr<CWallet> GetWalletForJSONRPCRequest(const JSONRPCRequest& reques
std::string HelpRequiringPassphrase(const CWallet*);
void EnsureWalletIsUnlocked(const CWallet*);
bool EnsureWalletIsAvailable(const CWallet*, bool avoidException);
-LegacyScriptPubKeyMan& EnsureLegacyScriptPubKeyMan(CWallet& wallet);
+LegacyScriptPubKeyMan& EnsureLegacyScriptPubKeyMan(CWallet& wallet, bool also_create = false);
UniValue getaddressinfo(const JSONRPCRequest& request);
UniValue signrawtransactionwithwallet(const JSONRPCRequest& request);
diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp
index be8a71da97..6fe1d84d64 100644
--- a/src/wallet/scriptpubkeyman.cpp
+++ b/src/wallet/scriptpubkeyman.cpp
@@ -5,14 +5,15 @@
#include <key_io.h>
#include <outputtype.h>
#include <script/descriptor.h>
+#include <script/sign.h>
#include <util/bip32.h>
#include <util/strencodings.h>
#include <util/translation.h>
#include <wallet/scriptpubkeyman.h>
-#include <wallet/wallet.h>
bool LegacyScriptPubKeyMan::GetNewDestination(const OutputType type, CTxDestination& dest, std::string& error)
{
+ LOCK(cs_KeyStore);
error.clear();
// Generate a new key that is added to wallet
@@ -70,7 +71,15 @@ bool HaveKeys(const std::vector<valtype>& pubkeys, const LegacyScriptPubKeyMan&
return true;
}
-IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& scriptPubKey, IsMineSigVersion sigversion)
+//! Recursively solve script and return spendable/watchonly/invalid status.
+//!
+//! @param keystore legacy key and script store
+//! @param script script to solve
+//! @param sigversion script type (top-level / redeemscript / witnessscript)
+//! @param recurse_scripthash whether to recurse into nested p2sh and p2wsh
+//! scripts or simply treat any script that has been
+//! stored in the keystore as spendable
+IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& scriptPubKey, IsMineSigVersion sigversion, bool recurse_scripthash=true)
{
IsMineResult ret = IsMineResult::NO;
@@ -129,7 +138,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s
CScriptID scriptID = CScriptID(uint160(vSolutions[0]));
CScript subscript;
if (keystore.GetCScript(scriptID, subscript)) {
- ret = std::max(ret, IsMineInner(keystore, subscript, IsMineSigVersion::P2SH));
+ ret = std::max(ret, recurse_scripthash ? IsMineInner(keystore, subscript, IsMineSigVersion::P2SH) : IsMineResult::SPENDABLE);
}
break;
}
@@ -147,7 +156,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s
CScriptID scriptID = CScriptID(hash);
CScript subscript;
if (keystore.GetCScript(scriptID, subscript)) {
- ret = std::max(ret, IsMineInner(keystore, subscript, IsMineSigVersion::WITNESS_V0));
+ ret = std::max(ret, recurse_scripthash ? IsMineInner(keystore, subscript, IsMineSigVersion::WITNESS_V0) : IsMineResult::SPENDABLE);
}
break;
}
@@ -238,7 +247,6 @@ bool LegacyScriptPubKeyMan::CheckDecryptionKey(const CKeyingMaterial& master_key
bool LegacyScriptPubKeyMan::Encrypt(const CKeyingMaterial& master_key, WalletBatch* batch)
{
- AssertLockHeld(cs_wallet);
LOCK(cs_KeyStore);
encrypted_batch = batch;
if (!mapCryptedKeys.empty()) {
@@ -269,6 +277,7 @@ bool LegacyScriptPubKeyMan::Encrypt(const CKeyingMaterial& master_key, WalletBat
bool LegacyScriptPubKeyMan::GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool)
{
+ LOCK(cs_KeyStore);
if (!CanGetAddresses(internal)) {
return false;
}
@@ -282,7 +291,7 @@ bool LegacyScriptPubKeyMan::GetReservedDestination(const OutputType type, bool i
void LegacyScriptPubKeyMan::MarkUnusedAddresses(const CScript& script)
{
- AssertLockHeld(cs_wallet);
+ LOCK(cs_KeyStore);
// extract addresses and check if they match with an unused keypool key
for (const auto& keyid : GetAffectedKeys(script, *this)) {
std::map<CKeyID, int64_t>::const_iterator mi = m_pool_key_to_index.find(keyid);
@@ -299,7 +308,7 @@ void LegacyScriptPubKeyMan::MarkUnusedAddresses(const CScript& script)
void LegacyScriptPubKeyMan::UpgradeKeyMetadata()
{
- AssertLockHeld(cs_wallet);
+ LOCK(cs_KeyStore);
if (m_storage.IsLocked() || m_storage.IsWalletFlagSet(WALLET_FLAG_KEY_ORIGIN_METADATA)) {
return;
}
@@ -350,9 +359,9 @@ bool LegacyScriptPubKeyMan::IsHDEnabled() const
return !hdChain.seed_id.IsNull();
}
-bool LegacyScriptPubKeyMan::CanGetAddresses(bool internal)
+bool LegacyScriptPubKeyMan::CanGetAddresses(bool internal) const
{
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
// Check if the keypool has keys
bool keypool_has_keys;
if (internal && m_storage.CanSupportFeature(FEATURE_HD_SPLIT)) {
@@ -369,7 +378,7 @@ bool LegacyScriptPubKeyMan::CanGetAddresses(bool internal)
bool LegacyScriptPubKeyMan::Upgrade(int prev_version, std::string& error)
{
- AssertLockHeld(cs_wallet);
+ LOCK(cs_KeyStore);
error = "";
bool hd_upgrade = false;
bool split_upgrade = false;
@@ -383,7 +392,7 @@ bool LegacyScriptPubKeyMan::Upgrade(int prev_version, std::string& error)
hd_upgrade = true;
}
// Upgrade to HD chain split if necessary
- if (m_storage.CanSupportFeature(FEATURE_HD_SPLIT)) {
+ if (m_storage.CanSupportFeature(FEATURE_HD_SPLIT) && CHDChain::VERSION_HD_CHAIN_SPLIT) {
WalletLogPrintf("Upgrading wallet to use HD chain split\n");
m_storage.SetMinVersion(FEATURE_PRE_SPLIT_KEYPOOL);
split_upgrade = FEATURE_HD_SPLIT > prev_version;
@@ -410,7 +419,7 @@ bool LegacyScriptPubKeyMan::HavePrivateKeys() const
void LegacyScriptPubKeyMan::RewriteDB()
{
- AssertLockHeld(cs_wallet);
+ LOCK(cs_KeyStore);
setInternalKeyPool.clear();
setExternalKeyPool.clear();
m_pool_key_to_index.clear();
@@ -433,9 +442,9 @@ static int64_t GetOldestKeyTimeInPool(const std::set<int64_t>& setKeyPool, Walle
return keypool.nTime;
}
-int64_t LegacyScriptPubKeyMan::GetOldestKeyPoolTime()
+int64_t LegacyScriptPubKeyMan::GetOldestKeyPoolTime() const
{
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
WalletBatch batch(m_storage.GetDatabase());
@@ -451,27 +460,116 @@ int64_t LegacyScriptPubKeyMan::GetOldestKeyPoolTime()
return oldestKey;
}
-size_t LegacyScriptPubKeyMan::KeypoolCountExternalKeys()
+size_t LegacyScriptPubKeyMan::KeypoolCountExternalKeys() const
{
- AssertLockHeld(cs_wallet);
+ LOCK(cs_KeyStore);
return setExternalKeyPool.size() + set_pre_split_keypool.size();
}
unsigned int LegacyScriptPubKeyMan::GetKeyPoolSize() const
{
- AssertLockHeld(cs_wallet);
+ LOCK(cs_KeyStore);
return setInternalKeyPool.size() + setExternalKeyPool.size() + set_pre_split_keypool.size();
}
int64_t LegacyScriptPubKeyMan::GetTimeFirstKey() const
{
- AssertLockHeld(cs_wallet);
+ LOCK(cs_KeyStore);
return nTimeFirstKey;
}
+std::unique_ptr<SigningProvider> LegacyScriptPubKeyMan::GetSolvingProvider(const CScript& script) const
+{
+ return MakeUnique<LegacySigningProvider>(*this);
+}
+
+bool LegacyScriptPubKeyMan::CanProvide(const CScript& script, SignatureData& sigdata)
+{
+ IsMineResult ismine = IsMineInner(*this, script, IsMineSigVersion::TOP, /* recurse_scripthash= */ false);
+ if (ismine == IsMineResult::SPENDABLE || ismine == IsMineResult::WATCH_ONLY) {
+ // If ismine, it means we recognize keys or script ids in the script, or
+ // are watching the script itself, and we can at least provide metadata
+ // or solving information, even if not able to sign fully.
+ return true;
+ } else {
+ // If, given the information in sigdata, we could make a valid signature, then we can provide for this script
+ ProduceSignature(*this, DUMMY_SIGNATURE_CREATOR, script, sigdata);
+ if (!sigdata.signatures.empty()) {
+ // If we could make signatures, make sure we have a private key to actually make a signature
+ bool has_privkeys = false;
+ for (const auto& key_sig_pair : sigdata.signatures) {
+ has_privkeys |= HaveKey(key_sig_pair.first);
+ }
+ return has_privkeys;
+ }
+ return false;
+ }
+}
+
+bool LegacyScriptPubKeyMan::SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const
+{
+ return ::SignTransaction(tx, this, coins, sighash, input_errors);
+}
+
+SigningResult LegacyScriptPubKeyMan::SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const
+{
+ CKeyID key_id(pkhash);
+ CKey key;
+ if (!GetKey(key_id, key)) {
+ return SigningResult::PRIVATE_KEY_NOT_AVAILABLE;
+ }
+
+ if (MessageSign(key, message, str_sig)) {
+ return SigningResult::OK;
+ }
+ return SigningResult::SIGNING_FAILED;
+}
+
+TransactionError LegacyScriptPubKeyMan::FillPSBT(PartiallySignedTransaction& psbtx, int sighash_type, bool sign, bool bip32derivs) const
+{
+ for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) {
+ const CTxIn& txin = psbtx.tx->vin[i];
+ PSBTInput& input = psbtx.inputs.at(i);
+
+ if (PSBTInputSigned(input)) {
+ continue;
+ }
+
+ // Verify input looks sane. This will check that we have at most one utxo, witness or non-witness.
+ if (!input.IsSane()) {
+ return TransactionError::INVALID_PSBT;
+ }
+
+ // Get the Sighash type
+ if (sign && input.sighash_type > 0 && input.sighash_type != sighash_type) {
+ return TransactionError::SIGHASH_MISMATCH;
+ }
+
+ // Check non_witness_utxo has specified prevout
+ if (input.non_witness_utxo) {
+ if (txin.prevout.n >= input.non_witness_utxo->vout.size()) {
+ return TransactionError::MISSING_INPUTS;
+ }
+ } else if (input.witness_utxo.IsNull()) {
+ // There's no UTXO so we can just skip this now
+ continue;
+ }
+ SignatureData sigdata;
+ input.FillSignatureData(sigdata);
+ SignPSBTInput(HidingSigningProvider(this, !sign, !bip32derivs), psbtx, i, sighash_type);
+ }
+
+ // Fill in the bip32 keypaths and redeemscripts for the outputs so that hardware wallets can identify change
+ for (unsigned int i = 0; i < psbtx.tx->vout.size(); ++i) {
+ UpdatePSBTOutput(HidingSigningProvider(this, true, !bip32derivs), psbtx, i);
+ }
+
+ return TransactionError::OK;
+}
+
const CKeyMetadata* LegacyScriptPubKeyMan::GetMetadata(const CTxDestination& dest) const
{
- AssertLockHeld(cs_wallet);
+ LOCK(cs_KeyStore);
CKeyID key_id = GetKeyForDestination(*this, dest);
if (!key_id.IsNull()) {
@@ -490,13 +588,18 @@ const CKeyMetadata* LegacyScriptPubKeyMan::GetMetadata(const CTxDestination& des
return nullptr;
}
+uint256 LegacyScriptPubKeyMan::GetID() const
+{
+ return UINT256_ONE();
+}
+
/**
* Update wallet first key creation time. This should be called whenever keys
* are added to the wallet, with the oldest key creation time.
*/
void LegacyScriptPubKeyMan::UpdateTimeFirstKey(int64_t nCreateTime)
{
- AssertLockHeld(cs_wallet);
+ AssertLockHeld(cs_KeyStore);
if (nCreateTime <= 1) {
// Cannot determine birthday information, so set the wallet birthday to
// the beginning of time.
@@ -513,13 +616,14 @@ bool LegacyScriptPubKeyMan::LoadKey(const CKey& key, const CPubKey &pubkey)
bool LegacyScriptPubKeyMan::AddKeyPubKey(const CKey& secret, const CPubKey &pubkey)
{
+ LOCK(cs_KeyStore);
WalletBatch batch(m_storage.GetDatabase());
return LegacyScriptPubKeyMan::AddKeyPubKeyWithDB(batch, secret, pubkey);
}
bool LegacyScriptPubKeyMan::AddKeyPubKeyWithDB(WalletBatch& batch, const CKey& secret, const CPubKey& pubkey)
{
- AssertLockHeld(cs_wallet);
+ AssertLockHeld(cs_KeyStore);
// Make sure we aren't adding private keys to private key disabled wallets
assert(!m_storage.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS));
@@ -574,14 +678,14 @@ bool LegacyScriptPubKeyMan::LoadCScript(const CScript& redeemScript)
void LegacyScriptPubKeyMan::LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata& meta)
{
- AssertLockHeld(cs_wallet);
+ LOCK(cs_KeyStore);
UpdateTimeFirstKey(meta.nCreateTime);
mapKeyMetadata[keyID] = meta;
}
void LegacyScriptPubKeyMan::LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata& meta)
{
- AssertLockHeld(cs_wallet);
+ LOCK(cs_KeyStore);
UpdateTimeFirstKey(meta.nCreateTime);
m_script_metadata[script_id] = meta;
}
@@ -630,7 +734,7 @@ bool LegacyScriptPubKeyMan::AddCryptedKey(const CPubKey &vchPubKey,
if (!AddCryptedKeyInner(vchPubKey, vchCryptedSecret))
return false;
{
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
if (encrypted_batch)
return encrypted_batch->WriteCryptedKey(vchPubKey,
vchCryptedSecret,
@@ -663,7 +767,6 @@ static bool ExtractPubKey(const CScript &dest, CPubKey& pubKeyOut)
bool LegacyScriptPubKeyMan::RemoveWatchOnly(const CScript &dest)
{
- AssertLockHeld(cs_wallet);
{
LOCK(cs_KeyStore);
setWatchOnly.erase(dest);
@@ -734,7 +837,7 @@ bool LegacyScriptPubKeyMan::AddWatchOnly(const CScript& dest, int64_t nCreateTim
void LegacyScriptPubKeyMan::SetHDChain(const CHDChain& chain, bool memonly)
{
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
if (!memonly && !WalletBatch(m_storage.GetDatabase()).WriteHDChain(chain))
throw std::runtime_error(std::string(__func__) + ": writing chain failed");
@@ -771,7 +874,7 @@ bool LegacyScriptPubKeyMan::GetKeyOrigin(const CKeyID& keyID, KeyOriginInfo& inf
{
CKeyMetadata meta;
{
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
auto it = mapKeyMetadata.find(keyID);
if (it != mapKeyMetadata.end()) {
meta = it->second;
@@ -821,7 +924,7 @@ CPubKey LegacyScriptPubKeyMan::GenerateNewKey(WalletBatch &batch, bool internal)
{
assert(!m_storage.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS));
assert(!m_storage.IsWalletFlagSet(WALLET_FLAG_BLANK_WALLET));
- AssertLockHeld(cs_wallet);
+ AssertLockHeld(cs_KeyStore);
bool fCompressed = m_storage.CanSupportFeature(FEATURE_COMPRPUBKEY); // default to compressed public keys if we want 0.6.0 wallets
CKey secret;
@@ -913,7 +1016,7 @@ void LegacyScriptPubKeyMan::DeriveNewChildKey(WalletBatch &batch, CKeyMetadata&
void LegacyScriptPubKeyMan::LoadKeyPool(int64_t nIndex, const CKeyPool &keypool)
{
- AssertLockHeld(cs_wallet);
+ LOCK(cs_KeyStore);
if (keypool.m_pre_split) {
set_pre_split_keypool.insert(nIndex);
} else if (keypool.fInternal) {
@@ -932,10 +1035,10 @@ void LegacyScriptPubKeyMan::LoadKeyPool(int64_t nIndex, const CKeyPool &keypool)
mapKeyMetadata[keyid] = CKeyMetadata(keypool.nTime);
}
-bool LegacyScriptPubKeyMan::CanGenerateKeys()
+bool LegacyScriptPubKeyMan::CanGenerateKeys() const
{
// A wallet can generate keys if it has an HD seed (IsHDEnabled) or it is a non-HD wallet (pre FEATURE_HD)
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
return IsHDEnabled() || !m_storage.CanSupportFeature(FEATURE_HD);
}
@@ -962,7 +1065,7 @@ CPubKey LegacyScriptPubKeyMan::DeriveNewSeed(const CKey& key)
metadata.hd_seed_id = seed.GetID();
{
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
// mem store the metadata
mapKeyMetadata[seed.GetID()] = metadata;
@@ -977,7 +1080,7 @@ CPubKey LegacyScriptPubKeyMan::DeriveNewSeed(const CKey& key)
void LegacyScriptPubKeyMan::SetHDSeed(const CPubKey& seed)
{
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
// store the keyid (hash160) together with
// the child index counter in the database
// as a hdchain object
@@ -1000,7 +1103,7 @@ bool LegacyScriptPubKeyMan::NewKeyPool()
return false;
}
{
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
WalletBatch batch(m_storage.GetDatabase());
for (const int64_t nIndex : setInternalKeyPool) {
@@ -1034,7 +1137,7 @@ bool LegacyScriptPubKeyMan::TopUp(unsigned int kpSize)
return false;
}
{
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
if (m_storage.IsLocked()) return false;
@@ -1076,7 +1179,7 @@ bool LegacyScriptPubKeyMan::TopUp(unsigned int kpSize)
void LegacyScriptPubKeyMan::AddKeypoolPubkeyWithDB(const CPubKey& pubkey, const bool internal, WalletBatch& batch)
{
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
assert(m_max_keypool_index < std::numeric_limits<int64_t>::max()); // How in the hell did you use so many keys?
int64_t index = ++m_max_keypool_index;
if (!batch.WritePool(index, CKeyPool(pubkey, internal))) {
@@ -1107,7 +1210,7 @@ void LegacyScriptPubKeyMan::ReturnDestination(int64_t nIndex, bool fInternal, co
{
// Return to key pool
{
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
if (fInternal) {
setInternalKeyPool.insert(nIndex);
} else if (!set_pre_split_keypool.empty()) {
@@ -1131,7 +1234,7 @@ bool LegacyScriptPubKeyMan::GetKeyFromPool(CPubKey& result, const OutputType typ
CKeyPool keypool;
{
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
int64_t nIndex;
if (!ReserveKeyFromKeyPool(nIndex, keypool, internal) && !m_storage.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) {
if (m_storage.IsLocked()) return false;
@@ -1150,7 +1253,7 @@ bool LegacyScriptPubKeyMan::ReserveKeyFromKeyPool(int64_t& nIndex, CKeyPool& key
nIndex = -1;
keypool.vchPubKey = CPubKey();
{
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
bool fReturningInternal = fRequestedInternal;
fReturningInternal &= (IsHDEnabled() && m_storage.CanSupportFeature(FEATURE_HD_SPLIT)) || m_storage.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
@@ -1210,7 +1313,7 @@ void LegacyScriptPubKeyMan::LearnAllRelatedScripts(const CPubKey& key)
void LegacyScriptPubKeyMan::MarkReserveKeysAsUsed(int64_t keypool_id)
{
- AssertLockHeld(cs_wallet);
+ AssertLockHeld(cs_KeyStore);
bool internal = setInternalKeyPool.count(keypool_id);
if (!internal) assert(setExternalKeyPool.count(keypool_id) || set_pre_split_keypool.count(keypool_id));
std::set<int64_t> *setKeyPool = internal ? &setInternalKeyPool : (set_pre_split_keypool.empty() ? &setExternalKeyPool : &set_pre_split_keypool);
@@ -1281,7 +1384,7 @@ bool LegacyScriptPubKeyMan::AddCScriptWithDB(WalletBatch& batch, const CScript&
bool LegacyScriptPubKeyMan::AddKeyOriginWithDB(WalletBatch& batch, const CPubKey& pubkey, const KeyOriginInfo& info)
{
- LOCK(cs_wallet);
+ LOCK(cs_KeyStore);
std::copy(info.fingerprint, info.fingerprint + 4, mapKeyMetadata[pubkey.GetID()].key_origin.fingerprint);
mapKeyMetadata[pubkey.GetID()].key_origin.path = info.path;
mapKeyMetadata[pubkey.GetID()].has_key_origin = true;
@@ -1393,13 +1496,3 @@ std::set<CKeyID> LegacyScriptPubKeyMan::GetKeys() const
}
return set_address;
}
-
-// Temporary CWallet accessors and aliases.
-LegacyScriptPubKeyMan::LegacyScriptPubKeyMan(CWallet& wallet)
- : ScriptPubKeyMan(wallet),
- m_wallet(wallet),
- cs_wallet(wallet.cs_wallet) {}
-
-void LegacyScriptPubKeyMan::NotifyWatchonlyChanged(bool fHaveWatchOnly) const { return m_wallet.NotifyWatchonlyChanged(fHaveWatchOnly); }
-void LegacyScriptPubKeyMan::NotifyCanGetAddressesChanged() const { return m_wallet.NotifyCanGetAddressesChanged(); }
-template<typename... Params> void LegacyScriptPubKeyMan::WalletLogPrintf(const std::string& fmt, const Params&... parameters) const { return m_wallet.WalletLogPrintf(fmt, parameters...); }
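
A minimal sketch, assuming a `LegacyScriptPubKeyMan* spk_man` and a prepared `CMutableTransaction rawTx`, of driving the per-manager FillPSBT() added above; the variable names are illustrative and the error handling simply mirrors the RPC callers earlier in this diff:

    PartiallySignedTransaction psbtx(rawTx);
    // Update scripts and BIP32 derivation paths, but do not sign yet.
    const TransactionError err = spk_man->FillPSBT(psbtx, /* sighash_type= */ 1 /* SIGHASH_ALL */,
                                                   /* sign= */ false, /* bip32derivs= */ true);
    if (err != TransactionError::OK) {
        throw JSONRPCTransactionError(err);
    }
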
diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h
index b78494921c..8512eadf31 100644
--- a/src/wallet/scriptpubkeyman.h
+++ b/src/wallet/scriptpubkeyman.h
@@ -1,12 +1,15 @@
-// Copyright (c) 2019 The Bitcoin Core developers
+// Copyright (c) 2019-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_WALLET_SCRIPTPUBKEYMAN_H
#define BITCOIN_WALLET_SCRIPTPUBKEYMAN_H
+#include <psbt.h>
#include <script/signingprovider.h>
#include <script/standard.h>
+#include <util/error.h>
+#include <util/message.h>
#include <wallet/crypter.h>
#include <wallet/ismine.h>
#include <wallet/walletdb.h>
@@ -74,6 +77,11 @@ std::vector<CKeyID> GetAffectedKeys(const CScript& spk, const SigningProvider& p
* keys (by default 1000) ahead of the last used key and scans for the
* addresses of those keys. This avoids the risk of not seeing transactions
* involving the wallet's addresses, or of re-using the same address.
+ * In the unlikely case where none of the addresses in the `gap limit` are
+ * used on-chain, the look-ahead will not be incremented (it keeps a constant
+ * size), so addresses beyond this range will not be detected by an old
+ * backup. For this reason, it is not recommended to decrease the keypool size
+ * below its default value.
*
* The HD-split wallet feature added a second keypool (commit: 02592f4c). There
* is an external keypool (for addresses to hand out) and an internal keypool
@@ -179,7 +187,7 @@ public:
virtual bool IsHDEnabled() const { return false; }
/* Returns true if the wallet can give out new addresses. This means it has keys in the keypool or can generate new keys */
- virtual bool CanGetAddresses(bool internal = false) { return false; }
+ virtual bool CanGetAddresses(bool internal = false) const { return false; }
/** Upgrades the wallet to the specified version */
virtual bool Upgrade(int prev_version, std::string& error) { return false; }
@@ -189,15 +197,42 @@ public:
//! The action to do when the DB needs rewrite
virtual void RewriteDB() {}
- virtual int64_t GetOldestKeyPoolTime() { return GetTime(); }
+ virtual int64_t GetOldestKeyPoolTime() const { return GetTime(); }
- virtual size_t KeypoolCountExternalKeys() { return 0; }
+ virtual size_t KeypoolCountExternalKeys() const { return 0; }
virtual unsigned int GetKeyPoolSize() const { return 0; }
virtual int64_t GetTimeFirstKey() const { return 0; }
- //! Return address metadata
virtual const CKeyMetadata* GetMetadata(const CTxDestination& dest) const { return nullptr; }
+
+ virtual std::unique_ptr<SigningProvider> GetSolvingProvider(const CScript& script) const { return nullptr; }
+
+ /** Whether this ScriptPubKeyMan can provide a SigningProvider (via GetSolvingProvider) that, combined with
+ * sigdata, can produce solving data.
+ */
+ virtual bool CanProvide(const CScript& script, SignatureData& sigdata) { return false; }
+
+ /** Creates new signatures and adds them to the transaction. Returns whether all inputs were signed */
+ virtual bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const { return false; }
+ /** Sign a message with the given script */
+ virtual SigningResult SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const { return SigningResult::SIGNING_FAILED; };
+ /** Adds script and derivation path information to a PSBT, and optionally signs it. */
+ virtual TransactionError FillPSBT(PartiallySignedTransaction& psbt, int sighash_type = 1 /* SIGHASH_ALL */, bool sign = true, bool bip32derivs = false) const { return TransactionError::INVALID_PSBT; }
+
+ virtual uint256 GetID() const { return uint256(); }
+
+ /** Prepends the wallet name in logging output to ease debugging in multi-wallet use cases */
+ template<typename... Params>
+ void WalletLogPrintf(std::string fmt, Params... parameters) const {
+ LogPrintf(("%s " + fmt).c_str(), m_storage.GetDisplayName(), parameters...);
+ };
+
+ /** Watch-only address added */
+ boost::signals2::signal<void (bool fHaveWatchOnly)> NotifyWatchonlyChanged;
+
+ /** Keypool has new keys */
+ boost::signals2::signal<void ()> NotifyCanGetAddressesChanged;
};
class LegacyScriptPubKeyMan : public ScriptPubKeyMan, public FillableSigningProvider
@@ -209,7 +244,7 @@ private:
using WatchOnlySet = std::set<CScript>;
using WatchKeyMap = std::map<CKeyID, CPubKey>;
- WalletBatch *encrypted_batch GUARDED_BY(cs_wallet) = nullptr;
+ WalletBatch *encrypted_batch GUARDED_BY(cs_KeyStore) = nullptr;
using CryptedKeyMap = std::map<CKeyID, std::pair<CPubKey, std::vector<unsigned char>>>;
@@ -217,7 +252,7 @@ private:
WatchOnlySet setWatchOnly GUARDED_BY(cs_KeyStore);
WatchKeyMap mapWatchKeys GUARDED_BY(cs_KeyStore);
- int64_t nTimeFirstKey GUARDED_BY(cs_wallet) = 0;
+ int64_t nTimeFirstKey GUARDED_BY(cs_KeyStore) = 0;
bool AddKeyPubKeyInner(const CKey& key, const CPubKey &pubkey);
bool AddCryptedKeyInner(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret);
@@ -231,14 +266,14 @@ private:
* of the other AddWatchOnly which accepts a timestamp and sets
* nTimeFirstKey more intelligently for more efficient rescans.
*/
- bool AddWatchOnly(const CScript& dest) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- bool AddWatchOnlyWithDB(WalletBatch &batch, const CScript& dest) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ bool AddWatchOnly(const CScript& dest) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
+ bool AddWatchOnlyWithDB(WalletBatch &batch, const CScript& dest) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
bool AddWatchOnlyInMem(const CScript &dest);
//! Adds a watch-only address to the store, and saves it to disk.
- bool AddWatchOnlyWithDB(WalletBatch &batch, const CScript& dest, int64_t create_time) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ bool AddWatchOnlyWithDB(WalletBatch &batch, const CScript& dest, int64_t create_time) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
//! Adds a key to the store, and saves it to disk.
- bool AddKeyPubKeyWithDB(WalletBatch &batch,const CKey& key, const CPubKey &pubkey) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ bool AddKeyPubKeyWithDB(WalletBatch &batch,const CKey& key, const CPubKey &pubkey) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
void AddKeypoolPubkeyWithDB(const CPubKey& pubkey, const bool internal, WalletBatch& batch);
@@ -252,12 +287,12 @@ private:
CHDChain hdChain;
/* HD derive new child key (on internal or external chain) */
- void DeriveNewChildKey(WalletBatch& batch, CKeyMetadata& metadata, CKey& secret, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ void DeriveNewChildKey(WalletBatch& batch, CKeyMetadata& metadata, CKey& secret, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
- std::set<int64_t> setInternalKeyPool GUARDED_BY(cs_wallet);
- std::set<int64_t> setExternalKeyPool GUARDED_BY(cs_wallet);
- std::set<int64_t> set_pre_split_keypool GUARDED_BY(cs_wallet);
- int64_t m_max_keypool_index GUARDED_BY(cs_wallet) = 0;
+ std::set<int64_t> setInternalKeyPool GUARDED_BY(cs_KeyStore);
+ std::set<int64_t> setExternalKeyPool GUARDED_BY(cs_KeyStore);
+ std::set<int64_t> set_pre_split_keypool GUARDED_BY(cs_KeyStore);
+ int64_t m_max_keypool_index GUARDED_BY(cs_KeyStore) = 0;
std::map<CKeyID, int64_t> m_pool_key_to_index;
// Tracks keypool indexes to CKeyIDs of keys that have been taken out of the keypool but may be returned to it
std::map<int64_t, CKeyID> m_index_to_reserved_key;
@@ -282,6 +317,8 @@ private:
bool ReserveKeyFromKeyPool(int64_t& nIndex, CKeyPool& keypool, bool fRequestedInternal);
public:
+ using ScriptPubKeyMan::ScriptPubKeyMan;
+
bool GetNewDestination(const OutputType type, CTxDestination& dest, std::string& error) override;
isminetype IsMine(const CScript& script) const override;
@@ -297,7 +334,7 @@ public:
void MarkUnusedAddresses(const CScript& script) override;
//! Upgrade stored CKeyMetadata objects to store key origin info as KeyOriginInfo
- void UpgradeKeyMetadata() EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ void UpgradeKeyMetadata();
bool IsHDEnabled() const override;
@@ -309,38 +346,48 @@ public:
void RewriteDB() override;
- int64_t GetOldestKeyPoolTime() override;
- size_t KeypoolCountExternalKeys() override EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ int64_t GetOldestKeyPoolTime() const override;
+ size_t KeypoolCountExternalKeys() const override;
unsigned int GetKeyPoolSize() const override;
int64_t GetTimeFirstKey() const override;
const CKeyMetadata* GetMetadata(const CTxDestination& dest) const override;
- bool CanGetAddresses(bool internal = false) override;
+ bool CanGetAddresses(bool internal = false) const override;
+
+ std::unique_ptr<SigningProvider> GetSolvingProvider(const CScript& script) const override;
+
+ bool CanProvide(const CScript& script, SignatureData& sigdata) override;
+
+ bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const override;
+ SigningResult SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const override;
+ TransactionError FillPSBT(PartiallySignedTransaction& psbt, int sighash_type = 1 /* SIGHASH_ALL */, bool sign = true, bool bip32derivs = false) const override;
+
+ uint256 GetID() const override;
// Map from Key ID to key metadata.
- std::map<CKeyID, CKeyMetadata> mapKeyMetadata GUARDED_BY(cs_wallet);
+ std::map<CKeyID, CKeyMetadata> mapKeyMetadata GUARDED_BY(cs_KeyStore);
// Map from Script ID to key metadata (for watch-only keys).
- std::map<CScriptID, CKeyMetadata> m_script_metadata GUARDED_BY(cs_wallet);
+ std::map<CScriptID, CKeyMetadata> m_script_metadata GUARDED_BY(cs_KeyStore);
//! Adds a key to the store, and saves it to disk.
- bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override;
//! Adds a key to the store, without saving it to disk (used by LoadWallet)
bool LoadKey(const CKey& key, const CPubKey &pubkey);
//! Adds an encrypted key to the store, and saves it to disk.
bool AddCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret);
//! Adds an encrypted key to the store, without saving it to disk (used by LoadWallet)
bool LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret);
- void UpdateTimeFirstKey(int64_t nCreateTime) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ void UpdateTimeFirstKey(int64_t nCreateTime) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
//! Adds a CScript to the store
bool LoadCScript(const CScript& redeemScript);
//! Load metadata (used by LoadWallet)
- void LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata &metadata) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- void LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata &metadata) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ void LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata &metadata);
+ void LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata &metadata);
//! Generate a new key
- CPubKey GenerateNewKey(WalletBatch& batch, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ CPubKey GenerateNewKey(WalletBatch& batch, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
/* Set the HD chain model (chain child index counters) */
void SetHDChain(const CHDChain& chain, bool memonly);
@@ -353,8 +400,8 @@ public:
//! Returns whether there are any watch-only things in the wallet
bool HaveWatchOnly() const;
//! Remove a watch only script from the keystore
- bool RemoveWatchOnly(const CScript &dest) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- bool AddWatchOnly(const CScript& dest, int64_t nCreateTime) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ bool RemoveWatchOnly(const CScript &dest);
+ bool AddWatchOnly(const CScript& dest, int64_t nCreateTime) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
//! Fetches a pubkey from mapWatchKeys if it exists there
bool GetWatchPubKey(const CKeyID &address, CPubKey &pubkey_out) const;
@@ -367,17 +414,17 @@ public:
bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const override;
//! Load a keypool entry
- void LoadKeyPool(int64_t nIndex, const CKeyPool &keypool) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ void LoadKeyPool(int64_t nIndex, const CKeyPool &keypool);
bool NewKeyPool();
- void MarkPreSplitKeys() EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ void MarkPreSplitKeys() EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
- bool ImportScripts(const std::set<CScript> scripts, int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- bool ImportPrivKeys(const std::map<CKeyID, CKey>& privkey_map, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- bool ImportPubKeys(const std::vector<CKeyID>& ordered_pubkeys, const std::map<CKeyID, CPubKey>& pubkey_map, const std::map<CKeyID, std::pair<CPubKey, KeyOriginInfo>>& key_origins, const bool add_keypool, const bool internal, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- bool ImportScriptPubKeys(const std::set<CScript>& script_pub_keys, const bool have_solving_data, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ bool ImportScripts(const std::set<CScript> scripts, int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
+ bool ImportPrivKeys(const std::map<CKeyID, CKey>& privkey_map, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
+ bool ImportPubKeys(const std::vector<CKeyID>& ordered_pubkeys, const std::map<CKeyID, CPubKey>& pubkey_map, const std::map<CKeyID, std::pair<CPubKey, KeyOriginInfo>>& key_origins, const bool add_keypool, const bool internal, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
+ bool ImportScriptPubKeys(const std::set<CScript>& script_pub_keys, const bool have_solving_data, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
/* Returns true if the wallet can generate new keys */
- bool CanGenerateKeys();
+ bool CanGenerateKeys() const;
/* Generates a new HD seed (will not be activated) */
CPubKey GenerateNewSeed();
@@ -408,19 +455,26 @@ public:
/**
* Marks all keys in the keypool up to and including reserve_key as used.
*/
- void MarkReserveKeysAsUsed(int64_t keypool_id) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ void MarkReserveKeysAsUsed(int64_t keypool_id) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
const std::map<CKeyID, int64_t>& GetAllReserveKeys() const { return m_pool_key_to_index; }
std::set<CKeyID> GetKeys() const override;
- // Temporary CWallet accessors and aliases.
- friend class CWallet;
- friend class ReserveDestination;
- LegacyScriptPubKeyMan(CWallet& wallet);
- void NotifyWatchonlyChanged(bool fHaveWatchOnly) const;
- void NotifyCanGetAddressesChanged() const;
- template<typename... Params> void WalletLogPrintf(const std::string& fmt, const Params&... parameters) const;
- CWallet& m_wallet;
- CCriticalSection& cs_wallet;
+};
+
+/** Wraps a LegacyScriptPubKeyMan so that it can be returned in a new unique_ptr. Does not provide privkeys */
+class LegacySigningProvider : public SigningProvider
+{
+private:
+ const LegacyScriptPubKeyMan& m_spk_man;
+public:
+ LegacySigningProvider(const LegacyScriptPubKeyMan& spk_man) : m_spk_man(spk_man) {}
+
+ bool GetCScript(const CScriptID &scriptid, CScript& script) const override { return m_spk_man.GetCScript(scriptid, script); }
+ bool HaveCScript(const CScriptID &scriptid) const override { return m_spk_man.HaveCScript(scriptid); }
+ bool GetPubKey(const CKeyID &address, CPubKey& pubkey) const override { return m_spk_man.GetPubKey(address, pubkey); }
+ bool GetKey(const CKeyID &address, CKey& key) const override { return false; }
+ bool HaveKey(const CKeyID &address) const override { return false; }
+ bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const override { return m_spk_man.GetKeyOrigin(keyid, info); }
};
#endif // BITCOIN_WALLET_SCRIPTPUBKEYMAN_H
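
A sketch of how calling code could combine the CanProvide()/GetSolvingProvider() pair declared above; the free function and its parameters are assumptions for illustration, not part of this interface:

    // Return a solving provider for `script` if this key manager recognizes it,
    // otherwise nullptr. The returned LegacySigningProvider exposes scripts,
    // public keys and key origins, but never private keys.
    std::unique_ptr<SigningProvider> SolvingProviderFor(LegacyScriptPubKeyMan& spk_man, const CScript& script)
    {
        SignatureData sigdata;
        if (!spk_man.CanProvide(script, sigdata)) return nullptr;
        return spk_man.GetSolvingProvider(script);
    }
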
diff --git a/src/wallet/test/coinselector_tests.cpp b/src/wallet/test/coinselector_tests.cpp
index 7e9f88f6b7..21d57cb898 100644
--- a/src/wallet/test/coinselector_tests.cpp
+++ b/src/wallet/test/coinselector_tests.cpp
@@ -78,6 +78,7 @@ static void add_coin(CWallet& wallet, const CAmount& nValue, int nAge = 6*24, bo
if (fIsFromMe)
{
wtx->m_amounts[CWalletTx::DEBIT].Set(ISMINE_SPENDABLE, 1);
+ wtx->m_is_cache_empty = false;
}
COutput output(wtx.get(), nInput, nAge, true /* spendable */, true /* solvable */, true /* safe */);
vCoins.push_back(output);
@@ -135,6 +136,7 @@ BOOST_AUTO_TEST_CASE(bnb_search_test)
{
LOCK(testWallet.cs_wallet);
+ testWallet.SetupLegacyScriptPubKeyMan();
// Setup
std::vector<CInputCoin> utxo_pool;
@@ -187,6 +189,19 @@ BOOST_AUTO_TEST_CASE(bnb_search_test)
actual_selection.clear();
selection.clear();
+ // Cost of change is greater than the difference between target value and utxo sum
+ add_coin(1 * CENT, 1, actual_selection);
+ BOOST_CHECK(SelectCoinsBnB(GroupCoins(utxo_pool), 0.9 * CENT, 0.5 * CENT, selection, value_ret, not_input_fees));
+ BOOST_CHECK_EQUAL(value_ret, 1 * CENT);
+ BOOST_CHECK(equal_sets(selection, actual_selection));
+ actual_selection.clear();
+ selection.clear();
+
+ // Cost of change is less than the difference between target value and utxo sum
+ BOOST_CHECK(!SelectCoinsBnB(GroupCoins(utxo_pool), 0.9 * CENT, 0, selection, value_ret, not_input_fees));
+ actual_selection.clear();
+ selection.clear();
+
// Select 10 Cent
add_coin(5 * CENT, 5, utxo_pool);
add_coin(4 * CENT, 4, actual_selection);
@@ -277,6 +292,7 @@ BOOST_AUTO_TEST_CASE(bnb_search_test)
std::unique_ptr<CWallet> wallet = MakeUnique<CWallet>(m_chain.get(), WalletLocation(), WalletDatabase::CreateMock());
bool firstRun;
wallet->LoadWallet(firstRun);
+ wallet->SetupLegacyScriptPubKeyMan();
LOCK(wallet->cs_wallet);
add_coin(*wallet, 5 * CENT, 6 * 24, false, 0, true);
add_coin(*wallet, 3 * CENT, 6 * 24, false, 0, true);
@@ -298,6 +314,7 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test)
bool bnb_used;
LOCK(testWallet.cs_wallet);
+ testWallet.SetupLegacyScriptPubKeyMan();
// test multiple times to allow for differences in the shuffle order
for (int i = 0; i < RUN_TESTS; i++)
@@ -577,6 +594,7 @@ BOOST_AUTO_TEST_CASE(ApproximateBestSubset)
bool bnb_used;
LOCK(testWallet.cs_wallet);
+ testWallet.SetupLegacyScriptPubKeyMan();
empty_wallet();
@@ -595,6 +613,8 @@ BOOST_AUTO_TEST_CASE(ApproximateBestSubset)
// Tests that with the ideal conditions, the coin selector will always be able to find a solution that can pay the target value
BOOST_AUTO_TEST_CASE(SelectCoins_test)
{
+ testWallet.SetupLegacyScriptPubKeyMan();
+
// Random generator stuff
std::default_random_engine generator;
std::exponential_distribution<double> distribution (100);
diff --git a/src/wallet/test/ismine_tests.cpp b/src/wallet/test/ismine_tests.cpp
index 76c3639d16..4c0e4dc653 100644
--- a/src/wallet/test/ismine_tests.cpp
+++ b/src/wallet/test/ismine_tests.cpp
@@ -36,7 +36,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2PK compressed
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
scriptPubKey = GetScriptForRawPubKey(pubkeys[0]);
// Keystore does not have key
@@ -52,7 +53,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2PK uncompressed
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
scriptPubKey = GetScriptForRawPubKey(uncompressedPubkey);
// Keystore does not have key
@@ -68,7 +70,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2PKH compressed
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
scriptPubKey = GetScriptForDestination(PKHash(pubkeys[0]));
// Keystore does not have key
@@ -84,7 +87,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2PKH uncompressed
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
scriptPubKey = GetScriptForDestination(PKHash(uncompressedPubkey));
// Keystore does not have key
@@ -100,7 +104,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2SH
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
CScript redeemScript = GetScriptForDestination(PKHash(pubkeys[0]));
scriptPubKey = GetScriptForDestination(ScriptHash(redeemScript));
@@ -123,7 +128,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// (P2PKH inside) P2SH inside P2SH (invalid)
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
CScript redeemscript_inner = GetScriptForDestination(PKHash(pubkeys[0]));
CScript redeemscript = GetScriptForDestination(ScriptHash(redeemscript_inner));
@@ -140,7 +146,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// (P2PKH inside) P2SH inside P2WSH (invalid)
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
CScript redeemscript = GetScriptForDestination(PKHash(pubkeys[0]));
CScript witnessscript = GetScriptForDestination(ScriptHash(redeemscript));
@@ -157,7 +164,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WPKH inside P2WSH (invalid)
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
CScript witnessscript = GetScriptForDestination(WitnessV0KeyHash(PKHash(pubkeys[0])));
scriptPubKey = GetScriptForDestination(WitnessV0ScriptHash(witnessscript));
@@ -172,7 +180,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// (P2PKH inside) P2WSH inside P2WSH (invalid)
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
CScript witnessscript_inner = GetScriptForDestination(PKHash(pubkeys[0]));
CScript witnessscript = GetScriptForDestination(WitnessV0ScriptHash(witnessscript_inner));
@@ -189,7 +198,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WPKH compressed
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0]));
scriptPubKey = GetScriptForDestination(WitnessV0KeyHash(PKHash(pubkeys[0])));
@@ -203,7 +213,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WPKH uncompressed
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(uncompressedKey));
scriptPubKey = GetScriptForDestination(WitnessV0KeyHash(PKHash(uncompressedPubkey)));
@@ -221,7 +232,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// scriptPubKey multisig
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
scriptPubKey = GetScriptForMultisig(2, {uncompressedPubkey, pubkeys[1]});
@@ -251,7 +263,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2SH multisig
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(uncompressedKey));
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[1]));
@@ -271,7 +284,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WSH multisig with compressed keys
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0]));
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[1]));
@@ -296,7 +310,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WSH multisig with uncompressed key
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(uncompressedKey));
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[1]));
@@ -321,7 +336,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// P2WSH multisig wrapped in P2SH
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
CScript witnessScript = GetScriptForMultisig(2, {pubkeys[0], pubkeys[1]});
CScript redeemScript = GetScriptForDestination(WitnessV0ScriptHash(witnessScript));
@@ -347,7 +363,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// OP_RETURN
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0]));
scriptPubKey.clear();
@@ -360,7 +377,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// witness unspendable
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0]));
scriptPubKey.clear();
@@ -373,7 +391,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// witness unknown
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0]));
scriptPubKey.clear();
@@ -386,7 +405,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
// Nonstandard
{
CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- LOCK(keystore.cs_wallet);
+ keystore.SetupLegacyScriptPubKeyMan();
+ LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0]));
scriptPubKey.clear();
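
The change repeated throughout these test cases boils down to one setup idiom: construct the wallet, explicitly create its legacy ScriptPubKeyMan, then lock the keystore mutex instead of cs_wallet. Collected in one place, using the same names as the tests above:

    CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
    keystore.SetupLegacyScriptPubKeyMan();
    LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore);
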
diff --git a/src/wallet/test/psbt_wallet_tests.cpp b/src/wallet/test/psbt_wallet_tests.cpp
index d930ca6bea..8b7b7af21d 100644
--- a/src/wallet/test/psbt_wallet_tests.cpp
+++ b/src/wallet/test/psbt_wallet_tests.cpp
@@ -5,7 +5,6 @@
#include <key_io.h>
#include <util/bip32.h>
#include <util/strencodings.h>
-#include <wallet/psbtwallet.h>
#include <wallet/wallet.h>
#include <boost/test/unit_test.hpp>
@@ -16,8 +15,8 @@ BOOST_FIXTURE_TEST_SUITE(psbt_wallet_tests, WalletTestingSetup)
BOOST_AUTO_TEST_CASE(psbt_updater_test)
{
- auto spk_man = m_wallet.GetLegacyScriptPubKeyMan();
- LOCK(m_wallet.cs_wallet);
+ auto spk_man = m_wallet.GetOrCreateLegacyScriptPubKeyMan();
+ LOCK2(m_wallet.cs_wallet, spk_man->cs_KeyStore);
// Create prevtxs and add to wallet
CDataStream s_prev_tx1(ParseHex("0200000000010158e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd7501000000171600145f275f436b09a8cc9a2eb2a2f528485c68a56323feffffff02d8231f1b0100000017a914aed962d6654f9a2b36608eb9d64d2b260db4f1118700c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e88702483045022100a22edcc6e5bc511af4cc4ae0de0fcd75c7e04d8c1c3a8aa9d820ed4b967384ec02200642963597b9b1bc22c75e9f3e117284a962188bf5e8a74c895089046a20ad770121035509a48eb623e10aace8bfd0212fdb8a8e5af3c94b0b133b95e114cab89e4f7965000000"), SER_NETWORK, PROTOCOL_VERSION);
@@ -61,13 +60,20 @@ BOOST_AUTO_TEST_CASE(psbt_updater_test)
// Fill transaction with our data
bool complete = true;
- BOOST_REQUIRE_EQUAL(TransactionError::OK, FillPSBT(&m_wallet, psbtx, complete, SIGHASH_ALL, false, true));
+ BOOST_REQUIRE_EQUAL(TransactionError::OK, m_wallet.FillPSBT(psbtx, complete, SIGHASH_ALL, false, true));
// Get the final tx
CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
ssTx << psbtx;
std::string final_hex = HexStr(ssTx.begin(), ssTx.end());
BOOST_CHECK_EQUAL(final_hex, "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f0000008000000080010000800001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e88701042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc10d90c6a4f00000080000000800200008000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000");
+
+ // Mutate the transaction so that one of the inputs is invalid
+ psbtx.tx->vin[0].prevout.n = 2;
+
+ // Try to sign the mutated input
+ SignatureData sigdata;
+ BOOST_CHECK(spk_man->FillPSBT(psbtx, SIGHASH_ALL, true, true) != TransactionError::OK);
}
BOOST_AUTO_TEST_CASE(parse_hd_keypath)
diff --git a/src/wallet/test/scriptpubkeyman_tests.cpp b/src/wallet/test/scriptpubkeyman_tests.cpp
new file mode 100644
index 0000000000..757865ea37
--- /dev/null
+++ b/src/wallet/test/scriptpubkeyman_tests.cpp
@@ -0,0 +1,43 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <key.h>
+#include <script/standard.h>
+#include <test/util/setup_common.h>
+#include <wallet/scriptpubkeyman.h>
+#include <wallet/wallet.h>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(scriptpubkeyman_tests, BasicTestingSetup)
+
+// Test LegacyScriptPubKeyMan::CanProvide behavior, making sure it returns true
+// for recognized scripts even when keys may not be available for signing.
+BOOST_AUTO_TEST_CASE(CanProvide)
+{
+ // Set up wallet and keyman variables.
+ NodeContext node;
+ std::unique_ptr<interfaces::Chain> chain = interfaces::MakeChain(node);
+ CWallet wallet(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
+ LegacyScriptPubKeyMan& keyman = *wallet.GetOrCreateLegacyScriptPubKeyMan();
+
+ // Make a 1 of 2 multisig script
+ std::vector<CKey> keys(2);
+ std::vector<CPubKey> pubkeys;
+ for (CKey& key : keys) {
+ key.MakeNewKey(true);
+ pubkeys.emplace_back(key.GetPubKey());
+ }
+ CScript multisig_script = GetScriptForMultisig(1, pubkeys);
+ CScript p2sh_script = GetScriptForDestination(ScriptHash(multisig_script));
+ SignatureData data;
+
+ // Verify the p2sh(multisig) script is not recognized until the multisig
+ // script is added to the keystore to make it solvable
+ BOOST_CHECK(!keyman.CanProvide(p2sh_script, data));
+ keyman.AddCScript(multisig_script);
+ BOOST_CHECK(keyman.CanProvide(p2sh_script, data));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 2f21b2439b..a487e9e2e0 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -28,9 +28,8 @@ BOOST_FIXTURE_TEST_SUITE(wallet_tests, WalletTestingSetup)
static void AddKey(CWallet& wallet, const CKey& key)
{
- auto spk_man = wallet.GetLegacyScriptPubKeyMan();
- LOCK(wallet.cs_wallet);
- AssertLockHeld(spk_man->cs_wallet);
+ auto spk_man = wallet.GetOrCreateLegacyScriptPubKeyMan();
+ LOCK2(wallet.cs_wallet, spk_man->cs_KeyStore);
spk_man->AddKeyPubKey(key, key.GetPubKey());
}
@@ -152,6 +151,7 @@ BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup)
// after.
{
std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
+ wallet->SetupLegacyScriptPubKeyMan();
AddWallet(wallet);
UniValue keys;
keys.setArray();
@@ -216,9 +216,8 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
// Import key into wallet and call dumpwallet to create backup file.
{
std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- auto spk_man = wallet->GetLegacyScriptPubKeyMan();
- LOCK(wallet->cs_wallet);
- AssertLockHeld(spk_man->cs_wallet);
+ auto spk_man = wallet->GetOrCreateLegacyScriptPubKeyMan();
+ LOCK2(wallet->cs_wallet, spk_man->cs_KeyStore);
spk_man->mapKeyMetadata[coinbaseKey.GetPubKey().GetID()].nCreateTime = KEY_TIME;
spk_man->AddKeyPubKey(coinbaseKey, coinbaseKey.GetPubKey());
@@ -234,6 +233,7 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
// were scanned, and no prior blocks were scanned.
{
std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
+ wallet->SetupLegacyScriptPubKeyMan();
JSONRPCRequest request;
request.params.setArray();
@@ -267,13 +267,12 @@ BOOST_FIXTURE_TEST_CASE(coin_mark_dirty_immature_credit, TestChain100Setup)
auto chain = interfaces::MakeChain(node);
CWallet wallet(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
- auto spk_man = wallet.GetLegacyScriptPubKeyMan();
+ auto spk_man = wallet.GetOrCreateLegacyScriptPubKeyMan();
CWalletTx wtx(&wallet, m_coinbase_txns.back());
auto locked_chain = chain->lock();
LockAssertion lock(::cs_main);
- LOCK(wallet.cs_wallet);
- AssertLockHeld(spk_man->cs_wallet);
+ LOCK2(wallet.cs_wallet, spk_man->cs_KeyStore);
wallet.SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash());
CWalletTx::Confirmation confirm(CWalletTx::Status::CONFIRMED, ::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash(), 0);
@@ -283,7 +282,7 @@ BOOST_FIXTURE_TEST_CASE(coin_mark_dirty_immature_credit, TestChain100Setup)
// cache the current immature credit amount, which is 0.
BOOST_CHECK_EQUAL(wtx.GetImmatureCredit(), 0);
- // Invalidate the cached vanue, add the key, and make sure a new immature
+ // Invalidate the cached value, add the key, and make sure a new immature
// credit amount is calculated.
wtx.MarkDirty();
BOOST_CHECK(spk_man->AddKeyPubKey(coinbaseKey, coinbaseKey.GetPubKey()));
@@ -377,7 +376,7 @@ static void TestWatchOnlyPubKey(LegacyScriptPubKeyMan* spk_man, const CPubKey& a
CScript p2pk = GetScriptForRawPubKey(add_pubkey);
CKeyID add_address = add_pubkey.GetID();
CPubKey found_pubkey;
- LOCK(spk_man->cs_wallet);
+ LOCK(spk_man->cs_KeyStore);
// all Scripts (i.e. also all PubKeys) are added to the general watch-only set
BOOST_CHECK(!spk_man->HaveWatchOnly(p2pk));
@@ -394,7 +393,6 @@ static void TestWatchOnlyPubKey(LegacyScriptPubKeyMan* spk_man, const CPubKey& a
BOOST_CHECK(found_pubkey == CPubKey()); // passed key is unchanged
}
- AssertLockHeld(spk_man->cs_wallet);
spk_man->RemoveWatchOnly(p2pk);
BOOST_CHECK(!spk_man->HaveWatchOnly(p2pk));
@@ -419,7 +417,7 @@ BOOST_AUTO_TEST_CASE(WatchOnlyPubKeys)
{
CKey key;
CPubKey pubkey;
- LegacyScriptPubKeyMan* spk_man = m_wallet.GetLegacyScriptPubKeyMan();
+ LegacyScriptPubKeyMan* spk_man = m_wallet.GetOrCreateLegacyScriptPubKeyMan();
BOOST_CHECK(!spk_man->HaveWatchOnly());
@@ -581,6 +579,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_disableprivkeys, TestChain100Setup)
NodeContext node;
auto chain = interfaces::MakeChain(node);
std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
+ wallet->SetupLegacyScriptPubKeyMan();
wallet->SetMinVersion(FEATURE_LATEST);
wallet->SetWalletFlag(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
BOOST_CHECK(!wallet->TopUpKeyPool(1000));
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 3954f66267..79e29d050f 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -13,6 +13,7 @@
#include <interfaces/wallet.h>
#include <key.h>
#include <key_io.h>
+#include <optional.h>
#include <policy/fees.h>
#include <policy/policy.h>
#include <primitives/block.h>
@@ -26,7 +27,6 @@
#include <util/moneystr.h>
#include <util/rbf.h>
#include <util/translation.h>
-#include <util/validation.h>
#include <wallet/coincontrol.h>
#include <wallet/fees.h>
@@ -45,8 +45,9 @@ const std::map<uint64_t,std::string> WALLET_FLAG_CAVEATS{
static const size_t OUTPUT_GROUP_MAX_ENTRIES = 10;
-static CCriticalSection cs_wallets;
+static RecursiveMutex cs_wallets;
static std::vector<std::shared_ptr<CWallet>> vpwallets GUARDED_BY(cs_wallets);
+static std::list<LoadWalletFn> g_load_wallet_fns GUARDED_BY(cs_wallets);
bool AddWallet(const std::shared_ptr<CWallet>& wallet)
{
@@ -55,6 +56,7 @@ bool AddWallet(const std::shared_ptr<CWallet>& wallet)
std::vector<std::shared_ptr<CWallet>>::const_iterator i = std::find(vpwallets.begin(), vpwallets.end(), wallet);
if (i != vpwallets.end()) return false;
vpwallets.push_back(wallet);
+ wallet->ConnectScriptPubKeyManNotifiers();
return true;
}
@@ -89,6 +91,13 @@ std::shared_ptr<CWallet> GetWallet(const std::string& name)
return nullptr;
}
+std::unique_ptr<interfaces::Handler> HandleLoadWallet(LoadWalletFn load_wallet)
+{
+ LOCK(cs_wallets);
+ auto it = g_load_wallet_fns.emplace(g_load_wallet_fns.end(), std::move(load_wallet));
+ return interfaces::MakeHandler([it] { LOCK(cs_wallets); g_load_wallet_fns.erase(it); });
+}
+
static Mutex g_wallet_release_mutex;
static std::condition_variable g_wallet_release_cv;
static std::set<std::string> g_unloading_wallet_set;
@@ -211,7 +220,8 @@ WalletCreationStatus CreateWallet(interfaces::Chain& chain, const SecureString&
// Set a seed for the wallet
{
- if (auto spk_man = wallet->m_spk_man.get()) {
+ LOCK(wallet->cs_wallet);
+ for (auto spk_man : wallet->GetActiveScriptPubKeyMans()) {
if (!spk_man->SetupGeneration()) {
error = "Unable to generate initial keys";
return WalletCreationStatus::CREATION_FAILED;
@@ -229,7 +239,7 @@ WalletCreationStatus CreateWallet(interfaces::Chain& chain, const SecureString&
return WalletCreationStatus::SUCCESS;
}
-const uint256 CWalletTx::ABANDON_HASH(uint256S("0000000000000000000000000000000000000000000000000000000000000001"));
+const uint256 CWalletTx::ABANDON_HASH(UINT256_ONE());
/** @defgroup mapWallet
*
@@ -256,10 +266,12 @@ void CWallet::UpgradeKeyMetadata()
return;
}
- if (m_spk_man) {
- AssertLockHeld(m_spk_man->cs_wallet);
- m_spk_man->UpgradeKeyMetadata();
+ auto spk_man = GetLegacyScriptPubKeyMan();
+ if (!spk_man) {
+ return;
}
+
+ spk_man->UpgradeKeyMetadata();
SetWalletFlag(WALLET_FLAG_KEY_ORIGIN_METADATA);
}
@@ -540,7 +552,8 @@ bool CWallet::EncryptWallet(const SecureString& strWalletPassphrase)
}
encrypted_batch->WriteMasterKey(nMasterKeyMaxID, kMasterKey);
- if (auto spk_man = m_spk_man.get()) {
+ for (const auto& spk_man_pair : m_spk_managers) {
+ auto spk_man = spk_man_pair.second.get();
if (!spk_man->Encrypt(_vMasterKey, encrypted_batch)) {
encrypted_batch->TxnAbort();
delete encrypted_batch;
@@ -569,7 +582,7 @@ bool CWallet::EncryptWallet(const SecureString& strWalletPassphrase)
Unlock(strWalletPassphrase);
// if we are using HD, replace the HD seed with a new one
- if (auto spk_man = m_spk_man.get()) {
+ if (auto spk_man = GetLegacyScriptPubKeyMan()) {
if (spk_man->IsHDEnabled()) {
if (!spk_man->SetupGeneration(true)) {
return false;
@@ -700,7 +713,7 @@ bool CWallet::MarkReplaced(const uint256& originalHash, const uint256& newHash)
return success;
}
-void CWallet::SetUsedDestinationState(WalletBatch& batch, const uint256& hash, unsigned int n, bool used)
+void CWallet::SetSpentKeyState(WalletBatch& batch, const uint256& hash, unsigned int n, bool used, std::set<CTxDestination>& tx_destinations)
{
AssertLockHeld(cs_wallet);
const CWalletTx* srctx = GetWalletTx(hash);
@@ -710,7 +723,9 @@ void CWallet::SetUsedDestinationState(WalletBatch& batch, const uint256& hash, u
if (ExtractDestination(srctx->tx->vout[n].scriptPubKey, dst)) {
if (IsMine(dst)) {
if (used && !GetDestData(dst, "used", nullptr)) {
- AddDestData(batch, dst, "used", "p"); // p for "present", opposite of absent (null)
+ if (AddDestData(batch, dst, "used", "p")) { // p for "present", opposite of absent (null)
+ tx_destinations.insert(dst);
+ }
} else if (!used && GetDestData(dst, "used", nullptr)) {
EraseDestData(batch, dst, "used");
}
@@ -718,17 +733,33 @@ void CWallet::SetUsedDestinationState(WalletBatch& batch, const uint256& hash, u
}
}
-bool CWallet::IsUsedDestination(const CTxDestination& dst) const
-{
- LOCK(cs_wallet);
- return IsMine(dst) && GetDestData(dst, "used", nullptr);
-}
-
-bool CWallet::IsUsedDestination(const uint256& hash, unsigned int n) const
+bool CWallet::IsSpentKey(const uint256& hash, unsigned int n) const
{
+ AssertLockHeld(cs_wallet);
CTxDestination dst;
const CWalletTx* srctx = GetWalletTx(hash);
- return srctx && ExtractDestination(srctx->tx->vout[n].scriptPubKey, dst) && IsUsedDestination(dst);
+ if (srctx) {
+ assert(srctx->tx->vout.size() > n);
+ LegacyScriptPubKeyMan* spk_man = GetLegacyScriptPubKeyMan();
+ // When descriptor wallets arrive, these additional checks are
+ // likely superfluous and can be optimized out
+ assert(spk_man != nullptr);
+ for (const auto& keyid : GetAffectedKeys(srctx->tx->vout[n].scriptPubKey, *spk_man)) {
+ WitnessV0KeyHash wpkh_dest(keyid);
+ if (GetDestData(wpkh_dest, "used", nullptr)) {
+ return true;
+ }
+ ScriptHash sh_wpkh_dest(GetScriptForDestination(wpkh_dest));
+ if (GetDestData(sh_wpkh_dest, "used", nullptr)) {
+ return true;
+ }
+ PKHash pkh_dest(keyid);
+ if (GetDestData(pkh_dest, "used", nullptr)) {
+ return true;
+ }
+ }
+ }
+ return false;
}
bool CWallet::AddToWallet(const CWalletTx& wtxIn, bool fFlushOnClose)
@@ -741,10 +772,14 @@ bool CWallet::AddToWallet(const CWalletTx& wtxIn, bool fFlushOnClose)
if (IsWalletFlagSet(WALLET_FLAG_AVOID_REUSE)) {
// Mark used destinations
+ std::set<CTxDestination> tx_destinations;
+
for (const CTxIn& txin : wtxIn.tx->vin) {
const COutPoint& op = txin.prevout;
- SetUsedDestinationState(batch, op.hash, op.n, true);
+ SetSpentKeyState(batch, op.hash, op.n, true, tx_destinations);
}
+
+ MarkDestinationsDirty(tx_destinations);
}
// Inserts only if not already there, returns tx inserted or tx found
@@ -811,6 +846,14 @@ bool CWallet::AddToWallet(const CWalletTx& wtxIn, bool fFlushOnClose)
if (!strCmd.empty())
{
boost::replace_all(strCmd, "%s", wtxIn.GetHash().GetHex());
+#ifndef WIN32
+ // Substituting the wallet name isn't currently supported on windows
+ // because windows shell escaping has not been implemented yet:
+ // https://github.com/bitcoin/bitcoin/pull/13339#issuecomment-537384875
+ // A few ways it could be implemented in the future are described in:
+ // https://github.com/bitcoin/bitcoin/pull/13339#issuecomment-461288094
+ boost::replace_all(strCmd, "%w", ShellEscape(GetName()));
+#endif
std::thread t(runCommand, strCmd);
t.detach(); // thread runs free
}
@@ -892,8 +935,8 @@ bool CWallet::AddToWalletIfInvolvingMe(const CTransactionRef& ptx, CWalletTx::Co
// loop though all outputs
for (const CTxOut& txout: tx.vout) {
- if (auto spk_man = m_spk_man.get()) {
- spk_man->MarkUnusedAddresses(txout.scriptPubKey);
+ for (const auto& spk_man_pair : m_spk_managers) {
+ spk_man_pair.second->MarkUnusedAddresses(txout.scriptPubKey);
}
}
@@ -1107,7 +1150,7 @@ void CWallet::UpdatedBlockTip()
}
-void CWallet::BlockUntilSyncedToCurrentChain() {
+void CWallet::BlockUntilSyncedToCurrentChain() const {
AssertLockNotHeld(cs_wallet);
// Skip the queue-draining stuff if we know we're caught up with
// ::ChainActive().Tip(), otherwise put a callback in the validation interface queue and wait
@@ -1164,8 +1207,8 @@ isminetype CWallet::IsMine(const CTxDestination& dest) const
isminetype CWallet::IsMine(const CScript& script) const
{
isminetype result = ISMINE_NO;
- if (auto spk_man = m_spk_man.get()) {
- result = spk_man->IsMine(script);
+ for (const auto& spk_man_pair : m_spk_managers) {
+ result = std::max(result, spk_man_pair.second->IsMine(script));
}
return result;
}
@@ -1284,16 +1327,18 @@ CAmount CWallet::GetChange(const CTransaction& tx) const
bool CWallet::IsHDEnabled() const
{
bool result = true;
- if (auto spk_man = m_spk_man.get()) {
- result &= spk_man->IsHDEnabled();
+ for (const auto& spk_man_pair : m_spk_managers) {
+ result &= spk_man_pair.second->IsHDEnabled();
}
return result;
}
-bool CWallet::CanGetAddresses(bool internal)
+bool CWallet::CanGetAddresses(bool internal) const
{
- {
- auto spk_man = m_spk_man.get();
+ LOCK(cs_wallet);
+ if (m_spk_managers.empty()) return false;
+ for (OutputType t : OUTPUT_TYPES) {
+ auto spk_man = GetScriptPubKeyMan(t, internal);
if (spk_man && spk_man->CanGetAddresses(internal)) {
return true;
}
@@ -1362,7 +1407,7 @@ bool CWallet::DummySignInput(CTxIn &tx_in, const CTxOut &txout, bool use_max_sig
const CScript& scriptPubKey = txout.scriptPubKey;
SignatureData sigdata;
- const SigningProvider* provider = GetSigningProvider(scriptPubKey);
+ std::unique_ptr<SigningProvider> provider = GetSolvingProvider(scriptPubKey);
if (!provider) {
// We don't know about this scriptPubKey
return false;
@@ -1397,7 +1442,7 @@ bool CWallet::ImportScripts(const std::set<CScript> scripts, int64_t timestamp)
if (!spk_man) {
return false;
}
- AssertLockHeld(spk_man->cs_wallet);
+ LOCK(spk_man->cs_KeyStore);
return spk_man->ImportScripts(scripts, timestamp);
}
@@ -1407,7 +1452,7 @@ bool CWallet::ImportPrivKeys(const std::map<CKeyID, CKey>& privkey_map, const in
if (!spk_man) {
return false;
}
- AssertLockHeld(spk_man->cs_wallet);
+ LOCK(spk_man->cs_KeyStore);
return spk_man->ImportPrivKeys(privkey_map, timestamp);
}
@@ -1417,7 +1462,7 @@ bool CWallet::ImportPubKeys(const std::vector<CKeyID>& ordered_pubkeys, const st
if (!spk_man) {
return false;
}
- AssertLockHeld(spk_man->cs_wallet);
+ LOCK(spk_man->cs_KeyStore);
return spk_man->ImportPubKeys(ordered_pubkeys, pubkey_map, key_origins, add_keypool, internal, timestamp);
}
@@ -1427,7 +1472,7 @@ bool CWallet::ImportScriptPubKeys(const std::string& label, const std::set<CScri
if (!spk_man) {
return false;
}
- AssertLockHeld(spk_man->cs_wallet);
+ LOCK(spk_man->cs_KeyStore);
if (!spk_man->ImportScriptPubKeys(script_pub_keys, have_solving_data, timestamp)) {
return false;
}
@@ -1765,6 +1810,7 @@ CAmount CWalletTx::GetCachableAmount(AmountType type, const isminefilter& filter
auto& amount = m_amounts[type];
if (recalculate || !amount.m_cached[filter]) {
amount.Set(filter, type == DEBIT ? pwallet->GetDebit(*tx, filter) : pwallet->GetCredit(*tx, filter));
+ m_is_cache_empty = false;
}
return amount.m_value[filter];
}
@@ -1831,7 +1877,7 @@ CAmount CWalletTx::GetAvailableCredit(bool fUseCache, const isminefilter& filter
uint256 hashTx = GetHash();
for (unsigned int i = 0; i < tx->vout.size(); i++)
{
- if (!pwallet->IsSpent(hashTx, i) && (allow_used_addresses || !pwallet->IsUsedDestination(hashTx, i))) {
+ if (!pwallet->IsSpent(hashTx, i) && (allow_used_addresses || !pwallet->IsSpentKey(hashTx, i))) {
const CTxOut &txout = tx->vout[i];
nCredit += pwallet->GetCredit(txout, filter);
if (!MoneyRange(nCredit))
@@ -1841,6 +1887,7 @@ CAmount CWalletTx::GetAvailableCredit(bool fUseCache, const isminefilter& filter
if (allow_cache) {
m_amounts[AVAILABLE_CREDIT].Set(filter, nCredit);
+ m_is_cache_empty = false;
}
return nCredit;
@@ -2120,11 +2167,11 @@ void CWallet::AvailableCoins(interfaces::Chain::Lock& locked_chain, std::vector<
continue;
}
- if (!allow_used_addresses && IsUsedDestination(wtxid, i)) {
+ if (!allow_used_addresses && IsSpentKey(wtxid, i)) {
continue;
}
- const SigningProvider* provider = GetSigningProvider(wtx.tx->vout[i].scriptPubKey);
+ std::unique_ptr<SigningProvider> provider = GetSolvingProvider(wtx.tx->vout[i].scriptPubKey);
bool solvable = provider ? IsSolvable(*provider, wtx.tx->vout[i].scriptPubKey) : false;
bool spendable = ((mine & ISMINE_SPENDABLE) != ISMINE_NO) || (((mine & ISMINE_WATCH_ONLY) != ISMINE_NO) && (coinControl && coinControl->fAllowWatchOnly && solvable));
@@ -2167,8 +2214,8 @@ std::map<CTxDestination, std::vector<COutput>> CWallet::ListCoins(interfaces::Ch
std::vector<COutPoint> lockedCoins;
ListLockedCoins(lockedCoins);
- // Include watch-only for wallets without private keys
- const bool include_watch_only = IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
+ // Include watch-only for LegacyScriptPubKeyMan wallets without private keys
+ const bool include_watch_only = GetLegacyScriptPubKeyMan() && IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
const isminetype is_mine_filter = include_watch_only ? ISMINE_WATCH_ONLY : ISMINE_SPENDABLE;
for (const COutPoint& output : lockedCoins) {
auto it = mapWallet.find(output.hash);
@@ -2363,34 +2410,172 @@ bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAm
return res;
}
-bool CWallet::SignTransaction(CMutableTransaction& tx)
+bool CWallet::SignTransaction(CMutableTransaction& tx) const
{
AssertLockHeld(cs_wallet);
- // sign the new tx
- int nIn = 0;
+ // Build coins map
+ std::map<COutPoint, Coin> coins;
for (auto& input : tx.vin) {
std::map<uint256, CWalletTx>::const_iterator mi = mapWallet.find(input.prevout.hash);
if(mi == mapWallet.end() || input.prevout.n >= mi->second.tx->vout.size()) {
return false;
}
- const CScript& scriptPubKey = mi->second.tx->vout[input.prevout.n].scriptPubKey;
- const CAmount& amount = mi->second.tx->vout[input.prevout.n].nValue;
+ const CWalletTx& wtx = mi->second;
+ coins[input.prevout] = Coin(wtx.tx->vout[input.prevout.n], wtx.m_confirm.block_height, wtx.IsCoinBase());
+ }
+ std::map<int, std::string> input_errors;
+ return SignTransaction(tx, coins, SIGHASH_ALL, input_errors);
+}
+
+bool CWallet::SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const
+{
+ // Sign the tx with ScriptPubKeyMans
+ // Because each ScriptPubKeyMan can sign more than one input, we need to keep track of each ScriptPubKeyMan that has signed this transaction.
+ // Each iteration, we may sign more txins than the txin that is specified in that iteration.
+ // We assume that each input is signed by only one ScriptPubKeyMan.
+ std::set<uint256> visited_spk_mans;
+ for (unsigned int i = 0; i < tx.vin.size(); i++) {
+ // Get the prevout
+ CTxIn& txin = tx.vin[i];
+ auto coin = coins.find(txin.prevout);
+ if (coin == coins.end() || coin->second.IsSpent()) {
+ input_errors[i] = "Input not found or already spent";
+ continue;
+ }
+
+ // Check if this input is complete
+ SignatureData sigdata = DataFromTransaction(tx, i, coin->second.out);
+ if (sigdata.complete) {
+ continue;
+ }
+
+ // Input needs to be signed, find the right ScriptPubKeyMan
+ std::set<ScriptPubKeyMan*> spk_mans = GetScriptPubKeyMans(coin->second.out.scriptPubKey, sigdata);
+ if (spk_mans.size() == 0) {
+ input_errors[i] = "Unable to sign input, missing keys";
+ continue;
+ }
+
+ for (auto& spk_man : spk_mans) {
+ // If this spk_man has already been used to sign, skip it
+ if (visited_spk_mans.count(spk_man->GetID()) > 0) {
+ continue;
+ }
+
+ // Sign the tx.
+ // spk_man->SignTransaction will return true if the transaction is complete,
+ // so we can exit early and return true if that happens.
+ if (spk_man->SignTransaction(tx, coins, sighash, input_errors)) {
+ return true;
+ }
+
+ // Add this spk_man to visited_spk_mans so we can skip it later
+ visited_spk_mans.insert(spk_man->GetID());
+ }
+ }
+ return false;
+}
+
+TransactionError CWallet::FillPSBT(PartiallySignedTransaction& psbtx, bool& complete, int sighash_type, bool sign, bool bip32derivs) const
+{
+ LOCK(cs_wallet);
+ // Get all of the previous transactions
+ for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) {
+ const CTxIn& txin = psbtx.tx->vin[i];
+ PSBTInput& input = psbtx.inputs.at(i);
+
+ if (PSBTInputSigned(input)) {
+ continue;
+ }
+
+ // Verify input looks sane. This will check that we have at most one UTXO, either witness or non-witness.
+ if (!input.IsSane()) {
+ return TransactionError::INVALID_PSBT;
+ }
+
+ // If we have no utxo, grab it from the wallet.
+ if (!input.non_witness_utxo && input.witness_utxo.IsNull()) {
+ const uint256& txhash = txin.prevout.hash;
+ const auto it = mapWallet.find(txhash);
+ if (it != mapWallet.end()) {
+ const CWalletTx& wtx = it->second;
+ // We only need the non_witness_utxo, which is a superset of the witness_utxo.
+ // The signing code will switch to the smaller witness_utxo if this is ok.
+ input.non_witness_utxo = wtx.tx;
+ }
+ }
+ }
+
+ // Fill in information from ScriptPubKeyMans
+ // Because each ScriptPubKeyMan may be able to fill more than one input, we need to keep track of each ScriptPubKeyMan that has filled this psbt.
+ // Each iteration, we may fill more inputs than the input that is specified in that iteration.
+ // We assume that each input is filled by only one ScriptPubKeyMan
+ std::set<uint256> visited_spk_mans;
+ for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) {
+ const CTxIn& txin = psbtx.tx->vin[i];
+ PSBTInput& input = psbtx.inputs.at(i);
+
+ if (PSBTInputSigned(input)) {
+ continue;
+ }
+
+ // Get the scriptPubKey to know which ScriptPubKeyMan to use
+ CScript script;
+ if (!input.witness_utxo.IsNull()) {
+ script = input.witness_utxo.scriptPubKey;
+ } else if (input.non_witness_utxo) {
+ if (txin.prevout.n >= input.non_witness_utxo->vout.size()) {
+ return TransactionError::MISSING_INPUTS;
+ }
+ script = input.non_witness_utxo->vout[txin.prevout.n].scriptPubKey;
+ } else {
+ // There's no UTXO so we can just skip this now
+ continue;
+ }
SignatureData sigdata;
+ input.FillSignatureData(sigdata);
+ std::set<ScriptPubKeyMan*> spk_mans = GetScriptPubKeyMans(script, sigdata);
+ if (spk_mans.size() == 0) {
+ continue;
+ }
- const SigningProvider* provider = GetSigningProvider(scriptPubKey);
- if (!provider) {
- // We don't know about this scriptpbuKey;
- return false;
+ for (auto& spk_man : spk_mans) {
+ // If this spk_man has already been used to fill this PSBT, skip it
+ if (visited_spk_mans.count(spk_man->GetID()) > 0) {
+ continue;
+ }
+
+ // Fill in the information from the spk_man
+ TransactionError res = spk_man->FillPSBT(psbtx, sighash_type, sign, bip32derivs);
+ if (res != TransactionError::OK) {
+ return res;
+ }
+
+ // Add this spk_man to visited_spk_mans so we can skip it later
+ visited_spk_mans.insert(spk_man->GetID());
}
+ }
- if (!ProduceSignature(*provider, MutableTransactionSignatureCreator(&tx, nIn, amount, SIGHASH_ALL), scriptPubKey, sigdata)) {
- return false;
+ // Complete if every input is now signed
+ complete = true;
+ for (const auto& input : psbtx.inputs) {
+ complete &= PSBTInputSigned(input);
+ }
+
+ return TransactionError::OK;
+}
+
+SigningResult CWallet::SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const
+{
+ SignatureData sigdata;
+ CScript script_pub_key = GetScriptForDestination(pkhash);
+ for (const auto& spk_man_pair : m_spk_managers) {
+ if (spk_man_pair.second->CanProvide(script_pub_key, sigdata)) {
+ return spk_man_pair.second->SignMessage(message, pkhash, str_sig);
}
- UpdateInput(input, sigdata);
- nIn++;
}
- return true;
+ return SigningResult::PRIVATE_KEY_NOT_AVAILABLE;
}
bool CWallet::FundTransaction(CMutableTransaction& tx, CAmount& nFeeRet, int& nChangePosInOut, std::string& strFailReason, bool lockUnspents, const std::set<int>& setSubtractFeeFromOutputs, CCoinControl coinControl)
@@ -2839,25 +3024,9 @@ bool CWallet::CreateTransaction(interfaces::Chain::Lock& locked_chain, const std
txNew.vin.push_back(CTxIn(coin.outpoint, CScript(), nSequence));
}
- if (sign)
- {
- int nIn = 0;
- for (const auto& coin : selected_coins)
- {
- const CScript& scriptPubKey = coin.txout.scriptPubKey;
- SignatureData sigdata;
-
- const SigningProvider* provider = GetSigningProvider(scriptPubKey);
- if (!provider || !ProduceSignature(*provider, MutableTransactionSignatureCreator(&txNew, nIn, coin.txout.nValue, SIGHASH_ALL), scriptPubKey, sigdata))
- {
- strFailReason = _("Signing transaction failed").translated;
- return false;
- } else {
- UpdateInput(txNew.vin.at(nIn), sigdata);
- }
-
- nIn++;
- }
+ if (sign && !SignTransaction(txNew)) {
+ strFailReason = _("Signing transaction failed").translated;
+ return false;
}
// Return the constructed transaction data.
@@ -2954,17 +3123,17 @@ DBErrors CWallet::LoadWallet(bool& fFirstRunRet)
{
if (database->Rewrite("\x04pool"))
{
- if (auto spk_man = m_spk_man.get()) {
- spk_man->RewriteDB();
+ for (const auto& spk_man_pair : m_spk_managers) {
+ spk_man_pair.second->RewriteDB();
}
}
}
- {
- LOCK(cs_KeyStore);
- // This wallet is in its first run if all of these are empty
- fFirstRunRet = mapKeys.empty() && mapCryptedKeys.empty() && mapWatchKeys.empty() && setWatchOnly.empty() && mapScripts.empty()
- && !IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) && !IsWalletFlagSet(WALLET_FLAG_BLANK_WALLET);
+ // This wallet is in its first run if there are no ScriptPubKeyMans and it isn't a blank wallet or one with private keys disabled
+ fFirstRunRet = m_spk_managers.empty() && !IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) && !IsWalletFlagSet(WALLET_FLAG_BLANK_WALLET);
+ if (fFirstRunRet) {
+ assert(m_external_spk_managers.empty());
+ assert(m_internal_spk_managers.empty());
}
if (nLoadWalletRet != DBErrors::LOAD_OK)
@@ -2988,8 +3157,8 @@ DBErrors CWallet::ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256
{
if (database->Rewrite("\x04pool"))
{
- if (auto spk_man = m_spk_man.get()) {
- spk_man->RewriteDB();
+ for (const auto& spk_man_pair : m_spk_managers) {
+ spk_man_pair.second->RewriteDB();
}
}
}
@@ -3009,8 +3178,8 @@ DBErrors CWallet::ZapWalletTx(std::vector<CWalletTx>& vWtx)
{
if (database->Rewrite("\x04pool"))
{
- if (auto spk_man = m_spk_man.get()) {
- spk_man->RewriteDB();
+ for (const auto& spk_man_pair : m_spk_managers) {
+ spk_man_pair.second->RewriteDB();
}
}
}
@@ -3065,13 +3234,12 @@ bool CWallet::DelAddressBook(const CTxDestination& address)
return WalletBatch(*database).EraseName(EncodeDestination(address));
}
-size_t CWallet::KeypoolCountExternalKeys()
+size_t CWallet::KeypoolCountExternalKeys() const
{
AssertLockHeld(cs_wallet);
unsigned int count = 0;
- if (auto spk_man = m_spk_man.get()) {
- AssertLockHeld(spk_man->cs_wallet);
+ for (auto spk_man : GetActiveScriptPubKeyMans()) {
count += spk_man->KeypoolCountExternalKeys();
}
@@ -3083,7 +3251,7 @@ unsigned int CWallet::GetKeyPoolSize() const
AssertLockHeld(cs_wallet);
unsigned int count = 0;
- if (auto spk_man = m_spk_man.get()) {
+ for (auto spk_man : GetActiveScriptPubKeyMans()) {
count += spk_man->GetKeyPoolSize();
}
return count;
@@ -3091,8 +3259,9 @@ unsigned int CWallet::GetKeyPoolSize() const
bool CWallet::TopUpKeyPool(unsigned int kpSize)
{
+ LOCK(cs_wallet);
bool res = true;
- if (auto spk_man = m_spk_man.get()) {
+ for (auto spk_man : GetActiveScriptPubKeyMans()) {
res &= spk_man->TopUp(kpSize);
}
return res;
@@ -3103,7 +3272,7 @@ bool CWallet::GetNewDestination(const OutputType type, const std::string label,
LOCK(cs_wallet);
error.clear();
bool result = false;
- auto spk_man = m_spk_man.get();
+ auto spk_man = GetScriptPubKeyMan(type, false /* internal */);
if (spk_man) {
spk_man->TopUp();
result = spk_man->GetNewDestination(type, dest, error);
@@ -3117,6 +3286,7 @@ bool CWallet::GetNewDestination(const OutputType type, const std::string label,
bool CWallet::GetNewChangeDestination(const OutputType type, CTxDestination& dest, std::string& error)
{
+ LOCK(cs_wallet);
error.clear();
ReserveDestination reservedest(this, type);
@@ -3129,16 +3299,31 @@ bool CWallet::GetNewChangeDestination(const OutputType type, CTxDestination& des
return true;
}
-int64_t CWallet::GetOldestKeyPoolTime()
+int64_t CWallet::GetOldestKeyPoolTime() const
{
+ LOCK(cs_wallet);
int64_t oldestKey = std::numeric_limits<int64_t>::max();
- if (auto spk_man = m_spk_man.get()) {
- oldestKey = spk_man->GetOldestKeyPoolTime();
+ for (const auto& spk_man_pair : m_spk_managers) {
+ oldestKey = std::min(oldestKey, spk_man_pair.second->GetOldestKeyPoolTime());
}
return oldestKey;
}
-std::map<CTxDestination, CAmount> CWallet::GetAddressBalances(interfaces::Chain::Lock& locked_chain)
+void CWallet::MarkDestinationsDirty(const std::set<CTxDestination>& destinations) {
+ for (auto& entry : mapWallet) {
+ CWalletTx& wtx = entry.second;
+ if (wtx.m_is_cache_empty) continue;
+ for (unsigned int i = 0; i < wtx.tx->vout.size(); i++) {
+ CTxDestination dst;
+ if (ExtractDestination(wtx.tx->vout[i].scriptPubKey, dst) && destinations.count(dst)) {
+ wtx.MarkDirty();
+ break;
+ }
+ }
+ }
+}
+
+std::map<CTxDestination, CAmount> CWallet::GetAddressBalances(interfaces::Chain::Lock& locked_chain) const
{
std::map<CTxDestination, CAmount> balances;
@@ -3179,7 +3364,7 @@ std::map<CTxDestination, CAmount> CWallet::GetAddressBalances(interfaces::Chain:
return balances;
}
-std::set< std::set<CTxDestination> > CWallet::GetAddressGroupings()
+std::set< std::set<CTxDestination> > CWallet::GetAddressGroupings() const
{
AssertLockHeld(cs_wallet);
std::set< std::set<CTxDestination> > groupings;
@@ -3288,7 +3473,7 @@ std::set<CTxDestination> CWallet::GetLabelAddresses(const std::string& label) co
bool ReserveDestination::GetReservedDestination(CTxDestination& dest, bool internal)
{
- m_spk_man = pwallet->GetLegacyScriptPubKeyMan();
+ m_spk_man = pwallet->GetScriptPubKeyMan(type, internal);
if (!m_spk_man) {
return false;
}
@@ -3370,7 +3555,7 @@ void CWallet::GetKeyBirthTimes(interfaces::Chain::Lock& locked_chain, std::map<C
LegacyScriptPubKeyMan* spk_man = GetLegacyScriptPubKeyMan();
assert(spk_man != nullptr);
- AssertLockHeld(spk_man->cs_wallet);
+ LOCK(spk_man->cs_KeyStore);
// get birth times for keys with metadata
for (const auto& entry : spk_man->mapKeyMetadata) {
@@ -3665,7 +3850,7 @@ std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain,
return nullptr;
}
- if (auto spk_man = walletInstance->m_spk_man.get()) {
+ for (auto spk_man : walletInstance->GetActiveScriptPubKeyMans()) {
if (!spk_man->Upgrade(prev_version, error)) {
return nullptr;
}
@@ -3678,8 +3863,13 @@ std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain,
walletInstance->SetMinVersion(FEATURE_LATEST);
walletInstance->SetWalletFlags(wallet_creation_flags, false);
+
+ // Always create LegacyScriptPubKeyMan for now
+ walletInstance->SetupLegacyScriptPubKeyMan();
+
if (!(wallet_creation_flags & (WALLET_FLAG_DISABLE_PRIVATE_KEYS | WALLET_FLAG_BLANK_WALLET))) {
- if (auto spk_man = walletInstance->m_spk_man.get()) {
+ LOCK(walletInstance->cs_wallet);
+ for (auto spk_man : walletInstance->GetActiveScriptPubKeyMans()) {
if (!spk_man->SetupGeneration()) {
error = _("Unable to generate initial keys").translated;
return nullptr;
@@ -3694,9 +3884,10 @@ std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain,
error = strprintf(_("Error loading %s: Private keys can only be disabled during creation").translated, walletFile);
return NULL;
} else if (walletInstance->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) {
- if (walletInstance->m_spk_man) {
- if (walletInstance->m_spk_man->HavePrivateKeys()) {
+ for (auto spk_man : walletInstance->GetActiveScriptPubKeyMans()) {
+ if (spk_man->HavePrivateKeys()) {
warnings.push_back(strprintf(_("Warning: Private keys detected in wallet {%s} with disabled private keys").translated, walletFile));
+ break;
}
}
}
@@ -3849,8 +4040,9 @@ std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain,
// No need to read and scan block if block was created before
// our wallet birthday (as adjusted for block time variability)
- Optional<int64_t> time_first_key;
- if (auto spk_man = walletInstance->m_spk_man.get()) {
+ // Initializing 'time_first_key' this way is a workaround for GCC bug #47679 (present since GCC 4.6.0).
+ Optional<int64_t> time_first_key = MakeOptional(false, int64_t());
+ for (auto spk_man : walletInstance->GetAllScriptPubKeyMans()) {
int64_t time = spk_man->GetTimeFirstKey();
if (!time_first_key || time < *time_first_key) time_first_key = time;
}
@@ -3895,7 +4087,12 @@ std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain,
}
}
- chain.loadWallet(interfaces::MakeWallet(walletInstance));
+ {
+ LOCK(cs_wallets);
+ for (auto& load_wallet : g_load_wallet_fns) {
+ load_wallet(interfaces::MakeWallet(walletInstance));
+ }
+ }
// Register with the validation interface. It's ok to do this after rescan since we're still holding locked_chain.
walletInstance->handleNotifications();
@@ -3929,7 +4126,7 @@ void CWallet::postInitProcess()
chain().requestMempoolTransactions(*this);
}
-bool CWallet::BackupWallet(const std::string& strDest)
+bool CWallet::BackupWallet(const std::string& strDest) const
{
return database->Backup(strDest);
}
@@ -4013,7 +4210,7 @@ bool CWallet::IsLocked() const
if (!IsCrypted()) {
return false;
}
- LOCK(cs_KeyStore);
+ LOCK(cs_wallet);
return vMasterKey.empty();
}
@@ -4023,7 +4220,7 @@ bool CWallet::Lock()
return false;
{
- LOCK(cs_KeyStore);
+ LOCK(cs_wallet);
vMasterKey.clear();
}
@@ -4034,9 +4231,9 @@ bool CWallet::Lock()
bool CWallet::Unlock(const CKeyingMaterial& vMasterKeyIn, bool accept_no_keys)
{
{
- LOCK(cs_KeyStore);
- if (m_spk_man) {
- if (!m_spk_man->CheckDecryptionKey(vMasterKeyIn, accept_no_keys)) {
+ LOCK(cs_wallet);
+ for (const auto& spk_man_pair : m_spk_managers) {
+ if (!spk_man_pair.second->CheckDecryptionKey(vMasterKeyIn, accept_no_keys)) {
return false;
}
}
@@ -4046,24 +4243,113 @@ bool CWallet::Unlock(const CKeyingMaterial& vMasterKeyIn, bool accept_no_keys)
return true;
}
+std::set<ScriptPubKeyMan*> CWallet::GetActiveScriptPubKeyMans() const
+{
+ std::set<ScriptPubKeyMan*> spk_mans;
+ for (bool internal : {false, true}) {
+ for (OutputType t : OUTPUT_TYPES) {
+ auto spk_man = GetScriptPubKeyMan(t, internal);
+ if (spk_man) {
+ spk_mans.insert(spk_man);
+ }
+ }
+ }
+ return spk_mans;
+}
+
+std::set<ScriptPubKeyMan*> CWallet::GetAllScriptPubKeyMans() const
+{
+ std::set<ScriptPubKeyMan*> spk_mans;
+ for (const auto& spk_man_pair : m_spk_managers) {
+ spk_mans.insert(spk_man_pair.second.get());
+ }
+ return spk_mans;
+}
+
+ScriptPubKeyMan* CWallet::GetScriptPubKeyMan(const OutputType& type, bool internal) const
+{
+ const std::map<OutputType, ScriptPubKeyMan*>& spk_managers = internal ? m_internal_spk_managers : m_external_spk_managers;
+ std::map<OutputType, ScriptPubKeyMan*>::const_iterator it = spk_managers.find(type);
+ if (it == spk_managers.end()) {
+ WalletLogPrintf("%s scriptPubKey Manager for output type %d does not exist\n", internal ? "Internal" : "External", static_cast<int>(type));
+ return nullptr;
+ }
+ return it->second;
+}
+
+std::set<ScriptPubKeyMan*> CWallet::GetScriptPubKeyMans(const CScript& script, SignatureData& sigdata) const
+{
+ std::set<ScriptPubKeyMan*> spk_mans;
+ for (const auto& spk_man_pair : m_spk_managers) {
+ if (spk_man_pair.second->CanProvide(script, sigdata)) {
+ spk_mans.insert(spk_man_pair.second.get());
+ }
+ }
+ return spk_mans;
+}
+
ScriptPubKeyMan* CWallet::GetScriptPubKeyMan(const CScript& script) const
{
- return m_spk_man.get();
+ SignatureData sigdata;
+ for (const auto& spk_man_pair : m_spk_managers) {
+ if (spk_man_pair.second->CanProvide(script, sigdata)) {
+ return spk_man_pair.second.get();
+ }
+ }
+ return nullptr;
}
-const SigningProvider* CWallet::GetSigningProvider(const CScript& script) const
+ScriptPubKeyMan* CWallet::GetScriptPubKeyMan(const uint256& id) const
{
- return m_spk_man.get();
+ if (m_spk_managers.count(id) > 0) {
+ return m_spk_managers.at(id).get();
+ }
+ return nullptr;
}
-const SigningProvider* CWallet::GetSigningProvider(const CScript& script, SignatureData& sigdata) const
+std::unique_ptr<SigningProvider> CWallet::GetSolvingProvider(const CScript& script) const
{
- return m_spk_man.get();
+ SignatureData sigdata;
+ return GetSolvingProvider(script, sigdata);
+}
+
+std::unique_ptr<SigningProvider> CWallet::GetSolvingProvider(const CScript& script, SignatureData& sigdata) const
+{
+ for (const auto& spk_man_pair : m_spk_managers) {
+ if (spk_man_pair.second->CanProvide(script, sigdata)) {
+ return spk_man_pair.second->GetSolvingProvider(script);
+ }
+ }
+ return nullptr;
}
LegacyScriptPubKeyMan* CWallet::GetLegacyScriptPubKeyMan() const
{
- return m_spk_man.get();
+ // Legacy wallets only have one ScriptPubKeyMan, which is a LegacyScriptPubKeyMan.
+ // Every entry in m_internal_spk_managers and m_external_spk_managers points to that same LegacyScriptPubKeyMan.
+ auto it = m_internal_spk_managers.find(OutputType::LEGACY);
+ if (it == m_internal_spk_managers.end()) return nullptr;
+ return dynamic_cast<LegacyScriptPubKeyMan*>(it->second);
+}
+
+LegacyScriptPubKeyMan* CWallet::GetOrCreateLegacyScriptPubKeyMan()
+{
+ SetupLegacyScriptPubKeyMan();
+ return GetLegacyScriptPubKeyMan();
+}
+
+void CWallet::SetupLegacyScriptPubKeyMan()
+{
+ if (!m_internal_spk_managers.empty() || !m_external_spk_managers.empty() || !m_spk_managers.empty()) {
+ return;
+ }
+
+ auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new LegacyScriptPubKeyMan(*this));
+ for (const auto& type : OUTPUT_TYPES) {
+ m_internal_spk_managers[type] = spk_manager.get();
+ m_external_spk_managers[type] = spk_manager.get();
+ }
+ m_spk_managers[spk_manager->GetID()] = std::move(spk_manager);
}
const CKeyingMaterial& CWallet::GetEncryptionKey() const
@@ -4075,3 +4361,11 @@ bool CWallet::HasEncryptionKeys() const
{
return !mapMasterKeys.empty();
}
+
+void CWallet::ConnectScriptPubKeyManNotifiers()
+{
+ for (const auto& spk_man : GetActiveScriptPubKeyMans()) {
+ spk_man->NotifyWatchonlyChanged.connect(NotifyWatchonlyChanged);
+ spk_man->NotifyCanGetAddressesChanged.connect(NotifyCanGetAddressesChanged);
+ }
+}
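Not part of the patch above — a minimal, hypothetical sketch of how a caller might drive the new per-ScriptPubKeyMan signing overload that wallet.cpp now exposes. The surrounding wallet, transaction, and UTXO lookup are assumed to exist elsewhere; `LookupCoin` is a placeholder, not an API from the diff.

    // Sketch only: `wallet` is a CWallet and `mtx` a CMutableTransaction whose
    // inputs spend outputs the caller already knows about. Build the coins map
    // the same way the single-argument SignTransaction overload does, then delegate.
    std::map<COutPoint, Coin> coins;
    for (const CTxIn& txin : mtx.vin) {
        coins[txin.prevout] = LookupCoin(txin.prevout); // placeholder for the caller's own UTXO source
    }
    std::map<int, std::string> input_errors;
    const bool complete = wallet.SignTransaction(mtx, coins, SIGHASH_ALL, input_errors);
    if (!complete) {
        for (const auto& err : input_errors) {
            // err.first is the input index, err.second the reason,
            // e.g. "Input not found or already spent" or "Unable to sign input, missing keys"
        }
    }

The per-input error map mirrors `input_errors` as populated above, so callers can report exactly which inputs could not be signed.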
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index c4511601de..0c86a0c1e8 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -11,8 +11,10 @@
#include <interfaces/handler.h>
#include <outputtype.h>
#include <policy/feerate.h>
+#include <psbt.h>
#include <tinyformat.h>
#include <ui_interface.h>
+#include <util/message.h>
#include <util/strencodings.h>
#include <util/system.h>
#include <validationinterface.h>
@@ -35,6 +37,8 @@
#include <boost/signals2/signal.hpp>
+using LoadWalletFn = std::function<void(std::unique_ptr<interfaces::Wallet> wallet)>;
+
//! Explicitly unload and delete the wallet.
//! Blocks the current thread after signaling the unload intent so that all
//! wallet clients release the wallet.
@@ -48,6 +52,7 @@ bool HasWallets();
std::vector<std::shared_ptr<CWallet>> GetWallets();
std::shared_ptr<CWallet> GetWallet(const std::string& name);
std::shared_ptr<CWallet> LoadWallet(interfaces::Chain& chain, const WalletLocation& location, std::string& error, std::vector<std::string>& warnings);
+std::unique_ptr<interfaces::Handler> HandleLoadWallet(LoadWalletFn load_wallet);
enum class WalletCreationStatus {
SUCCESS,
@@ -138,7 +143,7 @@ class ReserveDestination
{
protected:
//! The wallet to reserve from
- CWallet* const pwallet;
+ const CWallet* const pwallet;
//! The ScriptPubKeyMan to reserve from. Based on type when GetReservedDestination is called
ScriptPubKeyMan* m_spk_man{nullptr};
OutputType const type;
@@ -310,6 +315,13 @@ public:
enum AmountType { DEBIT, CREDIT, IMMATURE_CREDIT, AVAILABLE_CREDIT, AMOUNTTYPE_ENUM_ELEMENTS };
CAmount GetCachableAmount(AmountType type, const isminefilter& filter, bool recalculate = false) const;
mutable CachableAmount m_amounts[AMOUNTTYPE_ENUM_ELEMENTS];
+ /**
+ * This flag is true if all m_amounts caches are empty. This is particularly
+ * useful in places where MarkDirty is conditionally called and the
+ * condition can be expensive and thus can be skipped if the flag is true.
+ * See MarkDestinationsDirty.
+ */
+ mutable bool m_is_cache_empty{true};
mutable bool fChangeCached;
mutable bool fInMempool;
mutable CAmount nChangeCached;
@@ -436,6 +448,7 @@ public:
m_amounts[IMMATURE_CREDIT].Reset();
m_amounts[AVAILABLE_CREDIT].Reset();
fChangeCached = false;
+ m_is_cache_empty = true;
}
void BindWallet(CWallet *pwalletIn)
@@ -595,7 +608,7 @@ class WalletRescanReserver; //forward declarations for ScanForWalletTransactions
class CWallet final : public WalletStorage, private interfaces::Chain::Notifications
{
private:
- CKeyingMaterial vMasterKey GUARDED_BY(cs_KeyStore);
+ CKeyingMaterial vMasterKey GUARDED_BY(cs_wallet);
bool Unlock(const CKeyingMaterial& vMasterKeyIn, bool accept_no_keys = false);
@@ -691,12 +704,19 @@ private:
*/
int m_last_block_processed_height GUARDED_BY(cs_wallet) = -1;
+ std::map<OutputType, ScriptPubKeyMan*> m_external_spk_managers;
+ std::map<OutputType, ScriptPubKeyMan*> m_internal_spk_managers;
+
+ // Indexed by a unique identifier produced by each ScriptPubKeyMan using
+ // ScriptPubKeyMan::GetID. In many cases it will be the hash of an internal structure
+ std::map<uint256, std::unique_ptr<ScriptPubKeyMan>> m_spk_managers;
+
public:
/*
* Main wallet lock.
* This lock protects all the fields added by CWallet.
*/
- mutable CCriticalSection cs_wallet;
+ mutable RecursiveMutex cs_wallet;
/** Get database handle used by this wallet. Ideally this function would
* not be necessary.
@@ -798,10 +818,9 @@ public:
bool IsSpent(const uint256& hash, unsigned int n) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- // Whether this or any UTXO with the same CTxDestination has been spent.
- bool IsUsedDestination(const CTxDestination& dst) const;
- bool IsUsedDestination(const uint256& hash, unsigned int n) const;
- void SetUsedDestinationState(WalletBatch& batch, const uint256& hash, unsigned int n, bool used) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ // Whether this or any known UTXO with the same single key has been spent.
+ bool IsSpentKey(const uint256& hash, unsigned int n) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ void SetSpentKeyState(WalletBatch& batch, const uint256& hash, unsigned int n, bool used, std::set<CTxDestination>& tx_destinations) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
std::vector<OutputGroup> GroupOutputs(const std::vector<COutput>& outputs, bool single_coin) const;
@@ -815,8 +834,8 @@ public:
* Rescan abort properties
*/
void AbortRescan() { fAbortRescan = true; }
- bool IsAbortingRescan() { return fAbortRescan; }
- bool IsScanning() { return fScanningWallet; }
+ bool IsAbortingRescan() const { return fAbortRescan; }
+ bool IsScanning() const { return fScanningWallet; }
int64_t ScanningDuration() const { return fScanningWallet ? GetTimeMillis() - m_scanning_start : 0; }
double ScanningProgress() const { return fScanningWallet ? (double) m_scanning_progress : 0; }
@@ -899,7 +918,30 @@ public:
* calling CreateTransaction();
*/
bool FundTransaction(CMutableTransaction& tx, CAmount& nFeeRet, int& nChangePosInOut, std::string& strFailReason, bool lockUnspents, const std::set<int>& setSubtractFeeFromOutputs, CCoinControl);
- bool SignTransaction(CMutableTransaction& tx) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ // Fetch the inputs and sign with SIGHASH_ALL.
+ bool SignTransaction(CMutableTransaction& tx) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ // Sign the tx given the input coins and sighash.
+ bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const;
+ SigningResult SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const;
+
+ /**
+ * Fills out a PSBT with information from the wallet. Fills in UTXOs if we have
+ * them. Tries to sign if sign=true. Sets `complete` if the PSBT is now complete
+ * (i.e. has all required signatures or signature-parts, and is ready to
+ * finalize.) Returns a TransactionError other than OK if something goes wrong.
+ *
+ * @param[in] psbtx PartiallySignedTransaction to fill in
+ * @param[out] complete indicates whether the PSBT is now complete
+ * @param[in] sighash_type the sighash type to use when signing (if PSBT does not specify)
+ * @param[in] sign whether to sign or not
+ * @param[in] bip32derivs whether to fill in bip32 derivation information if available
+ * @return TransactionError::OK on success, or an error code describing the failure
+ */
+ TransactionError FillPSBT(PartiallySignedTransaction& psbtx,
+ bool& complete,
+ int sighash_type = 1 /* SIGHASH_ALL */,
+ bool sign = true,
+ bool bip32derivs = true) const;
/**
* Create a new transaction paying the recipients with a set of coins
@@ -913,9 +955,9 @@ public:
* Should be called after CreateTransaction unless you want to abort
* broadcasting the transaction.
*
- * @param tx[in] The transaction to be broadcast.
- * @param mapValue[in] key-values to be set on the transaction.
- * @param orderForm[in] BIP 70 / BIP 21 order form details to be set on the transaction.
+ * @param[in] tx The transaction to be broadcast.
+ * @param[in] mapValue key-values to be set on the transaction.
+ * @param[in] orderForm BIP 70 / BIP 21 order form details to be set on the transaction.
*/
void CommitTransaction(CTransactionRef tx, mapValue_t mapValue, std::vector<std::pair<std::string, std::string>> orderForm);
@@ -951,16 +993,22 @@ public:
/** Absolute maximum transaction fee (in satoshis) used by default for the wallet */
CAmount m_default_max_tx_fee{DEFAULT_TRANSACTION_MAXFEE};
- size_t KeypoolCountExternalKeys() EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ size_t KeypoolCountExternalKeys() const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
bool TopUpKeyPool(unsigned int kpSize = 0);
- int64_t GetOldestKeyPoolTime();
+ int64_t GetOldestKeyPoolTime() const;
- std::set<std::set<CTxDestination>> GetAddressGroupings() EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- std::map<CTxDestination, CAmount> GetAddressBalances(interfaces::Chain::Lock& locked_chain);
+ std::set<std::set<CTxDestination>> GetAddressGroupings() const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ std::map<CTxDestination, CAmount> GetAddressBalances(interfaces::Chain::Lock& locked_chain) const;
std::set<CTxDestination> GetLabelAddresses(const std::string& label) const;
+ /**
+ * Marks all outputs paying to any of the given destinations dirty, so their
+ * cached amounts are reset and do not return outdated information.
+ */
+ void MarkDestinationsDirty(const std::set<CTxDestination>& destinations) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+
bool GetNewDestination(const OutputType type, const std::string label, CTxDestination& dest, std::string& error);
bool GetNewChangeDestination(const OutputType type, CTxDestination& dest, std::string& error);
@@ -1004,7 +1052,7 @@ public:
bool SetMaxVersion(int nVersion);
//! get the current wallet format (the oldest client version guaranteed to understand this wallet)
- int GetVersion() { LOCK(cs_wallet); return nWalletVersion; }
+ int GetVersion() const { LOCK(cs_wallet); return nWalletVersion; }
//! Get wallet transactions that conflict with given transaction (spend same outputs)
std::set<uint256> GetConflicts(const uint256& txid) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
@@ -1075,13 +1123,13 @@ public:
*/
void postInitProcess();
- bool BackupWallet(const std::string& strDest);
+ bool BackupWallet(const std::string& strDest) const;
/* Returns true if HD is enabled */
bool IsHDEnabled() const;
/* Returns true if the wallet can give out new addresses. This means it has keys in the keypool or can generate new keys */
- bool CanGetAddresses(bool internal = false);
+ bool CanGetAddresses(bool internal = false) const;
/**
* Blocks until the wallet state is up-to-date to /at least/ the current
@@ -1089,7 +1137,7 @@ public:
* Obviously holding cs_main/cs_wallet when going into this call may cause
* deadlock
*/
- void BlockUntilSyncedToCurrentChain() LOCKS_EXCLUDED(cs_main, cs_wallet);
+ void BlockUntilSyncedToCurrentChain() const LOCKS_EXCLUDED(cs_main, cs_wallet);
/** set a single wallet flag */
void SetWalletFlag(uint64_t flags);
@@ -1116,28 +1164,37 @@ public:
LogPrintf(("%s " + fmt).c_str(), GetDisplayName(), parameters...);
};
+ //! Returns all unique ScriptPubKeyMans in m_internal_spk_managers and m_external_spk_managers
+ std::set<ScriptPubKeyMan*> GetActiveScriptPubKeyMans() const;
+
+ //! Returns all unique ScriptPubKeyMans
+ std::set<ScriptPubKeyMan*> GetAllScriptPubKeyMans() const;
+
+ //! Get the ScriptPubKeyMan for the given OutputType and internal/external chain.
+ ScriptPubKeyMan* GetScriptPubKeyMan(const OutputType& type, bool internal) const;
+
//! Get the ScriptPubKeyMan for a script
ScriptPubKeyMan* GetScriptPubKeyMan(const CScript& script) const;
+ //! Get the ScriptPubKeyMan by id
+ ScriptPubKeyMan* GetScriptPubKeyMan(const uint256& id) const;
+
+ //! Get all of the ScriptPubKeyMans for a script given additional information in sigdata (populated by e.g. a psbt)
+ std::set<ScriptPubKeyMan*> GetScriptPubKeyMans(const CScript& script, SignatureData& sigdata) const;
//! Get the SigningProvider for a script
- const SigningProvider* GetSigningProvider(const CScript& script) const;
- const SigningProvider* GetSigningProvider(const CScript& script, SignatureData& sigdata) const;
+ std::unique_ptr<SigningProvider> GetSolvingProvider(const CScript& script) const;
+ std::unique_ptr<SigningProvider> GetSolvingProvider(const CScript& script, SignatureData& sigdata) const;
+ //! Get the LegacyScriptPubKeyMan which is used for all types, internal, and external.
LegacyScriptPubKeyMan* GetLegacyScriptPubKeyMan() const;
+ LegacyScriptPubKeyMan* GetOrCreateLegacyScriptPubKeyMan();
+
+ //! Make a LegacyScriptPubKeyMan and set it for all types, internal, and external.
+ void SetupLegacyScriptPubKeyMan();
const CKeyingMaterial& GetEncryptionKey() const override;
bool HasEncryptionKeys() const override;
- // Temporary LegacyScriptPubKeyMan accessors and aliases.
- friend class LegacyScriptPubKeyMan;
- std::unique_ptr<LegacyScriptPubKeyMan> m_spk_man = MakeUnique<LegacyScriptPubKeyMan>(*this);
- CCriticalSection& cs_KeyStore = m_spk_man->cs_KeyStore;
- LegacyScriptPubKeyMan::KeyMap& mapKeys GUARDED_BY(cs_KeyStore) = m_spk_man->mapKeys;
- LegacyScriptPubKeyMan::ScriptMap& mapScripts GUARDED_BY(cs_KeyStore) = m_spk_man->mapScripts;
- LegacyScriptPubKeyMan::CryptedKeyMap& mapCryptedKeys GUARDED_BY(cs_KeyStore) = m_spk_man->mapCryptedKeys;
- LegacyScriptPubKeyMan::WatchOnlySet& setWatchOnly GUARDED_BY(cs_KeyStore) = m_spk_man->setWatchOnly;
- LegacyScriptPubKeyMan::WatchKeyMap& mapWatchKeys GUARDED_BY(cs_KeyStore) = m_spk_man->mapWatchKeys;
-
/** Get last block processed height */
int GetLastBlockHeight() const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet)
{
@@ -1152,6 +1209,9 @@ public:
m_last_block_processed_height = block_height;
m_last_block_processed = block_hash;
};
+
+ //! Connect the signals from ScriptPubKeyMans to the signals in CWallet
+ void ConnectScriptPubKeyManNotifiers();
};
/**
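Again not part of the diff — a hedged sketch of how the new CWallet::FillPSBT declaration above might be used, assuming `wallet` and a decoded `psbtx` (PartiallySignedTransaction) already exist:

    // Sketch only; the defaults follow the declaration above
    // (sighash_type = SIGHASH_ALL, sign = true, bip32derivs = true).
    bool complete = false;
    const TransactionError err = wallet.FillPSBT(psbtx, complete);
    if (err != TransactionError::OK) {
        // e.g. TransactionError::INVALID_PSBT or TransactionError::MISSING_INPUTS, as returned above
    } else if (complete) {
        // every input is now signed; the PSBT is ready to be finalized and broadcast
    } else {
        // the wallet filled what it could; hand the PSBT to the next signer
    }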
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index 7d04b04764..a1928f45c4 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -196,7 +196,7 @@ public:
static bool
ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
- CWalletScanState &wss, std::string& strType, std::string& strErr) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet, pwallet->GetLegacyScriptPubKeyMan()->cs_wallet)
+ CWalletScanState &wss, std::string& strType, std::string& strErr) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
{
try {
// Unserialize
@@ -251,7 +251,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
char fYes;
ssValue >> fYes;
if (fYes == '1') {
- pwallet->GetLegacyScriptPubKeyMan()->LoadWatchOnly(script);
+ pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadWatchOnly(script);
}
} else if (strType == DBKeys::KEY) {
CPubKey vchPubKey;
@@ -303,7 +303,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
strErr = "Error reading wallet database: CPrivKey corrupt";
return false;
}
- if (!pwallet->GetLegacyScriptPubKeyMan()->LoadKey(key, vchPubKey))
+ if (!pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadKey(key, vchPubKey))
{
strErr = "Error reading wallet database: LegacyScriptPubKeyMan::LoadKey failed";
return false;
@@ -334,7 +334,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
ssValue >> vchPrivKey;
wss.nCKeys++;
- if (!pwallet->GetLegacyScriptPubKeyMan()->LoadCryptedKey(vchPubKey, vchPrivKey))
+ if (!pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadCryptedKey(vchPubKey, vchPrivKey))
{
strErr = "Error reading wallet database: LegacyScriptPubKeyMan::LoadCryptedKey failed";
return false;
@@ -346,14 +346,14 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
CKeyMetadata keyMeta;
ssValue >> keyMeta;
wss.nKeyMeta++;
- pwallet->GetLegacyScriptPubKeyMan()->LoadKeyMetadata(vchPubKey.GetID(), keyMeta);
+ pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadKeyMetadata(vchPubKey.GetID(), keyMeta);
} else if (strType == DBKeys::WATCHMETA) {
CScript script;
ssKey >> script;
CKeyMetadata keyMeta;
ssValue >> keyMeta;
wss.nKeyMeta++;
- pwallet->GetLegacyScriptPubKeyMan()->LoadScriptMetadata(CScriptID(script), keyMeta);
+ pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadScriptMetadata(CScriptID(script), keyMeta);
} else if (strType == DBKeys::DEFAULTKEY) {
// We don't want or need the default key, but if there is one set,
// we want to make sure that it is valid so that we can detect corruption
@@ -369,13 +369,13 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
CKeyPool keypool;
ssValue >> keypool;
- pwallet->GetLegacyScriptPubKeyMan()->LoadKeyPool(nIndex, keypool);
+ pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadKeyPool(nIndex, keypool);
} else if (strType == DBKeys::CSCRIPT) {
uint160 hash;
ssKey >> hash;
CScript script;
ssValue >> script;
- if (!pwallet->GetLegacyScriptPubKeyMan()->LoadCScript(script))
+ if (!pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadCScript(script))
{
strErr = "Error reading wallet database: LegacyScriptPubKeyMan::LoadCScript failed";
return false;
@@ -391,7 +391,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
} else if (strType == DBKeys::HDCHAIN) {
CHDChain chain;
ssValue >> chain;
- pwallet->GetLegacyScriptPubKeyMan()->SetHDChain(chain, true);
+ pwallet->GetOrCreateLegacyScriptPubKeyMan()->SetHDChain(chain, true);
} else if (strType == DBKeys::FLAGS) {
uint64_t flags;
ssValue >> flags;
@@ -434,7 +434,6 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet)
DBErrors result = DBErrors::LOAD_OK;
LOCK(pwallet->cs_wallet);
- AssertLockHeld(pwallet->GetLegacyScriptPubKeyMan()->cs_wallet);
try {
int nMinVersion = 0;
if (m_batch.Read(DBKeys::MINVERSION, nMinVersion)) {
@@ -516,8 +515,9 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet)
// nTimeFirstKey is only reliable if all keys have metadata
if ((wss.nKeys + wss.nCKeys + wss.nWatchKeys) != wss.nKeyMeta) {
- auto spk_man = pwallet->GetLegacyScriptPubKeyMan();
+ auto spk_man = pwallet->GetOrCreateLegacyScriptPubKeyMan();
if (spk_man) {
+ LOCK(spk_man->cs_KeyStore);
spk_man->UpdateTimeFirstKey(1);
}
}
@@ -713,7 +713,6 @@ bool WalletBatch::RecoverKeysOnlyFilter(void *callbackData, CDataStream ssKey, C
{
// Required in LoadKeyMetadata():
LOCK(dummyWallet->cs_wallet);
- AssertLockHeld(dummyWallet->GetLegacyScriptPubKeyMan()->cs_wallet);
fReadOK = ReadKeyValue(dummyWallet, ssKey, ssValue,
dummyWss, strType, strErr);
}
diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h
index 9b97318cd3..1a65125480 100644
--- a/src/wallet/walletdb.h
+++ b/src/wallet/walletdb.h
@@ -124,7 +124,7 @@ public:
std::string hdKeypath; //optional HD/bip32 keypath. Still used to determine whether a key is a seed. Also kept for backwards compatibility
CKeyID hd_seed_id; //id of the HD seed used to derive this key
KeyOriginInfo key_origin; // Key origin info with path and fingerprint
- bool has_key_origin = false; //< Whether the key_origin is useful
+ bool has_key_origin = false; //!< Whether the key_origin is useful
CKeyMetadata()
{
diff --git a/src/wallet/wallettool.cpp b/src/wallet/wallettool.cpp
index dc0cac60bd..fbfdf9dd6b 100644
--- a/src/wallet/wallettool.cpp
+++ b/src/wallet/wallettool.cpp
@@ -27,6 +27,7 @@ static std::shared_ptr<CWallet> CreateWallet(const std::string& name, const fs::
}
// dummy chain interface
std::shared_ptr<CWallet> wallet_instance(new CWallet(nullptr /* chain */, WalletLocation(name), WalletDatabase::Create(path)), WalletToolReleaseWallet);
+ LOCK(wallet_instance->cs_wallet);
bool first_run = true;
DBErrors load_wallet_ret = wallet_instance->LoadWallet(first_run);
if (load_wallet_ret != DBErrors::LOAD_OK) {
@@ -37,7 +38,7 @@ static std::shared_ptr<CWallet> CreateWallet(const std::string& name, const fs::
wallet_instance->SetMinVersion(FEATURE_HD_SPLIT);
// generate a new HD seed
- auto spk_man = wallet_instance->GetLegacyScriptPubKeyMan();
+ auto spk_man = wallet_instance->GetOrCreateLegacyScriptPubKeyMan();
CPubKey seed = spk_man->GenerateNewSeed();
spk_man->SetHDSeed(seed);
diff --git a/src/zmq/zmqpublishnotifier.cpp b/src/zmq/zmqpublishnotifier.cpp
index 0dab6a06d5..04806903c2 100644
--- a/src/zmq/zmqpublishnotifier.cpp
+++ b/src/zmq/zmqpublishnotifier.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2015-2019 The Bitcoin Core developers
+// Copyright (c) 2015-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -112,7 +112,8 @@ bool CZMQAbstractPublishNotifier::Initialize(void *pcontext)
void CZMQAbstractPublishNotifier::Shutdown()
{
- assert(psocket);
+ // Early return if Initialize was not called
+ if (!psocket) return;
int count = mapPublishNotifiers.count(address);
diff --git a/src/zmq/zmqrpc.cpp b/src/zmq/zmqrpc.cpp
index 5652877f3c..0fbefb6023 100644
--- a/src/zmq/zmqrpc.cpp
+++ b/src/zmq/zmqrpc.cpp
@@ -19,14 +19,15 @@ UniValue getzmqnotifications(const JSONRPCRequest& request)
"\nReturns information about the active ZeroMQ notifications.\n",
{},
RPCResult{
- "[\n"
- " { (json object)\n"
- " \"type\": \"pubhashtx\", (string) Type of notification\n"
- " \"address\": \"...\", (string) Address of the publisher\n"
- " \"hwm\": n (numeric) Outbound message high water mark\n"
- " },\n"
- " ...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "type", "Type of notification"},
+ {RPCResult::Type::STR, "address", "Address of the publisher"},
+ {RPCResult::Type::NUM, "hwm", "Outbound message high water mark"},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("getzmqnotifications", "")
diff --git a/test/README.md b/test/README.md
index c3e4ae9ad2..e1dab92a06 100644
--- a/test/README.md
+++ b/test/README.md
@@ -145,7 +145,7 @@ levels using the logger included in the test_framework, e.g.
`test_framework.log` and no logs are output to the console.
- when run directly, *all* logs are written to `test_framework.log` and INFO
level and above are output to the console.
-- when run on Travis, no logs are output to the console. However, if a test
+- when run by [our CI (Continuous Integration)](/ci/README.md), no logs are output to the console. However, if a test
fails, the `test_framework.log` and bitcoind `debug.log`s will all be dumped
to the console to help troubleshooting.
diff --git a/test/functional/README.md b/test/functional/README.md
index 77a9ce9acb..6582c1cbcd 100644
--- a/test/functional/README.md
+++ b/test/functional/README.md
@@ -51,10 +51,13 @@ don't have test cases for.
#### General test-writing advice
+- Instead of inline comments or no test documentation at all, log the comments to the test log, e.g.
+ `self.log.info('Create enough transactions to fill a block')`. Logs make the test code easier to read and the test
+ logic easier [to debug](/test/README.md#test-logging).
- Set `self.num_nodes` to the minimum number of nodes necessary for the test.
Having additional unrequired nodes adds to the execution time of the test as
well as memory/CPU/disk requirements (which is important when running tests in
- parallel or on Travis).
+ parallel).
- Avoid stop-starting the nodes multiple times during the test if possible. A
stop-start takes several seconds, so doing it several times blows up the
runtime of the test.
diff --git a/test/functional/combine_logs.py b/test/functional/combine_logs.py
index a70d9c4ac1..00f2833f55 100755
--- a/test/functional/combine_logs.py
+++ b/test/functional/combine_logs.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python3
+# Copyright (c) 2017-2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Combine logs from multiple bitcoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
diff --git a/test/functional/feature_abortnode.py b/test/functional/feature_abortnode.py
index 62c3eca07d..9b878e8bf8 100755
--- a/test/functional/feature_abortnode.py
+++ b/test/functional/feature_abortnode.py
@@ -29,7 +29,7 @@ class AbortNodeTest(BitcoinTestFramework):
datadir = get_datadir_path(self.options.tmpdir, 0)
# Deleting the undo file will result in reorg failure
- os.unlink(os.path.join(datadir, 'regtest', 'blocks', 'rev00000.dat'))
+ os.unlink(os.path.join(datadir, self.chain, 'blocks', 'rev00000.dat'))
# Connecting to a node with a more work chain will trigger a reorg
# attempt.
@@ -40,7 +40,7 @@ class AbortNodeTest(BitcoinTestFramework):
# Check that node0 aborted
self.log.info("Waiting for crash")
- wait_until(lambda: self.nodes[0].is_node_stopped(), timeout=60)
+ wait_until(lambda: self.nodes[0].is_node_stopped(), timeout=200)
self.log.info("Node crashed - now verifying restart fails")
self.nodes[0].assert_start_raises_init_error()
diff --git a/test/functional/feature_asmap.py b/test/functional/feature_asmap.py
new file mode 100755
index 0000000000..2c6553fbe2
--- /dev/null
+++ b/test/functional/feature_asmap.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test asmap config argument for ASN-based IP bucketing.
+
+Verify node behaviour and debug log when launching bitcoind in these cases:
+
+1. `bitcoind` with no -asmap arg, using /16 prefix for IP bucketing
+
+2. `bitcoind -asmap=<absolute path>`, using the unit test skeleton asmap
+
+3. `bitcoind -asmap=<relative path>`, using the unit test skeleton asmap
+
+4. `bitcoind -asmap/-asmap=` with no file specified, using the default asmap
+
+5. `bitcoind -asmap` with no file specified and a missing default asmap file
+
+6. `bitcoind -asmap` with an empty (unparsable) default asmap file
+
+The tests are order-independent.
+
+"""
+import os
+import shutil
+
+from test_framework.test_framework import BitcoinTestFramework
+
+DEFAULT_ASMAP_FILENAME = 'ip_asn.map' # defined in src/init.cpp
+ASMAP = '../../src/test/data/asmap.raw' # path to unit test skeleton asmap
+VERSION = 'fec61fa21a9f46f3b17bdcd660d7f4cd90b966aad3aec593c99b35f0aca15853'
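+# VERSION is the value bitcoind logs as the asmap version for asmap.raw (see expected_messages below).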
+
+def expected_messages(filename):
+ return ['Opened asmap file "{}" (59 bytes) from disk'.format(filename),
+ 'Using asmap version {} for IP bucketing'.format(VERSION)]
+
+class AsmapTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = False
+ self.num_nodes = 1
+
+ def test_without_asmap_arg(self):
+ self.log.info('Test bitcoind with no -asmap arg passed')
+ self.stop_node(0)
+ with self.node.assert_debug_log(['Using /16 prefix for IP bucketing']):
+ self.start_node(0)
+
+ def test_asmap_with_absolute_path(self):
+ self.log.info('Test bitcoind -asmap=<absolute path>')
+ self.stop_node(0)
+ filename = os.path.join(self.datadir, 'my-map-file.map')
+ shutil.copyfile(self.asmap_raw, filename)
+ with self.node.assert_debug_log(expected_messages(filename)):
+ self.start_node(0, ['-asmap={}'.format(filename)])
+ os.remove(filename)
+
+ def test_asmap_with_relative_path(self):
+ self.log.info('Test bitcoind -asmap=<relative path>')
+ self.stop_node(0)
+ name = 'ASN_map'
+ filename = os.path.join(self.datadir, name)
+ shutil.copyfile(self.asmap_raw, filename)
+ with self.node.assert_debug_log(expected_messages(filename)):
+ self.start_node(0, ['-asmap={}'.format(name)])
+ os.remove(filename)
+
+ def test_default_asmap(self):
+ shutil.copyfile(self.asmap_raw, self.default_asmap)
+ for arg in ['-asmap', '-asmap=']:
+ self.log.info('Test bitcoind {} (using default map file)'.format(arg))
+ self.stop_node(0)
+ with self.node.assert_debug_log(expected_messages(self.default_asmap)):
+ self.start_node(0, [arg])
+ os.remove(self.default_asmap)
+
+ def test_default_asmap_with_missing_file(self):
+ self.log.info('Test bitcoind -asmap with missing default map file')
+ self.stop_node(0)
+ msg = "Error: Could not find asmap file \"{}\"".format(self.default_asmap)
+ self.node.assert_start_raises_init_error(extra_args=['-asmap'], expected_msg=msg)
+
+ def test_empty_asmap(self):
+ self.log.info('Test bitcoind -asmap with empty map file')
+ self.stop_node(0)
+ with open(self.default_asmap, "w", encoding="utf-8") as f:
+ f.write("")
+ msg = "Error: Could not parse asmap file \"{}\"".format(self.default_asmap)
+ self.node.assert_start_raises_init_error(extra_args=['-asmap'], expected_msg=msg)
+ os.remove(self.default_asmap)
+
+ def run_test(self):
+ self.node = self.nodes[0]
+ self.datadir = os.path.join(self.node.datadir, self.chain)
+ self.default_asmap = os.path.join(self.datadir, DEFAULT_ASMAP_FILENAME)
+ self.asmap_raw = os.path.join(os.path.dirname(os.path.realpath(__file__)), ASMAP)
+
+ self.test_without_asmap_arg()
+ self.test_asmap_with_absolute_path()
+ self.test_asmap_with_relative_path()
+ self.test_default_asmap()
+ self.test_default_asmap_with_missing_file()
+ self.test_empty_asmap()
+
+
+if __name__ == '__main__':
+ AsmapTest().main()
diff --git a/test/functional/feature_backwards_compatibility.py b/test/functional/feature_backwards_compatibility.py
new file mode 100755
index 0000000000..0db74432e2
--- /dev/null
+++ b/test/functional/feature_backwards_compatibility.py
@@ -0,0 +1,347 @@
+#!/usr/bin/env python3
+# Copyright (c) 2018-2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Backwards compatibility functional test
+
+Test various backwards compatibility scenarios. Download the previous node binaries:
+
+contrib/devtools/previous_release.sh -b v0.19.0.1 v0.18.1 v0.17.1
+
+Due to RPC changes introduced in various versions, the tests below
+won't work for older versions without some patches or workarounds.
+
+Use only the latest patch version of each release, unless a test specifically
+needs an older patch version.
+"""
+
+import os
+import shutil
+
+from test_framework.test_framework import BitcoinTestFramework, SkipTest
+from test_framework.descriptors import descsum_create
+
+from test_framework.util import (
+ assert_equal,
+ sync_blocks,
+ sync_mempools
+)
+
+class BackwardsCompatibilityTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 5
+ # Add new version after each release:
+ self.extra_args = [
+ ["-addresstype=bech32"], # Pre-release: use to mine blocks
+ ["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # Pre-release: use to receive coins, swap wallets, etc
+ ["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # v0.19.0.1
+ ["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # v0.18.1
+ ["-nowallet", "-walletrbf=1", "-addresstype=bech32"] # v0.17.1
+ ]
+
+ def setup_nodes(self):
+ if os.getenv("TEST_PREVIOUS_RELEASES") == "false":
+ raise SkipTest("backwards compatibility tests")
+
+ releases_path = os.getenv("PREVIOUS_RELEASES_DIR") or os.getcwd() + "/releases"
+ if not os.path.isdir(releases_path):
+ if os.getenv("TEST_PREVIOUS_RELEASES") == "true":
+ raise AssertionError("TEST_PREVIOUS_RELEASES=1 but releases missing: " + releases_path)
+ raise SkipTest("This test requires binaries for previous releases")
+
+ self.add_nodes(self.num_nodes, extra_args=self.extra_args, versions=[
+ None,
+ None,
+ 190000,
+ 180100,
+ 170100
+ ], binary=[
+ self.options.bitcoind,
+ self.options.bitcoind,
+ releases_path + "/v0.19.0.1/bin/bitcoind",
+ releases_path + "/v0.18.1/bin/bitcoind",
+ releases_path + "/v0.17.1/bin/bitcoind"
+ ], binary_cli=[
+ self.options.bitcoincli,
+ self.options.bitcoincli,
+ releases_path + "/v0.19.0.1/bin/bitcoin-cli",
+ releases_path + "/v0.18.1/bin/bitcoin-cli",
+ releases_path + "/v0.17.1/bin/bitcoin-cli"
+ ])
+
+ self.start_nodes()
+
+ def run_test(self):
+ self.nodes[0].generatetoaddress(101, self.nodes[0].getnewaddress())
+
+ sync_blocks(self.nodes)
+
+ # Sanity check the test framework:
+ res = self.nodes[self.num_nodes - 1].getblockchaininfo()
+ assert_equal(res['blocks'], 101)
+
+ node_master = self.nodes[self.num_nodes - 4]
+ node_v19 = self.nodes[self.num_nodes - 3]
+ node_v18 = self.nodes[self.num_nodes - 2]
+ node_v17 = self.nodes[self.num_nodes - 1]
+
+ self.log.info("Test wallet backwards compatibility...")
+ # Create a number of wallets and open them in older versions:
+
+ # w1: regular wallet, created on master: update this test when default
+ # wallets can no longer be opened by older versions.
+ node_master.createwallet(wallet_name="w1")
+ wallet = node_master.get_wallet_rpc("w1")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] > 0
+ # Create a confirmed transaction, receiving coins
+ address = wallet.getnewaddress()
+ self.nodes[0].sendtoaddress(address, 10)
+ sync_mempools(self.nodes)
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+ # Create a conflicting transaction using RBF
+ return_address = self.nodes[0].getnewaddress()
+ tx1_id = self.nodes[1].sendtoaddress(return_address, 1)
+ tx2_id = self.nodes[1].bumpfee(tx1_id)["txid"]
+ # Confirm the transaction
+ sync_mempools(self.nodes)
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+ # Create another conflicting transaction using RBF
+ tx3_id = self.nodes[1].sendtoaddress(return_address, 1)
+ tx4_id = self.nodes[1].bumpfee(tx3_id)["txid"]
+ # Abandon transaction, but don't confirm
+ self.nodes[1].abandontransaction(tx3_id)
+
+ # w1_v19: regular wallet, created with v0.19
+ node_v19.createwallet(wallet_name="w1_v19")
+ wallet = node_v19.get_wallet_rpc("w1_v19")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] > 0
+ # Use addmultisigaddress (see #18075)
+ address_18075 = wallet.addmultisigaddress(1, ["0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52", "037211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073"], "", "legacy")["address"]
+ assert wallet.getaddressinfo(address_18075)["solvable"]
+
+ # w1_v18: regular wallet, created with v0.18
+ node_v18.createwallet(wallet_name="w1_v18")
+ wallet = node_v18.get_wallet_rpc("w1_v18")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] > 0
+
+ # w2: wallet with private keys disabled, created on master: update this
+ # test when default wallets private keys disabled can no longer be
+ # opened by older versions.
+ node_master.createwallet(wallet_name="w2", disable_private_keys=True)
+ wallet = node_master.get_wallet_rpc("w2")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled'] == False
+ assert info['keypoolsize'] == 0
+
+ # w2_v19: wallet with private keys disabled, created with v0.19
+ node_v19.createwallet(wallet_name="w2_v19", disable_private_keys=True)
+ wallet = node_v19.get_wallet_rpc("w2_v19")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled'] == False
+ assert info['keypoolsize'] == 0
+
+ # w2_v18: wallet with private keys disabled, created with v0.18
+ node_v18.createwallet(wallet_name="w2_v18", disable_private_keys=True)
+ wallet = node_v18.get_wallet_rpc("w2_v18")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled'] == False
+ assert info['keypoolsize'] == 0
+
+ # w3: blank wallet, created on master: update this
+ # test when default blank wallets can no longer be opened by older versions.
+ node_master.createwallet(wallet_name="w3", blank=True)
+ wallet = node_master.get_wallet_rpc("w3")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] == 0
+
+ # w3_v19: blank wallet, created with v0.19
+ node_v19.createwallet(wallet_name="w3_v19", blank=True)
+ wallet = node_v19.get_wallet_rpc("w3_v19")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] == 0
+
+ # w3_v18: blank wallet, created with v0.18
+ node_v18.createwallet(wallet_name="w3_v18", blank=True)
+ wallet = node_v18.get_wallet_rpc("w3_v18")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] == 0
+
+ # Copy the wallets to older nodes:
+ node_master_wallets_dir = os.path.join(node_master.datadir, "regtest/wallets")
+ node_v19_wallets_dir = os.path.join(node_v19.datadir, "regtest/wallets")
+ node_v18_wallets_dir = os.path.join(node_v18.datadir, "regtest/wallets")
+ node_v17_wallets_dir = os.path.join(node_v17.datadir, "regtest/wallets")
+ node_master.unloadwallet("w1")
+ node_master.unloadwallet("w2")
+ node_v19.unloadwallet("w1_v19")
+ node_v19.unloadwallet("w2_v19")
+ node_v18.unloadwallet("w1_v18")
+ node_v18.unloadwallet("w2_v18")
+
+ # Copy wallets to v0.17
+ for wallet in os.listdir(node_master_wallets_dir):
+ shutil.copytree(
+ os.path.join(node_master_wallets_dir, wallet),
+ os.path.join(node_v17_wallets_dir, wallet)
+ )
+ for wallet in os.listdir(node_v18_wallets_dir):
+ shutil.copytree(
+ os.path.join(node_v18_wallets_dir, wallet),
+ os.path.join(node_v17_wallets_dir, wallet)
+ )
+
+ # Copy wallets to v0.18
+ for wallet in os.listdir(node_master_wallets_dir):
+ shutil.copytree(
+ os.path.join(node_master_wallets_dir, wallet),
+ os.path.join(node_v18_wallets_dir, wallet)
+ )
+
+ # Copy wallets to v0.19
+ for wallet in os.listdir(node_master_wallets_dir):
+ shutil.copytree(
+ os.path.join(node_master_wallets_dir, wallet),
+ os.path.join(node_v19_wallets_dir, wallet)
+ )
+
+ # Open the wallets in v0.19
+ node_v19.loadwallet("w1")
+ wallet = node_v19.get_wallet_rpc("w1")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] > 0
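+ # w1 should list 5 transactions: the funding tx, tx1 (replaced by tx2), tx2, tx3 (abandoned, replaced by tx4) and tx4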
+ txs = wallet.listtransactions()
+ assert_equal(len(txs), 5)
+ assert_equal(txs[1]["txid"], tx1_id)
+ assert_equal(txs[2]["walletconflicts"], [tx1_id])
+ assert_equal(txs[1]["replaced_by_txid"], tx2_id)
+ assert not(txs[1]["abandoned"])
+ assert_equal(txs[1]["confirmations"], -1)
+ assert_equal(txs[2]["blockindex"], 1)
+ assert txs[3]["abandoned"]
+ assert_equal(txs[4]["walletconflicts"], [tx3_id])
+ assert_equal(txs[3]["replaced_by_txid"], tx4_id)
+ assert "blockindex" not in txs[3]
+
+ node_v19.loadwallet("w2")
+ wallet = node_v19.get_wallet_rpc("w2")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled'] == False
+ assert info['keypoolsize'] == 0
+
+ node_v19.loadwallet("w3")
+ wallet = node_v19.get_wallet_rpc("w3")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] == 0
+
+ # Open the wallets in v0.18
+ node_v18.loadwallet("w1")
+ wallet = node_v18.get_wallet_rpc("w1")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] > 0
+ txs = wallet.listtransactions()
+ assert_equal(len(txs), 5)
+ assert_equal(txs[1]["txid"], tx1_id)
+ assert_equal(txs[2]["walletconflicts"], [tx1_id])
+ assert_equal(txs[1]["replaced_by_txid"], tx2_id)
+ assert not(txs[1]["abandoned"])
+ assert_equal(txs[1]["confirmations"], -1)
+ assert_equal(txs[2]["blockindex"], 1)
+ assert txs[3]["abandoned"]
+ assert_equal(txs[4]["walletconflicts"], [tx3_id])
+ assert_equal(txs[3]["replaced_by_txid"], tx4_id)
+ assert "blockindex" not in txs[3]
+
+ node_v18.loadwallet("w2")
+ wallet = node_v18.get_wallet_rpc("w2")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled'] == False
+ assert info['keypoolsize'] == 0
+
+ node_v18.loadwallet("w3")
+ wallet = node_v18.get_wallet_rpc("w3")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] == 0
+
+ # Open the wallets in v0.17
+ node_v17.loadwallet("w1_v18")
+ wallet = node_v17.get_wallet_rpc("w1_v18")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] > 0
+
+ node_v17.loadwallet("w1")
+ wallet = node_v17.get_wallet_rpc("w1")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] > 0
+
+ node_v17.loadwallet("w2_v18")
+ wallet = node_v17.get_wallet_rpc("w2_v18")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled'] == False
+ assert info['keypoolsize'] == 0
+
+ node_v17.loadwallet("w2")
+ wallet = node_v17.get_wallet_rpc("w2")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled'] == False
+ assert info['keypoolsize'] == 0
+
+ # RPC loadwallet failure causes bitcoind to exit, in addition to the RPC
+ # call failure, so the following test won't work:
+ # assert_raises_rpc_error(-4, "Wallet loading failed.", node_v17.loadwallet, 'w3_v18')
+
+ # Instead, we stop the node and try to launch it with the wallet:
+ self.stop_node(self.num_nodes - 1)
+ node_v17.assert_start_raises_init_error(["-wallet=w3_v18"], "Error: Error loading w3_v18: Wallet requires newer version of Bitcoin Core")
+ node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: Error loading w3: Wallet requires newer version of Bitcoin Core")
+ self.start_node(self.num_nodes - 1)
+
+ self.log.info("Test wallet upgrade path...")
+ # u1: regular wallet, created with v0.17
+ node_v17.createwallet(wallet_name="u1_v17")
+ wallet = node_v17.get_wallet_rpc("u1_v17")
+ address = wallet.getnewaddress("bech32")
+ info = wallet.getaddressinfo(address)
+ hdkeypath = info["hdkeypath"]
+ pubkey = info["pubkey"]
+
+ # Copy the 0.17 wallet to the last Bitcoin Core version and open it:
+ node_v17.unloadwallet("u1_v17")
+ shutil.copytree(
+ os.path.join(node_v17_wallets_dir, "u1_v17"),
+ os.path.join(node_master_wallets_dir, "u1_v17")
+ )
+ node_master.loadwallet("u1_v17")
+ wallet = node_master.get_wallet_rpc("u1_v17")
+ info = wallet.getaddressinfo(address)
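+ # Build the expected descriptor: wpkh([<master fingerprint><hdkeypath with the leading 'm' stripped>]<pubkey>)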
+ descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + pubkey + ")"
+ assert_equal(info["desc"], descsum_create(descriptor))
+
+ # Copy the 0.19 wallet to the last Bitcoin Core version and open it:
+ shutil.copytree(
+ os.path.join(node_v19_wallets_dir, "w1_v19"),
+ os.path.join(node_master_wallets_dir, "w1_v19")
+ )
+ node_master.loadwallet("w1_v19")
+ wallet = node_master.get_wallet_rpc("w1_v19")
+ assert wallet.getaddressinfo(address_18075)["solvable"]
+
+if __name__ == '__main__':
+ BackwardsCompatibilityTest().main()
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
index c7e98bd4db..38bf2faf89 100755
--- a/test/functional/feature_block.py
+++ b/test/functional/feature_block.py
@@ -1261,7 +1261,7 @@ class FullBlockTest(BitcoinTestFramework):
self.save_spendable_output()
spend = self.get_spendable_output()
- self.send_blocks(blocks, True, timeout=960)
+ self.send_blocks(blocks, True, timeout=1920)
chain1_tip = i
# now create alt chain of same length
@@ -1273,14 +1273,14 @@ class FullBlockTest(BitcoinTestFramework):
# extend alt chain to trigger re-org
block = self.next_block("alt" + str(chain1_tip + 1), version=4)
- self.send_blocks([block], True, timeout=960)
+ self.send_blocks([block], True, timeout=1920)
# ... and re-org back to the first chain
self.move_tip(chain1_tip)
block = self.next_block(chain1_tip + 1, version=4)
self.send_blocks([block], False, force_send=True)
block = self.next_block(chain1_tip + 2, version=4)
- self.send_blocks([block], True, timeout=960)
+ self.send_blocks([block], True, timeout=1920)
self.log.info("Reject a block with an invalid block header version")
b_v1 = self.next_block('b_v1', version=1)
@@ -1401,7 +1401,7 @@ class FullBlockTest(BitcoinTestFramework):
self.nodes[0].disconnect_p2ps()
self.bootstrap_p2p(timeout=timeout)
- def send_blocks(self, blocks, success=True, reject_reason=None, force_send=False, reconnect=False, timeout=60):
+ def send_blocks(self, blocks, success=True, reject_reason=None, force_send=False, reconnect=False, timeout=960):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
Call with success = False if the tip shouldn't advance to the most recent block."""
diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py
index 2c6f2e733b..073ed8d7c7 100755
--- a/test/functional/feature_cltv.py
+++ b/test/functional/feature_cltv.py
@@ -55,12 +55,12 @@ class BIP65Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
- '-whitelist=127.0.0.1',
+ '-whitelist=noban@127.0.0.1',
'-par=1', # Use only one script thread to get the exact reject reason for testing
'-acceptnonstdtxn=1', # cltv_invalidate is nonstandard
]]
self.setup_clean_chain = True
- self.rpc_timeout = 120
+ self.rpc_timeout = 480
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py
index 801209b1c0..1a7c656274 100755
--- a/test/functional/feature_config_args.py
+++ b/test/functional/feature_config_args.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2017-2019 The Bitcoin Core developers
+# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
@@ -23,7 +23,7 @@ class ConfArgsTest(BitcoinTestFramework):
conf.write('includeconf={}\n'.format(inc_conf_file_path))
self.nodes[0].assert_start_raises_init_error(
- expected_msg='Error: Error parsing command line arguments: Invalid parameter -dash_cli',
+ expected_msg='Error: Error parsing command line arguments: Invalid parameter -dash_cli=1',
extra_args=['-dash_cli=1'],
)
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
@@ -39,7 +39,7 @@ class ConfArgsTest(BitcoinTestFramework):
if self.is_wallet_compiled():
with open(inc_conf_file_path, 'w', encoding='utf8') as conf:
conf.write("wallet=foo\n")
- self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Config setting for -wallet only applied on regtest network when in [regtest] section.')
+ self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Config setting for -wallet only applied on %s network when in [%s] section.' % (self.chain, self.chain))
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('regtest=0\n') # mainnet
@@ -79,14 +79,44 @@ class ConfArgsTest(BitcoinTestFramework):
conf.write('') # clear
def test_log_buffer(self):
- with self.nodes[0].assert_debug_log(expected_msgs=['Warning: parsed potentially confusing double-negative -connect=0']):
+ with self.nodes[0].assert_debug_log(expected_msgs=['Warning: parsed potentially confusing double-negative -connect=0\n']):
self.start_node(0, extra_args=['-noconnect=0'])
self.stop_node(0)
+ def test_args_log(self):
+ self.log.info('Test config args logging')
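+ # Sensitive arg values (rpcauth, rpcbind, rpcpassword, rpcuser, torpassword) should be masked as **** in debug.log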
+ with self.nodes[0].assert_debug_log(
+ expected_msgs=[
+ 'Command-line arg: addnode="some.node"',
+ 'Command-line arg: rpcauth=****',
+ 'Command-line arg: rpcbind=****',
+ 'Command-line arg: rpcpassword=****',
+ 'Command-line arg: rpcuser=****',
+ 'Command-line arg: torpassword=****',
+ 'Config file arg: %s="1"' % self.chain,
+ 'Config file arg: [%s] server="1"' % self.chain,
+ ],
+ unexpected_msgs=[
+ 'alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
+ '127.1.1.1',
+ 'secret-rpcuser',
+ 'secret-torpassword',
+ ]):
+ self.start_node(0, extra_args=[
+ '-addnode=some.node',
+ '-rpcauth=alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
+ '-rpcbind=127.1.1.1',
+ '-rpcpassword=',
+ '-rpcuser=secret-rpcuser',
+ '-torpassword=secret-torpassword',
+ ])
+ self.stop_node(0)
+
def run_test(self):
self.stop_node(0)
self.test_log_buffer()
+ self.test_args_log()
self.test_config_file_parser()
@@ -104,7 +134,7 @@ class ConfArgsTest(BitcoinTestFramework):
# Check that using non-existent datadir in conf file fails
conf_file = os.path.join(default_data_dir, "bitcoin.conf")
- # datadir needs to be set before [regtest] section
+ # datadir needs to be set before [chain] section
conf_file_contents = open(conf_file, encoding='utf8').read()
with open(conf_file, 'w', encoding='utf8') as f:
f.write("datadir=" + new_data_dir + "\n")
@@ -116,17 +146,17 @@ class ConfArgsTest(BitcoinTestFramework):
os.mkdir(new_data_dir)
self.start_node(0, ['-conf='+conf_file, '-wallet=w1'])
self.stop_node(0)
- assert os.path.exists(os.path.join(new_data_dir, 'regtest', 'blocks'))
+ assert os.path.exists(os.path.join(new_data_dir, self.chain, 'blocks'))
if self.is_wallet_compiled():
- assert os.path.exists(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1'))
+ assert os.path.exists(os.path.join(new_data_dir, self.chain, 'wallets', 'w1'))
# Ensure command line argument overrides datadir in conf
os.mkdir(new_data_dir_2)
self.nodes[0].datadir = new_data_dir_2
self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2'])
- assert os.path.exists(os.path.join(new_data_dir_2, 'regtest', 'blocks'))
+ assert os.path.exists(os.path.join(new_data_dir_2, self.chain, 'blocks'))
if self.is_wallet_compiled():
- assert os.path.exists(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2'))
+ assert os.path.exists(os.path.join(new_data_dir_2, self.chain, 'wallets', 'w2'))
if __name__ == '__main__':
diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py
index c2b4de54f2..a98480a6dd 100755
--- a/test/functional/feature_csv_activation.py
+++ b/test/functional/feature_csv_activation.py
@@ -35,6 +35,7 @@ bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evalu
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
+bip112tx_emptystack - test empty stack (= no argument) OP_CSV
"""
from decimal import Decimal
from itertools import product
@@ -56,6 +57,8 @@ from test_framework.util import (
softfork_active,
)
+TESTING_TX_COUNT = 83 # Number of testing transactions: 1 BIP113 tx, 16 BIP68 txs, 66 BIP112 txs (see comments above)
+COINBASE_BLOCK_COUNT = TESTING_TX_COUNT # Number of coinbase blocks we need to generate as inputs for our txs
BASE_RELATIVE_LOCKTIME = 10
CSV_ACTIVATION_HEIGHT = 432
SEQ_DISABLE_FLAG = 1 << 31
@@ -95,6 +98,13 @@ def create_bip112special(node, input, txversion, address):
signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
return signtx
+def create_bip112emptystack(node, input, txversion, address):
+ tx = create_transaction(node, input, address, amount=Decimal("49.98"))
+ tx.nVersion = txversion
+ signtx = sign_transaction(node, tx)
+ signtx.vin[0].scriptSig = CScript([OP_CHECKSEQUENCEVERIFY] + list(CScript(signtx.vin[0].scriptSig)))
+ return signtx
+
def send_generic_input_tx(node, coinbases, address):
return node.sendrawtransaction(ToHex(sign_transaction(node, create_transaction(node, node.getblock(coinbases.pop())['tx'][0], address, amount=Decimal("49.99")))))
@@ -138,7 +148,12 @@ class BIP68_112_113Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
- self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4', '-addresstype=legacy']]
+ self.extra_args = [[
+ '-whitelist=noban@127.0.0.1',
+ '-blockversion=4',
+ '-addresstype=legacy',
+ '-par=1', # Use only one script thread to get the exact reject reason for testing
+ ]]
self.supports_cli = False
def skip_test_if_missing_module(self):
@@ -163,11 +178,11 @@ class BIP68_112_113Test(BitcoinTestFramework):
block.solve()
return block
- def send_blocks(self, blocks, success=True):
+ def send_blocks(self, blocks, success=True, reject_reason=None):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
Call with success = False if the tip shouldn't advance to the most recent block."""
- self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success)
+ self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason)
def run_test(self):
self.nodes[0].add_p2p_connection(P2PDataStore())
@@ -175,15 +190,16 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.log.info("Generate blocks in the past for coinbase outputs.")
long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
- self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2 * 32 + 1) # 82 blocks generated for inputs
+ self.coinbase_blocks = self.nodes[0].generate(COINBASE_BLOCK_COUNT) # blocks generated for inputs
self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
- self.tipheight = 82 # height of the next block to build
+ self.tipheight = COINBASE_BLOCK_COUNT # height of the next block to build
self.last_block_time = long_past_time
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.nodeaddress = self.nodes[0].getnewaddress()
# Activation height is hardcoded
- test_blocks = self.generate_blocks(345)
+ # Advance to five blocks below the BIP112 activation height for the following tests
+ test_blocks = self.generate_blocks(CSV_ACTIVATION_HEIGHT - 5 - COINBASE_BLOCK_COUNT)
self.send_blocks(test_blocks)
assert not softfork_active(self.nodes[0], 'csv')
@@ -214,6 +230,8 @@ class BIP68_112_113Test(BitcoinTestFramework):
# 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112specialinput = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
+ # 1 special input with (empty stack) OP_CSV (actually will be prepended to spending scriptSig)
+ bip112emptystackinput = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
# 1 normal input
bip113input = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
@@ -224,7 +242,7 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.tip = int(inputblockhash, 16)
self.tipheight += 1
self.last_block_time += 600
- assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), 82 + 1)
+ assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), TESTING_TX_COUNT + 1)
# 2 more version 4 blocks
test_blocks = self.generate_blocks(2)
@@ -263,6 +281,9 @@ class BIP68_112_113Test(BitcoinTestFramework):
# -1 OP_CSV OP_DROP input
bip112tx_special_v1 = create_bip112special(self.nodes[0], bip112specialinput, 1, self.nodeaddress)
bip112tx_special_v2 = create_bip112special(self.nodes[0], bip112specialinput, 2, self.nodeaddress)
+ # (empty stack) OP_CSV input
+ bip112tx_emptystack_v1 = create_bip112emptystack(self.nodes[0], bip112emptystackinput, 1, self.nodeaddress)
+ bip112tx_emptystack_v2 = create_bip112emptystack(self.nodes[0], bip112emptystackinput, 2, self.nodeaddress)
self.log.info("TESTING")
@@ -270,11 +291,12 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.log.info("Test version 1 txs")
success_txs = []
- # add BIP113 tx and -1 CSV tx
+ # BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
success_txs.append(bip113signed1)
success_txs.append(bip112tx_special_v1)
+ success_txs.append(bip112tx_emptystack_v1)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v1))
# add BIP 112 with seq=10 txs
@@ -289,11 +311,12 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.log.info("Test version 2 txs")
success_txs = []
- # add BIP113 tx and -1 CSV tx
+ # BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
success_txs.append(bip113signed2)
success_txs.append(bip112tx_special_v2)
+ success_txs.append(bip112tx_emptystack_v2)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v2))
# add BIP 112 with seq=10 txs
@@ -320,7 +343,7 @@ class BIP68_112_113Test(BitcoinTestFramework):
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
- self.send_blocks([self.create_test_block([bip113tx])], success=False)
+ self.send_blocks([self.create_test_block([bip113tx])], success=False, reject_reason='bad-txns-nonfinal')
# BIP 113 tests should now pass if the locktime is < MTP
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
@@ -352,11 +375,11 @@ class BIP68_112_113Test(BitcoinTestFramework):
# All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
bip68timetxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']]
for tx in bip68timetxs:
- self.send_blocks([self.create_test_block([tx])], success=False)
+ self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')
bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']]
for tx in bip68heighttxs:
- self.send_blocks([self.create_test_block([tx])], success=False)
+ self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')
# Advance one block to 438
test_blocks = self.generate_blocks(1)
@@ -367,7 +390,7 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.send_blocks([self.create_test_block(bip68success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
for tx in bip68heighttxs:
- self.send_blocks([self.create_test_block([tx])], success=False)
+ self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')
# Advance one block to 439
test_blocks = self.generate_blocks(1)
@@ -381,8 +404,11 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.log.info("BIP 112 tests")
self.log.info("Test version 1 txs")
- # -1 OP_CSV tx should fail
- self.send_blocks([self.create_test_block([bip112tx_special_v1])], success=False)
+ # -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
+ self.send_blocks([self.create_test_block([bip112tx_special_v1])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Negative locktime)')
+ self.send_blocks([self.create_test_block([bip112tx_emptystack_v1])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']]
@@ -393,15 +419,19 @@ class BIP68_112_113Test(BitcoinTestFramework):
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1)
fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1)
- fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']]
+ fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if not tx['sdf']]
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']]
for tx in fail_txs:
- self.send_blocks([self.create_test_block([tx])], success=False)
+ self.send_blocks([self.create_test_block([tx])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
self.log.info("Test version 2 txs")
- # -1 OP_CSV tx should fail
- self.send_blocks([self.create_test_block([bip112tx_special_v2])], success=False)
+ # -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
+ self.send_blocks([self.create_test_block([bip112tx_special_v2])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Negative locktime)')
+ self.send_blocks([self.create_test_block([bip112tx_emptystack_v2])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']]
@@ -416,18 +446,21 @@ class BIP68_112_113Test(BitcoinTestFramework):
fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2)
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']]
for tx in fail_txs:
- self.send_blocks([self.create_test_block([tx])], success=False)
+ self.send_blocks([self.create_test_block([tx])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']]
for tx in fail_txs:
- self.send_blocks([self.create_test_block([tx])], success=False)
+ self.send_blocks([self.create_test_block([tx])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
# If sequencelock types mismatch, tx should fail
fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']]
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]
for tx in fail_txs:
- self.send_blocks([self.create_test_block([tx])], success=False)
+ self.send_blocks([self.create_test_block([tx])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
# Remaining txs should pass, just test masking works properly
success_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']]
@@ -445,7 +478,5 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.send_blocks([self.create_test_block(time_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
- # TODO: Test empty stack fails
-
if __name__ == '__main__':
BIP68_112_113Test().main()
diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py
index 27da49cf24..9628945a80 100755
--- a/test/functional/feature_dersig.py
+++ b/test/functional/feature_dersig.py
@@ -40,7 +40,10 @@ def unDERify(tx):
class BIP66Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
- self.extra_args = [['-whitelist=127.0.0.1', '-par=1']] # Use only one script thread to get the exact log msg for testing
+ self.extra_args = [[
+ '-whitelist=noban@127.0.0.1',
+ '-par=1', # Use only one script thread to get the exact log msg for testing
+ ]]
self.setup_clean_chain = True
self.rpc_timeout = 120
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index d2d41b1206..1b33724594 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -128,11 +128,11 @@ class EstimateFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
# mine non-standard txs (e.g. txs with "dust" outputs)
- # Force fSendTrickle to true (via whitelist)
+ # Force fSendTrickle to true (via whitelist.noban)
self.extra_args = [
- ["-acceptnonstdtxn", "-whitelist=127.0.0.1"],
- ["-acceptnonstdtxn", "-whitelist=127.0.0.1", "-blockmaxweight=68000"],
- ["-acceptnonstdtxn", "-whitelist=127.0.0.1", "-blockmaxweight=32000"],
+ ["-acceptnonstdtxn", "-whitelist=noban@127.0.0.1"],
+ ["-acceptnonstdtxn", "-whitelist=noban@127.0.0.1", "-blockmaxweight=68000"],
+ ["-acceptnonstdtxn", "-whitelist=noban@127.0.0.1", "-blockmaxweight=32000"],
]
def skip_test_if_missing_module(self):
diff --git a/test/functional/feature_filelock.py b/test/functional/feature_filelock.py
index 93fef737e4..be1c044aa5 100755
--- a/test/functional/feature_filelock.py
+++ b/test/functional/feature_filelock.py
@@ -19,7 +19,7 @@ class FilelockTest(BitcoinTestFramework):
self.nodes[0].wait_for_rpc_connection()
def run_test(self):
- datadir = os.path.join(self.nodes[0].datadir, 'regtest')
+ datadir = os.path.join(self.nodes[0].datadir, self.chain)
self.log.info("Using datadir {}".format(datadir))
self.log.info("Check that we can't start a second bitcoind instance using the same datadir")
diff --git a/test/functional/feature_help.py b/test/functional/feature_help.py
index ed1d25c0d6..e3e2456183 100755
--- a/test/functional/feature_help.py
+++ b/test/functional/feature_help.py
@@ -17,7 +17,7 @@ class HelpTest(BitcoinTestFramework):
# Don't start the node
def get_node_output(self, *, ret_code_expected):
- ret_code = self.nodes[0].process.wait(timeout=5)
+ ret_code = self.nodes[0].process.wait(timeout=60)
assert_equal(ret_code, ret_code_expected)
self.nodes[0].stdout.seek(0)
self.nodes[0].stderr.seek(0)
diff --git a/test/functional/feature_loadblock.py b/test/functional/feature_loadblock.py
index bd4b271ca7..7f1e8f5bae 100755
--- a/test/functional/feature_loadblock.py
+++ b/test/functional/feature_loadblock.py
@@ -38,7 +38,7 @@ class LoadblockTest(BitcoinTestFramework):
cfg_file = os.path.join(data_dir, "linearize.cfg")
bootstrap_file = os.path.join(self.options.tmpdir, "bootstrap.dat")
genesis_block = self.nodes[0].getblockhash(0)
- blocks_dir = os.path.join(data_dir, "regtest", "blocks")
+ blocks_dir = os.path.join(data_dir, self.chain, "blocks")
hash_list = tempfile.NamedTemporaryFile(dir=data_dir,
mode='w',
delete=False,
diff --git a/test/functional/feature_logging.py b/test/functional/feature_logging.py
index 36d6d70fcc..1bae29b302 100755
--- a/test/functional/feature_logging.py
+++ b/test/functional/feature_logging.py
@@ -16,7 +16,7 @@ class LoggingTest(BitcoinTestFramework):
self.setup_clean_chain = True
def relative_log_path(self, name):
- return os.path.join(self.nodes[0].datadir, "regtest", name)
+ return os.path.join(self.nodes[0].datadir, self.chain, name)
def run_test(self):
# test default log file name
diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py
index 8d200915bd..974388d798 100755
--- a/test/functional/feature_maxuploadtarget.py
+++ b/test/functional/feature_maxuploadtarget.py
@@ -140,10 +140,9 @@ class MaxUploadTest(BitcoinTestFramework):
self.nodes[0].disconnect_p2ps()
- #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
- self.log.info("Restarting nodes with -whitelist=127.0.0.1")
+ self.log.info("Restarting node 0 with noban permission and 1MB maxuploadtarget")
self.stop_node(0)
- self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1"])
+ self.start_node(0, ["-whitelist=noban@127.0.0.1", "-maxuploadtarget=1"])
# Reconnect to self.nodes[0]
self.nodes[0].add_p2p_connection(TestP2PConn())
diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py
index da00b773ad..b110a559c0 100755
--- a/test/functional/feature_notifications.py
+++ b/test/functional/feature_notifications.py
@@ -13,6 +13,16 @@ from test_framework.util import (
connect_nodes,
)
+# Linux allows all characters other than \x00
+# Windows disallows control characters (0-31) and /\?%*:|"<>
+FILE_CHAR_START = 32 if os.name == 'nt' else 1
+FILE_CHAR_END = 128
+FILE_CHAR_BLACKLIST = '/\\?%*:|"<>' if os.name == 'nt' else '/'
+
+
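+# Expected -walletnotify output filename for a given wallet name and txid (the wallet name is omitted on Windows).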
+def notify_outputname(walletname, txid):
+ return txid if os.name == 'nt' else '{}_{}'.format(walletname, txid)
+
class NotificationsTest(BitcoinTestFramework):
def set_test_params(self):
@@ -20,6 +30,7 @@ class NotificationsTest(BitcoinTestFramework):
self.setup_clean_chain = True
def setup_network(self):
+ self.wallet = ''.join(chr(i) for i in range(FILE_CHAR_START, FILE_CHAR_END) if chr(i) not in FILE_CHAR_BLACKLIST)
self.alertnotify_dir = os.path.join(self.options.tmpdir, "alertnotify")
self.blocknotify_dir = os.path.join(self.options.tmpdir, "blocknotify")
self.walletnotify_dir = os.path.join(self.options.tmpdir, "walletnotify")
@@ -33,7 +44,8 @@ class NotificationsTest(BitcoinTestFramework):
"-blocknotify=echo > {}".format(os.path.join(self.blocknotify_dir, '%s'))],
["-blockversion=211",
"-rescan",
- "-walletnotify=echo > {}".format(os.path.join(self.walletnotify_dir, '%s'))]]
+ "-wallet={}".format(self.wallet),
+ "-walletnotify=echo > {}".format(os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s')))]]
super().setup_network()
def run_test(self):
@@ -53,7 +65,7 @@ class NotificationsTest(BitcoinTestFramework):
wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
# directory content should equal the generated transaction hashes
- txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
+ txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))
assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir)))
self.stop_node(1)
for tx_file in os.listdir(self.walletnotify_dir):
@@ -67,7 +79,7 @@ class NotificationsTest(BitcoinTestFramework):
wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
# directory content should equal the generated transaction hashes
- txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
+ txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))
assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir)))
# TODO: add test for `-alertnotify` large fork notifications
diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py
index aaf56a42d0..9c92ee7f90 100755
--- a/test/functional/feature_nulldummy.py
+++ b/test/functional/feature_nulldummy.py
@@ -41,7 +41,10 @@ class NULLDUMMYTest(BitcoinTestFramework):
self.setup_clean_chain = True
# This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
# normal segwit activation here (and don't use the default always-on behaviour).
- self.extra_args = [['-whitelist=127.0.0.1', '-segwitheight=432', '-addresstype=legacy']]
+ self.extra_args = [[
+ '-segwitheight=432',
+ '-addresstype=legacy',
+ ]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
index e1e0f00530..21b6b299f6 100755
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -101,7 +101,7 @@ class PruneTest(BitcoinTestFramework):
def setup_network(self):
self.setup_nodes()
- self.prunedir = os.path.join(self.nodes[2].datadir, 'regtest', 'blocks', '')
+ self.prunedir = os.path.join(self.nodes[2].datadir, self.chain, 'blocks', '')
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
@@ -279,7 +279,7 @@ class PruneTest(BitcoinTestFramework):
assert_equal(ret, node.getblockchaininfo()['pruneheight'])
def has_block(index):
- return os.path.isfile(os.path.join(self.nodes[node_number].datadir, "regtest", "blocks", "blk{:05}.dat".format(index)))
+ return os.path.isfile(os.path.join(self.nodes[node_number].datadir, self.chain, "blocks", "blk{:05}.dat".format(index)))
# should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py
index 82c7e55245..909a43c8d9 100755
--- a/test/functional/feature_segwit.py
+++ b/test/functional/feature_segwit.py
@@ -72,6 +72,7 @@ class SegWitTest(BitcoinTestFramework):
"-addresstype=legacy",
],
]
+ self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/interface_rpc.py b/test/functional/interface_rpc.py
index e460db39f8..3b3b5bb0c1 100755
--- a/test/functional/interface_rpc.py
+++ b/test/functional/interface_rpc.py
@@ -33,7 +33,7 @@ class RPCInterfaceTest(BitcoinTestFramework):
command = info['active_commands'][0]
assert_equal(command['method'], 'getrpcinfo')
assert_greater_than_or_equal(command['duration'], 0)
- assert_equal(info['logpath'], os.path.join(self.nodes[0].datadir, 'regtest', 'debug.log'))
+ assert_equal(info['logpath'], os.path.join(self.nodes[0].datadir, self.chain, 'debug.log'))
def test_batch_request(self):
self.log.info("Testing basic JSON-RPC batch request...")
diff --git a/test/functional/interface_zmq.py b/test/functional/interface_zmq.py
index 5aea10fbce..89c55f31f3 100755
--- a/test/functional/interface_zmq.py
+++ b/test/functional/interface_zmq.py
@@ -59,6 +59,10 @@ class ZMQTest (BitcoinTestFramework):
# Note that the publishing order is not defined in the documentation and
# is subject to change.
import zmq
+
+ # Invalid zmq arguments don't take down the node, see #17185.
+ self.restart_node(0, ["-zmqpubrawtx=foo", "-zmqpubhashtx=bar"])
+
address = 'tcp://127.0.0.1:28332'
socket = self.ctx.socket(zmq.SUB)
socket.set(zmq.RCVTIMEO, 60000)
diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py
index 9cb6b03ee0..900cabccda 100755
--- a/test/functional/mempool_accept.py
+++ b/test/functional/mempool_accept.py
@@ -8,6 +8,7 @@ from io import BytesIO
import math
from test_framework.test_framework import BitcoinTestFramework
+from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
@@ -20,6 +21,9 @@ from test_framework.script import (
hash160,
CScript,
OP_0,
+ OP_2,
+ OP_3,
+ OP_CHECKMULTISIG,
OP_EQUAL,
OP_HASH160,
OP_RETURN,
@@ -35,7 +39,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
- '-txindex',
+ '-txindex', '-permitbaremultisig=0',
]] * self.num_nodes
self.supports_cli = False
@@ -262,6 +266,15 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
+ key = ECKey()
+ key.generate()
+ pubkey = key.get_pubkey().get_bytes()
+ tx.vout[0].scriptPubKey = CScript([OP_2, pubkey, pubkey, pubkey, OP_3, OP_CHECKMULTISIG]) # Some bare multisig script (2-of-3)
+ self.check_mempool_result(
+ result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bare-multisig'}],
+ rawtxs=[tx.serialize().hex()],
+ )
+ tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].scriptSig = CScript([OP_HASH160]) # Some not-pushonly scriptSig
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptsig-not-pushonly'}],
diff --git a/test/functional/mempool_expiry.py b/test/functional/mempool_expiry.py
new file mode 100755
index 0000000000..8b9b7b155a
--- /dev/null
+++ b/test/functional/mempool_expiry.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Tests that a mempool transaction expires after a given timeout and that its
+children are removed as well.
+
+Both the default expiry timeout defined by DEFAULT_MEMPOOL_EXPIRY and a
+user-definable expiry timeout via the '-mempoolexpiry=<n>' command line
+argument (<n> is the timeout in hours) are tested.
+"""
+
+from datetime import timedelta
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ assert_raises_rpc_error,
+ find_vout_for_address,
+)
+
+DEFAULT_MEMPOOL_EXPIRY = 336 # hours
+CUSTOM_MEMPOOL_EXPIRY = 10 # hours
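+# Note: DEFAULT_MEMPOOL_EXPIRY above must match the node's built-in default expiry (in hours);
+# CUSTOM_MEMPOOL_EXPIRY is passed explicitly via -mempoolexpiry.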
+
+
+class MempoolExpiryTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+
+ def test_transaction_expiry(self, timeout):
+ """Tests that a transaction expires after the expiry timeout and its
+ children are removed as well."""
+ node = self.nodes[0]
+
+ # Send a parent transaction that will expire.
+ parent_address = node.getnewaddress()
+ parent_txid = node.sendtoaddress(parent_address, 1.0)
+
+ # Set the mocktime to the arrival time of the parent transaction.
+ entry_time = node.getmempoolentry(parent_txid)['time']
+ node.setmocktime(entry_time)
+
+ # Create child transaction spending the parent transaction
+ vout = find_vout_for_address(node, parent_txid, parent_address)
+ inputs = [{'txid': parent_txid, 'vout': vout}]
+ outputs = {node.getnewaddress(): 0.99}
+ child_raw = node.createrawtransaction(inputs, outputs)
+ child_signed = node.signrawtransactionwithwallet(child_raw)['hex']
+
+ # Let half of the timeout elapse and broadcast the child transaction.
+ half_expiry_time = entry_time + int(60 * 60 * timeout/2)
+ node.setmocktime(half_expiry_time)
+ child_txid = node.sendrawtransaction(child_signed)
+ self.log.info('Broadcast child transaction after {} hours.'.format(
+ timedelta(seconds=(half_expiry_time-entry_time))))
+
+ # Let most of the timeout elapse and check that the parent tx is still
+ # in the mempool.
+ nearly_expiry_time = entry_time + 60 * 60 * timeout - 5
+ node.setmocktime(nearly_expiry_time)
+ # Expiry of mempool transactions is only checked when a new transaction
+ # is added to the mempool.
+ node.sendtoaddress(node.getnewaddress(), 1.0)
+ self.log.info('Test parent tx not expired after {} hours.'.format(
+ timedelta(seconds=(nearly_expiry_time-entry_time))))
+ assert_equal(entry_time, node.getmempoolentry(parent_txid)['time'])
+
+ # Transaction should be evicted from the mempool after the expiry time
+ # has passed.
+ expiry_time = entry_time + 60 * 60 * timeout + 5
+ node.setmocktime(expiry_time)
+ # Expiry of mempool transactions is only checked when a new transaction
+ # is added to the mempool.
+ node.sendtoaddress(node.getnewaddress(), 1.0)
+ self.log.info('Test parent tx expiry after {} hours.'.format(
+ timedelta(seconds=(expiry_time-entry_time))))
+ assert_raises_rpc_error(-5, 'Transaction not in mempool',
+ node.getmempoolentry, parent_txid)
+
+ # The child transaction should be removed from the mempool as well.
+ self.log.info('Test child tx is evicted as well.')
+ assert_raises_rpc_error(-5, 'Transaction not in mempool',
+ node.getmempoolentry, child_txid)
+
+ def run_test(self):
+ self.log.info('Test default mempool expiry timeout of %d hours.' %
+ DEFAULT_MEMPOOL_EXPIRY)
+ self.test_transaction_expiry(DEFAULT_MEMPOOL_EXPIRY)
+
+ self.log.info('Test custom mempool expiry timeout of %d hours.' %
+ CUSTOM_MEMPOOL_EXPIRY)
+ self.restart_node(0, ['-mempoolexpiry=%d' % CUSTOM_MEMPOOL_EXPIRY])
+ self.test_transaction_expiry(CUSTOM_MEMPOOL_EXPIRY)
+
+
+if __name__ == '__main__':
+ MempoolExpiryTest().main()
diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py
index 7014105d88..a07dad18d6 100755
--- a/test/functional/mempool_packages.py
+++ b/test/functional/mempool_packages.py
@@ -12,6 +12,7 @@ from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
satoshi_round,
+ wait_until,
)
# default limits
@@ -19,13 +20,22 @@ MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
# custom limits for node1
MAX_ANCESTORS_CUSTOM = 5
+MAX_DESCENDANTS_CUSTOM = 10
+assert MAX_DESCENDANTS_CUSTOM >= MAX_ANCESTORS_CUSTOM
class MempoolPackagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [
- ["-maxorphantx=1000"],
- ["-maxorphantx=1000", "-limitancestorcount={}".format(MAX_ANCESTORS_CUSTOM)],
+ [
+ "-maxorphantx=1000",
+ "-whitelist=noban@127.0.0.1", # immediate tx relay
+ ],
+ [
+ "-maxorphantx=1000",
+ "-limitancestorcount={}".format(MAX_ANCESTORS_CUSTOM),
+ "-limitdescendantcount={}".format(MAX_DESCENDANTS_CUSTOM),
+ ],
]
def skip_test_if_missing_module(self):
@@ -219,9 +229,11 @@ class MempoolPackagesTest(BitcoinTestFramework):
transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
# Sign and send up to MAX_DESCENDANT transactions chained off the parent tx
+ chain = [] # save sent txs for the purpose of checking node1's mempool later (see below)
for i in range(MAX_DESCENDANTS - 1):
utxo = transaction_package.pop(0)
(txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
+ chain.append(txid)
if utxo['txid'] is parent_transaction:
tx_children.append(txid)
for j in range(10):
@@ -238,7 +250,21 @@ class MempoolPackagesTest(BitcoinTestFramework):
utxo = transaction_package.pop(0)
assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
- # TODO: check that node1's mempool is as expected
+ # Check that node1's mempool is as expected, containing:
+ # - txs from previous ancestor test (-> custom ancestor limit)
+ # - parent tx for descendant test
+ # - txs chained off parent tx (-> custom descendant limit)
+ wait_until(lambda: len(self.nodes[1].getrawmempool(False)) ==
+ MAX_ANCESTORS_CUSTOM + 1 + MAX_DESCENDANTS_CUSTOM, timeout=10)
+ mempool0 = self.nodes[0].getrawmempool(False)
+ mempool1 = self.nodes[1].getrawmempool(False)
+ assert set(mempool1).issubset(set(mempool0))
+ assert parent_transaction in mempool1
+ for tx in chain[:MAX_DESCENDANTS_CUSTOM]:
+ assert tx in mempool1
+ for tx in chain[MAX_DESCENDANTS_CUSTOM:]:
+ assert tx not in mempool1
+ # TODO: more detailed check of node1's mempool (fees etc.)
# TODO: test descendant size limits
diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py
index ef84de5a14..d3690b245e 100755
--- a/test/functional/mempool_persist.py
+++ b/test/functional/mempool_persist.py
@@ -117,8 +117,8 @@ class MempoolPersistTest(BitcoinTestFramework):
wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"])
assert_equal(len(self.nodes[0].getrawmempool()), 5)
- mempooldat0 = os.path.join(self.nodes[0].datadir, 'regtest', 'mempool.dat')
- mempooldat1 = os.path.join(self.nodes[1].datadir, 'regtest', 'mempool.dat')
+ mempooldat0 = os.path.join(self.nodes[0].datadir, self.chain, 'mempool.dat')
+ mempooldat1 = os.path.join(self.nodes[1].datadir, self.chain, 'mempool.dat')
self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
os.remove(mempooldat0)
self.nodes[0].savemempool()
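This hunk is the first of several below (mining_basic, rpc_dumptxoutset, rpc_scantxoutset, tool_wallet, wallet_backup) that replace a hard-coded 'regtest' with self.chain, since the node datadir contains a per-chain subdirectory. A tiny illustrative helper (chain_file is hypothetical, not a framework API):

import os

def chain_file(node, chain, *parts):
    # e.g. chain_file(self.nodes[0], self.chain, 'mempool.dat')
    return os.path.join(node.datadir, chain, *parts)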
diff --git a/test/functional/mempool_reorg.py b/test/functional/mempool_reorg.py
index 123f0b4c28..3b148d5cf0 100755
--- a/test/functional/mempool_reorg.py
+++ b/test/functional/mempool_reorg.py
@@ -76,7 +76,7 @@ class MempoolCoinbaseTest(BitcoinTestFramework):
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
- self.sync_all()
+ self.sync_all(timeout=360)
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
@@ -91,7 +91,7 @@ class MempoolCoinbaseTest(BitcoinTestFramework):
for node in self.nodes:
node.invalidateblock(new_blocks[0])
- self.sync_all()
+ self.sync_all(timeout=360)
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
diff --git a/test/functional/mining_basic.py b/test/functional/mining_basic.py
index bfeaa76c74..8262e30592 100755
--- a/test/functional/mining_basic.py
+++ b/test/functional/mining_basic.py
@@ -70,7 +70,7 @@ class MiningTest(BitcoinTestFramework):
self.log.info('getmininginfo')
mining_info = node.getmininginfo()
assert_equal(mining_info['blocks'], 200)
- assert_equal(mining_info['chain'], 'regtest')
+ assert_equal(mining_info['chain'], self.chain)
assert 'currentblocktx' not in mining_info
assert 'currentblockweight' not in mining_info
assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
diff --git a/test/functional/p2p_invalid_block.py b/test/functional/p2p_invalid_block.py
index 905534b862..801407757f 100755
--- a/test/functional/p2p_invalid_block.py
+++ b/test/functional/p2p_invalid_block.py
@@ -22,7 +22,7 @@ class InvalidBlockRequestTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
- self.extra_args = [["-whitelist=127.0.0.1"]]
+ self.extra_args = [["-whitelist=noban@127.0.0.1"]]
def run_test(self):
# Add p2p connection to node0
diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py
index 20864881c1..9876d749ff 100755
--- a/test/functional/p2p_invalid_messages.py
+++ b/test/functional/p2p_invalid_messages.py
@@ -78,7 +78,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
# Peer 1, despite serving up a bunch of nonsense, should still be connected.
self.log.info("Waiting for node to drop junk messages.")
- node.p2p.sync_with_ping(timeout=320)
+ node.p2p.sync_with_ping(timeout=400)
assert node.p2p.is_connected
#
@@ -145,13 +145,13 @@ class InvalidMessagesTest(BitcoinTestFramework):
def test_magic_bytes(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
- def swap_magic_bytes():
+ async def swap_magic_bytes():
conn._on_data = lambda: None # Need to ignore all incoming messages from now on, since they come with "invalid" magic bytes
conn.magic_bytes = b'\x00\x11\x22\x32'
# Call .result() to block until the atomic swap is complete, otherwise
# we might run into races later on
- asyncio.run_coroutine_threadsafe(asyncio.coroutine(swap_magic_bytes)(), NetworkThread.network_event_loop).result()
+ asyncio.run_coroutine_threadsafe(swap_magic_bytes(), NetworkThread.network_event_loop).result()
with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping']):
conn.send_message(messages.msg_ping(nonce=0xff))
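The change above replaces the deprecated asyncio.coroutine() wrapper with a native async def; the coroutine is still scheduled onto the network thread's event loop and .result() blocks until it has run. A self-contained sketch of that pattern, assuming a throwaway event loop rather than the framework's NetworkThread (the magic-byte values are the regtest defaults, used here only for illustration):

import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

state = {"magic": b'\xfa\xbf\xb5\xda'}

async def swap_magic():
    # Runs on the event-loop thread, so it cannot race the message handlers.
    state["magic"] = b'\x00\x11\x22\x32'

asyncio.run_coroutine_threadsafe(swap_magic(), loop).result()  # blocks until done
print(state["magic"])
loop.call_soon_threadsafe(loop.stop)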
diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py
index 37101d6143..3a7bf4bfc3 100755
--- a/test/functional/p2p_permissions.py
+++ b/test/functional/p2p_permissions.py
@@ -7,21 +7,35 @@
Test that permissions are correctly calculated and applied
"""
+from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE
+from test_framework.messages import (
+ CTransaction,
+ CTxInWitness,
+ FromHex,
+)
+from test_framework.mininode import P2PDataStore
+from test_framework.script import (
+ CScript,
+ OP_TRUE,
+)
from test_framework.test_node import ErrorMatch
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
p2p_port,
+ wait_until,
)
+
class P2PPermissionsTests(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
- self.extra_args = [[],[]]
def run_test(self):
+ self.check_tx_relay()
+
self.checkpermission(
# default permissions (no specific permissions)
["-whitelist=127.0.0.1"],
@@ -48,9 +62,9 @@ class P2PPermissionsTests(BitcoinTestFramework):
ip_port = "127.0.0.1:{}".format(p2p_port(1))
self.replaceinconfig(1, "bind=127.0.0.1", "whitebind=bloomfilter,forcerelay@" + ip_port)
self.checkpermission(
- ["-whitelist=noban@127.0.0.1" ],
+ ["-whitelist=noban@127.0.0.1"],
# Check parameter interaction: forcerelay should activate relay
- ["noban", "bloomfilter", "forcerelay", "relay" ],
+ ["noban", "bloomfilter", "forcerelay", "relay"],
False)
self.replaceinconfig(1, "whitebind=bloomfilter,forcerelay@" + ip_port, "bind=127.0.0.1")
@@ -83,6 +97,51 @@ class P2PPermissionsTests(BitcoinTestFramework):
self.nodes[1].assert_start_raises_init_error(["-whitelist=noban@127.0.0.1:230"], "Invalid netmask specified in", match=ErrorMatch.PARTIAL_REGEX)
self.nodes[1].assert_start_raises_init_error(["-whitebind=noban@127.0.0.1/10"], "Cannot resolve -whitebind address", match=ErrorMatch.PARTIAL_REGEX)
+ def check_tx_relay(self):
+ block_op_true = self.nodes[0].getblock(self.nodes[0].generatetoaddress(100, ADDRESS_BCRT1_P2WSH_OP_TRUE)[0])
+ self.sync_all()
+
+ self.log.debug("Create a connection from a whitelisted wallet that rebroadcasts raw txs")
+ # A python mininode is needed to send the raw transaction directly. If a full node was used, it could only
+ # rebroadcast via the inv-getdata mechanism. However, even for whitelisted connections, a full node would
+ # currently not request a txid that is already in the mempool.
+ self.restart_node(1, extra_args=["-whitelist=forcerelay@127.0.0.1"])
+ p2p_rebroadcast_wallet = self.nodes[1].add_p2p_connection(P2PDataStore())
+
+ self.log.debug("Send a tx from the wallet initially")
+ tx = FromHex(
+ CTransaction(),
+ self.nodes[0].createrawtransaction(
+ inputs=[{
+ 'txid': block_op_true['tx'][0],
+ 'vout': 0,
+ }], outputs=[{
+ ADDRESS_BCRT1_P2WSH_OP_TRUE: 5,
+ }]),
+ )
+ tx.wit.vtxinwit = [CTxInWitness()]
+ tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
+ txid = tx.rehash()
+
+ self.log.debug("Wait until tx is in node[1]'s mempool")
+ p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
+
+ self.log.debug("Check that node[1] will send the tx to node[0] even though it is already in the mempool")
+ connect_nodes(self.nodes[1], 0)
+ with self.nodes[1].assert_debug_log(["Force relaying tx {} from whitelisted peer=0".format(txid)]):
+ p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
+ wait_until(lambda: txid in self.nodes[0].getrawmempool())
+
+ self.log.debug("Check that node[1] will not send an invalid tx to node[0]")
+ tx.vout[0].nValue += 1
+ txid = tx.rehash()
+ p2p_rebroadcast_wallet.send_txs_and_test(
+ [tx],
+ self.nodes[1],
+ success=False,
+ reject_reason='Not relaying non-mempool transaction {} from whitelisted peer=0'.format(txid),
+ )
+
def checkpermission(self, args, expectedPermissions, whitelisted):
self.restart_node(1, args)
connect_nodes(self.nodes[0], 1)
@@ -95,9 +154,10 @@ class P2PPermissionsTests(BitcoinTestFramework):
def replaceinconfig(self, nodeid, old, new):
with open(self.nodes[nodeid].bitcoinconf, encoding="utf8") as f:
- newText=f.read().replace(old, new)
+ newText = f.read().replace(old, new)
with open(self.nodes[nodeid].bitcoinconf, 'w', encoding="utf8") as f:
f.write(newText)
+
if __name__ == '__main__':
P2PPermissionsTests().main()
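The new check_tx_relay test exercises the forcerelay permission: a transaction already in node1's mempool is still announced to node0, while an invalid transaction is refused even from a whitelisted peer. As a reminder of the '-whitelist=[permissions@]address' syntax used throughout this test, here is an illustrative parser (the real parsing is done by bitcoind, not by the test framework):

def split_whitelist_value(value):
    # 'forcerelay@127.0.0.1' -> (['forcerelay'], '127.0.0.1'); bare addresses keep default permissions.
    if '@' in value:
        perms, addr = value.split('@', 1)
        return perms.split(','), addr
    return [], value

print(split_whitelist_value("bloomfilter,forcerelay@127.0.0.1:18444"))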
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
index 297fa88fbe..ad5a124680 100755
--- a/test/functional/p2p_segwit.py
+++ b/test/functional/p2p_segwit.py
@@ -31,7 +31,7 @@ from test_framework.messages import (
msg_inv,
msg_tx,
msg_block,
- msg_witness_tx,
+ msg_no_witness_tx,
ser_uint256,
ser_vector,
sha256,
@@ -125,10 +125,11 @@ def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=No
- use the getrawmempool rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
- p2p.send_message(msg_witness_tx(tx) if with_witness else msg_tx(tx))
+ p2p.send_message(msg_tx(tx) if with_witness else msg_no_witness_tx(tx))
p2p.sync_with_ping()
assert_equal(tx.hash in node.getrawmempool(), accepted)
+
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
"""Send a block to the node and check that it's accepted
@@ -187,9 +188,9 @@ class SegWitTest(BitcoinTestFramework):
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
- ["-whitelist=127.0.0.1", "-acceptnonstdtxn=1", "-segwitheight={}".format(SEGWIT_HEIGHT)],
- ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-segwitheight={}".format(SEGWIT_HEIGHT)],
- ["-whitelist=127.0.0.1", "-acceptnonstdtxn=1", "-segwitheight=-1"]
+ ["-acceptnonstdtxn=1", "-segwitheight={}".format(SEGWIT_HEIGHT), "-whitelist=noban@127.0.0.1"],
+ ["-acceptnonstdtxn=0", "-segwitheight={}".format(SEGWIT_HEIGHT)],
+ ["-acceptnonstdtxn=1", "-segwitheight=-1"],
]
self.supports_cli = False
@@ -311,9 +312,9 @@ class SegWitTest(BitcoinTestFramework):
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
- assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
+ assert_equal(msg_no_witness_tx(tx).serialize(), msg_tx(tx).serialize())
- self.test_node.send_message(msg_witness_tx(tx))
+ self.test_node.send_message(msg_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert tx.hash in self.nodes[0].getrawmempool()
# Save this transaction for later
diff --git a/test/functional/rpc_createmultisig.py b/test/functional/rpc_createmultisig.py
index aa7f12848c..a983716177 100755
--- a/test/functional/rpc_createmultisig.py
+++ b/test/functional/rpc_createmultisig.py
@@ -4,7 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multisig RPCs"""
-from test_framework.descriptors import descsum_create
+from test_framework.descriptors import descsum_create, drop_origins
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_raises_rpc_error,
@@ -116,9 +116,20 @@ class RpcCreateMultiSigTest(BitcoinTestFramework):
def do_multisig(self):
node0, node1, node2 = self.nodes
+ # Construct the expected descriptor
+ desc = 'multi({},{})'.format(self.nsigs, ','.join(self.pub))
+ if self.output_type == 'legacy':
+ desc = 'sh({})'.format(desc)
+ elif self.output_type == 'p2sh-segwit':
+ desc = 'sh(wsh({}))'.format(desc)
+ elif self.output_type == 'bech32':
+ desc = 'wsh({})'.format(desc)
+ desc = descsum_create(desc)
+
msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
madd = msig["address"]
mredeem = msig["redeemScript"]
+ assert_equal(desc, msig['descriptor'])
if self.output_type == 'bech32':
assert madd[0:4] == "bcrt" # actually a bech32 address
@@ -126,6 +137,7 @@ class RpcCreateMultiSigTest(BitcoinTestFramework):
msigw = node1.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
maddw = msigw["address"]
mredeemw = msigw["redeemScript"]
+ assert_equal(desc, drop_origins(msigw['descriptor']))
# addmultisigaddress and createmultisig work the same
assert maddw == madd
assert mredeemw == mredeem
diff --git a/test/functional/rpc_dumptxoutset.py b/test/functional/rpc_dumptxoutset.py
index 7527bdfb08..438e7f68e0 100755
--- a/test/functional/rpc_dumptxoutset.py
+++ b/test/functional/rpc_dumptxoutset.py
@@ -25,7 +25,7 @@ class DumptxoutsetTest(BitcoinTestFramework):
FILENAME = 'txoutset.dat'
out = node.dumptxoutset(FILENAME)
- expected_path = Path(node.datadir) / 'regtest' / FILENAME
+ expected_path = Path(node.datadir) / self.chain / FILENAME
assert expected_path.is_file()
diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py
index 6f1ae0d3ba..c435ef24ce 100755
--- a/test/functional/rpc_fundrawtransaction.py
+++ b/test/functional/rpc_fundrawtransaction.py
@@ -30,7 +30,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.setup_clean_chain = True
# This test isn't testing tx relay. Set whitelist on the peers for
# instant tx relay.
- self.extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
+ self.extra_args = [['-whitelist=noban@127.0.0.1']] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/rpc_getaddressinfo_label_deprecation.py b/test/functional/rpc_getaddressinfo_label_deprecation.py
new file mode 100755
index 0000000000..5e739ebede
--- /dev/null
+++ b/test/functional/rpc_getaddressinfo_label_deprecation.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test deprecation of the RPC getaddressinfo `label` field. It has been
+superseded by the `labels` field.
+
+"""
+from test_framework.test_framework import BitcoinTestFramework
+
+class GetAddressInfoLabelDeprecationTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 2
+ self.setup_clean_chain = False
+ # Start node[0] with -deprecatedrpc=label, and node[1] without.
+ self.extra_args = [["-deprecatedrpc=label"], []]
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+
+ def test_label_with_deprecatedrpc_flag(self):
+ self.log.info("Test getaddressinfo label with -deprecatedrpc flag")
+ node = self.nodes[0]
+ address = node.getnewaddress()
+ info = node.getaddressinfo(address)
+ assert "label" in info
+
+ def test_label_without_deprecatedrpc_flag(self):
+ self.log.info("Test getaddressinfo label without -deprecatedrpc flag")
+ node = self.nodes[1]
+ address = node.getnewaddress()
+ info = node.getaddressinfo(address)
+ assert "label" not in info
+
+ def run_test(self):
+ """Test getaddressinfo label with and without -deprecatedrpc flag."""
+ self.test_label_with_deprecatedrpc_flag()
+ self.test_label_without_deprecatedrpc_flag()
+
+
+if __name__ == '__main__':
+ GetAddressInfoLabelDeprecationTest().main()
diff --git a/test/functional/rpc_getaddressinfo_labels_purpose_deprecation.py b/test/functional/rpc_getaddressinfo_labels_purpose_deprecation.py
new file mode 100755
index 0000000000..3f2e8dee18
--- /dev/null
+++ b/test/functional/rpc_getaddressinfo_labels_purpose_deprecation.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+# Copyright (c) 2019-2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test deprecation of RPC getaddressinfo `labels` returning an array
+containing a JSON object of `name` and `purpose` key-value pairs. It now
+returns an array containing only the label name.
+
+"""
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_equal
+
+LABELS_TO_TEST = frozenset({"", "New 𝅘𝅥𝅯 $<#>&!рыба Label"})
+
+class GetAddressInfoLabelsPurposeDeprecationTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 2
+ self.setup_clean_chain = False
+ # Start node[0] with -deprecatedrpc=labelspurpose and node[1] without.
+ self.extra_args = [["-deprecatedrpc=labelspurpose"], []]
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+
+ def test_labels(self, node_num, label_name, expected_value):
+ node = self.nodes[node_num]
+ address = node.getnewaddress()
+ if label_name != "":
+ node.setlabel(address, label_name)
+ self.log.info(" set label to {}".format(label_name))
+ labels = node.getaddressinfo(address)["labels"]
+ self.log.info(" labels = {}".format(labels))
+ assert_equal(labels, expected_value)
+
+ def run_test(self):
+ """Test getaddressinfo labels with and without -deprecatedrpc flag."""
+ self.log.info("Test getaddressinfo labels with -deprecatedrpc flag")
+ for label in LABELS_TO_TEST:
+ self.test_labels(node_num=0, label_name=label, expected_value=[{"name": label, "purpose": "receive"}])
+
+ self.log.info("Test getaddressinfo labels without -deprecatedrpc flag")
+ for label in LABELS_TO_TEST:
+ self.test_labels(node_num=1, label_name=label, expected_value=[label])
+
+
+if __name__ == '__main__':
+ GetAddressInfoLabelsPurposeDeprecationTest().main()
diff --git a/test/functional/rpc_getdescriptorinfo.py b/test/functional/rpc_getdescriptorinfo.py
new file mode 100755
index 0000000000..977dc805ef
--- /dev/null
+++ b/test/functional/rpc_getdescriptorinfo.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+# Copyright (c) 2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test getdescriptorinfo RPC.
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.descriptors import descsum_create
+from test_framework.util import (
+ assert_equal,
+ assert_raises_rpc_error,
+)
+
+
+class DescriptorTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+ self.extra_args = [["-disablewallet"]]
+
+ def test_desc(self, desc, isrange, issolvable, hasprivatekeys):
+ info = self.nodes[0].getdescriptorinfo(desc)
+ assert_equal(info, self.nodes[0].getdescriptorinfo(descsum_create(desc)))
+ assert_equal(info['descriptor'], descsum_create(desc))
+ assert_equal(info['isrange'], isrange)
+ assert_equal(info['issolvable'], issolvable)
+ assert_equal(info['hasprivatekeys'], hasprivatekeys)
+
+ def run_test(self):
+ assert_raises_rpc_error(-1, 'getdescriptorinfo', self.nodes[0].getdescriptorinfo)
+ assert_raises_rpc_error(-3, 'Expected type string', self.nodes[0].getdescriptorinfo, 1)
+ assert_raises_rpc_error(-5, 'is not a valid descriptor function', self.nodes[0].getdescriptorinfo, '')
+
+ # P2PK output with the specified public key.
+ self.test_desc('pk(0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798)', isrange=False, issolvable=True, hasprivatekeys=False)
+ # P2PKH output with the specified public key.
+ self.test_desc('pkh(02c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5)', isrange=False, issolvable=True, hasprivatekeys=False)
+ # P2WPKH output with the specified public key.
+ self.test_desc('wpkh(02f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9)', isrange=False, issolvable=True, hasprivatekeys=False)
+ # P2SH-P2WPKH output with the specified public key.
+ self.test_desc('sh(wpkh(03fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556))', isrange=False, issolvable=True, hasprivatekeys=False)
+ # Any P2PK, P2PKH, P2WPKH, or P2SH-P2WPKH output with the specified public key.
+ self.test_desc('combo(0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798)', isrange=False, issolvable=True, hasprivatekeys=False)
+ # An (overly complicated) P2SH-P2WSH-P2PKH output with the specified public key.
+ self.test_desc('sh(wsh(pkh(02e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd13)))', isrange=False, issolvable=True, hasprivatekeys=False)
+ # A bare *1-of-2* multisig output with keys in the specified order.
+ self.test_desc('multi(1,022f8bde4d1a07209355b4a7250a5c5128e88b84bddc619ab7cba8d569b240efe4,025cbdf0646e5db4eaa398f365f2ea7a0e3d419b7e0330e39ce92bddedcac4f9bc)', isrange=False, issolvable=True, hasprivatekeys=False)
+ # A P2SH *2-of-2* multisig output with keys in the specified order.
+ self.test_desc('sh(multi(2,022f01e5e15cca351daff3843fb70f3c2f0a1bdd05e5af888a67784ef3e10a2a01,03acd484e2f0c7f65309ad178a9f559abde09796974c57e714c35f110dfc27ccbe))', isrange=False, issolvable=True, hasprivatekeys=False)
+ # A P2WSH *2-of-3* multisig output with keys in the specified order.
+ self.test_desc('wsh(multi(2,03a0434d9e47f3c86235477c7b1ae6ae5d3442d49b1943c2b752a68e2a47e247c7,03774ae7f858a9411e5ef4246b70c65aac5649980be5c17891bbec17895da008cb,03d01115d548e7561b15c38f004d734633687cf4419620095bc5b0f47070afe85a))', isrange=False, issolvable=True, hasprivatekeys=False)
+ # A P2SH-P2WSH *1-of-3* multisig output with keys in the specified order.
+ self.test_desc('sh(wsh(multi(1,03f28773c2d975288bc7d1d205c3748651b075fbc6610e58cddeeddf8f19405aa8,03499fdf9e895e719cfd64e67f07d38e3226aa7b63678949e6e49b241a60e823e4,02d7924d4f7d43ea965a465ae3095ff41131e5946f3c85f79e44adbcf8e27e080e)))', isrange=False, issolvable=True, hasprivatekeys=False)
+ # A P2PK output with the public key of the specified xpub.
+ self.test_desc('pk(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B)', isrange=False, issolvable=True, hasprivatekeys=False)
+ # A P2PKH output with child key *1'/2* of the specified xpub.
+ self.test_desc("pkh(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1'/2)", isrange=False, issolvable=True, hasprivatekeys=False)
+ # A set of P2PKH outputs, but additionally specifies that the specified xpub is a child of a master with fingerprint `d34db33f`, and derived using path `44'/0'/0'`.
+ self.test_desc("pkh([d34db33f/44'/0'/0']tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/*)", isrange=True, issolvable=True, hasprivatekeys=False)
+ # A set of *1-of-2* P2WSH multisig outputs where the first multisig key is the *1/0/`i`* child of the first specified xpub and the second multisig key is the *0/0/`i`* child of the second specified xpub, and `i` is any number in a configurable range (`0-1000` by default).
+ self.test_desc("wsh(multi(1,tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/0/*,tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/0/0/*))", isrange=True, issolvable=True, hasprivatekeys=False)
+
+
+if __name__ == '__main__':
+ DescriptorTest().main()
diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py
index 2cc9650cb2..3a63377545 100755
--- a/test/functional/rpc_psbt.py
+++ b/test/functional/rpc_psbt.py
@@ -193,12 +193,20 @@ class PSBTTest(BitcoinTestFramework):
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
# Update psbts, should only have data for one input and not the other
- psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
+ psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig, False, "ALL")['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
- psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
+ # Check that BIP32 path was added
+ assert "bip32_derivs" in psbt1_decoded['inputs'][0]
+ psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig, False, "ALL", False)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
+ # Check that BIP32 paths were not added
+ assert "bip32_derivs" not in psbt2_decoded['inputs'][1]
+
+ # Sign PSBTs (workaround issue #18039)
+ psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
+ psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])
@@ -231,16 +239,18 @@ class PSBTTest(BitcoinTestFramework):
# Same construction without optional arguments
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
- for tx_in in decoded_psbt["tx"]["vin"]:
+ for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
+ assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], 0)
# Same construction without optional arguments, for a node with -walletrbf=0
unspent1 = self.nodes[1].listunspent()[0]
psbtx_info = self.nodes[1].walletcreatefundedpsbt([{"txid":unspent1["txid"], "vout":unspent1["vout"]}], [{self.nodes[2].getnewaddress():unspent1["amount"]+1}], block_height)
decoded_psbt = self.nodes[1].decodepsbt(psbtx_info["psbt"])
- for tx_in in decoded_psbt["tx"]["vin"]:
+ for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
+ assert "bip32_derivs" in psbt_in
# Make sure the change-address wallet does not have P2SH inner-script access; this should result in
# success when attempting BnB coin selection
@@ -422,5 +432,24 @@ class PSBTTest(BitcoinTestFramework):
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 spends unspendable output')
+ self.log.info("PSBT with invalid values should have error message and Creator as next")
+ analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8AgIFq49AHABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
+ assert_equal(analysis['next'], 'creator')
+ assert_equal(analysis['error'], 'PSBT is not valid. Input 0 has invalid value')
+
+ self.log.info("PSBT with signed, but not finalized, inputs should have Finalizer as next")
+ analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAZYezcxdnbXoQCmrD79t/LzDgtUo9ERqixk8wgioAobrAAAAAAD9////AlDDAAAAAAAAFgAUy/UxxZuzZswcmFnN/E9DGSiHLUsuGPUFAAAAABYAFLsH5o0R38wXx+X2cCosTMCZnQ4baAAAAAABAR8A4fUFAAAAABYAFOBI2h5thf3+Lflb2LGCsVSZwsltIgIC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnJHMEQCIGx7zKcMIGr7cEES9BR4Kdt/pzPTK3fKWcGyCJXb7MVnAiALOBgqlMH4GbC1HDh/HmylmO54fyEy4lKde7/BT/PWxwEBAwQBAAAAIgYC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnIYDwVpQ1QAAIABAACAAAAAgAAAAAAAAAAAAAAiAgL+CIiB59NSCssOJRGiMYQK1chahgAaaJpIXE41Cyir+xgPBWlDVAAAgAEAAIAAAACAAQAAAAAAAAAA')
+ assert_equal(analysis['next'], 'finalizer')
+
+ analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgCAgWrj0AcAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8A8gUqAQAAABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
+ assert_equal(analysis['next'], 'creator')
+ assert_equal(analysis['error'], 'PSBT is not valid. Output amount invalid')
+
+ analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
+ assert_equal(analysis['next'], 'creator')
+ assert_equal(analysis['error'], 'PSBT is not valid. Input 0 specifies invalid prevout')
+
+ assert_raises_rpc_error(-25, 'Missing inputs', self.nodes[0].walletprocesspsbt, 'cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
+
if __name__ == '__main__':
PSBTTest().main()
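The walletprocesspsbt calls above pass the optional arguments positionally; assuming the RPC's documented order (psbt, sign, sighashtype, bip32derivs), the calls read as follows (sketch only, where node is any test node with a wallet):

def update_then_sign(node, psbt_orig):
    updated = node.walletprocesspsbt(psbt_orig, False, "ALL")               # update only, keep bip32_derivs
    updated_bare = node.walletprocesspsbt(psbt_orig, False, "ALL", False)   # update only, drop bip32_derivs
    signed = node.walletprocesspsbt(psbt_orig)                              # defaults: sign=True
    return updated['psbt'], updated_bare['psbt'], signed['psbt']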
diff --git a/test/functional/rpc_scantxoutset.py b/test/functional/rpc_scantxoutset.py
index f31e4f43bd..c3d34be0dd 100755
--- a/test/functional/rpc_scantxoutset.py
+++ b/test/functional/rpc_scantxoutset.py
@@ -54,7 +54,7 @@ class ScantxoutsetTest(BitcoinTestFramework):
self.log.info("Stop node, remove wallet, mine again some blocks...")
self.stop_node(0)
- shutil.rmtree(os.path.join(self.nodes[0].datadir, "regtest", 'wallets'))
+ shutil.rmtree(os.path.join(self.nodes[0].datadir, self.chain, 'wallets'))
self.start_node(0)
self.nodes[0].generate(110)
diff --git a/test/functional/rpc_setban.py b/test/functional/rpc_setban.py
index b1d2b6f431..1cc1fb164b 100755
--- a/test/functional/rpc_setban.py
+++ b/test/functional/rpc_setban.py
@@ -26,7 +26,7 @@ class SetBanTests(BitcoinTestFramework):
self.nodes[1].setban("127.0.0.1", "add")
# Node 0 should not be able to reconnect
- with self.nodes[1].assert_debug_log(expected_msgs=['dropped (banned)\n'], timeout=5):
+ with self.nodes[1].assert_debug_log(expected_msgs=['dropped (banned)\n'], timeout=50):
self.restart_node(1, [])
self.nodes[0].addnode("127.0.0.1:" + str(p2p_port(1)), "onetry")
diff --git a/test/functional/test_framework/address.py b/test/functional/test_framework/address.py
index 97585fe054..6a7e91216a 100644
--- a/test/functional/test_framework/address.py
+++ b/test/functional/test_framework/address.py
@@ -13,6 +13,8 @@ from . import segwit_addr
ADDRESS_BCRT1_UNSPENDABLE = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj'
ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR = 'addr(bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj)#juyq9d97'
+# Coins sent to this address can be spent with a witness stack of just OP_TRUE
+ADDRESS_BCRT1_P2WSH_OP_TRUE = 'bcrt1qft5p2uhsdcdc3l2ua4ap5qqfg4pjaqlp250x7us7a8qqhrxrxfsqseac85'
class AddressType(enum.Enum):
diff --git a/test/functional/test_framework/descriptors.py b/test/functional/test_framework/descriptors.py
index 29482ce01e..46b405749b 100644
--- a/test/functional/test_framework/descriptors.py
+++ b/test/functional/test_framework/descriptors.py
@@ -4,6 +4,8 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utility functions related to output descriptors"""
+import re
+
INPUT_CHARSET = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "
CHECKSUM_CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
GENERATOR = [0xf5dee51989, 0xa9fdca3312, 0x1bab10e32d, 0x3706b1677a, 0x644d626ffd]
@@ -53,3 +55,10 @@ def descsum_check(s, require=True):
return False
symbols = descsum_expand(s[:-9]) + [CHECKSUM_CHARSET.find(x) for x in s[-8:]]
return descsum_polymod(symbols) == 1
+
+def drop_origins(s):
+ '''Drop the key origins from a descriptor'''
+ desc = re.sub(r'\[.+?\]', '', s)
+ if '#' in s:
+ desc = desc[:desc.index('#')]
+ return descsum_create(desc)
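drop_origins() strips every [fingerprint/path] origin block, cuts off the old checksum, and appends a freshly computed one. A small usage example, assuming the test framework is importable and using placeholder key strings rather than real pubkeys:

from test_framework.descriptors import descsum_create, drop_origins

with_origins = "sh(multi(2,[deadbeef/44'/0'/0']key1,key2))#oldsum"
assert drop_origins(with_origins) == descsum_create("sh(multi(2,key1,key2))")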
diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py
index 25520a2151..4f7a9a8b13 100755
--- a/test/functional/test_framework/messages.py
+++ b/test/functional/test_framework/messages.py
@@ -1105,17 +1105,17 @@ class msg_tx:
self.tx.deserialize(f)
def serialize(self):
- return self.tx.serialize_without_witness()
+ return self.tx.serialize_with_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
-class msg_witness_tx(msg_tx):
+class msg_no_witness_tx(msg_tx):
__slots__ = ()
def serialize(self):
- return self.tx.serialize_with_witness()
+ return self.tx.serialize_without_witness()
class msg_block:
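With this rename, msg_tx now serializes with witness data by default and msg_no_witness_tx is the explicit opt-out, matching BIP 144, where witness serialization inserts a marker byte 0x00 and a flag byte 0x01 right after the 4-byte version. A heuristic sketch of that framing (not part of the framework):

def has_witness_marker(raw_tx_bytes):
    # [nVersion][0x00][0x01][vin][vout][witness...][nLockTime] for witness serialization;
    # a legacy tx with zero inputs would also match, hence only a heuristic.
    return len(raw_tx_bytes) > 5 and raw_tx_bytes[4] == 0x00 and raw_tx_bytes[5] == 0x01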
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index da92c6325a..e36fb350c6 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -369,7 +369,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
# Public helper methods. These can be accessed by the subclass test scripts.
- def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
+ def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None, binary_cli=None, versions=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
@@ -380,11 +380,17 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
+ if versions is None:
+ versions = [None] * num_nodes
if binary is None:
binary = [self.options.bitcoind] * num_nodes
+ if binary_cli is None:
+ binary_cli = [self.options.bitcoincli] * num_nodes
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
+ assert_equal(len(versions), num_nodes)
assert_equal(len(binary), num_nodes)
+ assert_equal(len(binary_cli), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(
i,
@@ -393,7 +399,8 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
rpchost=rpchost,
timewait=self.rpc_timeout,
bitcoind=binary[i],
- bitcoin_cli=self.options.bitcoincli,
+ bitcoin_cli=binary_cli[i],
+ version=versions[i],
coverage_dir=self.options.coveragedir,
cwd=self.options.tmpdir,
extra_conf=extra_confs[i],
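add_nodes() now accepts per-node versions and binary_cli lists, which is what lets feature_backwards_compatibility.py (added to the runner below) mix a current node with previous-release binaries. A hypothetical helper showing the new arguments; the paths and the 180100 (v0.18.1-style) version number are placeholders:

def add_mixed_version_nodes(test, old_bitcoind, old_cli):
    test.add_nodes(
        2,
        versions=[None, 180100],                      # None = binary built from this tree
        binary=[test.options.bitcoind, old_bitcoind],
        binary_cli=[test.options.bitcoincli, old_cli],
    )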
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index e5c77ae5fa..c7559ac7c8 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -60,7 +60,7 @@ class TestNode():
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
- def __init__(self, i, datadir, *, chain, rpchost, timewait, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False):
+ def __init__(self, i, datadir, *, chain, rpchost, timewait, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None):
"""
Kwargs:
start_perf (bool): If True, begin profiling the node with `perf` as soon as
@@ -84,6 +84,7 @@ class TestNode():
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
+ self.version = version
# Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
# This means that starting a bitcoind using the temp dir to debug a failed test won't
# spam debug.log.
@@ -91,7 +92,6 @@ class TestNode():
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
- "-logthreadnames",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
@@ -107,6 +107,9 @@ class TestNode():
"--gen-suppressions=all", "--exit-on-first-error=yes",
"--error-exitcode=1", "--quiet"] + self.args
+ if self.version is None or self.version >= 190000:
+ self.args.append("-logthreadnames")
+
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.start_perf = start_perf
@@ -254,7 +257,11 @@ class TestNode():
return
self.log.debug("Stopping node")
try:
- self.stop(wait=wait)
+ # Do not use wait argument when testing older nodes, e.g. in feature_backwards_compatibility.py
+ if self.version is None or self.version >= 180000:
+ self.stop(wait=wait)
+ else:
+ self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
@@ -298,7 +305,9 @@ class TestNode():
wait_until(self.is_node_stopped, timeout=timeout)
@contextlib.contextmanager
- def assert_debug_log(self, expected_msgs, timeout=2):
+ def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
+ if unexpected_msgs is None:
+ unexpected_msgs = []
time_end = time.time() + timeout
debug_log = os.path.join(self.datadir, self.chain, 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
@@ -313,6 +322,9 @@ class TestNode():
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
+ for unexpected_msg in unexpected_msgs:
+ if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE):
+ self._raise_assertion_error('Unexpected message "{}" partially matches log:\n\n{}\n\n'.format(unexpected_msg, print_log))
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
found = False
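assert_debug_log() now also takes unexpected_msgs and raises if any of those fragments appear in the log window covered by the context manager. A usage sketch with placeholder log fragments and a placeholder action:

def assert_relay_logged(node, action):
    with node.assert_debug_log(expected_msgs=["Force relaying tx"],
                               unexpected_msgs=["not accepted"],   # placeholder fragment
                               timeout=5):
        action()  # whatever the test expects to trigger the log line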
diff --git a/test/functional/test_framework/wallet_util.py b/test/functional/test_framework/wallet_util.py
index 881df80f64..eb537015fb 100755
--- a/test/functional/test_framework/wallet_util.py
+++ b/test/functional/test_framework/wallet_util.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2018-2019 The Bitcoin Core developers
+# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful util functions for testing the wallet"""
@@ -88,11 +88,6 @@ def get_multisig(node):
p2sh_p2wsh_script=CScript([OP_HASH160, witness_script, OP_EQUAL]).hex(),
p2sh_p2wsh_addr=script_to_p2sh_p2wsh(script_code))
-def labels_value(name="", purpose="receive"):
- """Generate a getaddressinfo labels array from a name and purpose.
- Often used as the value of a labels kwarg for calling test_address below."""
- return [{"name": name, "purpose": purpose}]
-
def test_address(node, address, **kwargs):
"""Get address info for `address` and test whether the returned values are as expected."""
addr_info = node.getaddressinfo(address)
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 110733c529..7edb36c151 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -158,6 +158,7 @@ BASE_SCRIPTS = [
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
+ 'feature_backwards_compatibility.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_getblockfilter.py',
@@ -174,6 +175,7 @@ BASE_SCRIPTS = [
'wallet_balance.py',
'feature_nulldummy.py',
'mempool_accept.py',
+ 'mempool_expiry.py',
'wallet_import_rescan.py',
'wallet_import_with_label.py',
'rpc_bind.py --ipv4',
@@ -204,6 +206,7 @@ BASE_SCRIPTS = [
'p2p_dos_header_tree.py',
'p2p_unrequested_blocks.py',
'feature_includeconf.py',
+ 'feature_asmap.py',
'rpc_deriveaddresses.py',
'rpc_deriveaddresses.py --usecli',
'rpc_scantxoutset.py',
@@ -212,6 +215,9 @@ BASE_SCRIPTS = [
'p2p_permissions.py',
'feature_blocksdir.py',
'feature_config_args.py',
+ 'rpc_getaddressinfo_labels_purpose_deprecation.py',
+ 'rpc_getaddressinfo_label_deprecation.py',
+ 'rpc_getdescriptorinfo.py',
'rpc_help.py',
'feature_help.py',
'feature_shutdown.py',
diff --git a/test/functional/tool_wallet.py b/test/functional/tool_wallet.py
index 32ef257456..d2629ff1ed 100755
--- a/test/functional/tool_wallet.py
+++ b/test/functional/tool_wallet.py
@@ -27,7 +27,7 @@ class ToolWalletTest(BitcoinTestFramework):
def bitcoin_wallet_process(self, *args):
binary = self.config["environment"]["BUILDDIR"] + '/src/bitcoin-wallet' + self.config["environment"]["EXEEXT"]
- args = ['-datadir={}'.format(self.nodes[0].datadir), '-regtest'] + list(args)
+ args = ['-datadir={}'.format(self.nodes[0].datadir), '-chain=%s' % self.chain] + list(args)
return subprocess.Popen([binary] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
def assert_raises_tool_error(self, error, *args):
@@ -198,7 +198,7 @@ class ToolWalletTest(BitcoinTestFramework):
self.log.debug('Wallet file shasum unchanged\n')
def run_test(self):
- self.wallet_path = os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat')
+ self.wallet_path = os.path.join(self.nodes[0].datadir, self.chain, 'wallets', 'wallet.dat')
self.test_invalid_tool_commands_and_args()
# Warning: The following tests are order-dependent.
self.test_tool_wallet_info()
diff --git a/test/functional/wallet_address_types.py b/test/functional/wallet_address_types.py
index f08606e622..79b6db986b 100755
--- a/test/functional/wallet_address_types.py
+++ b/test/functional/wallet_address_types.py
@@ -82,7 +82,7 @@ class AddressTypeTest(BitcoinTestFramework):
]
# whitelist all peers to speed up tx relay / mempool sync
for args in self.extra_args:
- args.append("-whitelist=127.0.0.1")
+ args.append("-whitelist=noban@127.0.0.1")
self.supports_cli = False
def skip_test_if_missing_module(self):
diff --git a/test/functional/wallet_avoidreuse.py b/test/functional/wallet_avoidreuse.py
index 3cfb5a648b..8e2dc03ac2 100755
--- a/test/functional/wallet_avoidreuse.py
+++ b/test/functional/wallet_avoidreuse.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2018-2019 The Bitcoin Core developers
+# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the avoid_reuse and setwalletflag features."""
@@ -70,7 +70,7 @@ class AvoidReuseTest(BitcoinTestFramework):
self.num_nodes = 2
# This test isn't testing txn relay/timing, so set whitelist on the
# peers for instant txn relay. This speeds up the test run time 2-3x.
- self.extra_args = [["-whitelist=127.0.0.1"]] * self.num_nodes
+ self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
@@ -86,7 +86,13 @@ class AvoidReuseTest(BitcoinTestFramework):
reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
self.test_fund_send_fund_senddirty()
reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
- self.test_fund_send_fund_send()
+ self.test_fund_send_fund_send("legacy")
+ reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
+ self.test_fund_send_fund_send("p2sh-segwit")
+ reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
+ self.test_fund_send_fund_send("bech32")
+ reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
+ self.test_getbalances_used()
def test_persistence(self):
'''Test that wallet files persist the avoid_reuse flag.'''
@@ -182,7 +188,7 @@ class AvoidReuseTest(BitcoinTestFramework):
assert_approx(self.nodes[1].getbalance(), 5, 0.001)
assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 5, 0.001)
- def test_fund_send_fund_send(self):
+ def test_fund_send_fund_send(self, second_addr_type):
'''
Test the simple case where [1] generates a new address A, then
[0] sends 10 BTC to A.
@@ -193,7 +199,7 @@ class AvoidReuseTest(BitcoinTestFramework):
'''
self.log.info("Test fund send fund send")
- fundaddr = self.nodes[1].getnewaddress()
+ fundaddr = self.nodes[1].getnewaddress(label="", address_type="legacy")
retaddr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(fundaddr, 10)
@@ -214,7 +220,19 @@ class AvoidReuseTest(BitcoinTestFramework):
# getbalances should show no used, 5 btc trusted
assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5})
- self.nodes[0].sendtoaddress(fundaddr, 10)
+ # For the second send, we transmute it to a related single-key address
+ # to make sure it's also detected as re-use
+ fund_spk = self.nodes[0].getaddressinfo(fundaddr)["scriptPubKey"]
+ fund_decoded = self.nodes[0].decodescript(fund_spk)
+ if second_addr_type == "p2sh-segwit":
+ new_fundaddr = fund_decoded["segwit"]["p2sh-segwit"]
+ elif second_addr_type == "bech32":
+ new_fundaddr = fund_decoded["segwit"]["addresses"][0]
+ else:
+ new_fundaddr = fundaddr
+ assert_equal(second_addr_type, "legacy")
+
+ self.nodes[0].sendtoaddress(new_fundaddr, 10)
self.nodes[0].generate(1)
self.sync_all()
@@ -240,5 +258,35 @@ class AvoidReuseTest(BitcoinTestFramework):
assert_approx(self.nodes[1].getbalance(), 1, 0.001)
assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 11, 0.001)
+ def test_getbalances_used(self):
+ '''
+ getbalances and listunspent should pick up on reused addresses
+ immediately, even for outputs sent to a reused address before the first
+ transaction spending from that address was created
+ '''
+ self.log.info("Test getbalances used category")
+
+ # node under test should be completely empty
+ assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0)
+
+ new_addr = self.nodes[1].getnewaddress()
+ ret_addr = self.nodes[0].getnewaddress()
+
+ # send multiple transactions, reusing one address
+ for _ in range(11):
+ self.nodes[0].sendtoaddress(new_addr, 1)
+
+ self.nodes[0].generate(1)
+ self.sync_all()
+
+ # send transaction that should not use all the available outputs
+ # per the current coin selection algorithm
+ self.nodes[1].sendtoaddress(ret_addr, 5)
+
+ # getbalances and listunspent should show the remaining outputs
+ # in the reused address as used/reused
+ assert_unspent(self.nodes[1], total_count=2, total_sum=6, reused_count=1, reused_sum=1)
+ assert_balances(self.nodes[1], mine={"used": 1, "trusted": 5})
+
if __name__ == '__main__':
AvoidReuseTest().main()
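The second-send transmutation above derives the p2sh-segwit or bech32 encoding of the same underlying key from decodescript, so address-reuse detection is exercised across encodings. The lookup can be factored into a small helper (a sketch mirroring the diff, not an existing framework function):

def transmute_address(node, addr, target_type):
    spk = node.getaddressinfo(addr)["scriptPubKey"]
    decoded = node.decodescript(spk)
    if target_type == "p2sh-segwit":
        return decoded["segwit"]["p2sh-segwit"]
    if target_type == "bech32":
        return decoded["segwit"]["addresses"][0]
    return addr  # "legacy": keep the original address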
diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py
index bb835dc811..fb80a06433 100755
--- a/test/functional/wallet_backup.py
+++ b/test/functional/wallet_backup.py
@@ -50,10 +50,10 @@ class WalletBackupTest(BitcoinTestFramework):
# nodes 1, 2,3 are spenders, let's give them a keypool=100
# whitelist all peers to speed up tx relay / mempool sync
self.extra_args = [
- ["-keypool=100", "-whitelist=127.0.0.1"],
- ["-keypool=100", "-whitelist=127.0.0.1"],
- ["-keypool=100", "-whitelist=127.0.0.1"],
- ["-whitelist=127.0.0.1"]
+ ["-whitelist=noban@127.0.0.1", "-keypool=100"],
+ ["-whitelist=noban@127.0.0.1", "-keypool=100"],
+ ["-whitelist=noban@127.0.0.1", "-keypool=100"],
+ ["-whitelist=noban@127.0.0.1"],
]
self.rpc_timeout = 120
@@ -107,9 +107,9 @@ class WalletBackupTest(BitcoinTestFramework):
self.stop_node(2)
def erase_three(self):
- os.remove(os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'))
- os.remove(os.path.join(self.nodes[1].datadir, 'regtest', 'wallets', 'wallet.dat'))
- os.remove(os.path.join(self.nodes[2].datadir, 'regtest', 'wallets', 'wallet.dat'))
+ os.remove(os.path.join(self.nodes[0].datadir, self.chain, 'wallets', 'wallet.dat'))
+ os.remove(os.path.join(self.nodes[1].datadir, self.chain, 'wallets', 'wallet.dat'))
+ os.remove(os.path.join(self.nodes[2].datadir, self.chain, 'wallets', 'wallet.dat'))
def run_test(self):
self.log.info("Generating initial blockchain")
@@ -167,13 +167,13 @@ class WalletBackupTest(BitcoinTestFramework):
self.erase_three()
# Start node2 with no chain
- shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks'))
- shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'chainstate'))
+ shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks'))
+ shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate'))
# Restore wallets from backup
- shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'))
- shutil.copyfile(os.path.join(self.nodes[1].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, 'regtest', 'wallets', 'wallet.dat'))
- shutil.copyfile(os.path.join(self.nodes[2].datadir, 'wallet.bak'), os.path.join(self.nodes[2].datadir, 'regtest', 'wallets', 'wallet.dat'))
+ shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[0].datadir, self.chain, 'wallets', 'wallet.dat'))
+ shutil.copyfile(os.path.join(self.nodes[1].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, self.chain, 'wallets', 'wallet.dat'))
+ shutil.copyfile(os.path.join(self.nodes[2].datadir, 'wallet.bak'), os.path.join(self.nodes[2].datadir, self.chain, 'wallets', 'wallet.dat'))
self.log.info("Re-starting nodes")
self.start_three()
@@ -188,8 +188,8 @@ class WalletBackupTest(BitcoinTestFramework):
self.erase_three()
#start node2 with no chain
- shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks'))
- shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'chainstate'))
+ shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks'))
+ shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate'))
self.start_three()
@@ -209,10 +209,10 @@ class WalletBackupTest(BitcoinTestFramework):
# Backup to source wallet file must fail
sourcePaths = [
- os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'),
- os.path.join(self.nodes[0].datadir, 'regtest', '.', 'wallets', 'wallet.dat'),
- os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', ''),
- os.path.join(self.nodes[0].datadir, 'regtest', 'wallets')]
+ os.path.join(self.nodes[0].datadir, self.chain, 'wallets', 'wallet.dat'),
+ os.path.join(self.nodes[0].datadir, self.chain, '.', 'wallets', 'wallet.dat'),
+ os.path.join(self.nodes[0].datadir, self.chain, 'wallets', ''),
+ os.path.join(self.nodes[0].datadir, self.chain, 'wallets')]
for sourcePath in sourcePaths:
assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py
index 2606afa0e4..15746d312c 100755
--- a/test/functional/wallet_basic.py
+++ b/test/functional/wallet_basic.py
@@ -15,10 +15,7 @@ from test_framework.util import (
connect_nodes,
wait_until,
)
-from test_framework.wallet_util import (
- labels_value,
- test_address,
-)
+from test_framework.wallet_util import test_address
class WalletTest(BitcoinTestFramework):
@@ -395,7 +392,7 @@ class WalletTest(BitcoinTestFramework):
for label in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getnewaddress()
self.nodes[0].setlabel(addr, label)
- test_address(self.nodes[0], addr, label=label, labels=labels_value(name=label))
+ test_address(self.nodes[0], addr, labels=[label])
assert label in self.nodes[0].listlabels()
self.nodes[0].rpc.ensure_ascii = True # restore to default
diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py
index 0c08655833..336e246e33 100755
--- a/test/functional/wallet_bumpfee.py
+++ b/test/functional/wallet_bumpfee.py
@@ -71,27 +71,29 @@ class BumpFeeTest(BitcoinTestFramework):
test_simple_bumpfee_succeeds(self, "default", rbf_node, peer_node, dest_address)
test_simple_bumpfee_succeeds(self, "fee_rate", rbf_node, peer_node, dest_address)
test_feerate_args(self, rbf_node, peer_node, dest_address)
- test_segwit_bumpfee_succeeds(rbf_node, dest_address)
- test_nonrbf_bumpfee_fails(peer_node, dest_address)
- test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
- test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
- test_small_output_fails(rbf_node, dest_address)
- test_dust_to_fee(rbf_node, dest_address)
- test_settxfee(rbf_node, dest_address)
- test_rebumping(rbf_node, dest_address)
- test_rebumping_not_replaceable(rbf_node, dest_address)
- test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
- test_bumpfee_metadata(rbf_node, dest_address)
- test_locked_wallet_fails(rbf_node, dest_address)
- test_change_script_match(rbf_node, dest_address)
+ test_segwit_bumpfee_succeeds(self, rbf_node, dest_address)
+ test_nonrbf_bumpfee_fails(self, peer_node, dest_address)
+ test_notmine_bumpfee_fails(self, rbf_node, peer_node, dest_address)
+ test_bumpfee_with_descendant_fails(self, rbf_node, rbf_node_address, dest_address)
+ test_small_output_fails(self, rbf_node, dest_address)
+ test_dust_to_fee(self, rbf_node, dest_address)
+ test_settxfee(self, rbf_node, dest_address)
+ test_watchonly_psbt(self, peer_node, rbf_node, dest_address)
+ test_rebumping(self, rbf_node, dest_address)
+ test_rebumping_not_replaceable(self, rbf_node, dest_address)
+ test_unconfirmed_not_spendable(self, rbf_node, rbf_node_address)
+ test_bumpfee_metadata(self, rbf_node, dest_address)
+ test_locked_wallet_fails(self, rbf_node, dest_address)
+ test_change_script_match(self, rbf_node, dest_address)
test_maxtxfee_fails(self, rbf_node, dest_address)
# These tests wipe out a number of utxos that are expected in other tests
- test_small_output_with_feerate_succeeds(rbf_node, dest_address)
- test_no_more_inputs_fails(rbf_node, dest_address)
+ test_small_output_with_feerate_succeeds(self, rbf_node, dest_address)
+ test_no_more_inputs_fails(self, rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
+ self.log.info('Test simple bumpfee')
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
self.sync_mempools((rbf_node, peer_node))
@@ -103,6 +105,7 @@ def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] > -rbftx["fee"]
assert_equal(bumped_tx["origfee"], -rbftx["fee"])
+ assert "psbt" not in bumped_tx
# check that bumped_tx propagates, original tx was evicted and has a wallet conflict
self.sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
@@ -117,6 +120,7 @@ def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_feerate_args(self, rbf_node, peer_node, dest_address):
+ self.log.info('Test feerate args')
rbfid = spend_one_input(rbf_node, dest_address)
self.sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
@@ -133,7 +137,8 @@ def test_feerate_args(self, rbf_node, peer_node, dest_address):
assert_raises_rpc_error(-4, "is too high (cannot be higher than", rbf_node.bumpfee, rbfid, {"fee_rate":1})
-def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
+def test_segwit_bumpfee_succeeds(self, rbf_node, dest_address):
+ self.log.info('Test that segwit-sourcing bumpfee works')
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
@@ -163,14 +168,14 @@ def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
assert rbfid not in rbf_node.getrawmempool()
-def test_nonrbf_bumpfee_fails(peer_node, dest_address):
- # cannot replace a non RBF transaction (from node which did not enable RBF)
+def test_nonrbf_bumpfee_fails(self, peer_node, dest_address):
+ self.log.info('Test that we cannot replace a non RBF transaction')
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
-def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
- # cannot bump fee unless the tx has only inputs that we own.
+def test_notmine_bumpfee_fails(self, rbf_node, peer_node, dest_address):
+ self.log.info('Test that the fee cannot be bumped if non-owned inputs are included')
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
@@ -190,8 +195,8 @@ def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
rbf_node.bumpfee, rbfid)
-def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
- # cannot bump fee if the transaction has a descendant
+def test_bumpfee_with_descendant_fails(self, rbf_node, rbf_node_address, dest_address):
+ self.log.info('Test that the fee cannot be bumped when the transaction has a descendant')
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
@@ -199,7 +204,8 @@ def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
-def test_small_output_fails(rbf_node, dest_address):
+def test_small_output_fails(self, rbf_node, dest_address):
+ self.log.info('Test totalFee bump with small output fails')
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.bumpfee(rbfid, {"totalFee": 50000})
@@ -207,7 +213,8 @@ def test_small_output_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
-def test_small_output_with_feerate_succeeds(rbf_node, dest_address):
+def test_small_output_with_feerate_succeeds(self, rbf_node, dest_address):
+ self.log.info('Test that a small output with a feerate bump succeeds')
# Make sure additional inputs exist
rbf_node.generatetoaddress(101, rbf_node.getnewaddress())
@@ -215,9 +222,9 @@ def test_small_output_with_feerate_succeeds(rbf_node, dest_address):
input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
assert_equal(len(input_list), 1)
original_txin = input_list[0]
- # Keep bumping until we out-spend change output
+ self.log.info('Keep bumping until the transaction fee out-spends the non-destination value')
tx_fee = 0
- while tx_fee < Decimal("0.0005"):
+ while True:
input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
new_item = list(input_list)[0]
assert_equal(len(input_list), 1)
@@ -229,7 +236,11 @@ def test_small_output_with_feerate_succeeds(rbf_node, dest_address):
assert rbfid not in raw_pool
assert rbfid_new in raw_pool
rbfid = rbfid_new
- tx_fee = rbfid_new_details["origfee"]
+ tx_fee = rbfid_new_details["fee"]
+
+ # Stop once the fee exceeds the total input value not going to the destination (0.0005 BTC)
+ if tx_fee > Decimal('0.00050000'):
+ break
# input(s) have been added
final_input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
@@ -242,8 +253,8 @@ def test_small_output_with_feerate_succeeds(rbf_node, dest_address):
rbf_node.generatetoaddress(1, rbf_node.getnewaddress())
assert_equal(rbf_node.gettransaction(rbfid)["confirmations"], 1)
-def test_dust_to_fee(rbf_node, dest_address):
- # check that if output is reduced to dust, it will be converted to fee
+def test_dust_to_fee(self, rbf_node, dest_address):
+ self.log.info('Test that a bumped output reduced to dust is converted to fee')
# the bumped tx sets fee=49,900, but it converts to 50,000
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
@@ -255,7 +266,8 @@ def test_dust_to_fee(rbf_node, dest_address):
assert_equal(len(full_bumped_tx["vout"]), 1) # change output is eliminated
-def test_settxfee(rbf_node, dest_address):
+def test_settxfee(self, rbf_node, dest_address):
+ self.log.info('Test settxfee')
assert_raises_rpc_error(-8, "txfee cannot be less than min relay tx fee", rbf_node.settxfee, Decimal('0.000005'))
assert_raises_rpc_error(-8, "txfee cannot be less than wallet min fee", rbf_node.settxfee, Decimal('0.000015'))
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
@@ -270,35 +282,117 @@ def test_settxfee(rbf_node, dest_address):
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
-def test_maxtxfee_fails(test, rbf_node, dest_address):
+def test_maxtxfee_fails(self, rbf_node, dest_address):
+ self.log.info('Test that bumpfee fails when it hits -maxtxfee')
# size of bumped transaction (p2wpkh, 1 input, 2 outputs): 141 vbytes
# expected bumping feerate of 20 sats/vbyte => 141*20 sats = 0.00002820 btc
- test.restart_node(1, ['-maxtxfee=0.000025'] + test.extra_args[1])
+ self.restart_node(1, ['-maxtxfee=0.000025'] + self.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Unable to create transaction: Fee exceeds maximum configured by -maxtxfee", rbf_node.bumpfee, rbfid)
- test.restart_node(1, test.extra_args[1])
+ self.restart_node(1, self.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
-
-def test_rebumping(rbf_node, dest_address):
- # check that re-bumping the original tx fails, but bumping the bumper succeeds
+def test_watchonly_psbt(self, peer_node, rbf_node, dest_address):
+ self.log.info('Test that PSBT is returned for bumpfee in watchonly wallets')
+ priv_rec_desc = "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0/*)#rweraev0"
+ pub_rec_desc = rbf_node.getdescriptorinfo(priv_rec_desc)["descriptor"]
+ priv_change_desc = "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/*)#j6uzqvuh"
+ pub_change_desc = rbf_node.getdescriptorinfo(priv_change_desc)["descriptor"]
+ # Create a wallet with private keys that can sign PSBTs
+ rbf_node.createwallet(wallet_name="signer", disable_private_keys=False, blank=True)
+ signer = rbf_node.get_wallet_rpc("signer")
+ assert signer.getwalletinfo()['private_keys_enabled']
+ result = signer.importmulti([{
+ "desc": priv_rec_desc,
+ "timestamp": 0,
+ "range": [0,1],
+ "internal": False,
+ "keypool": False # Keys can only be imported to the keypool when private keys are disabled
+ },
+ {
+ "desc": priv_change_desc,
+ "timestamp": 0,
+ "range": [0, 0],
+ "internal": True,
+ "keypool": False
+ }])
+ assert_equal(result, [{'success': True}, {'success': True}])
+
+ # Create another wallet with just the public keys, which creates PSBTs
+ rbf_node.createwallet(wallet_name="watcher", disable_private_keys=True, blank=True)
+ watcher = rbf_node.get_wallet_rpc("watcher")
+ assert not watcher.getwalletinfo()['private_keys_enabled']
+
+ result = watcher.importmulti([{
+ "desc": pub_rec_desc,
+ "timestamp": 0,
+ "range": [0,10],
+ "internal": False,
+ "keypool": True,
+ "watchonly": True
+ },
+ {
+ "desc": pub_change_desc,
+ "timestamp": 0,
+ "range": [0, 10],
+ "internal": True,
+ "keypool": True,
+ "watchonly": True
+ }])
+ assert_equal(result, [{'success': True}, {'success': True}])
+
+ funding_address1 = watcher.getnewaddress(address_type='bech32')
+ funding_address2 = watcher.getnewaddress(address_type='bech32')
+ peer_node.sendmany("", {funding_address1: 0.001, funding_address2: 0.001})
+ peer_node.generate(1)
+ self.sync_all()
+
+ # Create single-input PSBT for transaction to be bumped
+ psbt = watcher.walletcreatefundedpsbt([], {dest_address:0.0005}, 0, {"feeRate": 0.00001}, True)['psbt']
+ psbt_signed = signer.walletprocesspsbt(psbt=psbt, sign=True, sighashtype="ALL", bip32derivs=True)
+ psbt_final = watcher.finalizepsbt(psbt_signed["psbt"])
+ original_txid = watcher.sendrawtransaction(psbt_final["hex"])
+ assert_equal(len(watcher.decodepsbt(psbt)["tx"]["vin"]), 1)
+
+ # Bump fee, obnoxiously high to add additional watchonly input
+ bumped_psbt = watcher.bumpfee(original_txid, {"fee_rate":0.005})
+ assert_greater_than(len(watcher.decodepsbt(bumped_psbt['psbt'])["tx"]["vin"]), 1)
+ assert "txid" not in bumped_psbt
+ assert_equal(bumped_psbt["origfee"], -watcher.gettransaction(original_txid)["fee"])
+ assert not watcher.finalizepsbt(bumped_psbt["psbt"])["complete"]
+
+ # Sign bumped transaction
+ bumped_psbt_signed = signer.walletprocesspsbt(psbt=bumped_psbt["psbt"], sign=True, sighashtype="ALL", bip32derivs=True)
+ bumped_psbt_final = watcher.finalizepsbt(bumped_psbt_signed["psbt"])
+ assert bumped_psbt_final["complete"]
+
+ # Broadcast bumped transaction
+ bumped_txid = watcher.sendrawtransaction(bumped_psbt_final["hex"])
+ assert bumped_txid in rbf_node.getrawmempool()
+ assert original_txid not in rbf_node.getrawmempool()
+
+ rbf_node.unloadwallet("watcher")
+ rbf_node.unloadwallet("signer")
+
+def test_rebumping(self, rbf_node, dest_address):
+ self.log.info('Test that re-bumping the original tx fails, but bumping the successor works')
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
-def test_rebumping_not_replaceable(rbf_node, dest_address):
- # check that re-bumping a non-replaceable bump tx fails
+def test_rebumping_not_replaceable(self, rbf_node, dest_address):
+ self.log.info('Test that re-bumping a non-replaceable bump tx fails')
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 20000})
-def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
- # check that unconfirmed outputs from bumped transactions are not spendable
+def test_unconfirmed_not_spendable(self, rbf_node, rbf_node_address):
+ self.log.info('Test that unconfirmed outputs from bumped txns are not spendable')
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
@@ -336,7 +430,8 @@ def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
-def test_bumpfee_metadata(rbf_node, dest_address):
+def test_bumpfee_metadata(self, rbf_node, dest_address):
+ self.log.info('Test that bumped txn metadata persists to new txn record')
assert(rbf_node.getbalance() < 49)
rbf_node.generatetoaddress(101, rbf_node.getnewaddress())
rbfid = rbf_node.sendtoaddress(dest_address, 49, "comment value", "to value")
@@ -346,15 +441,17 @@ def test_bumpfee_metadata(rbf_node, dest_address):
assert_equal(bumped_wtx["to"], "to value")
-def test_locked_wallet_fails(rbf_node, dest_address):
+def test_locked_wallet_fails(self, rbf_node, dest_address):
+ self.log.info('Test that locked wallet cannot bump txn')
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
-def test_change_script_match(rbf_node, dest_address):
- """Test that the same change addresses is used for the replacement transaction when possible."""
+def test_change_script_match(self, rbf_node, dest_address):
+ self.log.info('Test that the same change address is used for the replacement transaction when possible.')
+
def get_change_address(tx):
tx_details = rbf_node.getrawtransaction(tx, 1)
txout_addresses = [txout['scriptPubKey']['addresses'][0] for txout in tx_details["vout"]]
@@ -398,7 +495,8 @@ def submit_block_with_tx(node, tx):
node.submitblock(block.serialize().hex())
return block
-def test_no_more_inputs_fails(rbf_node, dest_address):
+def test_no_more_inputs_fails(self, rbf_node, dest_address):
+ self.log.info('Test that bumpfee fails when there are no available confirmed outputs')
# feerate rbf requires confirmed outputs when change output doesn't exist or is insufficient
rbf_node.generatetoaddress(1, dest_address)
# spend all funds, no change output
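
The -maxtxfee test above leans on the fee arithmetic spelled out in its comments: a bumped p2wpkh transaction with one input and two outputs is roughly 141 vbytes, so at the expected bump feerate of 20 sat/vbyte the new fee is 2,820 sat (0.00002820 BTC), which exceeds the -maxtxfee=0.000025 (2,500 sat) limit the node is restarted with. A minimal sketch of that calculation, using only the values quoted in the comments (constant names are illustrative, not from the repository):

# Fee arithmetic behind test_maxtxfee_fails (values from the comments above).
TX_VSIZE_VBYTES = 141           # p2wpkh, 1 input, 2 outputs
BUMP_FEERATE_SAT_PER_VB = 20    # expected bumping feerate
MAXTXFEE_SAT = 2500             # -maxtxfee=0.000025 BTC

expected_fee_sat = TX_VSIZE_VBYTES * BUMP_FEERATE_SAT_PER_VB    # 2820 sat
expected_fee_btc = expected_fee_sat / 100_000_000               # 0.00002820 BTC
assert expected_fee_sat > MAXTXFEE_SAT                          # hence bumpfee must fail
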
diff --git a/test/functional/wallet_createwallet.py b/test/functional/wallet_createwallet.py
index 048b3127ff..dabd78f66c 100755
--- a/test/functional/wallet_createwallet.py
+++ b/test/functional/wallet_createwallet.py
@@ -79,7 +79,7 @@ class CreateWalletTest(BitcoinTestFramework):
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress)
# Now set a seed and it should work. Wallet should also be encrypted
- w4.walletpassphrase('pass', 2)
+ w4.walletpassphrase('pass', 60)
w4.sethdseed()
w4.getnewaddress()
w4.getrawchangeaddress()
diff --git a/test/functional/wallet_dump.py b/test/functional/wallet_dump.py
index 53edf710b9..a39dfc7895 100755
--- a/test/functional/wallet_dump.py
+++ b/test/functional/wallet_dump.py
@@ -137,7 +137,7 @@ class WalletDumpTest(BitcoinTestFramework):
# encrypt wallet, restart, unlock and dump
self.nodes[0].encryptwallet('test')
- self.nodes[0].walletpassphrase('test', 10)
+ self.nodes[0].walletpassphrase('test', 100)
# Should be a no-op:
self.nodes[0].keypoolrefill()
self.nodes[0].dumpwallet(wallet_enc_dump)
diff --git a/test/functional/wallet_groups.py b/test/functional/wallet_groups.py
index 3cf8aaf3dc..f2fa1d3e40 100755
--- a/test/functional/wallet_groups.py
+++ b/test/functional/wallet_groups.py
@@ -16,7 +16,7 @@ class WalletGroupTest(BitcoinTestFramework):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], [], ['-avoidpartialspends']]
- self.rpc_timeout = 120
+ self.rpc_timeout = 240
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/wallet_hd.py b/test/functional/wallet_hd.py
index 575b501f50..7497475b67 100755
--- a/test/functional/wallet_hd.py
+++ b/test/functional/wallet_hd.py
@@ -68,11 +68,11 @@ class WalletHDTest(BitcoinTestFramework):
self.log.info("Restore backup ...")
self.stop_node(1)
- # we need to delete the complete regtest directory
+ # we need to delete the complete chain directory
# otherwise node1 would auto-recover all funds and flag the keypool keys as used
- shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "blocks"))
- shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "chainstate"))
- shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join(self.nodes[1].datadir, "regtest", "wallets", "wallet.dat"))
+ shutil.rmtree(os.path.join(self.nodes[1].datadir, self.chain, "blocks"))
+ shutil.rmtree(os.path.join(self.nodes[1].datadir, self.chain, "chainstate"))
+ shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join(self.nodes[1].datadir, self.chain, "wallets", "wallet.dat"))
self.start_node(1)
# Assert that derivation is deterministic
@@ -93,9 +93,9 @@ class WalletHDTest(BitcoinTestFramework):
# Try a RPC based rescan
self.stop_node(1)
- shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "blocks"))
- shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "chainstate"))
- shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join(self.nodes[1].datadir, "regtest", "wallets", "wallet.dat"))
+ shutil.rmtree(os.path.join(self.nodes[1].datadir, self.chain, "blocks"))
+ shutil.rmtree(os.path.join(self.nodes[1].datadir, self.chain, "chainstate"))
+ shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join(self.nodes[1].datadir, self.chain, "wallets", "wallet.dat"))
self.start_node(1, extra_args=self.extra_args[1])
connect_nodes(self.nodes[0], 1)
self.sync_all()
diff --git a/test/functional/wallet_import_with_label.py b/test/functional/wallet_import_with_label.py
index 651ecd18b2..6a9d2e8290 100755
--- a/test/functional/wallet_import_with_label.py
+++ b/test/functional/wallet_import_with_label.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2018-2019 The Bitcoin Core developers
+# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the behavior of RPC importprivkey on set and unset labels of
@@ -11,10 +11,7 @@ with and without a label.
"""
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.wallet_util import (
- labels_value,
- test_address,
-)
+from test_framework.wallet_util import test_address
class ImportWithLabel(BitcoinTestFramework):
@@ -39,8 +36,7 @@ class ImportWithLabel(BitcoinTestFramework):
address,
iswatchonly=True,
ismine=False,
- label=label,
- labels=labels_value(name=label))
+ labels=[label])
self.log.info(
"Import the watch-only address's private key without a "
@@ -48,11 +44,7 @@ class ImportWithLabel(BitcoinTestFramework):
)
priv_key = self.nodes[0].dumpprivkey(address)
self.nodes[1].importprivkey(priv_key)
-
- test_address(self.nodes[1],
- address,
- label=label,
- labels=labels_value(name=label))
+ test_address(self.nodes[1], address, labels=[label])
self.log.info(
"Test importaddress without label and importprivkey with label."
@@ -64,8 +56,7 @@ class ImportWithLabel(BitcoinTestFramework):
address2,
iswatchonly=True,
ismine=False,
- label="",
- labels=labels_value())
+ labels=[""])
self.log.info(
"Import the watch-only address's private key with a "
@@ -75,10 +66,7 @@ class ImportWithLabel(BitcoinTestFramework):
label2 = "Test Label 2"
self.nodes[1].importprivkey(priv_key2, label2)
- test_address(self.nodes[1],
- address2,
- label=label2,
- labels=labels_value(name=label2))
+ test_address(self.nodes[1], address2, labels=[label2])
self.log.info("Test importaddress with label and importprivkey with label.")
self.log.info("Import a watch-only address with a label.")
@@ -89,8 +77,7 @@ class ImportWithLabel(BitcoinTestFramework):
address3,
iswatchonly=True,
ismine=False,
- label=label3_addr,
- labels=labels_value(name=label3_addr))
+ labels=[label3_addr])
self.log.info(
"Import the watch-only address's private key with a "
@@ -100,10 +87,7 @@ class ImportWithLabel(BitcoinTestFramework):
label3_priv = "Test Label 3 for importprivkey"
self.nodes[1].importprivkey(priv_key3, label3_priv)
- test_address(self.nodes[1],
- address3,
- label=label3_priv,
- labels=labels_value(name=label3_priv))
+ test_address(self.nodes[1], address3, labels=[label3_priv])
self.log.info(
"Test importprivkey won't label new dests with the same "
@@ -117,8 +101,7 @@ class ImportWithLabel(BitcoinTestFramework):
address4,
iswatchonly=True,
ismine=False,
- label=label4_addr,
- labels=labels_value(name=label4_addr),
+ labels=[label4_addr],
embedded=None)
self.log.info(
@@ -131,15 +114,9 @@ class ImportWithLabel(BitcoinTestFramework):
self.nodes[1].importprivkey(priv_key4)
embedded_addr = self.nodes[1].getaddressinfo(address4)['embedded']['address']
- test_address(self.nodes[1],
- embedded_addr,
- label="",
- labels=labels_value())
+ test_address(self.nodes[1], embedded_addr, labels=[""])
- test_address(self.nodes[1],
- address4,
- label=label4_addr,
- labels=labels_value(name=label4_addr))
+ test_address(self.nodes[1], address4, labels=[label4_addr])
self.stop_nodes()
diff --git a/test/functional/wallet_importmulti.py b/test/functional/wallet_importmulti.py
index 5febac5998..f152fcd1a4 100755
--- a/test/functional/wallet_importmulti.py
+++ b/test/functional/wallet_importmulti.py
@@ -29,7 +29,6 @@ from test_framework.util import (
from test_framework.wallet_util import (
get_key,
get_multisig,
- labels_value,
test_address,
)
@@ -570,8 +569,7 @@ class ImportMultiTest(BitcoinTestFramework):
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=True,
- label=p2sh_p2wpkh_label,
- labels=labels_value(name=p2sh_p2wpkh_label))
+ labels=[p2sh_p2wpkh_label])
# Test ranged descriptor fails if range is not specified
xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
@@ -642,8 +640,7 @@ class ImportMultiTest(BitcoinTestFramework):
key.p2pkh_addr,
solvable=True,
ismine=False,
- label=p2pkh_label,
- labels=labels_value(name=p2pkh_label))
+ labels=[p2pkh_label])
# Test import fails if both desc and scriptPubKey are provided
key = get_key(self.nodes[0])
diff --git a/test/functional/wallet_keypool_topup.py b/test/functional/wallet_keypool_topup.py
index 2e70a9e0a5..829633a050 100755
--- a/test/functional/wallet_keypool_topup.py
+++ b/test/functional/wallet_keypool_topup.py
@@ -30,7 +30,7 @@ class KeypoolRestoreTest(BitcoinTestFramework):
self.skip_if_no_wallet()
def run_test(self):
- wallet_path = os.path.join(self.nodes[1].datadir, "regtest", "wallets", "wallet.dat")
+ wallet_path = os.path.join(self.nodes[1].datadir, self.chain, "wallets", "wallet.dat")
wallet_backup_path = os.path.join(self.nodes[1].datadir, "wallet.bak")
self.nodes[0].generate(101)
diff --git a/test/functional/wallet_labels.py b/test/functional/wallet_labels.py
index 33e067601e..337d2e55d9 100755
--- a/test/functional/wallet_labels.py
+++ b/test/functional/wallet_labels.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2016-2019 The Bitcoin Core developers
+# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test label RPCs.
@@ -13,10 +13,8 @@ from collections import defaultdict
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
-from test_framework.wallet_util import (
- labels_value,
- test_address,
-)
+from test_framework.wallet_util import test_address
+
class WalletLabelsTest(BitcoinTestFramework):
def set_test_params(self):
@@ -157,12 +155,7 @@ class Label:
if self.receive_address is not None:
assert self.receive_address in self.addresses
for address in self.addresses:
- test_address(
- node,
- address,
- label=self.name,
- labels=labels_value(name=self.name, purpose=self.purpose[address])
- )
+ test_address(node, address, labels=[self.name])
assert self.name in node.listlabels()
assert_equal(
node.getaddressesbylabel(self.name),
diff --git a/test/functional/wallet_listreceivedby.py b/test/functional/wallet_listreceivedby.py
index afd473306d..b0590b149a 100755
--- a/test/functional/wallet_listreceivedby.py
+++ b/test/functional/wallet_listreceivedby.py
@@ -11,10 +11,7 @@ from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
-from test_framework.wallet_util import (
- labels_value,
- test_address,
-)
+from test_framework.wallet_util import test_address
class ReceivedByTest(BitcoinTestFramework):
@@ -131,7 +128,7 @@ class ReceivedByTest(BitcoinTestFramework):
# set pre-state
label = ''
address = self.nodes[1].getnewaddress()
- test_address(self.nodes[1], address, label=label, labels=labels_value(name=label))
+ test_address(self.nodes[1], address, labels=[label])
received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel() if r["label"] == label][0]
balance_by_label = self.nodes[1].getreceivedbylabel(label)
diff --git a/test/functional/wallet_listsinceblock.py b/test/functional/wallet_listsinceblock.py
index 6f248c9bd3..229eda9806 100755
--- a/test/functional/wallet_listsinceblock.py
+++ b/test/functional/wallet_listsinceblock.py
@@ -2,7 +2,7 @@
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test the listsincelast RPC."""
+"""Test the listsinceblock RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import BIP125_SEQUENCE_NUMBER
@@ -38,6 +38,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
self.double_spends_filtered()
def test_no_blockhash(self):
+ self.log.info("Test no blockhash")
txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
blockhash, = self.nodes[2].generate(1)
blockheight = self.nodes[2].getblockheader(blockhash)['height']
@@ -63,6 +64,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
"transactions": txs})
def test_invalid_blockhash(self):
+ self.log.info("Test invalid blockhash")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
@@ -100,6 +102,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
This test only checks that [tx0] is present.
'''
+ self.log.info("Test reorg")
# Split network into two
self.split_network()
@@ -110,7 +113,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
# generate on both sides
lastblockhash = self.nodes[1].generate(6)[5]
self.nodes[2].generate(7)
- self.log.info('lastblockhash=%s' % (lastblockhash))
+ self.log.debug('lastblockhash={}'.format(lastblockhash))
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
@@ -155,6 +158,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
until the fork point, and to include all transactions that relate to the
node wallet.
'''
+ self.log.info("Test double spend")
self.sync_all()
@@ -234,6 +238,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
3. It is listed with a confirmation count of 2 (bb3, bb4), not
3 (aa1, aa2, aa3).
'''
+ self.log.info("Test double send")
self.sync_all()
@@ -302,6 +307,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
`listsinceblock` was returning conflicted transactions even if they
occurred before the specified cutoff blockhash
'''
+ self.log.info("Test spends filtered")
spending_node = self.nodes[2]
dest_address = spending_node.getnewaddress()
diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py
index 85d900f6cc..f2fa41b647 100755
--- a/test/functional/wallet_multiwallet.py
+++ b/test/functional/wallet_multiwallet.py
@@ -38,7 +38,7 @@ class MultiWalletTest(BitcoinTestFramework):
def run_test(self):
node = self.nodes[0]
- data_dir = lambda *p: os.path.join(node.datadir, 'regtest', *p)
+ data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
@@ -186,7 +186,7 @@ class MultiWalletTest(BitcoinTestFramework):
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
- assert_equal(batch[0]["result"]["chain"], "regtest")
+ assert_equal(batch[0]["result"]["chain"], self.chain)
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
diff --git a/test/functional/wallet_reorgsrestore.py b/test/functional/wallet_reorgsrestore.py
index f48018e9fb..497a5dd95e 100755
--- a/test/functional/wallet_reorgsrestore.py
+++ b/test/functional/wallet_reorgsrestore.py
@@ -90,7 +90,7 @@ class ReorgsRestoreTest(BitcoinTestFramework):
# Node0 wallet file is loaded on longest sync'ed node1
self.stop_node(1)
self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
- shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, 'regtest', 'wallet.dat'))
+ shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, self.chain, 'wallet.dat'))
self.start_node(1)
tx_after_reorg = self.nodes[1].gettransaction(txid)
# Check that normal confirmed tx is confirmed again but with different blockhash
diff --git a/test/fuzz/test_runner.py b/test/fuzz/test_runner.py
index bbdd047465..520a2b5a95 100755
--- a/test/fuzz/test_runner.py
+++ b/test/fuzz/test_runner.py
@@ -12,37 +12,6 @@ import sys
import subprocess
import logging
-# Fuzzers known to lack a seed corpus in https://github.com/bitcoin-core/qa-assets/tree/master/fuzz_seed_corpus
-FUZZERS_MISSING_CORPORA = [
- "addr_info_deserialize",
- "base_encode_decode",
- "block",
- "block_file_info_deserialize",
- "block_filter_deserialize",
- "block_header_and_short_txids_deserialize",
- "fee_rate_deserialize",
- "flat_file_pos_deserialize",
- "hex",
- "integer",
- "key_origin_info_deserialize",
- "merkle_block_deserialize",
- "out_point_deserialize",
- "parse_hd_keypath",
- "parse_numbers",
- "parse_script",
- "parse_univalue",
- "partial_merkle_tree_deserialize",
- "partially_signed_transaction_deserialize",
- "prefilled_transaction_deserialize",
- "psbt_input_deserialize",
- "psbt_output_deserialize",
- "pub_key_deserialize",
- "script_deserialize",
- "sub_net_deserialize",
- "tx_in",
- "tx_in_deserialize",
- "tx_out",
-]
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
@@ -59,6 +28,16 @@ def main():
help='If true, export coverage information to files in the seed corpus',
)
parser.add_argument(
+ '--valgrind',
+ action='store_true',
+ help='If true, run fuzzing binaries under the valgrind memory error detector',
+ )
+ parser.add_argument(
+ '-x',
+ '--exclude',
+ help="A comma-separated list of targets to exclude",
+ )
+ parser.add_argument(
'seed_dir',
help='The seed corpus to run on (must contain subfolders for each fuzz target).',
)
@@ -92,7 +71,7 @@ def main():
logging.error("No fuzz targets found")
sys.exit(1)
- logging.info("Fuzz targets found: {}".format(test_list_all))
+ logging.debug("{} fuzz target(s) found: {}".format(len(test_list_all), " ".join(sorted(test_list_all))))
args.target = args.target or test_list_all # By default run all
test_list_error = list(set(args.target).difference(set(test_list_all)))
@@ -101,7 +80,29 @@ def main():
test_list_selection = list(set(test_list_all).intersection(set(args.target)))
if not test_list_selection:
logging.error("No fuzz targets selected")
- logging.info("Fuzz targets selected: {}".format(test_list_selection))
+ if args.exclude:
+ for excluded_target in args.exclude.split(","):
+ if excluded_target not in test_list_selection:
+ logging.error("Target \"{}\" not found in current target list.".format(excluded_target))
+ continue
+ test_list_selection.remove(excluded_target)
+ test_list_selection.sort()
+
+ logging.info("{} of {} detected fuzz target(s) selected: {}".format(len(test_list_selection), len(test_list_all), " ".join(test_list_selection)))
+
+ test_list_seedless = []
+ for t in test_list_selection:
+ corpus_path = os.path.join(args.seed_dir, t)
+ if not os.path.exists(corpus_path) or len(os.listdir(corpus_path)) == 0:
+ test_list_seedless.append(t)
+ test_list_seedless.sort()
+ if test_list_seedless:
+ logging.info(
+ "Fuzzing harnesses lacking a seed corpus: {}".format(
+ " ".join(test_list_seedless)
+ )
+ )
+ logging.info("Please consider adding a fuzz seed corpus at https://github.com/bitcoin-core/qa-assets")
try:
help_output = subprocess.run(
@@ -109,7 +110,7 @@ def main():
os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', test_list_selection[0]),
'-help=1',
],
- timeout=10,
+ timeout=20,
check=True,
stderr=subprocess.PIPE,
universal_newlines=True,
@@ -126,25 +127,34 @@ def main():
test_list=test_list_selection,
build_dir=config["environment"]["BUILDDIR"],
export_coverage=args.export_coverage,
+ use_valgrind=args.valgrind,
)
-def run_once(*, corpus, test_list, build_dir, export_coverage):
+def run_once(*, corpus, test_list, build_dir, export_coverage, use_valgrind):
for t in test_list:
corpus_path = os.path.join(corpus, t)
- if t in FUZZERS_MISSING_CORPORA:
- os.makedirs(corpus_path, exist_ok=True)
+ os.makedirs(corpus_path, exist_ok=True)
args = [
os.path.join(build_dir, 'src', 'test', 'fuzz', t),
'-runs=1',
- '-detect_leaks=0',
corpus_path,
]
+ if use_valgrind:
+ args = ['valgrind', '--quiet', '--error-exitcode=1'] + args
logging.debug('Run {} with args {}'.format(t, args))
result = subprocess.run(args, stderr=subprocess.PIPE, universal_newlines=True)
output = result.stderr
logging.debug('Output: {}'.format(output))
- result.check_returncode()
+ try:
+ result.check_returncode()
+ except subprocess.CalledProcessError as e:
+ if e.stdout:
+ logging.info(e.stdout)
+ if e.stderr:
+ logging.info(e.stderr)
+ logging.info("Target \"{}\" failed with exit code {}: {}".format(t, e.returncode, " ".join(args)))
+ sys.exit(1)
if not export_coverage:
continue
for l in output.splitlines():
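
For readers skimming the test_runner.py changes above, the new --exclude and --valgrind options come down to two small transformations: removing excluded names from the selected target list, and prefixing each fuzz invocation with a valgrind command line. A simplified, self-contained sketch of both steps follows (target names and paths are placeholders, not taken from the repository):

def select_targets(all_targets, exclude_csv):
    # Drop excluded targets, mirroring the warning behaviour shown in the diff above.
    selection = sorted(all_targets)
    for excluded in (exclude_csv.split(',') if exclude_csv else []):
        if excluded not in selection:
            print('Target "{}" not found in current target list.'.format(excluded))
            continue
        selection.remove(excluded)
    return selection

def build_fuzz_command(binary_path, corpus_path, use_valgrind=False):
    # Same shape as the run_once() argument construction: one run over the corpus,
    # optionally wrapped in valgrind with a non-zero exit code on memory errors.
    args = [binary_path, '-runs=1', corpus_path]
    if use_valgrind:
        args = ['valgrind', '--quiet', '--error-exitcode=1'] + args
    return args

# Placeholder example; the real runner passes the resulting list to subprocess.run().
targets = select_targets(['hex', 'integer', 'script_deserialize'], 'integer')
cmd = build_fuzz_command('src/test/fuzz/hex', 'qa-assets/fuzz_seed_corpus/hex', use_valgrind=True)
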
diff --git a/test/lint/README.md b/test/lint/README.md
index f415d619ee..6b95cc3540 100644
--- a/test/lint/README.md
+++ b/test/lint/README.md
@@ -21,6 +21,7 @@ maintained:
* for `src/leveldb`: https://github.com/bitcoin-core/leveldb.git (branch bitcoin-fork)
* for `src/univalue`: https://github.com/bitcoin-core/univalue.git (branch master)
* for `src/crypto/ctaes`: https://github.com/bitcoin-core/ctaes.git (branch master)
+* for `src/crc32c`: https://github.com/google/crc32c.git (branch master)
Usage: `git-subtree-check.sh DIR (COMMIT)`
diff --git a/test/lint/extended-lint-all.sh b/test/lint/extended-lint-all.sh
index 1cdd52621b..65c51e02f5 100755
--- a/test/lint/extended-lint-all.sh
+++ b/test/lint/extended-lint-all.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
#
-# Copyright (c) 2019-2018 The Bitcoin Core developers
+# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
diff --git a/test/lint/extended-lint-cppcheck.sh b/test/lint/extended-lint-cppcheck.sh
index 06c11ebba9..ae18d74ebf 100755
--- a/test/lint/extended-lint-cppcheck.sh
+++ b/test/lint/extended-lint-cppcheck.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
#
-# Copyright (c) 2019-2018 The Bitcoin Core developers
+# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
@@ -65,8 +65,8 @@ function join_array {
ENABLED_CHECKS_REGEXP=$(join_array "|" "${ENABLED_CHECKS[@]}")
IGNORED_WARNINGS_REGEXP=$(join_array "|" "${IGNORED_WARNINGS[@]}")
-WARNINGS=$(git ls-files -- "*.cpp" "*.h" ":(exclude)src/leveldb/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" | \
- xargs cppcheck --enable=all -j "$(getconf _NPROCESSORS_ONLN)" --language=c++ --std=c++11 --template=gcc -D__cplusplus -DCLIENT_VERSION_BUILD -DCLIENT_VERSION_IS_RELEASE -DCLIENT_VERSION_MAJOR -DCLIENT_VERSION_MINOR -DCLIENT_VERSION_REVISION -DCOPYRIGHT_YEAR -DDEBUG -DHAVE_WORKING_BOOST_SLEEP_FOR -I src/ -q 2>&1 | sort -u | \
+WARNINGS=$(git ls-files -- "*.cpp" "*.h" ":(exclude)src/leveldb/" ":(exclude)src/crc32c/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" | \
+ xargs cppcheck --enable=all -j "$(getconf _NPROCESSORS_ONLN)" --language=c++ --std=c++11 --template=gcc -D__cplusplus -DCLIENT_VERSION_BUILD -DCLIENT_VERSION_IS_RELEASE -DCLIENT_VERSION_MAJOR -DCLIENT_VERSION_MINOR -DCLIENT_VERSION_REVISION -DCOPYRIGHT_YEAR -DDEBUG -I src/ -q 2>&1 | sort -u | \
grep -E "${ENABLED_CHECKS_REGEXP}" | \
grep -vE "${IGNORED_WARNINGS_REGEXP}")
if [[ ${WARNINGS} != "" ]]; then
diff --git a/test/lint/lint-circular-dependencies.sh b/test/lint/lint-circular-dependencies.sh
index ee17e7912d..bbd94dd6c7 100755
--- a/test/lint/lint-circular-dependencies.sh
+++ b/test/lint/lint-circular-dependencies.sh
@@ -13,11 +13,7 @@ EXPECTED_CIRCULAR_DEPENDENCIES=(
"index/txindex -> validation -> index/txindex"
"policy/fees -> txmempool -> policy/fees"
"qt/addresstablemodel -> qt/walletmodel -> qt/addresstablemodel"
- "qt/bantablemodel -> qt/clientmodel -> qt/bantablemodel"
- "qt/bitcoingui -> qt/utilitydialog -> qt/bitcoingui"
"qt/bitcoingui -> qt/walletframe -> qt/bitcoingui"
- "qt/bitcoingui -> qt/walletview -> qt/bitcoingui"
- "qt/clientmodel -> qt/peertablemodel -> qt/clientmodel"
"qt/recentrequeststablemodel -> qt/walletmodel -> qt/recentrequeststablemodel"
"qt/sendcoinsdialog -> qt/walletmodel -> qt/sendcoinsdialog"
"qt/transactiontablemodel -> qt/walletmodel -> qt/transactiontablemodel"
@@ -25,7 +21,6 @@ EXPECTED_CIRCULAR_DEPENDENCIES=(
"wallet/fees -> wallet/wallet -> wallet/fees"
"wallet/wallet -> wallet/walletdb -> wallet/wallet"
"policy/fees -> txmempool -> validation -> policy/fees"
- "wallet/scriptpubkeyman -> wallet/wallet -> wallet/scriptpubkeyman"
)
EXIT_CODE=0
diff --git a/test/lint/lint-format-strings.py b/test/lint/lint-format-strings.py
index 99127e01f8..2870432bff 100755
--- a/test/lint/lint-format-strings.py
+++ b/test/lint/lint-format-strings.py
@@ -17,12 +17,13 @@ FALSE_POSITIVES = [
("src/index/base.cpp", "FatalError(const char* fmt, const Args&... args)"),
("src/netbase.cpp", "LogConnectFailure(bool manual_connection, const char* fmt, const Args&... args)"),
("src/util/system.cpp", "strprintf(_(COPYRIGHT_HOLDERS).translated, COPYRIGHT_HOLDERS_SUBSTITUTION)"),
+ ("src/validationinterface.cpp", "LogPrint(BCLog::VALIDATION, fmt \"\\n\", __VA_ARGS__)"),
("src/wallet/wallet.h", "WalletLogPrintf(std::string fmt, Params... parameters)"),
("src/wallet/wallet.h", "LogPrintf((\"%s \" + fmt).c_str(), GetDisplayName(), parameters...)"),
+ ("src/wallet/scriptpubkeyman.h", "WalletLogPrintf(std::string fmt, Params... parameters)"),
+ ("src/wallet/scriptpubkeyman.h", "LogPrintf((\"%s \" + fmt).c_str(), m_storage.GetDisplayName(), parameters...)"),
("src/logging.h", "LogPrintf(const char* fmt, const Args&... args)"),
("src/wallet/scriptpubkeyman.h", "WalletLogPrintf(const std::string& fmt, const Params&... parameters)"),
- ("src/wallet/scriptpubkeyman.cpp", "WalletLogPrintf(fmt, parameters...)"),
- ("src/wallet/scriptpubkeyman.cpp", "WalletLogPrintf(const std::string& fmt, const Params&... parameters)"),
]
diff --git a/test/lint/lint-format-strings.sh b/test/lint/lint-format-strings.sh
index 6cb486689b..184c3682c8 100755
--- a/test/lint/lint-format-strings.sh
+++ b/test/lint/lint-format-strings.sh
@@ -34,7 +34,7 @@ if ! python3 -m doctest test/lint/lint-format-strings.py; then
fi
for S in "${FUNCTION_NAMES_AND_NUMBER_OF_LEADING_ARGUMENTS[@]}"; do
IFS="," read -r FUNCTION_NAME SKIP_ARGUMENTS <<< "${S}"
- for MATCHING_FILE in $(git grep --full-name -l "${FUNCTION_NAME}" -- "*.c" "*.cpp" "*.h" | sort | grep -vE "^src/(leveldb|secp256k1|tinyformat|univalue)"); do
+ for MATCHING_FILE in $(git grep --full-name -l "${FUNCTION_NAME}" -- "*.c" "*.cpp" "*.h" | sort | grep -vE "^src/(leveldb|secp256k1|tinyformat|univalue|test/fuzz/strprintf.cpp)"); do
MATCHING_FILES+=("${MATCHING_FILE}")
done
if ! test/lint/lint-format-strings.py --skip-arguments "${SKIP_ARGUMENTS}" "${FUNCTION_NAME}" "${MATCHING_FILES[@]}"; then
diff --git a/test/lint/lint-include-guards.sh b/test/lint/lint-include-guards.sh
index 2d3beaf582..3a0494c190 100755
--- a/test/lint/lint-include-guards.sh
+++ b/test/lint/lint-include-guards.sh
@@ -10,7 +10,7 @@ export LC_ALL=C
HEADER_ID_PREFIX="BITCOIN_"
HEADER_ID_SUFFIX="_H"
-REGEXP_EXCLUDE_FILES_WITH_PREFIX="src/(crypto/ctaes/|leveldb/|secp256k1/|test/fuzz/FuzzedDataProvider.h|tinyformat.h|univalue/)"
+REGEXP_EXCLUDE_FILES_WITH_PREFIX="src/(crypto/ctaes/|leveldb/|crc32c/|secp256k1/|test/fuzz/FuzzedDataProvider.h|tinyformat.h|univalue/)"
EXIT_CODE=0
for HEADER_FILE in $(git ls-files -- "*.h" | grep -vE "^${REGEXP_EXCLUDE_FILES_WITH_PREFIX}")
diff --git a/test/lint/lint-includes.sh b/test/lint/lint-includes.sh
index bb2bd4e56c..1cece6a525 100755
--- a/test/lint/lint-includes.sh
+++ b/test/lint/lint-includes.sh
@@ -9,7 +9,7 @@
# Check includes: Check for duplicate includes. Enforce bracket syntax includes.
export LC_ALL=C
-IGNORE_REGEXP="/(leveldb|secp256k1|univalue)/"
+IGNORE_REGEXP="/(leveldb|secp256k1|univalue|crc32c)/"
# cd to root folder of git repo for git ls-files to work properly
cd "$(dirname $0)/../.." || exit 1
@@ -53,7 +53,6 @@ EXPECTED_BOOST_INCLUDES=(
boost/algorithm/string/classification.hpp
boost/algorithm/string/replace.hpp
boost/algorithm/string/split.hpp
- boost/chrono/chrono.hpp
boost/date_time/posix_time/posix_time.hpp
boost/filesystem.hpp
boost/filesystem/fstream.hpp
diff --git a/test/lint/lint-locale-dependence.sh b/test/lint/lint-locale-dependence.sh
index c1a05fc945..0cb38b6fdb 100755
--- a/test/lint/lint-locale-dependence.sh
+++ b/test/lint/lint-locale-dependence.sh
@@ -1,4 +1,7 @@
#!/usr/bin/env bash
+# Copyright (c) 2018-2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C
KNOWN_VIOLATIONS=(
@@ -19,6 +22,7 @@ KNOWN_VIOLATIONS=(
"src/test/blockchain_tests.cpp.*std::to_string"
"src/test/dbwrapper_tests.cpp:.*snprintf"
"src/test/denialofservice_tests.cpp.*std::to_string"
+ "src/test/fuzz/locale.cpp"
"src/test/fuzz/parse_numbers.cpp:.*atoi"
"src/test/key_tests.cpp.*std::to_string"
"src/test/net_tests.cpp.*std::to_string"
diff --git a/test/lint/lint-python-utf8-encoding.sh b/test/lint/lint-python-utf8-encoding.sh
index d03c20205d..773855bed1 100755
--- a/test/lint/lint-python-utf8-encoding.sh
+++ b/test/lint/lint-python-utf8-encoding.sh
@@ -9,7 +9,7 @@
export LC_ALL=C
EXIT_CODE=0
-OUTPUT=$(git grep " open(" -- "*.py" | grep -vE "encoding=.(ascii|utf8|utf-8)." | grep -vE "open\([^,]*, ['\"][^'\"]*b[^'\"]*['\"]")
+OUTPUT=$(git grep " open(" -- "*.py" ":(exclude)src/crc32c/" | grep -vE "encoding=.(ascii|utf8|utf-8)." | grep -vE "open\([^,]*, ['\"][^'\"]*b[^'\"]*['\"]")
if [[ ${OUTPUT} != "" ]]; then
echo "Python's open(...) seems to be used to open text files without explicitly"
echo "specifying encoding=\"utf8\":"
@@ -17,7 +17,7 @@ if [[ ${OUTPUT} != "" ]]; then
echo "${OUTPUT}"
EXIT_CODE=1
fi
-OUTPUT=$(git grep "check_output(" -- "*.py" | grep "universal_newlines=True" | grep -vE "encoding=.(ascii|utf8|utf-8).")
+OUTPUT=$(git grep "check_output(" -- "*.py" ":(exclude)src/crc32c/"| grep "universal_newlines=True" | grep -vE "encoding=.(ascii|utf8|utf-8).")
if [[ ${OUTPUT} != "" ]]; then
echo "Python's check_output(...) seems to be used to get program outputs without explicitly"
echo "specifying encoding=\"utf8\":"
diff --git a/test/lint/lint-shebang.sh b/test/lint/lint-shebang.sh
index fda22592d3..a666fdfecf 100755
--- a/test/lint/lint-shebang.sh
+++ b/test/lint/lint-shebang.sh
@@ -1,4 +1,8 @@
#!/usr/bin/env bash
+# Copyright (c) 2018 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
# Assert expected shebang lines
export LC_ALL=C
diff --git a/test/lint/lint-shell.sh b/test/lint/lint-shell.sh
index 63624e3ae0..f59b2c9945 100755
--- a/test/lint/lint-shell.sh
+++ b/test/lint/lint-shell.sh
@@ -41,7 +41,7 @@ if ! shellcheck "$EXCLUDE" $(git ls-files -- '*.sh' | grep -vE 'src/(leveldb|sec
fi
if ! command -v yq > /dev/null; then
- echo "Skipping Gitian desriptor scripts checking since yq is not installed."
+ echo "Skipping Gitian descriptor scripts checking since yq is not installed."
exit $EXIT_CODE
fi
diff --git a/test/lint/lint-spelling.ignore-words.txt b/test/lint/lint-spelling.ignore-words.txt
index 576ae94098..a7a97eb41f 100644
--- a/test/lint/lint-spelling.ignore-words.txt
+++ b/test/lint/lint-spelling.ignore-words.txt
@@ -12,3 +12,5 @@ keyserver
homogenous
setban
hist
+ser
+unselect
diff --git a/test/lint/lint-spelling.sh b/test/lint/lint-spelling.sh
index c7a3d0de44..cb84727ba5 100755
--- a/test/lint/lint-spelling.sh
+++ b/test/lint/lint-spelling.sh
@@ -15,6 +15,6 @@ if ! command -v codespell > /dev/null; then
fi
IGNORE_WORDS_FILE=test/lint/lint-spelling.ignore-words.txt
-if ! codespell --check-filenames --disable-colors --quiet-level=7 --ignore-words=${IGNORE_WORDS_FILE} $(git ls-files -- ":(exclude)build-aux/m4/" ":(exclude)contrib/seeds/*.txt" ":(exclude)depends/" ":(exclude)doc/release-notes/" ":(exclude)src/leveldb/" ":(exclude)src/qt/locale/" ":(exclude)src/qt/*.qrc" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/"); then
+if ! codespell --check-filenames --disable-colors --quiet-level=7 --ignore-words=${IGNORE_WORDS_FILE} $(git ls-files -- ":(exclude)build-aux/m4/" ":(exclude)contrib/seeds/*.txt" ":(exclude)depends/" ":(exclude)doc/release-notes/" ":(exclude)src/leveldb/" ":(exclude)src/crc32c/" ":(exclude)src/qt/locale/" ":(exclude)src/qt/*.qrc" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/"); then
echo "^ Warning: codespell identified likely spelling errors. Any false positives? Add them to the list of ignored words in ${IGNORE_WORDS_FILE}"
fi
diff --git a/test/lint/lint-submodule.sh b/test/lint/lint-submodule.sh
new file mode 100755
index 0000000000..d9aa021df7
--- /dev/null
+++ b/test/lint/lint-submodule.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+# This script checks for git modules
+export LC_ALL=C
+EXIT_CODE=0
+
+CMD=$(git submodule status --recursive)
+if test -n "$CMD";
+then
+ echo These submodules were found, delete them:
+ echo "$CMD"
+ EXIT_CODE=1
+fi
+
+exit $EXIT_CODE
+
diff --git a/test/lint/lint-whitespace.sh b/test/lint/lint-whitespace.sh
index 861faf8516..d8bdb0a8d7 100755
--- a/test/lint/lint-whitespace.sh
+++ b/test/lint/lint-whitespace.sh
@@ -31,14 +31,14 @@ if [ -z "${TRAVIS_COMMIT_RANGE}" ]; then
fi
showdiff() {
- if ! git diff -U0 "${TRAVIS_COMMIT_RANGE}" -- "." ":(exclude)depends/patches/" ":(exclude)src/leveldb/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" ":(exclude)doc/release-notes/" ":(exclude)src/qt/locale/"; then
+ if ! git diff -U0 "${TRAVIS_COMMIT_RANGE}" -- "." ":(exclude)depends/patches/" ":(exclude)src/leveldb/" ":(exclude)src/crc32c/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" ":(exclude)doc/release-notes/" ":(exclude)src/qt/locale/"; then
echo "Failed to get a diff"
exit 1
fi
}
showcodediff() {
- if ! git diff -U0 "${TRAVIS_COMMIT_RANGE}" -- *.cpp *.h *.md *.py *.sh ":(exclude)src/leveldb/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" ":(exclude)doc/release-notes/" ":(exclude)src/qt/locale/"; then
+ if ! git diff -U0 "${TRAVIS_COMMIT_RANGE}" -- *.cpp *.h *.md *.py *.sh ":(exclude)src/leveldb/" ":(exclude)src/crc32c/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" ":(exclude)doc/release-notes/" ":(exclude)src/qt/locale/"; then
echo "Failed to get a diff"
exit 1
fi
diff --git a/test/sanitizer_suppressions/tsan b/test/sanitizer_suppressions/tsan
index 70eea34363..b9c5c038d0 100644
--- a/test/sanitizer_suppressions/tsan
+++ b/test/sanitizer_suppressions/tsan
@@ -7,6 +7,14 @@ deadlock:WalletBatch
# Intentional deadlock in tests
deadlock:TestPotentialDeadLockDetected
+# Race due to unprotected calls to thread-unsafe BOOST_TEST_MESSAGE from different threads:
+# * G_TEST_LOG_FUN in the index thread
+# * boost test case invoker (entering a test case) in the main thread
+# TODO: get rid of BOOST_ macros, see also https://github.com/bitcoin/bitcoin/issues/8670
+race:blockfilter_index_initial_sync_invoker
+race:txindex_initial_sync_invoker
+race:validation_block_tests::TestSubscriber
+
# Wildcard for all gui tests, should be replaced with non-wildcard suppressions
race:src/qt/test/*
deadlock:src/qt/test/*
diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan
index f5de358bcb..b3d9b9e6ec 100644
--- a/test/sanitizer_suppressions/ubsan
+++ b/test/sanitizer_suppressions/ubsan
@@ -1,7 +1,5 @@
# -fsanitize=undefined suppressions
# =================================
-alignment:move.h
-alignment:prevector.h
float-divide-by-zero:policy/fees.cpp
float-divide-by-zero:validation.cpp
float-divide-by-zero:wallet/wallet.cpp
@@ -84,3 +82,4 @@ implicit-signed-integer-truncation:test/skiplist_tests.cpp
implicit-signed-integer-truncation:torcontrol.cpp
implicit-unsigned-integer-truncation:crypto/*
implicit-unsigned-integer-truncation:leveldb/*
+implicit-integer-sign-change:crc32c/*