-rw-r--r--.gitignore5
-rw-r--r--.travis.yml44
-rw-r--r--.tx/config2
-rw-r--r--CONTRIBUTING.md71
-rw-r--r--Makefile.am40
-rw-r--r--build-aux/m4/ax_boost_chrono.m4118
-rw-r--r--build_msvc/.gitignore14
-rw-r--r--build_msvc/README.md2
-rw-r--r--build_msvc/bitcoin_config.h12
-rw-r--r--build_msvc/libleveldb/libleveldb.vcxproj14
-rw-r--r--ci/README.md2
-rwxr-xr-xci/lint/06_script.sh1
-rwxr-xr-xci/test/00_setup_env.sh3
-rw-r--r--ci/test/00_setup_env_arm.sh3
-rw-r--r--ci/test/00_setup_env_i686_centos.sh1
-rw-r--r--ci/test/00_setup_env_mac.sh1
-rw-r--r--ci/test/00_setup_env_mac_host.sh1
-rw-r--r--ci/test/00_setup_env_native_asan.sh3
-rw-r--r--ci/test/00_setup_env_native_fuzz.sh3
-rw-r--r--ci/test/00_setup_env_native_fuzz_with_valgrind.sh18
-rw-r--r--ci/test/00_setup_env_native_nowallet.sh1
-rw-r--r--ci/test/00_setup_env_native_qt5.sh2
-rw-r--r--ci/test/00_setup_env_native_tsan.sh3
-rw-r--r--ci/test/00_setup_env_native_valgrind.sh6
-rw-r--r--ci/test/00_setup_env_s390x.sh1
-rw-r--r--ci/test/00_setup_env_win64.sh1
-rwxr-xr-xci/test/04_install.sh27
-rwxr-xr-xci/test/05_before_script.sh7
-rwxr-xr-xci/test/06_script_b.sh2
-rw-r--r--configure.ac228
-rw-r--r--contrib/devtools/README.md2
-rwxr-xr-xcontrib/devtools/copyright_header.py1
-rwxr-xr-xcontrib/devtools/previous_release.sh149
-rwxr-xr-xcontrib/devtools/symbol-check.py49
-rw-r--r--contrib/gitian-descriptors/gitian-linux.yml16
-rw-r--r--contrib/gitian-descriptors/gitian-osx.yml13
-rw-r--r--contrib/gitian-descriptors/gitian-win.yml18
-rw-r--r--contrib/guix/README.md2
-rwxr-xr-xcontrib/guix/guix-build.sh2
-rw-r--r--contrib/guix/manifest.scm14
-rw-r--r--contrib/windeploy/win-codesign.cert56
-rw-r--r--depends/README.md2
-rw-r--r--depends/config.site.in2
-rw-r--r--depends/packages/boost.mk2
-rw-r--r--depends/packages/native_libdmg-hfsplus.mk2
-rw-r--r--doc/Doxyfile.in3
-rw-r--r--doc/build-freebsd.md19
-rw-r--r--doc/build-netbsd.md14
-rw-r--r--doc/build-openbsd.md12
-rw-r--r--doc/build-osx.md1
-rw-r--r--doc/build-unix.md2
-rw-r--r--doc/descriptors.md1
-rw-r--r--doc/developer-notes.md130
-rw-r--r--doc/files.md4
-rw-r--r--doc/fuzzing.md249
-rw-r--r--doc/release-notes-15437.md53
-rw-r--r--doc/release-notes-15954.md4
-rw-r--r--doc/release-notes-17056.md4
-rw-r--r--doc/release-notes-17410.md5
-rw-r--r--doc/release-notes-17437.md5
-rw-r--r--doc/release-notes-17578.md13
-rw-r--r--doc/release-notes.md131
-rw-r--r--doc/release-notes/release-notes-0.19.1.md115
-rw-r--r--doc/release-process.md5
-rw-r--r--src/Makefile.am13
-rw-r--r--src/Makefile.bench.include3
-rw-r--r--src/Makefile.crc32c.include75
-rw-r--r--src/Makefile.leveldb.include44
-rw-r--r--src/Makefile.test.include354
-rw-r--r--src/addrman.cpp10
-rw-r--r--src/addrman.h11
-rw-r--r--src/banman.h9
-rw-r--r--src/bench/ccoins_caching.cpp42
-rw-r--r--src/bench/examples.cpp2
-rw-r--r--src/bench/verify_script.cpp23
-rw-r--r--src/bitcoin-cli.cpp2
-rw-r--r--src/bitcoin-tx.cpp3
-rw-r--r--src/bitcoind.cpp2
-rw-r--r--src/blockencodings.h140
-rw-r--r--src/chain.h6
-rw-r--r--src/chainparams.cpp4
-rw-r--r--src/chainparams.h3
-rw-r--r--src/compressor.h10
-rw-r--r--src/consensus/merkle.cpp2
-rw-r--r--src/consensus/validation.h61
-rw-r--r--src/core_read.cpp8
-rw-r--r--src/crc32c/.appveyor.yml37
-rw-r--r--src/crc32c/.clang-format3
-rw-r--r--src/crc32c/.clang_complete8
-rw-r--r--src/crc32c/.gitignore8
-rw-r--r--src/crc32c/.gitmodules0
-rw-r--r--src/crc32c/.travis.yml76
-rw-r--r--src/crc32c/.ycm_extra_conf.py142
-rw-r--r--src/crc32c/AUTHORS9
-rw-r--r--src/crc32c/CMakeLists.txt423
-rw-r--r--src/crc32c/CONTRIBUTING.md23
-rw-r--r--src/crc32c/Crc32cConfig.cmake5
-rw-r--r--src/crc32c/LICENSE28
-rw-r--r--src/crc32c/README.md125
-rw-r--r--src/crc32c/include/crc32c/crc32c.h89
-rw-r--r--src/crc32c/src/crc32c.cc39
-rw-r--r--src/crc32c/src/crc32c_arm64.cc126
-rw-r--r--src/crc32c/src/crc32c_arm64.h27
-rw-r--r--src/crc32c/src/crc32c_arm64_linux_check.h50
-rw-r--r--src/crc32c/src/crc32c_arm64_unittest.cc24
-rw-r--r--src/crc32c/src/crc32c_benchmark.cc106
-rw-r--r--src/crc32c/src/crc32c_capi_unittest.c66
-rw-r--r--src/crc32c/src/crc32c_config.h.in36
-rw-r--r--src/crc32c/src/crc32c_extend_unittests.h112
-rw-r--r--src/crc32c/src/crc32c_internal.h23
-rw-r--r--src/crc32c/src/crc32c_portable.cc351
-rw-r--r--src/crc32c/src/crc32c_portable_unittest.cc20
-rw-r--r--src/crc32c/src/crc32c_prefetch.h46
-rw-r--r--src/crc32c/src/crc32c_prefetch_unittest.cc9
-rw-r--r--src/crc32c/src/crc32c_read_le.h53
-rw-r--r--src/crc32c/src/crc32c_read_le_unittest.cc32
-rw-r--r--src/crc32c/src/crc32c_round_up.h34
-rw-r--r--src/crc32c/src/crc32c_round_up_unittest.cc84
-rw-r--r--src/crc32c/src/crc32c_sse42.cc258
-rw-r--r--src/crc32c/src/crc32c_sse42.h33
-rw-r--r--src/crc32c/src/crc32c_sse42_check.h50
-rw-r--r--src/crc32c/src/crc32c_sse42_unittest.cc24
-rw-r--r--src/crc32c/src/crc32c_test_main.cc22
-rw-r--r--src/crc32c/src/crc32c_unittest.cc129
-rw-r--r--src/flatfile.h2
-rw-r--r--src/httprpc.cpp2
-rw-r--r--src/httpserver.cpp8
-rw-r--r--src/index/base.cpp5
-rw-r--r--src/index/base.h5
-rw-r--r--src/indirectmap.h2
-rw-r--r--src/init.cpp93
-rw-r--r--src/interfaces/chain.cpp23
-rw-r--r--src/interfaces/chain.h19
-rw-r--r--src/interfaces/node.cpp13
-rw-r--r--src/interfaces/node.h6
-rw-r--r--src/interfaces/wallet.cpp34
-rw-r--r--src/interfaces/wallet.h20
-rw-r--r--src/leveldb/.appveyor.yml35
-rw-r--r--src/leveldb/.clang-format18
-rw-r--r--src/leveldb/.gitignore21
-rw-r--r--src/leveldb/.travis.yml81
-rw-r--r--src/leveldb/CMakeLists.txt465
-rw-r--r--src/leveldb/CONTRIBUTING.md4
-rw-r--r--src/leveldb/Makefile424
-rw-r--r--src/leveldb/README.md87
-rw-r--r--src/leveldb/WINDOWS.md39
-rw-r--r--src/leveldb/benchmarks/db_bench.cc (renamed from src/leveldb/db/db_bench.cc)203
-rw-r--r--src/leveldb/benchmarks/db_bench_sqlite3.cc (renamed from src/leveldb/doc/bench/db_bench_sqlite3.cc)184
-rw-r--r--src/leveldb/benchmarks/db_bench_tree_db.cc (renamed from src/leveldb/doc/bench/db_bench_tree_db.cc)130
-rwxr-xr-xsrc/leveldb/build_detect_platform259
-rw-r--r--src/leveldb/cmake/leveldbConfig.cmake1
-rw-r--r--src/leveldb/db/autocompact_test.cc36
-rw-r--r--src/leveldb/db/builder.cc25
-rw-r--r--src/leveldb/db/builder.h8
-rw-r--r--src/leveldb/db/c.cc391
-rw-r--r--src/leveldb/db/c_test.c36
-rw-r--r--src/leveldb/db/corruption_test.cc100
-rw-r--r--src/leveldb/db/db_impl.cc547
-rw-r--r--src/leveldb/db/db_impl.h146
-rw-r--r--src/leveldb/db/db_iter.cc93
-rw-r--r--src/leveldb/db/db_iter.h12
-rw-r--r--src/leveldb/db/db_test.cc722
-rw-r--r--src/leveldb/db/dbformat.cc43
-rw-r--r--src/leveldb/db/dbformat.h80
-rw-r--r--src/leveldb/db/dbformat_test.cc95
-rw-r--r--src/leveldb/db/dumpfile.cc29
-rw-r--r--src/leveldb/db/fault_injection_test.cc132
-rw-r--r--src/leveldb/db/filename.cc37
-rw-r--r--src/leveldb/db/filename.h31
-rw-r--r--src/leveldb/db/filename_test.cc86
-rw-r--r--src/leveldb/db/leveldbutil.cc21
-rw-r--r--src/leveldb/db/log_reader.cc28
-rw-r--r--src/leveldb/db/log_reader.h47
-rw-r--r--src/leveldb/db/log_test.cc291
-rw-r--r--src/leveldb/db/log_writer.cc29
-rw-r--r--src/leveldb/db/log_writer.h14
-rw-r--r--src/leveldb/db/memtable.cc68
-rw-r--r--src/leveldb/db/memtable.h23
-rw-r--r--src/leveldb/db/recovery_test.cc72
-rw-r--r--src/leveldb/db/repair.cc95
-rw-r--r--src/leveldb/db/skiplist.h182
-rw-r--r--src/leveldb/db/skiplist_test.cc65
-rw-r--r--src/leveldb/db/snapshot.h78
-rw-r--r--src/leveldb/db/table_cache.cc47
-rw-r--r--src/leveldb/db/table_cache.h33
-rw-r--r--src/leveldb/db/version_edit.cc55
-rw-r--r--src/leveldb/db/version_edit.h27
-rw-r--r--src/leveldb/db/version_edit_test.cc6
-rw-r--r--src/leveldb/db/version_set.cc517
-rw-r--r--src/leveldb/db/version_set.h123
-rw-r--r--src/leveldb/db/version_set_test.cc243
-rw-r--r--src/leveldb/db/write_batch.cc23
-rw-r--r--src/leveldb/db/write_batch_internal.h9
-rw-r--r--src/leveldb/db/write_batch_test.cc73
-rw-r--r--src/leveldb/doc/benchmark.html6
-rw-r--r--src/leveldb/doc/impl.md14
-rw-r--r--src/leveldb/doc/index.md10
-rw-r--r--src/leveldb/helpers/memenv/memenv.cc203
-rw-r--r--src/leveldb/helpers/memenv/memenv.h4
-rw-r--r--src/leveldb/helpers/memenv/memenv_test.cc60
-rw-r--r--src/leveldb/include/leveldb/c.h320
-rw-r--r--src/leveldb/include/leveldb/cache.h21
-rw-r--r--src/leveldb/include/leveldb/comparator.h11
-rw-r--r--src/leveldb/include/leveldb/db.h58
-rw-r--r--src/leveldb/include/leveldb/dumpfile.h5
-rw-r--r--src/leveldb/include/leveldb/env.h220
-rw-r--r--src/leveldb/include/leveldb/export.h33
-rw-r--r--src/leveldb/include/leveldb/filter_policy.h12
-rw-r--r--src/leveldb/include/leveldb/iterator.h34
-rw-r--r--src/leveldb/include/leveldb/options.h100
-rw-r--r--src/leveldb/include/leveldb/slice.h38
-rw-r--r--src/leveldb/include/leveldb/status.h52
-rw-r--r--src/leveldb/include/leveldb/table.h33
-rw-r--r--src/leveldb/include/leveldb/table_builder.h11
-rw-r--r--src/leveldb/include/leveldb/write_batch.h37
-rw-r--r--src/leveldb/issues/issue178_test.cc12
-rw-r--r--src/leveldb/issues/issue200_test.cc10
-rw-r--r--src/leveldb/issues/issue320_test.cc128
-rw-r--r--src/leveldb/port/README.md (renamed from src/leveldb/port/README)2
-rw-r--r--src/leveldb/port/atomic_pointer.h245
-rw-r--r--src/leveldb/port/port.h8
-rw-r--r--src/leveldb/port/port_config.h.in39
-rw-r--r--src/leveldb/port/port_example.h67
-rw-r--r--src/leveldb/port/port_posix.cc67
-rw-r--r--src/leveldb/port/port_posix.h161
-rw-r--r--src/leveldb/port/port_posix_sse.cc110
-rw-r--r--src/leveldb/port/port_stdcxx.h153
-rw-r--r--src/leveldb/port/port_win.cc158
-rw-r--r--src/leveldb/port/port_win.h184
-rw-r--r--src/leveldb/port/thread_annotations.h78
-rw-r--r--src/leveldb/port/win/stdint.h24
-rw-r--r--src/leveldb/table/block.cc69
-rw-r--r--src/leveldb/table/block.h16
-rw-r--r--src/leveldb/table/block_builder.cc23
-rw-r--r--src/leveldb/table/block_builder.h26
-rw-r--r--src/leveldb/table/filter_block.cc19
-rw-r--r--src/leveldb/table/filter_block.h21
-rw-r--r--src/leveldb/table/filter_block_test.cc38
-rw-r--r--src/leveldb/table/format.cc11
-rw-r--r--src/leveldb/table/format.h42
-rw-r--r--src/leveldb/table/iterator.cc75
-rw-r--r--src/leveldb/table/iterator_wrapper.h58
-rw-r--r--src/leveldb/table/merger.cc52
-rw-r--r--src/leveldb/table/merger.h4
-rw-r--r--src/leveldb/table/table.cc68
-rw-r--r--src/leveldb/table/table_builder.cc61
-rw-r--r--src/leveldb/table/table_test.cc315
-rw-r--r--src/leveldb/table/two_level_iterator.cc91
-rw-r--r--src/leveldb/table/two_level_iterator.h11
-rw-r--r--src/leveldb/util/arena.cc18
-rw-r--r--src/leveldb/util/arena.h25
-rw-r--r--src/leveldb/util/arena_test.cc19
-rw-r--r--src/leveldb/util/bloom.cc27
-rw-r--r--src/leveldb/util/bloom_test.cc56
-rw-r--r--src/leveldb/util/cache.cc123
-rw-r--r--src/leveldb/util/cache_test.cc60
-rw-r--r--src/leveldb/util/coding.cc84
-rw-r--r--src/leveldb/util/coding.h136
-rw-r--r--src/leveldb/util/coding_test.cc42
-rw-r--r--src/leveldb/util/comparator.cc40
-rw-r--r--src/leveldb/util/crc32c.cc634
-rw-r--r--src/leveldb/util/crc32c.h6
-rw-r--r--src/leveldb/util/crc32c_test.cc31
-rw-r--r--src/leveldb/util/env.cc26
-rw-r--r--src/leveldb/util/env_posix.cc1122
-rw-r--r--src/leveldb/util/env_posix_test.cc300
-rw-r--r--src/leveldb/util/env_test.cc235
-rw-r--r--src/leveldb/util/env_win.cc902
-rw-r--r--src/leveldb/util/env_windows.cc849
-rw-r--r--src/leveldb/util/env_windows_test.cc64
-rw-r--r--src/leveldb/util/env_windows_test_helper.h25
-rw-r--r--src/leveldb/util/filter_policy.cc2
-rw-r--r--src/leveldb/util/hash.cc15
-rw-r--r--src/leveldb/util/hash.h4
-rw-r--r--src/leveldb/util/hash_test.cc32
-rw-r--r--src/leveldb/util/histogram.cc207
-rw-r--r--src/leveldb/util/histogram.h20
-rw-r--r--src/leveldb/util/logging.cc52
-rw-r--r--src/leveldb/util/logging.h14
-rw-r--r--src/leveldb/util/logging_test.cc143
-rw-r--r--src/leveldb/util/mutexlock.h12
-rw-r--r--src/leveldb/util/no_destructor.h46
-rw-r--r--src/leveldb/util/no_destructor_test.cc47
-rw-r--r--src/leveldb/util/options.cc18
-rw-r--r--src/leveldb/util/posix_logger.h168
-rw-r--r--src/leveldb/util/random.h9
-rw-r--r--src/leveldb/util/status.cc14
-rw-r--r--src/leveldb/util/status_test.cc40
-rw-r--r--src/leveldb/util/testharness.cc18
-rw-r--r--src/leveldb/util/testharness.h85
-rw-r--r--src/leveldb/util/testutil.cc12
-rw-r--r--src/leveldb/util/testutil.h29
-rw-r--r--src/leveldb/util/windows_logger.h124
-rw-r--r--src/logging/timer.h2
-rw-r--r--src/memusage.h2
-rw-r--r--src/miner.cpp3
-rw-r--r--src/net.cpp36
-rw-r--r--src/net.h21
-rw-r--r--src/net_permissions.h4
-rw-r--r--src/net_processing.cpp98
-rw-r--r--src/net_processing.h11
-rw-r--r--src/netaddress.cpp7
-rw-r--r--src/netaddress.h2
-rw-r--r--src/node/coinstats.cpp2
-rw-r--r--src/node/context.cpp1
-rw-r--r--src/node/context.h2
-rw-r--r--src/node/psbt.cpp31
-rw-r--r--src/node/transaction.cpp3
-rw-r--r--src/outputtype.cpp14
-rw-r--r--src/prevector.h23
-rw-r--r--src/qt/bitcoingui.cpp2
-rw-r--r--src/qt/bitcoinstrings.cpp9
-rw-r--r--src/qt/clientmodel.cpp5
-rw-r--r--src/qt/forms/debugwindow.ui60
-rw-r--r--src/qt/guiutil.cpp4
-rw-r--r--src/qt/guiutil.h4
-rw-r--r--src/qt/locale/bitcoin_en.ts616
-rw-r--r--src/qt/optionsmodel.cpp3
-rw-r--r--src/qt/overviewpage.cpp8
-rw-r--r--src/qt/peertablemodel.cpp4
-rw-r--r--src/qt/receivecoinsdialog.cpp4
-rw-r--r--src/qt/rpcconsole.cpp15
-rw-r--r--src/qt/sendcoinsdialog.cpp21
-rw-r--r--src/qt/signverifymessagedialog.cpp112
-rw-r--r--src/qt/splashscreen.cpp2
-rw-r--r--src/qt/walletcontroller.cpp4
-rw-r--r--src/qt/walletframe.cpp4
-rw-r--r--src/qt/walletmodel.cpp24
-rw-r--r--src/qt/walletmodel.h2
-rw-r--r--src/qt/walletview.h2
-rw-r--r--src/random.cpp17
-rw-r--r--src/randomenv.cpp19
-rw-r--r--src/reverselock.h34
-rw-r--r--src/rpc/blockchain.cpp646
-rw-r--r--src/rpc/client.cpp1
-rw-r--r--src/rpc/mining.cpp228
-rw-r--r--src/rpc/misc.cpp183
-rw-r--r--src/rpc/net.cpp298
-rw-r--r--src/rpc/rawtransaction.cpp577
-rw-r--r--src/rpc/rawtransaction_util.cpp56
-rw-r--r--src/rpc/rawtransaction_util.h2
-rw-r--r--src/rpc/server.cpp33
-rw-r--r--src/rpc/util.cpp121
-rw-r--r--src/rpc/util.h154
-rw-r--r--src/scheduler.cpp80
-rw-r--r--src/scheduler.h62
-rw-r--r--src/script/descriptor.cpp300
-rw-r--r--src/script/descriptor.h56
-rw-r--r--src/script/interpreter.cpp126
-rw-r--r--src/script/script_error.cpp2
-rw-r--r--src/script/sign.cpp51
-rw-r--r--src/script/sign.h4
-rw-r--r--src/script/signingprovider.h46
-rw-r--r--src/serialize.h188
-rw-r--r--src/span.h14
-rw-r--r--src/support/lockedpool.cpp3
-rw-r--r--src/sync.cpp19
-rw-r--r--src/sync.h43
-rw-r--r--src/test/addrman_tests.cpp51
-rw-r--r--src/test/blockchain_tests.cpp5
-rw-r--r--src/test/blockfilter_index_tests.cpp2
-rw-r--r--src/test/bswap_tests.cpp20
-rw-r--r--src/test/checkqueue_tests.cpp2
-rw-r--r--src/test/data/script_tests.json26
-rw-r--r--src/test/data/tx_invalid.json2
-rw-r--r--src/test/data/tx_valid.json6
-rw-r--r--src/test/dbwrapper_tests.cpp8
-rw-r--r--src/test/denialofservice_tests.cpp13
-rw-r--r--src/test/descriptor_tests.cpp144
-rw-r--r--src/test/fuzz/addrdb.cpp43
-rw-r--r--src/test/fuzz/base_encode_decode.cpp5
-rw-r--r--src/test/fuzz/block.cpp6
-rw-r--r--src/test/fuzz/block_header.cpp41
-rw-r--r--src/test/fuzz/blockfilter.cpp44
-rw-r--r--src/test/fuzz/bloom_filter.cpp80
-rw-r--r--src/test/fuzz/chain.cpp65
-rw-r--r--src/test/fuzz/descriptor_parse.cpp2
-rw-r--r--src/test/fuzz/deserialize.cpp18
-rw-r--r--src/test/fuzz/eval_script.cpp2
-rw-r--r--src/test/fuzz/fee_rate.cpp40
-rw-r--r--src/test/fuzz/float.cpp42
-rw-r--r--src/test/fuzz/hex.cpp23
-rw-r--r--src/test/fuzz/integer.cpp145
-rw-r--r--src/test/fuzz/key.cpp309
-rw-r--r--src/test/fuzz/key_io.cpp50
-rw-r--r--src/test/fuzz/locale.cpp96
-rw-r--r--src/test/fuzz/multiplication_overflow.cpp55
-rw-r--r--src/test/fuzz/net_permissions.cpp51
-rw-r--r--src/test/fuzz/netaddress.cpp134
-rw-r--r--src/test/fuzz/p2p_transport_deserializer.cpp47
-rw-r--r--src/test/fuzz/parse_univalue.cpp18
-rw-r--r--src/test/fuzz/process_message.cpp98
-rw-r--r--src/test/fuzz/protocol.cpp32
-rw-r--r--src/test/fuzz/psbt.cpp2
-rw-r--r--src/test/fuzz/rolling_bloom_filter.cpp50
-rw-r--r--src/test/fuzz/script.cpp40
-rw-r--r--src/test/fuzz/script_flags.cpp2
-rw-r--r--src/test/fuzz/script_ops.cpp67
-rw-r--r--src/test/fuzz/scriptnum_ops.cpp137
-rw-r--r--src/test/fuzz/signature_checker.cpp68
-rw-r--r--src/test/fuzz/string.cpp89
-rw-r--r--src/test/fuzz/strprintf.cpp47
-rw-r--r--src/test/fuzz/timedata.cpp29
-rw-r--r--src/test/fuzz/transaction.cpp26
-rw-r--r--src/test/fuzz/util.h106
-rw-r--r--src/test/key_tests.cpp3
-rw-r--r--src/test/net_tests.cpp3
-rw-r--r--src/test/reverselock_tests.cpp45
-rw-r--r--src/test/scheduler_tests.cpp96
-rw-r--r--src/test/serialize_tests.cpp26
-rw-r--r--src/test/settings_tests.cpp3
-rw-r--r--src/test/timedata_tests.cpp3
-rw-r--r--src/test/transaction_tests.cpp113
-rw-r--r--src/test/txindex_tests.cpp4
-rw-r--r--src/test/util/setup_common.cpp16
-rw-r--r--src/test/util/setup_common.h2
-rw-r--r--src/test/util/transaction_utils.cpp32
-rw-r--r--src/test/util/transaction_utils.h10
-rw-r--r--src/test/util_tests.cpp142
-rw-r--r--src/test/util_threadnames_tests.cpp5
-rw-r--r--src/test/validation_block_tests.cpp4
-rw-r--r--src/test/validation_flush_tests.cpp26
-rw-r--r--src/txdb.cpp2
-rw-r--r--src/txmempool.cpp11
-rw-r--r--src/txmempool.h6
-rw-r--r--src/undo.h69
-rw-r--r--src/univalue/Makefile.am2
-rw-r--r--src/univalue/lib/univalue_read.cpp11
-rw-r--r--src/univalue/test/fail45.json1
-rw-r--r--src/univalue/test/pass4.json1
-rw-r--r--src/univalue/test/unitester.cpp2
-rw-r--r--src/util/message.cpp92
-rw-r--r--src/util/message.h76
-rw-r--r--src/util/moneystr.cpp24
-rw-r--r--src/util/moneystr.h2
-rw-r--r--src/util/string.h14
-rw-r--r--src/util/system.cpp10
-rw-r--r--src/util/system.h3
-rw-r--r--src/util/time.cpp37
-rw-r--r--src/util/time.h4
-rw-r--r--src/util/validation.cpp25
-rw-r--r--src/util/validation.h18
-rw-r--r--src/validation.cpp82
-rw-r--r--src/validationinterface.cpp13
-rw-r--r--src/validationinterface.h34
-rw-r--r--src/wallet/db.cpp6
-rw-r--r--src/wallet/db.h4
-rw-r--r--src/wallet/feebumper.cpp129
-rw-r--r--src/wallet/feebumper.h10
-rw-r--r--src/wallet/init.cpp2
-rw-r--r--src/wallet/load.cpp4
-rw-r--r--src/wallet/psbtwallet.cpp77
-rw-r--r--src/wallet/psbtwallet.h32
-rw-r--r--src/wallet/rpcdump.cpp59
-rw-r--r--src/wallet/rpcwallet.cpp976
-rw-r--r--src/wallet/scriptpubkeyman.cpp101
-rw-r--r--src/wallet/scriptpubkeyman.h42
-rw-r--r--src/wallet/test/coinselector_tests.cpp13
-rw-r--r--src/wallet/test/psbt_wallet_tests.cpp7
-rw-r--r--src/wallet/test/scriptpubkeyman_tests.cpp43
-rw-r--r--src/wallet/wallet.cpp267
-rw-r--r--src/wallet/wallet.h72
-rw-r--r--src/zmq/zmqnotificationinterface.cpp2
-rw-r--r--src/zmq/zmqnotificationinterface.h2
-rw-r--r--src/zmq/zmqrpc.cpp17
-rw-r--r--test/README.md2
-rw-r--r--test/functional/README.md8
-rw-r--r--test/functional/data/invalid_txs.py30
-rwxr-xr-xtest/functional/feature_abortnode.py4
-rwxr-xr-xtest/functional/feature_asmap.py106
-rwxr-xr-xtest/functional/feature_assumevalid.py4
-rwxr-xr-xtest/functional/feature_backwards_compatibility.py347
-rwxr-xr-xtest/functional/feature_block.py20
-rwxr-xr-xtest/functional/feature_cltv.py4
-rwxr-xr-xtest/functional/feature_csv_activation.py79
-rwxr-xr-xtest/functional/feature_dersig.py8
-rwxr-xr-xtest/functional/feature_fee_estimation.py25
-rwxr-xr-xtest/functional/feature_maxuploadtarget.py5
-rwxr-xr-xtest/functional/feature_notifications.py18
-rwxr-xr-xtest/functional/feature_nulldummy.py5
-rwxr-xr-xtest/functional/feature_segwit.py1
-rwxr-xr-xtest/functional/framework_test_script.py44
-rwxr-xr-xtest/functional/mempool_accept.py5
-rwxr-xr-xtest/functional/mempool_expiry.py100
-rwxr-xr-xtest/functional/mempool_packages.py32
-rwxr-xr-xtest/functional/mempool_reorg.py5
-rwxr-xr-xtest/functional/p2p_invalid_block.py2
-rwxr-xr-xtest/functional/p2p_permissions.py10
-rwxr-xr-xtest/functional/p2p_segwit.py11
-rwxr-xr-xtest/functional/rpc_createmultisig.py14
-rwxr-xr-xtest/functional/rpc_estimatefee.py51
-rwxr-xr-xtest/functional/rpc_fundrawtransaction.py2
-rwxr-xr-xtest/functional/rpc_getdescriptorinfo.py65
-rwxr-xr-xtest/functional/rpc_psbt.py22
-rw-r--r--test/functional/test_framework/bignum.py58
-rw-r--r--test/functional/test_framework/descriptors.py9
-rwxr-xr-xtest/functional/test_framework/messages.py1
-rw-r--r--test/functional/test_framework/script.py285
-rwxr-xr-xtest/functional/test_framework/test_framework.py11
-rwxr-xr-xtest/functional/test_framework/test_node.py13
-rwxr-xr-xtest/functional/test_runner.py23
-rwxr-xr-xtest/functional/wallet_address_types.py2
-rwxr-xr-xtest/functional/wallet_avoidreuse.py2
-rwxr-xr-xtest/functional/wallet_backup.py8
-rwxr-xr-xtest/functional/wallet_bumpfee.py160
-rwxr-xr-xtest/functional/wallet_bumpfee_totalfee_deprecation.py54
-rwxr-xr-xtest/functional/wallet_createwallet.py10
-rwxr-xr-xtest/functional/wallet_groups.py6
-rwxr-xr-xtest/functional/wallet_listsinceblock.py10
-rwxr-xr-xtest/functional/wallet_multiwallet.py1
-rwxr-xr-xtest/functional/wallet_resendwallettransactions.py4
-rwxr-xr-xtest/fuzz/test_runner.py131
-rw-r--r--test/lint/README.md1
-rwxr-xr-xtest/lint/extended-lint-cppcheck.sh4
-rwxr-xr-xtest/lint/lint-include-guards.sh2
-rwxr-xr-xtest/lint/lint-includes.sh3
-rwxr-xr-xtest/lint/lint-locale-dependence.sh17
-rwxr-xr-xtest/lint/lint-python-utf8-encoding.sh4
-rwxr-xr-xtest/lint/lint-shell.sh2
-rw-r--r--test/lint/lint-spelling.ignore-words.txt2
-rwxr-xr-xtest/lint/lint-spelling.sh2
-rwxr-xr-xtest/lint/lint-submodule.sh20
-rwxr-xr-xtest/lint/lint-whitespace.sh4
-rw-r--r--test/sanitizer_suppressions/ubsan3
-rw-r--r--test/util/data/bitcoin-util-test.json40
-rw-r--r--test/util/data/txcreatescript5.hex1
-rw-r--r--test/util/data/txcreatescript6.hex1
527 files changed, 20933 insertions, 12802 deletions
diff --git a/.gitignore b/.gitignore
index db493811c5..1c487f43a7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -90,7 +90,7 @@ src/qt/bitcoin-qt.includes
*.qm
Makefile
!depends/Makefile
-bitcoin-qt
+src/qt/bitcoin-qt
Bitcoin-Qt.app
background.tiff*
@@ -108,6 +108,9 @@ qrc_*.cpp
.DS_Store
build
+# Previous releases
+releases
+
#lcov
*.gcno
*.gcda
diff --git a/.travis.yml b/.travis.yml
index fbc81b2614..1ac3b9969f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -25,6 +25,8 @@
# [1] https://docs.travis-ci.com/user/caching/#build-phases
# [2] https://docs.travis-ci.com/user/customizing-the-build#build-timeouts
+version: ~> 1.0
+
dist: xenial
os: linux
language: minimal
@@ -35,6 +37,7 @@ cache:
- $TRAVIS_BUILD_DIR/depends/built
- $TRAVIS_BUILD_DIR/depends/sdk-sources
- $TRAVIS_BUILD_DIR/ci/scratch/.ccache
+ - $TRAVIS_BUILD_DIR/releases/$HOST
# macOS
- $HOME/Library/Caches/Homebrew
- /usr/local/Homebrew
@@ -83,17 +86,19 @@ jobs:
- stage: test
name: 'ARM [GOAL: install] [buster] [unit tests, functional tests]'
- arch: arm64
+ arch: arm64 # Can disable QEMU_USER_CMD and run the tests natively without qemu
env: >-
FILE_ENV="./ci/test/00_setup_env_arm.sh"
- QEMU_USER_CMD="" # Can run the tests natively without qemu
+ QEMU_USER_CMD=""
- - stage: test
- name: 'S390x [GOAL: install] [buster] [unit tests, functional tests]'
- arch: s390x
- env: >-
- FILE_ENV="./ci/test/00_setup_env_s390x.sh"
- QEMU_USER_CMD="" # Can run the tests natively without qemu
+# s390 build was disabled temporarily because of disk space issues on the Travis VM
+#
+# - stage: test
+# name: 'S390x [GOAL: install] [buster] [unit tests, functional tests]'
+# arch: s390x # Can disable QEMU_USER_CMD and run the tests natively without qemu
+# env: >-
+# FILE_ENV="./ci/test/00_setup_env_s390x.sh"
+# QEMU_USER_CMD=""
- stage: test
name: 'Win64 [GOAL: deploy] [unit tests, no gui, no functional tests]'
@@ -106,7 +111,7 @@ jobs:
FILE_ENV="./ci/test/00_setup_env_i686_centos.sh"
- stage: test
- name: 'x86_64 Linux [GOAL: install] [bionic] [uses qt5 dev package and some depends packages] [unsigned char]'
+ name: 'x86_64 Linux [GOAL: install] [bionic] [previous releases, uses qt5 dev package and some depends packages] [unsigned char]'
env: >-
FILE_ENV="./ci/test/00_setup_env_native_qt5.sh"
@@ -132,6 +137,11 @@ jobs:
FILE_ENV="./ci/test/00_setup_env_native_fuzz.sh"
- stage: test
+ name: 'x86_64 Linux [GOAL: install] [bionic] [no depends, only system libs, fuzzers under valgrind]'
+ env: >-
+ FILE_ENV="./ci/test/00_setup_env_native_fuzz_with_valgrind.sh"
+
+ - stage: test
name: 'x86_64 Linux [GOAL: install] [bionic] [no wallet]'
env: >-
FILE_ENV="./ci/test/00_setup_env_native_nowallet.sh"
@@ -145,8 +155,20 @@ jobs:
name: 'macOS 10.14 native [GOAL: install] [GUI] [no depends]'
os: osx
# Use the most recent version:
- # Xcode 11.2.1, macOS 10.14, JDK 13.0.1, SDK 10.15
+ # Xcode 11.3.1, macOS 10.14, SDK 10.15
# https://docs.travis-ci.com/user/reference/osx/#macos-version
- osx_image: xcode11.2
+ osx_image: xcode11.3
+ addons:
+ homebrew:
+ packages:
+ - libtool
+ - berkeley-db4
+ - boost
+ - miniupnpc
+ - qt
+ - qrencode
+ - python3
+ - ccache
+ - zeromq
env: >-
FILE_ENV="./ci/test/00_setup_env_mac_host.sh"
diff --git a/.tx/config b/.tx/config
index 0e18a0df98..cd9e237158 100644
--- a/.tx/config
+++ b/.tx/config
@@ -1,7 +1,7 @@
[main]
host = https://www.transifex.com
-[bitcoin.qt-translation-019x]
+[bitcoin.qt-translation-020x]
file_filter = src/qt/locale/bitcoin_<lang>.ts
source_file = src/qt/locale/bitcoin_en.ts
source_lang = en
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 33c797d799..7216db0500 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -46,15 +46,15 @@ facilitates social contribution, easy testing and peer review.
To contribute a patch, the workflow is as follows:
- 1. Fork repository ([only the first time](https://help.github.com/en/articles/fork-a-repo)).
+ 1. Fork repository ([only for the first time](https://help.github.com/en/articles/fork-a-repo))
1. Create topic branch
1. Commit patches
The project coding conventions in the [developer notes](doc/developer-notes.md)
-must be adhered to.
+must be followed.
-In general [commits should be atomic](https://en.wikipedia.org/wiki/Atomic_commit#Atomic_commit_convention)
-and diffs should be easy to read. For this reason do not mix any formatting
+In general, [commits should be atomic](https://en.wikipedia.org/wiki/Atomic_commit#Atomic_commit_convention)
+and diffs should be easy to read. For this reason, do not mix any formatting
fixes or code moves with actual code changes.
Commit messages should be verbose by default consisting of a short subject line
@@ -100,7 +100,7 @@ Examples:
qt: Add feed bump button
log: Fix typo in log message
-Note that translations should not be submitted as pull requests, please see
+Note that translations should not be submitted as pull requests. Please see
[Translation Process](https://github.com/bitcoin/bitcoin/blob/master/doc/translation_process.md)
for more information on helping with translations.
@@ -113,16 +113,16 @@ patch does together with any justification/reasoning. You should include
references to any discussions (for example other tickets or mailing list
discussions).
-At this stage one should expect comments and review from other contributors. You
+At this stage, one should expect comments and review from other contributors. You
can add more commits to your pull request by committing them locally and pushing
to your fork until you have satisfied all feedback.
-Note: Code review is a burdensome but important part of the development process, and as such, certain types of pull requests are rejected. In general, if the **improvements** do not warrant the **review effort** required, the PR has a high chance of being rejected. It is up to the PR author to convince the reviewers that the changes warrant the review effort, and if reviewers are "Concept NAK'ing" the PR, the author may need to present arguments and/or do research backing their suggested changes.
+Note: Code review is a burdensome but important part of the development process, and as such, certain types of pull requests are rejected. In general, if the **improvements** do not warrant the **review effort** required, the PR has a high chance of being rejected. It is up to the PR author to convince the reviewers that the changes warrant the review effort, and if reviewers are "Concept NACK'ing" the PR, the author may need to present arguments and/or do research backing their suggested changes.
-Squashing Commits
----------------------------
-If your pull request is accepted for merging, you may be asked by a maintainer
-to squash and or [rebase](https://git-scm.com/docs/git-rebase) your commits
+### Squashing Commits
+
+If your pull request contains fixup commits (commits that change the same line of code repeatedly) or too fine-grained
+commits, you may be asked to [squash](https://git-scm.com/docs/git-rebase#_interactive_mode) your commits
before it will be merged. The basic squashing workflow is shown below.
git checkout your_branch_name
@@ -133,8 +133,8 @@ before it will be merged. The basic squashing workflow is shown below.
# Save and quit.
git push -f # (force push to GitHub)
-Please update the resulting commit message if needed, it should read as a
-coherent message. In most cases this means that you should not just list the
+Please update the resulting commit message if needed. It should read as a
+coherent message. In most cases, this means that you should not just list the
interim commits.
If you have problems with squashing (or other workflows with `git`), you can
@@ -149,6 +149,20 @@ the respective change set.
The length of time required for peer review is unpredictable and will vary from
pull request to pull request.
+### Rebasing Changes
+
+When a pull request conflicts with the target branch, you may be asked to rebase it on top of the current target branch.
+The `git rebase` command will take care of rebuilding your commits on top of the new base.
+
+This project aims to have a clean git history, where code changes are only made in non-merge commits. This simplifies
+auditability because merge commits can be assumed to not contain arbitrary code changes. Merge commits should be signed,
+and the resulting git tree hash must be deterministic and reproducible. The script in
+[/contrib/verify-commits](/contrib/verify-commits) checks that.
+
+After a rebase, reviewers are encouraged to sign off on the force push. This should be relatively straightforward with
+the `git range-diff` tool explained in the [productivity
+notes](/doc/productivity.md#diff-the-diffs-with-git-range-diff). To avoid needless review churn, maintainers will
+generally merge pull requests that received the most review attention first.
Pull Request Philosophy
-----------------------
@@ -173,9 +187,9 @@ in the future, they may be removed by the Repository Maintainer.
Refactoring is a necessary part of any software project's evolution. The
following guidelines cover refactoring pull requests for the project.
-There are three categories of refactoring, code only moves, code style fixes,
-code refactoring. In general refactoring pull requests should not mix these
-three kinds of activity in order to make refactoring pull requests easy to
+There are three categories of refactoring: code-only moves, code style fixes, and
+code refactoring. In general, refactoring pull requests should not mix these
+three kinds of activities in order to make refactoring pull requests easy to
review and uncontroversial. In all cases, refactoring PRs must not change the
behaviour of code within the pull request (bugs must be preserved as is).
@@ -309,6 +323,31 @@ about:
when someone else is asking for feedback on their code, and universe balances out.
+Backporting
+-----------
+
+Security and bug fixes can be backported from `master` to release
+branches.
+If the backport is non-trivial, it may be appropriate to open an
+additional PR, to backport the change, only after the original PR
+has been merged.
+Otherwise, backports will be done in batches and
+the maintainers will use the proper `Needs backport (...)` labels
+when needed (the original author does not need to worry).
+
+A backport should contain the following metadata in the commit body:
+
+```
+Github-Pull: #<PR number>
+Rebased-From: <commit hash of the original commit>
+```
+
+Have a look at [an example backport PR](
+https://github.com/bitcoin/bitcoin/pull/16189).
+
+Also see the [backport.py script](
+https://github.com/bitcoin-core/bitcoin-maintainer-tools#backport).
+
Release Policy
--------------
diff --git a/Makefile.am b/Makefile.am
index 22b83e80dd..c35f5080aa 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,4 +1,4 @@
-# Copyright (c) 2013-2016 The Bitcoin Core developers
+# Copyright (c) 2013-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -43,16 +43,7 @@ OSX_INSTALLER_ICONS=$(top_srcdir)/src/qt/res/icons/bitcoin.icns
OSX_PLIST=$(top_builddir)/share/qt/Info.plist #not installed
OSX_QT_TRANSLATIONS = da,de,es,hu,ru,uk,zh_CN,zh_TW
-DIST_DOCS = \
- README.md \
- $(wildcard doc/*.md) \
- $(wildcard doc/release-notes/*.md)
-DIST_CONTRIB = $(top_srcdir)/contrib/bitcoin-cli.bash-completion \
- $(top_srcdir)/contrib/bitcoin-tx.bash-completion \
- $(top_srcdir)/contrib/bitcoind.bash-completion \
- $(top_srcdir)/contrib/debian/copyright \
- $(top_srcdir)/contrib/init \
- $(top_srcdir)/contrib/install_db4.sh \
+DIST_CONTRIB = \
$(top_srcdir)/contrib/linearize/linearize-data.py \
$(top_srcdir)/contrib/linearize/linearize-hashes.py
@@ -77,7 +68,7 @@ OSX_PACKAGING = $(OSX_DEPLOY_SCRIPT) $(OSX_FANCY_PLIST) $(OSX_INSTALLER_ICONS) \
COVERAGE_INFO = baseline.info \
test_bitcoin_filtered.info total_coverage.info \
baseline_filtered.info functional_test.info functional_test_filtered.info \
- test_bitcoin_coverage.info test_bitcoin.info
+ test_bitcoin_coverage.info test_bitcoin.info fuzz.info fuzz_coverage.info
dist-hook:
-$(GIT) archive --format=tar HEAD -- src/clientversion.cpp | $(AMTAR) -C $(top_distdir) -xf -
@@ -194,6 +185,7 @@ LCOV_FILTER_PATTERN = \
-p "/usr/lib/" \
-p "/usr/lib64/" \
-p "src/leveldb/" \
+ -p "src/crc32c/" \
-p "src/bench/" \
-p "src/univalue" \
-p "src/crypto/ctaes" \
@@ -207,6 +199,15 @@ baseline_filtered.info: baseline.info
$(abs_builddir)/contrib/filter-lcov.py $(LCOV_FILTER_PATTERN) $< $@
$(LCOV) -a $@ $(LCOV_OPTS) -o $@
+fuzz.info: baseline_filtered.info
+ @TIMEOUT=15 test/fuzz/test_runner.py qa-assets/fuzz_seed_corpus -l DEBUG
+ $(LCOV) -c $(LCOV_OPTS) -d $(abs_builddir)/src --t fuzz-tests -o $@
+ $(LCOV) -z $(LCOV_OPTS) -d $(abs_builddir)/src
+
+fuzz_filtered.info: fuzz.info
+ $(abs_builddir)/contrib/filter-lcov.py $(LCOV_FILTER_PATTERN) $< $@
+ $(LCOV) -a $@ $(LCOV_OPTS) -o $@
+
test_bitcoin.info: baseline_filtered.info
$(MAKE) -C src/ check
$(LCOV) -c $(LCOV_OPTS) -d $(abs_builddir)/src -t test_bitcoin -o $@
@@ -225,12 +226,19 @@ functional_test_filtered.info: functional_test.info
$(abs_builddir)/contrib/filter-lcov.py $(LCOV_FILTER_PATTERN) $< $@
$(LCOV) -a $@ $(LCOV_OPTS) -o $@
+fuzz_coverage.info: fuzz_filtered.info
+ $(LCOV) -a $(LCOV_OPTS) baseline_filtered.info -a fuzz_filtered.info -o $@ | $(GREP) "\%" | $(AWK) '{ print substr($$3,2,50) "/" $$5 }' > coverage_percent.txt
+
test_bitcoin_coverage.info: baseline_filtered.info test_bitcoin_filtered.info
$(LCOV) -a $(LCOV_OPTS) baseline_filtered.info -a test_bitcoin_filtered.info -o $@
total_coverage.info: test_bitcoin_filtered.info functional_test_filtered.info
$(LCOV) -a $(LCOV_OPTS) baseline_filtered.info -a test_bitcoin_filtered.info -a functional_test_filtered.info -o $@ | $(GREP) "\%" | $(AWK) '{ print substr($$3,2,50) "/" $$5 }' > coverage_percent.txt
+fuzz.coverage/.dirstamp: fuzz_coverage.info
+ $(GENHTML) -s $(LCOV_OPTS) $< -o $(@D)
+ @touch $@
+
test_bitcoin.coverage/.dirstamp: test_bitcoin_coverage.info
$(GENHTML) -s $(LCOV_OPTS) $< -o $(@D)
@touch $@
@@ -239,13 +247,15 @@ total.coverage/.dirstamp: total_coverage.info
$(GENHTML) -s $(LCOV_OPTS) $< -o $(@D)
@touch $@
+cov_fuzz: fuzz.coverage/.dirstamp
+
cov: test_bitcoin.coverage/.dirstamp total.coverage/.dirstamp
endif
dist_noinst_SCRIPTS = autogen.sh
-EXTRA_DIST = $(DIST_SHARE) $(DIST_CONTRIB) $(DIST_DOCS) $(WINDOWS_PACKAGING) $(OSX_PACKAGING) $(BIN_CHECKS)
+EXTRA_DIST = $(DIST_SHARE) $(DIST_CONTRIB) $(WINDOWS_PACKAGING) $(OSX_PACKAGING) $(BIN_CHECKS)
EXTRA_DIST += \
test/functional \
@@ -300,6 +310,8 @@ EXTRA_DIST += \
test/util/data/txcreatescript3.json \
test/util/data/txcreatescript4.hex \
test/util/data/txcreatescript4.json \
+ test/util/data/txcreatescript5.hex \
+ test/util/data/txcreatescript6.hex \
test/util/data/txcreatesignv1.hex \
test/util/data/txcreatesignv1.json \
test/util/data/txcreatesignv2.hex \
@@ -327,6 +339,6 @@ clean-docs:
rm -rf doc/doxygen
clean-local: clean-docs
- rm -rf coverage_percent.txt test_bitcoin.coverage/ total.coverage/ test/tmp/ cache/ $(OSX_APP)
+ rm -rf coverage_percent.txt test_bitcoin.coverage/ total.coverage/ fuzz.coverage/ test/tmp/ cache/ $(OSX_APP)
rm -rf test/functional/__pycache__ test/functional/test_framework/__pycache__ test/cache share/rpcauth/__pycache__
rm -rf osx_volname dist/ dpi36.background.tiff dpi72.background.tiff
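The Makefile.am hunks above wire fuzz coverage into the existing lcov flow. As a rough local sketch (assuming a tree configured with --enable-lcov and --enable-fuzz, and a qa-assets/fuzz_seed_corpus checkout as referenced by the new fuzz.info rule), the added targets would be driven like this:

    make                # build the instrumented binaries and fuzz targets
    make cov_fuzz       # runs test/fuzz/test_runner.py over the seed corpus and
                        # writes the HTML report under fuzz.coverage/

The existing cov target keeps its previous behaviour; cov_fuzz is the separate entry point introduced here.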
diff --git a/build-aux/m4/ax_boost_chrono.m4 b/build-aux/m4/ax_boost_chrono.m4
deleted file mode 100644
index 4cd3b86041..0000000000
--- a/build-aux/m4/ax_boost_chrono.m4
+++ /dev/null
@@ -1,118 +0,0 @@
-# ===========================================================================
-# https://www.gnu.org/software/autoconf-archive/ax_boost_chrono.html
-# ===========================================================================
-#
-# SYNOPSIS
-#
-# AX_BOOST_CHRONO
-#
-# DESCRIPTION
-#
-# Test for Chrono library from the Boost C++ libraries. The macro requires
-# a preceding call to AX_BOOST_BASE. Further documentation is available at
-# <http://randspringer.de/boost/index.html>.
-#
-# This macro calls:
-#
-# AC_SUBST(BOOST_CHRONO_LIB)
-#
-# And sets:
-#
-# HAVE_BOOST_CHRONO
-#
-# LICENSE
-#
-# Copyright (c) 2012 Xiyue Deng <manphiz@gmail.com>
-#
-# Copying and distribution of this file, with or without modification, are
-# permitted in any medium without royalty provided the copyright notice
-# and this notice are preserved. This file is offered as-is, without any
-# warranty.
-
-#serial 5
-
-AC_DEFUN([AX_BOOST_CHRONO],
-[
- AC_ARG_WITH([boost-chrono],
- AS_HELP_STRING([--with-boost-chrono@<:@=special-lib@:>@],
- [use the Chrono library from boost - it is possible to specify a certain library for the linker
- e.g. --with-boost-chrono=boost_chrono-gcc-mt ]),
- [
- if test "$withval" = "no"; then
- want_boost="no"
- elif test "$withval" = "yes"; then
- want_boost="yes"
- ax_boost_user_chrono_lib=""
- else
- want_boost="yes"
- ax_boost_user_chrono_lib="$withval"
- fi
- ],
- [want_boost="yes"]
- )
-
- if test "x$want_boost" = "xyes"; then
- AC_REQUIRE([AC_PROG_CC])
- AC_REQUIRE([AC_CANONICAL_BUILD])
- CPPFLAGS_SAVED="$CPPFLAGS"
- CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
- export CPPFLAGS
-
- LDFLAGS_SAVED="$LDFLAGS"
- LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
- export LDFLAGS
-
- AC_CACHE_CHECK(whether the Boost::Chrono library is available,
- ax_cv_boost_chrono,
- [AC_LANG_PUSH([C++])
- CXXFLAGS_SAVE=$CXXFLAGS
-
- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include <boost/chrono.hpp>]],
- [[boost::chrono::system_clock::time_point* time = new boost::chrono::system_clock::time_point; delete time;]])],
- ax_cv_boost_chrono=yes, ax_cv_boost_chrono=no)
- CXXFLAGS=$CXXFLAGS_SAVE
- AC_LANG_POP([C++])
- ])
- if test "x$ax_cv_boost_chrono" = "xyes"; then
- AC_SUBST(BOOST_CPPFLAGS)
-
- AC_DEFINE(HAVE_BOOST_CHRONO,,[define if the Boost::Chrono library is available])
- BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'`
-
- LDFLAGS_SAVE=$LDFLAGS
- if test "x$ax_boost_user_chrono_lib" = "x"; then
- for libextension in `ls $BOOSTLIBDIR/libboost_chrono*.so* $BOOSTLIBDIR/libboost_chrono*.dylib* $BOOSTLIBDIR/libboost_chrono*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_chrono.*\)\.so.*$;\1;' -e 's;^lib\(boost_chrono.*\)\.dylib.*$;\1;' -e 's;^lib\(boost_chrono.*\)\.a.*$;\1;'` ; do
- ax_lib=${libextension}
- AC_CHECK_LIB($ax_lib, exit,
- [BOOST_CHRONO_LIB="-l$ax_lib"; AC_SUBST(BOOST_CHRONO_LIB) link_chrono="yes"; break],
- [link_chrono="no"])
- done
- if test "x$link_chrono" != "xyes"; then
- for libextension in `ls $BOOSTLIBDIR/boost_chrono*.dll* $BOOSTLIBDIR/boost_chrono*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^\(boost_chrono.*\)\.dll.*$;\1;' -e 's;^\(boost_chrono.*\)\.a.*$;\1;'` ; do
- ax_lib=${libextension}
- AC_CHECK_LIB($ax_lib, exit,
- [BOOST_CHRONO_LIB="-l$ax_lib"; AC_SUBST(BOOST_CHRONO_LIB) link_chrono="yes"; break],
- [link_chrono="no"])
- done
- fi
-
- else
- for ax_lib in $ax_boost_user_chrono_lib boost_chrono-$ax_boost_user_chrono_lib; do
- AC_CHECK_LIB($ax_lib, exit,
- [BOOST_CHRONO_LIB="-l$ax_lib"; AC_SUBST(BOOST_CHRONO_LIB) link_chrono="yes"; break],
- [link_chrono="no"])
- done
-
- fi
- if test "x$ax_lib" = "x"; then
- AC_MSG_ERROR(Could not find a version of the Boost::Chrono library!)
- fi
- if test "x$link_chrono" = "xno"; then
- AC_MSG_ERROR(Could not link against $ax_lib !)
- fi
- fi
-
- CPPFLAGS="$CPPFLAGS_SAVED"
- LDFLAGS="$LDFLAGS_SAVED"
- fi
-])
diff --git a/build_msvc/.gitignore b/build_msvc/.gitignore
index 4d4aef7e35..3e71c7b8d3 100644
--- a/build_msvc/.gitignore
+++ b/build_msvc/.gitignore
@@ -8,7 +8,19 @@ packages/*
*/Release
*/x64
*.vcxproj.user
-*.vcxproj
+
+# .vcxproj files that are auto-generated by the msvc-autogen.py script.
+libbitcoin_cli/libbitcoin_cli.vcxproj
+libbitcoin_common/libbitcoin_common.vcxproj
+libbitcoin_crypto/libbitcoin_crypto.vcxproj
+libbitcoin_server/libbitcoin_server.vcxproj
+libbitcoin_util/libbitcoin_util.vcxproj
+libbitcoin_wallet_tool/libbitcoin_wallet_tool.vcxproj
+libbitcoin_wallet/libbitcoin_wallet.vcxproj
+libbitcoin_zmq/libbitcoin_zmq.vcxproj
+bench_bitcoin/bench_bitcoin.vcxproj
+libtest_util/libtest_util.vcxproj
+
*/Win32
libbitcoin_qt/QtGeneratedFiles/*
test_bitcoin-qt/QtGeneratedFiles/*
diff --git a/build_msvc/README.md b/build_msvc/README.md
index e5aaf57abf..d4e710d55b 100644
--- a/build_msvc/README.md
+++ b/build_msvc/README.md
@@ -13,6 +13,7 @@ The minimal steps required to build Bitcoin Core with the msbuild toolchain are
```
vcpkg install --triplet x64-windows-static berkeleydb boost-filesystem boost-multi-index boost-signals2 boost-test boost-thread libevent[thread] rapidcheck zeromq double-conversion
+vcpkg integrate install
py -3 build_msvc\msvc-autogen.py
msbuild /m build_msvc\bitcoin.sln /p:Platform=x64 /p:Configuration=Release /t:build
```
@@ -56,6 +57,7 @@ The instructions below use `vcpkg` to install the dependencies.
```
PS >.\vcpkg install --triplet x64-windows-static $(Get-Content -Path build_msvc\vcpkg-packages.txt).split()
+PS >.\vcpkg integrate install
```
- Use Python to generate `*.vcxproj` from Makefile
diff --git a/build_msvc/bitcoin_config.h b/build_msvc/bitcoin_config.h
index 3178f2a3d8..5f0640ac27 100644
--- a/build_msvc/bitcoin_config.h
+++ b/build_msvc/bitcoin_config.h
@@ -47,9 +47,6 @@
/* define if the Boost library is available */
#define HAVE_BOOST /**/
-/* define if the Boost::Chrono library is available */
-#define HAVE_BOOST_CHRONO /**/
-
/* define if the Boost::Filesystem library is available */
#define HAVE_BOOST_FILESYSTEM /**/
@@ -258,12 +255,6 @@
/* Define if the visibility attribute is supported. */
#define HAVE_VISIBILITY_ATTRIBUTE 1
-/* Define this symbol if boost sleep works */
-/* #undef HAVE_WORKING_BOOST_SLEEP */
-
-/* Define this symbol if boost sleep_for works */
-#define HAVE_WORKING_BOOST_SLEEP_FOR 1
-
/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"
@@ -313,9 +304,6 @@
/* Define this symbol to build in assembly routines */
//#define USE_ASM 1
-/* Define this symbol if coverage is enabled */
-/* #undef USE_COVERAGE */
-
/* Define if dbus support should be compiled in */
/* #undef USE_DBUS */
diff --git a/build_msvc/libleveldb/libleveldb.vcxproj b/build_msvc/libleveldb/libleveldb.vcxproj
index f855923c62..1610ae7d86 100644
--- a/build_msvc/libleveldb/libleveldb.vcxproj
+++ b/build_msvc/libleveldb/libleveldb.vcxproj
@@ -24,8 +24,6 @@
<ClCompile Include="..\..\src\leveldb\db\version_set.cc" />
<ClCompile Include="..\..\src\leveldb\db\write_batch.cc" />
<ClCompile Include="..\..\src\leveldb\helpers\memenv\memenv.cc" />
- <ClCompile Include="..\..\src\leveldb\port\port_posix_sse.cc" />
- <ClCompile Include="..\..\src\leveldb\port\port_win.cc" />
<ClCompile Include="..\..\src\leveldb\table\block.cc" />
<ClCompile Include="..\..\src\leveldb\table\block_builder.cc" />
<ClCompile Include="..\..\src\leveldb\table\filter_block.cc" />
@@ -42,7 +40,7 @@
<ClCompile Include="..\..\src\leveldb\util\comparator.cc" />
<ClCompile Include="..\..\src\leveldb\util\crc32c.cc" />
<ClCompile Include="..\..\src\leveldb\util\env.cc" />
- <ClCompile Include="..\..\src\leveldb\util\env_win.cc" />
+ <ClCompile Include="..\..\src\leveldb\util\env_windows.cc" />
<ClCompile Include="..\..\src\leveldb\util\filter_policy.cc" />
<ClCompile Include="..\..\src\leveldb\util\hash.cc" />
<ClCompile Include="..\..\src\leveldb\util\histogram.cc" />
@@ -51,11 +49,11 @@
<ClCompile Include="..\..\src\leveldb\util\status.cc" />
</ItemGroup>
<ItemDefinitionGroup>
- <ClCompile>
- <PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;LEVELDB_PLATFORM_WINDOWS;LEVELDB_ATOMIC_PRESENT;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <DisableSpecificWarnings>4244;4267;4312;</DisableSpecificWarnings>
- <AdditionalIncludeDirectories>..\..\src\leveldb;..\..\src\leveldb\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- </ClCompile>
+ <ClCompile>
+ <PreprocessorDefinitions>HAVE_CRC32C=0;HAVE_SNAPPY=0;__STDC_LIMIT_MACROS;LEVELDB_IS_BIG_ENDIAN=0;_UNICODE;UNICODE;_CRT_NONSTDC_NO_DEPRECATE;LEVELDB_PLATFORM_WINDOWS;LEVELDB_ATOMIC_PRESENT;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <DisableSpecificWarnings>4244;4267;4312;4722;</DisableSpecificWarnings>
+ <AdditionalIncludeDirectories>..\..\src\leveldb;..\..\src\leveldb\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ </ClCompile>
</ItemDefinitionGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
diff --git a/ci/README.md b/ci/README.md
index 880e49b459..d2ea255b4b 100644
--- a/ci/README.md
+++ b/ci/README.md
@@ -13,7 +13,7 @@ If the repository is not a fresh git clone, you might have to clean files from p
The ci needs to perform various sysadmin tasks such as installing packages or writing to the user's home directory.
While most of the actions are done inside a docker container, this is not possible for all. Thus, cache directories,
-such as the depends cache or ccache, are mounted as read-write into the docker container. While it should be fine to run
+such as the depends cache, previous release binaries, or ccache, are mounted as read-write into the docker container. While it should be fine to run
the ci system locally on you development box, the ci scripts can generally be assumed to have received less review and
testing compared to other parts of the codebase. If you want to keep the work tree clean, you might want to run the ci
system in a virtual machine with a Linux operating system of your choice.
diff --git a/ci/lint/06_script.sh b/ci/lint/06_script.sh
index ae8122f9af..003bdf3c29 100755
--- a/ci/lint/06_script.sh
+++ b/ci/lint/06_script.sh
@@ -14,6 +14,7 @@ test/lint/git-subtree-check.sh src/crypto/ctaes
test/lint/git-subtree-check.sh src/secp256k1
test/lint/git-subtree-check.sh src/univalue
test/lint/git-subtree-check.sh src/leveldb
+test/lint/git-subtree-check.sh src/crc32c
test/lint/check-doc.py
test/lint/check-rpc-mappings.py .
test/lint/lint-all.sh
diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh
index a008d51523..4c22e4e6c5 100755
--- a/ci/test/00_setup_env.sh
+++ b/ci/test/00_setup_env.sh
@@ -33,7 +33,9 @@ export HOST=${HOST:-$("$BASE_ROOT_DIR/depends/config.guess")}
export USE_BUSY_BOX=${USE_BUSY_BOX:-false}
export RUN_UNIT_TESTS=${RUN_UNIT_TESTS:-true}
export RUN_FUNCTIONAL_TESTS=${RUN_FUNCTIONAL_TESTS:-true}
+export TEST_PREVIOUS_RELEASES=${TEST_PREVIOUS_RELEASES:-false}
export RUN_FUZZ_TESTS=${RUN_FUZZ_TESTS:-false}
+export CONTAINER_NAME=${CONTAINER_NAME:-ci_unnamed}
export DOCKER_NAME_TAG=${DOCKER_NAME_TAG:-ubuntu:18.04}
# Randomize test order.
# See https://www.boost.org/doc/libs/1_71_0/libs/test/doc/html/boost_test/utf_reference/rt_param_reference/random.html
@@ -49,6 +51,7 @@ export CCACHE_DIR=${CCACHE_DIR:-$BASE_SCRATCH_DIR/.ccache}
export DEPENDS_DIR=${DEPENDS_DIR:-$BASE_ROOT_DIR/depends}
# Folder where the build is done (bin and lib).
export BASE_OUTDIR=${BASE_OUTDIR:-$BASE_SCRATCH_DIR/out/$HOST}
+export PREVIOUS_RELEASES_DIR=${PREVIOUS_RELEASES_DIR:-$BASE_ROOT_DIR/releases/$HOST}
export SDK_URL=${SDK_URL:-https://bitcoincore.org/depends-sources/sdks}
export WINEDEBUG=${WINEDEBUG:-fixme-all}
export DOCKER_PACKAGES=${DOCKER_PACKAGES:-build-essential libtool autotools-dev automake pkg-config bsdmainutils curl ca-certificates ccache python3 rsync git procps}
diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh
index 2a522f5a8f..2b30b4a5e3 100644
--- a/ci/test/00_setup_env_arm.sh
+++ b/ci/test/00_setup_env_arm.sh
@@ -16,6 +16,7 @@ if [ -n "$QEMU_USER_CMD" ]; then
# Likely cross-compiling, so install the needed gcc and qemu-user
export PACKAGES="$PACKAGES qemu-user"
fi
+export CONTAINER_NAME=ci_arm_linux
# Use debian to avoid 404 apt errors when cross compiling
export DOCKER_NAME_TAG="debian:buster"
export USE_BUSY_BOX=true
@@ -24,4 +25,4 @@ export RUN_FUNCTIONAL_TESTS=true
export GOAL="install"
# -Wno-psabi is to disable ABI warnings: "note: parameter passing for argument of type ... changed in GCC 7.1"
# This could be removed once the ABI change warning does not show up by default
-export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi"
+export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi --enable-werror"
diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh
index b875dceef0..5688799f9e 100644
--- a/ci/test/00_setup_env_i686_centos.sh
+++ b/ci/test/00_setup_env_i686_centos.sh
@@ -7,6 +7,7 @@
export LC_ALL=C.UTF-8
export HOST=i686-pc-linux-gnu
+export CONTAINER_NAME=ci_i686_centos_7
export DOCKER_NAME_TAG=centos:7
export DOCKER_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python3 python36-zmq which patch lbzip2 dash"
export GOAL="install"
diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh
index 2f3d45f1fc..6ed6e40bd6 100644
--- a/ci/test/00_setup_env_mac.sh
+++ b/ci/test/00_setup_env_mac.sh
@@ -6,6 +6,7 @@
export LC_ALL=C.UTF-8
+export CONTAINER_NAME=ci_macos_cross
export HOST=x86_64-apple-darwin16
export PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python3-dev python3-setuptools"
export OSX_SDK=10.14
diff --git a/ci/test/00_setup_env_mac_host.sh b/ci/test/00_setup_env_mac_host.sh
index 0b437a723f..5753c3af31 100644
--- a/ci/test/00_setup_env_mac_host.sh
+++ b/ci/test/00_setup_env_mac_host.sh
@@ -7,7 +7,6 @@
export LC_ALL=C.UTF-8
export HOST=x86_64-apple-darwin16
-export BREW_PACKAGES="automake berkeley-db4 libtool boost miniupnpc pkg-config qt qrencode python3 ccache zeromq"
export PIP_PACKAGES="zmq"
export RUN_CI_ON_HOST=true
export RUN_UNIT_TESTS=true
diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh
index 2ffd3c5107..d5f39daaf5 100644
--- a/ci/test/00_setup_env_native_asan.sh
+++ b/ci/test/00_setup_env_native_asan.sh
@@ -6,7 +6,8 @@
export LC_ALL=C.UTF-8
-export PACKAGES="clang-8 llvm-8 python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev"
+export CONTAINER_NAME=ci_native_asan
+export PACKAGES="clang-8 llvm-8 python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev"
# Use clang-8 instead of default clang (which is clang-6 on Bionic) to avoid spurious segfaults when running on ppc64le
export NO_DEPENDS=1
export GOAL="install"
diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh
index b0405bb762..a739ad50d2 100644
--- a/ci/test/00_setup_env_native_fuzz.sh
+++ b/ci/test/00_setup_env_native_fuzz.sh
@@ -6,7 +6,8 @@
export LC_ALL=C.UTF-8
-export PACKAGES="clang-8 llvm-8 python3 libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-test-dev libboost-thread-dev"
+export CONTAINER_NAME=ci_native_fuzz
+export PACKAGES="clang-8 llvm-8 python3 libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev"
export NO_DEPENDS=1
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
diff --git a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh
new file mode 100644
index 0000000000..fabb3affa4
--- /dev/null
+++ b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+export LC_ALL=C.UTF-8
+
+export CONTAINER_NAME=ci_native_fuzz_valgrind
+export PACKAGES="clang-8 llvm-8 python3 libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev valgrind"
+export NO_DEPENDS=1
+export RUN_UNIT_TESTS=false
+export RUN_FUNCTIONAL_TESTS=false
+export RUN_FUZZ_TESTS=true
+export FUZZ_TESTS_CONFIG="--valgrind"
+export GOAL="install"
+export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer CC=clang-8 CXX=clang++-8"
+# Use clang-8, instead of default clang on bionic, which is clang-6 and does not come with libfuzzer on aarch64
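This new env file is what the added Travis job sources via FILE_ENV; the interesting part is FUZZ_TESTS_CONFIG="--valgrind", which the CI scripts presumably forward to the fuzz test runner. A minimal local equivalent (a sketch only, assuming the --valgrind option that test/fuzz/test_runner.py gains in this change set and a qa-assets seed corpus next to the source tree) would be:

    test/fuzz/test_runner.py --valgrind qa-assets/fuzz_seed_corpus

i.e. each fuzz target is run over its seed corpus under valgrind rather than directly.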
diff --git a/ci/test/00_setup_env_native_nowallet.sh b/ci/test/00_setup_env_native_nowallet.sh
index 53348559be..6bb371920c 100644
--- a/ci/test/00_setup_env_native_nowallet.sh
+++ b/ci/test/00_setup_env_native_nowallet.sh
@@ -6,6 +6,7 @@
export LC_ALL=C.UTF-8
+export CONTAINER_NAME=ci_native_nowallet
export PACKAGES="python3-zmq"
export DEP_OPTS="NO_WALLET=1"
export GOAL="install"
diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh
index 622ec3cfe1..21c15236d2 100644
--- a/ci/test/00_setup_env_native_qt5.sh
+++ b/ci/test/00_setup_env_native_qt5.sh
@@ -6,8 +6,10 @@
export LC_ALL=C.UTF-8
+export CONTAINER_NAME=ci_native_qt5
export PACKAGES="python3-zmq qtbase5-dev qttools5-dev-tools libdbus-1-dev libharfbuzz-dev"
export DEP_OPTS="NO_QT=1 NO_UPNP=1 DEBUG=1 ALLOW_HOST_PACKAGES=1"
export TEST_RUNNER_EXTRA="--coverage --extended --exclude feature_dbcrash" # Run extended tests so that coverage does not fail, but exclude the very slow dbcrash
export GOAL="install"
+export TEST_PREVIOUS_RELEASES=true
export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\""
diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh
index e9b7a7bba1..4d3f345ca6 100644
--- a/ci/test/00_setup_env_native_tsan.sh
+++ b/ci/test/00_setup_env_native_tsan.sh
@@ -6,8 +6,9 @@
export LC_ALL=C.UTF-8
+export CONTAINER_NAME=ci_native_tsan
export DOCKER_NAME_TAG=ubuntu:16.04
-export PACKAGES="clang llvm python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev"
+export PACKAGES="clang llvm python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev"
export NO_DEPENDS=1
export GOAL="install"
export BITCOIN_CONFIG="--enable-zmq --disable-wallet --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=thread --disable-hardening --disable-asm CC=clang CXX=clang++"
diff --git a/ci/test/00_setup_env_native_valgrind.sh b/ci/test/00_setup_env_native_valgrind.sh
index 906ffd7d79..2a7b32cefc 100644
--- a/ci/test/00_setup_env_native_valgrind.sh
+++ b/ci/test/00_setup_env_native_valgrind.sh
@@ -6,10 +6,10 @@
export LC_ALL=C.UTF-8
-export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev"
+export CONTAINER_NAME=ci_native_valgrind
+export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev"
export USE_VALGRIND=1
export NO_DEPENDS=1
-export TEST_RUNNER_EXTRA="p2p_segwit.py" # Only run one test for now. TODO enable all and bump timeouts
-export RUN_FUNCTIONAL_TESTS=true
+export TEST_RUNNER_EXTRA="--exclude rpc_bind" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547
export GOAL="install"
export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=no CC=clang CXX=clang++" # TODO enable GUI
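A hedged local equivalent of this job, assuming valgrind and clang are installed and the node was built with the configure line above, would be along these lines (the `--valgrind` option of the functional test runner is the one documented in the `doc/developer-notes.md` hunk later in this diff):

```bash
# Illustrative: run the functional suite under valgrind, skipping rpc_bind
# as this CI configuration does.
test/functional/test_runner.py --valgrind --exclude rpc_bind
```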
diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh
index 6452feb5f2..72ad99fe5d 100644
--- a/ci/test/00_setup_env_s390x.sh
+++ b/ci/test/00_setup_env_s390x.sh
@@ -17,6 +17,7 @@ if [ -n "$QEMU_USER_CMD" ]; then
export PACKAGES="$PACKAGES g++-s390x-linux-gnu qemu-user libc6:s390x libstdc++6:s390x libfontconfig1:s390x libxcb1:s390x"
fi
+export CONTAINER_NAME=ci_s390x
# Use debian to avoid 404 apt errors
export DOCKER_NAME_TAG="debian:buster"
export RUN_UNIT_TESTS=true
export RUN_FUNCTIONAL_TESTS=true
diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh
index 1e04c4287a..a34933731c 100644
--- a/ci/test/00_setup_env_win64.sh
+++ b/ci/test/00_setup_env_win64.sh
@@ -6,6 +6,7 @@
export LC_ALL=C.UTF-8
+export CONTAINER_NAME=ci_win64
export HOST=x86_64-w64-mingw32
export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64"
export RUN_FUNCTIONAL_TESTS=false
diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh
index 4d5859e4d3..acf7eeb920 100755
--- a/ci/test/04_install.sh
+++ b/ci/test/04_install.sh
@@ -14,38 +14,19 @@ if [[ $QEMU_USER_CMD == qemu-s390* ]]; then
fi
if [ "$TRAVIS_OS_NAME" == "osx" ]; then
- set +o errexit
- pushd /usr/local/Homebrew || exit 1
- git reset --hard origin/master
- popd || exit 1
- set -o errexit
- ${CI_RETRY_EXE} brew unlink python@2
- ${CI_RETRY_EXE} brew update
- # brew upgrade returns an error if any of the packages is already up to date
- # Failure is safe to ignore, unless we really need an update.
- brew upgrade $BREW_PACKAGES || true
-
- # install new packages (brew install returns an error if already installed)
- for i in $BREW_PACKAGES; do
- if ! brew list | grep -q $i; then
- ${CI_RETRY_EXE} brew install $i
- fi
- done
-
export PATH="/usr/local/opt/ccache/libexec:$PATH"
-
${CI_RETRY_EXE} pip3 install $PIP_PACKAGES
-
fi
mkdir -p "${BASE_SCRATCH_DIR}"
mkdir -p "${CCACHE_DIR}"
+mkdir -p "${PREVIOUS_RELEASES_DIR}"
export ASAN_OPTIONS="detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1"
export LSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/lsan"
export TSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/tsan:log_path=${BASE_SCRATCH_DIR}/sanitizer-output/tsan"
export UBSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1"
-env | grep -E '^(BITCOIN_CONFIG|BASE_|QEMU_|CCACHE_|WINEDEBUG|LC_ALL|BOOST_TEST_RANDOM|CONFIG_SHELL|(ASAN|LSAN|TSAN|UBSAN)_OPTIONS)' | tee /tmp/env
+env | grep -E '^(BITCOIN_CONFIG|BASE_|QEMU_|CCACHE_|WINEDEBUG|LC_ALL|BOOST_TEST_RANDOM|CONFIG_SHELL|(ASAN|LSAN|TSAN|UBSAN)_OPTIONS|TEST_PREVIOUS_RELEASES|PREVIOUS_RELEASES_DIR)' | tee /tmp/env
if [[ $HOST = *-mingw32 ]]; then
DOCKER_ADMIN="--cap-add SYS_ADMIN"
elif [[ $BITCOIN_CONFIG = *--with-sanitizers=*address* ]]; then # If ran with (ASan + LSan), Docker needs access to ptrace (https://github.com/google/sanitizers/issues/764)
@@ -62,8 +43,10 @@ if [ -z "$RUN_CI_ON_HOST" ]; then
--mount type=bind,src=$BASE_ROOT_DIR,dst=/ro_base,readonly \
--mount type=bind,src=$CCACHE_DIR,dst=$CCACHE_DIR \
--mount type=bind,src=$DEPENDS_DIR,dst=$DEPENDS_DIR \
+ --mount type=bind,src=$PREVIOUS_RELEASES_DIR,dst=$PREVIOUS_RELEASES_DIR \
-w $BASE_ROOT_DIR \
--env-file /tmp/env \
+ --name $CONTAINER_NAME \
$DOCKER_NAME_TAG)
DOCKER_EXEC () {
@@ -99,7 +82,9 @@ else
fi
if [ ! -d ${DIR_QA_ASSETS} ]; then
+ if [ "$RUN_FUZZ_TESTS" = "true" ]; then
DOCKER_EXEC git clone https://github.com/bitcoin-core/qa-assets ${DIR_QA_ASSETS}
+ fi
fi
export DIR_FUZZ_IN=${DIR_QA_ASSETS}/fuzz_seed_corpus/
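Put together, the container start-up that this hunk extends now bind-mounts the previous-releases directory and gives the container a stable name. A simplified, hypothetical rendering of the resulting `docker run` (the real command is assembled from the environment variables above and may include further flags not shown here):

```bash
# Hypothetical sketch only; the mounts and naming follow the lines added above.
docker run -dit \
  --mount type=bind,src="$BASE_ROOT_DIR",dst=/ro_base,readonly \
  --mount type=bind,src="$CCACHE_DIR",dst="$CCACHE_DIR" \
  --mount type=bind,src="$DEPENDS_DIR",dst="$DEPENDS_DIR" \
  --mount type=bind,src="$PREVIOUS_RELEASES_DIR",dst="$PREVIOUS_RELEASES_DIR" \
  -w "$BASE_ROOT_DIR" \
  --env-file /tmp/env \
  --name "$CONTAINER_NAME" \
  "$DOCKER_NAME_TAG"
```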
diff --git a/ci/test/05_before_script.sh b/ci/test/05_before_script.sh
index d8aa5c87a2..933f4cea91 100755
--- a/ci/test/05_before_script.sh
+++ b/ci/test/05_before_script.sh
@@ -26,7 +26,7 @@ if [[ $HOST = *-mingw32 ]]; then
fi
if [ -z "$NO_DEPENDS" ]; then
if [[ $DOCKER_NAME_TAG == centos* ]]; then
- # CentOS has problems building the depends if the config shell is not explicitely set
+ # CentOS has problems building the depends if the config shell is not explicitly set
# (i.e. for libevent a Makefile with an empty SHELL variable is generated, leading to
# an error as the first command is executed)
SHELL_OPTS="CONFIG_SHELL=/bin/bash"
@@ -35,3 +35,8 @@ if [ -z "$NO_DEPENDS" ]; then
fi
DOCKER_EXEC $SHELL_OPTS make $MAKEJOBS -C depends HOST=$HOST $DEP_OPTS
fi
+if [ "$TEST_PREVIOUS_RELEASES" = "true" ]; then
+ BEGIN_FOLD previous-versions
+ DOCKER_EXEC contrib/devtools/previous_release.sh -b -t "$PREVIOUS_RELEASES_DIR" v0.17.1 v0.18.1 v0.19.0.1
+ END_FOLD
+fi
diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh
index 537493a710..3b32513353 100755
--- a/ci/test/06_script_b.sh
+++ b/ci/test/06_script_b.sh
@@ -36,6 +36,6 @@ fi
if [ "$RUN_FUZZ_TESTS" = "true" ]; then
BEGIN_FOLD fuzz-tests
- DOCKER_EXEC test/fuzz/test_runner.py -l DEBUG ${DIR_FUZZ_IN}
+ DOCKER_EXEC test/fuzz/test_runner.py ${FUZZ_TESTS_CONFIG} -l DEBUG ${DIR_FUZZ_IN}
END_FOLD
fi
diff --git a/configure.ac b/configure.ac
index 18f3104acb..1f85dd3a99 100644
--- a/configure.ac
+++ b/configure.ac
@@ -36,14 +36,14 @@ dnl faketime breaks configure and is only needed for make. Disable it here.
unset FAKETIME
dnl Automake init set-up and checks
-AM_INIT_AUTOMAKE([no-define subdir-objects foreign])
+AM_INIT_AUTOMAKE([1.13 no-define subdir-objects foreign])
dnl faketime messes with timestamps and causes configure to be re-run.
dnl --disable-maintainer-mode can be used to bypass this.
AM_MAINTAINER_MODE([enable])
dnl make the compilation flags quiet unless V=1 is used
-m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
+AM_SILENT_RULES([yes])
dnl Compiler checks (here before libtool).
if test "x${CXXFLAGS+set}" = "xset"; then
@@ -262,6 +262,13 @@ AC_ARG_ENABLE([gprof],
[enable_gprof=$enableval],
[enable_gprof=no])
+dnl Pass compiler & linker flags that make builds deterministic
+AC_ARG_ENABLE([determinism],
+ [AS_HELP_STRING([--enable-determinism],
+ [Enable compilation flags that make builds deterministic (default is no)])],
+ [enable_determinism=$enableval],
+ [enable_determinism=no])
+
dnl Turn warnings into errors
AC_ARG_ENABLE([werror],
[AS_HELP_STRING([--enable-werror],
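The new `--enable-determinism` switch is opt-in and, with this patch, only adds `-Wl,--no-insert-timestamp` on Windows targets (see the hunk around the hardening flags below). A hedged example of enabling it for a MinGW-w64 cross build, with an illustrative depends prefix:

```bash
# Illustrative cross-build invocation; the depends prefix path is an assumption.
./autogen.sh
CONFIG_SITE=$PWD/depends/x86_64-w64-mingw32/share/config.site \
  ./configure --prefix=/ --enable-determinism
make -j"$(nproc)"
```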
@@ -330,6 +337,7 @@ if test "x$enable_werror" = "xyes"; then
AX_CHECK_COMPILE_FLAG([-Werror=thread-safety-analysis],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=thread-safety-analysis"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Werror=unused-variable],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=unused-variable"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Werror=date-time],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=date-time"],,[[$CXXFLAG_WERROR]])
+ AX_CHECK_COMPILE_FLAG([-Werror=return-type],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=return-type"],,[[$CXXFLAG_WERROR]])
fi
if test "x$CXXFLAGS_overridden" = "xno"; then
@@ -355,7 +363,7 @@ if test "x$CXXFLAGS_overridden" = "xno"; then
AX_CHECK_COMPILE_FLAG([-Wimplicit-fallthrough],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-implicit-fallthrough"],,[[$CXXFLAG_WERROR]])
fi
-enable_hwcrc32=no
+enable_sse42=no
enable_sse41=no
enable_avx2=no
enable_shani=no
@@ -365,6 +373,8 @@ if test "x$use_asm" = "xyes"; then
dnl Check for optional instruction set support. Enabling these does _not_ imply that all code will
dnl be compiled with them, rather that specific objects/libs may use them after checking for runtime
dnl compatibility.
+
+dnl x86
AX_CHECK_COMPILE_FLAG([-msse4.2],[[SSE42_CXXFLAGS="-msse4.2"]],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-msse4.1],[[SSE41_CXXFLAGS="-msse4.1"]],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-mavx -mavx2],[[AVX2_CXXFLAGS="-mavx -mavx2"]],,[[$CXXFLAG_WERROR]])
@@ -372,7 +382,7 @@ AX_CHECK_COMPILE_FLAG([-msse4 -msha],[[SHANI_CXXFLAGS="-msse4 -msha"]],,[[$CXXFL
TEMP_CXXFLAGS="$CXXFLAGS"
CXXFLAGS="$CXXFLAGS $SSE42_CXXFLAGS"
-AC_MSG_CHECKING(for assembler crc32 support)
+AC_MSG_CHECKING(for SSE4.2 intrinsics)
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <stdint.h>
#if defined(_MSC_VER)
@@ -387,7 +397,7 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
l = _mm_crc32_u64(l, 0);
return l;
]])],
- [ AC_MSG_RESULT(yes); enable_hwcrc32=yes],
+ [ AC_MSG_RESULT(yes); enable_sse42=yes],
[ AC_MSG_RESULT(no)]
)
CXXFLAGS="$TEMP_CXXFLAGS"
@@ -439,6 +449,24 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
)
CXXFLAGS="$TEMP_CXXFLAGS"
+# ARM
+AX_CHECK_COMPILE_FLAG([-march=armv8-a+crc+crypto],[[ARM_CRC_CXXFLAGS="-march=armv8-a+crc+crypto"]],,[[$CXXFLAG_WERROR]])
+
+TEMP_CXXFLAGS="$CXXFLAGS"
+CXXFLAGS="$CXXFLAGS $ARM_CRC_CXXFLAGS"
+AC_MSG_CHECKING(for ARM CRC32 intrinsics)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+ #include <arm_acle.h>
+ #include <arm_neon.h>
+ ]],[[
+ __crc32cb(0, 0); __crc32ch(0, 0); __crc32cw(0, 0); __crc32cd(0, 0);
+ vmull_p64(0, 0);
+ ]])],
+ [ AC_MSG_RESULT(yes); enable_arm_crc=yes; ],
+ [ AC_MSG_RESULT(no)]
+)
+CXXFLAGS="$TEMP_CXXFLAGS"
+
fi
CPPFLAGS="$CPPFLAGS -DHAVE_BUILD_INFO -D__STDC_FORMAT_MACROS"
@@ -517,7 +545,6 @@ case $host in
fi
CPPFLAGS="$CPPFLAGS -D_MT -DWIN32 -D_WINDOWS -DBOOST_THREAD_USE_LIB -D_WIN32_WINNT=0x0601"
- LEVELDB_TARGET_FLAGS="-DOS_WINDOWS"
if test "x$CXXFLAGS_overridden" = "xno"; then
CXXFLAGS="$CXXFLAGS -w"
fi
@@ -533,7 +560,6 @@ case $host in
;;
*darwin*)
TARGET_OS=darwin
- LEVELDB_TARGET_FLAGS="-DOS_MACOSX"
if test x$cross_compiling != xyes; then
BUILD_OS=darwin
AC_PATH_PROGS([RSVG_CONVERT], [rsvg-convert rsvg],rsvg-convert)
@@ -585,35 +611,9 @@ case $host in
*android*)
dnl make sure android stays above linux for hosts like *linux-android*
TARGET_OS=android
- LEVELDB_TARGET_FLAGS="-DOS_ANDROID"
;;
*linux*)
TARGET_OS=linux
- LEVELDB_TARGET_FLAGS="-DOS_LINUX"
- ;;
- *kfreebsd*)
- LEVELDB_TARGET_FLAGS="-DOS_KFREEBSD"
- ;;
- *freebsd*)
- LEVELDB_TARGET_FLAGS="-DOS_FREEBSD"
- ;;
- *openbsd*)
- LEVELDB_TARGET_FLAGS="-DOS_OPENBSD"
- ;;
- *netbsd*)
- LEVELDB_TARGET_FLAGS="-DOS_NETBSD"
- ;;
- *dragonfly*)
- LEVELDB_TARGET_FLAGS="-DOS_DRAGONFLYBSD"
- ;;
- *solaris*)
- LEVELDB_TARGET_FLAGS="-DOS_SOLARIS"
- ;;
- *hpux*)
- LEVELDB_TARGET_FLAGS="-DOS_HPUX"
- ;;
- *)
- AC_MSG_ERROR(Cannot build leveldb for $host. Please file a bug report.)
;;
esac
@@ -649,7 +649,6 @@ if test x$use_lcov = xyes; then
[AC_MSG_ERROR("lcov testing requested but --coverage linker flag does not work")])
AX_CHECK_COMPILE_FLAG([--coverage],[CXXFLAGS="$CXXFLAGS --coverage"],
[AC_MSG_ERROR("lcov testing requested but --coverage flag does not work")])
- AC_DEFINE(USE_COVERAGE, 1, [Define this symbol if coverage is enabled])
CXXFLAGS="$CXXFLAGS -Og"
fi
@@ -682,8 +681,6 @@ if test x$ac_cv_sys_large_files != x &&
CPPFLAGS="$CPPFLAGS -D_LARGE_FILES=$ac_cv_sys_large_files"
fi
-AX_CHECK_LINK_FLAG([[-Wl,--large-address-aware]], [LDFLAGS="$LDFLAGS -Wl,--large-address-aware"])
-
AX_GCC_FUNC_ATTRIBUTE([visibility])
AX_GCC_FUNC_ATTRIBUTE([dllexport])
AX_GCC_FUNC_ATTRIBUTE([dllimport])
@@ -773,6 +770,12 @@ if test x$TARGET_OS = xdarwin; then
AX_CHECK_LINK_FLAG([[-Wl,-bind_at_load]], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-bind_at_load"])
fi
+if test x$enable_determinism = xyes; then
+ if test x$TARGET_OS = xwindows; then
+ AX_CHECK_LINK_FLAG([[-Wl,--no-insert-timestamp]], [LDFLAGS="$LDFLAGS -Wl,--no-insert-timestamp"])
+ fi
+fi
+
AC_CHECK_HEADERS([endian.h sys/endian.h byteswap.h stdio.h stdlib.h unistd.h strings.h sys/types.h sys/stat.h sys/select.h sys/prctl.h sys/sysctl.h vm/vm_param.h sys/vmmeter.h sys/resources.h])
dnl FD_ZERO may be dependent on a declaration of memcpy, e.g. in SmartOS
@@ -885,11 +888,6 @@ if test "x$use_thread_local" = xyes || { test "x$use_thread_local" = xauto && te
dnl https://gist.github.com/jamesob/fe9a872051a88b2025b1aa37bfa98605
AC_MSG_RESULT(no)
;;
- *darwin*)
- dnl TODO enable thread_local on later versions of Darwin where it is
- dnl supported (per https://stackoverflow.com/a/29929949)
- AC_MSG_RESULT(no)
- ;;
*freebsd*)
dnl FreeBSD's implementation of thread_local is also buggy (per
dnl https://groups.google.com/d/msg/bsdmailinglist/22ncTZAbDp4/Dii_pII5AwAJ)
@@ -936,11 +934,10 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>
AC_MSG_CHECKING(for sysctl)
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/types.h>
#include <sys/sysctl.h>]],
- [[ static const int name[2] = {CTL_KERN, KERN_VERSION};
- #ifdef __linux__
+ [[ #ifdef __linux__
#error "Don't use sysctl on Linux, it's deprecated even when it works"
#endif
- sysctl(name, 2, nullptr, nullptr, nullptr, 0); ]])],
+ sysctl(nullptr, 2, nullptr, nullptr, nullptr, 0); ]])],
[ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_SYSCTL, 1,[Define this symbol if the BSD sysctl() is available]) ],
[ AC_MSG_RESULT(no)]
)
@@ -948,7 +945,10 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/types.h>
AC_MSG_CHECKING(for sysctl KERN_ARND)
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/types.h>
#include <sys/sysctl.h>]],
- [[ static const int name[2] = {CTL_KERN, KERN_ARND};
+ [[ #ifdef __linux__
+ #error "Don't use sysctl on Linux, it's deprecated even when it works"
+ #endif
+ static int name[2] = {CTL_KERN, KERN_ARND};
sysctl(name, 2, nullptr, nullptr, nullptr, 0); ]])],
[ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_SYSCTL_ARND, 1,[Define this symbol if the BSD sysctl(KERN_ARND) is available]) ],
[ AC_MSG_RESULT(no)]
@@ -962,6 +962,72 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <stdint.h>
[ AC_MSG_RESULT(no)]
)
+dnl LevelDB platform checks
+AC_MSG_CHECKING(for fdatasync)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]],
+ [[ fdatasync(0); ]])],
+ [ AC_MSG_RESULT(yes); HAVE_FDATASYNC=1 ],
+ [ AC_MSG_RESULT(no); HAVE_FDATASYNC=0 ]
+)
+
+AC_MSG_CHECKING(for F_FULLFSYNC)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <fcntl.h>]],
+ [[ fcntl(0, F_FULLFSYNC, 0); ]])],
+ [ AC_MSG_RESULT(yes); HAVE_FULLFSYNC=1 ],
+ [ AC_MSG_RESULT(no); HAVE_FULLFSYNC=0 ]
+)
+
+AC_MSG_CHECKING(for O_CLOEXEC)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <fcntl.h>]],
+ [[ open("", O_CLOEXEC); ]])],
+ [ AC_MSG_RESULT(yes); HAVE_O_CLOEXEC=1 ],
+ [ AC_MSG_RESULT(no); HAVE_O_CLOEXEC=0 ]
+)
+
+dnl crc32c platform checks
+AC_MSG_CHECKING(for __builtin_prefetch)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[
+ char data = 0;
+ const char* address = &data;
+ __builtin_prefetch(address, 0, 0);
+ ]])],
+ [ AC_MSG_RESULT(yes); HAVE_BUILTIN_PREFETCH=1 ],
+ [ AC_MSG_RESULT(no); HAVE_BUILTIN_PREFETCH=0 ]
+)
+
+AC_MSG_CHECKING(for _mm_prefetch)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <xmmintrin.h>]], [[
+ char data = 0;
+ const char* address = &data;
+ _mm_prefetch(address, _MM_HINT_NTA);
+ ]])],
+ [ AC_MSG_RESULT(yes); HAVE_MM_PREFETCH=1 ],
+ [ AC_MSG_RESULT(no); HAVE_MM_PREFETCH=0 ]
+)
+
+AC_MSG_CHECKING(for strong getauxval support in the system headers)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+ #include <arm_acle.h>
+ #include <arm_neon.h>
+ #include <sys/auxv.h>
+ ]], [[
+ getauxval(AT_HWCAP);
+ ]])],
+ [ AC_MSG_RESULT(yes); HAVE_STRONG_GETAUXVAL=1 ],
+ [ AC_MSG_RESULT(no); HAVE_STRONG_GETAUXVAL=0 ]
+)
+
+AC_MSG_CHECKING(for weak getauxval support in the compiler)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+ unsigned long getauxval(unsigned long type) __attribute__((weak));
+ #define AT_HWCAP 16
+ ]], [[
+ getauxval(AT_HWCAP);
+ ]])],
+ [ AC_MSG_RESULT(yes); HAVE_WEAK_GETAUXVAL=1 ],
+ [ AC_MSG_RESULT(no); HAVE_WEAK_GETAUXVAL=0 ]
+)
+
dnl Check for reduced exports
if test x$use_reduce_exports = xyes; then
AX_CHECK_COMPILE_FLAG([-fvisibility=hidden],[RE_CXXFLAGS="-fvisibility=hidden"],
@@ -1074,7 +1140,6 @@ fi
AX_BOOST_SYSTEM
AX_BOOST_FILESYSTEM
AX_BOOST_THREAD
-AX_BOOST_CHRONO
dnl Boost 1.56 through 1.62 allow using std::atomic instead of its own atomic
dnl counter implementations. In 1.63 and later the std::atomic approach is default.
@@ -1141,7 +1206,7 @@ fi
if test x$use_boost = xyes; then
-BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB $BOOST_CHRONO_LIB"
+BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB"
dnl If boost (prior to 1.57) was built without c++11, it emulated scoped enums
@@ -1179,57 +1244,6 @@ AC_LINK_IFELSE([AC_LANG_PROGRAM([[
LIBS="$TEMP_LIBS"
CPPFLAGS="$TEMP_CPPFLAGS"
-dnl Boost >= 1.50 uses sleep_for rather than the now-deprecated sleep, however
-dnl it was broken from 1.50 to 1.52 when backed by nanosleep. Use sleep_for if
-dnl a working version is available, else fall back to sleep. sleep was removed
-dnl after 1.56.
-dnl If neither is available, abort.
-TEMP_LIBS="$LIBS"
-LIBS="$BOOST_LIBS $LIBS"
-TEMP_CPPFLAGS="$CPPFLAGS"
-CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
-AC_LINK_IFELSE([AC_LANG_PROGRAM([[
- #include <boost/thread/thread.hpp>
- #include <boost/version.hpp>
- ]],[[
- #if BOOST_VERSION >= 105000 && (!defined(BOOST_HAS_NANOSLEEP) || BOOST_VERSION >= 105200)
- boost::this_thread::sleep_for(boost::chrono::milliseconds(0));
- #else
- choke me
- #endif
- ]])],
- [boost_sleep=yes;
- AC_DEFINE(HAVE_WORKING_BOOST_SLEEP_FOR, 1, [Define this symbol if boost sleep_for works])],
- [boost_sleep=no])
-LIBS="$TEMP_LIBS"
-CPPFLAGS="$TEMP_CPPFLAGS"
-
-if test x$boost_sleep != xyes; then
-TEMP_LIBS="$LIBS"
-LIBS="$BOOST_LIBS $LIBS"
-TEMP_CPPFLAGS="$CPPFLAGS"
-CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
-AC_LINK_IFELSE([AC_LANG_PROGRAM([[
- #include <boost/version.hpp>
- #include <boost/thread.hpp>
- #include <boost/date_time/posix_time/posix_time_types.hpp>
- ]],[[
- #if BOOST_VERSION <= 105600
- boost::this_thread::sleep(boost::posix_time::milliseconds(0));
- #else
- choke me
- #endif
- ]])],
- [boost_sleep=yes; AC_DEFINE(HAVE_WORKING_BOOST_SLEEP, 1, [Define this symbol if boost sleep works])],
- [boost_sleep=no])
-LIBS="$TEMP_LIBS"
-CPPFLAGS="$TEMP_CPPFLAGS"
-fi
-
-if test x$boost_sleep != xyes; then
- AC_MSG_ERROR(No working boost sleep implementation found.)
-fi
-
fi
if test x$use_pkgconfig = xyes; then
@@ -1240,7 +1254,7 @@ if test x$use_pkgconfig = xyes; then
if test x$use_qr != xno; then
BITCOIN_QT_CHECK([PKG_CHECK_MODULES([QR], [libqrencode], [have_qrencode=yes], [have_qrencode=no])])
fi
- if test x$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests != xnononono; then
+ if test x$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench != xnonononono; then
PKG_CHECK_MODULES([EVENT], [libevent],, [AC_MSG_ERROR(libevent not found.)])
if test x$TARGET_OS != xwindows; then
PKG_CHECK_MODULES([EVENT_PTHREADS], [libevent_pthreads],, [AC_MSG_ERROR(libevent_pthreads not found.)])
@@ -1260,7 +1274,7 @@ if test x$use_pkgconfig = xyes; then
)
else
- if test x$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests != xnononono; then
+ if test x$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench != xnonononono; then
AC_CHECK_HEADER([event2/event.h],, AC_MSG_ERROR(libevent headers missing),)
AC_CHECK_LIB([event],[main],EVENT_LIBS=-levent,AC_MSG_ERROR(libevent missing))
if test x$TARGET_OS != xwindows; then
@@ -1517,11 +1531,13 @@ AM_CONDITIONAL([USE_QRCODE], [test x$use_qr = xyes])
AM_CONDITIONAL([USE_LCOV],[test x$use_lcov = xyes])
AM_CONDITIONAL([GLIBC_BACK_COMPAT],[test x$use_glibc_compat = xyes])
AM_CONDITIONAL([HARDEN],[test x$use_hardening = xyes])
-AM_CONDITIONAL([ENABLE_HWCRC32],[test x$enable_hwcrc32 = xyes])
+AM_CONDITIONAL([ENABLE_SSE42],[test x$enable_sse42 = xyes])
AM_CONDITIONAL([ENABLE_SSE41],[test x$enable_sse41 = xyes])
AM_CONDITIONAL([ENABLE_AVX2],[test x$enable_avx2 = xyes])
AM_CONDITIONAL([ENABLE_SHANI],[test x$enable_shani = xyes])
+AM_CONDITIONAL([ENABLE_ARM_CRC],[test x$enable_arm_crc = xyes])
AM_CONDITIONAL([USE_ASM],[test x$use_asm = xyes])
+AM_CONDITIONAL([WORDS_BIGENDIAN],[test x$ac_cv_c_bigendian = xyes])
AC_DEFINE(CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MAJOR, [Major version])
AC_DEFINE(CLIENT_VERSION_MINOR, _CLIENT_VERSION_MINOR, [Minor version])
@@ -1568,23 +1584,31 @@ AC_SUBST(SSE42_CXXFLAGS)
AC_SUBST(SSE41_CXXFLAGS)
AC_SUBST(AVX2_CXXFLAGS)
AC_SUBST(SHANI_CXXFLAGS)
+AC_SUBST(ARM_CRC_CXXFLAGS)
AC_SUBST(LIBTOOL_APP_LDFLAGS)
AC_SUBST(USE_UPNP)
AC_SUBST(USE_QRCODE)
AC_SUBST(BOOST_LIBS)
AC_SUBST(TESTDEFS)
-AC_SUBST(LEVELDB_TARGET_FLAGS)
AC_SUBST(MINIUPNPC_CPPFLAGS)
AC_SUBST(MINIUPNPC_LIBS)
AC_SUBST(EVENT_LIBS)
AC_SUBST(EVENT_PTHREADS_LIBS)
AC_SUBST(ZMQ_LIBS)
AC_SUBST(QR_LIBS)
+AC_SUBST(HAVE_FDATASYNC)
+AC_SUBST(HAVE_FULLFSYNC)
+AC_SUBST(HAVE_O_CLOEXEC)
+AC_SUBST(HAVE_BUILTIN_PREFETCH)
+AC_SUBST(HAVE_MM_PREFETCH)
+AC_SUBST(HAVE_STRONG_GETAUXVAL)
+AC_SUBST(HAVE_WEAK_GETAUXVAL)
AC_CONFIG_FILES([Makefile src/Makefile doc/man/Makefile share/setup.nsi share/qt/Info.plist test/config.ini])
AC_CONFIG_FILES([contrib/devtools/split-debug.sh],[chmod +x contrib/devtools/split-debug.sh])
AM_COND_IF([HAVE_DOXYGEN], [AC_CONFIG_FILES([doc/Doxyfile])])
AC_CONFIG_LINKS([contrib/filter-lcov.py:contrib/filter-lcov.py])
AC_CONFIG_LINKS([test/functional/test_runner.py:test/functional/test_runner.py])
+AC_CONFIG_LINKS([test/fuzz/test_runner.py:test/fuzz/test_runner.py])
AC_CONFIG_LINKS([test/util/bitcoin-util-test.py:test/util/bitcoin-util-test.py])
AC_CONFIG_LINKS([test/util/rpcauth-test.py:test/util/rpcauth-test.py])
diff --git a/contrib/devtools/README.md b/contrib/devtools/README.md
index 515a0d8fc6..f5533719c0 100644
--- a/contrib/devtools/README.md
+++ b/contrib/devtools/README.md
@@ -109,7 +109,7 @@ certain symbols and are only linked against allowed libraries.
For Linux this means checking for allowed gcc, glibc and libstdc++ version symbols.
This makes sure they are still compatible with the minimum supported distribution versions.
-For macOS we check that the executables are only linked against libraries we allow.
+For macOS and Windows we check that the executables are only linked against libraries we allow.
Example usage after a gitian build:
diff --git a/contrib/devtools/copyright_header.py b/contrib/devtools/copyright_header.py
index 1b71245aab..92120eaff7 100755
--- a/contrib/devtools/copyright_header.py
+++ b/contrib/devtools/copyright_header.py
@@ -34,6 +34,7 @@ EXCLUDE_DIRS = [
"src/leveldb/",
"src/secp256k1/",
"src/univalue/",
+ "src/crc32c/",
]
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.mm', '*.py', '*.sh', '*.bash-completion']
diff --git a/contrib/devtools/previous_release.sh b/contrib/devtools/previous_release.sh
new file mode 100755
index 0000000000..efd035f778
--- /dev/null
+++ b/contrib/devtools/previous_release.sh
@@ -0,0 +1,149 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2018-2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+# Build previous releases.
+
+export LC_ALL=C
+
+CONFIG_FLAGS=""
+FUNCTIONAL_TESTS=0
+DELETE_EXISTING=0
+USE_DEPENDS=0
+DOWNLOAD_BINARY=0
+CONFIG_FLAGS=""
+TARGET="releases"
+
+while getopts ":hfrdbt:" opt; do
+ case $opt in
+ h)
+      echo "Usage: ./previous_release.sh [options] tag1 tag2"
+ echo " options:"
+ echo " -h Print this message"
+ echo " -f Configure for functional tests"
+ echo " -r Remove existing directory"
+ echo " -d Use depends"
+ echo " -b Download release binary"
+ echo " -t Target directory (default: releases)"
+ exit 0
+ ;;
+ f)
+ FUNCTIONAL_TESTS=1
+ CONFIG_FLAGS="$CONFIG_FLAGS --without-gui --disable-tests --disable-bench"
+ ;;
+ r)
+ DELETE_EXISTING=1
+ ;;
+ d)
+ USE_DEPENDS=1
+ ;;
+ b)
+ DOWNLOAD_BINARY=1
+ ;;
+ t)
+ TARGET=$OPTARG
+ ;;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ exit 1
+ ;;
+ esac
+done
+
+shift $((OPTIND-1))
+
+if [ -z "$1" ]; then
+  echo "Specify release tag(s), e.g.: ./previous_release.sh v0.15.1"
+ exit 1
+fi
+
+if [ ! -d "$TARGET" ]; then
+ mkdir -p $TARGET
+fi
+
+if [ "$DOWNLOAD_BINARY" -eq "1" ]; then
+ HOST="${HOST:-$(./depends/config.guess)}"
+ case "$HOST" in
+ x86_64-*-linux*)
+ PLATFORM=x86_64-linux-gnu
+ ;;
+ x86_64-apple-darwin*)
+ PLATFORM=osx64
+ ;;
+ *)
+ echo "Not sure which binary to download for $HOST."
+ exit 1
+ ;;
+ esac
+fi
+
+echo "Releases directory: $TARGET"
+pushd "$TARGET" || exit 1
+{
+ for tag in "$@"
+ do
+ if [ "$DELETE_EXISTING" -eq "1" ]; then
+ if [ -d "$tag" ]; then
+ rm -r "$tag"
+ fi
+ fi
+
+ if [ "$DOWNLOAD_BINARY" -eq "0" ]; then
+
+ if [ ! -d "$tag" ]; then
+      if [ -z "$(git tag -l "$tag")" ]; then
+ echo "Tag $tag not found"
+ exit 1
+ fi
+
+ git clone https://github.com/bitcoin/bitcoin "$tag"
+ pushd "$tag" || exit 1
+ {
+ git checkout "$tag"
+ if [ "$USE_DEPENDS" -eq "1" ]; then
+ pushd depends || exit 1
+ {
+ if [ "$FUNCTIONAL_TESTS" -eq "1" ]; then
+ make NO_QT=1
+ else
+ make
+ fi
+ HOST="${HOST:-$(./config.guess)}"
+ }
+ popd || exit 1
+ CONFIG_FLAGS="--prefix=$PWD/depends/$HOST $CONFIG_FLAGS"
+ fi
+ ./autogen.sh
+ ./configure $CONFIG_FLAGS
+ make
+ # Move binaries, so they're in the same place as in the release download:
+ mkdir bin
+ mv src/bitcoind src/bitcoin-cli src/bitcoin-tx bin
+ if [ "$FUNCTIONAL_TESTS" -eq "0" ]; then
+ mv src/qt/bitcoin-qt bin
+ fi
+ }
+ popd || exit 1
+ fi
+ else
+ if [ -d "$tag" ]; then
+ echo "Using cached $tag"
+ else
+ mkdir "$tag"
+ if [[ "$tag" =~ v(.*)(rc[0-9]+)$ ]]; then
+ BIN_PATH="bin/bitcoin-core-${BASH_REMATCH[1]}/test.${BASH_REMATCH[2]}"
+ else
+ BIN_PATH="bin/bitcoin-core-${tag:1}"
+ fi
+ URL="https://bitcoin.org/$BIN_PATH/bitcoin-${tag:1}-$PLATFORM.tar.gz"
+ echo "Fetching: $URL"
+ curl -O $URL
+ tar -zxf "bitcoin-${tag:1}-$PLATFORM.tar.gz" -C "$tag" --strip-components=1 "bitcoin-${tag:1}"
+ rm "bitcoin-${tag:1}-$PLATFORM.tar.gz"
+ fi
+ fi
+ done
+}
+popd || exit 1
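Two hedged usage examples for the new helper (tags and the target directory are illustrative; the first form mirrors the call added to `ci/test/05_before_script.sh` above):

```bash
# Download official release binaries (-b) into /tmp/prev:
contrib/devtools/previous_release.sh -b -t /tmp/prev v0.17.1 v0.18.1 v0.19.0.1

# Or build a tag from source with depends (-d), configured for the
# functional test suite (-f):
contrib/devtools/previous_release.sh -d -f -t /tmp/prev v0.18.1
```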
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index f92d997621..6949cb7ced 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -3,9 +3,8 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
-A script to check that the (Linux) executables produced by gitian only contain
-allowed gcc and glibc version symbols. This makes sure they are still compatible
-with the minimum supported Linux distribution versions.
+A script to check that the executables produced by gitian only contain
+certain symbols and are only linked against allowed libraries.
Example usage:
@@ -53,6 +52,7 @@ IGNORE_EXPORTS = {
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
+OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
OTOOL_CMD = os.getenv('OTOOL', '/usr/bin/otool')
# Allowed NEEDED libraries
@@ -101,6 +101,26 @@ MACHO_ALLOWED_LIBRARIES = {
'libobjc.A.dylib', # Objective-C runtime library
}
+PE_ALLOWED_LIBRARIES = {
+'ADVAPI32.dll', # security & registry
+'IPHLPAPI.DLL', # IP helper API
+'KERNEL32.dll', # win32 base APIs
+'msvcrt.dll', # C standard library for MSVC
+'SHELL32.dll', # shell API
+'USER32.dll', # user interface
+'WS2_32.dll', # sockets
+# bitcoin-qt only
+'dwmapi.dll', # desktop window manager
+'GDI32.dll', # graphics device interface
+'IMM32.dll', # input method editor
+'ole32.dll', # component object model
+'OLEAUT32.dll', # OLE Automation API
+'SHLWAPI.dll', # lightweight shell API
+'UxTheme.dll',
+'VERSION.dll', # version checking
+'WINMM.dll', # WinMM audio API
+}
+
class CPPFilt(object):
'''
Demangle C++ symbol names.
@@ -218,6 +238,26 @@ def check_MACHO_libraries(filename) -> bool:
ok = False
return ok
+def pe_read_libraries(filename) -> List[str]:
+ p = subprocess.Popen([OBJDUMP_CMD, '-x', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
+ (stdout, stderr) = p.communicate()
+ if p.returncode:
+ raise IOError('Error opening file')
+ libraries = []
+ for line in stdout.splitlines():
+ if 'DLL Name:' in line:
+ tokens = line.split(': ')
+ libraries.append(tokens[1])
+ return libraries
+
+def check_PE_libraries(filename) -> bool:
+ ok = True
+ for dylib in pe_read_libraries(filename):
+ if dylib not in PE_ALLOWED_LIBRARIES:
+ print('{} is not in ALLOWED_LIBRARIES!'.format(dylib))
+ ok = False
+ return ok
+
CHECKS = {
'ELF': [
('IMPORTED_SYMBOLS', check_imported_symbols),
@@ -226,6 +266,9 @@ CHECKS = {
],
'MACHO': [
('DYNAMIC_LIBRARIES', check_MACHO_libraries)
+],
+'PE' : [
+ ('DYNAMIC_LIBRARIES', check_PE_libraries)
]
}
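With the `PE` entry added to `CHECKS`, Windows binaries can be checked the same way as ELF and Mach-O ones. A hedged example after a MinGW-w64 build (the `OBJDUMP` override is only needed if the default `objdump` cannot read PE files):

```bash
# Illustrative: confirm cross-built Windows executables only link allowed DLLs.
OBJDUMP=x86_64-w64-mingw32-objdump \
  contrib/devtools/symbol-check.py src/bitcoind.exe src/bitcoin-cli.exe
```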
diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml
index 257dd8ba30..a13a42d391 100644
--- a/contrib/gitian-descriptors/gitian-linux.yml
+++ b/contrib/gitian-descriptors/gitian-linux.yml
@@ -40,7 +40,7 @@ script: |
set -e -o pipefail
WRAP_DIR=$HOME/wrapped
- HOSTS="i686-pc-linux-gnu x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu"
+ HOSTS="x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu"
CONFIGFLAGS="--enable-glibc-back-compat --enable-reduce-exports --disable-bench --disable-gui-tests"
FAKETIME_HOST_PROGS="gcc g++"
FAKETIME_PROGS="date ar ranlib nm"
@@ -147,13 +147,6 @@ script: |
SOURCEDIST=$(echo bitcoin-*.tar.gz)
DISTNAME=${SOURCEDIST/%.tar.gz}
- # Correct tar file order
- mkdir -p temp
- pushd temp
- tar -xf ../$SOURCEDIST
- find bitcoin-* | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ../$SOURCEDIST
- popd
-
# Workaround for tarball not building with the bare tag version (prep)
make -C src obj/build.h
@@ -190,11 +183,12 @@ script: |
rm -rf ${DISTNAME}/lib/pkgconfig
find ${DISTNAME}/bin -type f -executable -print0 | xargs -0 -n1 -I{} ../contrib/devtools/split-debug.sh {} {} {}.dbg
find ${DISTNAME}/lib -type f -print0 | xargs -0 -n1 -I{} ../contrib/devtools/split-debug.sh {} {} {}.dbg
- cp ../README.md ${DISTNAME}/
+ cp ../../README.md ${DISTNAME}/
find ${DISTNAME} -not -name "*.dbg" | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}.tar.gz
find ${DISTNAME} -name "*.dbg" | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}-debug.tar.gz
cd ../../
rm -rf distsrc-${i}
done
- mkdir -p $OUTDIR/src
- mv $SOURCEDIST $OUTDIR/src
+
+ mkdir -p ${OUTDIR}/src
+ git archive --output=${OUTDIR}/src/${DISTNAME}.tar.gz HEAD
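Replacing the re-packed `make dist` tarball with `git archive` produces the source archive directly from the tagged tree, which is already deterministic for a given commit. The standalone equivalent of the new line is roughly (output name illustrative):

```bash
# Archive the current HEAD as the source tarball, as the descriptor now does.
git archive --output=bitcoin-src.tar.gz HEAD
```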
diff --git a/contrib/gitian-descriptors/gitian-osx.yml b/contrib/gitian-descriptors/gitian-osx.yml
index 2b6aa599e0..58531c81b4 100644
--- a/contrib/gitian-descriptors/gitian-osx.yml
+++ b/contrib/gitian-descriptors/gitian-osx.yml
@@ -110,13 +110,6 @@ script: |
SOURCEDIST=$(echo bitcoin-*.tar.gz)
DISTNAME=${SOURCEDIST/%.tar.gz}
- # Correct tar file order
- mkdir -p temp
- pushd temp
- tar -xf ../$SOURCEDIST
- find bitcoin-* | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ../$SOURCEDIST
- popd
-
# Workaround for tarball not building with the bare tag version (prep)
make -C src obj/build.h
@@ -166,6 +159,8 @@ script: |
find ${DISTNAME} | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-${i}.tar.gz
cd ../../
done
- mkdir -p $OUTDIR/src
- mv $SOURCEDIST $OUTDIR/src
+
+ mkdir -p ${OUTDIR}/src
+ git archive --output=${OUTDIR}/src/${DISTNAME}.tar.gz HEAD
+
mv ${OUTDIR}/${DISTNAME}-x86_64-*.tar.gz ${OUTDIR}/${DISTNAME}-osx64.tar.gz
diff --git a/contrib/gitian-descriptors/gitian-win.yml b/contrib/gitian-descriptors/gitian-win.yml
index de2e45190a..c5eea97c77 100644
--- a/contrib/gitian-descriptors/gitian-win.yml
+++ b/contrib/gitian-descriptors/gitian-win.yml
@@ -34,8 +34,8 @@ script: |
CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests"
FAKETIME_HOST_PROGS="ar ranlib nm windres strip objcopy"
FAKETIME_PROGS="date makensis zip"
- HOST_CFLAGS="-O2 -g"
- HOST_CXXFLAGS="-O2 -g"
+ HOST_CFLAGS="-O2 -g -fno-ident"
+ HOST_CXXFLAGS="-O2 -g -fno-ident"
export QT_RCC_TEST=1
export QT_RCC_SOURCE_DATE_OVERRIDE=1
@@ -117,13 +117,6 @@ script: |
SOURCEDIST=$(echo bitcoin-*.tar.gz)
DISTNAME=${SOURCEDIST/%.tar.gz}
- # Correct tar file order
- mkdir -p temp
- pushd temp
- tar -xf ../$SOURCEDIST
- find bitcoin-* | sort | tar --mtime="$REFERENCE_DATETIME" --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > ../$SOURCEDIST
- popd
-
# Workaround for tarball not building with the bare tag version (prep)
make -C src obj/build.h
@@ -145,6 +138,7 @@ script: |
CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}"
make ${MAKEOPTS}
make ${MAKEOPTS} -C src check-security
+ make ${MAKEOPTS} -C src check-symbols
make deploy
make install DESTDIR=${INSTALLPATH}
cp -f --target-directory="${OUTDIR}" ./bitcoin-*-setup-unsigned.exe
@@ -161,8 +155,10 @@ script: |
cd ../../
rm -rf distsrc-${i}
done
- mkdir -p $OUTDIR/src
- mv $SOURCEDIST $OUTDIR/src
+
+ mkdir -p ${OUTDIR}/src
+ git archive --output=${OUTDIR}/src/${DISTNAME}.tar.gz HEAD
+
cp -rf contrib/windeploy $BUILD_DIR
cd $BUILD_DIR/windeploy
mkdir unsigned
diff --git a/contrib/guix/README.md b/contrib/guix/README.md
index 46d755886c..8500379025 100644
--- a/contrib/guix/README.md
+++ b/contrib/guix/README.md
@@ -114,7 +114,7 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
* _**HOSTS**_
Override the space-separated list of platform triples for which to perform a
- bootstrappable build. _(defaults to "i686-linux-gnu x86\_64-linux-gnu
+ bootstrappable build. _(defaults to "x86\_64-linux-gnu
arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu")_
> Windows and OS X platform triplet support are WIP.
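For instance, to restrict a Guix run to a single platform triple one might invoke (triple chosen for illustration):

```bash
# Build only the x86_64 Linux binaries instead of the full default list.
HOSTS="x86_64-linux-gnu" ./contrib/guix/guix-build.sh
```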
diff --git a/contrib/guix/guix-build.sh b/contrib/guix/guix-build.sh
index 5e0c681f29..2daa8aba5e 100755
--- a/contrib/guix/guix-build.sh
+++ b/contrib/guix/guix-build.sh
@@ -20,7 +20,7 @@ time-machine() {
}
# Deterministically build Bitcoin Core for HOSTs (overridable by environment)
-for host in ${HOSTS=i686-linux-gnu x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu}; do
+for host in ${HOSTS=x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu}; do
# Display proper warning when the user interrupts the build
trap 'echo "** INT received while building ${host}, you may want to clean up the relevant output and distsrc-* directories before rebuilding"' INT
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
index 23b656cad7..c25ac2977b 100644
--- a/contrib/guix/manifest.scm
+++ b/contrib/guix/manifest.scm
@@ -21,17 +21,6 @@
(guix profiles)
(guix utils))
-(define (make-ssp-fixed-gcc xgcc)
- "Given a XGCC package, return a modified package that uses the SSP function
-from glibc instead of from libssp.so. Taken from:
-http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html"
- (package
- (inherit xgcc)
- (arguments
- (substitute-keyword-arguments (package-arguments xgcc)
- ((#:make-flags flags)
- `(cons "gcc_cv_libc_provides_ssp=yes" ,flags))))))
-
(define (make-gcc-rpath-link xgcc)
"Given a XGCC package, return a modified package that replace each instance of
-rpath in the default system spec that's inserted by Guix with -rpath-link"
@@ -104,8 +93,7 @@ chain for " target " development."))
(base-gcc-for-libc gcc-5)
(base-kernel-headers linux-libre-headers-4.19)
(base-libc glibc-2.27)
- (base-gcc (make-gcc-rpath-link
- (make-ssp-fixed-gcc gcc-9))))
+ (base-gcc (make-gcc-rpath-link gcc-9)))
"Convenience wrapper around MAKE-CROSS-TOOLCHAIN with default values
desirable for building Bitcoin Core release binaries."
(make-cross-toolchain target
diff --git a/contrib/windeploy/win-codesign.cert b/contrib/windeploy/win-codesign.cert
index 5bc5dc5809..4023a5b638 100644
--- a/contrib/windeploy/win-codesign.cert
+++ b/contrib/windeploy/win-codesign.cert
@@ -1,34 +1,34 @@
-----BEGIN CERTIFICATE-----
-MIIFcTCCBFmgAwIBAgIRALWcUnSOxv9FQW3xdaMDO6swDQYJKoZIhvcNAQELBQAw
+MIIFdDCCBFygAwIBAgIRAL98pqZb/N9LuNaNxKsHNGQwDQYJKoZIhvcNAQELBQAw
fDELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
A1UEBxMHU2FsZm9yZDEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMSQwIgYDVQQD
-ExtTZWN0aWdvIFJTQSBDb2RlIFNpZ25pbmcgQ0EwHhcNMTkwMzI3MDAwMDAwWhcN
-MjAwMzI2MjM1OTU5WjCBtDELMAkGA1UEBhMCQ0gxDTALBgNVBBEMBDgwMDUxCzAJ
-BgNVBAgMAlpIMRAwDgYDVQQHDAdaw7xyaWNoMRcwFQYDVQQJDA5NYXR0ZW5nYXNz
-ZSAyNzEuMCwGA1UECgwlQml0Y29pbiBDb3JlIENvZGUgU2lnbmluZyBBc3NvY2lh
-dGlvbjEuMCwGA1UEAwwlQml0Y29pbiBDb3JlIENvZGUgU2lnbmluZyBBc3NvY2lh
-dGlvbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK04VDwiY1wxcW3E
-WTTGmnbciCwETwC96DG4qcoH2PPNsVy3dfwGh0C02Qj2vL64IfwIGUFSgREvyjZk
-CNhEuJO2e0nO0rKNNH5v/JO+P7/VYPZkF5a3uUz9ulmihULXioieHB/q0l6BmiJL
-+cYaMVfidL9Y+IJwgiTqjnpRhv1Ik083SPsu6GcfQT9MJfY/+xse2EP0l4GfdFE6
-DRcWjiC8UHpfpGYcImzSFZZpbFbqoAyhueCl28QU4f8QAbS6BqNfaAK9MMACWDcK
-eTz3C5JK6CiUxOnGIxilXhljuybFUjR4jGl5eTRpuPWk95NTTYS36q+bx/1nYelx
-0n4nnDMCAwEAAaOCAbMwggGvMB8GA1UdIwQYMBaAFA7hOqhTOjHVir7Bu61nGgOF
-rTQOMB0GA1UdDgQWBBRbN7ECrPCdVvh58enwy3Dix46h2jAOBgNVHQ8BAf8EBAMC
-B4AwDAYDVR0TAQH/BAIwADATBgNVHSUEDDAKBggrBgEFBQcDAzARBglghkgBhvhC
-AQEEBAMCBBAwQAYDVR0gBDkwNzA1BgwrBgEEAbIxAQIBAwIwJTAjBggrBgEFBQcC
-ARYXaHR0cHM6Ly9zZWN0aWdvLmNvbS9DUFMwQwYDVR0fBDwwOjA4oDagNIYyaHR0
-cDovL2NybC5zZWN0aWdvLmNvbS9TZWN0aWdvUlNBQ29kZVNpZ25pbmdDQS5jcmww
-cwYIKwYBBQUHAQEEZzBlMD4GCCsGAQUFBzAChjJodHRwOi8vY3J0LnNlY3RpZ28u
-Y29tL1NlY3RpZ29SU0FDb2RlU2lnbmluZ0NBLmNydDAjBggrBgEFBQcwAYYXaHR0
-cDovL29jc3Auc2VjdGlnby5jb20wKwYDVR0RBCQwIoEgam9uYXNAYml0Y29pbmNv
-cmVjb2Rlc2lnbmluZy5vcmcwDQYJKoZIhvcNAQELBQADggEBAF/AIXcFBWCC2Red
-SHN4Cvko5mdSkDNgzjVFc+OwAJ5RdOgbERde4PnHm3Qmrnx+uMetVnmrC8Fv1Iwb
-kkR0bdbWBj6lF6zMsClIN6WJEfY+qfj1qi7wyucu+3OElYRC9bm5Lf0mEHQr8lJ1
-lGvAjPh+/hmxoVNbHFMZ1Ea+BrbjVwiSznt0gzdMh0CispBZKLWCIwRwi+hFjQrw
-Z7RLH8HeCJ5Ojl/OTDQqh6AylQ7l9w9KHsUt4Jqy/AnCCyAj2/6xjdwnuo3tCZwb
-g/9CydiAacD/83odphEeC2iBa+0wsj9bWmyYKY7S9n0u+wm3wBfZbSVMDDPk/la1
-3qCUDLk=
+ExtTZWN0aWdvIFJTQSBDb2RlIFNpZ25pbmcgQ0EwHhcNMjAwMzI0MDAwMDAwWhcN
+MjEwMzI0MjM1OTU5WjCBtzELMAkGA1UEBhMCQ0gxDTALBgNVBBEMBDgwMDUxDjAM
+BgNVBAgMBVN0YXRlMRAwDgYDVQQHDAdaw7xyaWNoMRcwFQYDVQQJDA5NYXR0ZW5n
+YXNzZSAyNzEuMCwGA1UECgwlQml0Y29pbiBDb3JlIENvZGUgU2lnbmluZyBBc3Nv
+Y2lhdGlvbjEuMCwGA1UEAwwlQml0Y29pbiBDb3JlIENvZGUgU2lnbmluZyBBc3Nv
+Y2lhdGlvbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMtxC8N4r/jE
+OGOdFy/0UtiUvEczPZf9WYZz/7paAkc75XopHIE5/ssmoEX27gG9K00tf3Q62QAx
+inZUPWkNTh8X0l+6uSGiIBFIV7dDgztIxnPcxaqw0k7Q2TEqKJvb5qm16zX6WfXJ
+R2r6O5utUdQ3AarHnQq9fwdM1j5+ywS5u52te74ENgDMTMKUuB2J3KH1ASg5PAtO
+CjPqPL+ZXJ7eT3M0Z+Lbu5ISZSqZB48BcCwOo/fOO0dAiLT9FE1iVtaCpBKHqGmd
+glRjPzZdgDv8g28etRmk8wQ5pQmfL2gBjt/LtIgMPTdHHETKLxJO5H3y0CNx1vzL
+ql7xNMxELxkCAwEAAaOCAbMwggGvMB8GA1UdIwQYMBaAFA7hOqhTOjHVir7Bu61n
+GgOFrTQOMB0GA1UdDgQWBBSHBbl82FUJiUkXyyYJog1awYRsxjAOBgNVHQ8BAf8E
+BAMCB4AwDAYDVR0TAQH/BAIwADATBgNVHSUEDDAKBggrBgEFBQcDAzARBglghkgB
+hvhCAQEEBAMCBBAwQAYDVR0gBDkwNzA1BgwrBgEEAbIxAQIBAwIwJTAjBggrBgEF
+BQcCARYXaHR0cHM6Ly9zZWN0aWdvLmNvbS9DUFMwQwYDVR0fBDwwOjA4oDagNIYy
+aHR0cDovL2NybC5zZWN0aWdvLmNvbS9TZWN0aWdvUlNBQ29kZVNpZ25pbmdDQS5j
+cmwwcwYIKwYBBQUHAQEEZzBlMD4GCCsGAQUFBzAChjJodHRwOi8vY3J0LnNlY3Rp
+Z28uY29tL1NlY3RpZ29SU0FDb2RlU2lnbmluZ0NBLmNydDAjBggrBgEFBQcwAYYX
+aHR0cDovL29jc3Auc2VjdGlnby5jb20wKwYDVR0RBCQwIoEgam9uYXNAYml0Y29p
+bmNvcmVjb2Rlc2lnbmluZy5vcmcwDQYJKoZIhvcNAQELBQADggEBAAU59qJzQ2ED
+aTMIQTsU01zIhZJ/xwQh78i0v2Mnr46RvzYrZOev+btF3SyUYD8veNnbYlY6yEYq
+Vb+/PQnE3t1xlqR80qiTZCk/Wmxx/qKvQuWeRL5QQgvsCmWBpycQ7PNfwzOWxbPE
+b0Hb2/VFFZfR9iltkfeInRUrzS96CJGYtm7dMf2JtnXYBcwpn1N8BSMH4nXVyN8g
+VEE5KyjE7+/awYiSST7+e6Y7FE5AJ4f3FjqnRm+2XetTVqITwMLKZMoV283nSEeH
+fA4FNAMGz9QeV38ol65NNqFP2vSSgVoPK79orqH9OOW2LSobt2qun+euddJIQeYV
+CMP90b/2WPc=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB
diff --git a/depends/README.md b/depends/README.md
index 93f619983f..9461887a33 100644
--- a/depends/README.md
+++ b/depends/README.md
@@ -46,7 +46,7 @@ The paths are automatically configured and no other options are needed unless ta
sudo apt-get install curl librsvg2-bin libtiff-tools bsdmainutils cmake imagemagick libcap-dev libz-dev libbz2-dev python3-setuptools
-#### For Win32/Win64 cross compilation
+#### For Win64 cross compilation
- see [build-windows.md](../doc/build-windows.md#cross-compilation-for-ubuntu-and-windows-subsystem-for-linux)
diff --git a/depends/config.site.in b/depends/config.site.in
index c5731e5269..fb9bf713cc 100644
--- a/depends/config.site.in
+++ b/depends/config.site.in
@@ -59,7 +59,7 @@ PKG_CONFIG="`which pkg-config` --static"
# avoid ruining the cache. Sigh.
export PKG_CONFIG_PATH=$depends_prefix/share/pkgconfig:$depends_prefix/lib/pkgconfig
if test -z "@allow_host_packages@"; then
- export PKGCONFIG_LIBDIR=
+ export PKG_CONFIG_LIBDIR=$depends_prefix/lib/pkgconfig
fi
CPPFLAGS="-I$depends_prefix/include/ $CPPFLAGS"
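Pointing `PKG_CONFIG_LIBDIR` at the depends prefix (instead of clearing the misspelled `PKGCONFIG_LIBDIR`) means pkg-config only sees `.pc` files installed there unless host packages are explicitly allowed. A quick, illustrative way to observe the effect from the source root (the host triple in the path is an assumption):

```bash
# Only .pc files under the depends prefix are visible with this setting;
# system-wide libraries are no longer picked up by accident.
PKG_CONFIG_LIBDIR="$PWD/depends/x86_64-pc-linux-gnu/lib/pkgconfig" \
  pkg-config --modversion libevent
```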
diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk
index cd0e70fb1c..cbe4fe4d97 100644
--- a/depends/packages/boost.mk
+++ b/depends/packages/boost.mk
@@ -22,7 +22,7 @@ $(package)_config_opts_armv7a_android=address-model=32
$(package)_toolset_$(host_os)=gcc
$(package)_archiver_$(host_os)=$($(package)_ar)
$(package)_toolset_darwin=clang-darwin
-$(package)_config_libraries=chrono,filesystem,system,thread,test
+$(package)_config_libraries=filesystem,system,thread,test
$(package)_cxxflags=-std=c++11 -fvisibility=hidden
$(package)_cxxflags_linux=-fPIC
$(package)_cxxflags_android=-fPIC
diff --git a/depends/packages/native_libdmg-hfsplus.mk b/depends/packages/native_libdmg-hfsplus.mk
index 8493f1d979..c0f0ce74de 100644
--- a/depends/packages/native_libdmg-hfsplus.mk
+++ b/depends/packages/native_libdmg-hfsplus.mk
@@ -12,7 +12,7 @@ define $(package)_preprocess_cmds
endef
define $(package)_config_cmds
- cmake -DCMAKE_INSTALL_PREFIX:PATH=$(build_prefix) ..
+ cmake -DCMAKE_INSTALL_PREFIX:PATH=$(build_prefix) -DCMAKE_C_FLAGS="-Wl,--build-id=none" ..
endef
define $(package)_build_cmds
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index cd7ccf80ab..7e307ab7c8 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -861,7 +861,8 @@ RECURSIVE = YES
# Note that relative paths are relative to the directory from which doxygen is
# run.
-EXCLUDE = src/leveldb \
+EXCLUDE = src/crc32c \
+ src/leveldb \
src/json \
src/test \
src/qt/test
diff --git a/doc/build-freebsd.md b/doc/build-freebsd.md
index 4831623504..f48855a344 100644
--- a/doc/build-freebsd.md
+++ b/doc/build-freebsd.md
@@ -10,7 +10,7 @@ This guide does not contain instructions for building the GUI.
You will need the following dependencies, which can be installed as root via pkg:
-```shell
+```bash
pkg install autoconf automake boost-libs git gmake libevent libtool pkgconf
git clone https://github.com/bitcoin/bitcoin.git
@@ -18,7 +18,7 @@ git clone https://github.com/bitcoin/bitcoin.git
In order to run the test suite (recommended), you will need to have Python 3 installed:
-```shell
+```bash
pkg install python3
```
@@ -29,32 +29,33 @@ See [dependencies.md](dependencies.md) for a complete overview.
BerkeleyDB is only necessary for the wallet functionality. To skip this, pass
`--disable-wallet` to `./configure` and skip to the next section.
-```shell
+```bash
./contrib/install_db4.sh `pwd`
export BDB_PREFIX="$PWD/db4"
```
## Building Bitcoin Core
-**Important**: Use `gmake` (the non-GNU `make` will exit with an error):
+**Important**: Use `gmake` (the non-GNU `make` will exit with an error).
With wallet:
-```shell
+```bash
./autogen.sh
./configure --with-gui=no \
BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" \
- BDB_CFLAGS="-I${BDB_PREFIX}/include"
+ BDB_CFLAGS="-I${BDB_PREFIX}/include" \
+ MAKE=gmake
```
Without wallet:
-```shell
+```bash
./autogen.sh
-./configure --with-gui=no --disable-wallet
+./configure --with-gui=no --disable-wallet MAKE=gmake
```
followed by:
-```shell
+```bash
gmake # use -jX here for parallelism
gmake check # Run tests if Python 3 is available
```
diff --git a/doc/build-netbsd.md b/doc/build-netbsd.md
index ab422f6aa7..47049a780e 100644
--- a/doc/build-netbsd.md
+++ b/doc/build-netbsd.md
@@ -37,13 +37,13 @@ from ports, for the same reason as boost above (g++/libstd++ incompatibility).
If you have to build it yourself, you can use [the installation script included
in contrib/](/contrib/install_db4.sh) like so:
-```shell
+```bash
./contrib/install_db4.sh `pwd`
```
from the root of the repository. Then set `BDB_PREFIX` for the next section:
-```shell
+```bash
export BDB_PREFIX="$PWD/db4"
```
@@ -52,24 +52,26 @@ export BDB_PREFIX="$PWD/db4"
**Important**: Use `gmake` (the non-GNU `make` will exit with an error).
With wallet:
-```
+```bash
./autogen.sh
./configure --with-gui=no CPPFLAGS="-I/usr/pkg/include" \
LDFLAGS="-L/usr/pkg/lib" \
BOOST_CPPFLAGS="-I/usr/pkg/include" \
BOOST_LDFLAGS="-L/usr/pkg/lib" \
BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" \
- BDB_CFLAGS="-I${BDB_PREFIX}/include"
+ BDB_CFLAGS="-I${BDB_PREFIX}/include" \
+ MAKE=gmake
```
Without wallet:
-```
+```bash
./autogen.sh
./configure --with-gui=no --disable-wallet \
CPPFLAGS="-I/usr/pkg/include" \
LDFLAGS="-L/usr/pkg/lib" \
BOOST_CPPFLAGS="-I/usr/pkg/include" \
- BOOST_LDFLAGS="-L/usr/pkg/lib"
+ BOOST_LDFLAGS="-L/usr/pkg/lib" \
+ MAKE=gmake
```
Build and run the tests:
diff --git a/doc/build-openbsd.md b/doc/build-openbsd.md
index dad2566a6c..53c647ae34 100644
--- a/doc/build-openbsd.md
+++ b/doc/build-openbsd.md
@@ -38,19 +38,19 @@ from ports, for the same reason as boost above (g++/libstd++ incompatibility).
If you have to build it yourself, you can use [the installation script included
in contrib/](/contrib/install_db4.sh) like so:
-```shell
+```bash
./contrib/install_db4.sh `pwd` CC=cc CXX=c++
```
from the root of the repository. Then set `BDB_PREFIX` for the next section:
-```shell
+```bash
export BDB_PREFIX="$PWD/db4"
```
### Building Bitcoin Core
-**Important**: use `gmake`, not `make`. The non-GNU `make` will exit with a horrible error.
+**Important**: Use `gmake` (the non-GNU `make` will exit with an error).
Preparation:
```bash
@@ -70,12 +70,14 @@ Make sure `BDB_PREFIX` is set to the appropriate path from the above steps.
To configure with wallet:
```bash
./configure --with-gui=no CC=cc CXX=c++ \
- BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" BDB_CFLAGS="-I${BDB_PREFIX}/include"
+ BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" \
+ BDB_CFLAGS="-I${BDB_PREFIX}/include" \
+ MAKE=gmake
```
To configure without wallet:
```bash
-./configure --disable-wallet --with-gui=no CC=cc CXX=c++
+./configure --disable-wallet --with-gui=no CC=cc CXX=c++ MAKE=gmake
```
Build and run the tests:
diff --git a/doc/build-osx.md b/doc/build-osx.md
index 86b5c5b602..7b76117c8b 100644
--- a/doc/build-osx.md
+++ b/doc/build-osx.md
@@ -22,6 +22,7 @@ Then install [Homebrew](https://brew.sh).
brew install automake berkeley-db4 libtool boost miniupnpc pkg-config python qt libevent qrencode
```
+If you run into issues, check [Homebrew's troubleshooting page](https://docs.brew.sh/Troubleshooting).
See [dependencies.md](dependencies.md) for a complete overview.
If you want to build the disk image with `make deploy` (.dmg / optional), you need RSVG:
diff --git a/doc/build-unix.md b/doc/build-unix.md
index e799e709fa..6b51db5f55 100644
--- a/doc/build-unix.md
+++ b/doc/build-unix.md
@@ -80,7 +80,7 @@ Build requirements:
Now, you can either build from self-compiled [depends](/depends/README.md) or install the required dependencies:
- sudo apt-get install libevent-dev libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-test-dev libboost-thread-dev
+ sudo apt-get install libevent-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev
BerkeleyDB is required for the wallet.
diff --git a/doc/descriptors.md b/doc/descriptors.md
index e31665b129..181ff77e50 100644
--- a/doc/descriptors.md
+++ b/doc/descriptors.md
@@ -17,6 +17,7 @@ Supporting RPCs are:
(`regtest` only, since v0.19).
- `utxoupdatepsbt` takes as input descriptors to add information to the psbt
(since v0.19).
+- `createmultisig` and `addmultisigaddress` return descriptors as well (since v0.20); see the example below.
This document describes the language. For the specifics on usage, see the RPC
documentation for the functions mentioned above.
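A hedged illustration of the addition mentioned above: since v0.20 the `createmultisig` result is expected to carry a `descriptor` field alongside the address and redeem script (the keys below are placeholders, not real data):

```bash
# Placeholder public keys; substitute real compressed pubkeys to get a usable result.
bitcoin-cli createmultisig 2 '["<pubkey1>", "<pubkey2>", "<pubkey3>"]'
```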
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index 3ef35cfcfa..da07080724 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -43,6 +43,7 @@ Developer Notes
- [Suggestions and examples](#suggestions-and-examples)
- [Release notes](#release-notes)
- [RPC interface guidelines](#rpc-interface-guidelines)
+ - [Internal interface guidelines](#internal-interface-guidelines)
<!-- markdown-toc end -->
@@ -288,6 +289,7 @@ $ valgrind --suppressions=contrib/valgrind.supp src/test/test_bitcoin
$ valgrind --suppressions=contrib/valgrind.supp --leak-check=full \
--show-leak-kinds=all src/test/test_bitcoin --log_level=test_suite
$ valgrind -v --leak-check=full src/bitcoind -printtoconsole
+$ ./test/functional/test_runner.py --valgrind
```
### Compiling for test coverage
@@ -858,6 +860,10 @@ Current subtrees include:
- **Note**: Follow the instructions in [Upgrading LevelDB](#upgrading-leveldb) when
merging upstream changes to the LevelDB subtree.
+- src/crc32c
+ - Used by leveldb for hardware acceleration of CRC32C checksums for data integrity.
+ - Upstream at https://github.com/google/crc32c ; Maintained by Google.
+
- src/secp256k1
- Upstream at https://github.com/bitcoin-core/secp256k1/ ; actively maintained by Core contributors.
@@ -1085,7 +1091,8 @@ A few guidelines for introducing and reviewing new RPC interfaces:
new RPC is replacing a deprecated RPC, to avoid both RPCs confusingly
showing up in the command list.
-- Use *invalid* bech32 addresses for `RPCExamples` help documentation.
+- Use *invalid* bech32 addresses (e.g. in the constant array `EXAMPLE_ADDRESS`) for
+ `RPCExamples` help documentation.
- *Rationale*: Prevent accidental transactions by users and encourage the use
of bech32 addresses by default.
@@ -1094,3 +1101,124 @@ A few guidelines for introducing and reviewing new RPC interfaces:
timestamps in the documentation.
- *Rationale*: User-facing consistency.
+
+Internal interface guidelines
+-----------------------------
+
+Internal interfaces between parts of the codebase that are meant to be
+independent (node, wallet, GUI) are defined in
+[`src/interfaces/`](../src/interfaces/). The main interface classes defined
+there are [`interfaces::Chain`](../src/interfaces/chain.h), used by wallet to
+access the node's latest chain state,
+[`interfaces::Node`](../src/interfaces/node.h), used by the GUI to control the
+node, and [`interfaces::Wallet`](../src/interfaces/wallet.h), used by the GUI
+to control an individual wallet. There are also more specialized interface
+types like [`interfaces::Handler`](../src/interfaces/handler.h) and
+[`interfaces::ChainClient`](../src/interfaces/chain.h) passed to and from
+various interface methods.
+
+Interface classes are written in a particular style so node, wallet, and GUI
+code doesn't need to run in the same process, and so the class declarations
+work more easily with tools and libraries supporting interprocess
+communication:
+
+- Interface classes should be abstract and have methods that are [pure
+ virtual](https://en.cppreference.com/w/cpp/language/abstract_class). This
+ allows multiple implementations to inherit from the same interface class,
+ particularly so one implementation can execute functionality in the local
+ process, and other implementations can forward calls to remote processes.
+
+- Interface method definitions should wrap existing functionality instead of
+ implementing new functionality. Any substantial new node or wallet
+ functionality should be implemented in [`src/node/`](../src/node/) or
+ [`src/wallet/`](../src/wallet/) and just exposed in
+ [`src/interfaces/`](../src/interfaces/) instead of being implemented there,
+ so it can be more modular and accessible to unit tests.
+
+- Interface method parameter and return types should either be serializable or
+ be other interface classes. Interface methods shouldn't pass references to
+ objects that can't be serialized or accessed from another process.
+
+ Examples:
+
+ ```c++
+ // Good: takes string argument and returns interface class pointer
+  virtual std::unique_ptr<interfaces::Wallet> loadWallet(std::string filename) = 0;
+
+ // Bad: returns CWallet reference that can't be used from another process
+ virtual CWallet& loadWallet(std::string filename) = 0;
+ ```
+
+ ```c++
+ // Good: accepts and returns primitive types
+ virtual bool findBlock(const uint256& hash, int& out_height, int64_t& out_time) = 0;
+
+ // Bad: returns pointer to internal node in a linked list inaccessible to
+ // other processes
+ virtual const CBlockIndex* findBlock(const uint256& hash) = 0;
+ ```
+
+ ```c++
+ // Good: takes plain callback type and returns interface pointer
+ using TipChangedFn = std::function<void(int block_height, int64_t block_time)>;
+ virtual std::unique_ptr<interfaces::Handler> handleTipChanged(TipChangedFn fn) = 0;
+
+ // Bad: returns boost connection specific to local process
+ using TipChangedFn = std::function<void(int block_height, int64_t block_time)>;
+ virtual boost::signals2::scoped_connection connectTipChanged(TipChangedFn fn) = 0;
+ ```
+
+- For consistency and friendliness to code generation tools, interface method
+ input and inout parameters should be ordered first and output parameters
+ should come last.
+
+ Example:
+
+ ```c++
+ // Good: error output param is last
+ virtual bool broadcastTransaction(const CTransactionRef& tx, CAmount max_fee, std::string& error) = 0;
+
+ // Bad: error output param is between input params
+ virtual bool broadcastTransaction(const CTransactionRef& tx, std::string& error, CAmount max_fee) = 0;
+ ```
+
+- For friendliness to code generation tools, interface methods should not be
+ overloaded:
+
+ Example:
+
+ ```c++
+ // Good: method names are unique
+ virtual bool disconnectByAddress(const CNetAddr& net_addr) = 0;
+ virtual bool disconnectById(NodeId id) = 0;
+
+ // Bad: methods are overloaded by type
+ virtual bool disconnect(const CNetAddr& net_addr) = 0;
+ virtual bool disconnect(NodeId id) = 0;
+ ```
+
+- For consistency and friendliness to code generation tools, interface method
+ names should be `lowerCamelCase` and standalone function names should be
+ `UpperCamelCase`.
+
+ Examples:
+
+ ```c++
+ // Good: lowerCamelCase method name
+ virtual void blockConnected(const CBlock& block, int height) = 0;
+
+ // Bad: uppercase class method
+ virtual void BlockConnected(const CBlock& block, int height) = 0;
+ ```
+
+ ```c++
+ // Good: UpperCamelCase standalone function name
+ std::unique_ptr<Node> MakeNode(LocalInit& init);
+
+ // Bad: lowercase standalone function
+ std::unique_ptr<Node> makeNode(LocalInit& init);
+ ```
+
+ Note: This last convention isn't generally followed outside of
+ [`src/interfaces/`](../src/interfaces/), though it did come up for discussion
+ before in [#14635](https://github.com/bitcoin/bitcoin/pull/14635).
diff --git a/doc/files.md b/doc/files.md
index c2296b45fa..cd23d547bb 100644
--- a/doc/files.md
+++ b/doc/files.md
@@ -78,6 +78,10 @@ Subdirectory | File(s) | Description
3. A wallet database path can be specified by `-wallet` option.
+4. `wallet.dat` files must not be shared across different node instances, as that can result in key-reuse and double-spends due to the lack of synchronization between instances.
+
+5. Any copy or backup of the wallet should be done through a `backupwallet` call in order to update and lock the wallet, preventing any file corruption caused by updates during the copy; see the example below.
+
## GUI settings
`bitcoin-qt` uses [`QSettings`](https://doc.qt.io/qt-5/qsettings.html) class; this implies platform-specific [locations where application settings are stored](https://doc.qt.io/qt-5/qsettings.html#locations-where-application-settings-are-stored).
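Relating to item 5 above, a minimal example of the recommended backup path (destination path is illustrative):

```bash
# Ask the running node to write a consistent copy of the wallet instead of
# copying wallet.dat out of the data directory by hand.
bitcoin-cli backupwallet "/path/to/backups/wallet-backup.dat"
```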
diff --git a/doc/fuzzing.md b/doc/fuzzing.md
index c34ca4cb59..9642337821 100644
--- a/doc/fuzzing.md
+++ b/doc/fuzzing.md
@@ -1,125 +1,93 @@
-Fuzz-testing Bitcoin Core
-==========================
-
-A special test harness in `src/test/fuzz/` is provided for each fuzz target to
-provide an easy entry point for fuzzers and the like. In this document we'll
-describe how to use it with AFL and libFuzzer.
-
-## Preparing fuzzing
-
-The fuzzer needs some inputs to work on, but the inputs or seeds can be used
-interchangeably between libFuzzer and AFL.
-
-Extract the example seeds (or other starting inputs) into the inputs
-directory before starting fuzzing.
-
-```
-git clone https://github.com/bitcoin-core/qa-assets
-export DIR_FUZZ_IN=$PWD/qa-assets/fuzz_seed_corpus
-```
-
-AFL needs an input directory with examples, and an output directory where it
-will place examples that it found. These can be anywhere in the file system,
-we'll define environment variables to make it easy to reference them.
-
-So, only for AFL you need to configure the outputs path:
-
-```
-mkdir outputs
-export AFLOUT=$PWD/outputs
-```
-
-libFuzzer will use the input directory as output directory.
-
-## AFL
-
-### Building AFL
-
-It is recommended to always use the latest version of afl:
-```
-wget http://lcamtuf.coredump.cx/afl/releases/afl-latest.tgz
-tar -zxvf afl-latest.tgz
-cd afl-<version>
-make
-export AFLPATH=$PWD
-```
-
-For macOS you may need to ignore x86 compilation checks when running `make`:
-`AFL_NO_X86=1 make`.
-
-### Instrumentation
-
-To build Bitcoin Core using AFL instrumentation (this assumes that the
-`AFLPATH` was set as above):
-```
-./configure --disable-ccache --disable-shared --enable-tests --enable-fuzz CC=${AFLPATH}/afl-gcc CXX=${AFLPATH}/afl-g++
-export AFL_HARDEN=1
-make
-```
-
-If you are using clang you will need to substitute `afl-gcc` with `afl-clang`
-and `afl-g++` with `afl-clang++`, so the first line above becomes:
-```
-./configure --disable-ccache --disable-shared --enable-tests --enable-fuzz CC=${AFLPATH}/afl-clang CXX=${AFLPATH}/afl-clang++
-```
-
-We disable ccache because we don't want to pollute the ccache with instrumented
-objects, and similarly don't want to use non-instrumented cached objects linked
-in.
-
-The fuzzing can be sped up significantly (~200x) by using `afl-clang-fast` and
-`afl-clang-fast++` in place of `afl-gcc` and `afl-g++` when compiling. When
-compiling using `afl-clang-fast`/`afl-clang-fast++` the resulting
-binary will be instrumented in such a way that the AFL
-features "persistent mode" and "deferred forkserver" can be used. See
-https://github.com/google/AFL/tree/master/llvm_mode for details.
-
-### Fuzzing
-
-To start the actual fuzzing use:
-
-```
-export FUZZ_TARGET=bech32 # Pick a fuzz_target
-mkdir ${AFLOUT}/${FUZZ_TARGET}
-$AFLPATH/afl-fuzz -i ${DIR_FUZZ_IN}/${FUZZ_TARGET} -o ${AFLOUT}/${FUZZ_TARGET} -m52 -- src/test/fuzz/${FUZZ_TARGET}
-```
-
-You may have to change a few kernel parameters to test optimally - `afl-fuzz`
-will print an error and suggestion if so.
-
-On macOS you may need to set `AFL_NO_FORKSRV=1` to get the target to run.
-```
-export FUZZ_TARGET=bech32 # Pick a fuzz_target
-mkdir ${AFLOUT}/${FUZZ_TARGET}
-AFL_NO_FORKSRV=1 $AFLPATH/afl-fuzz -i ${DIR_FUZZ_IN}/${FUZZ_TARGET} -o ${AFLOUT}/${FUZZ_TARGET} -m52 -- src/test/fuzz/${FUZZ_TARGET}
-```
-
-## libFuzzer
-
-A recent version of `clang`, the address/undefined sanitizers (ASan/UBSan) and
-libFuzzer is needed (all found in the `compiler-rt` runtime libraries package).
-
-To build all fuzz targets with libFuzzer, run
-
-```
-./configure --disable-ccache --enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=clang CXX=clang++
-make
-```
-
-See https://llvm.org/docs/LibFuzzer.html#running on how to run the libFuzzer
-instrumented executable.
-
-Alternatively, you can run the script through the fuzzing test harness (only
-libFuzzer supported so far). You need to pass it the inputs directory and
-the specific test target you want to run.
-
-```
-./test/fuzz/test_runner.py ${DIR_FUZZ_IN} bech32
-```
-
-### macOS hints for libFuzzer
-
-The default clang/llvm version supplied by Apple on macOS does not include
+# Fuzzing Bitcoin Core using libFuzzer
+
+## Quickstart guide
+
+To quickly get started fuzzing Bitcoin Core using [libFuzzer](https://llvm.org/docs/LibFuzzer.html):
+
+```sh
+$ git clone https://github.com/bitcoin/bitcoin
+$ cd bitcoin/
+$ ./autogen.sh
+$ CC=clang CXX=clang++ ./configure --enable-fuzz --with-sanitizers=address,fuzzer,undefined
+# macOS users: If you have problems with this step, make sure to read "macOS hints for
+# libFuzzer" on https://github.com/bitcoin/bitcoin/blob/master/doc/fuzzing.md#macos-hints-for-libfuzzer
+$ make
+$ src/test/fuzz/process_message
+# abort fuzzing using ctrl-c
+```
+
+## Fuzzing harnesses, fuzzing output and fuzzing corpora
+
+[`process_message`](https://github.com/bitcoin/bitcoin/blob/master/src/test/fuzz/process_message.cpp) is a fuzzing harness for the [`ProcessMessage(...)` function (`net_processing`)](https://github.com/bitcoin/bitcoin/blob/master/src/net_processing.cpp). The available fuzzing harnesses are found in [`src/test/fuzz/`](https://github.com/bitcoin/bitcoin/tree/master/src/test/fuzz).
+
+The fuzzer will output `NEW` every time it has created a test input that covers new areas of the code under test. For more information on how to interpret the fuzzer output, see the [libFuzzer documentation](https://llvm.org/docs/LibFuzzer.html).
+
+If you specify a corpus directory, any new coverage-increasing inputs will be saved there:
+
+```sh
+$ mkdir -p process_message-seeded-from-thin-air/
+$ src/test/fuzz/process_message process_message-seeded-from-thin-air/
+INFO: Seed: 840522292
+INFO: Loaded 1 modules (424174 inline 8-bit counters): 424174 [0x55e121ef9ab8, 0x55e121f613a6),
+INFO: Loaded 1 PC tables (424174 PCs): 424174 [0x55e121f613a8,0x55e1225da288),
+INFO: 0 files found in process_message-seeded-from-thin-air/
+INFO: -max_len is not provided; libFuzzer will not generate inputs larger than 4096 bytes
+INFO: A corpus is not provided, starting from an empty corpus
+#2 INITED cov: 94 ft: 95 corp: 1/1b exec/s: 0 rss: 150Mb
+#3 NEW cov: 95 ft: 96 corp: 2/3b lim: 4 exec/s: 0 rss: 150Mb L: 2/2 MS: 1 InsertByte-
+#4 NEW cov: 96 ft: 98 corp: 3/7b lim: 4 exec/s: 0 rss: 150Mb L: 4/4 MS: 1 CrossOver-
+#21 NEW cov: 96 ft: 100 corp: 4/11b lim: 4 exec/s: 0 rss: 150Mb L: 4/4 MS: 2 ChangeBit-CrossOver-
+#324 NEW cov: 101 ft: 105 corp: 5/12b lim: 6 exec/s: 0 rss: 150Mb L: 6/6 MS: 5 CrossOver-ChangeBit-CopyPart-ChangeBit-ChangeBinInt-
+#1239 REDUCE cov: 102 ft: 106 corp: 6/24b lim: 14 exec/s: 0 rss: 150Mb L: 13/13 MS: 5 ChangeBit-CrossOver-EraseBytes-ChangeBit-InsertRepeatedBytes-
+#1272 REDUCE cov: 102 ft: 106 corp: 6/23b lim: 14 exec/s: 0 rss: 150Mb L: 12/12 MS: 3 ChangeBinInt-ChangeBit-EraseBytes-
+ NEW_FUNC[1/677]: 0x55e11f456690 in std::_Function_base::~_Function_base() /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/bits/std_function.h:255
+ NEW_FUNC[2/677]: 0x55e11f465800 in CDataStream::CDataStream(std::vector<unsigned char, std::allocator<unsigned char> > const&, int, int) src/./streams.h:248
+#2125 REDUCE cov: 4820 ft: 4867 corp: 7/29b lim: 21 exec/s: 0 rss: 155Mb L: 6/12 MS: 2 CopyPart-CMP- DE: "block"-
+ NEW_FUNC[1/9]: 0x55e11f64d790 in std::_Rb_tree<uint256, std::pair<uint256 const, std::chrono::duration<long, std::ratio<1l, 1000000l> > >, std::_Select1st<std::pair<uint256 const, std::chrono::duration<long, std::ratio<1l, 1000000l> > > >, std::less<uint256>, std::allocator<std::pair<uint256 const, std::chrono::duration<long, std::ratio<1l, 1000000l> > > > >::~_Rb_tree() /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/bits/stl_tree.h:972
+ NEW_FUNC[2/9]: 0x55e11f64d870 in std::_Rb_tree<uint256, std::pair<uint256 const, std::chrono::duration<long, std::ratio<1l, 1000000l> > >, std::_Select1st<std::pair<uint256 const, std::chrono::duration<long, std::ratio<1l, 1000000l> > > >, std::less<uint256>, std::allocator<std::pair<uint256 const, std::chrono::duration<long, std::ratio<1l, 1000000l> > > > >::_M_erase(std::_Rb_tree_node<std::pair<uint256 const, std::chrono::duration<long, std::ratio<1l, 1000000l> > > >*) /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/bits/stl_tree.h:1875
+#2228 NEW cov: 4898 ft: 4971 corp: 8/35b lim: 21 exec/s: 0 rss: 156Mb L: 6/12 MS: 3 EraseBytes-CopyPart-PersAutoDict- DE: "block"-
+ NEW_FUNC[1/5]: 0x55e11f46df70 in std::enable_if<__and_<std::allocator_traits<zero_after_free_allocator<char> >::__construct_helper<char, unsigned char const&>::type>::value, void>::type std::allocator_traits<zero_after_free_allocator<char> >::_S_construct<char, unsigned char const&>(zero_after_free_allocator<char>&, char*, unsigned char const&) /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/bits/alloc_traits.h:243
+ NEW_FUNC[2/5]: 0x55e11f477390 in std::vector<unsigned char, std::allocator<unsigned char> >::data() /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/bits/stl_vector.h:1056
+#2456 NEW cov: 4933 ft: 5042 corp: 9/55b lim: 21 exec/s: 0 rss: 160Mb L: 20/20 MS: 3 ChangeByte-InsertRepeatedBytes-PersAutoDict- DE: "block"-
+#2467 NEW cov: 4933 ft: 5043 corp: 10/76b lim: 21 exec/s: 0 rss: 161Mb L: 21/21 MS: 1 InsertByte-
+#4215 NEW cov: 4941 ft: 5129 corp: 17/205b lim: 29 exec/s: 4215 rss: 350Mb L: 29/29 MS: 5 InsertByte-ChangeBit-CopyPart-InsertRepeatedBytes-CrossOver-
+#4567 REDUCE cov: 4941 ft: 5129 corp: 17/204b lim: 29 exec/s: 4567 rss: 404Mb L: 24/29 MS: 2 ChangeByte-EraseBytes-
+#6642 NEW cov: 4941 ft: 5138 corp: 18/244b lim: 43 exec/s: 2214 rss: 450Mb L: 43/43 MS: 3 CopyPart-CMP-CrossOver- DE: "verack"-
+# abort fuzzing using ctrl-c
+$ ls process_message-seeded-from-thin-air/
+349ac589fc66a09abc0b72bb4ae445a7a19e2cd8 4df479f1f421f2ea64b383cd4919a272604087a7
+a640312c98dcc55d6744730c33e41c5168c55f09 b135de16e4709558c0797c15f86046d31c5d86d7
+c000f7b41b05139de8b63f4cbf7d1ad4c6e2aa7f fc52cc00ec1eb1c08470e69f809ae4993fa70082
+$ cat --show-nonprinting process_message-seeded-from-thin-air/349ac589fc66a09abc0b72bb4ae445a7a19e2cd8
+block^@M-^?M-^?M-^?M-^?M-^?nM-^?M-^?
+```
+
+In this case the fuzzer managed to create a `block` message which, when passed to `ProcessMessage(...)`, increased coverage.
+
+The project's collection of seed corpora is found in the [`bitcoin-core/qa-assets`](https://github.com/bitcoin-core/qa-assets) repo.
+
+To fuzz `process_message` using the [`bitcoin-core/qa-assets`](https://github.com/bitcoin-core/qa-assets) seed corpus:
+
+```sh
+$ git clone https://github.com/bitcoin-core/qa-assets
+$ src/test/fuzz/process_message qa-assets/fuzz_seed_corpus/process_message/
+INFO: Seed: 1346407872
+INFO: Loaded 1 modules (424174 inline 8-bit counters): 424174 [0x55d8a9004ab8, 0x55d8a906c3a6),
+INFO: Loaded 1 PC tables (424174 PCs): 424174 [0x55d8a906c3a8,0x55d8a96e5288),
+INFO: 991 files found in qa-assets/fuzz_seed_corpus/process_message/
+INFO: -max_len is not provided; libFuzzer will not generate inputs larger than 4096 bytes
+INFO: seed corpus: files: 991 min: 1b max: 1858b total: 288291b rss: 150Mb
+#993 INITED cov: 7063 ft: 8236 corp: 25/3821b exec/s: 0 rss: 181Mb
+…
+```
+
+If you find coverage-increasing inputs when fuzzing, you are highly encouraged to submit them for inclusion in the [`bitcoin-core/qa-assets`](https://github.com/bitcoin-core/qa-assets) repo.
+
+Every pull request submitted against the Bitcoin Core repo is automatically tested against all inputs in the [`bitcoin-core/qa-assets`](https://github.com/bitcoin-core/qa-assets) repo. Contributing new coverage-increasing inputs is an easy way to help make Bitcoin Core more robust.
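+
+As a rough sketch of one way to prepare such a contribution (assuming you fuzzed `process_message` into the local directory used above), libFuzzer's `-merge=1` mode copies into the first directory only those inputs from the remaining directories that add coverage:
+
+```sh
+# Merge locally found inputs into a checkout of the seed corpus,
+# keeping only the ones that increase coverage over the existing seeds.
+$ src/test/fuzz/process_message -merge=1 \
+      qa-assets/fuzz_seed_corpus/process_message/ \
+      process_message-seeded-from-thin-air/
+```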
+
+## macOS hints for libFuzzer
+
+The default Clang/LLVM version supplied by Apple on macOS does not include
fuzzing libraries, so macOS users will need to install a full version, for
example using `brew install llvm`.
@@ -128,11 +96,40 @@ may need to run `./configure` with `--disable-asm` to avoid errors
with certain assembly code from Bitcoin Core's code. See [developer notes on sanitizers](https://github.com/bitcoin/bitcoin/blob/master/doc/developer-notes.md#sanitizers)
for more information.
-You may also need to take care of giving the correct path for clang and
-clang++, like `CC=/path/to/clang CXX=/path/to/clang++` if the non-systems
-clang does not come first in your path.
+You may also need to pass the correct path for `clang` and
+`clang++`, like `CC=/path/to/clang CXX=/path/to/clang++`, if the non-system
+`clang` does not come first in your path.
Full configure that was tested on macOS Catalina with `brew` installed `llvm`:
+
+```sh
+./configure --enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=/usr/local/opt/llvm/bin/clang CXX=/usr/local/opt/llvm/bin/clang++ --disable-asm
```
-./configure --disable-ccache --enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=/usr/local/opt/llvm/bin/clang CXX=/usr/local/opt/llvm/bin/clang++ --disable-asm
+
+Read the [libFuzzer documentation](https://llvm.org/docs/LibFuzzer.html) for more information. This [libFuzzer tutorial](https://github.com/google/fuzzing/blob/master/tutorial/libFuzzerTutorial.md) might also be of interest.
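+
+One part of that workflow worth knowing up front: when libFuzzer finds a crash it writes the offending input to a `crash-*` file, and running the fuzzing binary with one or more file paths as arguments simply replays those inputs once instead of fuzzing. A minimal sketch (the file name is illustrative):
+
+```sh
+# Reproduce a previously reported crash by replaying the saved input.
+$ src/test/fuzz/process_message crash-1234abcd
+```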
+
+# Fuzzing Bitcoin Core using american fuzzy lop (`afl-fuzz`)
+
+## Quickstart guide
+
+To quickly get started fuzzing Bitcoin Core using [`afl-fuzz`](https://github.com/google/afl):
+
+```sh
+$ git clone https://github.com/bitcoin/bitcoin
+$ cd bitcoin/
+$ git clone https://github.com/google/afl
+$ make -C afl/
+$ make -C afl/llvm_mode/
+$ ./autogen.sh
+$ CC=$(pwd)/afl/afl-clang-fast CXX=$(pwd)/afl/afl-clang-fast++ ./configure --enable-fuzz
+$ make
+# On macOS you may need to ignore x86 compilation checks when running "make". If so,
+# try compiling using: AFL_NO_X86=1 make
+$ mkdir -p inputs/ outputs/
+$ echo A > inputs/thin-air-input
+$ afl/afl-fuzz -i inputs/ -o outputs/ -- src/test/fuzz/bech32
+# You may have to change a few kernel parameters to test optimally - afl-fuzz
+# will print an error and suggestion if so.
```
+
+Read the [`afl-fuzz` documentation](https://github.com/google/afl) for more information.
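+
+The seed corpus used for libFuzzer above can also serve as the `afl-fuzz` input directory. A minimal sketch, assuming the [`bitcoin-core/qa-assets`](https://github.com/bitcoin-core/qa-assets) repo has been cloned as in the libFuzzer section and contains seeds for the chosen target:
+
+```sh
+# Seed afl-fuzz from the project's corpus instead of a hand-made input.
+$ afl/afl-fuzz -i qa-assets/fuzz_seed_corpus/bech32/ -o outputs/ -- src/test/fuzz/bech32
+```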
diff --git a/doc/release-notes-15437.md b/doc/release-notes-15437.md
deleted file mode 100644
index 6614207757..0000000000
--- a/doc/release-notes-15437.md
+++ /dev/null
@@ -1,53 +0,0 @@
-P2P and network changes
------------------------
-
-#### Removal of reject network messages from Bitcoin Core (BIP61)
-
-The command line option to enable BIP61 (`-enablebip61`) has been removed.
-
-This feature has been disabled by default since Bitcoin Core version 0.18.0.
-Nodes on the network can not generally be trusted to send valid ("reject")
-messages, so this should only ever be used when connected to a trusted node.
-Please use the recommended alternatives if you rely on this deprecated feature:
-
-* Testing or debugging of implementations of the Bitcoin P2P network protocol
- should be done by inspecting the log messages that are produced by a recent
- version of Bitcoin Core. Bitcoin Core logs debug messages
- (`-debug=<category>`) to a stream (`-printtoconsole`) or to a file
- (`-debuglogfile=<debug.log>`).
-
-* Testing the validity of a block can be achieved by specific RPCs:
- - `submitblock`
- - `getblocktemplate` with `'mode'` set to `'proposal'` for blocks with
- potentially invalid POW
-
-* Testing the validity of a transaction can be achieved by specific RPCs:
- - `sendrawtransaction`
- - `testmempoolaccept`
-
-* Wallets should not use the absence of "reject" messages to indicate a
- transaction has propagated the network, nor should wallets use "reject"
- messages to set transaction fees. Wallets should rather use fee estimation
- to determine transaction fees and set replace-by-fee if desired. Thus, they
- could wait until the transaction has confirmed (taking into account the fee
- target they set (compare the RPC `estimatesmartfee`)) or listen for the
- transaction announcement by other network peers to check for propagation.
-
-The removal of BIP61 REJECT message support also has the following minor RPC
-and logging implications:
-
-* `testmempoolaccept` and `sendrawtransaction` no longer return the P2P REJECT
- code when a transaction is not accepted to the mempool. They still return the
- verbal reject reason.
-
-* Log messages that previously reported the REJECT code when a transaction was
- not accepted to the mempool now no longer report the REJECT code. The reason
- for rejection is still reported.
-
-Updated RPCs
-------------
-
-- `testmempoolaccept` and `sendrawtransaction` no longer return the P2P REJECT
- code when a transaction is not accepted to the mempool. See the Section
- _Removal of reject network messages from Bitcoin Core (BIP61)_ for details on
- the removal of BIP61 REJECT message support.
diff --git a/doc/release-notes-15954.md b/doc/release-notes-15954.md
deleted file mode 100644
index f4d2c5688c..0000000000
--- a/doc/release-notes-15954.md
+++ /dev/null
@@ -1,4 +0,0 @@
-Configuration option changes
------------------------------
-
-Importing blocks upon startup via the `bootstrap.dat` file no longer occurs by default. The file must now be specified with `-loadblock=<file>`.
diff --git a/doc/release-notes-17056.md b/doc/release-notes-17056.md
deleted file mode 100644
index 23d5a8c8cd..0000000000
--- a/doc/release-notes-17056.md
+++ /dev/null
@@ -1,4 +0,0 @@
-Low-level RPC Changes
-===
-
-- A new descriptor type `sortedmulti(...)` has been added to support multisig scripts where the public keys are sorted lexicographically in the resulting script.
diff --git a/doc/release-notes-17410.md b/doc/release-notes-17410.md
deleted file mode 100644
index 08ed353889..0000000000
--- a/doc/release-notes-17410.md
+++ /dev/null
@@ -1,5 +0,0 @@
-Command-line options
---------------------
-
-- The `-debug=db` logging category has been renamed to `-debug=walletdb`, to distinguish it from `coindb`.
- `-debug=db` has been deprecated and will be removed in the next major release.
diff --git a/doc/release-notes-17437.md b/doc/release-notes-17437.md
deleted file mode 100644
index 3edfd00a38..0000000000
--- a/doc/release-notes-17437.md
+++ /dev/null
@@ -1,5 +0,0 @@
-Low-level RPC Changes
-===
-
-- The RPC gettransaction, listtransactions and listsinceblock responses now also
-includes the height of the block that contains the wallet transaction, if any.
diff --git a/doc/release-notes-17578.md b/doc/release-notes-17578.md
deleted file mode 100644
index 664d17fd78..0000000000
--- a/doc/release-notes-17578.md
+++ /dev/null
@@ -1,13 +0,0 @@
-Deprecated or removed RPCs
---------------------------
-
-- RPC `getaddressinfo` changes:
-
- - the `label` field has been deprecated in favor of the `labels` field and
- will be removed in 0.21. It can be re-enabled in the interim by launching
- with `-deprecatedrpc=label`.
-
- - the `labels` behavior of returning an array of JSON objects containing name
- and purpose key/value pairs has been deprecated in favor of an array of
- label names and will be removed in 0.21. The previous behavior can be
- re-enabled in the interim by launching with `-deprecatedrpc=labelspurpose`.
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 99ca53c597..cd6a4d6b59 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -1,132 +1,5 @@
-*After branching off for a major version release of Bitcoin Core, use this
-template to create the initial release notes draft.*
+Please edit the release notes here:
-*The release notes draft is a temporary file that can be added to by anyone. See
-[/doc/developer-notes.md#release-notes](/doc/developer-notes.md#release-notes)
-for the process.*
-
-*Create the draft, named* "*version* Release Notes Draft"
-*(e.g. "0.20.0 Release Notes Draft"), as a collaborative wiki in:*
-
-https://github.com/bitcoin-core/bitcoin-devwiki/wiki/
+https://github.com/bitcoin-core/bitcoin-devwiki/wiki/0.20.0-Release-Notes-Draft
*Before the final release, move the notes back to this git repository.*
-
-*version* Release Notes Draft
-===============================
-
-Bitcoin Core version *version* is now available from:
-
- <https://bitcoincore.org/bin/bitcoin-core-*version*/>
-
-This release includes new features, various bug fixes and performance
-improvements, as well as updated translations.
-
-Please report bugs using the issue tracker at GitHub:
-
- <https://github.com/bitcoin/bitcoin/issues>
-
-To receive security and update notifications, please subscribe to:
-
- <https://bitcoincore.org/en/list/announcements/join/>
-
-How to Upgrade
-==============
-
-If you are running an older version, shut it down. Wait until it has completely
-shut down (which might take a few minutes for older versions), then run the
-installer (on Windows) or just copy over `/Applications/Bitcoin-Qt` (on Mac)
-or `bitcoind`/`bitcoin-qt` (on Linux).
-
-Upgrading directly from a version of Bitcoin Core that has reached its EOL is
-possible, but it might take some time if the datadir needs to be migrated. Old
-wallet versions of Bitcoin Core are generally supported.
-
-Compatibility
-==============
-
-Bitcoin Core is supported and extensively tested on operating systems using
-the Linux kernel, macOS 10.12+, and Windows 7 and newer. It is not recommended
-to use Bitcoin Core on unsupported systems.
-
-Bitcoin Core should also work on most other Unix-like systems but is not
-as frequently tested on them.
-
-From Bitcoin Core 0.20.0 onwards, macOS versions earlier than 10.12 are no
-longer supported. Additionally, Bitcoin Core does not yet change appearance
-when macOS "dark mode" is activated.
-
-In addition to previously supported CPU platforms, this release's pre-compiled
-distribution provides binaries for the RISC-V platform.
-
-Notable changes
-===============
-
-Build System
-------------
-
-- OpenSSL is no longer used by Bitcoin Core. The last usage of the library
-was removed in #17265.
-
-- glibc 2.17 or greater is now required to run the release binaries. This
-retains compatibility with RHEL 7, CentOS 7, Debian 8 and Ubuntu 14.04 LTS.
-Further details can be found in #17538.
-
-New RPCs
---------
-
-New settings
-------------
-
-- RPC Whitelist system. It can give certain RPC users permissions to only some RPC calls.
-It can be set with two command line arguments (`rpcwhitelist` and `rpcwhitelistdefault`). (#12763)
-
-Updated settings
-----------------
-
-Updated RPCs
-------------
-
-Note: some low-level RPC changes mainly useful for testing are described in the
-Low-level Changes section below.
-
-GUI changes
------------
-
-- The "Start Bitcoin Core on system login" option has been removed on macOS.
-
-Wallet
-------
-
-- The wallet now by default uses bech32 addresses when using RPC, and creates native segwit change outputs.
-- The way that output trust was computed has been fixed in #16766, which impacts confirmed/unconfirmed balance status and coin selection.
-
-Low-level changes
-=================
-
-Command line
-------------
-
-Command line options prefixed with main/test/regtest network names like
-`-main.port=8333` `-test.server=1` previously were allowed but ignored. Now
-they trigger "Invalid parameter" errors on startup.
-
-Tests
------
-
-- It is now an error to use an unqualified `walletdir=path` setting in the config file if running on testnet or regtest
- networks. The setting now needs to be qualified as `chain.walletdir=path` or placed in the appropriate `[chain]`
- section. (#17447)
-
-- `-fallbackfee` was 0 (disabled) by default for the main chain, but 0.0002 by default for the test chains. Now it is 0
- by default for all chains. Testnet and regtest users will have to add `fallbackfee=0.0002` to their configuration if
- they weren't setting it and they want it to keep working like before. (#16524)
-
-Credits
-=======
-
-Thanks to everyone who directly contributed to this release:
-
-
-As well as to everyone that helped with translations on
-[Transifex](https://www.transifex.com/bitcoin/bitcoin/).
diff --git a/doc/release-notes/release-notes-0.19.1.md b/doc/release-notes/release-notes-0.19.1.md
new file mode 100644
index 0000000000..5746bebb0d
--- /dev/null
+++ b/doc/release-notes/release-notes-0.19.1.md
@@ -0,0 +1,115 @@
+0.19.1 Release Notes
+===============================
+
+Bitcoin Core version 0.19.1 is now available from:
+
+ <https://bitcoincore.org/bin/bitcoin-core-0.19.1/>
+
+This minor release includes various bug fixes and performance
+improvements, as well as updated translations.
+
+Please report bugs using the issue tracker at GitHub:
+
+ <https://github.com/bitcoin/bitcoin/issues>
+
+To receive security and update notifications, please subscribe to:
+
+ <https://bitcoincore.org/en/list/announcements/join/>
+
+How to Upgrade
+==============
+
+If you are running an older version, shut it down. Wait until it has completely
+shut down (which might take a few minutes for older versions), then run the
+installer (on Windows) or just copy over `/Applications/Bitcoin-Qt` (on Mac)
+or `bitcoind`/`bitcoin-qt` (on Linux).
+
+Upgrading directly from a version of Bitcoin Core that has reached its EOL is
+possible, but it might take some time if the datadir needs to be migrated. Old
+wallet versions of Bitcoin Core are generally supported.
+
+Compatibility
+==============
+
+Bitcoin Core is supported and extensively tested on operating systems using
+the Linux kernel, macOS 10.10+, and Windows 7 and newer. It is not recommended
+to use Bitcoin Core on unsupported systems.
+
+Bitcoin Core should also work on most other Unix-like systems but is not
+as frequently tested on them.
+
+From Bitcoin Core 0.17.0 onwards, macOS versions earlier than 10.10 are no
+longer supported, as Bitcoin Core is now built using Qt 5.9.x which requires
+macOS 10.10+. Additionally, Bitcoin Core does not yet change appearance when
+macOS "dark mode" is activated.
+
+In addition to previously supported CPU platforms, this release's pre-compiled
+distribution provides binaries for the RISC-V platform.
+
+0.19.1 change log
+=================
+
+### Wallet
+- #17643 Fix origfee return for bumpfee with feerate arg (instagibbs)
+- #16963 Fix `unique_ptr` usage in boost::signals2 (promag)
+- #17258 Fix issue with conflicted mempool tx in listsinceblock (adamjonas, mchrostowski)
+- #17924 Bug: IsUsedDestination shouldn't use key id as script id for ScriptHash (instagibbs)
+- #17621 IsUsedDestination should count any known single-key address (instagibbs)
+- #17843 Reset reused transactions cache (fjahr)
+
+### RPC and other APIs
+- #17687 cli: Fix fatal leveldb error when specifying -blockfilterindex=basic twice (brakmic)
+- #17728 require second argument only for scantxoutset start action (achow101)
+- #17445 zmq: Fix due to invalid argument and multiple notifiers (promag)
+- #17524 psbt: handle unspendable psbts (achow101)
+- #17156 psbt: check that various indexes and amounts are within bounds (achow101)
+
+### GUI
+- #17427 Fix missing qRegisterMetaType for `size_t` (hebasto)
+- #17695 disable File-\>CreateWallet during startup (fanquake)
+- #17634 Fix comparison function signature (hebasto)
+- #18062 Fix unintialized WalletView::progressDialog (promag)
+
+### Tests and QA
+- #17416 Appveyor improvement - text file for vcpkg package list (sipsorcery)
+- #17488 fix "bitcoind already running" warnings on macOS (fanquake)
+- #17980 add missing #include to fix compiler errors (kallewoof)
+
+### Platform support
+- #17736 Update msvc build for Visual Studio 2019 v16.4 (sipsorcery)
+- #17364 Updates to appveyor config for VS2019 and Qt5.9.8 + msvc project fixes (sipsorcery)
+- #17887 bug-fix macos: give free bytes to `F_PREALLOCATE` (kallewoof)
+
+### Miscellaneous
+- #17897 init: Stop indexes on shutdown after ChainStateFlushed callback (jimpo)
+- #17450 util: Add missing headers to util/fees.cpp (hebasto)
+- #17654 Unbreak build with Boost 1.72.0 (jbeich)
+- #17857 scripts: Fix symbol-check & security-check argument passing (fanquake)
+- #17762 Log to net category for exceptions in ProcessMessages (laanwj)
+- #18100 Update univalue subtree (MarcoFalke)
+
+Credits
+=======
+
+Thanks to everyone who directly contributed to this release:
+
+- Aaron Clauson
+- Adam Jonas
+- Andrew Chow
+- Fabian Jahr
+- fanquake
+- Gregory Sanders
+- Harris
+- Hennadii Stepanov
+- Jan Beich
+- Jim Posen
+- João Barbosa
+- Karl-Johan Alm
+- Luke Dashjr
+- MarcoFalke
+- Michael Chrostowski
+- Russell Yanofsky
+- Wladimir J. van der Laan
+
+As well as to everyone that helped with translations on
+[Transifex](https://www.transifex.com/bitcoin/bitcoin/).
diff --git a/doc/release-process.md b/doc/release-process.md
index 1ffef3e106..e0f29f6ad7 100644
--- a/doc/release-process.md
+++ b/doc/release-process.md
@@ -121,7 +121,7 @@ Ensure gitian-builder is up-to-date:
echo '5a60e0a4b3e0b4d655317b2f12a810211c50242138322b16e7e01c6fbb89d92f inputs/osslsigncode-2.0.tar.gz' | sha256sum -c
popd
-Create the macOS SDK tarball, see the [macOS build instructions](build-osx.md#deterministic-macos-dmg-notes) for details, and copy it into the inputs directory.
+Create the macOS SDK tarball (see the [macdeploy instructions](/contrib/macdeploy/README.md#deterministic-macos-dmg-notes) for details) and copy it into the inputs directory.
### Optional: Seed the Gitian sources cache and offline git repositories
@@ -268,7 +268,6 @@ The list of files should be:
```
bitcoin-${VERSION}-aarch64-linux-gnu.tar.gz
bitcoin-${VERSION}-arm-linux-gnueabihf.tar.gz
-bitcoin-${VERSION}-i686-pc-linux-gnu.tar.gz
bitcoin-${VERSION}-riscv64-linux-gnu.tar.gz
bitcoin-${VERSION}-x86_64-linux-gnu.tar.gz
bitcoin-${VERSION}-osx64.tar.gz
@@ -329,8 +328,6 @@ bitcoin.org (see below for bitcoin.org update instructions).
- Update packaging repo
- - Notify BlueMatt so that he can start building [the PPAs](https://launchpad.net/~bitcoin/+archive/ubuntu/bitcoin)
-
- Push the flatpak to flathub, e.g. https://github.com/flathub/org.bitcoincore.bitcoin-qt/pull/2
- Push the latest version to master (if applicable), e.g. https://github.com/bitcoin-core/packaging/pull/32
diff --git a/src/Makefile.am b/src/Makefile.am
index e58a89ca03..8c927f330b 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -178,7 +178,6 @@ BITCOIN_CORE_H = \
random.h \
randomenv.h \
reverse_iterator.h \
- reverselock.h \
rpc/blockchain.h \
rpc/client.h \
rpc/protocol.h \
@@ -220,6 +219,7 @@ BITCOIN_CORE_H = \
util/system.h \
util/macros.h \
util/memory.h \
+ util/message.h \
util/moneystr.h \
util/rbf.h \
util/settings.h \
@@ -228,7 +228,6 @@ BITCOIN_CORE_H = \
util/time.h \
util/translation.h \
util/url.h \
- util/validation.h \
util/vector.h \
validation.h \
validationinterface.h \
@@ -242,7 +241,6 @@ BITCOIN_CORE_H = \
wallet/fees.h \
wallet/ismine.h \
wallet/load.h \
- wallet/psbtwallet.h \
wallet/rpcwallet.h \
wallet/scriptpubkeyman.h \
wallet/wallet.h \
@@ -350,7 +348,6 @@ libbitcoin_wallet_a_SOURCES = \
wallet/feebumper.cpp \
wallet/fees.cpp \
wallet/load.cpp \
- wallet/psbtwallet.cpp \
wallet/rpcdump.cpp \
wallet/rpcwallet.cpp \
wallet/scriptpubkeyman.cpp \
@@ -517,6 +514,7 @@ libbitcoin_util_a_SOURCES = \
util/error.cpp \
util/fees.cpp \
util/system.cpp \
+ util/message.cpp \
util/moneystr.cpp \
util/rbf.cpp \
util/settings.cpp \
@@ -526,7 +524,6 @@ libbitcoin_util_a_SOURCES = \
util/string.cpp \
util/time.cpp \
util/url.cpp \
- util/validation.cpp \
$(BITCOIN_CORE_H)
if GLIBC_BACK_COMPAT
@@ -706,6 +703,11 @@ if TARGET_DARWIN
$(AM_V_at) OTOOL=$(OTOOL) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
endif
+if TARGET_WINDOWS
+ @echo "Checking Windows dynamic libraries..."
+ $(AM_V_at) OBJDUMP=$(OBJDUMP) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
+endif
+
if GLIBC_BACK_COMPAT
@echo "Checking glibc back compat..."
$(AM_V_at) READELF=$(READELF) CPPFILT=$(CPPFILT) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
@@ -718,6 +720,7 @@ if HARDEN
endif
if EMBEDDED_LEVELDB
+include Makefile.crc32c.include
include Makefile.leveldb.include
endif
diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include
index 1c97e22de8..eae8b1fcd1 100644
--- a/src/Makefile.bench.include
+++ b/src/Makefile.bench.include
@@ -43,12 +43,11 @@ bench_bench_bitcoin_SOURCES = \
nodist_bench_bench_bitcoin_SOURCES = $(GENERATED_BENCH_FILES)
-bench_bench_bitcoin_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(EVENT_CLFAGS) $(EVENT_PTHREADS_CFLAGS) -I$(builddir)/bench/
+bench_bench_bitcoin_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(EVENT_CFLAGS) $(EVENT_PTHREADS_CFLAGS) -I$(builddir)/bench/
bench_bench_bitcoin_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
bench_bench_bitcoin_LDADD = \
$(LIBBITCOIN_SERVER) \
$(LIBBITCOIN_WALLET) \
- $(LIBBITCOIN_SERVER) \
$(LIBBITCOIN_COMMON) \
$(LIBBITCOIN_UTIL) \
$(LIBBITCOIN_CONSENSUS) \
diff --git a/src/Makefile.crc32c.include b/src/Makefile.crc32c.include
new file mode 100644
index 0000000000..802b3a2e4b
--- /dev/null
+++ b/src/Makefile.crc32c.include
@@ -0,0 +1,75 @@
+# Copyright (c) 2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+LIBCRC32C_INT = crc32c/libcrc32c.a
+LIBLEVELDB_SSE42_INT = leveldb/libleveldb_sse42.a
+
+EXTRA_LIBRARIES += $(LIBCRC32C_INT)
+
+LIBCRC32C = $(LIBCRC32C_INT)
+
+CRC32C_CPPFLAGS_INT =
+CRC32C_CPPFLAGS_INT += -I$(srcdir)/crc32c/include
+CRC32C_CPPFLAGS_INT += -DHAVE_BUILTIN_PREFETCH=@HAVE_BUILTIN_PREFETCH@
+CRC32C_CPPFLAGS_INT += -DHAVE_MM_PREFETCH=@HAVE_MM_PREFETCH@
+CRC32C_CPPFLAGS_INT += -DHAVE_STRONG_GETAUXVAL=@HAVE_STRONG_GETAUXVAL@
+CRC32C_CPPFLAGS_INT += -DHAVE_WEAK_GETAUXVAL=@HAVE_WEAK_GETAUXVAL@
+CRC32C_CPPFLAGS_INT += -DCRC32C_TESTS_BUILT_WITH_GLOG=0
+
+if ENABLE_SSE42
+CRC32C_CPPFLAGS_INT += -DHAVE_SSE42=1
+else
+CRC32C_CPPFLAGS_INT += -DHAVE_SSE42=0
+endif
+
+if ENABLE_ARM_CRC
+CRC32C_CPPFLAGS_INT += -DHAVE_ARM64_CRC32C=1
+else
+CRC32C_CPPFLAGS_INT += -DHAVE_ARM64_CRC32C=0
+endif
+
+if WORDS_BIGENDIAN
+CRC32C_CPPFLAGS_INT += -DBYTE_ORDER_BIG_ENDIAN=1
+else
+CRC32C_CPPFLAGS_INT += -DBYTE_ORDER_BIG_ENDIAN=0
+endif
+
+crc32c_libcrc32c_a_CPPFLAGS = $(AM_CPPFLAGS) $(CRC32C_CPPFLAGS_INT) $(CRC32C_CPPFLAGS)
+crc32c_libcrc32c_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+
+crc32c_libcrc32c_a_SOURCES =
+crc32c_libcrc32c_a_SOURCES += crc32c/include/crc32c/crc32c.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_arm64.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_arm64_linux_check.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_internal.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_prefetch.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_read_le.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_round_up.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_sse42_check.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_sse42.h
+
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c.cc
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_portable.cc
+
+if ENABLE_SSE42
+LIBCRC32C_SSE42_INT = crc32c/libcrc32c_sse42.a
+EXTRA_LIBRARIES += $(LIBCRC32C_SSE42_INT)
+LIBCRC32C += $(LIBCRC32C_SSE42_INT)
+
+crc32c_libcrc32c_sse42_a_CPPFLAGS = $(crc32c_libcrc32c_a_CPPFLAGS)
+crc32c_libcrc32c_sse42_a_CXXFLAGS = $(crc32c_libcrc32c_a_CXXFLAGS) $(SSE42_CXXFLAGS)
+
+crc32c_libcrc32c_sse42_a_SOURCES = crc32c/src/crc32c_sse42.cc
+endif
+
+if ENABLE_ARM_CRC
+LIBCRC32C_ARM_CRC_INT = crc32c/libcrc32c_arm_crc.a
+EXTRA_LIBRARIES += $(LIBCRC32C_ARM_CRC_INT)
+LIBCRC32C += $(LIBCRC32C_ARM_CRC_INT)
+
+crc32c_libcrc32c_arm_crc_a_CPPFLAGS = $(crc32c_libcrc32c_a_CPPFLAGS)
+crc32c_libcrc32c_arm_crc_a_CXXFLAGS = $(crc32c_libcrc32c_a_CXXFLAGS) $(ARM_CRC_CXXFLAGS)
+
+crc32c_libcrc32c_arm_crc_a_SOURCES = crc32c/src/crc32c_arm64.cc
+endif
diff --git a/src/Makefile.leveldb.include b/src/Makefile.leveldb.include
index bd08bcb4ed..04b53471e4 100644
--- a/src/Makefile.leveldb.include
+++ b/src/Makefile.leveldb.include
@@ -4,27 +4,33 @@
LIBLEVELDB_INT = leveldb/libleveldb.a
LIBMEMENV_INT = leveldb/libmemenv.a
-LIBLEVELDB_SSE42_INT = leveldb/libleveldb_sse42.a
EXTRA_LIBRARIES += $(LIBLEVELDB_INT)
EXTRA_LIBRARIES += $(LIBMEMENV_INT)
-EXTRA_LIBRARIES += $(LIBLEVELDB_SSE42_INT)
-LIBLEVELDB += $(LIBLEVELDB_INT)
+LIBLEVELDB += $(LIBLEVELDB_INT) $(LIBCRC32C)
LIBMEMENV += $(LIBMEMENV_INT)
-LIBLEVELDB_SSE42 = $(LIBLEVELDB_SSE42_INT)
LEVELDB_CPPFLAGS += -I$(srcdir)/leveldb/include
LEVELDB_CPPFLAGS += -I$(srcdir)/leveldb/helpers/memenv
LEVELDB_CPPFLAGS_INT =
LEVELDB_CPPFLAGS_INT += -I$(srcdir)/leveldb
-LEVELDB_CPPFLAGS_INT += $(LEVELDB_TARGET_FLAGS)
-LEVELDB_CPPFLAGS_INT += -DLEVELDB_ATOMIC_PRESENT
+LEVELDB_CPPFLAGS_INT += -I$(srcdir)/crc32c/include
LEVELDB_CPPFLAGS_INT += -D__STDC_LIMIT_MACROS
+LEVELDB_CPPFLAGS_INT += -DHAVE_SNAPPY=0 -DHAVE_CRC32C=1
+LEVELDB_CPPFLAGS_INT += -DHAVE_FDATASYNC=@HAVE_FDATASYNC@
+LEVELDB_CPPFLAGS_INT += -DHAVE_FULLFSYNC=@HAVE_FULLFSYNC@
+LEVELDB_CPPFLAGS_INT += -DHAVE_O_CLOEXEC=@HAVE_O_CLOEXEC@
+
+if WORDS_BIGENDIAN
+LEVELDB_CPPFLAGS_INT += -DLEVELDB_IS_BIG_ENDIAN=1
+else
+LEVELDB_CPPFLAGS_INT += -DLEVELDB_IS_BIG_ENDIAN=0
+endif
if TARGET_WINDOWS
-LEVELDB_CPPFLAGS_INT += -DLEVELDB_PLATFORM_WINDOWS -D__USE_MINGW_ANSI_STDIO=1
+LEVELDB_CPPFLAGS_INT += -DLEVELDB_PLATFORM_WINDOWS -D_UNICODE -DUNICODE -D__USE_MINGW_ANSI_STDIO=1
else
LEVELDB_CPPFLAGS_INT += -DLEVELDB_PLATFORM_POSIX
endif
@@ -33,12 +39,8 @@ leveldb_libleveldb_a_CPPFLAGS = $(AM_CPPFLAGS) $(LEVELDB_CPPFLAGS_INT) $(LEVELDB
leveldb_libleveldb_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
leveldb_libleveldb_a_SOURCES=
-leveldb_libleveldb_a_SOURCES += leveldb/port/atomic_pointer.h
-leveldb_libleveldb_a_SOURCES += leveldb/port/port_example.h
-leveldb_libleveldb_a_SOURCES += leveldb/port/port_posix.h
-leveldb_libleveldb_a_SOURCES += leveldb/port/win/stdint.h
+leveldb_libleveldb_a_SOURCES += leveldb/port/port_stdcxx.h
leveldb_libleveldb_a_SOURCES += leveldb/port/port.h
-leveldb_libleveldb_a_SOURCES += leveldb/port/port_win.h
leveldb_libleveldb_a_SOURCES += leveldb/port/thread_annotations.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/db.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/options.h
@@ -47,6 +49,7 @@ leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/filter_policy.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/slice.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/table_builder.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/env.h
+leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/export.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/c.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/iterator.h
leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/cache.h
@@ -78,6 +81,7 @@ leveldb_libleveldb_a_SOURCES += leveldb/table/format.h
leveldb_libleveldb_a_SOURCES += leveldb/table/iterator_wrapper.h
leveldb_libleveldb_a_SOURCES += leveldb/util/crc32c.h
leveldb_libleveldb_a_SOURCES += leveldb/util/env_posix_test_helper.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/env_windows_test_helper.h
leveldb_libleveldb_a_SOURCES += leveldb/util/arena.h
leveldb_libleveldb_a_SOURCES += leveldb/util/random.h
leveldb_libleveldb_a_SOURCES += leveldb/util/posix_logger.h
@@ -87,7 +91,9 @@ leveldb_libleveldb_a_SOURCES += leveldb/util/coding.h
leveldb_libleveldb_a_SOURCES += leveldb/util/testutil.h
leveldb_libleveldb_a_SOURCES += leveldb/util/mutexlock.h
leveldb_libleveldb_a_SOURCES += leveldb/util/logging.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/no_destructor.h
leveldb_libleveldb_a_SOURCES += leveldb/util/testharness.h
+leveldb_libleveldb_a_SOURCES += leveldb/util/windows_logger.h
leveldb_libleveldb_a_SOURCES += leveldb/db/builder.cc
leveldb_libleveldb_a_SOURCES += leveldb/db/c.cc
@@ -120,7 +126,6 @@ leveldb_libleveldb_a_SOURCES += leveldb/util/coding.cc
leveldb_libleveldb_a_SOURCES += leveldb/util/comparator.cc
leveldb_libleveldb_a_SOURCES += leveldb/util/crc32c.cc
leveldb_libleveldb_a_SOURCES += leveldb/util/env.cc
-leveldb_libleveldb_a_SOURCES += leveldb/util/env_posix.cc
leveldb_libleveldb_a_SOURCES += leveldb/util/filter_policy.cc
leveldb_libleveldb_a_SOURCES += leveldb/util/hash.cc
leveldb_libleveldb_a_SOURCES += leveldb/util/histogram.cc
@@ -129,21 +134,12 @@ leveldb_libleveldb_a_SOURCES += leveldb/util/options.cc
leveldb_libleveldb_a_SOURCES += leveldb/util/status.cc
if TARGET_WINDOWS
-leveldb_libleveldb_a_SOURCES += leveldb/util/env_win.cc
-leveldb_libleveldb_a_SOURCES += leveldb/port/port_win.cc
+leveldb_libleveldb_a_SOURCES += leveldb/util/env_windows.cc
else
-leveldb_libleveldb_a_SOURCES += leveldb/port/port_posix.cc
+leveldb_libleveldb_a_SOURCES += leveldb/util/env_posix.cc
endif
leveldb_libmemenv_a_CPPFLAGS = $(leveldb_libleveldb_a_CPPFLAGS)
leveldb_libmemenv_a_CXXFLAGS = $(leveldb_libleveldb_a_CXXFLAGS)
leveldb_libmemenv_a_SOURCES = leveldb/helpers/memenv/memenv.cc
leveldb_libmemenv_a_SOURCES += leveldb/helpers/memenv/memenv.h
-
-leveldb_libleveldb_sse42_a_CPPFLAGS = $(leveldb_libleveldb_a_CPPFLAGS)
-leveldb_libleveldb_sse42_a_CXXFLAGS = $(leveldb_libleveldb_a_CXXFLAGS)
-if ENABLE_HWCRC32
-leveldb_libleveldb_sse42_a_CPPFLAGS += -DLEVELDB_PLATFORM_POSIX_SSE
-leveldb_libleveldb_sse42_a_CXXFLAGS += $(SSE42_CXXFLAGS)
-endif
-leveldb_libleveldb_sse42_a_SOURCES = leveldb/port/port_posix_sse.cc
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index c76f30de8e..2938ccdc9f 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -2,9 +2,9 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
FUZZ_TARGETS = \
test/fuzz/addr_info_deserialize \
+ test/fuzz/addrdb \
test/fuzz/address_deserialize \
test/fuzz/addrman_deserialize \
test/fuzz/asmap \
@@ -15,29 +15,42 @@ FUZZ_TARGETS = \
test/fuzz/block_deserialize \
test/fuzz/block_file_info_deserialize \
test/fuzz/block_filter_deserialize \
+ test/fuzz/block_header \
test/fuzz/block_header_and_short_txids_deserialize \
+ test/fuzz/blockfilter \
test/fuzz/blockheader_deserialize \
test/fuzz/blocklocator_deserialize \
test/fuzz/blockmerkleroot \
test/fuzz/blocktransactions_deserialize \
test/fuzz/blocktransactionsrequest_deserialize \
test/fuzz/blockundo_deserialize \
+ test/fuzz/bloom_filter \
test/fuzz/bloomfilter_deserialize \
+ test/fuzz/chain \
test/fuzz/coins_deserialize \
test/fuzz/decode_tx \
test/fuzz/descriptor_parse \
test/fuzz/diskblockindex_deserialize \
test/fuzz/eval_script \
+ test/fuzz/fee_rate \
test/fuzz/fee_rate_deserialize \
test/fuzz/flat_file_pos_deserialize \
+ test/fuzz/float \
test/fuzz/hex \
test/fuzz/integer \
test/fuzz/inv_deserialize \
+ test/fuzz/key \
+ test/fuzz/key_io \
test/fuzz/key_origin_info_deserialize \
+ test/fuzz/locale \
test/fuzz/merkle_block_deserialize \
test/fuzz/messageheader_deserialize \
+ test/fuzz/multiplication_overflow \
+ test/fuzz/net_permissions \
test/fuzz/netaddr_deserialize \
+ test/fuzz/netaddress \
test/fuzz/out_point_deserialize \
+ test/fuzz/p2p_transport_deserializer \
test/fuzz/parse_hd_keypath \
test/fuzz/parse_iso8601 \
test/fuzz/parse_numbers \
@@ -46,23 +59,58 @@ FUZZ_TARGETS = \
test/fuzz/partial_merkle_tree_deserialize \
test/fuzz/partially_signed_transaction_deserialize \
test/fuzz/prefilled_transaction_deserialize \
+ test/fuzz/process_message \
+ test/fuzz/process_message_addr \
+ test/fuzz/process_message_block \
+ test/fuzz/process_message_blocktxn \
+ test/fuzz/process_message_cmpctblock \
+ test/fuzz/process_message_feefilter \
+ test/fuzz/process_message_filteradd \
+ test/fuzz/process_message_filterclear \
+ test/fuzz/process_message_filterload \
+ test/fuzz/process_message_getaddr \
+ test/fuzz/process_message_getblocks \
+ test/fuzz/process_message_getblocktxn \
+ test/fuzz/process_message_getdata \
+ test/fuzz/process_message_getheaders \
+ test/fuzz/process_message_headers \
+ test/fuzz/process_message_inv \
+ test/fuzz/process_message_mempool \
+ test/fuzz/process_message_notfound \
+ test/fuzz/process_message_ping \
+ test/fuzz/process_message_pong \
+ test/fuzz/process_message_sendcmpct \
+ test/fuzz/process_message_sendheaders \
+ test/fuzz/process_message_tx \
+ test/fuzz/process_message_verack \
+ test/fuzz/process_message_version \
+ test/fuzz/protocol \
test/fuzz/psbt \
test/fuzz/psbt_input_deserialize \
test/fuzz/psbt_output_deserialize \
test/fuzz/pub_key_deserialize \
+ test/fuzz/rolling_bloom_filter \
test/fuzz/script \
test/fuzz/script_deserialize \
test/fuzz/script_flags \
+ test/fuzz/script_ops \
+ test/fuzz/scriptnum_ops \
test/fuzz/service_deserialize \
+ test/fuzz/signature_checker \
+ test/fuzz/snapshotmetadata_deserialize \
test/fuzz/spanparsing \
+ test/fuzz/string \
test/fuzz/strprintf \
test/fuzz/sub_net_deserialize \
+ test/fuzz/timedata \
test/fuzz/transaction \
test/fuzz/tx_in \
test/fuzz/tx_in_deserialize \
test/fuzz/tx_out \
test/fuzz/txoutcompressor_deserialize \
- test/fuzz/txundo_deserialize
+ test/fuzz/txundo_deserialize \
+ test/fuzz/uint160_deserialize \
+ test/fuzz/uint256_deserialize
if ENABLE_FUZZ
noinst_PROGRAMS += $(FUZZ_TARGETS:=)
@@ -96,7 +144,8 @@ BITCOIN_TEST_SUITE = \
FUZZ_SUITE = \
test/fuzz/fuzz.cpp \
test/fuzz/fuzz.h \
- test/fuzz/FuzzedDataProvider.h
+ test/fuzz/FuzzedDataProvider.h \
+ test/fuzz/util.h
FUZZ_SUITE_LD_COMMON = \
$(LIBBITCOIN_SERVER) \
@@ -209,7 +258,8 @@ BITCOIN_TESTS += \
wallet/test/wallet_crypto_tests.cpp \
wallet/test/coinselector_tests.cpp \
wallet/test/init_tests.cpp \
- wallet/test/ismine_tests.cpp
+ wallet/test/ismine_tests.cpp \
+ wallet/test/scriptpubkeyman_tests.cpp
BITCOIN_TEST_SUITE += \
wallet/test/wallet_test_fixture.cpp \
@@ -244,6 +294,12 @@ test_fuzz_addr_info_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_addr_info_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_addr_info_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_addrdb_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_addrdb_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_addrdb_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_addrdb_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_addrdb_SOURCES = $(FUZZ_SUITE) test/fuzz/addrdb.cpp
+
test_fuzz_address_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DADDRESS_DESERIALIZE=1
test_fuzz_address_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_address_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
@@ -304,12 +360,24 @@ test_fuzz_block_filter_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_block_filter_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_block_filter_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_block_header_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_block_header_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_block_header_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_block_header_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_block_header_SOURCES = $(FUZZ_SUITE) test/fuzz/block_header.cpp
+
test_fuzz_block_header_and_short_txids_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCK_HEADER_AND_SHORT_TXIDS_DESERIALIZE=1
test_fuzz_block_header_and_short_txids_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_block_header_and_short_txids_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_block_header_and_short_txids_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_block_header_and_short_txids_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_blockfilter_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_blockfilter_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_blockfilter_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_blockfilter_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_blockfilter_SOURCES = $(FUZZ_SUITE) test/fuzz/blockfilter.cpp
+
test_fuzz_blockheader_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOCKHEADER_DESERIALIZE=1
test_fuzz_blockheader_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_blockheader_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
@@ -346,12 +414,24 @@ test_fuzz_blockundo_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_blockundo_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_blockundo_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_bloom_filter_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_bloom_filter_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_bloom_filter_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_bloom_filter_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_bloom_filter_SOURCES = $(FUZZ_SUITE) test/fuzz/bloom_filter.cpp
+
test_fuzz_bloomfilter_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBLOOMFILTER_DESERIALIZE=1
test_fuzz_bloomfilter_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_bloomfilter_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_bloomfilter_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_bloomfilter_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_chain_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_chain_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_chain_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_chain_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_chain_SOURCES = $(FUZZ_SUITE) test/fuzz/chain.cpp
+
test_fuzz_coins_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DCOINS_DESERIALIZE=1
test_fuzz_coins_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_coins_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
@@ -382,6 +462,12 @@ test_fuzz_eval_script_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_eval_script_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_eval_script_SOURCES = $(FUZZ_SUITE) test/fuzz/eval_script.cpp
+test_fuzz_fee_rate_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_fee_rate_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_fee_rate_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_fee_rate_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_fee_rate_SOURCES = $(FUZZ_SUITE) test/fuzz/fee_rate.cpp
+
test_fuzz_fee_rate_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DFEE_RATE_DESERIALIZE=1
test_fuzz_fee_rate_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_fee_rate_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
@@ -394,6 +480,12 @@ test_fuzz_flat_file_pos_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_flat_file_pos_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_flat_file_pos_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_float_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_float_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_float_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_float_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_float_SOURCES = $(FUZZ_SUITE) test/fuzz/float.cpp
+
test_fuzz_hex_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
test_fuzz_hex_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_hex_LDADD = $(FUZZ_SUITE_LD_COMMON)
@@ -412,12 +504,30 @@ test_fuzz_inv_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_inv_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_inv_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_key_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_key_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_key_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_key_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_key_SOURCES = $(FUZZ_SUITE) test/fuzz/key.cpp
+
+test_fuzz_key_io_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_key_io_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_key_io_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_key_io_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_key_io_SOURCES = $(FUZZ_SUITE) test/fuzz/key_io.cpp
+
test_fuzz_key_origin_info_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DKEY_ORIGIN_INFO_DESERIALIZE=1
test_fuzz_key_origin_info_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_key_origin_info_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_key_origin_info_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_key_origin_info_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_locale_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_locale_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_locale_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_locale_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_locale_SOURCES = $(FUZZ_SUITE) test/fuzz/locale.cpp
+
test_fuzz_merkle_block_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMERKLE_BLOCK_DESERIALIZE=1
test_fuzz_merkle_block_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_merkle_block_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
@@ -430,18 +540,42 @@ test_fuzz_messageheader_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_messageheader_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_messageheader_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_multiplication_overflow_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_multiplication_overflow_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_multiplication_overflow_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_multiplication_overflow_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_multiplication_overflow_SOURCES = $(FUZZ_SUITE) test/fuzz/multiplication_overflow.cpp
+
+test_fuzz_net_permissions_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_net_permissions_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_net_permissions_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_net_permissions_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_net_permissions_SOURCES = $(FUZZ_SUITE) test/fuzz/net_permissions.cpp
+
test_fuzz_netaddr_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DNETADDR_DESERIALIZE=1
test_fuzz_netaddr_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_netaddr_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_netaddr_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_netaddr_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_netaddress_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_netaddress_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_netaddress_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_netaddress_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_netaddress_SOURCES = $(FUZZ_SUITE) test/fuzz/netaddress.cpp
+
test_fuzz_out_point_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DOUT_POINT_DESERIALIZE=1
test_fuzz_out_point_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_out_point_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_out_point_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_out_point_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_p2p_transport_deserializer_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_p2p_transport_deserializer_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_p2p_transport_deserializer_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_p2p_transport_deserializer_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_p2p_transport_deserializer_SOURCES = $(FUZZ_SUITE) test/fuzz/p2p_transport_deserializer.cpp
+
test_fuzz_parse_hd_keypath_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
test_fuzz_parse_hd_keypath_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_parse_hd_keypath_LDADD = $(FUZZ_SUITE_LD_COMMON)
@@ -490,6 +624,162 @@ test_fuzz_prefilled_transaction_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_prefilled_transaction_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_prefilled_transaction_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_process_message_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_process_message_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_addr_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=addr
+test_fuzz_process_message_addr_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_addr_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_addr_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_addr_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_block_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=block
+test_fuzz_process_message_block_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_block_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_block_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_block_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_blocktxn_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=blocktxn
+test_fuzz_process_message_blocktxn_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_blocktxn_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_blocktxn_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_blocktxn_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_cmpctblock_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=cmpctblock
+test_fuzz_process_message_cmpctblock_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_cmpctblock_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_cmpctblock_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_cmpctblock_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_feefilter_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=feefilter
+test_fuzz_process_message_feefilter_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_feefilter_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_feefilter_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_feefilter_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_filteradd_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=filteradd
+test_fuzz_process_message_filteradd_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_filteradd_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_filteradd_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_filteradd_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_filterclear_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=filterclear
+test_fuzz_process_message_filterclear_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_filterclear_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_filterclear_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_filterclear_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_filterload_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=filterload
+test_fuzz_process_message_filterload_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_filterload_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_filterload_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_filterload_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_getaddr_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=getaddr
+test_fuzz_process_message_getaddr_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_getaddr_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_getaddr_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_getaddr_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_getblocks_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=getblocks
+test_fuzz_process_message_getblocks_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_getblocks_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_getblocks_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_getblocks_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_getblocktxn_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=getblocktxn
+test_fuzz_process_message_getblocktxn_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_getblocktxn_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_getblocktxn_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_getblocktxn_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_getdata_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=getdata
+test_fuzz_process_message_getdata_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_getdata_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_getdata_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_getdata_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_getheaders_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=getheaders
+test_fuzz_process_message_getheaders_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_getheaders_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_getheaders_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_getheaders_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_headers_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=headers
+test_fuzz_process_message_headers_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_headers_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_headers_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_headers_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_inv_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=inv
+test_fuzz_process_message_inv_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_inv_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_inv_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_inv_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_mempool_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=mempool
+test_fuzz_process_message_mempool_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_mempool_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_mempool_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_mempool_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_notfound_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=notfound
+test_fuzz_process_message_notfound_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_notfound_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_notfound_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_notfound_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_ping_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=ping
+test_fuzz_process_message_ping_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_ping_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_ping_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_ping_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_pong_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=pong
+test_fuzz_process_message_pong_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_pong_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_pong_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_pong_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_sendcmpct_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=sendcmpct
+test_fuzz_process_message_sendcmpct_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_sendcmpct_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_sendcmpct_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_sendcmpct_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_sendheaders_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=sendheaders
+test_fuzz_process_message_sendheaders_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_sendheaders_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_sendheaders_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_sendheaders_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_tx_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=tx
+test_fuzz_process_message_tx_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_tx_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_tx_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_tx_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_verack_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=verack
+test_fuzz_process_message_verack_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_verack_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_verack_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_verack_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
+test_fuzz_process_message_version_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DMESSAGE_TYPE=version
+test_fuzz_process_message_version_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_process_message_version_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_process_message_version_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_process_message_version_SOURCES = $(FUZZ_SUITE) test/fuzz/process_message.cpp
+
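The per-message binaries above are all built from the same test/fuzz/process_message.cpp and differ only in the -DMESSAGE_TYPE=<name> define. Below is a minimal standalone sketch of how such a define can be stringified into a per-binary filter; the macro and variable names are illustrative, not the harness's actual code.

// Minimal sketch (illustrative, not the actual harness code): turning an
// optional -DMESSAGE_TYPE=<name> define into a per-binary message filter.
#include <iostream>
#include <string>

#define STRINGIZE_(x) #x
#define STRINGIZE(x) STRINGIZE_(x)

#ifdef MESSAGE_TYPE
// e.g. building with -DMESSAGE_TYPE=addr yields the filter string "addr".
static const std::string LIMIT_TO_ONE_TYPE{STRINGIZE(MESSAGE_TYPE)};
#else
// Empty filter: this binary would fuzz every message type.
static const std::string LIMIT_TO_ONE_TYPE;
#endif

static bool WantMessageType(const std::string& msg_type)
{
    return LIMIT_TO_ONE_TYPE.empty() || msg_type == LIMIT_TO_ONE_TYPE;
}

int main()
{
    std::cout << "filter='" << LIMIT_TO_ONE_TYPE << "' addr wanted: "
              << WantMessageType("addr") << '\n';
}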
+test_fuzz_protocol_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_protocol_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_protocol_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_protocol_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_protocol_SOURCES = $(FUZZ_SUITE) test/fuzz/protocol.cpp
+
test_fuzz_psbt_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
test_fuzz_psbt_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_psbt_LDADD = $(FUZZ_SUITE_LD_COMMON)
@@ -514,6 +804,12 @@ test_fuzz_pub_key_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_pub_key_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_pub_key_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_rolling_bloom_filter_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_rolling_bloom_filter_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_rolling_bloom_filter_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_rolling_bloom_filter_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_rolling_bloom_filter_SOURCES = $(FUZZ_SUITE) test/fuzz/rolling_bloom_filter.cpp
+
test_fuzz_script_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
test_fuzz_script_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_script_LDADD = $(FUZZ_SUITE_LD_COMMON)
@@ -532,18 +828,48 @@ test_fuzz_script_flags_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_script_flags_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_script_flags_SOURCES = $(FUZZ_SUITE) test/fuzz/script_flags.cpp
+test_fuzz_script_ops_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_script_ops_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_script_ops_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_script_ops_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_script_ops_SOURCES = $(FUZZ_SUITE) test/fuzz/script_ops.cpp
+
+test_fuzz_scriptnum_ops_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_scriptnum_ops_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_scriptnum_ops_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_scriptnum_ops_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_scriptnum_ops_SOURCES = $(FUZZ_SUITE) test/fuzz/scriptnum_ops.cpp
+
test_fuzz_service_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DSERVICE_DESERIALIZE=1
test_fuzz_service_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_service_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_service_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_service_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_signature_checker_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_signature_checker_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_signature_checker_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_signature_checker_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_signature_checker_SOURCES = $(FUZZ_SUITE) test/fuzz/signature_checker.cpp
+
+test_fuzz_snapshotmetadata_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DSNAPSHOTMETADATA_DESERIALIZE=1
+test_fuzz_snapshotmetadata_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_snapshotmetadata_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_snapshotmetadata_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_snapshotmetadata_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
test_fuzz_spanparsing_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
test_fuzz_spanparsing_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_spanparsing_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_spanparsing_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_spanparsing_SOURCES = $(FUZZ_SUITE) test/fuzz/spanparsing.cpp
+test_fuzz_string_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_string_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_string_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_string_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_string_SOURCES = $(FUZZ_SUITE) test/fuzz/string.cpp
+
test_fuzz_strprintf_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
test_fuzz_strprintf_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_strprintf_LDADD = $(FUZZ_SUITE_LD_COMMON)
@@ -556,6 +882,12 @@ test_fuzz_sub_net_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_sub_net_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_sub_net_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_timedata_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_timedata_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_timedata_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_timedata_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_timedata_SOURCES = $(FUZZ_SUITE) test/fuzz/timedata.cpp
+
test_fuzz_transaction_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
test_fuzz_transaction_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_transaction_LDADD = $(FUZZ_SUITE_LD_COMMON)
@@ -592,6 +924,18 @@ test_fuzz_txundo_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_txundo_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_txundo_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+test_fuzz_uint160_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DUINT160_DESERIALIZE=1
+test_fuzz_uint160_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_uint160_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_uint160_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_uint160_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
+test_fuzz_uint256_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DUINT256_DESERIALIZE=1
+test_fuzz_uint256_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_uint256_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_uint256_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_uint256_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp
+
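Likewise, every *_deserialize target compiles the shared test/fuzz/deserialize.cpp and selects the type under test with a -D<NAME>_DESERIALIZE=1 define. A standalone sketch of that compile-time dispatch follows, with hypothetical stand-in decoders rather than the project's own helpers.

// Illustrative sketch only (not the project's harness): one source file, many
// fuzz binaries, with the type under test selected by a compile-time define.
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins for the real deserialization helpers.
static void DecodeAsUint256(const std::vector<uint8_t>&) { std::puts("uint256 path"); }
static void DecodeAsNetAddr(const std::vector<uint8_t>&) { std::puts("netaddr path"); }

static void TestOneInput(const std::vector<uint8_t>& buffer)
{
#if defined(UINT256_DESERIALIZE)
    DecodeAsUint256(buffer);   // built with -DUINT256_DESERIALIZE=1
#elif defined(NETADDR_DESERIALIZE)
    DecodeAsNetAddr(buffer);   // built with -DNETADDR_DESERIALIZE=1
#else
    DecodeAsUint256(buffer);   // standalone default for this sketch
    DecodeAsNetAddr(buffer);
#endif
}

int main() { TestOneInput({0x00, 0x01}); }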
endif # ENABLE_FUZZ
nodist_test_test_bitcoin_SOURCES = $(GENERATED_TEST_FILES)
@@ -639,7 +983,7 @@ endif
%.cpp.test: %.cpp
@echo Running tests: `cat $< | grep -E "(BOOST_FIXTURE_TEST_SUITE\\(|BOOST_AUTO_TEST_SUITE\\()" | cut -d '(' -f 2 | cut -d ',' -f 1 | cut -d ')' -f 1` from $<
- $(AM_V_at)$(TEST_BINARY) -l test_suite -t "`cat $< | grep -E "(BOOST_FIXTURE_TEST_SUITE\\(|BOOST_AUTO_TEST_SUITE\\()" | cut -d '(' -f 2 | cut -d ',' -f 1 | cut -d ')' -f 1`" > $<.log 2>&1 || (cat $<.log && false)
+ $(AM_V_at)$(TEST_BINARY) --catch_system_errors=no -l test_suite -t "`cat $< | grep -E "(BOOST_FIXTURE_TEST_SUITE\\(|BOOST_AUTO_TEST_SUITE\\()" | cut -d '(' -f 2 | cut -d ',' -f 1 | cut -d ')' -f 1`" > $<.log 2>&1 || (cat $<.log && false)
%.json.h: %.json
@$(MKDIR_P) $(@D)
diff --git a/src/addrman.cpp b/src/addrman.cpp
index 121ae4bf7e..2f8a3a0bd5 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -6,8 +6,8 @@
#include <addrman.h>
#include <hash.h>
-#include <serialize.h>
#include <logging.h>
+#include <serialize.h>
int CAddrInfo::GetTriedBucket(const uint256& nKey, const std::vector<bool> &asmap) const
{
@@ -15,7 +15,7 @@ int CAddrInfo::GetTriedBucket(const uint256& nKey, const std::vector<bool> &asma
uint64_t hash2 = (CHashWriter(SER_GETHASH, 0) << nKey << GetGroup(asmap) << (hash1 % ADDRMAN_TRIED_BUCKETS_PER_GROUP)).GetCheapHash();
int tried_bucket = hash2 % ADDRMAN_TRIED_BUCKET_COUNT;
uint32_t mapped_as = GetMappedAS(asmap);
- LogPrint(BCLog::NET, "IP %s mapped to AS%i belongs to tried bucket %i.\n", ToStringIP(), mapped_as, tried_bucket);
+ LogPrint(BCLog::NET, "IP %s mapped to AS%i belongs to tried bucket %i\n", ToStringIP(), mapped_as, tried_bucket);
return tried_bucket;
}
@@ -26,7 +26,7 @@ int CAddrInfo::GetNewBucket(const uint256& nKey, const CNetAddr& src, const std:
uint64_t hash2 = (CHashWriter(SER_GETHASH, 0) << nKey << vchSourceGroupKey << (hash1 % ADDRMAN_NEW_BUCKETS_PER_SOURCE_GROUP)).GetCheapHash();
int new_bucket = hash2 % ADDRMAN_NEW_BUCKET_COUNT;
uint32_t mapped_as = GetMappedAS(asmap);
- LogPrint(BCLog::NET, "IP %s mapped to AS%i belongs to new bucket %i.\n", ToStringIP(), mapped_as, new_bucket);
+ LogPrint(BCLog::NET, "IP %s mapped to AS%i belongs to new bucket %i\n", ToStringIP(), mapped_as, new_bucket);
return new_bucket;
}
@@ -630,12 +630,12 @@ std::vector<bool> CAddrMan::DecodeAsmap(fs::path path)
FILE *filestr = fsbridge::fopen(path, "rb");
CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
if (file.IsNull()) {
- LogPrintf("Failed to open asmap file from disk.\n");
+ LogPrintf("Failed to open asmap file from disk\n");
return bits;
}
fseek(filestr, 0, SEEK_END);
int length = ftell(filestr);
- LogPrintf("Opened asmap file %s (%d bytes) from disk.\n", path, length);
+ LogPrintf("Opened asmap file %s (%d bytes) from disk\n", path, length);
fseek(filestr, 0, SEEK_SET);
char cur_byte;
for (int i = 0; i < length; ++i) {
diff --git a/src/addrman.h b/src/addrman.h
index 5901611bee..8e82020df0 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -6,23 +6,22 @@
#ifndef BITCOIN_ADDRMAN_H
#define BITCOIN_ADDRMAN_H
+#include <clientversion.h>
#include <netaddress.h>
#include <protocol.h>
#include <random.h>
#include <sync.h>
#include <timedata.h>
#include <util/system.h>
-#include <clientversion.h>
+#include <fs.h>
+#include <hash.h>
+#include <iostream>
#include <map>
#include <set>
#include <stdint.h>
-#include <vector>
-#include <iostream>
#include <streams.h>
-#include <fs.h>
-#include <hash.h>
-
+#include <vector>
/**
* Extended statistics about a CAddress
diff --git a/src/banman.h b/src/banman.h
index 8984874914..6bea2e75e9 100644
--- a/src/banman.h
+++ b/src/banman.h
@@ -5,16 +5,19 @@
#ifndef BITCOIN_BANMAN_H
#define BITCOIN_BANMAN_H
-#include <cstdint>
-#include <memory>
-
#include <addrdb.h>
#include <fs.h>
#include <net_types.h> // For banmap_t
#include <sync.h>
+#include <chrono>
+#include <cstdint>
+#include <memory>
+
// NOTE: When adjusting this, update rpcnet:setban's help ("24h")
static constexpr unsigned int DEFAULT_MISBEHAVING_BANTIME = 60 * 60 * 24; // Default 24-hour ban
+// How often to dump addresses to banlist.dat
+static constexpr std::chrono::minutes DUMP_BANS_INTERVAL{15};
class CClientUIInterface;
class CNetAddr;
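DUMP_BANS_INTERVAL gives the dump frequency a std::chrono type rather than a bare integer. A small illustrative sketch of driving a periodic task from such a constant; the scheduling code here is a stand-in, not BanMan's.

// Illustrative only: using a std::chrono-typed interval like DUMP_BANS_INTERVAL
// above to decide when a periodic job is due.
#include <chrono>
#include <iostream>

static constexpr std::chrono::minutes DUMP_INTERVAL{15};

int main()
{
    using clock = std::chrono::steady_clock;
    auto last_dump = clock::now();
    // In a real loop this check would run alongside other work.
    if (clock::now() - last_dump >= DUMP_INTERVAL) {
        std::cout << "dump banlist\n";
        last_dump = clock::now();
    }
    std::cout << "next dump due within " << DUMP_INTERVAL.count() << " minutes\n";
}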
diff --git a/src/bench/ccoins_caching.cpp b/src/bench/ccoins_caching.cpp
index c313029ea8..e9dd40293f 100644
--- a/src/bench/ccoins_caching.cpp
+++ b/src/bench/ccoins_caching.cpp
@@ -6,47 +6,10 @@
#include <coins.h>
#include <policy/policy.h>
#include <script/signingprovider.h>
+#include <test/util/transaction_utils.h>
#include <vector>
-// FIXME: Dedup with SetupDummyInputs in test/transaction_tests.cpp.
-//
-// Helper: create two dummy transactions, each with
-// two outputs. The first has 11 and 50 COIN outputs
-// paid to a TX_PUBKEY, the second 21 and 22 COIN outputs
-// paid to a TX_PUBKEYHASH.
-//
-static std::vector<CMutableTransaction>
-SetupDummyInputs(FillableSigningProvider& keystoreRet, CCoinsViewCache& coinsRet)
-{
- std::vector<CMutableTransaction> dummyTransactions;
- dummyTransactions.resize(2);
-
- // Add some keys to the keystore:
- CKey key[4];
- for (int i = 0; i < 4; i++) {
- key[i].MakeNewKey(i % 2);
- keystoreRet.AddKey(key[i]);
- }
-
- // Create some dummy input transactions
- dummyTransactions[0].vout.resize(2);
- dummyTransactions[0].vout[0].nValue = 11 * COIN;
- dummyTransactions[0].vout[0].scriptPubKey << ToByteVector(key[0].GetPubKey()) << OP_CHECKSIG;
- dummyTransactions[0].vout[1].nValue = 50 * COIN;
- dummyTransactions[0].vout[1].scriptPubKey << ToByteVector(key[1].GetPubKey()) << OP_CHECKSIG;
- AddCoins(coinsRet, CTransaction(dummyTransactions[0]), 0);
-
- dummyTransactions[1].vout.resize(2);
- dummyTransactions[1].vout[0].nValue = 21 * COIN;
- dummyTransactions[1].vout[0].scriptPubKey = GetScriptForDestination(PKHash(key[2].GetPubKey()));
- dummyTransactions[1].vout[1].nValue = 22 * COIN;
- dummyTransactions[1].vout[1].scriptPubKey = GetScriptForDestination(PKHash(key[3].GetPubKey()));
- AddCoins(coinsRet, CTransaction(dummyTransactions[1]), 0);
-
- return dummyTransactions;
-}
-
// Microbenchmark for simple accesses to a CCoinsViewCache database. Note from
// laanwj, "replicating the actual usage patterns of the client is hard though,
// many times micro-benchmarks of the database showed completely different
@@ -58,7 +21,8 @@ static void CCoinsCaching(benchmark::State& state)
FillableSigningProvider keystore;
CCoinsView coinsDummy;
CCoinsViewCache coins(&coinsDummy);
- std::vector<CMutableTransaction> dummyTransactions = SetupDummyInputs(keystore, coins);
+ std::vector<CMutableTransaction> dummyTransactions =
+ SetupDummyInputs(keystore, coins, {11 * COIN, 50 * COIN, 21 * COIN, 22 * COIN});
CMutableTransaction t1;
t1.vin.resize(3);
diff --git a/src/bench/examples.cpp b/src/bench/examples.cpp
index 60a4fbf0ba..a2fdab5609 100644
--- a/src/bench/examples.cpp
+++ b/src/bench/examples.cpp
@@ -10,7 +10,7 @@
static void Sleep100ms(benchmark::State& state)
{
while (state.KeepRunning()) {
- MilliSleep(100);
+ UninterruptibleSleep(std::chrono::milliseconds{100});
}
}
diff --git a/src/bench/verify_script.cpp b/src/bench/verify_script.cpp
index 31e166cc27..0b34ae3f95 100644
--- a/src/bench/verify_script.cpp
+++ b/src/bench/verify_script.cpp
@@ -71,4 +71,27 @@ static void VerifyScriptBench(benchmark::State& state)
}
}
+static void VerifyNestedIfScript(benchmark::State& state) {
+ std::vector<std::vector<unsigned char>> stack;
+ CScript script;
+ for (int i = 0; i < 100; ++i) {
+ script << OP_1 << OP_IF;
+ }
+ for (int i = 0; i < 1000; ++i) {
+ script << OP_1;
+ }
+ for (int i = 0; i < 100; ++i) {
+ script << OP_ENDIF;
+ }
+ while (state.KeepRunning()) {
+ auto stack_copy = stack;
+ ScriptError error;
+ bool ret = EvalScript(stack_copy, script, 0, BaseSignatureChecker(), SigVersion::BASE, &error);
+ assert(ret);
+ }
+}
+
+
BENCHMARK(VerifyScriptBench, 6300);
+
+BENCHMARK(VerifyNestedIfScript, 100);
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index c085095a2b..6982eaab61 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -524,7 +524,7 @@ static int CommandLineRPC(int argc, char *argv[])
}
catch (const CConnectionFailed&) {
if (fWait)
- MilliSleep(1000);
+ UninterruptibleSleep(std::chrono::milliseconds{1000});
else
throw;
}
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index 735f55fba7..b4b2d7ed52 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -20,6 +20,7 @@
#include <util/moneystr.h>
#include <util/rbf.h>
#include <util/strencodings.h>
+#include <util/string.h>
#include <util/system.h>
#include <util/translation.h>
@@ -357,7 +358,7 @@ static void MutateTxAddOutMultiSig(CMutableTransaction& tx, const std::string& s
if (required < 1 || required > MAX_PUBKEYS_PER_MULTISIG || numkeys < 1 || numkeys > MAX_PUBKEYS_PER_MULTISIG || numkeys < required)
throw std::runtime_error("multisig parameter mismatch. Required " \
- + std::to_string(required) + " of " + std::to_string(numkeys) + "signatures.");
+ + ToString(required) + " of " + ToString(numkeys) + " signatures.");
// extract and validate PUBKEYs
std::vector<CPubKey> pubkeys;
diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp
index 4b5cea4849..e284dce0d5 100644
--- a/src/bitcoind.cpp
+++ b/src/bitcoind.cpp
@@ -29,7 +29,7 @@ static void WaitForShutdown(NodeContext& node)
{
while (!ShutdownRequested())
{
- MilliSleep(200);
+ UninterruptibleSleep(std::chrono::milliseconds{200});
}
Interrupt(node);
}
diff --git a/src/blockencodings.h b/src/blockencodings.h
index 55ed8989bb..be50166cfc 100644
--- a/src/blockencodings.h
+++ b/src/blockencodings.h
@@ -10,18 +10,29 @@
class CTxMemPool;
-// Dumb helper to handle CTransaction compression at serialize-time
-struct TransactionCompressor {
-private:
- CTransactionRef& tx;
-public:
- explicit TransactionCompressor(CTransactionRef& txIn) : tx(txIn) {}
+// Transaction compression schemes for compact block relay can be introduced by writing
+// an actual formatter here.
+using TransactionCompression = DefaultFormatter;
- ADD_SERIALIZE_METHODS;
+class DifferenceFormatter
+{
+ uint64_t m_shift = 0;
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(tx); //TODO: Compress tx encoding
+public:
+ template<typename Stream, typename I>
+ void Ser(Stream& s, I v)
+ {
+ if (v < m_shift || v >= std::numeric_limits<uint64_t>::max()) throw std::ios_base::failure("differential value overflow");
+ WriteCompactSize(s, v - m_shift);
+ m_shift = uint64_t(v) + 1;
+ }
+ template<typename Stream, typename I>
+ void Unser(Stream& s, I& v)
+ {
+ uint64_t n = ReadCompactSize(s);
+ m_shift += n;
+ if (m_shift < n || m_shift >= std::numeric_limits<uint64_t>::max() || m_shift < std::numeric_limits<I>::min() || m_shift > std::numeric_limits<I>::max()) throw std::ios_base::failure("differential value overflow");
+ v = I(m_shift++);
}
};
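DifferenceFormatter serializes each index as its difference from the previous index plus one, so a strictly increasing index list encodes as small compact-size values. The same round-trip arithmetic in a standalone sketch, with plain vectors standing in for Bitcoin's stream classes:

// Standalone sketch of the differential encoding used by DifferenceFormatter.
#include <cassert>
#include <cstdint>
#include <vector>

// Ser: emit v - shift, then set shift = v + 1 (indexes must be increasing).
static std::vector<uint64_t> EncodeDiffs(const std::vector<uint64_t>& indexes)
{
    std::vector<uint64_t> out;
    uint64_t shift = 0;
    for (uint64_t v : indexes) {
        assert(v >= shift);   // the real formatter throws on overflow instead
        out.push_back(v - shift);
        shift = v + 1;
    }
    return out;
}

// Unser: accumulate each delta onto the running shift and re-emit the index.
static std::vector<uint64_t> DecodeDiffs(const std::vector<uint64_t>& diffs)
{
    std::vector<uint64_t> out;
    uint64_t shift = 0;
    for (uint64_t n : diffs) {
        shift += n;
        out.push_back(shift++);
    }
    return out;
}

int main()
{
    const std::vector<uint64_t> indexes{1, 5, 6, 10};
    const auto diffs = EncodeDiffs(indexes);   // {1, 3, 0, 3}
    assert(DecodeDiffs(diffs) == indexes);
}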
@@ -31,39 +42,9 @@ public:
uint256 blockhash;
std::vector<uint16_t> indexes;
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(blockhash);
- uint64_t indexes_size = (uint64_t)indexes.size();
- READWRITE(COMPACTSIZE(indexes_size));
- if (ser_action.ForRead()) {
- size_t i = 0;
- while (indexes.size() < indexes_size) {
- indexes.resize(std::min((uint64_t)(1000 + indexes.size()), indexes_size));
- for (; i < indexes.size(); i++) {
- uint64_t index = 0;
- READWRITE(COMPACTSIZE(index));
- if (index > std::numeric_limits<uint16_t>::max())
- throw std::ios_base::failure("index overflowed 16 bits");
- indexes[i] = index;
- }
- }
-
- int32_t offset = 0;
- for (size_t j = 0; j < indexes.size(); j++) {
- if (int32_t(indexes[j]) + offset > std::numeric_limits<uint16_t>::max())
- throw std::ios_base::failure("indexes overflowed 16 bits");
- indexes[j] = indexes[j] + offset;
- offset = int32_t(indexes[j]) + 1;
- }
- } else {
- for (size_t i = 0; i < indexes.size(); i++) {
- uint64_t index = indexes[i] - (i == 0 ? 0 : (indexes[i - 1] + 1));
- READWRITE(COMPACTSIZE(index));
- }
- }
+ SERIALIZE_METHODS(BlockTransactionsRequest, obj)
+ {
+ READWRITE(obj.blockhash, Using<VectorFormatter<DifferenceFormatter>>(obj.indexes));
}
};
@@ -77,24 +58,9 @@ public:
explicit BlockTransactions(const BlockTransactionsRequest& req) :
blockhash(req.blockhash), txn(req.indexes.size()) {}
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(blockhash);
- uint64_t txn_size = (uint64_t)txn.size();
- READWRITE(COMPACTSIZE(txn_size));
- if (ser_action.ForRead()) {
- size_t i = 0;
- while (txn.size() < txn_size) {
- txn.resize(std::min((uint64_t)(1000 + txn.size()), txn_size));
- for (; i < txn.size(); i++)
- READWRITE(TransactionCompressor(txn[i]));
- }
- } else {
- for (size_t i = 0; i < txn.size(); i++)
- READWRITE(TransactionCompressor(txn[i]));
- }
+ SERIALIZE_METHODS(BlockTransactions, obj)
+ {
+ READWRITE(obj.blockhash, Using<VectorFormatter<TransactionCompression>>(obj.txn));
}
};
@@ -105,17 +71,7 @@ struct PrefilledTransaction {
uint16_t index;
CTransactionRef tx;
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- uint64_t idx = index;
- READWRITE(COMPACTSIZE(idx));
- if (idx > std::numeric_limits<uint16_t>::max())
- throw std::ios_base::failure("index overflowed 16-bits");
- index = idx;
- READWRITE(TransactionCompressor(tx));
- }
+ SERIALIZE_METHODS(PrefilledTransaction, obj) { READWRITE(COMPACTSIZE(obj.index), Using<TransactionCompression>(obj.tx)); }
};
typedef enum ReadStatus_t
@@ -153,43 +109,15 @@ public:
size_t BlockTxCount() const { return shorttxids.size() + prefilledtxn.size(); }
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(header);
- READWRITE(nonce);
-
- uint64_t shorttxids_size = (uint64_t)shorttxids.size();
- READWRITE(COMPACTSIZE(shorttxids_size));
+ SERIALIZE_METHODS(CBlockHeaderAndShortTxIDs, obj)
+ {
+ READWRITE(obj.header, obj.nonce, Using<VectorFormatter<CustomUintFormatter<SHORTTXIDS_LENGTH>>>(obj.shorttxids), obj.prefilledtxn);
if (ser_action.ForRead()) {
- size_t i = 0;
- while (shorttxids.size() < shorttxids_size) {
- shorttxids.resize(std::min((uint64_t)(1000 + shorttxids.size()), shorttxids_size));
- for (; i < shorttxids.size(); i++) {
- uint32_t lsb = 0; uint16_t msb = 0;
- READWRITE(lsb);
- READWRITE(msb);
- shorttxids[i] = (uint64_t(msb) << 32) | uint64_t(lsb);
- static_assert(SHORTTXIDS_LENGTH == 6, "shorttxids serialization assumes 6-byte shorttxids");
- }
- }
- } else {
- for (size_t i = 0; i < shorttxids.size(); i++) {
- uint32_t lsb = shorttxids[i] & 0xffffffff;
- uint16_t msb = (shorttxids[i] >> 32) & 0xffff;
- READWRITE(lsb);
- READWRITE(msb);
+ if (obj.BlockTxCount() > std::numeric_limits<uint16_t>::max()) {
+ throw std::ios_base::failure("indexes overflowed 16 bits");
}
+ obj.FillShortTxIDSelector();
}
-
- READWRITE(prefilledtxn);
-
- if (BlockTxCount() > std::numeric_limits<uint16_t>::max())
- throw std::ios_base::failure("indexes overflowed 16 bits");
-
- if (ser_action.ForRead())
- FillShortTxIDSelector();
}
};
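The removed code spelled out the 6-byte short-ID encoding by hand: a 32-bit lsb followed by a 16-bit msb, i.e. the low 48 bits in little-endian order, which the new code delegates to CustomUintFormatter<SHORTTXIDS_LENGTH>. A standalone sketch of that packing, based only on the removed lsb/msb split:

// Standalone sketch of the 6-byte short-ID packing the removed code did by hand.
#include <array>
#include <cassert>
#include <cstdint>

static std::array<uint8_t, 6> PackShortTxId(uint64_t shorttxid)
{
    const uint32_t lsb = shorttxid & 0xffffffff;
    const uint16_t msb = (shorttxid >> 32) & 0xffff;
    std::array<uint8_t, 6> out{};
    for (int i = 0; i < 4; ++i) out[i] = (lsb >> (8 * i)) & 0xff;       // 4 lsb bytes
    for (int i = 0; i < 2; ++i) out[4 + i] = (msb >> (8 * i)) & 0xff;   // 2 msb bytes
    return out;
}

static uint64_t UnpackShortTxId(const std::array<uint8_t, 6>& in)
{
    uint64_t v = 0;
    for (int i = 0; i < 6; ++i) v |= uint64_t(in[i]) << (8 * i);
    return v;
}

int main()
{
    const uint64_t id = 0x0000a1b2c3d4e5f6;   // only the low 48 bits are kept
    assert(UnpackShortTxId(PackShortTxId(id)) == id);
}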
diff --git a/src/chain.h b/src/chain.h
index 48bcb8bfdd..64c016a1d6 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -333,12 +333,12 @@ public:
SERIALIZE_METHODS(CDiskBlockIndex, obj)
{
int _nVersion = s.GetVersion();
- if (!(s.GetType() & SER_GETHASH)) READWRITE(VARINT(_nVersion, VarIntMode::NONNEGATIVE_SIGNED));
+ if (!(s.GetType() & SER_GETHASH)) READWRITE(VARINT_MODE(_nVersion, VarIntMode::NONNEGATIVE_SIGNED));
- READWRITE(VARINT(obj.nHeight, VarIntMode::NONNEGATIVE_SIGNED));
+ READWRITE(VARINT_MODE(obj.nHeight, VarIntMode::NONNEGATIVE_SIGNED));
READWRITE(VARINT(obj.nStatus));
READWRITE(VARINT(obj.nTx));
- if (obj.nStatus & (BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO)) READWRITE(VARINT(obj.nFile, VarIntMode::NONNEGATIVE_SIGNED));
+ if (obj.nStatus & (BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO)) READWRITE(VARINT_MODE(obj.nFile, VarIntMode::NONNEGATIVE_SIGNED));
if (obj.nStatus & BLOCK_HAVE_DATA) READWRITE(VARINT(obj.nDataPos));
if (obj.nStatus & BLOCK_HAVE_UNDO) READWRITE(VARINT(obj.nUndoPos));
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 31592b0f0a..a9183ac970 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -135,6 +135,7 @@ public:
fDefaultConsistencyChecks = false;
fRequireStandard = true;
m_is_test_chain = false;
+ m_is_mockable_chain = false;
checkpointData = {
{
@@ -231,7 +232,7 @@ public:
fDefaultConsistencyChecks = false;
fRequireStandard = false;
m_is_test_chain = true;
-
+ m_is_mockable_chain = false;
checkpointData = {
{
@@ -303,6 +304,7 @@ public:
fDefaultConsistencyChecks = true;
fRequireStandard = true;
m_is_test_chain = true;
+ m_is_mockable_chain = true;
checkpointData = {
{
diff --git a/src/chainparams.h b/src/chainparams.h
index 63398e587e..379c75e4be 100644
--- a/src/chainparams.h
+++ b/src/chainparams.h
@@ -68,6 +68,8 @@ public:
bool RequireStandard() const { return fRequireStandard; }
/** If this chain is exclusively used for testing */
bool IsTestChain() const { return m_is_test_chain; }
+ /** If this chain allows time to be mocked */
+ bool IsMockableChain() const { return m_is_mockable_chain; }
uint64_t PruneAfterHeight() const { return nPruneAfterHeight; }
/** Minimum free space (in GB) needed for data directory */
uint64_t AssumedBlockchainSize() const { return m_assumed_blockchain_size; }
@@ -102,6 +104,7 @@ protected:
bool fDefaultConsistencyChecks;
bool fRequireStandard;
bool m_is_test_chain;
+ bool m_is_mockable_chain;
CCheckpointData checkpointData;
ChainTxData chainTxData;
};
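IsMockableChain() lets callers restrict mock-time style behaviour to chains that opt in (only regtest sets m_is_mockable_chain in the hunks above). A hypothetical guard sketch, not the node's actual -mocktime handling:

// Hypothetical guard sketch: the flags mirror the accessors added above.
#include <iostream>

struct ChainFlags {            // stand-in for the relevant CChainParams bits
    bool is_test_chain;
    bool is_mockable_chain;
};

static bool AllowMockTime(const ChainFlags& chain)
{
    // Only a chain that declares itself mockable (e.g. regtest) should accept
    // a mocked clock.
    return chain.is_mockable_chain;
}

int main()
{
    const ChainFlags mainnet{false, false};
    const ChainFlags regtest{true, true};
    std::cout << AllowMockTime(mainnet) << ' ' << AllowMockTime(regtest) << '\n';
}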
diff --git a/src/compressor.h b/src/compressor.h
index 7bb60d311e..223603e7e9 100644
--- a/src/compressor.h
+++ b/src/compressor.h
@@ -15,7 +15,17 @@ bool CompressScript(const CScript& script, std::vector<unsigned char> &out);
unsigned int GetSpecialScriptSize(unsigned int nSize);
bool DecompressScript(CScript& script, unsigned int nSize, const std::vector<unsigned char> &out);
+/**
+ * Compress amount.
+ *
+ * nAmount is of type uint64_t and thus cannot be negative. If you're passing in
+ * a CAmount (int64_t), make sure to properly handle the case where the amount
+ * is negative before calling CompressAmount(...).
+ *
+ * @pre Function defined only for 0 <= nAmount <= MAX_MONEY.
+ */
uint64_t CompressAmount(uint64_t nAmount);
+
uint64_t DecompressAmount(uint64_t nAmount);
/** Compact serializer for scripts.
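The comment above documents CompressAmount()'s domain: 0 <= nAmount <= MAX_MONEY, with negative CAmount values handled by the caller. A caller-side sketch of that check follows; the constant and the Compress stand-in are illustrative, not the project's implementation.

// Caller-side sketch of the documented precondition: range-check a signed
// CAmount before handing it to the compression routine.
#include <cstdint>
#include <optional>

using CAmount = int64_t;
static constexpr CAmount MAX_MONEY_SKETCH = 21000000LL * 100000000LL; // 21M coins in satoshi

static uint64_t CompressSketch(uint64_t n) { return n; } // placeholder for CompressAmount()

static std::optional<uint64_t> CompressChecked(CAmount amount)
{
    if (amount < 0 || amount > MAX_MONEY_SKETCH) return std::nullopt; // outside the documented domain
    return CompressSketch(static_cast<uint64_t>(amount));
}

int main()
{
    return CompressChecked(-1).has_value() ? 1 : 0; // negative amounts are rejected
}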
diff --git a/src/consensus/merkle.cpp b/src/consensus/merkle.cpp
index 843985e54c..241cc316a6 100644
--- a/src/consensus/merkle.cpp
+++ b/src/consensus/merkle.cpp
@@ -10,7 +10,7 @@
that the following merkle tree algorithm has a serious flaw related to
duplicate txids, resulting in a vulnerability (CVE-2012-2459).
- The reason is that if the number of hashes in the list at a given time
+ The reason is that if the number of hashes in the list at a given level
is odd, the last one is duplicated before computing the next level (which
is unusual in Merkle trees). This results in certain sequences of
transactions leading to the same merkle root. For example, these two
diff --git a/src/consensus/validation.h b/src/consensus/validation.h
index 8a3abb31f4..a79e7b9d12 100644
--- a/src/consensus/validation.h
+++ b/src/consensus/validation.h
@@ -16,7 +16,7 @@
* provider of the transaction should be banned/ignored/disconnected/etc.
*/
enum class TxValidationResult {
- TX_RESULT_UNSET, //!< initial value. Tx has not yet been rejected
+ TX_RESULT_UNSET = 0, //!< initial value. Tx has not yet been rejected
TX_CONSENSUS, //!< invalid by consensus rules
/**
* Invalid by a change to consensus rules more recent than SegWit.
@@ -50,7 +50,7 @@ enum class TxValidationResult {
* useful for some other use-cases.
*/
enum class BlockValidationResult {
- BLOCK_RESULT_UNSET, //!< initial value. Block has not yet been rejected
+ BLOCK_RESULT_UNSET = 0, //!< initial value. Block has not yet been rejected
BLOCK_CONSENSUS, //!< invalid by consensus rules (excluding any below reasons)
/**
* Invalid by a change to consensus rules more recent than SegWit.
@@ -71,31 +71,31 @@ enum class BlockValidationResult {
-/** Base class for capturing information about block/transaction validation. This is subclassed
+/** Template for capturing information about block/transaction validation. This is instantiated
* by TxValidationState and BlockValidationState for validation information on transactions
* and blocks respectively. */
+template <typename Result>
class ValidationState {
private:
enum mode_state {
MODE_VALID, //!< everything ok
MODE_INVALID, //!< network rule violation (DoS value may be set)
MODE_ERROR, //!< run-time error
- } m_mode;
+ } m_mode{MODE_VALID};
+ Result m_result{};
std::string m_reject_reason;
std::string m_debug_message;
-protected:
- void Invalid(const std::string &reject_reason="",
+public:
+ bool Invalid(Result result,
+ const std::string &reject_reason="",
const std::string &debug_message="")
{
+ m_result = result;
m_reject_reason = reject_reason;
m_debug_message = debug_message;
if (m_mode != MODE_ERROR) m_mode = MODE_INVALID;
+ return false;
}
-public:
- // ValidationState is abstract. Have a pure virtual destructor.
- virtual ~ValidationState() = 0;
-
- ValidationState() : m_mode(MODE_VALID) {}
bool Error(const std::string& reject_reason)
{
if (m_mode == MODE_VALID)
@@ -106,40 +106,25 @@ public:
bool IsValid() const { return m_mode == MODE_VALID; }
bool IsInvalid() const { return m_mode == MODE_INVALID; }
bool IsError() const { return m_mode == MODE_ERROR; }
+ Result GetResult() const { return m_result; }
std::string GetRejectReason() const { return m_reject_reason; }
std::string GetDebugMessage() const { return m_debug_message; }
-};
+ std::string ToString() const
+ {
+ if (IsValid()) {
+ return "Valid";
+ }
-inline ValidationState::~ValidationState() {};
+ if (!m_debug_message.empty()) {
+ return m_reject_reason + ", " + m_debug_message;
+ }
-class TxValidationState : public ValidationState {
-private:
- TxValidationResult m_result = TxValidationResult::TX_RESULT_UNSET;
-public:
- bool Invalid(TxValidationResult result,
- const std::string &reject_reason="",
- const std::string &debug_message="")
- {
- m_result = result;
- ValidationState::Invalid(reject_reason, debug_message);
- return false;
+ return m_reject_reason;
}
- TxValidationResult GetResult() const { return m_result; }
};
-class BlockValidationState : public ValidationState {
-private:
- BlockValidationResult m_result = BlockValidationResult::BLOCK_RESULT_UNSET;
-public:
- bool Invalid(BlockValidationResult result,
- const std::string &reject_reason="",
- const std::string &debug_message="") {
- m_result = result;
- ValidationState::Invalid(reject_reason, debug_message);
- return false;
- }
- BlockValidationResult GetResult() const { return m_result; }
-};
+class TxValidationState : public ValidationState<TxValidationResult> {};
+class BlockValidationState : public ValidationState<BlockValidationResult> {};
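The refactor keeps Invalid() returning false, so validation code can record a rejection and return in a single statement. A simplified standalone mirror of the pattern, to show that caller idiom (the real class is the templated ValidationState above):

// Simplified stand-in for the ValidationState<Result> pattern, illustrating
// `return state.Invalid(result, reason);` as a one-line rejection.
#include <iostream>
#include <string>

enum class TxResult { UNSET = 0, CONSENSUS, NOT_STANDARD };

class SketchValidationState
{
    bool m_valid{true};
    TxResult m_result{};
    std::string m_reject_reason;

public:
    bool Invalid(TxResult result, const std::string& reject_reason = "")
    {
        m_result = result;
        m_reject_reason = reject_reason;
        m_valid = false;
        return false;
    }
    bool IsValid() const { return m_valid; }
    TxResult GetResult() const { return m_result; }
    std::string ToString() const { return m_valid ? "Valid" : m_reject_reason; }
};

static bool CheckDummyTx(int n_inputs, SketchValidationState& state)
{
    if (n_inputs == 0) return state.Invalid(TxResult::CONSENSUS, "bad-txns-vin-empty");
    return true;
}

int main()
{
    SketchValidationState state;
    if (!CheckDummyTx(0, state)) {
        std::cout << state.ToString() << '\n';   // prints the reject reason
    }
}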
// These implement the weight = (stripped_size * 4) + witness_size formula,
// using only serialization with and without witness data. As witness_size
diff --git a/src/core_read.cpp b/src/core_read.cpp
index 9a65b02585..d036955641 100644
--- a/src/core_read.cpp
+++ b/src/core_read.cpp
@@ -59,6 +59,14 @@ CScript ParseScript(const std::string& s)
{
// Number
int64_t n = atoi64(*w);
+
+ //limit the range of numbers ParseScript accepts in decimal
+ //since numbers outside -0xFFFFFFFF...0xFFFFFFFF are illegal in scripts
+ if (n > int64_t{0xffffffff} || n < -1 * int64_t{0xffffffff}) {
+ throw std::runtime_error("script parse error: decimal numeric value only allowed in the "
+ "range -0xFFFFFFFF...0xFFFFFFFF");
+ }
+
result << n;
}
else if (w->substr(0,2) == "0x" && w->size() > 2 && IsHex(std::string(w->begin()+2, w->end())))
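The added check restricts decimal literals to -0xFFFFFFFF..0xFFFFFFFF before they are pushed onto the script. The same boundary test in a tiny standalone sketch (ParseScript itself is not reproduced):

// Standalone sketch of the decimal bound added above.
#include <cassert>
#include <cstdint>

static bool DecimalInScriptRange(int64_t n)
{
    return n <= int64_t{0xffffffff} && n >= -1 * int64_t{0xffffffff};
}

int main()
{
    assert(DecimalInScriptRange(4294967295));    //  0xFFFFFFFF: allowed
    assert(!DecimalInScriptRange(4294967296));   //  0x100000000: rejected
    assert(DecimalInScriptRange(-4294967295));   // -0xFFFFFFFF: allowed
}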
diff --git a/src/crc32c/.appveyor.yml b/src/crc32c/.appveyor.yml
new file mode 100644
index 0000000000..7345746750
--- /dev/null
+++ b/src/crc32c/.appveyor.yml
@@ -0,0 +1,37 @@
+# Build matrix / environment variables are explained on:
+# https://www.appveyor.com/docs/appveyor-yml/
+# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml
+
+version: "{build}"
+
+environment:
+ matrix:
+ # AppVeyor currently has no custom job name feature.
+ # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs
+ - JOB: Visual Studio 2017
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+ CMAKE_GENERATOR: Visual Studio 15 2017
+
+platform:
+ - x86
+ - x64
+
+configuration:
+ - RelWithDebInfo
+ - Debug
+
+build_script:
+ - git submodule update --init --recursive
+ - mkdir build
+ - cd build
+ - if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64
+ - cmake --version
+ - cmake .. -G "%CMAKE_GENERATOR%" -DCRC32C_USE_GLOG=0
+ -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%"
+ - cmake --build . --config "%CONFIGURATION%"
+ - cd ..
+
+test_script:
+ - build\%CONFIGURATION%\crc32c_tests.exe
+ - build\%CONFIGURATION%\crc32c_capi_tests.exe
+ - build\%CONFIGURATION%\crc32c_bench.exe
diff --git a/src/crc32c/.clang-format b/src/crc32c/.clang-format
new file mode 100644
index 0000000000..be9b80799f
--- /dev/null
+++ b/src/crc32c/.clang-format
@@ -0,0 +1,3 @@
+---
+Language: Cpp
+BasedOnStyle: Google
diff --git a/src/crc32c/.clang_complete b/src/crc32c/.clang_complete
new file mode 100644
index 0000000000..fa6757c6f3
--- /dev/null
+++ b/src/crc32c/.clang_complete
@@ -0,0 +1,8 @@
+-Ibuild/include/
+-Ibuild/third_party/glog/
+-Iinclude/
+-Ithird_party/benchmark/include/
+-Ithird_party/googletest/googletest/include/
+-Ithird_party/googletest/googlemock/include/
+-Ithird_party/glog/src/
+-std=c++11
diff --git a/src/crc32c/.gitignore b/src/crc32c/.gitignore
new file mode 100644
index 0000000000..61769727e3
--- /dev/null
+++ b/src/crc32c/.gitignore
@@ -0,0 +1,8 @@
+# Editors.
+*.sw*
+.DS_Store
+/.vscode
+
+# Build directory.
+build/
+out/
diff --git a/src/crc32c/.gitmodules b/src/crc32c/.gitmodules
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/src/crc32c/.gitmodules
diff --git a/src/crc32c/.travis.yml b/src/crc32c/.travis.yml
new file mode 100644
index 0000000000..d990a89f07
--- /dev/null
+++ b/src/crc32c/.travis.yml
@@ -0,0 +1,76 @@
+# Build matrix / environment variables are explained on:
+# http://about.travis-ci.org/docs/user/build-configuration/
+# This file can be validated on: http://lint.travis-ci.org/
+
+language: cpp
+dist: bionic
+osx_image: xcode10.3
+
+compiler:
+- gcc
+- clang
+os:
+- linux
+- osx
+
+env:
+- GLOG=1 SHARED_LIB=0 BUILD_TYPE=Debug
+- GLOG=1 SHARED_LIB=0 BUILD_TYPE=RelWithDebInfo
+- GLOG=0 SHARED_LIB=0 BUILD_TYPE=Debug
+- GLOG=0 SHARED_LIB=0 BUILD_TYPE=RelWithDebInfo
+- GLOG=0 SHARED_LIB=1 BUILD_TYPE=Debug
+- GLOG=0 SHARED_LIB=1 BUILD_TYPE=RelWithDebInfo
+
+addons:
+ apt:
+ sources:
+ - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main'
+ key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
+ - sourceline: 'ppa:ubuntu-toolchain-r/test'
+ packages:
+ - clang-9
+ - cmake
+ - gcc-9
+ - g++-9
+ - ninja-build
+ homebrew:
+ packages:
+ - cmake
+ - gcc@9
+ - llvm@9
+ - ninja
+ update: true
+
+install:
+# The following Homebrew packages aren't linked by default, and need to be
+# prepended to the path explicitly.
+- if [ "$TRAVIS_OS_NAME" = "osx" ]; then
+ export PATH="$(brew --prefix llvm)/bin:$PATH";
+ fi
+# /usr/bin/gcc points to an older compiler on both Linux and macOS.
+- if [ "$CXX" = "g++" ]; then export CXX="g++-9" CC="gcc-9"; fi
+# /usr/bin/clang points to an older compiler on both Linux and macOS.
+#
+# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values
+# below don't work on macOS. Fortunately, the path change above makes the
+# default values (clang and clang++) resolve to the correct compiler on macOS.
+- if [ "$TRAVIS_OS_NAME" = "linux" ]; then
+ if [ "$CXX" = "clang++" ]; then export CXX="clang++-9" CC="clang-9"; fi;
+ fi
+- echo ${CC}
+- echo ${CXX}
+- ${CXX} --version
+- cmake --version
+
+before_script:
+- mkdir -p build && cd build
+- cmake .. -G Ninja -DCRC32C_USE_GLOG=$GLOG -DCMAKE_BUILD_TYPE=$BUILD_TYPE
+ -DBUILD_SHARED_LIBS=$SHARED_LIB -DCMAKE_INSTALL_PREFIX=$HOME/.local
+- cmake --build .
+- cd ..
+
+script:
+- build/crc32c_tests
+- build/crc32c_capi_tests
+- build/crc32c_bench
+- cd build && cmake --build . --target install
diff --git a/src/crc32c/.ycm_extra_conf.py b/src/crc32c/.ycm_extra_conf.py
new file mode 100644
index 0000000000..536aadcec8
--- /dev/null
+++ b/src/crc32c/.ycm_extra_conf.py
@@ -0,0 +1,142 @@
+# Copyright 2017 The CRC32C Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""YouCompleteMe configuration that interprets a .clang_complete file.
+
+This module implements the YouCompleteMe configuration API documented at:
+https://github.com/Valloric/ycmd#ycm_extra_confpy-specification
+
+The implementation loads and processes a .clang_complete file, documented at:
+https://github.com/Rip-Rip/clang_complete/blob/master/README.md
+"""
+
+import os
+
+# Flags added to the list in .clang_complete.
+BASE_FLAGS = [
+ '-Werror', # Unlike clang_complete, YCM can also be used as a linter.
+ '-DUSE_CLANG_COMPLETER', # YCM needs this.
+ '-xc++', # YCM needs this to avoid compiling headers as C code.
+]
+
+# Clang flags that take in paths.
+# See https://clang.llvm.org/docs/ClangCommandLineReference.html
+PATH_FLAGS = [
+ '-isystem',
+ '-I',
+ '-iquote',
+ '--sysroot='
+]
+
+
+def DirectoryOfThisScript():
+ """Returns the absolute path to the directory containing this script."""
+ return os.path.dirname(os.path.abspath(__file__))
+
+
+def MakeRelativePathsInFlagsAbsolute(flags, build_root):
+ """Expands relative paths in a list of Clang command-line flags.
+
+ Args:
+ flags: The list of flags passed to Clang.
+ build_root: The current directory when running the Clang compiler. Should be
+ an absolute path.
+
+ Returns:
+ A list of flags with relative paths replaced by absolute paths.
+ """
+ new_flags = []
+ make_next_absolute = False
+ for flag in flags:
+ new_flag = flag
+
+ if make_next_absolute:
+ make_next_absolute = False
+ if not flag.startswith('/'):
+ new_flag = os.path.join(build_root, flag)
+
+ for path_flag in PATH_FLAGS:
+ if flag == path_flag:
+ make_next_absolute = True
+ break
+
+ if flag.startswith(path_flag):
+ path = flag[len(path_flag):]
+ new_flag = path_flag + os.path.join(build_root, path)
+ break
+
+ if new_flag:
+ new_flags.append(new_flag)
+ return new_flags
+
+
+def FindNearest(target, path, build_root):
+ """Looks for a file with a specific name closest to a project path.
+
+ This is similar to the logic used by a version-control system (like git) to
+ find its configuration directory (.git) based on the current directory when a
+ command is invoked.
+
+ Args:
+ target: The file name to search for.
+ path: The directory where the search starts. The search will explore the
+ given directory's ancestors using the parent relationship. Should be an
+ absolute path.
+ build_root: A directory that acts as a fence for the search. If the search
+ reaches this directory, it will not advance to its parent. Should be an
+ absolute path.
+
+ Returns:
+ The path to a file with the desired name. None if the search failed.
+ """
+ candidate = os.path.join(path, target)
+ if os.path.isfile(candidate):
+ return candidate
+
+ if path == build_root:
+ return None
+
+ parent = os.path.dirname(path)
+ if parent == path:
+ return None
+
+ return FindNearest(target, parent, build_root)
+
+
+def FlagsForClangComplete(file_path, build_root):
+ """Reads the .clang_complete flags for a source file.
+
+ Args:
+ file_path: The path to the source file. Should be inside the project. Used
+ to locate the relevant .clang_complete file.
+ build_root: The current directory when running the Clang compiler for this
+ file. Should be an absolute path.
+
+ Returns:
+ A list of strings, where each element is a Clang command-line flag.
+ """
+ clang_complete_path = FindNearest('.clang_complete', file_path, build_root)
+ if clang_complete_path is None:
+ return None
+ clang_complete_flags = open(clang_complete_path, 'r').read().splitlines()
+ return clang_complete_flags
+
+
+def FlagsForFile(filename, **kwargs):
+ """Implements the YouCompleteMe API."""
+
+ # kwargs can be used to pass 'client_data' to the YCM configuration. This
+ # configuration script does not need any extra information, so
+ # pylint: disable=unused-argument
+
+ build_root = DirectoryOfThisScript()
+ file_path = os.path.realpath(filename)
+
+ flags = BASE_FLAGS
+ clang_flags = FlagsForClangComplete(file_path, build_root)
+ if clang_flags:
+ flags += clang_flags
+
+ final_flags = MakeRelativePathsInFlagsAbsolute(flags, build_root)
+
+ return {'flags': final_flags}
diff --git a/src/crc32c/AUTHORS b/src/crc32c/AUTHORS
new file mode 100644
index 0000000000..6f1f6871a6
--- /dev/null
+++ b/src/crc32c/AUTHORS
@@ -0,0 +1,9 @@
+# This is the list of CRC32C authors for copyright purposes.
+#
+# This does not necessarily list everyone who has contributed code, since in
+# some cases, their employer may be the copyright holder. To see the full list
+# of contributors, see the revision history in source control.
+Google Inc.
+
+Fangming Fang <Fangming.Fang@arm.com>
+Vadim Skipin <vadim.skipin@gmail.com>
diff --git a/src/crc32c/CMakeLists.txt b/src/crc32c/CMakeLists.txt
new file mode 100644
index 0000000000..111a3e3614
--- /dev/null
+++ b/src/crc32c/CMakeLists.txt
@@ -0,0 +1,423 @@
+# Copyright 2017 The CRC32C Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+cmake_minimum_required(VERSION 3.1)
+project(Crc32c VERSION 1.1.0 LANGUAGES C CXX)
+
+# This project can use C11, but will gracefully decay down to C89.
+set(CMAKE_C_STANDARD 11)
+set(CMAKE_C_STANDARD_REQUIRED OFF)
+set(CMAKE_C_EXTENSIONS OFF)
+
+# This project requires C++11.
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+# https://github.com/izenecloud/cmake/blob/master/SetCompilerWarningAll.cmake
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # Use the highest warning level for Visual Studio.
+ set(CMAKE_CXX_WARNING_LEVEL 4)
+ if(CMAKE_CXX_FLAGS MATCHES "/W[0-4]")
+ string(REGEX REPLACE "/W[0-4]" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ else(CMAKE_CXX_FLAGS MATCHES "/W[0-4]")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
+ endif(CMAKE_CXX_FLAGS MATCHES "/W[0-4]")
+
+ # Disable C++ exceptions.
+ string(REGEX REPLACE "/EH[a-z]+" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHs-c-")
+ add_definitions(-D_HAS_EXCEPTIONS=0)
+
+ # Disable RTTI.
+ string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-")
+else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # Use -Wall for clang and gcc.
+ if(NOT CMAKE_CXX_FLAGS MATCHES "-Wall")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
+ endif(NOT CMAKE_CXX_FLAGS MATCHES "-Wall")
+
+ # Use -Wextra for clang and gcc.
+ if(NOT CMAKE_CXX_FLAGS MATCHES "-Wextra")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra")
+ endif(NOT CMAKE_CXX_FLAGS MATCHES "-Wextra")
+
+ # Use -Werror for clang and gcc.
+ if(NOT CMAKE_CXX_FLAGS MATCHES "-Werror")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror")
+ endif(NOT CMAKE_CXX_FLAGS MATCHES "-Werror")
+
+ # Disable C++ exceptions.
+ string(REGEX REPLACE "-fexceptions" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
+
+ # Disable RTTI.
+ string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti")
+endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+
+option(CRC32C_BUILD_TESTS "Build CRC32C's unit tests" ON)
+option(CRC32C_BUILD_BENCHMARKS "Build CRC32C's benchmarks" ON)
+option(CRC32C_USE_GLOG "Build CRC32C's tests with Google Logging" ON)
+option(CRC32C_INSTALL "Install CRC32C's header and library" ON)
+
+include(TestBigEndian)
+test_big_endian(BYTE_ORDER_BIG_ENDIAN)
+
+include(CheckCXXCompilerFlag)
+# Used by glog.
+check_cxx_compiler_flag(-Wno-deprecated CRC32C_HAVE_NO_DEPRECATED)
+# Used by glog.
+check_cxx_compiler_flag(-Wno-sign-compare CRC32C_HAVE_NO_SIGN_COMPARE)
+# Used by glog.
+check_cxx_compiler_flag(-Wno-unused-parameter CRC32C_HAVE_NO_UNUSED_PARAMETER)
+# Used by googletest.
+check_cxx_compiler_flag(-Wno-missing-field-initializers
+ CRC32C_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+
+# Check for __builtin_prefetch support in the compiler.
+include(CheckCXXSourceCompiles)
+check_cxx_source_compiles("
+int main() {
+ char data = 0;
+ const char* address = &data;
+ __builtin_prefetch(address, 0, 0);
+ return 0;
+}
+" HAVE_BUILTIN_PREFETCH)
+
+# Check for _mm_prefetch support in the compiler.
+include(CheckCXXSourceCompiles)
+check_cxx_source_compiles("
+#if defined(_MSC_VER)
+#include <intrin.h>
+#else // !defined(_MSC_VER)
+#include <xmmintrin.h>
+#endif // defined(_MSC_VER)
+
+int main() {
+ char data = 0;
+ const char* address = &data;
+ _mm_prefetch(address, _MM_HINT_NTA);
+ return 0;
+}
+" HAVE_MM_PREFETCH)
+
+# Check for SSE4.2 support in the compiler.
+set(OLD_CMAKE_REQURED_FLAGS ${CMAKE_REQUIRED_FLAGS})
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} /arch:AVX")
+else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -msse4.2")
+endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+check_cxx_source_compiles("
+#if defined(_MSC_VER)
+#include <intrin.h>
+#else // !defined(_MSC_VER)
+#include <cpuid.h>
+#include <nmmintrin.h>
+#endif // defined(_MSC_VER)
+
+int main() {
+ _mm_crc32_u8(0, 0); _mm_crc32_u32(0, 0);
+#if defined(_M_X64) || defined(__x86_64__)
+ _mm_crc32_u64(0, 0);
+#endif // defined(_M_X64) || defined(__x86_64__)
+ return 0;
+}
+" HAVE_SSE42)
+set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQURED_FLAGS})
+
+# Check for ARMv8 w/ CRC and CRYPTO extensions support in the compiler.
+set(OLD_CMAKE_REQURED_FLAGS ${CMAKE_REQUIRED_FLAGS})
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # TODO(pwnall): Insert correct flag when VS gets ARM CRC32C support.
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} /arch:NOTYET")
+else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -march=armv8-a+crc+crypto")
+endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+check_cxx_source_compiles("
+#include <arm_acle.h>
+#include <arm_neon.h>
+
+int main() {
+ __crc32cb(0, 0); __crc32ch(0, 0); __crc32cw(0, 0); __crc32cd(0, 0);
+ vmull_p64(0, 0);
+ return 0;
+}
+" HAVE_ARM64_CRC32C)
+set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQURED_FLAGS})
+
+# Check for strong getauxval() support in the system headers.
+check_cxx_source_compiles("
+#include <arm_acle.h>
+#include <arm_neon.h>
+#include <sys/auxv.h>
+
+int main() {
+ getauxval(AT_HWCAP);
+ return 0;
+}
+" HAVE_STRONG_GETAUXVAL)
+
+# Check for weak getauxval() support in the compiler.
+check_cxx_source_compiles("
+unsigned long getauxval(unsigned long type) __attribute__((weak));
+#define AT_HWCAP 16
+
+int main() {
+ getauxval(AT_HWCAP);
+ return 0;
+}
+" HAVE_WEAK_GETAUXVAL)
+
+if(CRC32C_USE_GLOG)
+ # glog requires this setting to avoid using dynamic_cast.
+ set(DISABLE_RTTI ON CACHE BOOL "" FORCE)
+
+ # glog's test targets trigger deprecation warnings, and compiling them burns
+ # CPU cycles on the CI.
+ set(BUILD_TESTING_SAVED "${BUILD_TESTING}")
+ set(BUILD_TESTING OFF CACHE BOOL "" FORCE)
+ add_subdirectory("third_party/glog" EXCLUDE_FROM_ALL)
+ set(BUILD_TESTING "${BUILD_TESTING_SAVED}" CACHE BOOL "" FORCE)
+
+ # glog triggers deprecation warnings on OSX.
+ # https://github.com/google/glog/issues/185
+ if(CRC32C_HAVE_NO_DEPRECATED)
+ set_property(TARGET glog APPEND PROPERTY COMPILE_OPTIONS -Wno-deprecated)
+ endif(CRC32C_HAVE_NO_DEPRECATED)
+
+ # glog triggers sign comparison warnings on gcc.
+ if(CRC32C_HAVE_NO_SIGN_COMPARE)
+ set_property(TARGET glog APPEND PROPERTY COMPILE_OPTIONS -Wno-sign-compare)
+ endif(CRC32C_HAVE_NO_SIGN_COMPARE)
+
+ # glog triggers unused parameter warnings on clang.
+ if(CRC32C_HAVE_NO_UNUSED_PARAMETER)
+ set_property(TARGET glog
+ APPEND PROPERTY COMPILE_OPTIONS -Wno-unused-parameter)
+ endif(CRC32C_HAVE_NO_UNUSED_PARAMETER)
+
+ set(CRC32C_TESTS_BUILT_WITH_GLOG 1)
+endif(CRC32C_USE_GLOG)
+
+configure_file(
+ "src/crc32c_config.h.in"
+ "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h"
+)
+
+include_directories("${PROJECT_BINARY_DIR}/include")
+
+# ARM64 CRC32C code is built separately, so we don't accidentally compile
+# unsupported instructions into code that gets run without ARM64 CRC32C support.
+add_library(crc32c_arm64 OBJECT "")
+target_sources(crc32c_arm64
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h"
+ "src/crc32c_arm64.cc"
+ "src/crc32c_arm64.h"
+)
+if(HAVE_ARM64_CRC32C)
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # TODO(pwnall): Insert correct flag when VS gets ARM64 CRC32C support.
+ target_compile_options(crc32c_arm64 PRIVATE "/arch:NOTYET")
+ else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ target_compile_options(crc32c_arm64 PRIVATE "-march=armv8-a+crc+crypto")
+ endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+endif(HAVE_ARM64_CRC32C)
+
+# CMake only enables PIC by default in SHARED and MODULE targets.
+if(BUILD_SHARED_LIBS)
+ set_property(TARGET crc32c_arm64 PROPERTY POSITION_INDEPENDENT_CODE TRUE)
+endif(BUILD_SHARED_LIBS)
+
+# SSE4.2 code is built separately, so we don't accidentally compile unsupported
+# instructions into code that gets run without SSE4.2 support.
+add_library(crc32c_sse42 OBJECT "")
+target_sources(crc32c_sse42
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h"
+ "src/crc32c_sse42.cc"
+ "src/crc32c_sse42.h"
+)
+if(HAVE_SSE42)
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ target_compile_options(crc32c_sse42 PRIVATE "/arch:AVX")
+ else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ target_compile_options(crc32c_sse42 PRIVATE "-msse4.2")
+ endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+endif(HAVE_SSE42)
+
+# CMake only enables PIC by default in SHARED and MODULE targets.
+if(BUILD_SHARED_LIBS)
+ set_property(TARGET crc32c_sse42 PROPERTY POSITION_INDEPENDENT_CODE TRUE)
+endif(BUILD_SHARED_LIBS)
+
+# Must be included before CMAKE_INSTALL_INCLUDEDIR is used.
+include(GNUInstallDirs)
+
+add_library(crc32c ""
+ # TODO(pwnall): Move the TARGET_OBJECTS generator expressions to the PRIVATE
+ # section of target_sources when cmake_minimum_required becomes 3.9 or above.
+ $<TARGET_OBJECTS:crc32c_arm64>
+ $<TARGET_OBJECTS:crc32c_sse42>
+)
+target_sources(crc32c
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h"
+ "src/crc32c_arm64.h"
+ "src/crc32c_arm64_linux_check.h"
+ "src/crc32c_internal.h"
+ "src/crc32c_portable.cc"
+ "src/crc32c_prefetch.h"
+ "src/crc32c_read_le.h"
+ "src/crc32c_round_up.h"
+ "src/crc32c_sse42.h"
+ "src/crc32c_sse42_check.h"
+ "src/crc32c.cc"
+
+ # Only CMake 3.3+ supports PUBLIC sources in targets exported by "install".
+ $<$<VERSION_GREATER:CMAKE_VERSION,3.2>:PUBLIC>
+ "include/crc32c/crc32c.h"
+)
+
+target_include_directories(crc32c
+ PUBLIC
+ $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
+ $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+)
+
+target_compile_definitions(crc32c
+  PRIVATE
+ CRC32C_HAVE_CONFIG_H=1
+)
+
+set_target_properties(crc32c
+ PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR})
+
+# Warnings as errors in Visual Studio for this project's targets.
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ set_property(TARGET crc32c APPEND PROPERTY COMPILE_OPTIONS "/WX")
+ set_property(TARGET crc32c_arm64 APPEND PROPERTY COMPILE_OPTIONS "/WX")
+ set_property(TARGET crc32c_sse42 APPEND PROPERTY COMPILE_OPTIONS "/WX")
+endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+
+if(CRC32C_BUILD_TESTS)
+ enable_testing()
+
+ # Prevent overriding the parent project's compiler/linker settings on Windows.
+ set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
+ set(install_gtest OFF)
+ set(install_gmock OFF)
+
+ # This project is tested using GoogleTest.
+ add_subdirectory("third_party/googletest")
+
+ # GoogleTest triggers a missing field initializers warning.
+ if(CRC32C_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+ set_property(TARGET gtest
+ APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
+ set_property(TARGET gmock
+ APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
+ endif(CRC32C_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+
+ add_executable(crc32c_tests "")
+ target_sources(crc32c_tests
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h"
+ "src/crc32c_arm64_unittest.cc"
+ "src/crc32c_extend_unittests.h"
+ "src/crc32c_portable_unittest.cc"
+ "src/crc32c_prefetch_unittest.cc"
+ "src/crc32c_read_le_unittest.cc"
+ "src/crc32c_round_up_unittest.cc"
+ "src/crc32c_sse42_unittest.cc"
+ "src/crc32c_unittest.cc"
+ "src/crc32c_test_main.cc"
+ )
+ target_link_libraries(crc32c_tests crc32c gtest)
+
+ # Warnings as errors in Visual Studio for this project's targets.
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ set_property(TARGET crc32c_tests APPEND PROPERTY COMPILE_OPTIONS "/WX")
+ endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+
+ if(CRC32C_USE_GLOG)
+ target_link_libraries(crc32c_tests glog)
+ endif(CRC32C_USE_GLOG)
+
+ add_test(NAME crc32c_tests COMMAND crc32c_tests)
+
+ add_executable(crc32c_capi_tests "")
+ target_sources(crc32c_capi_tests
+ PRIVATE
+ "src/crc32c_capi_unittest.c"
+ )
+ target_link_libraries(crc32c_capi_tests crc32c)
+
+ # Warnings as errors in Visual Studio for this project's targets.
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ set_property(TARGET crc32c_capi_tests APPEND PROPERTY COMPILE_OPTIONS "/WX")
+ endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+
+ add_test(NAME crc32c_capi_tests COMMAND crc32c_capi_tests)
+endif(CRC32C_BUILD_TESTS)
+
+if(CRC32C_BUILD_BENCHMARKS)
+ add_executable(crc32c_bench "")
+ target_sources(crc32c_bench
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h"
+ "src/crc32c_benchmark.cc"
+ )
+ target_link_libraries(crc32c_bench crc32c)
+
+ # This project uses Google benchmark for benchmarking.
+ set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE)
+ set(BENCHMARK_ENABLE_EXCEPTIONS OFF CACHE BOOL "" FORCE)
+ add_subdirectory("third_party/benchmark")
+ target_link_libraries(crc32c_bench benchmark)
+
+ if(CRC32C_USE_GLOG)
+ target_link_libraries(crc32c_bench glog)
+ endif(CRC32C_USE_GLOG)
+
+ # Warnings as errors in Visual Studio for this project's targets.
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ set_property(TARGET crc32c_bench APPEND PROPERTY COMPILE_OPTIONS "/WX")
+ endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+endif(CRC32C_BUILD_BENCHMARKS)
+
+if(CRC32C_INSTALL)
+ install(TARGETS crc32c
+ EXPORT Crc32cTargets
+ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ )
+ install(
+ FILES
+ "include/crc32c/crc32c.h"
+ DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/crc32c"
+ )
+
+ include(CMakePackageConfigHelpers)
+ write_basic_package_version_file(
+ "${PROJECT_BINARY_DIR}/Crc32cConfigVersion.cmake"
+ COMPATIBILITY SameMajorVersion
+ )
+ install(
+ EXPORT Crc32cTargets
+ NAMESPACE Crc32c::
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/Crc32c"
+ )
+ install(
+ FILES
+ "Crc32cConfig.cmake"
+ "${PROJECT_BINARY_DIR}/Crc32cConfigVersion.cmake"
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/Crc32c"
+ )
+endif(CRC32C_INSTALL)
diff --git a/src/crc32c/CONTRIBUTING.md b/src/crc32c/CONTRIBUTING.md
new file mode 100644
index 0000000000..ae319c70ac
--- /dev/null
+++ b/src/crc32c/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution,
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
diff --git a/src/crc32c/Crc32cConfig.cmake b/src/crc32c/Crc32cConfig.cmake
new file mode 100644
index 0000000000..4d6057ec26
--- /dev/null
+++ b/src/crc32c/Crc32cConfig.cmake
@@ -0,0 +1,5 @@
+# Copyright 2017 The CRC32C Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+include("${CMAKE_CURRENT_LIST_DIR}/Crc32cTargets.cmake")
diff --git a/src/crc32c/LICENSE b/src/crc32c/LICENSE
new file mode 100644
index 0000000000..8c8735cf12
--- /dev/null
+++ b/src/crc32c/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2017, The CRC32C Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/crc32c/README.md b/src/crc32c/README.md
new file mode 100644
index 0000000000..0bd69f7f09
--- /dev/null
+++ b/src/crc32c/README.md
@@ -0,0 +1,125 @@
+# CRC32C
+
+[![Build Status](https://travis-ci.org/google/crc32c.svg?branch=master)](https://travis-ci.org/google/crc32c)
+[![Build Status](https://ci.appveyor.com/api/projects/status/moiq7331pett4xuj/branch/master?svg=true)](https://ci.appveyor.com/project/pwnall/crc32c)
+
+Authors of new file formats should consider
+[HighwayHash](https://github.com/google/highwayhash). The initial version of
+this code was extracted from [LevelDB](https://github.com/google/leveldb), a
+stable key-value store that is widely used at Google.
+
+This project collects a few CRC32C implementations under an umbrella that
+dispatches to a suitable implementation based on the host computer's hardware
+capabilities.
+
+CRC32C is specified as the CRC that uses the iSCSI polynomial in
+[RFC 3720](https://tools.ietf.org/html/rfc3720#section-12.1). The polynomial was
+introduced by G. Castagnoli, S. Braeuer and M. Herrmann. CRC32C is used in
+software such as Btrfs, ext4, Ceph and leveldb.
+
+
+## Usage
+
+```cpp
+#include "crc32c/crc32c.h"
+
+int main() {
+ const std::uint8_t buffer[] = {0, 0, 0, 0};
+ std::uint32_t result;
+
+ // Process a raw buffer.
+ result = crc32c::Crc32c(buffer, 4);
+
+ // Process a std::string.
+ std::string string;
+ string.resize(4);
+ result = crc32c::Crc32c(string);
+
+ // If you have C++17 support, process a std::string_view.
+ std::string_view string_view(string);
+ result = crc32c::Crc32c(string_view);
+
+ return 0;
+}
+```
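+
+Checksums can also be computed incrementally with `crc32c::Extend()`, and the
+same values are exposed to C callers through `crc32c_value()` and
+`crc32c_extend()`, declared in the same header. A minimal sketch (the expected
+value below is the RFC 3720 test vector for 32 zero bytes, also used in this
+project's unit tests):
+
+```cpp
+#include <cassert>
+#include <cstdint>
+#include <string>
+
+#include "crc32c/crc32c.h"
+
+int main() {
+  const std::string data(32, '\0');
+  const std::uint8_t* bytes =
+      reinterpret_cast<const std::uint8_t*>(data.data());
+
+  // One-shot computation.
+  std::uint32_t whole = crc32c::Crc32c(bytes, data.size());
+  assert(whole == 0x8a9136aa);  // RFC 3720 B.4 vector for 32 zero bytes.
+
+  // Incremental computation: extend a running CRC with more data.
+  std::uint32_t running = crc32c::Extend(0, bytes, 16);
+  running = crc32c::Extend(running, bytes + 16, 16);
+  assert(running == whole);
+
+  // The C API mirrors the C++ one.
+  assert(crc32c_value(bytes, data.size()) == whole);
+  assert(crc32c_extend(0, bytes, data.size()) == whole);
+
+  return 0;
+}
+```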
+
+
+## Prerequisites
+
+This project uses [CMake](https://cmake.org/) for building and testing. CMake is
+available in all popular Linux distributions, as well as in
+[Homebrew](https://brew.sh/).
+
+This project uses submodules for dependency management.
+
+```bash
+git submodule update --init --recursive
+```
+
+If you're using [Atom](https://atom.io/), the following packages can help.
+
+```bash
+apm install autocomplete-clang build build-cmake clang-format language-cmake \
+ linter linter-clang
+```
+
+If you don't mind more setup in return for more speed, replace
+`autocomplete-clang` and `linter-clang` with `you-complete-me`. This requires
+[setting up ycmd](https://github.com/Valloric/ycmd#building).
+
+```bash
+apm install autocomplete-plus build build-cmake clang-format language-cmake \
+ linter you-complete-me
+```
+
+## Building
+
+The following commands build and install the project.
+
+```bash
+mkdir build
+cd build
+cmake -DCRC32C_BUILD_TESTS=0 -DCRC32C_BUILD_BENCHMARKS=0 .. && make all install
+```
+
+
+## Development
+
+The following command (when executed from `build/`) (re)builds the project and
+runs the tests.
+
+```bash
+cmake .. && cmake --build . && ctest --output-on-failure
+```
+
+
+### Android testing
+
+The following command builds the project against the Android NDK, which is
+useful for benchmarking against ARM processors.
+
+```bash
+cmake .. -DCMAKE_SYSTEM_NAME=Android -DCMAKE_ANDROID_ARCH_ABI=arm64-v8a \
+ -DCMAKE_ANDROID_NDK=$HOME/Library/Android/sdk/ndk-bundle \
+ -DCMAKE_ANDROID_NDK_TOOLCHAIN_VERSION=clang \
+ -DCMAKE_ANDROID_STL_TYPE=c++_static -DCRC32C_USE_GLOG=0 \
+ -DCMAKE_BUILD_TYPE=Release && cmake --build .
+```
+
+The following commands install and run the benchmarks.
+
+```bash
+adb push crc32c_bench /data/local/tmp
+adb shell chmod +x /data/local/tmp/crc32c_bench
+adb shell 'cd /data/local/tmp && ./crc32c_bench'
+adb shell rm /data/local/tmp/crc32c_bench
+```
+
+The following commands install and run the tests.
+
+```bash
+adb push crc32c_tests /data/local/tmp
+adb shell chmod +x /data/local/tmp/crc32c_tests
+adb shell 'cd /data/local/tmp && ./crc32c_tests'
+adb shell rm /data/local/tmp/crc32c_tests
+```
diff --git a/src/crc32c/include/crc32c/crc32c.h b/src/crc32c/include/crc32c/crc32c.h
new file mode 100644
index 0000000000..e8a78170a9
--- /dev/null
+++ b/src/crc32c/include/crc32c/crc32c.h
@@ -0,0 +1,89 @@
+/* Copyright 2017 The CRC32C Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style license that can be
+ found in the LICENSE file. See the AUTHORS file for names of contributors. */
+
+#ifndef CRC32C_CRC32C_H_
+#define CRC32C_CRC32C_H_
+
+/* The API exported by the CRC32C project. */
+
+#if defined(__cplusplus)
+
+#include <cstddef>
+#include <cstdint>
+#include <string>
+
+#else /* !defined(__cplusplus) */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#endif /* !defined(__cplusplus) */
+
+
+/* The C API. */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif /* defined(__cplusplus) */
+
+/* Extends "crc" with the CRC32C of "count" bytes in the buffer pointed to by
+   "data". */
+uint32_t crc32c_extend(uint32_t crc, const uint8_t* data, size_t count);
+
+/* Computes the CRC32C of "count" bytes in the buffer pointed to by "data". */
+uint32_t crc32c_value(const uint8_t* data, size_t count);
+
+#ifdef __cplusplus
+} /* end extern "C" */
+#endif /* defined(__cplusplus) */
+
+
+/* The C++ API. */
+
+#if defined(__cplusplus)
+
+namespace crc32c {
+
+// Extends "crc" with the CRC32C of "count" bytes in the buffer pointed to by
+// "data".
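+//
+// Feeding a buffer to Extend() in successive chunks (starting from crc == 0)
+// yields the same value as a single Crc32c() call over the whole buffer.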
+uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count);
+
+// Computes the CRC32C of "count" bytes in the buffer pointed to by "data".
+inline uint32_t Crc32c(const uint8_t* data, size_t count) {
+ return Extend(0, data, count);
+}
+
+// Computes the CRC32C of "count" bytes in the buffer pointed to by "data".
+inline uint32_t Crc32c(const char* data, size_t count) {
+ return Extend(0, reinterpret_cast<const uint8_t*>(data), count);
+}
+
+// Computes the CRC32C of the string's content.
+inline uint32_t Crc32c(const std::string& string) {
+ return Crc32c(reinterpret_cast<const uint8_t*>(string.data()),
+ string.size());
+}
+
+} // namespace crc32c
+
+#if __cplusplus > 201402L
+#if __has_include(<string_view>)
+#include <string_view>
+
+namespace crc32c {
+
+// Computes the CRC32C of the bytes in the string_view.
+inline uint32_t Crc32c(const std::string_view& string_view) {
+ return Crc32c(reinterpret_cast<const uint8_t*>(string_view.data()),
+ string_view.size());
+}
+
+} // namespace crc32c
+
+#endif // __has_include(<string_view>)
+#endif // __cplusplus > 201402L
+
+#endif /* defined(__cplusplus) */
+
+#endif // CRC32C_CRC32C_H_
diff --git a/src/crc32c/src/crc32c.cc b/src/crc32c/src/crc32c.cc
new file mode 100644
index 0000000000..4d3018af47
--- /dev/null
+++ b/src/crc32c/src/crc32c.cc
@@ -0,0 +1,39 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "crc32c/crc32c.h"
+
+#include <cstddef>
+#include <cstdint>
+
+#include "./crc32c_arm64.h"
+#include "./crc32c_arm64_linux_check.h"
+#include "./crc32c_internal.h"
+#include "./crc32c_sse42.h"
+#include "./crc32c_sse42_check.h"
+
+namespace crc32c {
+
+uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
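+  // The CPU capability probe below runs once; its result is cached in a
+  // function-local static, so later calls dispatch without re-checking.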
+#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+ static bool can_use_sse42 = CanUseSse42();
+ if (can_use_sse42) return ExtendSse42(crc, data, count);
+#elif HAVE_ARM64_CRC32C
+ static bool can_use_arm_linux = CanUseArm64Linux();
+ if (can_use_arm_linux) return ExtendArm64(crc, data, count);
+#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+ return ExtendPortable(crc, data, count);
+}
+
+extern "C" uint32_t crc32c_extend(uint32_t crc, const uint8_t* data,
+ size_t count) {
+ return crc32c::Extend(crc, data, count);
+}
+
+extern "C" uint32_t crc32c_value(const uint8_t* data, size_t count) {
+ return crc32c::Crc32c(data, count);
+}
+
+} // namespace crc32c
diff --git a/src/crc32c/src/crc32c_arm64.cc b/src/crc32c/src/crc32c_arm64.cc
new file mode 100644
index 0000000000..b872245f95
--- /dev/null
+++ b/src/crc32c/src/crc32c_arm64.cc
@@ -0,0 +1,126 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "./crc32c_arm64.h"
+
+// In a separate source file to allow this accelerated CRC32C function to be
+// compiled with the appropriate compiler flags to enable ARM NEON CRC32C
+// instructions.
+
+// This implementation is based on https://github.com/google/leveldb/pull/490.
+
+#include <cstddef>
+#include <cstdint>
+
+#include "./crc32c_internal.h"
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#if HAVE_ARM64_CRC32C
+
+#include <arm_acle.h>
+#include <arm_neon.h>
+
+#define KBYTES 1032
+#define SEGMENTBYTES 256
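+
+// Each main-loop iteration consumes KBYTES bytes: the 4 segments of
+// SEGMENTBYTES bytes processed in parallel, plus the 8-byte word folded in
+// while merging the partial CRCs.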
+
+// Compute 8 bytes for each segment in parallel.
+#define CRC32C32BYTES(P, IND) \
+ do { \
+ crc1 = __crc32cd( \
+ crc1, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 1 + (IND))); \
+ crc2 = __crc32cd( \
+ crc2, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 2 + (IND))); \
+ crc3 = __crc32cd( \
+ crc3, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 3 + (IND))); \
+ crc0 = __crc32cd( \
+ crc0, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 0 + (IND))); \
+ } while (0);
+
+// Compute 8*8 bytes for each segment in parallel.
+#define CRC32C256BYTES(P, IND) \
+ do { \
+ CRC32C32BYTES((P), (IND)*8 + 0) \
+ CRC32C32BYTES((P), (IND)*8 + 1) \
+ CRC32C32BYTES((P), (IND)*8 + 2) \
+ CRC32C32BYTES((P), (IND)*8 + 3) \
+ CRC32C32BYTES((P), (IND)*8 + 4) \
+ CRC32C32BYTES((P), (IND)*8 + 5) \
+ CRC32C32BYTES((P), (IND)*8 + 6) \
+ CRC32C32BYTES((P), (IND)*8 + 7) \
+ } while (0);
+
+// Compute 4*8*8 bytes for each segment in parallel.
+#define CRC32C1024BYTES(P) \
+ do { \
+ CRC32C256BYTES((P), 0) \
+ CRC32C256BYTES((P), 1) \
+ CRC32C256BYTES((P), 2) \
+ CRC32C256BYTES((P), 3) \
+ (P) += 4 * SEGMENTBYTES; \
+ } while (0)
+
+namespace crc32c {
+
+uint32_t ExtendArm64(uint32_t crc, const uint8_t *buf, size_t size) {
+ int64_t length = size;
+ uint32_t crc0, crc1, crc2, crc3;
+ uint64_t t0, t1, t2;
+
+ // k0=CRC(x^(3*SEGMENTBYTES*8)), k1=CRC(x^(2*SEGMENTBYTES*8)),
+ // k2=CRC(x^(SEGMENTBYTES*8))
+ const poly64_t k0 = 0x8d96551c, k1 = 0xbd6f81f8, k2 = 0xdcb17aa4;
+
+ crc = crc ^ kCRC32Xor;
+ const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
+
+ while (length >= KBYTES) {
+ crc0 = crc;
+ crc1 = 0;
+ crc2 = 0;
+ crc3 = 0;
+
+ // Process 1024 bytes in parallel.
+ CRC32C1024BYTES(p);
+
+ // Merge the 4 partial CRC32C values.
+ t2 = (uint64_t)vmull_p64(crc2, k2);
+ t1 = (uint64_t)vmull_p64(crc1, k1);
+ t0 = (uint64_t)vmull_p64(crc0, k0);
+ crc = __crc32cd(crc3, *(uint64_t *)p);
+ p += sizeof(uint64_t);
+ crc ^= __crc32cd(0, t2);
+ crc ^= __crc32cd(0, t1);
+ crc ^= __crc32cd(0, t0);
+
+ length -= KBYTES;
+ }
+
+ while (length >= 8) {
+ crc = __crc32cd(crc, *(uint64_t *)p);
+ p += 8;
+ length -= 8;
+ }
+
+ if (length & 4) {
+ crc = __crc32cw(crc, *(uint32_t *)p);
+ p += 4;
+ }
+
+ if (length & 2) {
+ crc = __crc32ch(crc, *(uint16_t *)p);
+ p += 2;
+ }
+
+ if (length & 1) {
+ crc = __crc32cb(crc, *p);
+ }
+
+ return crc ^ kCRC32Xor;
+}
+
+} // namespace crc32c
+
+#endif // HAVE_ARM64_CRC32C
diff --git a/src/crc32c/src/crc32c_arm64.h b/src/crc32c/src/crc32c_arm64.h
new file mode 100644
index 0000000000..100cd56ec8
--- /dev/null
+++ b/src/crc32c/src/crc32c_arm64.h
@@ -0,0 +1,27 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+// ARM64-specific hardware-accelerated CRC32C implementation.
+
+#ifndef CRC32C_CRC32C_ARM_LINUX_H_
+#define CRC32C_CRC32C_ARM_LINUX_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#if HAVE_ARM64_CRC32C
+
+namespace crc32c {
+
+uint32_t ExtendArm64(uint32_t crc, const uint8_t* data, size_t count);
+
+} // namespace crc32c
+
+#endif // HAVE_ARM64_CRC32C
+
+#endif // CRC32C_CRC32C_ARM_LINUX_H_
diff --git a/src/crc32c/src/crc32c_arm64_linux_check.h b/src/crc32c/src/crc32c_arm64_linux_check.h
new file mode 100644
index 0000000000..1a20a757bb
--- /dev/null
+++ b/src/crc32c/src/crc32c_arm64_linux_check.h
@@ -0,0 +1,50 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+// ARM Linux-specific code checking for the availability of CRC32C instructions.
+
+#ifndef CRC32C_CRC32C_ARM_LINUX_CHECK_H_
+#define CRC32C_CRC32C_ARM_LINUX_CHECK_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#if HAVE_ARM64_CRC32C
+
+#if HAVE_STRONG_GETAUXVAL
+#include <sys/auxv.h>
+#elif HAVE_WEAK_GETAUXVAL
+// getauxval() is not available on Android until API level 20. Link it as a weak
+// symbol.
+extern "C" unsigned long getauxval(unsigned long type) __attribute__((weak));
+
+#define AT_HWCAP 16
+#endif // HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL
+
+namespace crc32c {
+
+inline bool CanUseArm64Linux() {
+#if HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL
+ // From 'arch/arm64/include/uapi/asm/hwcap.h' in Linux kernel source code.
+ constexpr unsigned long kHWCAP_PMULL = 1 << 4;
+ constexpr unsigned long kHWCAP_CRC32 = 1 << 7;
+ unsigned long hwcap = (&getauxval != nullptr) ? getauxval(AT_HWCAP) : 0;
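+  // ExtendArm64() uses both the CRC32 (__crc32c*) and PMULL (vmull_p64)
+  // instructions, so both capability bits must be set.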
+ return (hwcap & (kHWCAP_PMULL | kHWCAP_CRC32)) ==
+ (kHWCAP_PMULL | kHWCAP_CRC32);
+#else
+ return false;
+#endif // HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL
+}
+
+} // namespace crc32c
+
+#endif // HAVE_ARM64_CRC32C
+
+#endif // CRC32C_CRC32C_ARM_LINUX_CHECK_H_
diff --git a/src/crc32c/src/crc32c_arm64_unittest.cc b/src/crc32c/src/crc32c_arm64_unittest.cc
new file mode 100644
index 0000000000..6f917d9c0c
--- /dev/null
+++ b/src/crc32c/src/crc32c_arm64_unittest.cc
@@ -0,0 +1,24 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "gtest/gtest.h"
+
+#include "./crc32c_arm64.h"
+#include "./crc32c_extend_unittests.h"
+
+namespace crc32c {
+
+#if HAVE_ARM64_CRC32C
+
+struct Arm64TestTraits {
+ static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
+ return ExtendArm64(crc, data, count);
+ }
+};
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Arm64, ExtendTest, Arm64TestTraits);
+
+#endif // HAVE_ARM64_CRC32C
+
+} // namespace crc32c
diff --git a/src/crc32c/src/crc32c_benchmark.cc b/src/crc32c/src/crc32c_benchmark.cc
new file mode 100644
index 0000000000..c464304b3f
--- /dev/null
+++ b/src/crc32c/src/crc32c_benchmark.cc
@@ -0,0 +1,106 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <cstddef>
+#include <cstdint>
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#include "benchmark/benchmark.h"
+
+#if CRC32C_TESTS_BUILT_WITH_GLOG
+#include "glog/logging.h"
+#endif // CRC32C_TESTS_BUILT_WITH_GLOG
+
+#include "./crc32c_arm64.h"
+#include "./crc32c_arm64_linux_check.h"
+#include "./crc32c_internal.h"
+#include "./crc32c_sse42.h"
+#include "./crc32c_sse42_check.h"
+#include "crc32c/crc32c.h"
+
+class CRC32CBenchmark : public benchmark::Fixture {
+ public:
+ void SetUp(const benchmark::State& state) override {
+ block_size_ = static_cast<size_t>(state.range(0));
+ block_data_ = std::string(block_size_, 'x');
+ block_buffer_ = reinterpret_cast<const uint8_t*>(block_data_.data());
+ }
+
+ protected:
+ std::string block_data_;
+ const uint8_t* block_buffer_;
+ size_t block_size_;
+};
+
+BENCHMARK_DEFINE_F(CRC32CBenchmark, Public)(benchmark::State& state) {
+ uint32_t crc = 0;
+ for (auto _ : state)
+ crc = crc32c::Extend(crc, block_buffer_, block_size_);
+ state.SetBytesProcessed(state.iterations() * block_size_);
+}
+BENCHMARK_REGISTER_F(CRC32CBenchmark, Public)
+ ->RangeMultiplier(16)
+ ->Range(256, 16777216); // Block size.
+
+BENCHMARK_DEFINE_F(CRC32CBenchmark, Portable)(benchmark::State& state) {
+ uint32_t crc = 0;
+ for (auto _ : state)
+ crc = crc32c::ExtendPortable(crc, block_buffer_, block_size_);
+ state.SetBytesProcessed(state.iterations() * block_size_);
+}
+BENCHMARK_REGISTER_F(CRC32CBenchmark, Portable)
+ ->RangeMultiplier(16)
+ ->Range(256, 16777216); // Block size.
+
+#if HAVE_ARM64_CRC32C
+
+BENCHMARK_DEFINE_F(CRC32CBenchmark, ArmLinux)(benchmark::State& state) {
+ if (!crc32c::CanUseArm64Linux()) {
+ state.SkipWithError("ARM CRC32C instructions not available or not enabled");
+ return;
+ }
+
+ uint32_t crc = 0;
+ for (auto _ : state)
+ crc = crc32c::ExtendArm64(crc, block_buffer_, block_size_);
+ state.SetBytesProcessed(state.iterations() * block_size_);
+}
+BENCHMARK_REGISTER_F(CRC32CBenchmark, ArmLinux)
+ ->RangeMultiplier(16)
+ ->Range(256, 16777216); // Block size.
+
+#endif // HAVE_ARM64_CRC32C
+
+#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+BENCHMARK_DEFINE_F(CRC32CBenchmark, Sse42)(benchmark::State& state) {
+ if (!crc32c::CanUseSse42()) {
+ state.SkipWithError("SSE4.2 instructions not available or not enabled");
+ return;
+ }
+
+ uint32_t crc = 0;
+ for (auto _ : state)
+ crc = crc32c::ExtendSse42(crc, block_buffer_, block_size_);
+ state.SetBytesProcessed(state.iterations() * block_size_);
+}
+BENCHMARK_REGISTER_F(CRC32CBenchmark, Sse42)
+ ->RangeMultiplier(16)
+ ->Range(256, 16777216); // Block size.
+
+#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+int main(int argc, char** argv) {
+#if CRC32C_TESTS_BUILT_WITH_GLOG
+ google::InitGoogleLogging(argv[0]);
+ google::InstallFailureSignalHandler();
+#endif // CRC32C_TESTS_BUILT_WITH_GLOG
+
+ benchmark::Initialize(&argc, argv);
+ benchmark::RunSpecifiedBenchmarks();
+ return 0;
+}
diff --git a/src/crc32c/src/crc32c_capi_unittest.c b/src/crc32c/src/crc32c_capi_unittest.c
new file mode 100644
index 0000000000..c8993a0959
--- /dev/null
+++ b/src/crc32c/src/crc32c_capi_unittest.c
@@ -0,0 +1,66 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "crc32c/crc32c.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int main() {
+ /* From rfc3720 section B.4. */
+ uint8_t buf[32];
+
+ memset(buf, 0, sizeof(buf));
+ if ((uint32_t)0x8a9136aa != crc32c_value(buf, sizeof(buf))) {
+ printf("crc32c_value(zeros) test failed\n");
+ return 1;
+ }
+
+ memset(buf, 0xff, sizeof(buf));
+ if ((uint32_t)0x62a8ab43 != crc32c_value(buf, sizeof(buf))) {
+ printf("crc32c_value(0xff) test failed\n");
+ return 1;
+ }
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = (uint8_t)i;
+ if ((uint32_t)0x46dd794e != crc32c_value(buf, sizeof(buf))) {
+ printf("crc32c_value(0..31) test failed\n");
+ return 1;
+ }
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = (uint8_t)(31 - i);
+ if ((uint32_t)0x113fdb5c != crc32c_value(buf, sizeof(buf))) {
+ printf("crc32c_value(31..0) test failed\n");
+ return 1;
+ }
+
+ uint8_t data[48] = {
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ };
+ if ((uint32_t)0xd9963a56 != crc32c_value(data, sizeof(data))) {
+    printf("crc32c_value(rfc3720 B.4 data) test failed\n");
+ return 1;
+ }
+
+ const uint8_t* hello_space_world = (const uint8_t*)"hello world";
+ const uint8_t* hello_space = (const uint8_t*)"hello ";
+ const uint8_t* world = (const uint8_t*)"world";
+
+ if (crc32c_value(hello_space_world, 11) !=
+ crc32c_extend(crc32c_value(hello_space, 6), world, 5)) {
+ printf("crc32c_extend test failed\n");
+ return 1;
+ }
+
+ printf("All tests passed\n");
+ return 0;
+}
diff --git a/src/crc32c/src/crc32c_config.h.in b/src/crc32c/src/crc32c_config.h.in
new file mode 100644
index 0000000000..4034fa5644
--- /dev/null
+++ b/src/crc32c/src/crc32c_config.h.in
@@ -0,0 +1,36 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_CONFIG_H_
+#define CRC32C_CRC32C_CONFIG_H_
+
+// Define to 1 if building for a big-endian platform.
+#cmakedefine01 BYTE_ORDER_BIG_ENDIAN
+
+// Define to 1 if the compiler has the __builtin_prefetch intrinsic.
+#cmakedefine01 HAVE_BUILTIN_PREFETCH
+
+// Define to 1 if targeting X86 and the compiler has the _mm_prefetch intrinsic.
+#cmakedefine01 HAVE_MM_PREFETCH
+
+// Define to 1 if targeting X86 and the compiler has the _mm_crc32_u{8,32,64}
+// intrinsics.
+#cmakedefine01 HAVE_SSE42
+
+// Define to 1 if targeting ARM and the compiler has the __crc32c{b,h,w,d} and
+// the vmull_p64 intrinsics.
+#cmakedefine01 HAVE_ARM64_CRC32C
+
+// Define to 1 if the system libraries have the getauxval function in the
+// <sys/auxv.h> header. Should be true on Linux and Android API level 20+.
+#cmakedefine01 HAVE_STRONG_GETAUXVAL
+
+// Define to 1 if the compiler supports defining getauxval as a weak symbol.
+// Should be true for any compiler that supports __attribute__((weak)).
+#cmakedefine01 HAVE_WEAK_GETAUXVAL
+
+// Define to 1 if CRC32C tests have been built with Google Logging.
+#cmakedefine01 CRC32C_TESTS_BUILT_WITH_GLOG
+
+#endif // CRC32C_CRC32C_CONFIG_H_
diff --git a/src/crc32c/src/crc32c_extend_unittests.h b/src/crc32c/src/crc32c_extend_unittests.h
new file mode 100644
index 0000000000..0732973737
--- /dev/null
+++ b/src/crc32c/src/crc32c_extend_unittests.h
@@ -0,0 +1,112 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_EXTEND_UNITTESTS_H_
+#define CRC32C_CRC32C_EXTEND_UNITTESTS_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#include "gtest/gtest.h"
+
+// Common test cases for all implementations of Extend().
+
+namespace crc32c {
+
+template<typename TestTraits>
+class ExtendTest : public testing::Test {};
+
+TYPED_TEST_SUITE_P(ExtendTest);
+
+TYPED_TEST_P(ExtendTest, StandardResults) {
+ // From rfc3720 section B.4.
+ uint8_t buf[32];
+
+ std::memset(buf, 0, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa),
+ TypeParam::Extend(0, buf, sizeof(buf)));
+
+ std::memset(buf, 0xff, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43),
+ TypeParam::Extend(0, buf, sizeof(buf)));
+
+ for (int i = 0; i < 32; ++i)
+ buf[i] = static_cast<uint8_t>(i);
+ EXPECT_EQ(static_cast<uint32_t>(0x46dd794e),
+ TypeParam::Extend(0, buf, sizeof(buf)));
+
+ for (int i = 0; i < 32; ++i)
+ buf[i] = static_cast<uint8_t>(31 - i);
+ EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c),
+ TypeParam::Extend(0, buf, sizeof(buf)));
+
+ uint8_t data[48] = {
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ };
+ EXPECT_EQ(static_cast<uint32_t>(0xd9963a56),
+ TypeParam::Extend(0, data, sizeof(data)));
+}
+
+TYPED_TEST_P(ExtendTest, HelloWorld) {
+ const uint8_t* hello_space_world =
+ reinterpret_cast<const uint8_t*>("hello world");
+ const uint8_t* hello_space = reinterpret_cast<const uint8_t*>("hello ");
+ const uint8_t* world = reinterpret_cast<const uint8_t*>("world");
+
+ EXPECT_EQ(TypeParam::Extend(0, hello_space_world, 11),
+ TypeParam::Extend(TypeParam::Extend(0, hello_space, 6), world, 5));
+}
+
+TYPED_TEST_P(ExtendTest, BufferSlicing) {
+ uint8_t buffer[48] = {
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ };
+
+ for (size_t i = 0; i < 48; ++i) {
+ for (size_t j = i + 1; j <= 48; ++j) {
+ uint32_t crc = 0;
+
+ if (i > 0) crc = TypeParam::Extend(crc, buffer, i);
+ crc = TypeParam::Extend(crc, buffer + i, j - i);
+ if (j < 48) crc = TypeParam::Extend(crc, buffer + j, 48 - j);
+
+ EXPECT_EQ(static_cast<uint32_t>(0xd9963a56), crc);
+ }
+ }
+}
+
+TYPED_TEST_P(ExtendTest, LargeBufferSlicing) {
+ uint8_t buffer[2048];
+ for (size_t i = 0; i < 2048; i++)
+ buffer[i] = static_cast<uint8_t>(3 * i * i + 7 * i + 11);
+
+ for (size_t i = 0; i < 2048; ++i) {
+ for (size_t j = i + 1; j <= 2048; ++j) {
+ uint32_t crc = 0;
+
+ if (i > 0) crc = TypeParam::Extend(crc, buffer, i);
+ crc = TypeParam::Extend(crc, buffer + i, j - i);
+ if (j < 2048) crc = TypeParam::Extend(crc, buffer + j, 2048 - j);
+
+ EXPECT_EQ(static_cast<uint32_t>(0x36dcc753), crc);
+ }
+ }
+}
+
+REGISTER_TYPED_TEST_SUITE_P(ExtendTest,
+ StandardResults,
+ HelloWorld,
+ BufferSlicing,
+ LargeBufferSlicing);
+
+} // namespace crc32c
+
+#endif // CRC32C_CRC32C_EXTEND_UNITTESTS_H_
diff --git a/src/crc32c/src/crc32c_internal.h b/src/crc32c/src/crc32c_internal.h
new file mode 100644
index 0000000000..2bd23dea43
--- /dev/null
+++ b/src/crc32c/src/crc32c_internal.h
@@ -0,0 +1,23 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_INTERNAL_H_
+#define CRC32C_CRC32C_INTERNAL_H_
+
+// Internal functions that may change between releases.
+
+#include <cstddef>
+#include <cstdint>
+
+namespace crc32c {
+
+// Un-accelerated implementation that works on all CPUs.
+uint32_t ExtendPortable(uint32_t crc, const uint8_t* data, size_t count);
+
+// CRCs are pre- and post- conditioned by xoring with all ones.
+static constexpr const uint32_t kCRC32Xor = static_cast<uint32_t>(0xffffffffU);
+
+} // namespace crc32c
+
+#endif // CRC32C_CRC32C_INTERNAL_H_
diff --git a/src/crc32c/src/crc32c_portable.cc b/src/crc32c/src/crc32c_portable.cc
new file mode 100644
index 0000000000..31ec6eac53
--- /dev/null
+++ b/src/crc32c/src/crc32c_portable.cc
@@ -0,0 +1,351 @@
+// Copyright 2008 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "./crc32c_internal.h"
+
+#include <cstddef>
+#include <cstdint>
+
+#include "./crc32c_prefetch.h"
+#include "./crc32c_read_le.h"
+#include "./crc32c_round_up.h"
+
+namespace {
+
+const uint32_t kByteExtensionTable[256] = {
+ 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c,
+ 0x26a1e7e8, 0xd4ca64eb, 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
+ 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, 0x105ec76f, 0xe235446c,
+ 0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
+ 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc,
+ 0xbc267848, 0x4e4dfb4b, 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
+ 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, 0xaa64d611, 0x580f5512,
+ 0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
+ 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad,
+ 0x1642ae59, 0xe4292d5a, 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
+ 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, 0x417b1dbc, 0xb3109ebf,
+ 0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
+ 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f,
+ 0xed03a29b, 0x1f682198, 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
+ 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, 0xdbfc821c, 0x2997011f,
+ 0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
+ 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e,
+ 0x4767748a, 0xb50cf789, 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
+ 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, 0x7198540d, 0x83f3d70e,
+ 0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
+ 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de,
+ 0xdde0eb2a, 0x2f8b6829, 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
+ 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, 0x082f63b7, 0xfa44e0b4,
+ 0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
+ 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b,
+ 0xb4091bff, 0x466298fc, 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
+ 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, 0xa24bb5a6, 0x502036a5,
+ 0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
+ 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975,
+ 0x0e330a81, 0xfc588982, 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
+ 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, 0x38cc2a06, 0xcaa7a905,
+ 0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
+ 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8,
+ 0xe52cc12c, 0x1747422f, 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
+ 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, 0xd3d3e1ab, 0x21b862a8,
+ 0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
+ 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78,
+ 0x7fab5e8c, 0x8dc0dd8f, 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
+ 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, 0x69e9f0d5, 0x9b8273d6,
+ 0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
+ 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69,
+ 0xd5cf889d, 0x27a40b9e, 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
+ 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351};
+
+const uint32_t kStrideExtensionTable0[256] = {
+ 0x00000000, 0x30d23865, 0x61a470ca, 0x517648af, 0xc348e194, 0xf39ad9f1,
+ 0xa2ec915e, 0x923ea93b, 0x837db5d9, 0xb3af8dbc, 0xe2d9c513, 0xd20bfd76,
+ 0x4035544d, 0x70e76c28, 0x21912487, 0x11431ce2, 0x03171d43, 0x33c52526,
+ 0x62b36d89, 0x526155ec, 0xc05ffcd7, 0xf08dc4b2, 0xa1fb8c1d, 0x9129b478,
+ 0x806aa89a, 0xb0b890ff, 0xe1ced850, 0xd11ce035, 0x4322490e, 0x73f0716b,
+ 0x228639c4, 0x125401a1, 0x062e3a86, 0x36fc02e3, 0x678a4a4c, 0x57587229,
+ 0xc566db12, 0xf5b4e377, 0xa4c2abd8, 0x941093bd, 0x85538f5f, 0xb581b73a,
+ 0xe4f7ff95, 0xd425c7f0, 0x461b6ecb, 0x76c956ae, 0x27bf1e01, 0x176d2664,
+ 0x053927c5, 0x35eb1fa0, 0x649d570f, 0x544f6f6a, 0xc671c651, 0xf6a3fe34,
+ 0xa7d5b69b, 0x97078efe, 0x8644921c, 0xb696aa79, 0xe7e0e2d6, 0xd732dab3,
+ 0x450c7388, 0x75de4bed, 0x24a80342, 0x147a3b27, 0x0c5c750c, 0x3c8e4d69,
+ 0x6df805c6, 0x5d2a3da3, 0xcf149498, 0xffc6acfd, 0xaeb0e452, 0x9e62dc37,
+ 0x8f21c0d5, 0xbff3f8b0, 0xee85b01f, 0xde57887a, 0x4c692141, 0x7cbb1924,
+ 0x2dcd518b, 0x1d1f69ee, 0x0f4b684f, 0x3f99502a, 0x6eef1885, 0x5e3d20e0,
+ 0xcc0389db, 0xfcd1b1be, 0xada7f911, 0x9d75c174, 0x8c36dd96, 0xbce4e5f3,
+ 0xed92ad5c, 0xdd409539, 0x4f7e3c02, 0x7fac0467, 0x2eda4cc8, 0x1e0874ad,
+ 0x0a724f8a, 0x3aa077ef, 0x6bd63f40, 0x5b040725, 0xc93aae1e, 0xf9e8967b,
+ 0xa89eded4, 0x984ce6b1, 0x890ffa53, 0xb9ddc236, 0xe8ab8a99, 0xd879b2fc,
+ 0x4a471bc7, 0x7a9523a2, 0x2be36b0d, 0x1b315368, 0x096552c9, 0x39b76aac,
+ 0x68c12203, 0x58131a66, 0xca2db35d, 0xfaff8b38, 0xab89c397, 0x9b5bfbf2,
+ 0x8a18e710, 0xbacadf75, 0xebbc97da, 0xdb6eafbf, 0x49500684, 0x79823ee1,
+ 0x28f4764e, 0x18264e2b, 0x18b8ea18, 0x286ad27d, 0x791c9ad2, 0x49cea2b7,
+ 0xdbf00b8c, 0xeb2233e9, 0xba547b46, 0x8a864323, 0x9bc55fc1, 0xab1767a4,
+ 0xfa612f0b, 0xcab3176e, 0x588dbe55, 0x685f8630, 0x3929ce9f, 0x09fbf6fa,
+ 0x1baff75b, 0x2b7dcf3e, 0x7a0b8791, 0x4ad9bff4, 0xd8e716cf, 0xe8352eaa,
+ 0xb9436605, 0x89915e60, 0x98d24282, 0xa8007ae7, 0xf9763248, 0xc9a40a2d,
+ 0x5b9aa316, 0x6b489b73, 0x3a3ed3dc, 0x0aecebb9, 0x1e96d09e, 0x2e44e8fb,
+ 0x7f32a054, 0x4fe09831, 0xddde310a, 0xed0c096f, 0xbc7a41c0, 0x8ca879a5,
+ 0x9deb6547, 0xad395d22, 0xfc4f158d, 0xcc9d2de8, 0x5ea384d3, 0x6e71bcb6,
+ 0x3f07f419, 0x0fd5cc7c, 0x1d81cddd, 0x2d53f5b8, 0x7c25bd17, 0x4cf78572,
+ 0xdec92c49, 0xee1b142c, 0xbf6d5c83, 0x8fbf64e6, 0x9efc7804, 0xae2e4061,
+ 0xff5808ce, 0xcf8a30ab, 0x5db49990, 0x6d66a1f5, 0x3c10e95a, 0x0cc2d13f,
+ 0x14e49f14, 0x2436a771, 0x7540efde, 0x4592d7bb, 0xd7ac7e80, 0xe77e46e5,
+ 0xb6080e4a, 0x86da362f, 0x97992acd, 0xa74b12a8, 0xf63d5a07, 0xc6ef6262,
+ 0x54d1cb59, 0x6403f33c, 0x3575bb93, 0x05a783f6, 0x17f38257, 0x2721ba32,
+ 0x7657f29d, 0x4685caf8, 0xd4bb63c3, 0xe4695ba6, 0xb51f1309, 0x85cd2b6c,
+ 0x948e378e, 0xa45c0feb, 0xf52a4744, 0xc5f87f21, 0x57c6d61a, 0x6714ee7f,
+ 0x3662a6d0, 0x06b09eb5, 0x12caa592, 0x22189df7, 0x736ed558, 0x43bced3d,
+ 0xd1824406, 0xe1507c63, 0xb02634cc, 0x80f40ca9, 0x91b7104b, 0xa165282e,
+ 0xf0136081, 0xc0c158e4, 0x52fff1df, 0x622dc9ba, 0x335b8115, 0x0389b970,
+ 0x11ddb8d1, 0x210f80b4, 0x7079c81b, 0x40abf07e, 0xd2955945, 0xe2476120,
+ 0xb331298f, 0x83e311ea, 0x92a00d08, 0xa272356d, 0xf3047dc2, 0xc3d645a7,
+ 0x51e8ec9c, 0x613ad4f9, 0x304c9c56, 0x009ea433};
+
+const uint32_t kStrideExtensionTable1[256] = {
+ 0x00000000, 0x54075546, 0xa80eaa8c, 0xfc09ffca, 0x55f123e9, 0x01f676af,
+ 0xfdff8965, 0xa9f8dc23, 0xabe247d2, 0xffe51294, 0x03eced5e, 0x57ebb818,
+ 0xfe13643b, 0xaa14317d, 0x561dceb7, 0x021a9bf1, 0x5228f955, 0x062fac13,
+ 0xfa2653d9, 0xae21069f, 0x07d9dabc, 0x53de8ffa, 0xafd77030, 0xfbd02576,
+ 0xf9cabe87, 0xadcdebc1, 0x51c4140b, 0x05c3414d, 0xac3b9d6e, 0xf83cc828,
+ 0x043537e2, 0x503262a4, 0xa451f2aa, 0xf056a7ec, 0x0c5f5826, 0x58580d60,
+ 0xf1a0d143, 0xa5a78405, 0x59ae7bcf, 0x0da92e89, 0x0fb3b578, 0x5bb4e03e,
+ 0xa7bd1ff4, 0xf3ba4ab2, 0x5a429691, 0x0e45c3d7, 0xf24c3c1d, 0xa64b695b,
+ 0xf6790bff, 0xa27e5eb9, 0x5e77a173, 0x0a70f435, 0xa3882816, 0xf78f7d50,
+ 0x0b86829a, 0x5f81d7dc, 0x5d9b4c2d, 0x099c196b, 0xf595e6a1, 0xa192b3e7,
+ 0x086a6fc4, 0x5c6d3a82, 0xa064c548, 0xf463900e, 0x4d4f93a5, 0x1948c6e3,
+ 0xe5413929, 0xb1466c6f, 0x18beb04c, 0x4cb9e50a, 0xb0b01ac0, 0xe4b74f86,
+ 0xe6add477, 0xb2aa8131, 0x4ea37efb, 0x1aa42bbd, 0xb35cf79e, 0xe75ba2d8,
+ 0x1b525d12, 0x4f550854, 0x1f676af0, 0x4b603fb6, 0xb769c07c, 0xe36e953a,
+ 0x4a964919, 0x1e911c5f, 0xe298e395, 0xb69fb6d3, 0xb4852d22, 0xe0827864,
+ 0x1c8b87ae, 0x488cd2e8, 0xe1740ecb, 0xb5735b8d, 0x497aa447, 0x1d7df101,
+ 0xe91e610f, 0xbd193449, 0x4110cb83, 0x15179ec5, 0xbcef42e6, 0xe8e817a0,
+ 0x14e1e86a, 0x40e6bd2c, 0x42fc26dd, 0x16fb739b, 0xeaf28c51, 0xbef5d917,
+ 0x170d0534, 0x430a5072, 0xbf03afb8, 0xeb04fafe, 0xbb36985a, 0xef31cd1c,
+ 0x133832d6, 0x473f6790, 0xeec7bbb3, 0xbac0eef5, 0x46c9113f, 0x12ce4479,
+ 0x10d4df88, 0x44d38ace, 0xb8da7504, 0xecdd2042, 0x4525fc61, 0x1122a927,
+ 0xed2b56ed, 0xb92c03ab, 0x9a9f274a, 0xce98720c, 0x32918dc6, 0x6696d880,
+ 0xcf6e04a3, 0x9b6951e5, 0x6760ae2f, 0x3367fb69, 0x317d6098, 0x657a35de,
+ 0x9973ca14, 0xcd749f52, 0x648c4371, 0x308b1637, 0xcc82e9fd, 0x9885bcbb,
+ 0xc8b7de1f, 0x9cb08b59, 0x60b97493, 0x34be21d5, 0x9d46fdf6, 0xc941a8b0,
+ 0x3548577a, 0x614f023c, 0x635599cd, 0x3752cc8b, 0xcb5b3341, 0x9f5c6607,
+ 0x36a4ba24, 0x62a3ef62, 0x9eaa10a8, 0xcaad45ee, 0x3eced5e0, 0x6ac980a6,
+ 0x96c07f6c, 0xc2c72a2a, 0x6b3ff609, 0x3f38a34f, 0xc3315c85, 0x973609c3,
+ 0x952c9232, 0xc12bc774, 0x3d2238be, 0x69256df8, 0xc0ddb1db, 0x94dae49d,
+ 0x68d31b57, 0x3cd44e11, 0x6ce62cb5, 0x38e179f3, 0xc4e88639, 0x90efd37f,
+ 0x39170f5c, 0x6d105a1a, 0x9119a5d0, 0xc51ef096, 0xc7046b67, 0x93033e21,
+ 0x6f0ac1eb, 0x3b0d94ad, 0x92f5488e, 0xc6f21dc8, 0x3afbe202, 0x6efcb744,
+ 0xd7d0b4ef, 0x83d7e1a9, 0x7fde1e63, 0x2bd94b25, 0x82219706, 0xd626c240,
+ 0x2a2f3d8a, 0x7e2868cc, 0x7c32f33d, 0x2835a67b, 0xd43c59b1, 0x803b0cf7,
+ 0x29c3d0d4, 0x7dc48592, 0x81cd7a58, 0xd5ca2f1e, 0x85f84dba, 0xd1ff18fc,
+ 0x2df6e736, 0x79f1b270, 0xd0096e53, 0x840e3b15, 0x7807c4df, 0x2c009199,
+ 0x2e1a0a68, 0x7a1d5f2e, 0x8614a0e4, 0xd213f5a2, 0x7beb2981, 0x2fec7cc7,
+ 0xd3e5830d, 0x87e2d64b, 0x73814645, 0x27861303, 0xdb8fecc9, 0x8f88b98f,
+ 0x267065ac, 0x727730ea, 0x8e7ecf20, 0xda799a66, 0xd8630197, 0x8c6454d1,
+ 0x706dab1b, 0x246afe5d, 0x8d92227e, 0xd9957738, 0x259c88f2, 0x719bddb4,
+ 0x21a9bf10, 0x75aeea56, 0x89a7159c, 0xdda040da, 0x74589cf9, 0x205fc9bf,
+ 0xdc563675, 0x88516333, 0x8a4bf8c2, 0xde4cad84, 0x2245524e, 0x76420708,
+ 0xdfbadb2b, 0x8bbd8e6d, 0x77b471a7, 0x23b324e1};
+
+const uint32_t kStrideExtensionTable2[256] = {
+ 0x00000000, 0x678efd01, 0xcf1dfa02, 0xa8930703, 0x9bd782f5, 0xfc597ff4,
+ 0x54ca78f7, 0x334485f6, 0x3243731b, 0x55cd8e1a, 0xfd5e8919, 0x9ad07418,
+ 0xa994f1ee, 0xce1a0cef, 0x66890bec, 0x0107f6ed, 0x6486e636, 0x03081b37,
+ 0xab9b1c34, 0xcc15e135, 0xff5164c3, 0x98df99c2, 0x304c9ec1, 0x57c263c0,
+ 0x56c5952d, 0x314b682c, 0x99d86f2f, 0xfe56922e, 0xcd1217d8, 0xaa9cead9,
+ 0x020fedda, 0x658110db, 0xc90dcc6c, 0xae83316d, 0x0610366e, 0x619ecb6f,
+ 0x52da4e99, 0x3554b398, 0x9dc7b49b, 0xfa49499a, 0xfb4ebf77, 0x9cc04276,
+ 0x34534575, 0x53ddb874, 0x60993d82, 0x0717c083, 0xaf84c780, 0xc80a3a81,
+ 0xad8b2a5a, 0xca05d75b, 0x6296d058, 0x05182d59, 0x365ca8af, 0x51d255ae,
+ 0xf94152ad, 0x9ecfafac, 0x9fc85941, 0xf846a440, 0x50d5a343, 0x375b5e42,
+ 0x041fdbb4, 0x639126b5, 0xcb0221b6, 0xac8cdcb7, 0x97f7ee29, 0xf0791328,
+ 0x58ea142b, 0x3f64e92a, 0x0c206cdc, 0x6bae91dd, 0xc33d96de, 0xa4b36bdf,
+ 0xa5b49d32, 0xc23a6033, 0x6aa96730, 0x0d279a31, 0x3e631fc7, 0x59ede2c6,
+ 0xf17ee5c5, 0x96f018c4, 0xf371081f, 0x94fff51e, 0x3c6cf21d, 0x5be20f1c,
+ 0x68a68aea, 0x0f2877eb, 0xa7bb70e8, 0xc0358de9, 0xc1327b04, 0xa6bc8605,
+ 0x0e2f8106, 0x69a17c07, 0x5ae5f9f1, 0x3d6b04f0, 0x95f803f3, 0xf276fef2,
+ 0x5efa2245, 0x3974df44, 0x91e7d847, 0xf6692546, 0xc52da0b0, 0xa2a35db1,
+ 0x0a305ab2, 0x6dbea7b3, 0x6cb9515e, 0x0b37ac5f, 0xa3a4ab5c, 0xc42a565d,
+ 0xf76ed3ab, 0x90e02eaa, 0x387329a9, 0x5ffdd4a8, 0x3a7cc473, 0x5df23972,
+ 0xf5613e71, 0x92efc370, 0xa1ab4686, 0xc625bb87, 0x6eb6bc84, 0x09384185,
+ 0x083fb768, 0x6fb14a69, 0xc7224d6a, 0xa0acb06b, 0x93e8359d, 0xf466c89c,
+ 0x5cf5cf9f, 0x3b7b329e, 0x2a03aaa3, 0x4d8d57a2, 0xe51e50a1, 0x8290ada0,
+ 0xb1d42856, 0xd65ad557, 0x7ec9d254, 0x19472f55, 0x1840d9b8, 0x7fce24b9,
+ 0xd75d23ba, 0xb0d3debb, 0x83975b4d, 0xe419a64c, 0x4c8aa14f, 0x2b045c4e,
+ 0x4e854c95, 0x290bb194, 0x8198b697, 0xe6164b96, 0xd552ce60, 0xb2dc3361,
+ 0x1a4f3462, 0x7dc1c963, 0x7cc63f8e, 0x1b48c28f, 0xb3dbc58c, 0xd455388d,
+ 0xe711bd7b, 0x809f407a, 0x280c4779, 0x4f82ba78, 0xe30e66cf, 0x84809bce,
+ 0x2c139ccd, 0x4b9d61cc, 0x78d9e43a, 0x1f57193b, 0xb7c41e38, 0xd04ae339,
+ 0xd14d15d4, 0xb6c3e8d5, 0x1e50efd6, 0x79de12d7, 0x4a9a9721, 0x2d146a20,
+ 0x85876d23, 0xe2099022, 0x878880f9, 0xe0067df8, 0x48957afb, 0x2f1b87fa,
+ 0x1c5f020c, 0x7bd1ff0d, 0xd342f80e, 0xb4cc050f, 0xb5cbf3e2, 0xd2450ee3,
+ 0x7ad609e0, 0x1d58f4e1, 0x2e1c7117, 0x49928c16, 0xe1018b15, 0x868f7614,
+ 0xbdf4448a, 0xda7ab98b, 0x72e9be88, 0x15674389, 0x2623c67f, 0x41ad3b7e,
+ 0xe93e3c7d, 0x8eb0c17c, 0x8fb73791, 0xe839ca90, 0x40aacd93, 0x27243092,
+ 0x1460b564, 0x73ee4865, 0xdb7d4f66, 0xbcf3b267, 0xd972a2bc, 0xbefc5fbd,
+ 0x166f58be, 0x71e1a5bf, 0x42a52049, 0x252bdd48, 0x8db8da4b, 0xea36274a,
+ 0xeb31d1a7, 0x8cbf2ca6, 0x242c2ba5, 0x43a2d6a4, 0x70e65352, 0x1768ae53,
+ 0xbffba950, 0xd8755451, 0x74f988e6, 0x137775e7, 0xbbe472e4, 0xdc6a8fe5,
+ 0xef2e0a13, 0x88a0f712, 0x2033f011, 0x47bd0d10, 0x46bafbfd, 0x213406fc,
+ 0x89a701ff, 0xee29fcfe, 0xdd6d7908, 0xbae38409, 0x1270830a, 0x75fe7e0b,
+ 0x107f6ed0, 0x77f193d1, 0xdf6294d2, 0xb8ec69d3, 0x8ba8ec25, 0xec261124,
+ 0x44b51627, 0x233beb26, 0x223c1dcb, 0x45b2e0ca, 0xed21e7c9, 0x8aaf1ac8,
+ 0xb9eb9f3e, 0xde65623f, 0x76f6653c, 0x1178983d};
+
+const uint32_t kStrideExtensionTable3[256] = {
+ 0x00000000, 0xf20c0dfe, 0xe1f46d0d, 0x13f860f3, 0xc604aceb, 0x3408a115,
+ 0x27f0c1e6, 0xd5fccc18, 0x89e52f27, 0x7be922d9, 0x6811422a, 0x9a1d4fd4,
+ 0x4fe183cc, 0xbded8e32, 0xae15eec1, 0x5c19e33f, 0x162628bf, 0xe42a2541,
+ 0xf7d245b2, 0x05de484c, 0xd0228454, 0x222e89aa, 0x31d6e959, 0xc3dae4a7,
+ 0x9fc30798, 0x6dcf0a66, 0x7e376a95, 0x8c3b676b, 0x59c7ab73, 0xabcba68d,
+ 0xb833c67e, 0x4a3fcb80, 0x2c4c517e, 0xde405c80, 0xcdb83c73, 0x3fb4318d,
+ 0xea48fd95, 0x1844f06b, 0x0bbc9098, 0xf9b09d66, 0xa5a97e59, 0x57a573a7,
+ 0x445d1354, 0xb6511eaa, 0x63add2b2, 0x91a1df4c, 0x8259bfbf, 0x7055b241,
+ 0x3a6a79c1, 0xc866743f, 0xdb9e14cc, 0x29921932, 0xfc6ed52a, 0x0e62d8d4,
+ 0x1d9ab827, 0xef96b5d9, 0xb38f56e6, 0x41835b18, 0x527b3beb, 0xa0773615,
+ 0x758bfa0d, 0x8787f7f3, 0x947f9700, 0x66739afe, 0x5898a2fc, 0xaa94af02,
+ 0xb96ccff1, 0x4b60c20f, 0x9e9c0e17, 0x6c9003e9, 0x7f68631a, 0x8d646ee4,
+ 0xd17d8ddb, 0x23718025, 0x3089e0d6, 0xc285ed28, 0x17792130, 0xe5752cce,
+ 0xf68d4c3d, 0x048141c3, 0x4ebe8a43, 0xbcb287bd, 0xaf4ae74e, 0x5d46eab0,
+ 0x88ba26a8, 0x7ab62b56, 0x694e4ba5, 0x9b42465b, 0xc75ba564, 0x3557a89a,
+ 0x26afc869, 0xd4a3c597, 0x015f098f, 0xf3530471, 0xe0ab6482, 0x12a7697c,
+ 0x74d4f382, 0x86d8fe7c, 0x95209e8f, 0x672c9371, 0xb2d05f69, 0x40dc5297,
+ 0x53243264, 0xa1283f9a, 0xfd31dca5, 0x0f3dd15b, 0x1cc5b1a8, 0xeec9bc56,
+ 0x3b35704e, 0xc9397db0, 0xdac11d43, 0x28cd10bd, 0x62f2db3d, 0x90fed6c3,
+ 0x8306b630, 0x710abbce, 0xa4f677d6, 0x56fa7a28, 0x45021adb, 0xb70e1725,
+ 0xeb17f41a, 0x191bf9e4, 0x0ae39917, 0xf8ef94e9, 0x2d1358f1, 0xdf1f550f,
+ 0xcce735fc, 0x3eeb3802, 0xb13145f8, 0x433d4806, 0x50c528f5, 0xa2c9250b,
+ 0x7735e913, 0x8539e4ed, 0x96c1841e, 0x64cd89e0, 0x38d46adf, 0xcad86721,
+ 0xd92007d2, 0x2b2c0a2c, 0xfed0c634, 0x0cdccbca, 0x1f24ab39, 0xed28a6c7,
+ 0xa7176d47, 0x551b60b9, 0x46e3004a, 0xb4ef0db4, 0x6113c1ac, 0x931fcc52,
+ 0x80e7aca1, 0x72eba15f, 0x2ef24260, 0xdcfe4f9e, 0xcf062f6d, 0x3d0a2293,
+ 0xe8f6ee8b, 0x1afae375, 0x09028386, 0xfb0e8e78, 0x9d7d1486, 0x6f711978,
+ 0x7c89798b, 0x8e857475, 0x5b79b86d, 0xa975b593, 0xba8dd560, 0x4881d89e,
+ 0x14983ba1, 0xe694365f, 0xf56c56ac, 0x07605b52, 0xd29c974a, 0x20909ab4,
+ 0x3368fa47, 0xc164f7b9, 0x8b5b3c39, 0x795731c7, 0x6aaf5134, 0x98a35cca,
+ 0x4d5f90d2, 0xbf539d2c, 0xacabfddf, 0x5ea7f021, 0x02be131e, 0xf0b21ee0,
+ 0xe34a7e13, 0x114673ed, 0xc4babff5, 0x36b6b20b, 0x254ed2f8, 0xd742df06,
+ 0xe9a9e704, 0x1ba5eafa, 0x085d8a09, 0xfa5187f7, 0x2fad4bef, 0xdda14611,
+ 0xce5926e2, 0x3c552b1c, 0x604cc823, 0x9240c5dd, 0x81b8a52e, 0x73b4a8d0,
+ 0xa64864c8, 0x54446936, 0x47bc09c5, 0xb5b0043b, 0xff8fcfbb, 0x0d83c245,
+ 0x1e7ba2b6, 0xec77af48, 0x398b6350, 0xcb876eae, 0xd87f0e5d, 0x2a7303a3,
+ 0x766ae09c, 0x8466ed62, 0x979e8d91, 0x6592806f, 0xb06e4c77, 0x42624189,
+ 0x519a217a, 0xa3962c84, 0xc5e5b67a, 0x37e9bb84, 0x2411db77, 0xd61dd689,
+ 0x03e11a91, 0xf1ed176f, 0xe215779c, 0x10197a62, 0x4c00995d, 0xbe0c94a3,
+ 0xadf4f450, 0x5ff8f9ae, 0x8a0435b6, 0x78083848, 0x6bf058bb, 0x99fc5545,
+ 0xd3c39ec5, 0x21cf933b, 0x3237f3c8, 0xc03bfe36, 0x15c7322e, 0xe7cb3fd0,
+ 0xf4335f23, 0x063f52dd, 0x5a26b1e2, 0xa82abc1c, 0xbbd2dcef, 0x49ded111,
+ 0x9c221d09, 0x6e2e10f7, 0x7dd67004, 0x8fda7dfa};
+
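+// Prefetch the input this many bytes ahead of the bytes currently being
+// processed, so the data is already in cache when it is needed.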
+constexpr const ptrdiff_t kPrefetchHorizon = 256;
+
+} // namespace
+
+namespace crc32c {
+
+uint32_t ExtendPortable(uint32_t crc, const uint8_t* data, size_t size) {
+ const uint8_t* p = data;
+ const uint8_t* e = p + size;
+ uint32_t l = crc ^ kCRC32Xor;
+
+// Process one byte at a time.
+#define STEP1 \
+ do { \
+ int c = (l & 0xff) ^ *p++; \
+ l = kByteExtensionTable[c] ^ (l >> 8); \
+ } while (0)
+
+// Process one of the 4 strides of 4-byte data.
+#define STEP4(s) \
+ do { \
+ crc##s = ReadUint32LE(p + s * 4) ^ kStrideExtensionTable3[crc##s & 0xff] ^ \
+ kStrideExtensionTable2[(crc##s >> 8) & 0xff] ^ \
+ kStrideExtensionTable1[(crc##s >> 16) & 0xff] ^ \
+ kStrideExtensionTable0[crc##s >> 24]; \
+ } while (0)
+
+// Process a 16-byte swath of 4 strides, each of which has 4 bytes of data.
+#define STEP16 \
+ do { \
+ STEP4(0); \
+ STEP4(1); \
+ STEP4(2); \
+ STEP4(3); \
+ p += 16; \
+ } while (0)
+
+// Process 4 bytes that were already loaded into a word.
+#define STEP4W(w) \
+ do { \
+ w ^= l; \
+ for (size_t i = 0; i < 4; ++i) { \
+ w = (w >> 8) ^ kByteExtensionTable[w & 0xff]; \
+ } \
+ l = w; \
+ } while (0)
+
+ // Point x at first 4-byte aligned byte in the buffer. This might be past the
+ // end of the buffer.
+ const uint8_t* x = RoundUp<4>(p);
+ if (x <= e) {
+    // Process bytes until p is 4-byte aligned.
+ while (p != x) {
+ STEP1;
+ }
+ }
+
+ if ((e - p) >= 16) {
+ // Load a 16-byte swath into the stride partial results.
+ uint32_t crc0 = ReadUint32LE(p + 0 * 4) ^ l;
+ uint32_t crc1 = ReadUint32LE(p + 1 * 4);
+ uint32_t crc2 = ReadUint32LE(p + 2 * 4);
+ uint32_t crc3 = ReadUint32LE(p + 3 * 4);
+ p += 16;
+
+ while ((e - p) > kPrefetchHorizon) {
+ RequestPrefetch(p + kPrefetchHorizon);
+
+ // Process 64 bytes at a time.
+ STEP16;
+ STEP16;
+ STEP16;
+ STEP16;
+ }
+
+ // Process one 16-byte swath at a time.
+ while ((e - p) >= 16) {
+ STEP16;
+ }
+
+ // Advance one word at a time as far as possible.
+ while ((e - p) >= 4) {
+ STEP4(0);
+ uint32_t tmp = crc0;
+ crc0 = crc1;
+ crc1 = crc2;
+ crc2 = crc3;
+ crc3 = tmp;
+ p += 4;
+ }
+
+ // Combine the 4 partial stride results.
+ l = 0;
+ STEP4W(crc0);
+ STEP4W(crc1);
+ STEP4W(crc2);
+ STEP4W(crc3);
+ }
+
+ // Process the last few bytes.
+ while (p != e) {
+ STEP1;
+ }
+#undef STEP4W
+#undef STEP16
+#undef STEP4
+#undef STEP1
+ return l ^ kCRC32Xor;
+}
+
+} // namespace crc32c
diff --git a/src/crc32c/src/crc32c_portable_unittest.cc b/src/crc32c/src/crc32c_portable_unittest.cc
new file mode 100644
index 0000000000..5098e2c373
--- /dev/null
+++ b/src/crc32c/src/crc32c_portable_unittest.cc
@@ -0,0 +1,20 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "gtest/gtest.h"
+
+#include "./crc32c_extend_unittests.h"
+#include "./crc32c_internal.h"
+
+namespace crc32c {
+
+struct PortableTestTraits {
+ static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
+ return ExtendPortable(crc, data, count);
+ }
+};
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Portable, ExtendTest, PortableTestTraits);
+
+} // namespace crc32c
diff --git a/src/crc32c/src/crc32c_prefetch.h b/src/crc32c/src/crc32c_prefetch.h
new file mode 100644
index 0000000000..aec7d54e84
--- /dev/null
+++ b/src/crc32c/src/crc32c_prefetch.h
@@ -0,0 +1,46 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_PREFETCH_H_
+#define CRC32C_CRC32C_PREFETCH_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#if HAVE_MM_PREFETCH
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#else // !defined(_MSC_VER)
+#include <xmmintrin.h>
+#endif // defined(_MSC_VER)
+
+#endif // HAVE_MM_PREFETCH
+
+namespace crc32c {
+
+// Ask the hardware to prefetch the data at the given address into the L1 cache.
+inline void RequestPrefetch(const uint8_t* address) {
+#if HAVE_BUILTIN_PREFETCH
+ // Clang and GCC implement the __builtin_prefetch non-standard extension,
+ // which maps to the best instruction on the target architecture.
+ __builtin_prefetch(reinterpret_cast<const char*>(address), 0 /* Read only. */,
+ 0 /* No temporal locality. */);
+#elif HAVE_MM_PREFETCH
+ // Visual Studio doesn't implement __builtin_prefetch, but exposes the
+ // PREFETCHNTA instruction via the _mm_prefetch intrinsic.
+ _mm_prefetch(reinterpret_cast<const char*>(address), _MM_HINT_NTA);
+#else
+ // No prefetch support. Silence compiler warnings.
+ (void)address;
+#endif // HAVE_BUILTIN_PREFETCH
+}
+
+} // namespace crc32c
+
+#endif // CRC32C_CRC32C_PREFETCH_H_
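
RequestPrefetch is only a hint; the pattern it supports is issuing the hint a fixed distance ahead of a streaming read loop, as ExtendPortable and ExtendSse42 do with kPrefetchHorizon. A minimal sketch of that pattern, independent of CRC (the 256-byte horizon and 64-byte stride here are illustrative guesses, not tuned values):

#include <cstddef>
#include <cstdint>

uint64_t SumWithPrefetch(const uint8_t* data, size_t size) {
  constexpr size_t kHorizon = 256;  // assumed lookahead distance
  uint64_t sum = 0;
  for (size_t i = 0; i < size; ++i) {
#if defined(__GNUC__) || defined(__clang__)
    // Hint one cache line per 64 bytes, kHorizon bytes ahead of the read.
    if ((i % 64) == 0 && i + kHorizon < size)
      __builtin_prefetch(data + i + kHorizon, 0 /* read */, 0 /* no locality */);
#endif
    sum += data[i];
  }
  return sum;
}
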
diff --git a/src/crc32c/src/crc32c_prefetch_unittest.cc b/src/crc32c/src/crc32c_prefetch_unittest.cc
new file mode 100644
index 0000000000..b34ed2d5fe
--- /dev/null
+++ b/src/crc32c/src/crc32c_prefetch_unittest.cc
@@ -0,0 +1,9 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "./crc32c_prefetch.h"
+
+// There is no easy way to test cache prefetching. We can only test that the
+// crc32c_prefetch.h header compiles on its own, so it doesn't have any unstated
+// dependencies.
diff --git a/src/crc32c/src/crc32c_read_le.h b/src/crc32c/src/crc32c_read_le.h
new file mode 100644
index 0000000000..3bd45fe3aa
--- /dev/null
+++ b/src/crc32c/src/crc32c_read_le.h
@@ -0,0 +1,53 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_READ_LE_H_
+#define CRC32C_CRC32C_READ_LE_H_
+
+#include <cstdint>
+#include <cstring>
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+namespace crc32c {
+
+// Reads a little-endian 32-bit integer from a 32-bit-aligned buffer.
+inline uint32_t ReadUint32LE(const uint8_t* buffer) {
+#if BYTE_ORDER_BIG_ENDIAN
+ return ((static_cast<uint32_t>(static_cast<uint8_t>(buffer[0]))) |
+ (static_cast<uint32_t>(static_cast<uint8_t>(buffer[1])) << 8) |
+ (static_cast<uint32_t>(static_cast<uint8_t>(buffer[2])) << 16) |
+ (static_cast<uint32_t>(static_cast<uint8_t>(buffer[3])) << 24));
+#else // !BYTE_ORDER_BIG_ENDIAN
+ uint32_t result;
+ // This should be optimized to a single instruction.
+ std::memcpy(&result, buffer, sizeof(result));
+ return result;
+#endif // BYTE_ORDER_BIG_ENDIAN
+}
+
+// Reads a little-endian 64-bit integer from a 64-bit-aligned buffer.
+inline uint64_t ReadUint64LE(const uint8_t* buffer) {
+#if BYTE_ORDER_BIG_ENDIAN
+ return ((static_cast<uint64_t>(static_cast<uint8_t>(buffer[0]))) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[1])) << 8) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[2])) << 16) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[3])) << 24) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[4])) << 32) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[5])) << 40) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[6])) << 48) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[7])) << 56));
+#else // !BYTE_ORDER_BIG_ENDIAN
+ uint64_t result;
+ // This should be optimized to a single instruction.
+ std::memcpy(&result, buffer, sizeof(result));
+ return result;
+#endif // BYTE_ORDER_BIG_ENDIAN
+}
+
+} // namespace crc32c
+
+#endif // CRC32C_CRC32C_READ_LE_H_
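
The std::memcpy load above is the strict-aliasing-safe way to pull a word out of a byte buffer, and compilers turn it into a single load; it yields the host-order interpretation, which is why the header keeps an explicit byte-assembly branch for big-endian builds. A small sketch contrasting the two forms:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const uint8_t bytes[4] = {0x78, 0x56, 0x34, 0x12};  // little-endian 0x12345678

  // Explicit byte assembly: correct regardless of host byte order.
  uint32_t assembled = static_cast<uint32_t>(bytes[0]) |
                       (static_cast<uint32_t>(bytes[1]) << 8) |
                       (static_cast<uint32_t>(bytes[2]) << 16) |
                       (static_cast<uint32_t>(bytes[3]) << 24);

  // memcpy load: aliasing-safe and fast, but host-order, so it matches the
  // assembled value only on little-endian machines.
  uint32_t copied;
  std::memcpy(&copied, bytes, sizeof(copied));

  assert(assembled == 0x12345678u);
  (void)copied;
}
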
diff --git a/src/crc32c/src/crc32c_read_le_unittest.cc b/src/crc32c/src/crc32c_read_le_unittest.cc
new file mode 100644
index 0000000000..2a30302adf
--- /dev/null
+++ b/src/crc32c/src/crc32c_read_le_unittest.cc
@@ -0,0 +1,32 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "./crc32c_read_le.h"
+
+#include <cstddef>
+#include <cstdint>
+
+#include "gtest/gtest.h"
+
+#include "./crc32c_round_up.h"
+
+namespace crc32c {
+
+TEST(Crc32CReadLETest, ReadUint32LE) {
+ // little-endian 0x12345678
+ alignas(4) uint8_t bytes[] = {0x78, 0x56, 0x34, 0x12};
+
+ ASSERT_EQ(RoundUp<4>(bytes), bytes) << "Stack array is not aligned";
+ EXPECT_EQ(static_cast<uint32_t>(0x12345678), ReadUint32LE(bytes));
+}
+
+TEST(Crc32CReadLETest, ReadUint64LE) {
+ // little-endian 0x123456789ABCDEF0
+ alignas(8) uint8_t bytes[] = {0xF0, 0xDE, 0xBC, 0x9A, 0x78, 0x56, 0x34, 0x12};
+
+ ASSERT_EQ(RoundUp<8>(bytes), bytes) << "Stack array is not aligned";
+ EXPECT_EQ(static_cast<uint64_t>(0x123456789ABCDEF0), ReadUint64LE(bytes));
+}
+
+} // namespace crc32c
diff --git a/src/crc32c/src/crc32c_round_up.h b/src/crc32c/src/crc32c_round_up.h
new file mode 100644
index 0000000000..d3b922beb9
--- /dev/null
+++ b/src/crc32c/src/crc32c_round_up.h
@@ -0,0 +1,34 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_ROUND_UP_H_
+#define CRC32C_CRC32C_ROUND_UP_H_
+
+#include <cstddef>
+#include <cstdint>
+
+namespace crc32c {
+
+// Returns the smallest number >= the given number that is evenly divisible by N.
+//
+// N must be a power of two.
+template <int N>
+constexpr inline uintptr_t RoundUp(uintptr_t pointer) {
+ static_assert((N & (N - 1)) == 0, "N must be a power of two");
+ return (pointer + (N - 1)) & ~(N - 1);
+}
+
+// Returns the smallest address >= the given address that is aligned to N bytes.
+//
+// N must be a power of two.
+template <int N>
+constexpr inline const uint8_t* RoundUp(const uint8_t* pointer) {
+ static_assert((N & (N - 1)) == 0, "N must be a power of two");
+ return reinterpret_cast<uint8_t*>(
+ RoundUp<N>(reinterpret_cast<uintptr_t>(pointer)));
+}
+
+} // namespace crc32c
+
+#endif // CRC32C_CRC32C_ROUND_UP_H_
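
RoundUp relies on the usual power-of-two alignment trick: adding N-1 carries any misaligned value up to the next multiple, and the mask clears the low bits. A few compile-time checks of the arithmetic, written independently of the templates above:

#include <cstdint>

constexpr uintptr_t RoundUpTo(uintptr_t x, uintptr_t n) {  // n: power of two
  return (x + (n - 1)) & ~(n - 1);
}

static_assert(RoundUpTo(0, 8) == 0, "already aligned");
static_assert(RoundUpTo(1, 8) == 8, "rounds up to the next multiple");
static_assert(RoundUpTo(8, 8) == 8, "aligned values are unchanged");
static_assert(RoundUpTo(13, 4) == 16, "next multiple of 4");
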
diff --git a/src/crc32c/src/crc32c_round_up_unittest.cc b/src/crc32c/src/crc32c_round_up_unittest.cc
new file mode 100644
index 0000000000..5ff657bb5c
--- /dev/null
+++ b/src/crc32c/src/crc32c_round_up_unittest.cc
@@ -0,0 +1,84 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "./crc32c_round_up.h"
+
+#include <cstddef>
+#include <cstdint>
+
+#include "gtest/gtest.h"
+
+namespace crc32c {
+
+TEST(CRC32CRoundUpTest, RoundUpUintptr) {
+ uintptr_t zero = 0;
+
+ ASSERT_EQ(zero, RoundUp<1>(zero));
+ ASSERT_EQ(1U, RoundUp<1>(1U));
+ ASSERT_EQ(2U, RoundUp<1>(2U));
+ ASSERT_EQ(3U, RoundUp<1>(3U));
+ ASSERT_EQ(~static_cast<uintptr_t>(0), RoundUp<1>(~static_cast<uintptr_t>(0)));
+ ASSERT_EQ(~static_cast<uintptr_t>(1), RoundUp<1>(~static_cast<uintptr_t>(1)));
+ ASSERT_EQ(~static_cast<uintptr_t>(2), RoundUp<1>(~static_cast<uintptr_t>(2)));
+ ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<1>(~static_cast<uintptr_t>(3)));
+
+ ASSERT_EQ(zero, RoundUp<2>(zero));
+ ASSERT_EQ(2U, RoundUp<2>(1U));
+ ASSERT_EQ(2U, RoundUp<2>(2U));
+ ASSERT_EQ(4U, RoundUp<2>(3U));
+ ASSERT_EQ(4U, RoundUp<2>(4U));
+ ASSERT_EQ(6U, RoundUp<2>(5U));
+ ASSERT_EQ(6U, RoundUp<2>(6U));
+ ASSERT_EQ(8U, RoundUp<2>(7U));
+ ASSERT_EQ(8U, RoundUp<2>(8U));
+ ASSERT_EQ(~static_cast<uintptr_t>(1), RoundUp<2>(~static_cast<uintptr_t>(1)));
+ ASSERT_EQ(~static_cast<uintptr_t>(1), RoundUp<2>(~static_cast<uintptr_t>(2)));
+ ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<2>(~static_cast<uintptr_t>(3)));
+ ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<2>(~static_cast<uintptr_t>(4)));
+
+ ASSERT_EQ(zero, RoundUp<4>(zero));
+ ASSERT_EQ(4U, RoundUp<4>(1U));
+ ASSERT_EQ(4U, RoundUp<4>(2U));
+ ASSERT_EQ(4U, RoundUp<4>(3U));
+ ASSERT_EQ(4U, RoundUp<4>(4U));
+ ASSERT_EQ(8U, RoundUp<4>(5U));
+ ASSERT_EQ(8U, RoundUp<4>(6U));
+ ASSERT_EQ(8U, RoundUp<4>(7U));
+ ASSERT_EQ(8U, RoundUp<4>(8U));
+ ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(3)));
+ ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(4)));
+ ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(5)));
+ ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(6)));
+ ASSERT_EQ(~static_cast<uintptr_t>(7), RoundUp<4>(~static_cast<uintptr_t>(7)));
+ ASSERT_EQ(~static_cast<uintptr_t>(7), RoundUp<4>(~static_cast<uintptr_t>(8)));
+ ASSERT_EQ(~static_cast<uintptr_t>(7), RoundUp<4>(~static_cast<uintptr_t>(9)));
+}
+
+TEST(CRC32CRoundUpTest, RoundUpPointer) {
+ uintptr_t zero = 0, three = 3, four = 4, seven = 7, eight = 8;
+
+ const uint8_t* zero_ptr = reinterpret_cast<const uint8_t*>(zero);
+ const uint8_t* three_ptr = reinterpret_cast<const uint8_t*>(three);
+ const uint8_t* four_ptr = reinterpret_cast<const uint8_t*>(four);
+ const uint8_t* seven_ptr = reinterpret_cast<const uint8_t*>(seven);
+ const uint8_t* eight_ptr = reinterpret_cast<const uint8_t*>(eight);
+
+ ASSERT_EQ(zero_ptr, RoundUp<1>(zero_ptr));
+ ASSERT_EQ(zero_ptr, RoundUp<4>(zero_ptr));
+ ASSERT_EQ(zero_ptr, RoundUp<8>(zero_ptr));
+
+ ASSERT_EQ(three_ptr, RoundUp<1>(three_ptr));
+ ASSERT_EQ(four_ptr, RoundUp<4>(three_ptr));
+ ASSERT_EQ(eight_ptr, RoundUp<8>(three_ptr));
+
+ ASSERT_EQ(four_ptr, RoundUp<1>(four_ptr));
+ ASSERT_EQ(four_ptr, RoundUp<4>(four_ptr));
+ ASSERT_EQ(eight_ptr, RoundUp<8>(four_ptr));
+
+ ASSERT_EQ(seven_ptr, RoundUp<1>(seven_ptr));
+ ASSERT_EQ(eight_ptr, RoundUp<4>(seven_ptr));
+ ASSERT_EQ(eight_ptr, RoundUp<8>(seven_ptr));
+}
+
+} // namespace crc32c
diff --git a/src/crc32c/src/crc32c_sse42.cc b/src/crc32c/src/crc32c_sse42.cc
new file mode 100644
index 0000000000..139520428e
--- /dev/null
+++ b/src/crc32c/src/crc32c_sse42.cc
@@ -0,0 +1,258 @@
+// Copyright 2008 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "./crc32c_sse42.h"
+
+// In a separate source file to allow this accelerated CRC32C function to be
+// compiled with the appropriate compiler flags to enable SSE4.2 instructions.
+
+// This implementation is loosely based on Intel Pub 323405 from April 2011,
+// "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction".
+
+#include <cstddef>
+#include <cstdint>
+
+#include "./crc32c_internal.h"
+#include "./crc32c_prefetch.h"
+#include "./crc32c_read_le.h"
+#include "./crc32c_round_up.h"
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#else // !defined(_MSC_VER)
+#include <nmmintrin.h>
+#endif // defined(_MSC_VER)
+
+namespace crc32c {
+
+namespace {
+
+constexpr const ptrdiff_t kGroups = 3;
+constexpr const ptrdiff_t kBlock0Size = 16 * 1024 / kGroups / 64 * 64;
+constexpr const ptrdiff_t kBlock1Size = 4 * 1024 / kGroups / 8 * 8;
+constexpr const ptrdiff_t kBlock2Size = 1024 / kGroups / 8 * 8;
+
+const uint32_t kBlock0SkipTable[8][16] = {
+ {0x00000000, 0xff770459, 0xfb027e43, 0x04757a1a, 0xf3e88a77, 0x0c9f8e2e,
+ 0x08eaf434, 0xf79df06d, 0xe23d621f, 0x1d4a6646, 0x193f1c5c, 0xe6481805,
+ 0x11d5e868, 0xeea2ec31, 0xead7962b, 0x15a09272},
+ {0x00000000, 0xc196b2cf, 0x86c1136f, 0x4757a1a0, 0x086e502f, 0xc9f8e2e0,
+ 0x8eaf4340, 0x4f39f18f, 0x10dca05e, 0xd14a1291, 0x961db331, 0x578b01fe,
+ 0x18b2f071, 0xd92442be, 0x9e73e31e, 0x5fe551d1},
+ {0x00000000, 0x21b940bc, 0x43728178, 0x62cbc1c4, 0x86e502f0, 0xa75c424c,
+ 0xc5978388, 0xe42ec334, 0x08267311, 0x299f33ad, 0x4b54f269, 0x6aedb2d5,
+ 0x8ec371e1, 0xaf7a315d, 0xcdb1f099, 0xec08b025},
+ {0x00000000, 0x104ce622, 0x2099cc44, 0x30d52a66, 0x41339888, 0x517f7eaa,
+ 0x61aa54cc, 0x71e6b2ee, 0x82673110, 0x922bd732, 0xa2fefd54, 0xb2b21b76,
+ 0xc354a998, 0xd3184fba, 0xe3cd65dc, 0xf38183fe},
+ {0x00000000, 0x012214d1, 0x024429a2, 0x03663d73, 0x04885344, 0x05aa4795,
+ 0x06cc7ae6, 0x07ee6e37, 0x0910a688, 0x0832b259, 0x0b548f2a, 0x0a769bfb,
+ 0x0d98f5cc, 0x0cbae11d, 0x0fdcdc6e, 0x0efec8bf},
+ {0x00000000, 0x12214d10, 0x24429a20, 0x3663d730, 0x48853440, 0x5aa47950,
+ 0x6cc7ae60, 0x7ee6e370, 0x910a6880, 0x832b2590, 0xb548f2a0, 0xa769bfb0,
+ 0xd98f5cc0, 0xcbae11d0, 0xfdcdc6e0, 0xefec8bf0},
+ {0x00000000, 0x27f8a7f1, 0x4ff14fe2, 0x6809e813, 0x9fe29fc4, 0xb81a3835,
+ 0xd013d026, 0xf7eb77d7, 0x3a294979, 0x1dd1ee88, 0x75d8069b, 0x5220a16a,
+ 0xa5cbd6bd, 0x8233714c, 0xea3a995f, 0xcdc23eae},
+ {0x00000000, 0x745292f2, 0xe8a525e4, 0x9cf7b716, 0xd4a63d39, 0xa0f4afcb,
+ 0x3c0318dd, 0x48518a2f, 0xaca00c83, 0xd8f29e71, 0x44052967, 0x3057bb95,
+ 0x780631ba, 0x0c54a348, 0x90a3145e, 0xe4f186ac},
+};
+const uint32_t kBlock1SkipTable[8][16] = {
+ {0x00000000, 0x79113270, 0xf22264e0, 0x8b335690, 0xe1a8bf31, 0x98b98d41,
+ 0x138adbd1, 0x6a9be9a1, 0xc6bd0893, 0xbfac3ae3, 0x349f6c73, 0x4d8e5e03,
+ 0x2715b7a2, 0x5e0485d2, 0xd537d342, 0xac26e132},
+ {0x00000000, 0x889667d7, 0x14c0b95f, 0x9c56de88, 0x298172be, 0xa1171569,
+ 0x3d41cbe1, 0xb5d7ac36, 0x5302e57c, 0xdb9482ab, 0x47c25c23, 0xcf543bf4,
+ 0x7a8397c2, 0xf215f015, 0x6e432e9d, 0xe6d5494a},
+ {0x00000000, 0xa605caf8, 0x49e7e301, 0xefe229f9, 0x93cfc602, 0x35ca0cfa,
+ 0xda282503, 0x7c2deffb, 0x2273faf5, 0x8476300d, 0x6b9419f4, 0xcd91d30c,
+ 0xb1bc3cf7, 0x17b9f60f, 0xf85bdff6, 0x5e5e150e},
+ {0x00000000, 0x44e7f5ea, 0x89cfebd4, 0xcd281e3e, 0x1673a159, 0x529454b3,
+ 0x9fbc4a8d, 0xdb5bbf67, 0x2ce742b2, 0x6800b758, 0xa528a966, 0xe1cf5c8c,
+ 0x3a94e3eb, 0x7e731601, 0xb35b083f, 0xf7bcfdd5},
+ {0x00000000, 0x59ce8564, 0xb39d0ac8, 0xea538fac, 0x62d66361, 0x3b18e605,
+ 0xd14b69a9, 0x8885eccd, 0xc5acc6c2, 0x9c6243a6, 0x7631cc0a, 0x2fff496e,
+ 0xa77aa5a3, 0xfeb420c7, 0x14e7af6b, 0x4d292a0f},
+ {0x00000000, 0x8eb5fb75, 0x1887801b, 0x96327b6e, 0x310f0036, 0xbfbafb43,
+ 0x2988802d, 0xa73d7b58, 0x621e006c, 0xecabfb19, 0x7a998077, 0xf42c7b02,
+ 0x5311005a, 0xdda4fb2f, 0x4b968041, 0xc5237b34},
+ {0x00000000, 0xc43c00d8, 0x8d947741, 0x49a87799, 0x1ec49873, 0xdaf898ab,
+ 0x9350ef32, 0x576cefea, 0x3d8930e6, 0xf9b5303e, 0xb01d47a7, 0x7421477f,
+ 0x234da895, 0xe771a84d, 0xaed9dfd4, 0x6ae5df0c},
+ {0x00000000, 0x7b1261cc, 0xf624c398, 0x8d36a254, 0xe9a5f1c1, 0x92b7900d,
+ 0x1f813259, 0x64935395, 0xd6a79573, 0xadb5f4bf, 0x208356eb, 0x5b913727,
+ 0x3f0264b2, 0x4410057e, 0xc926a72a, 0xb234c6e6},
+};
+const uint32_t kBlock2SkipTable[8][16] = {
+ {0x00000000, 0x8f158014, 0x1bc776d9, 0x94d2f6cd, 0x378eedb2, 0xb89b6da6,
+ 0x2c499b6b, 0xa35c1b7f, 0x6f1ddb64, 0xe0085b70, 0x74daadbd, 0xfbcf2da9,
+ 0x589336d6, 0xd786b6c2, 0x4354400f, 0xcc41c01b},
+ {0x00000000, 0xde3bb6c8, 0xb99b1b61, 0x67a0ada9, 0x76da4033, 0xa8e1f6fb,
+ 0xcf415b52, 0x117aed9a, 0xedb48066, 0x338f36ae, 0x542f9b07, 0x8a142dcf,
+ 0x9b6ec055, 0x4555769d, 0x22f5db34, 0xfcce6dfc},
+ {0x00000000, 0xde85763d, 0xb8e69a8b, 0x6663ecb6, 0x742143e7, 0xaaa435da,
+ 0xccc7d96c, 0x1242af51, 0xe84287ce, 0x36c7f1f3, 0x50a41d45, 0x8e216b78,
+ 0x9c63c429, 0x42e6b214, 0x24855ea2, 0xfa00289f},
+ {0x00000000, 0xd569796d, 0xaf3e842b, 0x7a57fd46, 0x5b917ea7, 0x8ef807ca,
+ 0xf4affa8c, 0x21c683e1, 0xb722fd4e, 0x624b8423, 0x181c7965, 0xcd750008,
+ 0xecb383e9, 0x39dafa84, 0x438d07c2, 0x96e47eaf},
+ {0x00000000, 0x6ba98c6d, 0xd75318da, 0xbcfa94b7, 0xab4a4745, 0xc0e3cb28,
+ 0x7c195f9f, 0x17b0d3f2, 0x5378f87b, 0x38d17416, 0x842be0a1, 0xef826ccc,
+ 0xf832bf3e, 0x939b3353, 0x2f61a7e4, 0x44c82b89},
+ {0x00000000, 0xa6f1f0f6, 0x480f971d, 0xeefe67eb, 0x901f2e3a, 0x36eedecc,
+ 0xd810b927, 0x7ee149d1, 0x25d22a85, 0x8323da73, 0x6dddbd98, 0xcb2c4d6e,
+ 0xb5cd04bf, 0x133cf449, 0xfdc293a2, 0x5b336354},
+ {0x00000000, 0x4ba4550a, 0x9748aa14, 0xdcecff1e, 0x2b7d22d9, 0x60d977d3,
+ 0xbc3588cd, 0xf791ddc7, 0x56fa45b2, 0x1d5e10b8, 0xc1b2efa6, 0x8a16baac,
+ 0x7d87676b, 0x36233261, 0xeacfcd7f, 0xa16b9875},
+ {0x00000000, 0xadf48b64, 0x5e056039, 0xf3f1eb5d, 0xbc0ac072, 0x11fe4b16,
+ 0xe20fa04b, 0x4ffb2b2f, 0x7df9f615, 0xd00d7d71, 0x23fc962c, 0x8e081d48,
+ 0xc1f33667, 0x6c07bd03, 0x9ff6565e, 0x3202dd3a},
+};
+
+constexpr const ptrdiff_t kPrefetchHorizon = 256;
+
+} // namespace
+
+uint32_t ExtendSse42(uint32_t crc, const uint8_t* data, size_t size) {
+ const uint8_t* p = data;
+ const uint8_t* e = data + size;
+ uint32_t l = crc ^ kCRC32Xor;
+
+#define STEP1 \
+ do { \
+ l = _mm_crc32_u8(l, *p++); \
+ } while (0)
+
+#define STEP4(crc) \
+ do { \
+ crc = _mm_crc32_u32(crc, ReadUint32LE(p)); \
+ p += 4; \
+ } while (0)
+
+#define STEP8(crc, data) \
+ do { \
+ crc = _mm_crc32_u64(crc, ReadUint64LE(data)); \
+ data += 8; \
+ } while (0)
+
+#define STEP8BY3(crc0, crc1, crc2, p0, p1, p2) \
+ do { \
+ STEP8(crc0, p0); \
+ STEP8(crc1, p1); \
+ STEP8(crc2, p2); \
+ } while (0)
+
+#define STEP8X3(crc0, crc1, crc2, bs) \
+ do { \
+ crc0 = _mm_crc32_u64(crc0, ReadUint64LE(p)); \
+ crc1 = _mm_crc32_u64(crc1, ReadUint64LE(p + bs)); \
+ crc2 = _mm_crc32_u64(crc2, ReadUint64LE(p + 2 * bs)); \
+ p += 8; \
+ } while (0)
+
+#define SKIP_BLOCK(crc, tab) \
+ do { \
+ crc = tab[0][crc & 0xf] ^ tab[1][(crc >> 4) & 0xf] ^ \
+ tab[2][(crc >> 8) & 0xf] ^ tab[3][(crc >> 12) & 0xf] ^ \
+ tab[4][(crc >> 16) & 0xf] ^ tab[5][(crc >> 20) & 0xf] ^ \
+ tab[6][(crc >> 24) & 0xf] ^ tab[7][(crc >> 28) & 0xf]; \
+ } while (0)
+
+ // Point x at first 8-byte aligned byte in the buffer. This might be past the
+ // end of the buffer.
+ const uint8_t* x = RoundUp<8>(p);
+ if (x <= e) {
+ // Process bytes until p is 8-byte aligned.
+ while (p != x) {
+ STEP1;
+ }
+ }
+
+ // Process the data in predetermined block sizes with tables for quickly
+ // combining the checksum. Experimentally it's better to use larger block
+ // sizes where possible, so use a hierarchy of decreasing block sizes.
+ uint64_t l64 = l;
+ while ((e - p) >= kGroups * kBlock0Size) {
+ uint64_t l641 = 0;
+ uint64_t l642 = 0;
+ for (int i = 0; i < kBlock0Size; i += 8 * 8) {
+ // Prefetch ahead to hide latency.
+ RequestPrefetch(p + kPrefetchHorizon);
+ RequestPrefetch(p + kBlock0Size + kPrefetchHorizon);
+ RequestPrefetch(p + 2 * kBlock0Size + kPrefetchHorizon);
+
+ // Process 64 bytes at a time.
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ STEP8X3(l64, l641, l642, kBlock0Size);
+ }
+
+ // Combine results.
+ SKIP_BLOCK(l64, kBlock0SkipTable);
+ l64 ^= l641;
+ SKIP_BLOCK(l64, kBlock0SkipTable);
+ l64 ^= l642;
+ p += (kGroups - 1) * kBlock0Size;
+ }
+ while ((e - p) >= kGroups * kBlock1Size) {
+ uint64_t l641 = 0;
+ uint64_t l642 = 0;
+ for (int i = 0; i < kBlock1Size; i += 8) {
+ STEP8X3(l64, l641, l642, kBlock1Size);
+ }
+ SKIP_BLOCK(l64, kBlock1SkipTable);
+ l64 ^= l641;
+ SKIP_BLOCK(l64, kBlock1SkipTable);
+ l64 ^= l642;
+ p += (kGroups - 1) * kBlock1Size;
+ }
+ while ((e - p) >= kGroups * kBlock2Size) {
+ uint64_t l641 = 0;
+ uint64_t l642 = 0;
+ for (int i = 0; i < kBlock2Size; i += 8) {
+ STEP8X3(l64, l641, l642, kBlock2Size);
+ }
+ SKIP_BLOCK(l64, kBlock2SkipTable);
+ l64 ^= l641;
+ SKIP_BLOCK(l64, kBlock2SkipTable);
+ l64 ^= l642;
+ p += (kGroups - 1) * kBlock2Size;
+ }
+
+ // Process bytes 16 at a time
+ while ((e - p) >= 16) {
+ STEP8(l64, p);
+ STEP8(l64, p);
+ }
+
+ l = static_cast<uint32_t>(l64);
+ // Process the last few bytes.
+ while (p != e) {
+ STEP1;
+ }
+#undef SKIP_BLOCK
+#undef STEP8X3
+#undef STEP8BY3
+#undef STEP8
+#undef STEP4
+#undef STEP1
+
+ return l ^ kCRC32Xor;
+}
+
+} // namespace crc32c
+
+#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
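
Stripped of the three-way striding and the skip tables, the hardware path is just the SSE4.2 CRC32 instruction applied eight bytes at a time. A minimal single-stream sketch (compile with -msse4.2, no runtime CPU check here) is shown below; it is correct but leaves the instruction's latency exposed, which is exactly what the block scheme above hides by interleaving three streams:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <nmmintrin.h>  // _mm_crc32_u8 / _mm_crc32_u64

uint32_t SimpleSse42Crc32c(const uint8_t* p, size_t size) {
  uint64_t crc = 0xffffffffu;  // pre-/post-conditioning, as with kCRC32Xor
  while (size >= 8) {
    uint64_t word;
    std::memcpy(&word, p, sizeof(word));  // little-endian load, as in ReadUint64LE
    crc = _mm_crc32_u64(crc, word);
    p += 8;
    size -= 8;
  }
  while (size > 0) {
    crc = _mm_crc32_u8(static_cast<uint32_t>(crc), *p++);
    --size;
  }
  return static_cast<uint32_t>(crc) ^ 0xffffffffu;
}
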
diff --git a/src/crc32c/src/crc32c_sse42.h b/src/crc32c/src/crc32c_sse42.h
new file mode 100644
index 0000000000..95da926632
--- /dev/null
+++ b/src/crc32c/src/crc32c_sse42.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_SSE42_H_
+#define CRC32C_CRC32C_SSE42_H_
+
+// X86-specific code.
+
+#include <cstddef>
+#include <cstdint>
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+// The hardware-accelerated implementation is only enabled for 64-bit builds,
+// because a straightforward 32-bit implementation actually runs slower than the
+// portable version. Most X86 machines are 64-bit nowadays, so it doesn't make
+// much sense to spend time building an optimized hardware-accelerated
+// implementation for 32-bit builds.
+#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+namespace crc32c {
+
+// SSE4.2-accelerated implementation in crc32c_sse42.cc
+uint32_t ExtendSse42(uint32_t crc, const uint8_t* data, size_t count);
+
+} // namespace crc32c
+
+#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+#endif // CRC32C_CRC32C_SSE42_H_
diff --git a/src/crc32c/src/crc32c_sse42_check.h b/src/crc32c/src/crc32c_sse42_check.h
new file mode 100644
index 0000000000..e7528912a6
--- /dev/null
+++ b/src/crc32c/src/crc32c_sse42_check.h
@@ -0,0 +1,50 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef CRC32C_CRC32C_SSE42_CHECK_H_
+#define CRC32C_CRC32C_SSE42_CHECK_H_
+
+// X86-specific code checking the availability of SSE4.2 instructions.
+
+#include <cstddef>
+#include <cstdint>
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+// If the compiler supports SSE4.2, it definitely supports X86.
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+
+namespace crc32c {
+
+inline bool CanUseSse42() {
+ int cpu_info[4];
+ __cpuid(cpu_info, 1);
+ return (cpu_info[2] & (1 << 20)) != 0;
+}
+
+} // namespace crc32c
+
+#else // !defined(_MSC_VER)
+#include <cpuid.h>
+
+namespace crc32c {
+
+inline bool CanUseSse42() {
+ unsigned int eax, ebx, ecx, edx;
+ return __get_cpuid(1, &eax, &ebx, &ecx, &edx) && ((ecx & (1 << 20)) != 0);
+}
+
+} // namespace crc32c
+
+#endif // defined(_MSC_VER)
+
+#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+#endif // CRC32C_CRC32C_SSE42_CHECK_H_
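
CanUseSse42 only answers the question; the library pairs it with a one-time dispatch so the CPUID query never sits on the hot path. A self-contained sketch of the detection half (GCC/Clang on x86 only; the printf stands in for choosing between ExtendSse42 and ExtendPortable):

#include <cpuid.h>
#include <cstdint>
#include <cstdio>

static bool HasSse42() {
  unsigned int eax, ebx, ecx, edx;
  // CPUID leaf 1, ECX bit 20 advertises SSE4.2 (and thus the CRC32 instruction).
  return __get_cpuid(1, &eax, &ebx, &ecx, &edx) && (ecx & (1u << 20)) != 0;
}

int main() {
  // A function-local static runs the query exactly once, thread-safely;
  // real code would cache a function pointer the same way.
  static const bool use_hw = HasSse42();
  std::printf("SSE4.2 CRC32 available: %s\n", use_hw ? "yes" : "no");
}
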
diff --git a/src/crc32c/src/crc32c_sse42_unittest.cc b/src/crc32c/src/crc32c_sse42_unittest.cc
new file mode 100644
index 0000000000..c73ad8ddd1
--- /dev/null
+++ b/src/crc32c/src/crc32c_sse42_unittest.cc
@@ -0,0 +1,24 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "gtest/gtest.h"
+
+#include "./crc32c_extend_unittests.h"
+#include "./crc32c_sse42.h"
+
+namespace crc32c {
+
+#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+struct Sse42TestTraits {
+ static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
+ return ExtendSse42(crc, data, count);
+ }
+};
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Sse42, ExtendTest, Sse42TestTraits);
+
+#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
+
+} // namespace crc32c
diff --git a/src/crc32c/src/crc32c_test_main.cc b/src/crc32c/src/crc32c_test_main.cc
new file mode 100644
index 0000000000..275ee380c6
--- /dev/null
+++ b/src/crc32c/src/crc32c_test_main.cc
@@ -0,0 +1,22 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef CRC32C_HAVE_CONFIG_H
+#include "crc32c/crc32c_config.h"
+#endif
+
+#include "gtest/gtest.h"
+
+#if CRC32C_TESTS_BUILT_WITH_GLOG
+#include "glog/logging.h"
+#endif // CRC32C_TESTS_BUILT_WITH_GLOG
+
+int main(int argc, char** argv) {
+#if CRC32C_TESTS_BUILT_WITH_GLOG
+ google::InitGoogleLogging(argv[0]);
+ google::InstallFailureSignalHandler();
+#endif // CRC32C_TESTS_BUILT_WITH_GLOG
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/src/crc32c/src/crc32c_unittest.cc b/src/crc32c/src/crc32c_unittest.cc
new file mode 100644
index 0000000000..d6c6af680c
--- /dev/null
+++ b/src/crc32c/src/crc32c_unittest.cc
@@ -0,0 +1,129 @@
+// Copyright 2017 The CRC32C Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "crc32c/crc32c.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#include "gtest/gtest.h"
+
+#include "./crc32c_extend_unittests.h"
+
+TEST(Crc32CTest, Crc32c) {
+ // From rfc3720 section B.4.
+ uint8_t buf[32];
+
+ std::memset(buf, 0, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa),
+ crc32c::Crc32c(buf, sizeof(buf)));
+
+ std::memset(buf, 0xff, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43),
+ crc32c::Crc32c(buf, sizeof(buf)));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<uint8_t>(i);
+ EXPECT_EQ(static_cast<uint32_t>(0x46dd794e),
+ crc32c::Crc32c(buf, sizeof(buf)));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<uint8_t>(31 - i);
+ EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c),
+ crc32c::Crc32c(buf, sizeof(buf)));
+
+ uint8_t data[48] = {
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ };
+ EXPECT_EQ(static_cast<uint32_t>(0xd9963a56),
+ crc32c::Crc32c(data, sizeof(data)));
+}
+
+namespace crc32c {
+
+struct ApiTestTraits {
+ static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
+ return ::crc32c::Extend(crc, data, count);
+ }
+};
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Api, ExtendTest, ApiTestTraits);
+
+} // namespace crc32c
+
+TEST(CRC32CTest, Crc32cCharPointer) {
+ char buf[32];
+
+ std::memset(buf, 0, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa),
+ crc32c::Crc32c(buf, sizeof(buf)));
+
+ std::memset(buf, 0xff, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43),
+ crc32c::Crc32c(buf, sizeof(buf)));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<char>(i);
+ EXPECT_EQ(static_cast<uint32_t>(0x46dd794e),
+ crc32c::Crc32c(buf, sizeof(buf)));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<char>(31 - i);
+ EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c),
+ crc32c::Crc32c(buf, sizeof(buf)));
+}
+
+TEST(CRC32CTest, Crc32cStdString) {
+ std::string buf;
+ buf.resize(32);
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<char>(0x00);
+ EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa), crc32c::Crc32c(buf));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = '\xff';
+ EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43), crc32c::Crc32c(buf));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<char>(i);
+ EXPECT_EQ(static_cast<uint32_t>(0x46dd794e), crc32c::Crc32c(buf));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<char>(31 - i);
+ EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c), crc32c::Crc32c(buf));
+}
+
+#if __cplusplus > 201402L
+#if __has_include(<string_view>)
+
+TEST(CRC32CTest, Crc32cStdStringView) {
+ uint8_t buf[32];
+ std::string_view view(reinterpret_cast<const char*>(buf), sizeof(buf));
+
+ std::memset(buf, 0, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa), crc32c::Crc32c(view));
+
+ std::memset(buf, 0xff, sizeof(buf));
+ EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43), crc32c::Crc32c(view));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<uint8_t>(i);
+ EXPECT_EQ(static_cast<uint32_t>(0x46dd794e), crc32c::Crc32c(view));
+
+ for (size_t i = 0; i < 32; ++i)
+ buf[i] = static_cast<uint8_t>(31 - i);
+ EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c), crc32c::Crc32c(view));
+}
+
+#endif // __has_include(<string_view>)
+#endif // __cplusplus > 201402L
+
+#define TESTED_EXTEND Extend
+#include "./crc32c_extend_unittests.h"
+#undef TESTED_EXTEND
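
For reference, typical use of the public API exercised by these tests is either a one-shot Crc32c() call or an incremental Extend() chain; the sketch below uses only the overloads that appear above and checks that the two forms agree:

#include <cstdint>
#include <cstdio>
#include <string>

#include "crc32c/crc32c.h"

int main() {
  const std::string payload = "hello, crc32c";

  // One-shot checksum over a std::string.
  uint32_t whole = crc32c::Crc32c(payload);

  // Incremental checksum: start from 0 and Extend over two halves.
  const uint8_t* bytes = reinterpret_cast<const uint8_t*>(payload.data());
  uint32_t partial = crc32c::Extend(0, bytes, 5);
  partial = crc32c::Extend(partial, bytes + 5, payload.size() - 5);

  std::printf("match: %s\n", whole == partial ? "yes" : "no");  // prints "yes"
}
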
diff --git a/src/flatfile.h b/src/flatfile.h
index 374ceff411..d80682d383 100644
--- a/src/flatfile.h
+++ b/src/flatfile.h
@@ -20,7 +20,7 @@ struct FlatFilePos
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(VARINT(nFile, VarIntMode::NONNEGATIVE_SIGNED));
+ READWRITE(VARINT_MODE(nFile, VarIntMode::NONNEGATIVE_SIGNED));
READWRITE(VARINT(nPos));
}
diff --git a/src/httprpc.cpp b/src/httprpc.cpp
index ff75789223..4d49736140 100644
--- a/src/httprpc.cpp
+++ b/src/httprpc.cpp
@@ -174,7 +174,7 @@ static bool HTTPReq_JSONRPC(HTTPRequest* req, const std::string &)
/* Deter brute-forcing
If this results in a DoS the user really
shouldn't have their RPC port exposed. */
- MilliSleep(250);
+ UninterruptibleSleep(std::chrono::milliseconds{250});
req->WriteHeader("WWW-Authenticate", WWW_AUTH_HEADER_DATA);
req->WriteReply(HTTP_UNAUTHORIZED);
diff --git a/src/httpserver.cpp b/src/httpserver.cpp
index 0e13b85806..11d73b7c9a 100644
--- a/src/httpserver.cpp
+++ b/src/httpserver.cpp
@@ -236,7 +236,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
if (hreq->GetRequestMethod() == HTTPRequest::UNKNOWN) {
LogPrint(BCLog::HTTP, "HTTP request from %s rejected: Unknown HTTP request method\n",
hreq->GetPeer().ToString());
- hreq->WriteReply(HTTP_BADMETHOD);
+ hreq->WriteReply(HTTP_BAD_METHOD);
return;
}
@@ -268,10 +268,10 @@ static void http_request_cb(struct evhttp_request* req, void* arg)
item.release(); /* if true, queue took ownership */
else {
LogPrintf("WARNING: request rejected because http work queue depth exceeded, it can be increased with the -rpcworkqueue= setting\n");
- item->req->WriteReply(HTTP_INTERNAL, "Work queue depth exceeded");
+ item->req->WriteReply(HTTP_INTERNAL_SERVER_ERROR, "Work queue depth exceeded");
}
} else {
- hreq->WriteReply(HTTP_NOTFOUND);
+ hreq->WriteReply(HTTP_NOT_FOUND);
}
}
@@ -519,7 +519,7 @@ HTTPRequest::~HTTPRequest()
if (!replySent) {
// Keep track of whether reply was sent to avoid request leaks
LogPrintf("%s: Unhandled request\n", __func__);
- WriteReply(HTTP_INTERNAL, "Unhandled request");
+ WriteReply(HTTP_INTERNAL_SERVER_ERROR, "Unhandled request");
}
// evhttpd cleans up the request, as long as a reply was sent.
}
diff --git a/src/index/base.cpp b/src/index/base.cpp
index dcb8e99fc1..ba71830b6e 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -188,8 +188,7 @@ bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_ti
return true;
}
-void BaseIndex::BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
- const std::vector<CTransactionRef>& txn_conflicted)
+void BaseIndex::BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex)
{
if (!m_synced) {
return;
@@ -270,7 +269,7 @@ void BaseIndex::ChainStateFlushed(const CBlockLocator& locator)
Commit();
}
-bool BaseIndex::BlockUntilSyncedToCurrentChain()
+bool BaseIndex::BlockUntilSyncedToCurrentChain() const
{
AssertLockNotHeld(cs_main);
diff --git a/src/index/base.h b/src/index/base.h
index d0088d9c9a..95d83b9b47 100644
--- a/src/index/base.h
+++ b/src/index/base.h
@@ -64,8 +64,7 @@ private:
bool Commit();
protected:
- void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
- const std::vector<CTransactionRef>& txn_conflicted) override;
+ void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex) override;
void ChainStateFlushed(const CBlockLocator& locator) override;
@@ -97,7 +96,7 @@ public:
/// sync once and only needs to process blocks in the ValidationInterface
/// queue. If the index is catching up from far behind, this method does
/// not block and immediately returns false.
- bool BlockUntilSyncedToCurrentChain();
+ bool BlockUntilSyncedToCurrentChain() const;
void Interrupt();
diff --git a/src/indirectmap.h b/src/indirectmap.h
index 76da4a6bd5..417d500bd4 100644
--- a/src/indirectmap.h
+++ b/src/indirectmap.h
@@ -5,6 +5,8 @@
#ifndef BITCOIN_INDIRECTMAP_H
#define BITCOIN_INDIRECTMAP_H
+#include <map>
+
template <class T>
struct DereferencingComparator { bool operator()(const T a, const T b) const { return *a < *b; } };
diff --git a/src/init.cpp b/src/init.cpp
index 90d2624c7f..eb5329df66 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -47,12 +47,11 @@
#include <txdb.h>
#include <txmempool.h>
#include <ui_interface.h>
+#include <util/asmap.h>
#include <util/moneystr.h>
#include <util/system.h>
#include <util/threadnames.h>
#include <util/translation.h>
-#include <util/validation.h>
-#include <util/asmap.h>
#include <validation.h>
#include <hash.h>
@@ -74,6 +73,7 @@
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <boost/algorithm/string/split.hpp>
+#include <boost/signals2/signal.hpp>
#include <boost/thread.hpp>
#if ENABLE_ZMQ
@@ -87,10 +87,6 @@ static const bool DEFAULT_PROXYRANDOMIZE = true;
static const bool DEFAULT_REST_ENABLE = false;
static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false;
-// Dump addresses to banlist.dat every 15 minutes (900s)
-static constexpr int DUMP_BANS_INTERVAL = 60 * 15;
-
-
#ifdef WIN32
// Win32 LevelDB doesn't use filedescriptors, and the ones used for
// accessing block files don't count towards the fd_set size limit
@@ -157,7 +153,6 @@ NODISCARD static bool CreatePidFile()
static std::unique_ptr<ECCVerifyHandle> globalVerifyHandle;
static boost::thread_group threadGroup;
-static CScheduler scheduler;
void Interrupt(NodeContext& node)
{
@@ -208,6 +203,7 @@ void Shutdown(NodeContext& node)
// After everything has been shut down, but before things get flushed, stop the
// CScheduler/checkqueue threadGroup
+ if (node.scheduler) node.scheduler->stop();
threadGroup.interrupt_all();
threadGroup.join_all();
@@ -295,6 +291,7 @@ void Shutdown(NodeContext& node)
globalVerifyHandle.reset();
ECC_Stop();
if (node.mempool) node.mempool = nullptr;
+ node.scheduler.reset();
LogPrintf("%s: done\n", __func__);
}
@@ -409,6 +406,7 @@ void SetupServerArgs()
ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
gArgs.AddArg("-addnode=<ip>", "Add a node to connect to and attempt to keep the connection open (see the `addnode` RPC command help for more info). This option can be specified multiple times to add multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
+ gArgs.AddArg("-asmap=<file>", strprintf("Specify asn mapping used for bucketing of the peers (default: %s). Relative paths will be prefixed by the net-specific datadir location.", DEFAULT_ASMAP_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
gArgs.AddArg("-banscore=<n>", strprintf("Threshold for disconnecting misbehaving peers (default: %u)", DEFAULT_BANSCORE_THRESHOLD), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
gArgs.AddArg("-bantime=<n>", strprintf("Number of seconds to keep misbehaving peers from reconnecting (default: %u)", DEFAULT_MISBEHAVING_BANTIME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
gArgs.AddArg("-bind=<addr>", "Bind to given address and always listen on it. Use [host]:port notation for IPv6", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
@@ -437,7 +435,6 @@ void SetupServerArgs()
gArgs.AddArg("-peertimeout=<n>", strprintf("Specify p2p connection timeout in seconds. This option determines the amount of time a peer may be inactive before the connection to it is dropped. (minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION);
gArgs.AddArg("-torcontrol=<ip>:<port>", strprintf("Tor control port to use if onion listening enabled (default: %s)", DEFAULT_TOR_CONTROL), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
gArgs.AddArg("-torpassword=<pass>", "Tor control port password (default: empty)", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::CONNECTION);
- gArgs.AddArg("-asmap=<file>", "Specify asn mapping used for bucketing of the peers. Path should be relative to the -datadir path.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
#ifdef USE_UPNP
#if USE_UPNP
gArgs.AddArg("-upnp", "Use UPnP to map the listening port (default: 1 when listening and no -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
@@ -450,7 +447,7 @@ void SetupServerArgs()
gArgs.AddArg("-whitebind=<[permissions@]addr>", "Bind to given address and whitelist peers connecting to it. "
"Use [host]:port notation for IPv6. Allowed permissions are bloomfilter (allow requesting BIP37 filtered blocks and transactions), "
"noban (do not ban for misbehavior), "
- "forcerelay (relay even non-standard transactions), "
+ "forcerelay (relay transactions that are already in the mempool; implies relay), "
"relay (relay even in -blocksonly mode), "
"and mempool (allow requesting BIP35 mempool contents). "
"Specify multiple permissions separated by commas (default: noban,mempool,relay). Can be specified multiple times.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
@@ -527,7 +524,7 @@ void SetupServerArgs()
gArgs.AddArg("-datacarriersize", strprintf("Maximum size of data in data carrier transactions we relay and mine (default: %u)", MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
gArgs.AddArg("-minrelaytxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)",
CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
- gArgs.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted inbound peers with default permissions. This will relay transactions even if the transactions were already in the mempool or violate local relay policy. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
+ gArgs.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted inbound peers with default permissions. This will relay transactions even if the transactions were already in the mempool. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
gArgs.AddArg("-whitelistrelay", strprintf("Add 'relay' permission to whitelisted inbound peers with default permissions. This will accept relayed transactions even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
@@ -710,7 +707,7 @@ static void ThreadImport(std::vector<fs::path> vImportFiles)
// scan for better chains in the block chain database, that are not yet connected in the active best chain
BlockValidationState state;
if (!ActivateBestChain(state, chainparams)) {
- LogPrintf("Failed to connect best block (%s)\n", FormatStateMessage(state));
+ LogPrintf("Failed to connect best block (%s)\n", state.ToString());
StartShutdown();
return;
}
@@ -1268,16 +1265,19 @@ bool AppInitMain(NodeContext& node)
}
}
+ assert(!node.scheduler);
+ node.scheduler = MakeUnique<CScheduler>();
+
// Start the lightweight task scheduler thread
- CScheduler::Function serviceLoop = std::bind(&CScheduler::serviceQueue, &scheduler);
+ CScheduler::Function serviceLoop = [&node]{ node.scheduler->serviceQueue(); };
threadGroup.create_thread(std::bind(&TraceThread<CScheduler::Function>, "scheduler", serviceLoop));
// Gather some entropy once per minute.
- scheduler.scheduleEvery([]{
+ node.scheduler->scheduleEvery([]{
RandAddPeriodic();
- }, 60000);
+ }, std::chrono::minutes{1});
- GetMainSignals().RegisterBackgroundSignalScheduler(scheduler);
+ GetMainSignals().RegisterBackgroundSignalScheduler(*node.scheduler);
// Create client interfaces for wallets that are supposed to be loaded
// according to -wallet and -disablewallet options. This only constructs
@@ -1326,8 +1326,12 @@ bool AppInitMain(NodeContext& node)
node.banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", &uiInterface, gArgs.GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME));
assert(!node.connman);
node.connman = std::unique_ptr<CConnman>(new CConnman(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max())));
+ // Make mempool generally available in the node context. For example the connection manager, wallet, or RPC threads,
+ // which are all started after this, may use it from the node context.
+ assert(!node.mempool);
+ node.mempool = &::mempool;
- node.peer_logic.reset(new PeerLogicValidation(node.connman.get(), node.banman.get(), scheduler));
+ node.peer_logic.reset(new PeerLogicValidation(node.connman.get(), node.banman.get(), *node.scheduler, *node.mempool));
RegisterValidationInterface(node.peer_logic.get());
// sanitize comments per BIP-0014, format user agent and check total size
@@ -1416,6 +1420,31 @@ bool AppInitMain(NodeContext& node)
return InitError(ResolveErrMsg("externalip", strAddr));
}
+ // Read asmap file if configured
+ if (gArgs.IsArgSet("-asmap")) {
+ fs::path asmap_path = fs::path(gArgs.GetArg("-asmap", ""));
+ if (asmap_path.empty()) {
+ asmap_path = DEFAULT_ASMAP_FILENAME;
+ }
+ if (!asmap_path.is_absolute()) {
+ asmap_path = GetDataDir() / asmap_path;
+ }
+ if (!fs::exists(asmap_path)) {
+ InitError(strprintf(_("Could not find asmap file %s").translated, asmap_path));
+ return false;
+ }
+ std::vector<bool> asmap = CAddrMan::DecodeAsmap(asmap_path);
+ if (asmap.size() == 0) {
+ InitError(strprintf(_("Could not parse asmap file %s").translated, asmap_path));
+ return false;
+ }
+ const uint256 asmap_version = SerializeHash(asmap);
+ node.connman->SetAsmap(std::move(asmap));
+ LogPrintf("Using asmap version %s for IP bucketing\n", asmap_version.ToString());
+ } else {
+ LogPrintf("Using /16 prefix for IP bucketing\n");
+ }
+
#if ENABLE_ZMQ
g_zmq_notification_interface = CZMQNotificationInterface::Create();
@@ -1650,11 +1679,6 @@ bool AppInitMain(NodeContext& node)
return false;
}
- // Now that the chain state is loaded, make mempool generally available in the node context. For example the
- // connection manager, wallet, or RPC threads, which are all started after this, may use it from the node context.
- assert(!node.mempool);
- node.mempool = &::mempool;
-
fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
CAutoFile est_filein(fsbridge::fopen(est_path, "rb"), SER_DISK, CLIENT_VERSION);
// Allowed to fail as this file IS missing on first startup.
@@ -1819,42 +1843,23 @@ bool AppInitMain(NodeContext& node)
connOptions.m_specified_outgoing = connect;
}
}
- if (!node.connman->Start(scheduler, connOptions)) {
+ if (!node.connman->Start(*node.scheduler, connOptions)) {
return false;
}
- // Read asmap file if configured
- if (gArgs.IsArgSet("-asmap")) {
- std::string asmap_file = gArgs.GetArg("-asmap", "");
- if (asmap_file.empty()) {
- asmap_file = DEFAULT_ASMAP_FILENAME;
- }
- const fs::path asmap_path = GetDataDir() / asmap_file;
- std::vector<bool> asmap = CAddrMan::DecodeAsmap(asmap_path);
- if (asmap.size() == 0) {
- InitError(strprintf(_("Could not find or parse specified asmap: '%s'").translated, asmap_path));
- return false;
- }
- const uint256 asmap_version = SerializeHash(asmap);
- node.connman->SetAsmap(std::move(asmap));
- LogPrintf("Using asmap version %s for IP bucketing.\n", asmap_version.ToString());
- } else {
- LogPrintf("Using /16 prefix for IP bucketing.\n");
- }
-
// ********************************************************* Step 13: finished
SetRPCWarmupFinished();
uiInterface.InitMessage(_("Done loading").translated);
for (const auto& client : node.chain_clients) {
- client->start(scheduler);
+ client->start(*node.scheduler);
}
BanMan* banman = node.banman.get();
- scheduler.scheduleEvery([banman]{
+ node.scheduler->scheduleEvery([banman]{
banman->DumpBanlist();
- }, DUMP_BANS_INTERVAL * 1000);
+ }, DUMP_BANS_INTERVAL);
return true;
}
diff --git a/src/interfaces/chain.cpp b/src/interfaces/chain.cpp
index 5a3420349f..775a89f4cf 100644
--- a/src/interfaces/chain.cpp
+++ b/src/interfaces/chain.cpp
@@ -166,27 +166,25 @@ public:
}
void TransactionAddedToMempool(const CTransactionRef& tx) override
{
- m_notifications->TransactionAddedToMempool(tx);
+ m_notifications->transactionAddedToMempool(tx);
}
void TransactionRemovedFromMempool(const CTransactionRef& tx) override
{
- m_notifications->TransactionRemovedFromMempool(tx);
+ m_notifications->transactionRemovedFromMempool(tx);
}
- void BlockConnected(const std::shared_ptr<const CBlock>& block,
- const CBlockIndex* index,
- const std::vector<CTransactionRef>& tx_conflicted) override
+ void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* index) override
{
- m_notifications->BlockConnected(*block, tx_conflicted, index->nHeight);
+ m_notifications->blockConnected(*block, index->nHeight);
}
void BlockDisconnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* index) override
{
- m_notifications->BlockDisconnected(*block, index->nHeight);
+ m_notifications->blockDisconnected(*block, index->nHeight);
}
void UpdatedBlockTip(const CBlockIndex* index, const CBlockIndex* fork_index, bool is_ibd) override
{
- m_notifications->UpdatedBlockTip();
+ m_notifications->updatedBlockTip();
}
- void ChainStateFlushed(const CBlockLocator& locator) override { m_notifications->ChainStateFlushed(locator); }
+ void ChainStateFlushed(const CBlockLocator& locator) override { m_notifications->chainStateFlushed(locator); }
Chain& m_chain;
Chain::Notifications* m_notifications;
};
@@ -279,7 +277,10 @@ public:
auto it = ::mempool.GetIter(txid);
return it && (*it)->GetCountWithDescendants() > 1;
}
- bool broadcastTransaction(const CTransactionRef& tx, std::string& err_string, const CAmount& max_tx_fee, bool relay) override
+ bool broadcastTransaction(const CTransactionRef& tx,
+ const CAmount& max_tx_fee,
+ bool relay,
+ std::string& err_string) override
{
const TransactionError err = BroadcastTransaction(m_node, tx, err_string, max_tx_fee, relay, /*wait_callback*/ false);
// Chain clients only care about failures to accept the tx to the mempool. Disregard non-mempool related failures.
@@ -367,7 +368,7 @@ public:
{
LOCK2(::cs_main, ::mempool.cs);
for (const CTxMemPoolEntry& entry : ::mempool.mapTx) {
- notifications.TransactionAddedToMempool(entry.GetSharedTx());
+ notifications.transactionAddedToMempool(entry.GetSharedTx());
}
}
NodeContext& m_node;
diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h
index 7304f82749..caefa87e11 100644
--- a/src/interfaces/chain.h
+++ b/src/interfaces/chain.h
@@ -154,7 +154,10 @@ public:
//! Transaction is added to memory pool, if the transaction fee is below the
//! amount specified by max_tx_fee, and broadcast to all peers if relay is set to true.
//! Return false if the transaction could not be added due to the fee or for another reason.
- virtual bool broadcastTransaction(const CTransactionRef& tx, std::string& err_string, const CAmount& max_tx_fee, bool relay) = 0;
+ virtual bool broadcastTransaction(const CTransactionRef& tx,
+ const CAmount& max_tx_fee,
+ bool relay,
+ std::string& err_string) = 0;
//! Calculate mempool ancestor and descendant counts for the given transaction.
virtual void getTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants) = 0;
@@ -217,12 +220,12 @@ public:
{
public:
virtual ~Notifications() {}
- virtual void TransactionAddedToMempool(const CTransactionRef& tx) {}
- virtual void TransactionRemovedFromMempool(const CTransactionRef& ptx) {}
- virtual void BlockConnected(const CBlock& block, const std::vector<CTransactionRef>& tx_conflicted, int height) {}
- virtual void BlockDisconnected(const CBlock& block, int height) {}
- virtual void UpdatedBlockTip() {}
- virtual void ChainStateFlushed(const CBlockLocator& locator) {}
+ virtual void transactionAddedToMempool(const CTransactionRef& tx) {}
+ virtual void transactionRemovedFromMempool(const CTransactionRef& ptx) {}
+ virtual void blockConnected(const CBlock& block, int height) {}
+ virtual void blockDisconnected(const CBlock& block, int height) {}
+ virtual void updatedBlockTip() {}
+ virtual void chainStateFlushed(const CBlockLocator& locator) {}
};
//! Register handler for notifications.
@@ -245,7 +248,7 @@ public:
//! Current RPC serialization flags.
virtual int rpcSerializationFlags() = 0;
- //! Synchronously send TransactionAddedToMempool notifications about all
+ //! Synchronously send transactionAddedToMempool notifications about all
//! current mempool transactions to the specified handler and return after
//! the last one is sent. These notifications aren't coordinated with async
//! notifications sent by handleNotifications, so out of date async
diff --git a/src/interfaces/node.cpp b/src/interfaces/node.cpp
index 8a64a9d26a..905173d20b 100644
--- a/src/interfaces/node.cpp
+++ b/src/interfaces/node.cpp
@@ -37,6 +37,8 @@
#include <univalue.h>
+#include <boost/signals2/signal.hpp>
+
class CWallet;
fs::path GetWalletDir();
std::vector<fs::path> ListWalletDir();
@@ -150,14 +152,14 @@ public:
}
return false;
}
- bool disconnect(const CNetAddr& net_addr) override
+ bool disconnectByAddress(const CNetAddr& net_addr) override
{
if (m_context.connman) {
return m_context.connman->DisconnectNode(net_addr);
}
return false;
}
- bool disconnect(NodeId id) override
+ bool disconnectById(NodeId id) override
{
if (m_context.connman) {
return m_context.connman->DisconnectNode(id);
@@ -260,12 +262,11 @@ public:
{
return MakeWallet(LoadWallet(*m_context.chain, name, error, warnings));
}
- WalletCreationStatus createWallet(const SecureString& passphrase, uint64_t wallet_creation_flags, const std::string& name, std::string& error, std::vector<std::string>& warnings, std::unique_ptr<Wallet>& result) override
+ std::unique_ptr<Wallet> createWallet(const SecureString& passphrase, uint64_t wallet_creation_flags, const std::string& name, std::string& error, std::vector<std::string>& warnings, WalletCreationStatus& status) override
{
std::shared_ptr<CWallet> wallet;
- WalletCreationStatus status = CreateWallet(*m_context.chain, passphrase, wallet_creation_flags, name, error, warnings, wallet);
- result = MakeWallet(wallet);
- return status;
+ status = CreateWallet(*m_context.chain, passphrase, wallet_creation_flags, name, error, warnings, wallet);
+ return MakeWallet(wallet);
}
std::unique_ptr<Handler> handleInitMessage(InitMessageFn fn) override
{
diff --git a/src/interfaces/node.h b/src/interfaces/node.h
index 38aeb06324..53a20886cd 100644
--- a/src/interfaces/node.h
+++ b/src/interfaces/node.h
@@ -124,10 +124,10 @@ public:
virtual bool unban(const CSubNet& ip) = 0;
//! Disconnect node by address.
- virtual bool disconnect(const CNetAddr& net_addr) = 0;
+ virtual bool disconnectByAddress(const CNetAddr& net_addr) = 0;
//! Disconnect node by id.
- virtual bool disconnect(NodeId id) = 0;
+ virtual bool disconnectById(NodeId id) = 0;
//! Get total bytes recv.
virtual int64_t getTotalBytesRecv() = 0;
@@ -204,7 +204,7 @@ public:
virtual std::unique_ptr<Wallet> loadWallet(const std::string& name, std::string& error, std::vector<std::string>& warnings) = 0;
//! Create a wallet from file
- virtual WalletCreationStatus createWallet(const SecureString& passphrase, uint64_t wallet_creation_flags, const std::string& name, std::string& error, std::vector<std::string>& warnings, std::unique_ptr<Wallet>& result) = 0;
+ virtual std::unique_ptr<Wallet> createWallet(const SecureString& passphrase, uint64_t wallet_creation_flags, const std::string& name, std::string& error, std::vector<std::string>& warnings, WalletCreationStatus& status) = 0;
//! Register handler for init messages.
using InitMessageFn = std::function<void(const std::string& message)>;
diff --git a/src/interfaces/wallet.cpp b/src/interfaces/wallet.cpp
index baea71d0bb..bb6bb4923f 100644
--- a/src/interfaces/wallet.cpp
+++ b/src/interfaces/wallet.cpp
@@ -19,7 +19,6 @@
#include <wallet/fees.h>
#include <wallet/ismine.h>
#include <wallet/load.h>
-#include <wallet/psbtwallet.h>
#include <wallet/rpcwallet.h>
#include <wallet/wallet.h>
@@ -119,19 +118,15 @@ public:
}
bool getPubKey(const CScript& script, const CKeyID& address, CPubKey& pub_key) override
{
- std::unique_ptr<SigningProvider> provider = m_wallet->GetSigningProvider(script);
+ std::unique_ptr<SigningProvider> provider = m_wallet->GetSolvingProvider(script);
if (provider) {
return provider->GetPubKey(address, pub_key);
}
return false;
}
- bool getPrivKey(const CScript& script, const CKeyID& address, CKey& key) override
+ SigningResult signMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) override
{
- std::unique_ptr<SigningProvider> provider = m_wallet->GetSigningProvider(script);
- if (provider) {
- return provider->GetKey(address, key);
- }
- return false;
+ return m_wallet->SignMessage(message, pkhash, str_sig);
}
bool isSpendable(const CTxDestination& dest) override { return m_wallet->IsMine(dest) & ISMINE_SPENDABLE; }
bool haveWatchOnly() override
@@ -258,19 +253,12 @@ public:
}
bool createBumpTransaction(const uint256& txid,
const CCoinControl& coin_control,
- CAmount total_fee,
std::vector<std::string>& errors,
CAmount& old_fee,
CAmount& new_fee,
CMutableTransaction& mtx) override
{
- if (total_fee > 0) {
- return feebumper::CreateTotalBumpTransaction(m_wallet.get(), txid, coin_control, total_fee, errors, old_fee, new_fee, mtx) ==
- feebumper::Result::OK;
- } else {
- return feebumper::CreateRateBumpTransaction(*m_wallet.get(), txid, coin_control, errors, old_fee, new_fee, mtx) ==
- feebumper::Result::OK;
- }
+ return feebumper::CreateRateBumpTransaction(*m_wallet.get(), txid, coin_control, errors, old_fee, new_fee, mtx) == feebumper::Result::OK;
}
bool signBumpTransaction(CMutableTransaction& mtx) override { return feebumper::SignTransaction(*m_wallet.get(), mtx); }
bool commitBumpTransaction(const uint256& txid,
@@ -357,13 +345,13 @@ public:
}
return {};
}
- TransactionError fillPSBT(PartiallySignedTransaction& psbtx,
- bool& complete,
- int sighash_type = 1 /* SIGHASH_ALL */,
- bool sign = true,
- bool bip32derivs = false) override
+ TransactionError fillPSBT(int sighash_type,
+ bool sign,
+ bool bip32derivs,
+ PartiallySignedTransaction& psbtx,
+ bool& complete) override
{
- return FillPSBT(m_wallet.get(), psbtx, complete, sighash_type, sign, bip32derivs);
+ return m_wallet->FillPSBT(psbtx, complete, sighash_type, sign, bip32derivs);
}
WalletBalances getBalances() override
{
@@ -469,7 +457,7 @@ public:
unsigned int getConfirmTarget() override { return m_wallet->m_confirm_target; }
bool hdEnabled() override { return m_wallet->IsHDEnabled(); }
bool canGetAddresses() override { return m_wallet->CanGetAddresses(); }
- bool IsWalletFlagSet(uint64_t flag) override { return m_wallet->IsWalletFlagSet(flag); }
+ bool privateKeysDisabled() override { return m_wallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS); }
OutputType getDefaultAddressType() override { return m_wallet->m_default_address_type; }
OutputType getDefaultChangeType() override { return m_wallet->m_default_change_type; }
CAmount getDefaultMaxTxFee() override { return m_wallet->m_default_max_tx_fee; }
diff --git a/src/interfaces/wallet.h b/src/interfaces/wallet.h
index d4280e8091..0e551b0a96 100644
--- a/src/interfaces/wallet.h
+++ b/src/interfaces/wallet.h
@@ -10,6 +10,7 @@
#include <script/standard.h> // For CTxDestination
#include <support/allocators/secure.h> // For SecureString
#include <ui_interface.h> // For ChangeType
+#include <util/message.h>
#include <functional>
#include <map>
@@ -84,8 +85,8 @@ public:
//! Get public key.
virtual bool getPubKey(const CScript& script, const CKeyID& address, CPubKey& pub_key) = 0;
- //! Get private key.
- virtual bool getPrivKey(const CScript& script, const CKeyID& address, CKey& key) = 0;
+ //! Sign message
+ virtual SigningResult signMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) = 0;
//! Return whether wallet has private key.
virtual bool isSpendable(const CTxDestination& dest) = 0;
@@ -154,7 +155,6 @@ public:
//! Create bump transaction.
virtual bool createBumpTransaction(const uint256& txid,
const CCoinControl& coin_control,
- CAmount total_fee,
std::vector<std::string>& errors,
CAmount& old_fee,
CAmount& new_fee,
@@ -192,11 +192,11 @@ public:
int& num_blocks) = 0;
//! Fill PSBT.
- virtual TransactionError fillPSBT(PartiallySignedTransaction& psbtx,
- bool& complete,
- int sighash_type = 1 /* SIGHASH_ALL */,
- bool sign = true,
- bool bip32derivs = false) = 0;
+ virtual TransactionError fillPSBT(int sighash_type,
+ bool sign,
+ bool bip32derivs,
+ PartiallySignedTransaction& psbtx,
+ bool& complete) = 0;
//! Get balances.
virtual WalletBalances getBalances() = 0;
@@ -248,8 +248,8 @@ public:
// Return whether the wallet is blank.
virtual bool canGetAddresses() = 0;
- // check if a certain wallet flag is set.
- virtual bool IsWalletFlagSet(uint64_t flag) = 0;
+ // Return whether private keys are disabled.
+ virtual bool privateKeysDisabled() = 0;
// Get default address type.
virtual OutputType getDefaultAddressType() = 0;
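For orientation, a hedged usage sketch of the reordered interface (illustrative only: the function name, header paths, and the availability of a populated `PartiallySignedTransaction` are assumptions, not part of this patch; the `fillPSBT`, `signMessage`, and `privateKeysDisabled` signatures are taken from the hunks above):

```cpp
// Sketch, not from the patch: header paths assumed from this source tree.
#include <interfaces/wallet.h>  // interfaces::Wallet
#include <psbt.h>               // PartiallySignedTransaction
#include <script/standard.h>    // PKHash
#include <util/error.h>         // TransactionError

#include <string>

// Hypothetical caller: the options (sighash type, sign, bip32derivs) now come
// first, the PSBT in/out parameters come last.
bool FillSignAndMessage(interfaces::Wallet& wallet,
                        PartiallySignedTransaction& psbtx,
                        const PKHash& pkhash)
{
    bool complete = false;
    const TransactionError err = wallet.fillPSBT(/*sighash_type=*/1 /* SIGHASH_ALL */,
                                                 /*sign=*/true,
                                                 /*bip32derivs=*/false,
                                                 psbtx, complete);
    if (err != TransactionError::OK) return false;

    // getPrivKey() is gone; callers now ask the wallet to sign messages.
    if (!wallet.privateKeysDisabled()) {
        std::string signature;
        wallet.signMessage("example message", pkhash, signature);
    }
    return complete;
}
```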
diff --git a/src/leveldb/.appveyor.yml b/src/leveldb/.appveyor.yml
new file mode 100644
index 0000000000..c24b17e805
--- /dev/null
+++ b/src/leveldb/.appveyor.yml
@@ -0,0 +1,35 @@
+# Build matrix / environment variables are explained on:
+# https://www.appveyor.com/docs/appveyor-yml/
+# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml
+
+version: "{build}"
+
+environment:
+ matrix:
+ # AppVeyor currently has no custom job name feature.
+ # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs
+ - JOB: Visual Studio 2017
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+ CMAKE_GENERATOR: Visual Studio 15 2017
+
+platform:
+ - x86
+ - x64
+
+configuration:
+ - RelWithDebInfo
+ - Debug
+
+build_script:
+ - git submodule update --init --recursive
+ - mkdir build
+ - cd build
+ - if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64
+ - cmake --version
+ - cmake .. -G "%CMAKE_GENERATOR%"
+ -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%"
+ - cmake --build . --config "%CONFIGURATION%"
+ - cd ..
+
+test_script:
+ - cd build && ctest --verbose --build-config "%CONFIGURATION%" && cd ..
diff --git a/src/leveldb/.clang-format b/src/leveldb/.clang-format
new file mode 100644
index 0000000000..f493f75382
--- /dev/null
+++ b/src/leveldb/.clang-format
@@ -0,0 +1,18 @@
+# Run manually to reformat a file:
+# clang-format -i --style=file <file>
+# find . -iname '*.cc' -o -iname '*.h' -o -iname '*.h.in' | xargs clang-format -i --style=file
+BasedOnStyle: Google
+DerivePointerAlignment: false
+
+# Public headers are in a different location in the internal Google repository.
+# Order them so that when imported to the authoritative repository they will be
+# in correct alphabetical order.
+IncludeCategories:
+ - Regex: '^(<|"(benchmarks|db|helpers)/)'
+ Priority: 1
+ - Regex: '^"(leveldb)/'
+ Priority: 2
+ - Regex: '^(<|"(issues|port|table|third_party|util)/)'
+ Priority: 3
+ - Regex: '.*'
+ Priority: 4
diff --git a/src/leveldb/.gitignore b/src/leveldb/.gitignore
index 71d87a4eeb..c4b242534f 100644
--- a/src/leveldb/.gitignore
+++ b/src/leveldb/.gitignore
@@ -1,13 +1,8 @@
-build_config.mk
-*.a
-*.o
-*.dylib*
-*.so
-*.so.*
-*_test
-db_bench
-leveldbutil
-Release
-Debug
-Benchmark
-vs2010.*
+# Editors.
+*.sw*
+.vscode
+.DS_Store
+
+# Build directory.
+build/
+out/
diff --git a/src/leveldb/.travis.yml b/src/leveldb/.travis.yml
index f5bd74c454..42cbe64fd0 100644
--- a/src/leveldb/.travis.yml
+++ b/src/leveldb/.travis.yml
@@ -1,13 +1,82 @@
+# Build matrix / environment variables are explained on:
+# http://about.travis-ci.org/docs/user/build-configuration/
+# This file can be validated on: http://lint.travis-ci.org/
+
language: cpp
+dist: bionic
+osx_image: xcode10.3
+
compiler:
-- clang
- gcc
+- clang
os:
- linux
- osx
-sudo: false
-before_install:
-- echo $LANG
-- echo $LC_ALL
+
+env:
+- BUILD_TYPE=Debug
+- BUILD_TYPE=RelWithDebInfo
+
+addons:
+ apt:
+ sources:
+ - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main'
+ key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
+ - sourceline: 'ppa:ubuntu-toolchain-r/test'
+ packages:
+ - clang-9
+ - cmake
+ - gcc-9
+ - g++-9
+ - libgoogle-perftools-dev
+ - libkyotocabinet-dev
+ - libsnappy-dev
+ - libsqlite3-dev
+ - ninja-build
+ homebrew:
+ packages:
+ - cmake
+ - crc32c
+ - gcc@9
+ - gperftools
+ - kyoto-cabinet
+ - llvm@9
+ - ninja
+ - snappy
+ - sqlite3
+ update: true
+
+install:
+# The following Homebrew packages aren't linked by default, and need to be
+# prepended to the path explicitly.
+- if [ "$TRAVIS_OS_NAME" = "osx" ]; then
+ export PATH="$(brew --prefix llvm)/bin:$PATH";
+ fi
+# /usr/bin/gcc points to an older compiler on both Linux and macOS.
+- if [ "$CXX" = "g++" ]; then export CXX="g++-9" CC="gcc-9"; fi
+# /usr/bin/clang points to an older compiler on both Linux and macOS.
+#
+# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values
+# below don't work on macOS. Fortunately, the path change above makes the
+# default values (clang and clang++) resolve to the correct compiler on macOS.
+- if [ "$TRAVIS_OS_NAME" = "linux" ]; then
+ if [ "$CXX" = "clang++" ]; then export CXX="clang++-9" CC="clang-9"; fi;
+ fi
+- echo ${CC}
+- echo ${CXX}
+- ${CXX} --version
+- cmake --version
+
+before_script:
+- mkdir -p build && cd build
+- cmake .. -G Ninja -DCMAKE_BUILD_TYPE=$BUILD_TYPE
+ -DCMAKE_INSTALL_PREFIX=$HOME/.local
+- cmake --build .
+- cd ..
+
script:
-- make -j 4 check
+- cd build && ctest --verbose && cd ..
+- "if [ -f build/db_bench ] ; then build/db_bench ; fi"
+- "if [ -f build/db_bench_sqlite3 ] ; then build/db_bench_sqlite3 ; fi"
+- "if [ -f build/db_bench_tree_db ] ; then build/db_bench_tree_db ; fi"
+- cd build && cmake --build . --target install
diff --git a/src/leveldb/CMakeLists.txt b/src/leveldb/CMakeLists.txt
new file mode 100644
index 0000000000..1cb46256c2
--- /dev/null
+++ b/src/leveldb/CMakeLists.txt
@@ -0,0 +1,465 @@
+# Copyright 2017 The LevelDB Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+cmake_minimum_required(VERSION 3.9)
+# Keep the version below in sync with the one in db.h
+project(leveldb VERSION 1.22.0 LANGUAGES C CXX)
+
+# This project can use C11, but will gracefully decay down to C89.
+set(CMAKE_C_STANDARD 11)
+set(CMAKE_C_STANDARD_REQUIRED OFF)
+set(CMAKE_C_EXTENSIONS OFF)
+
+# This project requires C++11.
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+if (WIN32)
+ set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_WINDOWS)
+ # TODO(cmumford): Make UNICODE configurable for Windows.
+ add_definitions(-D_UNICODE -DUNICODE)
+else (WIN32)
+ set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_POSIX)
+endif (WIN32)
+
+option(LEVELDB_BUILD_TESTS "Build LevelDB's unit tests" ON)
+option(LEVELDB_BUILD_BENCHMARKS "Build LevelDB's benchmarks" ON)
+option(LEVELDB_INSTALL "Install LevelDB's header and library" ON)
+
+include(TestBigEndian)
+test_big_endian(LEVELDB_IS_BIG_ENDIAN)
+
+include(CheckIncludeFile)
+check_include_file("unistd.h" HAVE_UNISTD_H)
+
+include(CheckLibraryExists)
+check_library_exists(crc32c crc32c_value "" HAVE_CRC32C)
+check_library_exists(snappy snappy_compress "" HAVE_SNAPPY)
+check_library_exists(tcmalloc malloc "" HAVE_TCMALLOC)
+
+include(CheckCXXSymbolExists)
+# Using check_cxx_symbol_exists() instead of check_c_symbol_exists() because
+# we're including the header from C++, and feature detection should use the same
+# compiler language that the project will use later. Principles aside, some
+# versions of the C library do not expose fdatasync() in <unistd.h> in standard C mode
+# (-std=c11), but do expose the function in standard C++ mode (-std=c++11).
+check_cxx_symbol_exists(fdatasync "unistd.h" HAVE_FDATASYNC)
+check_cxx_symbol_exists(F_FULLFSYNC "fcntl.h" HAVE_FULLFSYNC)
+check_cxx_symbol_exists(O_CLOEXEC "fcntl.h" HAVE_O_CLOEXEC)
+
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # Disable C++ exceptions.
+ string(REGEX REPLACE "/EH[a-z]+" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHs-c-")
+ add_definitions(-D_HAS_EXCEPTIONS=0)
+
+ # Disable RTTI.
+ string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-")
+else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ # Enable strict prototype warnings for C code in clang and gcc.
+ if(NOT CMAKE_C_FLAGS MATCHES "-Wstrict-prototypes")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wstrict-prototypes")
+ endif(NOT CMAKE_C_FLAGS MATCHES "-Wstrict-prototypes")
+
+ # Disable C++ exceptions.
+ string(REGEX REPLACE "-fexceptions" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
+
+ # Disable RTTI.
+ string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti")
+endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+
+# Test whether -Wthread-safety is available. See
+# https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+include(CheckCXXCompilerFlag)
+check_cxx_compiler_flag(-Wthread-safety HAVE_CLANG_THREAD_SAFETY)
+
+include(CheckCXXSourceCompiles)
+
+# Test whether C++17 __has_include is available.
+check_cxx_source_compiles("
+#if defined(__has_include) && __has_include(<string>)
+#include <string>
+#endif
+int main() { std::string str; return 0; }
+" HAVE_CXX17_HAS_INCLUDE)
+
+set(LEVELDB_PUBLIC_INCLUDE_DIR "include/leveldb")
+set(LEVELDB_PORT_CONFIG_DIR "include/port")
+
+configure_file(
+ "port/port_config.h.in"
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+)
+
+include_directories(
+ "${PROJECT_BINARY_DIR}/include"
+ "."
+)
+
+if(BUILD_SHARED_LIBS)
+ # Only export LEVELDB_EXPORT symbols from the shared library.
+ add_compile_options(-fvisibility=hidden)
+endif(BUILD_SHARED_LIBS)
+
+# Must be included before CMAKE_INSTALL_INCLUDEDIR is used.
+include(GNUInstallDirs)
+
+add_library(leveldb "")
+target_sources(leveldb
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+ "db/builder.cc"
+ "db/builder.h"
+ "db/c.cc"
+ "db/db_impl.cc"
+ "db/db_impl.h"
+ "db/db_iter.cc"
+ "db/db_iter.h"
+ "db/dbformat.cc"
+ "db/dbformat.h"
+ "db/dumpfile.cc"
+ "db/filename.cc"
+ "db/filename.h"
+ "db/log_format.h"
+ "db/log_reader.cc"
+ "db/log_reader.h"
+ "db/log_writer.cc"
+ "db/log_writer.h"
+ "db/memtable.cc"
+ "db/memtable.h"
+ "db/repair.cc"
+ "db/skiplist.h"
+ "db/snapshot.h"
+ "db/table_cache.cc"
+ "db/table_cache.h"
+ "db/version_edit.cc"
+ "db/version_edit.h"
+ "db/version_set.cc"
+ "db/version_set.h"
+ "db/write_batch_internal.h"
+ "db/write_batch.cc"
+ "port/port_stdcxx.h"
+ "port/port.h"
+ "port/thread_annotations.h"
+ "table/block_builder.cc"
+ "table/block_builder.h"
+ "table/block.cc"
+ "table/block.h"
+ "table/filter_block.cc"
+ "table/filter_block.h"
+ "table/format.cc"
+ "table/format.h"
+ "table/iterator_wrapper.h"
+ "table/iterator.cc"
+ "table/merger.cc"
+ "table/merger.h"
+ "table/table_builder.cc"
+ "table/table.cc"
+ "table/two_level_iterator.cc"
+ "table/two_level_iterator.h"
+ "util/arena.cc"
+ "util/arena.h"
+ "util/bloom.cc"
+ "util/cache.cc"
+ "util/coding.cc"
+ "util/coding.h"
+ "util/comparator.cc"
+ "util/crc32c.cc"
+ "util/crc32c.h"
+ "util/env.cc"
+ "util/filter_policy.cc"
+ "util/hash.cc"
+ "util/hash.h"
+ "util/logging.cc"
+ "util/logging.h"
+ "util/mutexlock.h"
+ "util/no_destructor.h"
+ "util/options.cc"
+ "util/random.h"
+ "util/status.cc"
+
+ # Only CMake 3.3+ supports PUBLIC sources in targets exported by "install".
+ $<$<VERSION_GREATER:CMAKE_VERSION,3.2>:PUBLIC>
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
+)
+
+if (WIN32)
+ target_sources(leveldb
+ PRIVATE
+ "util/env_windows.cc"
+ "util/windows_logger.h"
+ )
+else (WIN32)
+ target_sources(leveldb
+ PRIVATE
+ "util/env_posix.cc"
+ "util/posix_logger.h"
+ )
+endif (WIN32)
+
+# MemEnv is not part of the interface and could be pulled to a separate library.
+target_sources(leveldb
+ PRIVATE
+ "helpers/memenv/memenv.cc"
+ "helpers/memenv/memenv.h"
+)
+
+target_include_directories(leveldb
+ PUBLIC
+ $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
+ $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+)
+
+set_target_properties(leveldb
+ PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR})
+
+target_compile_definitions(leveldb
+ PRIVATE
+ # Used by include/export.h when building shared libraries.
+ LEVELDB_COMPILE_LIBRARY
+ # Used by port/port.h.
+ ${LEVELDB_PLATFORM_NAME}=1
+)
+if (NOT HAVE_CXX17_HAS_INCLUDE)
+ target_compile_definitions(leveldb
+ PRIVATE
+ LEVELDB_HAS_PORT_CONFIG_H=1
+ )
+endif(NOT HAVE_CXX17_HAS_INCLUDE)
+
+if(BUILD_SHARED_LIBS)
+ target_compile_definitions(leveldb
+ PUBLIC
+ # Used by include/export.h.
+ LEVELDB_SHARED_LIBRARY
+ )
+endif(BUILD_SHARED_LIBS)
+
+if(HAVE_CLANG_THREAD_SAFETY)
+ target_compile_options(leveldb
+ PUBLIC
+ -Werror -Wthread-safety)
+endif(HAVE_CLANG_THREAD_SAFETY)
+
+if(HAVE_CRC32C)
+ target_link_libraries(leveldb crc32c)
+endif(HAVE_CRC32C)
+if(HAVE_SNAPPY)
+ target_link_libraries(leveldb snappy)
+endif(HAVE_SNAPPY)
+if(HAVE_TCMALLOC)
+ target_link_libraries(leveldb tcmalloc)
+endif(HAVE_TCMALLOC)
+
+# Needed by port_stdcxx.h
+find_package(Threads REQUIRED)
+target_link_libraries(leveldb Threads::Threads)
+
+add_executable(leveldbutil
+ "db/leveldbutil.cc"
+)
+target_link_libraries(leveldbutil leveldb)
+
+if(LEVELDB_BUILD_TESTS)
+ enable_testing()
+
+ function(leveldb_test test_file)
+ get_filename_component(test_target_name "${test_file}" NAME_WE)
+
+ add_executable("${test_target_name}" "")
+ target_sources("${test_target_name}"
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+ "util/testharness.cc"
+ "util/testharness.h"
+ "util/testutil.cc"
+ "util/testutil.h"
+
+ "${test_file}"
+ )
+ target_link_libraries("${test_target_name}" leveldb)
+ target_compile_definitions("${test_target_name}"
+ PRIVATE
+ ${LEVELDB_PLATFORM_NAME}=1
+ )
+ if (NOT HAVE_CXX17_HAS_INCLUDE)
+ target_compile_definitions("${test_target_name}"
+ PRIVATE
+ LEVELDB_HAS_PORT_CONFIG_H=1
+ )
+ endif(NOT HAVE_CXX17_HAS_INCLUDE)
+
+ add_test(NAME "${test_target_name}" COMMAND "${test_target_name}")
+ endfunction(leveldb_test)
+
+ leveldb_test("db/c_test.c")
+ leveldb_test("db/fault_injection_test.cc")
+
+ leveldb_test("issues/issue178_test.cc")
+ leveldb_test("issues/issue200_test.cc")
+ leveldb_test("issues/issue320_test.cc")
+
+ leveldb_test("util/env_test.cc")
+ leveldb_test("util/status_test.cc")
+ leveldb_test("util/no_destructor_test.cc")
+
+ if(NOT BUILD_SHARED_LIBS)
+ leveldb_test("db/autocompact_test.cc")
+ leveldb_test("db/corruption_test.cc")
+ leveldb_test("db/db_test.cc")
+ leveldb_test("db/dbformat_test.cc")
+ leveldb_test("db/filename_test.cc")
+ leveldb_test("db/log_test.cc")
+ leveldb_test("db/recovery_test.cc")
+ leveldb_test("db/skiplist_test.cc")
+ leveldb_test("db/version_edit_test.cc")
+ leveldb_test("db/version_set_test.cc")
+ leveldb_test("db/write_batch_test.cc")
+
+ leveldb_test("helpers/memenv/memenv_test.cc")
+
+ leveldb_test("table/filter_block_test.cc")
+ leveldb_test("table/table_test.cc")
+
+ leveldb_test("util/arena_test.cc")
+ leveldb_test("util/bloom_test.cc")
+ leveldb_test("util/cache_test.cc")
+ leveldb_test("util/coding_test.cc")
+ leveldb_test("util/crc32c_test.cc")
+ leveldb_test("util/hash_test.cc")
+ leveldb_test("util/logging_test.cc")
+
+ # TODO(costan): This test also uses
+ # "util/env_{posix|windows}_test_helper.h"
+ if (WIN32)
+ leveldb_test("util/env_windows_test.cc")
+ else (WIN32)
+ leveldb_test("util/env_posix_test.cc")
+ endif (WIN32)
+ endif(NOT BUILD_SHARED_LIBS)
+endif(LEVELDB_BUILD_TESTS)
+
+if(LEVELDB_BUILD_BENCHMARKS)
+ function(leveldb_benchmark bench_file)
+ get_filename_component(bench_target_name "${bench_file}" NAME_WE)
+
+ add_executable("${bench_target_name}" "")
+ target_sources("${bench_target_name}"
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+ "util/histogram.cc"
+ "util/histogram.h"
+ "util/testharness.cc"
+ "util/testharness.h"
+ "util/testutil.cc"
+ "util/testutil.h"
+
+ "${bench_file}"
+ )
+ target_link_libraries("${bench_target_name}" leveldb)
+ target_compile_definitions("${bench_target_name}"
+ PRIVATE
+ ${LEVELDB_PLATFORM_NAME}=1
+ )
+ if (NOT HAVE_CXX17_HAS_INCLUDE)
+ target_compile_definitions("${bench_target_name}"
+ PRIVATE
+ LEVELDB_HAS_PORT_CONFIG_H=1
+ )
+ endif(NOT HAVE_CXX17_HAS_INCLUDE)
+ endfunction(leveldb_benchmark)
+
+ if(NOT BUILD_SHARED_LIBS)
+ leveldb_benchmark("benchmarks/db_bench.cc")
+ endif(NOT BUILD_SHARED_LIBS)
+
+ check_library_exists(sqlite3 sqlite3_open "" HAVE_SQLITE3)
+ if(HAVE_SQLITE3)
+ leveldb_benchmark("benchmarks/db_bench_sqlite3.cc")
+ target_link_libraries(db_bench_sqlite3 sqlite3)
+ endif(HAVE_SQLITE3)
+
+ # check_library_exists is insufficient here because the library names have
+ # different manglings when compiled with clang or gcc, at least when installed
+ # with Homebrew on Mac.
+ set(OLD_CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES})
+ list(APPEND CMAKE_REQUIRED_LIBRARIES kyotocabinet)
+ check_cxx_source_compiles("
+#include <kcpolydb.h>
+
+int main() {
+ kyotocabinet::TreeDB* db = new kyotocabinet::TreeDB();
+ delete db;
+ return 0;
+}
+ " HAVE_KYOTOCABINET)
+ set(CMAKE_REQUIRED_LIBRARIES ${OLD_CMAKE_REQUIRED_LIBRARIES})
+ if(HAVE_KYOTOCABINET)
+ leveldb_benchmark("benchmarks/db_bench_tree_db.cc")
+ target_link_libraries(db_bench_tree_db kyotocabinet)
+ endif(HAVE_KYOTOCABINET)
+endif(LEVELDB_BUILD_BENCHMARKS)
+
+if(LEVELDB_INSTALL)
+ install(TARGETS leveldb
+ EXPORT leveldbTargets
+ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ )
+ install(
+ FILES
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
+ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/leveldb
+ )
+
+ include(CMakePackageConfigHelpers)
+ write_basic_package_version_file(
+ "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
+ COMPATIBILITY SameMajorVersion
+ )
+ install(
+ EXPORT leveldbTargets
+ NAMESPACE leveldb::
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
+ )
+ install(
+ FILES
+ "cmake/leveldbConfig.cmake"
+ "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
+ )
+endif(LEVELDB_INSTALL)
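The `HAVE_CXX17_HAS_INCLUDE` probe above decides whether the generated `port_config.h` must be forced on via `LEVELDB_HAS_PORT_CONFIG_H`. A hedged sketch of the consuming side of that switch (standalone and illustrative; the real logic lives in leveldb's port layer, e.g. `port/port_stdcxx.h`):

```cpp
// If the compiler lacks C++17 __has_include, the build defines
// LEVELDB_HAS_PORT_CONFIG_H=1 and the generated header is included directly;
// otherwise __has_include locates it at compile time.
#if defined(LEVELDB_HAS_PORT_CONFIG_H)
#include "port/port_config.h"  // generated by configure_file() above
#elif defined(__has_include)
#if __has_include("port/port_config.h")
#include "port/port_config.h"
#endif
#endif
```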
diff --git a/src/leveldb/CONTRIBUTING.md b/src/leveldb/CONTRIBUTING.md
index cd600ff46b..a74572a596 100644
--- a/src/leveldb/CONTRIBUTING.md
+++ b/src/leveldb/CONTRIBUTING.md
@@ -31,6 +31,6 @@ the CLA.
## Writing Code ##
-If your contribution contains code, please make sure that it follows
-[the style guide](http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml).
+If your contribution contains code, please make sure that it follows
+[the style guide](http://google.github.io/styleguide/cppguide.html).
Otherwise we will have to ask you to make changes, and that's no fun for anyone.
diff --git a/src/leveldb/Makefile b/src/leveldb/Makefile
deleted file mode 100644
index f7cc7d736c..0000000000
--- a/src/leveldb/Makefile
+++ /dev/null
@@ -1,424 +0,0 @@
-# Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#-----------------------------------------------
-# Uncomment exactly one of the lines labelled (A), (B), and (C) below
-# to switch between compilation modes.
-
-# (A) Production use (optimized mode)
-OPT ?= -O2 -DNDEBUG
-# (B) Debug mode, w/ full line-level debugging symbols
-# OPT ?= -g2
-# (C) Profiling mode: opt, but w/debugging symbols
-# OPT ?= -O2 -g2 -DNDEBUG
-#-----------------------------------------------
-
-# detect what platform we're building on
-$(shell CC="$(CC)" CXX="$(CXX)" TARGET_OS="$(TARGET_OS)" \
- ./build_detect_platform build_config.mk ./)
-# this file is generated by the previous line to set build flags and sources
-include build_config.mk
-
-TESTS = \
- db/autocompact_test \
- db/c_test \
- db/corruption_test \
- db/db_test \
- db/dbformat_test \
- db/fault_injection_test \
- db/filename_test \
- db/log_test \
- db/recovery_test \
- db/skiplist_test \
- db/version_edit_test \
- db/version_set_test \
- db/write_batch_test \
- helpers/memenv/memenv_test \
- issues/issue178_test \
- issues/issue200_test \
- table/filter_block_test \
- table/table_test \
- util/arena_test \
- util/bloom_test \
- util/cache_test \
- util/coding_test \
- util/crc32c_test \
- util/env_posix_test \
- util/env_test \
- util/hash_test
-
-UTILS = \
- db/db_bench \
- db/leveldbutil
-
-# Put the object files in a subdirectory, but the application at the top of the object dir.
-PROGNAMES := $(notdir $(TESTS) $(UTILS))
-
-# On Linux may need libkyotocabinet-dev for dependency.
-BENCHMARKS = \
- doc/bench/db_bench_sqlite3 \
- doc/bench/db_bench_tree_db
-
-CFLAGS += -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
-CXXFLAGS += -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT)
-
-LDFLAGS += $(PLATFORM_LDFLAGS)
-LIBS += $(PLATFORM_LIBS)
-
-SIMULATOR_OUTDIR=out-ios-x86
-DEVICE_OUTDIR=out-ios-arm
-
-ifeq ($(PLATFORM), IOS)
-# Note: iOS should probably be using libtool, not ar.
-AR=xcrun ar
-SIMULATORSDK=$(shell xcrun -sdk iphonesimulator --show-sdk-path)
-DEVICESDK=$(shell xcrun -sdk iphoneos --show-sdk-path)
-DEVICE_CFLAGS = -isysroot "$(DEVICESDK)" -arch armv6 -arch armv7 -arch armv7s -arch arm64
-SIMULATOR_CFLAGS = -isysroot "$(SIMULATORSDK)" -arch i686 -arch x86_64
-STATIC_OUTDIR=out-ios-universal
-else
-STATIC_OUTDIR=out-static
-SHARED_OUTDIR=out-shared
-STATIC_PROGRAMS := $(addprefix $(STATIC_OUTDIR)/, $(PROGNAMES))
-SHARED_PROGRAMS := $(addprefix $(SHARED_OUTDIR)/, db_bench)
-endif
-
-STATIC_LIBOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(SOURCES:.cc=.o))
-STATIC_MEMENVOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
-
-DEVICE_LIBOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(SOURCES:.cc=.o))
-DEVICE_MEMENVOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
-
-SIMULATOR_LIBOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(SOURCES:.cc=.o))
-SIMULATOR_MEMENVOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
-
-SHARED_LIBOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(SOURCES:.cc=.o))
-SHARED_MEMENVOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
-
-TESTUTIL := $(STATIC_OUTDIR)/util/testutil.o
-TESTHARNESS := $(STATIC_OUTDIR)/util/testharness.o $(TESTUTIL)
-
-STATIC_TESTOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(TESTS)))
-STATIC_UTILOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(UTILS)))
-STATIC_ALLOBJS := $(STATIC_LIBOBJECTS) $(STATIC_MEMENVOBJECTS) $(STATIC_TESTOBJS) $(STATIC_UTILOBJS) $(TESTHARNESS)
-DEVICE_ALLOBJS := $(DEVICE_LIBOBJECTS) $(DEVICE_MEMENVOBJECTS)
-SIMULATOR_ALLOBJS := $(SIMULATOR_LIBOBJECTS) $(SIMULATOR_MEMENVOBJECTS)
-
-default: all
-
-# Should we build shared libraries?
-ifneq ($(PLATFORM_SHARED_EXT),)
-
-# Many leveldb test apps use non-exported API's. Only build a subset for testing.
-SHARED_ALLOBJS := $(SHARED_LIBOBJECTS) $(SHARED_MEMENVOBJECTS) $(TESTHARNESS)
-
-ifneq ($(PLATFORM_SHARED_VERSIONED),true)
-SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT)
-SHARED_LIB2 = $(SHARED_LIB1)
-SHARED_LIB3 = $(SHARED_LIB1)
-SHARED_LIBS = $(SHARED_LIB1)
-SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a
-else
-# Update db.h if you change these.
-SHARED_VERSION_MAJOR = 1
-SHARED_VERSION_MINOR = 20
-SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT)
-SHARED_LIB2 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR)
-SHARED_LIB3 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR).$(SHARED_VERSION_MINOR)
-SHARED_LIBS = $(SHARED_OUTDIR)/$(SHARED_LIB1) $(SHARED_OUTDIR)/$(SHARED_LIB2) $(SHARED_OUTDIR)/$(SHARED_LIB3)
-$(SHARED_OUTDIR)/$(SHARED_LIB1): $(SHARED_OUTDIR)/$(SHARED_LIB3)
- ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB1)
-$(SHARED_OUTDIR)/$(SHARED_LIB2): $(SHARED_OUTDIR)/$(SHARED_LIB3)
- ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB2)
-SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a
-endif
-
-$(SHARED_OUTDIR)/$(SHARED_LIB3): $(SHARED_LIBOBJECTS)
- $(CXX) $(LDFLAGS) $(PLATFORM_SHARED_LDFLAGS)$(SHARED_LIB2) $(SHARED_LIBOBJECTS) -o $(SHARED_OUTDIR)/$(SHARED_LIB3) $(LIBS)
-
-endif # PLATFORM_SHARED_EXT
-
-all: $(SHARED_LIBS) $(SHARED_PROGRAMS) $(STATIC_OUTDIR)/libleveldb.a $(STATIC_OUTDIR)/libmemenv.a $(STATIC_PROGRAMS)
-
-check: $(STATIC_PROGRAMS)
- for t in $(notdir $(TESTS)); do echo "***** Running $$t"; $(STATIC_OUTDIR)/$$t || exit 1; done
-
-clean:
- -rm -rf out-static out-shared out-ios-x86 out-ios-arm out-ios-universal
- -rm -f build_config.mk
- -rm -rf ios-x86 ios-arm
-
-$(STATIC_OUTDIR):
- mkdir $@
-
-$(STATIC_OUTDIR)/db: | $(STATIC_OUTDIR)
- mkdir $@
-
-$(STATIC_OUTDIR)/helpers/memenv: | $(STATIC_OUTDIR)
- mkdir -p $@
-
-$(STATIC_OUTDIR)/port: | $(STATIC_OUTDIR)
- mkdir $@
-
-$(STATIC_OUTDIR)/table: | $(STATIC_OUTDIR)
- mkdir $@
-
-$(STATIC_OUTDIR)/util: | $(STATIC_OUTDIR)
- mkdir $@
-
-.PHONY: STATIC_OBJDIRS
-STATIC_OBJDIRS: \
- $(STATIC_OUTDIR)/db \
- $(STATIC_OUTDIR)/port \
- $(STATIC_OUTDIR)/table \
- $(STATIC_OUTDIR)/util \
- $(STATIC_OUTDIR)/helpers/memenv
-
-$(SHARED_OUTDIR):
- mkdir $@
-
-$(SHARED_OUTDIR)/db: | $(SHARED_OUTDIR)
- mkdir $@
-
-$(SHARED_OUTDIR)/helpers/memenv: | $(SHARED_OUTDIR)
- mkdir -p $@
-
-$(SHARED_OUTDIR)/port: | $(SHARED_OUTDIR)
- mkdir $@
-
-$(SHARED_OUTDIR)/table: | $(SHARED_OUTDIR)
- mkdir $@
-
-$(SHARED_OUTDIR)/util: | $(SHARED_OUTDIR)
- mkdir $@
-
-.PHONY: SHARED_OBJDIRS
-SHARED_OBJDIRS: \
- $(SHARED_OUTDIR)/db \
- $(SHARED_OUTDIR)/port \
- $(SHARED_OUTDIR)/table \
- $(SHARED_OUTDIR)/util \
- $(SHARED_OUTDIR)/helpers/memenv
-
-$(DEVICE_OUTDIR):
- mkdir $@
-
-$(DEVICE_OUTDIR)/db: | $(DEVICE_OUTDIR)
- mkdir $@
-
-$(DEVICE_OUTDIR)/helpers/memenv: | $(DEVICE_OUTDIR)
- mkdir -p $@
-
-$(DEVICE_OUTDIR)/port: | $(DEVICE_OUTDIR)
- mkdir $@
-
-$(DEVICE_OUTDIR)/table: | $(DEVICE_OUTDIR)
- mkdir $@
-
-$(DEVICE_OUTDIR)/util: | $(DEVICE_OUTDIR)
- mkdir $@
-
-.PHONY: DEVICE_OBJDIRS
-DEVICE_OBJDIRS: \
- $(DEVICE_OUTDIR)/db \
- $(DEVICE_OUTDIR)/port \
- $(DEVICE_OUTDIR)/table \
- $(DEVICE_OUTDIR)/util \
- $(DEVICE_OUTDIR)/helpers/memenv
-
-$(SIMULATOR_OUTDIR):
- mkdir $@
-
-$(SIMULATOR_OUTDIR)/db: | $(SIMULATOR_OUTDIR)
- mkdir $@
-
-$(SIMULATOR_OUTDIR)/helpers/memenv: | $(SIMULATOR_OUTDIR)
- mkdir -p $@
-
-$(SIMULATOR_OUTDIR)/port: | $(SIMULATOR_OUTDIR)
- mkdir $@
-
-$(SIMULATOR_OUTDIR)/table: | $(SIMULATOR_OUTDIR)
- mkdir $@
-
-$(SIMULATOR_OUTDIR)/util: | $(SIMULATOR_OUTDIR)
- mkdir $@
-
-.PHONY: SIMULATOR_OBJDIRS
-SIMULATOR_OBJDIRS: \
- $(SIMULATOR_OUTDIR)/db \
- $(SIMULATOR_OUTDIR)/port \
- $(SIMULATOR_OUTDIR)/table \
- $(SIMULATOR_OUTDIR)/util \
- $(SIMULATOR_OUTDIR)/helpers/memenv
-
-$(STATIC_ALLOBJS): | STATIC_OBJDIRS
-$(DEVICE_ALLOBJS): | DEVICE_OBJDIRS
-$(SIMULATOR_ALLOBJS): | SIMULATOR_OBJDIRS
-$(SHARED_ALLOBJS): | SHARED_OBJDIRS
-
-ifeq ($(PLATFORM), IOS)
-$(DEVICE_OUTDIR)/libleveldb.a: $(DEVICE_LIBOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(DEVICE_LIBOBJECTS)
-
-$(SIMULATOR_OUTDIR)/libleveldb.a: $(SIMULATOR_LIBOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(SIMULATOR_LIBOBJECTS)
-
-$(DEVICE_OUTDIR)/libmemenv.a: $(DEVICE_MEMENVOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(DEVICE_MEMENVOBJECTS)
-
-$(SIMULATOR_OUTDIR)/libmemenv.a: $(SIMULATOR_MEMENVOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(SIMULATOR_MEMENVOBJECTS)
-
-# For iOS, create universal object libraries to be used on both the simulator and
-# a device.
-$(STATIC_OUTDIR)/libleveldb.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a
- lipo -create $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a -output $@
-
-$(STATIC_OUTDIR)/libmemenv.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a
- lipo -create $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a -output $@
-else
-$(STATIC_OUTDIR)/libleveldb.a:$(STATIC_LIBOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(STATIC_LIBOBJECTS)
-
-$(STATIC_OUTDIR)/libmemenv.a:$(STATIC_MEMENVOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(STATIC_MEMENVOBJECTS)
-endif
-
-$(SHARED_MEMENVLIB):$(SHARED_MEMENVOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(SHARED_MEMENVOBJECTS)
-
-$(STATIC_OUTDIR)/db_bench:db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/db_bench_sqlite3:doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lsqlite3 $(LIBS)
-
-$(STATIC_OUTDIR)/db_bench_tree_db:doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lkyotocabinet $(LIBS)
-
-$(STATIC_OUTDIR)/leveldbutil:db/leveldbutil.cc $(STATIC_LIBOBJECTS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/leveldbutil.cc $(STATIC_LIBOBJECTS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/arena_test:util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/autocompact_test:db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/bloom_test:util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/c_test:$(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/cache_test:util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/coding_test:util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/corruption_test:db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/crc32c_test:util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/db_test:db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/dbformat_test:db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/env_posix_test:util/env_posix_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_posix_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/env_test:util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/fault_injection_test:db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/filename_test:db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/filter_block_test:table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/hash_test:util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/issue178_test:issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/issue200_test:issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/log_test:db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/recovery_test:db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/table_test:table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/skiplist_test:db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/version_edit_test:db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/version_set_test:db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/write_batch_test:db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/memenv_test:$(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS)
- $(XCRUN) $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS) -o $@ $(LIBS)
-
-$(SHARED_OUTDIR)/db_bench:$(SHARED_OUTDIR)/db/db_bench.o $(SHARED_LIBS) $(TESTUTIL)
- $(XCRUN) $(CXX) $(LDFLAGS) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(SHARED_OUTDIR)/db/db_bench.o $(TESTUTIL) $(SHARED_OUTDIR)/$(SHARED_LIB3) -o $@ $(LIBS)
-
-.PHONY: run-shared
-run-shared: $(SHARED_OUTDIR)/db_bench
- LD_LIBRARY_PATH=$(SHARED_OUTDIR) $(SHARED_OUTDIR)/db_bench
-
-$(SIMULATOR_OUTDIR)/%.o: %.cc
- xcrun -sdk iphonesimulator $(CXX) $(CXXFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@
-
-$(DEVICE_OUTDIR)/%.o: %.cc
- xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) $(DEVICE_CFLAGS) -c $< -o $@
-
-$(SIMULATOR_OUTDIR)/%.o: %.c
- xcrun -sdk iphonesimulator $(CC) $(CFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@
-
-$(DEVICE_OUTDIR)/%.o: %.c
- xcrun -sdk iphoneos $(CC) $(CFLAGS) $(DEVICE_CFLAGS) -c $< -o $@
-
-$(STATIC_OUTDIR)/%.o: %.cc
- $(CXX) $(CXXFLAGS) -c $< -o $@
-
-$(STATIC_OUTDIR)/%.o: %.c
- $(CC) $(CFLAGS) -c $< -o $@
-
-$(SHARED_OUTDIR)/%.o: %.cc
- $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@
-
-$(SHARED_OUTDIR)/%.o: %.c
- $(CC) $(CFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@
-
-$(STATIC_OUTDIR)/port/port_posix_sse.o: port/port_posix_sse.cc
- $(CXX) $(CXXFLAGS) $(PLATFORM_SSEFLAGS) -c $< -o $@
-
-$(SHARED_OUTDIR)/port/port_posix_sse.o: port/port_posix_sse.cc
- $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(PLATFORM_SSEFLAGS) -c $< -o $@
diff --git a/src/leveldb/README.md b/src/leveldb/README.md
index a010c50858..dadfd5693e 100644
--- a/src/leveldb/README.md
+++ b/src/leveldb/README.md
@@ -1,10 +1,12 @@
**LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.**
[![Build Status](https://travis-ci.org/google/leveldb.svg?branch=master)](https://travis-ci.org/google/leveldb)
+[![Build status](https://ci.appveyor.com/api/projects/status/g2j5j4rfkda6eyw5/branch/master?svg=true)](https://ci.appveyor.com/project/pwnall/leveldb)
Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
# Features
+
* Keys and values are arbitrary byte arrays.
* Data is stored sorted by key.
* Callers can provide a custom comparison function to override the sort order.
@@ -16,15 +18,55 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
* External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions.
# Documentation
- [LevelDB library documentation](https://github.com/google/leveldb/blob/master/doc/index.md) is online and bundled with the source code.
+ [LevelDB library documentation](https://github.com/google/leveldb/blob/master/doc/index.md) is online and bundled with the source code.
# Limitations
+
* This is not a SQL database. It does not have a relational data model, it does not support SQL queries, and it has no support for indexes.
* Only a single process (possibly multi-threaded) can access a particular database at a time.
 * There is no client-server support built into the library. An application that needs such support will have to wrap its own server around the library.
+# Building
+
+This project supports [CMake](https://cmake.org/) out of the box.
+
+### Build for POSIX
+
+Quick start:
+
+```bash
+mkdir -p build && cd build
+cmake -DCMAKE_BUILD_TYPE=Release .. && cmake --build .
+```
+
+### Building for Windows
+
+First generate the Visual Studio 2017 project/solution files:
+
+```cmd
+mkdir build
+cd build
+cmake -G "Visual Studio 15" ..
+```
+The default will build for x86. For a 64-bit build, run:
+
+```cmd
+cmake -G "Visual Studio 15 Win64" ..
+```
+
+To compile the Windows solution from the command-line:
+
+```cmd
+devenv /build Debug leveldb.sln
+```
+
+or open leveldb.sln in Visual Studio and build from within.
+
+Please see the CMake documentation and `CMakeLists.txt` for more advanced usage.
+
# Contributing to the leveldb Project
+
The leveldb project welcomes contributions. leveldb's primary goal is to be
a reliable and fast key/value store. Changes that are in line with the
features/limitations outlined above, and meet the requirements below,
@@ -32,10 +74,10 @@ will be considered.
Contribution requirements:
-1. **POSIX only**. We _generally_ will only accept changes that are both
- compiled, and tested on a POSIX platform - usually Linux. Very small
- changes will sometimes be accepted, but consider that more of an
- exception than the rule.
+1. **Tested platforms only**. We _generally_ will only accept changes for
+ platforms that are compiled and tested. This means POSIX (for Linux and
+ macOS) or Windows. Very small changes will sometimes be accepted, but
+ consider that more of an exception than the rule.
2. **Stable API**. We strive very hard to maintain a stable API. Changes that
require changes for projects using leveldb _might_ be rejected without
@@ -44,7 +86,16 @@ Contribution requirements:
3. **Tests**: All changes must be accompanied by a new (or changed) test, or
a sufficient explanation as to why a new (or changed) test is not required.
+4. **Consistent Style**: This project conforms to the
+ [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
+ To ensure your changes are properly formatted please run:
+
+ ```
+ clang-format -i --style=file <file>
+ ```
+
## Submitting a Pull Request
+
Before any pull request will be accepted the author must first sign a
Contributor License Agreement (CLA) at https://cla.developers.google.com/.
@@ -138,37 +189,37 @@ uncompressed blocks in memory, the read performance improves again:
See [doc/index.md](doc/index.md) for more explanation. See
[doc/impl.md](doc/impl.md) for a brief overview of the implementation.
-The public interface is in include/*.h. Callers should not include or
+The public interface is in include/leveldb/*.h. Callers should not include or
rely on the details of any other header files in this package. Those
internal APIs may be changed without warning.
Guide to header files:
-* **include/db.h**: Main interface to the DB: Start here
+* **include/leveldb/db.h**: Main interface to the DB: Start here.
-* **include/options.h**: Control over the behavior of an entire database,
+* **include/leveldb/options.h**: Control over the behavior of an entire database,
and also control over the behavior of individual reads and writes.
-* **include/comparator.h**: Abstraction for user-specified comparison function.
+* **include/leveldb/comparator.h**: Abstraction for user-specified comparison function.
If you want just bytewise comparison of keys, you can use the default
comparator, but clients can write their own comparator implementations if they
-want custom ordering (e.g. to handle different character encodings, etc.)
+want custom ordering (e.g. to handle different character encodings, etc.).
-* **include/iterator.h**: Interface for iterating over data. You can get
+* **include/leveldb/iterator.h**: Interface for iterating over data. You can get
an iterator from a DB object.
-* **include/write_batch.h**: Interface for atomically applying multiple
+* **include/leveldb/write_batch.h**: Interface for atomically applying multiple
updates to a database.
-* **include/slice.h**: A simple module for maintaining a pointer and a
+* **include/leveldb/slice.h**: A simple module for maintaining a pointer and a
length into some other byte array.
-* **include/status.h**: Status is returned from many of the public interfaces
+* **include/leveldb/status.h**: Status is returned from many of the public interfaces
and is used to report success and various kinds of errors.
-* **include/env.h**:
+* **include/leveldb/env.h**:
Abstraction of the OS environment. A posix implementation of this interface is
-in util/env_posix.cc
+in util/env_posix.cc.
-* **include/table.h, include/table_builder.h**: Lower-level modules that most
-clients probably won't use directly
+* **include/leveldb/table.h, include/leveldb/table_builder.h**: Lower-level modules that most
+clients probably won't use directly.
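To complement the header guide above, a minimal usage sketch against the public headers (the database path and key/value strings are arbitrary; this follows the pattern documented in doc/index.md and is not part of this change):

```cpp
#include <cassert>
#include <iostream>
#include <string>

#include "leveldb/db.h"
#include "leveldb/options.h"
#include "leveldb/write_batch.h"

int main() {
  leveldb::DB* db = nullptr;
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::Status status = leveldb::DB::Open(options, "/tmp/exampledb", &db);
  if (!status.ok()) {
    std::cerr << status.ToString() << std::endl;
    return 1;
  }

  // Single write, then an atomic batch (include/leveldb/write_batch.h).
  db->Put(leveldb::WriteOptions(), "k1", "v1");
  leveldb::WriteBatch batch;
  batch.Delete("k1");
  batch.Put("k2", "v2");
  db->Write(leveldb::WriteOptions(), &batch);

  std::string value;
  status = db->Get(leveldb::ReadOptions(), "k2", &value);
  assert(status.ok() && value == "v2");

  // Iterate the whole key space in key order (include/leveldb/iterator.h).
  leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions());
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    std::cout << it->key().ToString() << " => " << it->value().ToString() << "\n";
  }
  assert(it->status().ok());
  delete it;
  delete db;
  return 0;
}
```

Build and link against the library produced by the CMake build described above (for example `-lleveldb`).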
diff --git a/src/leveldb/WINDOWS.md b/src/leveldb/WINDOWS.md
deleted file mode 100644
index 5b76c2448f..0000000000
--- a/src/leveldb/WINDOWS.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Building LevelDB On Windows
-
-## Prereqs
-
-Install the [Windows Software Development Kit version 7.1](http://www.microsoft.com/downloads/dlx/en-us/listdetailsview.aspx?FamilyID=6b6c21d2-2006-4afa-9702-529fa782d63b).
-
-Download and extract the [Snappy source distribution](http://snappy.googlecode.com/files/snappy-1.0.5.tar.gz)
-
-1. Open the "Windows SDK 7.1 Command Prompt" :
- Start Menu -> "Microsoft Windows SDK v7.1" > "Windows SDK 7.1 Command Prompt"
-2. Change the directory to the leveldb project
-
-## Building the Static lib
-
-* 32 bit Version
-
- setenv /x86
- msbuild.exe /p:Configuration=Release /p:Platform=Win32 /p:Snappy=..\snappy-1.0.5
-
-* 64 bit Version
-
- setenv /x64
- msbuild.exe /p:Configuration=Release /p:Platform=x64 /p:Snappy=..\snappy-1.0.5
-
-
-## Building and Running the Benchmark app
-
-* 32 bit Version
-
- setenv /x86
- msbuild.exe /p:Configuration=Benchmark /p:Platform=Win32 /p:Snappy=..\snappy-1.0.5
- Benchmark\leveldb.exe
-
-* 64 bit Version
-
- setenv /x64
- msbuild.exe /p:Configuration=Benchmark /p:Platform=x64 /p:Snappy=..\snappy-1.0.5
- x64\Benchmark\leveldb.exe
-
diff --git a/src/leveldb/db/db_bench.cc b/src/leveldb/benchmarks/db_bench.cc
index 3ad19a512b..3696023b70 100644
--- a/src/leveldb/db/db_bench.cc
+++ b/src/leveldb/benchmarks/db_bench.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
-#include "db/db_impl.h"
-#include "db/version_set.h"
+#include <sys/types.h>
+
#include "leveldb/cache.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
+#include "leveldb/filter_policy.h"
#include "leveldb/write_batch.h"
#include "port/port.h"
#include "util/crc32c.h"
@@ -35,7 +35,6 @@
// seekrandom -- N random seeks
// open -- cost of opening a DB
// crc32c -- repeated crc32c of 4K of data
-// acquireload -- load N*1000 times
// Meta operations:
// compact -- Compact the entire DB
// stats -- Print DB stats
@@ -57,9 +56,7 @@ static const char* FLAGS_benchmarks =
"fill100K,"
"crc32c,"
"snappycomp,"
- "snappyuncomp,"
- "acquireload,"
- ;
+ "snappyuncomp,";
// Number of key/values to place in database
static int FLAGS_num = 1000000;
@@ -112,12 +109,12 @@ static bool FLAGS_use_existing_db = false;
static bool FLAGS_reuse_logs = false;
// Use the db with the following name.
-static const char* FLAGS_db = NULL;
+static const char* FLAGS_db = nullptr;
namespace leveldb {
namespace {
-leveldb::Env* g_env = NULL;
+leveldb::Env* g_env = nullptr;
// Helper for quickly generating random data.
class RandomGenerator {
@@ -158,7 +155,7 @@ static Slice TrimSpace(Slice s) {
start++;
}
size_t limit = s.size();
- while (limit > start && isspace(s[limit-1])) {
+ while (limit > start && isspace(s[limit - 1])) {
limit--;
}
return Slice(s.data() + start, limit - start);
@@ -190,14 +187,12 @@ class Stats {
void Start() {
next_report_ = 100;
- last_op_finish_ = start_;
hist_.Clear();
done_ = 0;
bytes_ = 0;
seconds_ = 0;
- start_ = g_env->NowMicros();
- finish_ = start_;
message_.clear();
+ start_ = finish_ = last_op_finish_ = g_env->NowMicros();
}
void Merge(const Stats& other) {
@@ -217,9 +212,7 @@ class Stats {
seconds_ = (finish_ - start_) * 1e-6;
}
- void AddMessage(Slice msg) {
- AppendWithSpace(&message_, msg);
- }
+ void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); }
void FinishedSingleOp() {
if (FLAGS_histogram) {
@@ -235,21 +228,26 @@ class Stats {
done_++;
if (done_ >= next_report_) {
- if (next_report_ < 1000) next_report_ += 100;
- else if (next_report_ < 5000) next_report_ += 500;
- else if (next_report_ < 10000) next_report_ += 1000;
- else if (next_report_ < 50000) next_report_ += 5000;
- else if (next_report_ < 100000) next_report_ += 10000;
- else if (next_report_ < 500000) next_report_ += 50000;
- else next_report_ += 100000;
+ if (next_report_ < 1000)
+ next_report_ += 100;
+ else if (next_report_ < 5000)
+ next_report_ += 500;
+ else if (next_report_ < 10000)
+ next_report_ += 1000;
+ else if (next_report_ < 50000)
+ next_report_ += 5000;
+ else if (next_report_ < 100000)
+ next_report_ += 10000;
+ else if (next_report_ < 500000)
+ next_report_ += 50000;
+ else
+ next_report_ += 100000;
fprintf(stderr, "... finished %d ops%30s\r", done_, "");
fflush(stderr);
}
}
- void AddBytes(int64_t n) {
- bytes_ += n;
- }
+ void AddBytes(int64_t n) { bytes_ += n; }
void Report(const Slice& name) {
// Pretend at least one op was done in case we are running a benchmark
@@ -268,11 +266,8 @@ class Stats {
}
AppendWithSpace(&extra, message_);
- fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
- name.ToString().c_str(),
- seconds_ * 1e6 / done_,
- (extra.empty() ? "" : " "),
- extra.c_str());
+ fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+ seconds_ * 1e6 / done_, (extra.empty() ? "" : " "), extra.c_str());
if (FLAGS_histogram) {
fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
}
@@ -283,8 +278,8 @@ class Stats {
// State shared by all concurrent executions of the same benchmark.
struct SharedState {
port::Mutex mu;
- port::CondVar cv;
- int total;
+ port::CondVar cv GUARDED_BY(mu);
+ int total GUARDED_BY(mu);
// Each thread goes through the following states:
// (1) initializing
@@ -292,24 +287,22 @@ struct SharedState {
// (3) running
// (4) done
- int num_initialized;
- int num_done;
- bool start;
+ int num_initialized GUARDED_BY(mu);
+ int num_done GUARDED_BY(mu);
+ bool start GUARDED_BY(mu);
- SharedState() : cv(&mu) { }
+ SharedState(int total)
+ : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) {}
};
// Per-thread state for concurrent executions of the same benchmark.
struct ThreadState {
- int tid; // 0..n-1 when running in n threads
- Random rand; // Has different seeds for different threads
+ int tid; // 0..n-1 when running in n threads
+ Random rand; // Has different seeds for different threads
Stats stats;
SharedState* shared;
- ThreadState(int index)
- : tid(index),
- rand(1000 + index) {
- }
+ ThreadState(int index) : tid(index), rand(1000 + index), shared(nullptr) {}
};
} // namespace
@@ -335,20 +328,20 @@ class Benchmark {
static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
fprintf(stdout, "Entries: %d\n", num_);
fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
- ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
- / 1048576.0));
+ ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
- (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
- / 1048576.0));
+ (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+ 1048576.0));
PrintWarnings();
fprintf(stdout, "------------------------------------------------\n");
}
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
- fprintf(stdout,
- "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
- );
+ fprintf(
+ stdout,
+ "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
fprintf(stdout,
@@ -366,22 +359,22 @@ class Benchmark {
}
void PrintEnvironment() {
- fprintf(stderr, "LevelDB: version %d.%d\n",
- kMajorVersion, kMinorVersion);
+ fprintf(stderr, "LevelDB: version %d.%d\n", kMajorVersion,
+ kMinorVersion);
#if defined(__linux)
- time_t now = time(NULL);
+ time_t now = time(nullptr);
fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
- if (cpuinfo != NULL) {
+ if (cpuinfo != nullptr) {
char line[1000];
int num_cpus = 0;
std::string cpu_type;
std::string cache_size;
- while (fgets(line, sizeof(line), cpuinfo) != NULL) {
+ while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
const char* sep = strchr(line, ':');
- if (sep == NULL) {
+ if (sep == nullptr) {
continue;
}
Slice key = TrimSpace(Slice(line, sep - 1 - line));
@@ -402,16 +395,16 @@ class Benchmark {
public:
Benchmark()
- : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : NULL),
- filter_policy_(FLAGS_bloom_bits >= 0
- ? NewBloomFilterPolicy(FLAGS_bloom_bits)
- : NULL),
- db_(NULL),
- num_(FLAGS_num),
- value_size_(FLAGS_value_size),
- entries_per_batch_(1),
- reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
- heap_counter_(0) {
+ : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr),
+ filter_policy_(FLAGS_bloom_bits >= 0
+ ? NewBloomFilterPolicy(FLAGS_bloom_bits)
+ : nullptr),
+ db_(nullptr),
+ num_(FLAGS_num),
+ value_size_(FLAGS_value_size),
+ entries_per_batch_(1),
+ reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+ heap_counter_(0) {
std::vector<std::string> files;
g_env->GetChildren(FLAGS_db, &files);
for (size_t i = 0; i < files.size(); i++) {
@@ -435,12 +428,12 @@ class Benchmark {
Open();
const char* benchmarks = FLAGS_benchmarks;
- while (benchmarks != NULL) {
+ while (benchmarks != nullptr) {
const char* sep = strchr(benchmarks, ',');
Slice name;
- if (sep == NULL) {
+ if (sep == nullptr) {
name = benchmarks;
- benchmarks = NULL;
+ benchmarks = nullptr;
} else {
name = Slice(benchmarks, sep - benchmarks);
benchmarks = sep + 1;
@@ -453,7 +446,7 @@ class Benchmark {
entries_per_batch_ = 1;
write_options_ = WriteOptions();
- void (Benchmark::*method)(ThreadState*) = NULL;
+ void (Benchmark::*method)(ThreadState*) = nullptr;
bool fresh_db = false;
int num_threads = FLAGS_threads;
@@ -510,8 +503,6 @@ class Benchmark {
method = &Benchmark::Compact;
} else if (name == Slice("crc32c")) {
method = &Benchmark::Crc32c;
- } else if (name == Slice("acquireload")) {
- method = &Benchmark::AcquireLoad;
} else if (name == Slice("snappycomp")) {
method = &Benchmark::SnappyCompress;
} else if (name == Slice("snappyuncomp")) {
@@ -523,7 +514,7 @@ class Benchmark {
} else if (name == Slice("sstables")) {
PrintStats("leveldb.sstables");
} else {
- if (name != Slice()) { // No error message for empty name
+ if (!name.empty()) { // No error message for empty name
fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
}
}
@@ -532,16 +523,16 @@ class Benchmark {
if (FLAGS_use_existing_db) {
fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
name.ToString().c_str());
- method = NULL;
+ method = nullptr;
} else {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
DestroyDB(FLAGS_db, Options());
Open();
}
}
- if (method != NULL) {
+ if (method != nullptr) {
RunBenchmark(num_threads, name, method);
}
}
@@ -585,11 +576,7 @@ class Benchmark {
void RunBenchmark(int n, Slice name,
void (Benchmark::*method)(ThreadState*)) {
- SharedState shared;
- shared.total = n;
- shared.num_initialized = 0;
- shared.num_done = 0;
- shared.start = false;
+ SharedState shared(n);
ThreadArg* arg = new ThreadArg[n];
for (int i = 0; i < n; i++) {
@@ -643,22 +630,6 @@ class Benchmark {
thread->stats.AddMessage(label);
}
- void AcquireLoad(ThreadState* thread) {
- int dummy;
- port::AtomicPointer ap(&dummy);
- int count = 0;
- void *ptr = NULL;
- thread->stats.AddMessage("(each op is 1000 loads)");
- while (count < 100000) {
- for (int i = 0; i < 1000; i++) {
- ptr = ap.Acquire_Load();
- }
- count++;
- thread->stats.FinishedSingleOp();
- }
- if (ptr == NULL) exit(1); // Disable unused variable warning.
- }
-
void SnappyCompress(ThreadState* thread) {
RandomGenerator gen;
Slice input = gen.Generate(Options().block_size);
@@ -692,8 +663,8 @@ class Benchmark {
int64_t bytes = 0;
char* uncompressed = new char[input.size()];
while (ok && bytes < 1024 * 1048576) { // Compress 1G
- ok = port::Snappy_Uncompress(compressed.data(), compressed.size(),
- uncompressed);
+ ok = port::Snappy_Uncompress(compressed.data(), compressed.size(),
+ uncompressed);
bytes += input.size();
thread->stats.FinishedSingleOp();
}
@@ -707,7 +678,7 @@ class Benchmark {
}
void Open() {
- assert(db_ == NULL);
+ assert(db_ == nullptr);
Options options;
options.env = g_env;
options.create_if_missing = !FLAGS_use_existing_db;
@@ -733,13 +704,9 @@ class Benchmark {
}
}
- void WriteSeq(ThreadState* thread) {
- DoWrite(thread, true);
- }
+ void WriteSeq(ThreadState* thread) { DoWrite(thread, true); }
- void WriteRandom(ThreadState* thread) {
- DoWrite(thread, false);
- }
+ void WriteRandom(ThreadState* thread) { DoWrite(thread, false); }
void DoWrite(ThreadState* thread, bool seq) {
if (num_ != FLAGS_num) {
@@ -755,7 +722,7 @@ class Benchmark {
for (int i = 0; i < num_; i += entries_per_batch_) {
batch.Clear();
for (int j = 0; j < entries_per_batch_; j++) {
- const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
+ const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
char key[100];
snprintf(key, sizeof(key), "%016d", k);
batch.Put(key, gen.Generate(value_size_));
@@ -865,7 +832,7 @@ class Benchmark {
for (int i = 0; i < num_; i += entries_per_batch_) {
batch.Clear();
for (int j = 0; j < entries_per_batch_; j++) {
- const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
+ const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
char key[100];
snprintf(key, sizeof(key), "%016d", k);
batch.Delete(key);
@@ -879,13 +846,9 @@ class Benchmark {
}
}
- void DeleteSeq(ThreadState* thread) {
- DoDelete(thread, true);
- }
+ void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); }
- void DeleteRandom(ThreadState* thread) {
- DoDelete(thread, false);
- }
+ void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); }
void ReadWhileWriting(ThreadState* thread) {
if (thread->tid > 0) {
@@ -917,9 +880,7 @@ class Benchmark {
}
}
- void Compact(ThreadState* thread) {
- db_->CompactRange(NULL, NULL);
- }
+ void Compact(ThreadState* thread) { db_->CompactRange(nullptr, nullptr); }
void PrintStats(const char* key) {
std::string stats;
@@ -1008,10 +969,10 @@ int main(int argc, char** argv) {
leveldb::g_env = leveldb::Env::Default();
// Choose a location for the test database if none given with --db=<path>
- if (FLAGS_db == NULL) {
- leveldb::g_env->GetTestDirectory(&default_db_path);
- default_db_path += "/dbbench";
- FLAGS_db = default_db_path.c_str();
+ if (FLAGS_db == nullptr) {
+ leveldb::g_env->GetTestDirectory(&default_db_path);
+ default_db_path += "/dbbench";
+ FLAGS_db = default_db_path.c_str();
}
leveldb::Benchmark benchmark;
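Note: the db_bench.cc hunk above replaces field-by-field initialization with a single `SharedState shared(n);`, which implies the struct (not shown in this diff) gained a constructor. A minimal sketch of what that constructor could look like, assuming the members named in the removed assignments plus the mutex/condvar pair the benchmark uses to coordinate worker threads:

```cpp
// Hypothetical reconstruction only: member names come from the removed
// assignments; the mutex/condvar members are assumptions about how the
// benchmark synchronizes its worker threads.
#include "port/port.h"

namespace leveldb {

struct SharedState {
  port::Mutex mu;
  port::CondVar cv;
  int total;

  // Each worker bumps num_initialized once ready and num_done when finished;
  // the coordinator flips `start` to release all workers at once.
  int num_initialized;
  int num_done;
  bool start;

  explicit SharedState(int total)
      : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) {}
};

}  // namespace leveldb
```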
diff --git a/src/leveldb/doc/bench/db_bench_sqlite3.cc b/src/leveldb/benchmarks/db_bench_sqlite3.cc
index e63aaa8dcc..f183f4fcfd 100644
--- a/src/leveldb/doc/bench/db_bench_sqlite3.cc
+++ b/src/leveldb/benchmarks/db_bench_sqlite3.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include <sqlite3.h>
#include <stdio.h>
#include <stdlib.h>
-#include <sqlite3.h>
+
#include "util/histogram.h"
#include "util/random.h"
#include "util/testutil.h"
@@ -38,8 +39,7 @@ static const char* FLAGS_benchmarks =
"fillrand100K,"
"fillseq100K,"
"readseq,"
- "readrand100K,"
- ;
+ "readrand100K,";
// Number of key/values to place in database
static int FLAGS_num = 1000000;
@@ -76,10 +76,9 @@ static bool FLAGS_transaction = true;
static bool FLAGS_WAL_enabled = true;
// Use the db with the following name.
-static const char* FLAGS_db = NULL;
+static const char* FLAGS_db = nullptr;
-inline
-static void ExecErrorCheck(int status, char *err_msg) {
+inline static void ExecErrorCheck(int status, char* err_msg) {
if (status != SQLITE_OK) {
fprintf(stderr, "SQL error: %s\n", err_msg);
sqlite3_free(err_msg);
@@ -87,27 +86,25 @@ static void ExecErrorCheck(int status, char *err_msg) {
}
}
-inline
-static void StepErrorCheck(int status) {
+inline static void StepErrorCheck(int status) {
if (status != SQLITE_DONE) {
fprintf(stderr, "SQL step error: status = %d\n", status);
exit(1);
}
}
-inline
-static void ErrorCheck(int status) {
+inline static void ErrorCheck(int status) {
if (status != SQLITE_OK) {
fprintf(stderr, "sqlite3 error: status = %d\n", status);
exit(1);
}
}
-inline
-static void WalCheckpoint(sqlite3* db_) {
+inline static void WalCheckpoint(sqlite3* db_) {
// Flush all writes to disk
if (FLAGS_WAL_enabled) {
- sqlite3_wal_checkpoint_v2(db_, NULL, SQLITE_CHECKPOINT_FULL, NULL, NULL);
+ sqlite3_wal_checkpoint_v2(db_, nullptr, SQLITE_CHECKPOINT_FULL, nullptr,
+ nullptr);
}
}
@@ -152,7 +149,7 @@ static Slice TrimSpace(Slice s) {
start++;
}
int limit = s.size();
- while (limit > start && isspace(s[limit-1])) {
+ while (limit > start && isspace(s[limit - 1])) {
limit--;
}
return Slice(s.data() + start, limit - start);
@@ -176,7 +173,7 @@ class Benchmark {
// State kept for progress messages
int done_;
- int next_report_; // When to report next
+ int next_report_; // When to report next
void PrintHeader() {
const int kKeySize = 16;
@@ -185,17 +182,17 @@ class Benchmark {
fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size);
fprintf(stdout, "Entries: %d\n", num_);
fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
- ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
- / 1048576.0));
+ ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
PrintWarnings();
fprintf(stdout, "------------------------------------------------\n");
}
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
- fprintf(stdout,
- "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
- );
+ fprintf(
+ stdout,
+ "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
fprintf(stdout,
@@ -207,18 +204,18 @@ class Benchmark {
fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION);
#if defined(__linux)
- time_t now = time(NULL);
+ time_t now = time(nullptr);
fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
- if (cpuinfo != NULL) {
+ if (cpuinfo != nullptr) {
char line[1000];
int num_cpus = 0;
std::string cpu_type;
std::string cache_size;
- while (fgets(line, sizeof(line), cpuinfo) != NULL) {
+ while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
const char* sep = strchr(line, ':');
- if (sep == NULL) {
+ if (sep == nullptr) {
continue;
}
Slice key = TrimSpace(Slice(line, sep - 1 - line));
@@ -261,13 +258,20 @@ class Benchmark {
done_++;
if (done_ >= next_report_) {
- if (next_report_ < 1000) next_report_ += 100;
- else if (next_report_ < 5000) next_report_ += 500;
- else if (next_report_ < 10000) next_report_ += 1000;
- else if (next_report_ < 50000) next_report_ += 5000;
- else if (next_report_ < 100000) next_report_ += 10000;
- else if (next_report_ < 500000) next_report_ += 50000;
- else next_report_ += 100000;
+ if (next_report_ < 1000)
+ next_report_ += 100;
+ else if (next_report_ < 5000)
+ next_report_ += 500;
+ else if (next_report_ < 10000)
+ next_report_ += 1000;
+ else if (next_report_ < 50000)
+ next_report_ += 5000;
+ else if (next_report_ < 100000)
+ next_report_ += 10000;
+ else if (next_report_ < 500000)
+ next_report_ += 50000;
+ else
+ next_report_ += 100000;
fprintf(stderr, "... finished %d ops%30s\r", done_, "");
fflush(stderr);
}
@@ -285,16 +289,14 @@ class Benchmark {
snprintf(rate, sizeof(rate), "%6.1f MB/s",
(bytes_ / 1048576.0) / (finish - start_));
if (!message_.empty()) {
- message_ = std::string(rate) + " " + message_;
+ message_ = std::string(rate) + " " + message_;
} else {
message_ = rate;
}
}
- fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
- name.ToString().c_str(),
- (finish - start_) * 1e6 / done_,
- (message_.empty() ? "" : " "),
+ fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+ (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
message_.c_str());
if (FLAGS_histogram) {
fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
@@ -303,22 +305,16 @@ class Benchmark {
}
public:
- enum Order {
- SEQUENTIAL,
- RANDOM
- };
- enum DBState {
- FRESH,
- EXISTING
- };
+ enum Order { SEQUENTIAL, RANDOM };
+ enum DBState { FRESH, EXISTING };
Benchmark()
- : db_(NULL),
- db_num_(0),
- num_(FLAGS_num),
- reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
- bytes_(0),
- rand_(301) {
+ : db_(nullptr),
+ db_num_(0),
+ num_(FLAGS_num),
+ reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+ bytes_(0),
+ rand_(301) {
std::vector<std::string> files;
std::string test_dir;
Env::Default()->GetTestDirectory(&test_dir);
@@ -345,12 +341,12 @@ class Benchmark {
Open();
const char* benchmarks = FLAGS_benchmarks;
- while (benchmarks != NULL) {
+ while (benchmarks != nullptr) {
const char* sep = strchr(benchmarks, ',');
Slice name;
- if (sep == NULL) {
+ if (sep == nullptr) {
name = benchmarks;
- benchmarks = NULL;
+ benchmarks = nullptr;
} else {
name = Slice(benchmarks, sep - benchmarks);
benchmarks = sep + 1;
@@ -415,20 +411,18 @@ class Benchmark {
}
void Open() {
- assert(db_ == NULL);
+ assert(db_ == nullptr);
int status;
char file_name[100];
- char* err_msg = NULL;
+ char* err_msg = nullptr;
db_num_++;
// Open database
std::string tmp_dir;
Env::Default()->GetTestDirectory(&tmp_dir);
- snprintf(file_name, sizeof(file_name),
- "%s/dbbench_sqlite3-%d.db",
- tmp_dir.c_str(),
- db_num_);
+ snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db",
+ tmp_dir.c_str(), db_num_);
status = sqlite3_open(file_name, &db_);
if (status) {
fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
@@ -439,7 +433,7 @@ class Benchmark {
char cache_size[100];
snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d",
FLAGS_num_pages);
- status = sqlite3_exec(db_, cache_size, NULL, NULL, &err_msg);
+ status = sqlite3_exec(db_, cache_size, nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
// FLAGS_page_size is defaulted to 1024
@@ -447,7 +441,7 @@ class Benchmark {
char page_size[100];
snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d",
FLAGS_page_size);
- status = sqlite3_exec(db_, page_size, NULL, NULL, &err_msg);
+ status = sqlite3_exec(db_, page_size, nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
}
@@ -457,26 +451,28 @@ class Benchmark {
// LevelDB's default cache size is a combined 4 MB
std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096";
- status = sqlite3_exec(db_, WAL_stmt.c_str(), NULL, NULL, &err_msg);
+ status = sqlite3_exec(db_, WAL_stmt.c_str(), nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
- status = sqlite3_exec(db_, WAL_checkpoint.c_str(), NULL, NULL, &err_msg);
+ status =
+ sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
}
// Change locking mode to exclusive and create tables/index for database
std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE";
std::string create_stmt =
- "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
- std::string stmt_array[] = { locking_stmt, create_stmt };
+ "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
+ std::string stmt_array[] = {locking_stmt, create_stmt};
int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
for (int i = 0; i < stmt_array_length; i++) {
- status = sqlite3_exec(db_, stmt_array[i].c_str(), NULL, NULL, &err_msg);
+ status =
+ sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
}
}
- void Write(bool write_sync, Order order, DBState state,
- int num_entries, int value_size, int entries_per_batch) {
+ void Write(bool write_sync, Order order, DBState state, int num_entries,
+ int value_size, int entries_per_batch) {
// Create new database if state == FRESH
if (state == FRESH) {
if (FLAGS_use_existing_db) {
@@ -484,7 +480,7 @@ class Benchmark {
return;
}
sqlite3_close(db_);
- db_ = NULL;
+ db_ = nullptr;
Open();
Start();
}
@@ -495,7 +491,7 @@ class Benchmark {
message_ = msg;
}
- char* err_msg = NULL;
+ char* err_msg = nullptr;
int status;
sqlite3_stmt *replace_stmt, *begin_trans_stmt, *end_trans_stmt;
@@ -504,20 +500,20 @@ class Benchmark {
std::string end_trans_str = "END TRANSACTION;";
// Check for synchronous flag in options
- std::string sync_stmt = (write_sync) ? "PRAGMA synchronous = FULL" :
- "PRAGMA synchronous = OFF";
- status = sqlite3_exec(db_, sync_stmt.c_str(), NULL, NULL, &err_msg);
+ std::string sync_stmt =
+ (write_sync) ? "PRAGMA synchronous = FULL" : "PRAGMA synchronous = OFF";
+ status = sqlite3_exec(db_, sync_stmt.c_str(), nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
// Preparing sqlite3 statements
- status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1,
- &replace_stmt, NULL);
+ status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1, &replace_stmt,
+ nullptr);
ErrorCheck(status);
status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
- &begin_trans_stmt, NULL);
+ &begin_trans_stmt, nullptr);
ErrorCheck(status);
- status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
- &end_trans_stmt, NULL);
+ status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
+ nullptr);
ErrorCheck(status);
bool transaction = (entries_per_batch > 1);
@@ -535,16 +531,16 @@ class Benchmark {
const char* value = gen_.Generate(value_size).data();
// Create values for key-value pair
- const int k = (order == SEQUENTIAL) ? i + j :
- (rand_.Next() % num_entries);
+ const int k =
+ (order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries);
char key[100];
snprintf(key, sizeof(key), "%016d", k);
// Bind KV values into replace_stmt
status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
ErrorCheck(status);
- status = sqlite3_bind_blob(replace_stmt, 2, value,
- value_size, SQLITE_STATIC);
+ status = sqlite3_bind_blob(replace_stmt, 2, value, value_size,
+ SQLITE_STATIC);
ErrorCheck(status);
// Execute replace_stmt
@@ -588,12 +584,12 @@ class Benchmark {
// Preparing sqlite3 statements
status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
- &begin_trans_stmt, NULL);
+ &begin_trans_stmt, nullptr);
ErrorCheck(status);
- status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
- &end_trans_stmt, NULL);
+ status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
+ nullptr);
ErrorCheck(status);
- status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, NULL);
+ status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, nullptr);
ErrorCheck(status);
bool transaction = (entries_per_batch > 1);
@@ -618,7 +614,8 @@ class Benchmark {
ErrorCheck(status);
// Execute read statement
- while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {}
+ while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {
+ }
StepErrorCheck(status);
// Reset SQLite statement for another use
@@ -648,10 +645,10 @@ class Benchmark {
void ReadSequential() {
int status;
- sqlite3_stmt *pStmt;
+ sqlite3_stmt* pStmt;
std::string read_str = "SELECT * FROM test ORDER BY key";
- status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, NULL);
+ status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, nullptr);
ErrorCheck(status);
for (int i = 0; i < reads_ && SQLITE_ROW == sqlite3_step(pStmt); i++) {
bytes_ += sqlite3_column_bytes(pStmt, 1) + sqlite3_column_bytes(pStmt, 2);
@@ -661,7 +658,6 @@ class Benchmark {
status = sqlite3_finalize(pStmt);
ErrorCheck(status);
}
-
};
} // namespace leveldb
@@ -706,10 +702,10 @@ int main(int argc, char** argv) {
}
// Choose a location for the test database if none given with --db=<path>
- if (FLAGS_db == NULL) {
- leveldb::Env::Default()->GetTestDirectory(&default_db_path);
- default_db_path += "/dbbench";
- FLAGS_db = default_db_path.c_str();
+ if (FLAGS_db == nullptr) {
+ leveldb::Env::Default()->GetTestDirectory(&default_db_path);
+ default_db_path += "/dbbench";
+ FLAGS_db = default_db_path.c_str();
}
leveldb::Benchmark benchmark;
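Note: most of the reflowed hunks in db_bench_sqlite3.cc are clang-format churn around the same SQLite pattern: prepare a statement once, then bind, step, and reset it per key. A compressed, hypothetical sketch of that cycle (the in-memory database and literal value are illustrative, not taken from the benchmark; the table layout mirrors the CREATE TABLE statement in the diff above):

```cpp
// Sketch of the prepare/bind/step/reset loop used by the SQLite benchmark.
#include <stdio.h>
#include <sqlite3.h>

int main() {
  sqlite3* db = nullptr;
  if (sqlite3_open(":memory:", &db) != SQLITE_OK) return 1;

  char* err = nullptr;
  sqlite3_exec(db, "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))",
               nullptr, nullptr, &err);
  if (err != nullptr) sqlite3_free(err);

  sqlite3_stmt* replace_stmt = nullptr;
  sqlite3_prepare_v2(db, "REPLACE INTO test (key, value) VALUES (?, ?)", -1,
                     &replace_stmt, nullptr);

  for (int k = 0; k < 3; k++) {
    char key[100];
    snprintf(key, sizeof(key), "%016d", k);  // 16-byte zero-padded key, as in the benchmark
    sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
    sqlite3_bind_blob(replace_stmt, 2, "value", 5, SQLITE_STATIC);
    if (sqlite3_step(replace_stmt) != SQLITE_DONE) fprintf(stderr, "step failed\n");
    sqlite3_reset(replace_stmt);  // reuse the prepared statement for the next key
  }

  sqlite3_finalize(replace_stmt);
  sqlite3_close(db);
  return 0;
}
```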
diff --git a/src/leveldb/doc/bench/db_bench_tree_db.cc b/src/leveldb/benchmarks/db_bench_tree_db.cc
index 4ca381f11f..b2f6646d89 100644
--- a/src/leveldb/doc/bench/db_bench_tree_db.cc
+++ b/src/leveldb/benchmarks/db_bench_tree_db.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include <kcpolydb.h>
#include <stdio.h>
#include <stdlib.h>
-#include <kcpolydb.h>
+
#include "util/histogram.h"
#include "util/random.h"
#include "util/testutil.h"
@@ -34,8 +35,7 @@ static const char* FLAGS_benchmarks =
"fillrand100K,"
"fillseq100K,"
"readseq100K,"
- "readrand100K,"
- ;
+ "readrand100K,";
// Number of key/values to place in database
static int FLAGS_num = 1000000;
@@ -69,11 +69,9 @@ static bool FLAGS_use_existing_db = false;
static bool FLAGS_compression = true;
// Use the db with the following name.
-static const char* FLAGS_db = NULL;
+static const char* FLAGS_db = nullptr;
-inline
-static void DBSynchronize(kyotocabinet::TreeDB* db_)
-{
+inline static void DBSynchronize(kyotocabinet::TreeDB* db_) {
// Synchronize will flush writes to disk
if (!db_->synchronize()) {
fprintf(stderr, "synchronize error: %s\n", db_->error().name());
@@ -121,7 +119,7 @@ static Slice TrimSpace(Slice s) {
start++;
}
int limit = s.size();
- while (limit > start && isspace(s[limit-1])) {
+ while (limit > start && isspace(s[limit - 1])) {
limit--;
}
return Slice(s.data() + start, limit - start);
@@ -146,7 +144,7 @@ class Benchmark {
// State kept for progress messages
int done_;
- int next_report_; // When to report next
+ int next_report_; // When to report next
void PrintHeader() {
const int kKeySize = 16;
@@ -157,20 +155,20 @@ class Benchmark {
static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
fprintf(stdout, "Entries: %d\n", num_);
fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
- ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
- / 1048576.0));
+ ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
- (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
- / 1048576.0));
+ (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+ 1048576.0));
PrintWarnings();
fprintf(stdout, "------------------------------------------------\n");
}
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
- fprintf(stdout,
- "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
- );
+ fprintf(
+ stdout,
+ "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
fprintf(stdout,
@@ -183,18 +181,18 @@ class Benchmark {
kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV);
#if defined(__linux)
- time_t now = time(NULL);
+ time_t now = time(nullptr);
fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
- if (cpuinfo != NULL) {
+ if (cpuinfo != nullptr) {
char line[1000];
int num_cpus = 0;
std::string cpu_type;
std::string cache_size;
- while (fgets(line, sizeof(line), cpuinfo) != NULL) {
+ while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
const char* sep = strchr(line, ':');
- if (sep == NULL) {
+ if (sep == nullptr) {
continue;
}
Slice key = TrimSpace(Slice(line, sep - 1 - line));
@@ -237,13 +235,20 @@ class Benchmark {
done_++;
if (done_ >= next_report_) {
- if (next_report_ < 1000) next_report_ += 100;
- else if (next_report_ < 5000) next_report_ += 500;
- else if (next_report_ < 10000) next_report_ += 1000;
- else if (next_report_ < 50000) next_report_ += 5000;
- else if (next_report_ < 100000) next_report_ += 10000;
- else if (next_report_ < 500000) next_report_ += 50000;
- else next_report_ += 100000;
+ if (next_report_ < 1000)
+ next_report_ += 100;
+ else if (next_report_ < 5000)
+ next_report_ += 500;
+ else if (next_report_ < 10000)
+ next_report_ += 1000;
+ else if (next_report_ < 50000)
+ next_report_ += 5000;
+ else if (next_report_ < 100000)
+ next_report_ += 10000;
+ else if (next_report_ < 500000)
+ next_report_ += 50000;
+ else
+ next_report_ += 100000;
fprintf(stderr, "... finished %d ops%30s\r", done_, "");
fflush(stderr);
}
@@ -261,16 +266,14 @@ class Benchmark {
snprintf(rate, sizeof(rate), "%6.1f MB/s",
(bytes_ / 1048576.0) / (finish - start_));
if (!message_.empty()) {
- message_ = std::string(rate) + " " + message_;
+ message_ = std::string(rate) + " " + message_;
} else {
message_ = rate;
}
}
- fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
- name.ToString().c_str(),
- (finish - start_) * 1e6 / done_,
- (message_.empty() ? "" : " "),
+ fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+ (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
message_.c_str());
if (FLAGS_histogram) {
fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
@@ -279,21 +282,15 @@ class Benchmark {
}
public:
- enum Order {
- SEQUENTIAL,
- RANDOM
- };
- enum DBState {
- FRESH,
- EXISTING
- };
+ enum Order { SEQUENTIAL, RANDOM };
+ enum DBState { FRESH, EXISTING };
Benchmark()
- : db_(NULL),
- num_(FLAGS_num),
- reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
- bytes_(0),
- rand_(301) {
+ : db_(nullptr),
+ num_(FLAGS_num),
+ reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+ bytes_(0),
+ rand_(301) {
std::vector<std::string> files;
std::string test_dir;
Env::Default()->GetTestDirectory(&test_dir);
@@ -321,12 +318,12 @@ class Benchmark {
Open(false);
const char* benchmarks = FLAGS_benchmarks;
- while (benchmarks != NULL) {
+ while (benchmarks != nullptr) {
const char* sep = strchr(benchmarks, ',');
Slice name;
- if (sep == NULL) {
+ if (sep == nullptr) {
name = benchmarks;
- benchmarks = NULL;
+ benchmarks = nullptr;
} else {
name = Slice(benchmarks, sep - benchmarks);
benchmarks = sep + 1;
@@ -386,8 +383,8 @@ class Benchmark {
}
private:
- void Open(bool sync) {
- assert(db_ == NULL);
+ void Open(bool sync) {
+ assert(db_ == nullptr);
// Initialize db_
db_ = new kyotocabinet::TreeDB();
@@ -395,16 +392,14 @@ class Benchmark {
db_num_++;
std::string test_dir;
Env::Default()->GetTestDirectory(&test_dir);
- snprintf(file_name, sizeof(file_name),
- "%s/dbbench_polyDB-%d.kct",
- test_dir.c_str(),
- db_num_);
+ snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct",
+ test_dir.c_str(), db_num_);
// Create tuning options and open the database
- int open_options = kyotocabinet::PolyDB::OWRITER |
- kyotocabinet::PolyDB::OCREATE;
- int tune_options = kyotocabinet::TreeDB::TSMALL |
- kyotocabinet::TreeDB::TLINEAR;
+ int open_options =
+ kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE;
+ int tune_options =
+ kyotocabinet::TreeDB::TSMALL | kyotocabinet::TreeDB::TLINEAR;
if (FLAGS_compression) {
tune_options |= kyotocabinet::TreeDB::TCOMPRESS;
db_->tune_compressor(&comp_);
@@ -412,7 +407,7 @@ class Benchmark {
db_->tune_options(tune_options);
db_->tune_page_cache(FLAGS_cache_size);
db_->tune_page(FLAGS_page_size);
- db_->tune_map(256LL<<20);
+ db_->tune_map(256LL << 20);
if (sync) {
open_options |= kyotocabinet::PolyDB::OAUTOSYNC;
}
@@ -421,8 +416,8 @@ class Benchmark {
}
}
- void Write(bool sync, Order order, DBState state,
- int num_entries, int value_size, int entries_per_batch) {
+ void Write(bool sync, Order order, DBState state, int num_entries,
+ int value_size, int entries_per_batch) {
// Create new database if state == FRESH
if (state == FRESH) {
if (FLAGS_use_existing_db) {
@@ -430,7 +425,7 @@ class Benchmark {
return;
}
delete db_;
- db_ = NULL;
+ db_ = nullptr;
Open(sync);
Start(); // Do not count time taken to destroy/open
}
@@ -442,8 +437,7 @@ class Benchmark {
}
// Write to database
- for (int i = 0; i < num_entries; i++)
- {
+ for (int i = 0; i < num_entries; i++) {
const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries);
char key[100];
snprintf(key, sizeof(key), "%016d", k);
@@ -516,10 +510,10 @@ int main(int argc, char** argv) {
}
// Choose a location for the test database if none given with --db=<path>
- if (FLAGS_db == NULL) {
- leveldb::Env::Default()->GetTestDirectory(&default_db_path);
- default_db_path += "/dbbench";
- FLAGS_db = default_db_path.c_str();
+ if (FLAGS_db == nullptr) {
+ leveldb::Env::Default()->GetTestDirectory(&default_db_path);
+ default_db_path += "/dbbench";
+ FLAGS_db = default_db_path.c_str();
}
leveldb::Benchmark benchmark;
diff --git a/src/leveldb/build_detect_platform b/src/leveldb/build_detect_platform
deleted file mode 100755
index 4a94715900..0000000000
--- a/src/leveldb/build_detect_platform
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/bin/sh
-#
-# Detects OS we're compiling on and outputs a file specified by the first
-# argument, which in turn gets read while processing Makefile.
-#
-# The output will set the following variables:
-# CC C Compiler path
-# CXX C++ Compiler path
-# PLATFORM_LDFLAGS Linker flags
-# PLATFORM_LIBS Libraries flags
-# PLATFORM_SHARED_EXT Extension for shared libraries
-# PLATFORM_SHARED_LDFLAGS Flags for building shared library
-# This flag is embedded just before the name
-# of the shared library without intervening spaces
-# PLATFORM_SHARED_CFLAGS Flags for compiling objects for shared library
-# PLATFORM_CCFLAGS C compiler flags
-# PLATFORM_CXXFLAGS C++ compiler flags. Will contain:
-# PLATFORM_SHARED_VERSIONED Set to 'true' if platform supports versioned
-# shared libraries, empty otherwise.
-#
-# The PLATFORM_CCFLAGS and PLATFORM_CXXFLAGS might include the following:
-#
-# -DLEVELDB_ATOMIC_PRESENT if <atomic> is present
-# -DLEVELDB_PLATFORM_POSIX for Posix-based platforms
-# -DSNAPPY if the Snappy library is present
-#
-
-OUTPUT=$1
-PREFIX=$2
-if test -z "$OUTPUT" || test -z "$PREFIX"; then
- echo "usage: $0 <output-filename> <directory_prefix>" >&2
- exit 1
-fi
-
-# Delete existing output, if it exists
-rm -f $OUTPUT
-touch $OUTPUT
-
-if test -z "$CC"; then
- CC=cc
-fi
-
-if test -z "$CXX"; then
- CXX=g++
-fi
-
-if test -z "$TMPDIR"; then
- TMPDIR=/tmp
-fi
-
-# Detect OS
-if test -z "$TARGET_OS"; then
- TARGET_OS=`uname -s`
-fi
-
-COMMON_FLAGS=
-CROSS_COMPILE=
-PLATFORM_CCFLAGS=
-PLATFORM_CXXFLAGS=
-PLATFORM_LDFLAGS=
-PLATFORM_LIBS=
-PLATFORM_SHARED_EXT="so"
-PLATFORM_SHARED_LDFLAGS="-shared -Wl,-soname -Wl,"
-PLATFORM_SHARED_CFLAGS="-fPIC"
-PLATFORM_SHARED_VERSIONED=true
-PLATFORM_SSEFLAGS=
-
-MEMCMP_FLAG=
-if [ "$CXX" = "g++" ]; then
- # Use libc's memcmp instead of GCC's memcmp. This results in ~40%
- # performance improvement on readrandom under gcc 4.4.3 on Linux/x86.
- MEMCMP_FLAG="-fno-builtin-memcmp"
-fi
-
-case "$TARGET_OS" in
- CYGWIN_*)
- PLATFORM=OS_LINUX
- COMMON_FLAGS="$MEMCMP_FLAG -lpthread -DOS_LINUX -DCYGWIN"
- PLATFORM_LDFLAGS="-lpthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- Darwin)
- PLATFORM=OS_MACOSX
- COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX"
- PLATFORM_SHARED_EXT=dylib
- [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd`
- PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name $INSTALL_PATH/"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- Linux)
- PLATFORM=OS_LINUX
- COMMON_FLAGS="$MEMCMP_FLAG -pthread -DOS_LINUX"
- PLATFORM_LDFLAGS="-pthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- SunOS)
- PLATFORM=OS_SOLARIS
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_SOLARIS"
- PLATFORM_LIBS="-lpthread -lrt"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- FreeBSD)
- PLATFORM=OS_FREEBSD
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_FREEBSD"
- PLATFORM_LIBS="-lpthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- GNU/kFreeBSD)
- PLATFORM=OS_KFREEBSD
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_KFREEBSD"
- PLATFORM_LIBS="-lpthread"
- PORT_FILE=port/port_posix.cc
- ;;
- NetBSD)
- PLATFORM=OS_NETBSD
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_NETBSD"
- PLATFORM_LIBS="-lpthread -lgcc_s"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- OpenBSD)
- PLATFORM=OS_OPENBSD
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_OPENBSD"
- PLATFORM_LDFLAGS="-pthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- DragonFly)
- PLATFORM=OS_DRAGONFLYBSD
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_DRAGONFLYBSD"
- PLATFORM_LIBS="-lpthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- OS_ANDROID_CROSSCOMPILE)
- PLATFORM=OS_ANDROID
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_ANDROID -DLEVELDB_PLATFORM_POSIX"
- PLATFORM_LDFLAGS="" # All pthread features are in the Android C library
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- CROSS_COMPILE=true
- ;;
- HP-UX)
- PLATFORM=OS_HPUX
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_HPUX"
- PLATFORM_LDFLAGS="-pthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- # man ld: +h internal_name
- PLATFORM_SHARED_LDFLAGS="-shared -Wl,+h -Wl,"
- ;;
- IOS)
- PLATFORM=IOS
- COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX"
- [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd`
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- PLATFORM_SHARED_EXT=
- PLATFORM_SHARED_LDFLAGS=
- PLATFORM_SHARED_CFLAGS=
- PLATFORM_SHARED_VERSIONED=
- ;;
- OS_WINDOWS_CROSSCOMPILE | NATIVE_WINDOWS)
- PLATFORM=OS_WINDOWS
- COMMON_FLAGS="-fno-builtin-memcmp -D_REENTRANT -DOS_WINDOWS -DLEVELDB_PLATFORM_WINDOWS -DWINVER=0x0500 -D__USE_MINGW_ANSI_STDIO=1"
- PLATFORM_SOURCES="util/env_win.cc"
- PLATFORM_LIBS="-lshlwapi"
- PORT_FILE=port/port_win.cc
- CROSS_COMPILE=true
- ;;
- *)
- echo "Unknown platform!" >&2
- exit 1
-esac
-
-# We want to make a list of all cc files within util, db, table, and helpers
-# except for the test and benchmark files. By default, find will output a list
-# of all files matching either rule, so we need to append -print to make the
-# prune take effect.
-DIRS="$PREFIX/db $PREFIX/util $PREFIX/table"
-
-set -f # temporarily disable globbing so that our patterns aren't expanded
-PRUNE_TEST="-name *test*.cc -prune"
-PRUNE_BENCH="-name *_bench.cc -prune"
-PRUNE_TOOL="-name leveldbutil.cc -prune"
-PORTABLE_FILES=`find $DIRS $PRUNE_TEST -o $PRUNE_BENCH -o $PRUNE_TOOL -o -name '*.cc' -print | sort | sed "s,^$PREFIX/,," | tr "\n" " "`
-
-set +f # re-enable globbing
-
-# The sources consist of the portable files, plus the platform-specific port
-# file.
-echo "SOURCES=$PORTABLE_FILES $PORT_FILE $PORT_SSE_FILE" >> $OUTPUT
-echo "MEMENV_SOURCES=helpers/memenv/memenv.cc" >> $OUTPUT
-
-if [ "$CROSS_COMPILE" = "true" ]; then
- # Cross-compiling; do not try any compilation tests.
- true
-else
- CXXOUTPUT="${TMPDIR}/leveldb_build_detect_platform-cxx.$$"
-
- # If -std=c++0x works, use <atomic> as fallback for when memory barriers
- # are not available.
- $CXX $CXXFLAGS -std=c++0x -x c++ - -o $CXXOUTPUT 2>/dev/null <<EOF
- #include <atomic>
- int main() {}
-EOF
- if [ "$?" = 0 ]; then
- COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX -DLEVELDB_ATOMIC_PRESENT"
- PLATFORM_CXXFLAGS="-std=c++0x"
- else
- COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX"
- fi
-
- # Test whether tcmalloc is available
- $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT -ltcmalloc 2>/dev/null <<EOF
- int main() {}
-EOF
- if [ "$?" = 0 ]; then
- PLATFORM_LIBS="$PLATFORM_LIBS -ltcmalloc"
- fi
-
- rm -f $CXXOUTPUT 2>/dev/null
-
- # Test if gcc SSE 4.2 is supported
- $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT -msse4.2 2>/dev/null <<EOF
- int main() {}
-EOF
- if [ "$?" = 0 ]; then
- PLATFORM_SSEFLAGS="-msse4.2"
- fi
-
- rm -f $CXXOUTPUT 2>/dev/null
-fi
-
-# Use the SSE 4.2 CRC32C intrinsics iff runtime checks indicate compiler supports them.
-if [ -n "$PLATFORM_SSEFLAGS" ]; then
- PLATFORM_SSEFLAGS="$PLATFORM_SSEFLAGS -DLEVELDB_PLATFORM_POSIX_SSE"
-fi
-
-PLATFORM_CCFLAGS="$PLATFORM_CCFLAGS $COMMON_FLAGS"
-PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS $COMMON_FLAGS"
-
-echo "CC=$CC" >> $OUTPUT
-echo "CXX=$CXX" >> $OUTPUT
-echo "PLATFORM=$PLATFORM" >> $OUTPUT
-echo "PLATFORM_LDFLAGS=$PLATFORM_LDFLAGS" >> $OUTPUT
-echo "PLATFORM_LIBS=$PLATFORM_LIBS" >> $OUTPUT
-echo "PLATFORM_CCFLAGS=$PLATFORM_CCFLAGS" >> $OUTPUT
-echo "PLATFORM_CXXFLAGS=$PLATFORM_CXXFLAGS" >> $OUTPUT
-echo "PLATFORM_SSEFLAGS=$PLATFORM_SSEFLAGS" >> $OUTPUT
-echo "PLATFORM_SHARED_CFLAGS=$PLATFORM_SHARED_CFLAGS" >> $OUTPUT
-echo "PLATFORM_SHARED_EXT=$PLATFORM_SHARED_EXT" >> $OUTPUT
-echo "PLATFORM_SHARED_LDFLAGS=$PLATFORM_SHARED_LDFLAGS" >> $OUTPUT
-echo "PLATFORM_SHARED_VERSIONED=$PLATFORM_SHARED_VERSIONED" >> $OUTPUT
diff --git a/src/leveldb/cmake/leveldbConfig.cmake b/src/leveldb/cmake/leveldbConfig.cmake
new file mode 100644
index 0000000000..eea6e5c477
--- /dev/null
+++ b/src/leveldb/cmake/leveldbConfig.cmake
@@ -0,0 +1 @@
+include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake")
diff --git a/src/leveldb/db/autocompact_test.cc b/src/leveldb/db/autocompact_test.cc
index d20a2362c3..e6c97a05a6 100644
--- a/src/leveldb/db/autocompact_test.cc
+++ b/src/leveldb/db/autocompact_test.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "leveldb/db.h"
#include "db/db_impl.h"
#include "leveldb/cache.h"
+#include "leveldb/db.h"
#include "util/testharness.h"
#include "util/testutil.h"
@@ -12,11 +12,6 @@ namespace leveldb {
class AutoCompactTest {
public:
- std::string dbname_;
- Cache* tiny_cache_;
- Options options_;
- DB* db_;
-
AutoCompactTest() {
dbname_ = test::TmpDir() + "/autocompact_test";
tiny_cache_ = NewLRUCache(100);
@@ -47,6 +42,12 @@ class AutoCompactTest {
}
void DoReads(int n);
+
+ private:
+ std::string dbname_;
+ Cache* tiny_cache_;
+ Options options_;
+ DB* db_;
};
static const int kValueSize = 200 * 1024;
@@ -81,17 +82,16 @@ void AutoCompactTest::DoReads(int n) {
ASSERT_LT(read, 100) << "Taking too long to compact";
Iterator* iter = db_->NewIterator(ReadOptions());
for (iter->SeekToFirst();
- iter->Valid() && iter->key().ToString() < limit_key;
- iter->Next()) {
+ iter->Valid() && iter->key().ToString() < limit_key; iter->Next()) {
// Drop data
}
delete iter;
// Wait a little bit to allow any triggered compactions to complete.
Env::Default()->SleepForMicroseconds(1000000);
uint64_t size = Size(Key(0), Key(n));
- fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n",
- read+1, size/1048576.0, Size(Key(n), Key(kCount))/1048576.0);
- if (size <= initial_size/10) {
+ fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1,
+ size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0);
+ if (size <= initial_size / 10) {
break;
}
}
@@ -100,19 +100,13 @@ void AutoCompactTest::DoReads(int n) {
// is pretty much unchanged.
const int64_t final_other_size = Size(Key(n), Key(kCount));
ASSERT_LE(final_other_size, initial_other_size + 1048576);
- ASSERT_GE(final_other_size, initial_other_size/5 - 1048576);
+ ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576);
}
-TEST(AutoCompactTest, ReadAll) {
- DoReads(kCount);
-}
+TEST(AutoCompactTest, ReadAll) { DoReads(kCount); }
-TEST(AutoCompactTest, ReadHalf) {
- DoReads(kCount/2);
-}
+TEST(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/builder.cc b/src/leveldb/db/builder.cc
index f419882197..9520ee4535 100644
--- a/src/leveldb/db/builder.cc
+++ b/src/leveldb/db/builder.cc
@@ -4,8 +4,8 @@
#include "db/builder.h"
-#include "db/filename.h"
#include "db/dbformat.h"
+#include "db/filename.h"
#include "db/table_cache.h"
#include "db/version_edit.h"
#include "leveldb/db.h"
@@ -14,12 +14,8 @@
namespace leveldb {
-Status BuildTable(const std::string& dbname,
- Env* env,
- const Options& options,
- TableCache* table_cache,
- Iterator* iter,
- FileMetaData* meta) {
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+ TableCache* table_cache, Iterator* iter, FileMetaData* meta) {
Status s;
meta->file_size = 0;
iter->SeekToFirst();
@@ -41,14 +37,10 @@ Status BuildTable(const std::string& dbname,
}
// Finish and check for builder errors
+ s = builder->Finish();
if (s.ok()) {
- s = builder->Finish();
- if (s.ok()) {
- meta->file_size = builder->FileSize();
- assert(meta->file_size > 0);
- }
- } else {
- builder->Abandon();
+ meta->file_size = builder->FileSize();
+ assert(meta->file_size > 0);
}
delete builder;
@@ -60,12 +52,11 @@ Status BuildTable(const std::string& dbname,
s = file->Close();
}
delete file;
- file = NULL;
+ file = nullptr;
if (s.ok()) {
// Verify that the table is usable
- Iterator* it = table_cache->NewIterator(ReadOptions(),
- meta->number,
+ Iterator* it = table_cache->NewIterator(ReadOptions(), meta->number,
meta->file_size);
s = it->status();
delete it;
diff --git a/src/leveldb/db/builder.h b/src/leveldb/db/builder.h
index 62431fcf44..7bd0b8049b 100644
--- a/src/leveldb/db/builder.h
+++ b/src/leveldb/db/builder.h
@@ -22,12 +22,8 @@ class VersionEdit;
// *meta will be filled with metadata about the generated table.
// If no data is present in *iter, meta->file_size will be set to
// zero, and no Table file will be produced.
-extern Status BuildTable(const std::string& dbname,
- Env* env,
- const Options& options,
- TableCache* table_cache,
- Iterator* iter,
- FileMetaData* meta);
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+ TableCache* table_cache, Iterator* iter, FileMetaData* meta);
} // namespace leveldb
diff --git a/src/leveldb/db/c.cc b/src/leveldb/db/c.cc
index b23e3dcc9d..3a492f9ac5 100644
--- a/src/leveldb/db/c.cc
+++ b/src/leveldb/db/c.cc
@@ -4,10 +4,9 @@
#include "leveldb/c.h"
-#include <stdlib.h>
-#ifndef WIN32
-#include <unistd.h>
-#endif
+#include <cstdint>
+#include <cstdlib>
+
#include "leveldb/cache.h"
#include "leveldb/comparator.h"
#include "leveldb/db.h"
@@ -45,69 +44,72 @@ using leveldb::WriteOptions;
extern "C" {
-struct leveldb_t { DB* rep; };
-struct leveldb_iterator_t { Iterator* rep; };
-struct leveldb_writebatch_t { WriteBatch rep; };
-struct leveldb_snapshot_t { const Snapshot* rep; };
-struct leveldb_readoptions_t { ReadOptions rep; };
-struct leveldb_writeoptions_t { WriteOptions rep; };
-struct leveldb_options_t { Options rep; };
-struct leveldb_cache_t { Cache* rep; };
-struct leveldb_seqfile_t { SequentialFile* rep; };
-struct leveldb_randomfile_t { RandomAccessFile* rep; };
-struct leveldb_writablefile_t { WritableFile* rep; };
-struct leveldb_logger_t { Logger* rep; };
-struct leveldb_filelock_t { FileLock* rep; };
+struct leveldb_t {
+ DB* rep;
+};
+struct leveldb_iterator_t {
+ Iterator* rep;
+};
+struct leveldb_writebatch_t {
+ WriteBatch rep;
+};
+struct leveldb_snapshot_t {
+ const Snapshot* rep;
+};
+struct leveldb_readoptions_t {
+ ReadOptions rep;
+};
+struct leveldb_writeoptions_t {
+ WriteOptions rep;
+};
+struct leveldb_options_t {
+ Options rep;
+};
+struct leveldb_cache_t {
+ Cache* rep;
+};
+struct leveldb_seqfile_t {
+ SequentialFile* rep;
+};
+struct leveldb_randomfile_t {
+ RandomAccessFile* rep;
+};
+struct leveldb_writablefile_t {
+ WritableFile* rep;
+};
+struct leveldb_logger_t {
+ Logger* rep;
+};
+struct leveldb_filelock_t {
+ FileLock* rep;
+};
struct leveldb_comparator_t : public Comparator {
- void* state_;
- void (*destructor_)(void*);
- int (*compare_)(
- void*,
- const char* a, size_t alen,
- const char* b, size_t blen);
- const char* (*name_)(void*);
+ ~leveldb_comparator_t() override { (*destructor_)(state_); }
- virtual ~leveldb_comparator_t() {
- (*destructor_)(state_);
- }
-
- virtual int Compare(const Slice& a, const Slice& b) const {
+ int Compare(const Slice& a, const Slice& b) const override {
return (*compare_)(state_, a.data(), a.size(), b.data(), b.size());
}
- virtual const char* Name() const {
- return (*name_)(state_);
- }
+ const char* Name() const override { return (*name_)(state_); }
// No-ops since the C binding does not support key shortening methods.
- virtual void FindShortestSeparator(std::string*, const Slice&) const { }
- virtual void FindShortSuccessor(std::string* key) const { }
-};
+ void FindShortestSeparator(std::string*, const Slice&) const override {}
+ void FindShortSuccessor(std::string* key) const override {}
-struct leveldb_filterpolicy_t : public FilterPolicy {
void* state_;
void (*destructor_)(void*);
+ int (*compare_)(void*, const char* a, size_t alen, const char* b,
+ size_t blen);
const char* (*name_)(void*);
- char* (*create_)(
- void*,
- const char* const* key_array, const size_t* key_length_array,
- int num_keys,
- size_t* filter_length);
- unsigned char (*key_match_)(
- void*,
- const char* key, size_t length,
- const char* filter, size_t filter_length);
-
- virtual ~leveldb_filterpolicy_t() {
- (*destructor_)(state_);
- }
+};
- virtual const char* Name() const {
- return (*name_)(state_);
- }
+struct leveldb_filterpolicy_t : public FilterPolicy {
+ ~leveldb_filterpolicy_t() override { (*destructor_)(state_); }
- virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
+ const char* Name() const override { return (*name_)(state_); }
+
+ void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
std::vector<const char*> key_pointers(n);
std::vector<size_t> key_sizes(n);
for (int i = 0; i < n; i++) {
@@ -120,10 +122,19 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
free(filter);
}
- virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const {
- return (*key_match_)(state_, key.data(), key.size(),
- filter.data(), filter.size());
+ bool KeyMayMatch(const Slice& key, const Slice& filter) const override {
+ return (*key_match_)(state_, key.data(), key.size(), filter.data(),
+ filter.size());
}
+
+ void* state_;
+ void (*destructor_)(void*);
+ const char* (*name_)(void*);
+ char* (*create_)(void*, const char* const* key_array,
+ const size_t* key_length_array, int num_keys,
+ size_t* filter_length);
+ uint8_t (*key_match_)(void*, const char* key, size_t length,
+ const char* filter, size_t filter_length);
};
struct leveldb_env_t {
@@ -132,10 +143,10 @@ struct leveldb_env_t {
};
static bool SaveError(char** errptr, const Status& s) {
- assert(errptr != NULL);
+ assert(errptr != nullptr);
if (s.ok()) {
return false;
- } else if (*errptr == NULL) {
+ } else if (*errptr == nullptr) {
*errptr = strdup(s.ToString().c_str());
} else {
// TODO(sanjay): Merge with existing error?
@@ -151,13 +162,11 @@ static char* CopyString(const std::string& str) {
return result;
}
-leveldb_t* leveldb_open(
- const leveldb_options_t* options,
- const char* name,
- char** errptr) {
+leveldb_t* leveldb_open(const leveldb_options_t* options, const char* name,
+ char** errptr) {
DB* db;
if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
- return NULL;
+ return nullptr;
}
leveldb_t* result = new leveldb_t;
result->rep = db;
@@ -169,40 +178,27 @@ void leveldb_close(leveldb_t* db) {
delete db;
}
-void leveldb_put(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- const char* key, size_t keylen,
- const char* val, size_t vallen,
- char** errptr) {
+void leveldb_put(leveldb_t* db, const leveldb_writeoptions_t* options,
+ const char* key, size_t keylen, const char* val, size_t vallen,
+ char** errptr) {
SaveError(errptr,
db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen)));
}
-void leveldb_delete(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- const char* key, size_t keylen,
- char** errptr) {
+void leveldb_delete(leveldb_t* db, const leveldb_writeoptions_t* options,
+ const char* key, size_t keylen, char** errptr) {
SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen)));
}
-
-void leveldb_write(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- leveldb_writebatch_t* batch,
- char** errptr) {
+void leveldb_write(leveldb_t* db, const leveldb_writeoptions_t* options,
+ leveldb_writebatch_t* batch, char** errptr) {
SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
}
-char* leveldb_get(
- leveldb_t* db,
- const leveldb_readoptions_t* options,
- const char* key, size_t keylen,
- size_t* vallen,
- char** errptr) {
- char* result = NULL;
+char* leveldb_get(leveldb_t* db, const leveldb_readoptions_t* options,
+ const char* key, size_t keylen, size_t* vallen,
+ char** errptr) {
+ char* result = nullptr;
std::string tmp;
Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
if (s.ok()) {
@@ -218,45 +214,40 @@ char* leveldb_get(
}
leveldb_iterator_t* leveldb_create_iterator(
- leveldb_t* db,
- const leveldb_readoptions_t* options) {
+ leveldb_t* db, const leveldb_readoptions_t* options) {
leveldb_iterator_t* result = new leveldb_iterator_t;
result->rep = db->rep->NewIterator(options->rep);
return result;
}
-const leveldb_snapshot_t* leveldb_create_snapshot(
- leveldb_t* db) {
+const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db) {
leveldb_snapshot_t* result = new leveldb_snapshot_t;
result->rep = db->rep->GetSnapshot();
return result;
}
-void leveldb_release_snapshot(
- leveldb_t* db,
- const leveldb_snapshot_t* snapshot) {
+void leveldb_release_snapshot(leveldb_t* db,
+ const leveldb_snapshot_t* snapshot) {
db->rep->ReleaseSnapshot(snapshot->rep);
delete snapshot;
}
-char* leveldb_property_value(
- leveldb_t* db,
- const char* propname) {
+char* leveldb_property_value(leveldb_t* db, const char* propname) {
std::string tmp;
if (db->rep->GetProperty(Slice(propname), &tmp)) {
// We use strdup() since we expect human readable output.
return strdup(tmp.c_str());
} else {
- return NULL;
+ return nullptr;
}
}
-void leveldb_approximate_sizes(
- leveldb_t* db,
- int num_ranges,
- const char* const* range_start_key, const size_t* range_start_key_len,
- const char* const* range_limit_key, const size_t* range_limit_key_len,
- uint64_t* sizes) {
+void leveldb_approximate_sizes(leveldb_t* db, int num_ranges,
+ const char* const* range_start_key,
+ const size_t* range_start_key_len,
+ const char* const* range_limit_key,
+ const size_t* range_limit_key_len,
+ uint64_t* sizes) {
Range* ranges = new Range[num_ranges];
for (int i = 0; i < num_ranges; i++) {
ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
@@ -266,28 +257,23 @@ void leveldb_approximate_sizes(
delete[] ranges;
}
-void leveldb_compact_range(
- leveldb_t* db,
- const char* start_key, size_t start_key_len,
- const char* limit_key, size_t limit_key_len) {
+void leveldb_compact_range(leveldb_t* db, const char* start_key,
+ size_t start_key_len, const char* limit_key,
+ size_t limit_key_len) {
Slice a, b;
db->rep->CompactRange(
- // Pass NULL Slice if corresponding "const char*" is NULL
- (start_key ? (a = Slice(start_key, start_key_len), &a) : NULL),
- (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : NULL));
+ // Pass null Slice if corresponding "const char*" is null
+ (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
+ (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
}
-void leveldb_destroy_db(
- const leveldb_options_t* options,
- const char* name,
- char** errptr) {
+void leveldb_destroy_db(const leveldb_options_t* options, const char* name,
+ char** errptr) {
SaveError(errptr, DestroyDB(name, options->rep));
}
-void leveldb_repair_db(
- const leveldb_options_t* options,
- const char* name,
- char** errptr) {
+void leveldb_repair_db(const leveldb_options_t* options, const char* name,
+ char** errptr) {
SaveError(errptr, RepairDB(name, options->rep));
}
@@ -296,7 +282,7 @@ void leveldb_iter_destroy(leveldb_iterator_t* iter) {
delete iter;
}
-unsigned char leveldb_iter_valid(const leveldb_iterator_t* iter) {
+uint8_t leveldb_iter_valid(const leveldb_iterator_t* iter) {
return iter->rep->Valid();
}
@@ -312,13 +298,9 @@ void leveldb_iter_seek(leveldb_iterator_t* iter, const char* k, size_t klen) {
iter->rep->Seek(Slice(k, klen));
}
-void leveldb_iter_next(leveldb_iterator_t* iter) {
- iter->rep->Next();
-}
+void leveldb_iter_next(leveldb_iterator_t* iter) { iter->rep->Next(); }
-void leveldb_iter_prev(leveldb_iterator_t* iter) {
- iter->rep->Prev();
-}
+void leveldb_iter_prev(leveldb_iterator_t* iter) { iter->rep->Prev(); }
const char* leveldb_iter_key(const leveldb_iterator_t* iter, size_t* klen) {
Slice s = iter->rep->key();
@@ -340,41 +322,34 @@ leveldb_writebatch_t* leveldb_writebatch_create() {
return new leveldb_writebatch_t;
}
-void leveldb_writebatch_destroy(leveldb_writebatch_t* b) {
- delete b;
-}
+void leveldb_writebatch_destroy(leveldb_writebatch_t* b) { delete b; }
-void leveldb_writebatch_clear(leveldb_writebatch_t* b) {
- b->rep.Clear();
-}
+void leveldb_writebatch_clear(leveldb_writebatch_t* b) { b->rep.Clear(); }
-void leveldb_writebatch_put(
- leveldb_writebatch_t* b,
- const char* key, size_t klen,
- const char* val, size_t vlen) {
+void leveldb_writebatch_put(leveldb_writebatch_t* b, const char* key,
+ size_t klen, const char* val, size_t vlen) {
b->rep.Put(Slice(key, klen), Slice(val, vlen));
}
-void leveldb_writebatch_delete(
- leveldb_writebatch_t* b,
- const char* key, size_t klen) {
+void leveldb_writebatch_delete(leveldb_writebatch_t* b, const char* key,
+ size_t klen) {
b->rep.Delete(Slice(key, klen));
}
-void leveldb_writebatch_iterate(
- leveldb_writebatch_t* b,
- void* state,
- void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
- void (*deleted)(void*, const char* k, size_t klen)) {
+void leveldb_writebatch_iterate(const leveldb_writebatch_t* b, void* state,
+ void (*put)(void*, const char* k, size_t klen,
+ const char* v, size_t vlen),
+ void (*deleted)(void*, const char* k,
+ size_t klen)) {
class H : public WriteBatch::Handler {
public:
void* state_;
void (*put_)(void*, const char* k, size_t klen, const char* v, size_t vlen);
void (*deleted_)(void*, const char* k, size_t klen);
- virtual void Put(const Slice& key, const Slice& value) {
+ void Put(const Slice& key, const Slice& value) override {
(*put_)(state_, key.data(), key.size(), value.data(), value.size());
}
- virtual void Delete(const Slice& key) {
+ void Delete(const Slice& key) override {
(*deleted_)(state_, key.data(), key.size());
}
};
@@ -385,47 +360,43 @@ void leveldb_writebatch_iterate(
b->rep.Iterate(&handler);
}
-leveldb_options_t* leveldb_options_create() {
- return new leveldb_options_t;
+void leveldb_writebatch_append(leveldb_writebatch_t* destination,
+ const leveldb_writebatch_t* source) {
+ destination->rep.Append(source->rep);
}
-void leveldb_options_destroy(leveldb_options_t* options) {
- delete options;
-}
+leveldb_options_t* leveldb_options_create() { return new leveldb_options_t; }
+
+void leveldb_options_destroy(leveldb_options_t* options) { delete options; }
-void leveldb_options_set_comparator(
- leveldb_options_t* opt,
- leveldb_comparator_t* cmp) {
+void leveldb_options_set_comparator(leveldb_options_t* opt,
+ leveldb_comparator_t* cmp) {
opt->rep.comparator = cmp;
}
-void leveldb_options_set_filter_policy(
- leveldb_options_t* opt,
- leveldb_filterpolicy_t* policy) {
+void leveldb_options_set_filter_policy(leveldb_options_t* opt,
+ leveldb_filterpolicy_t* policy) {
opt->rep.filter_policy = policy;
}
-void leveldb_options_set_create_if_missing(
- leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_create_if_missing(leveldb_options_t* opt, uint8_t v) {
opt->rep.create_if_missing = v;
}
-void leveldb_options_set_error_if_exists(
- leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_error_if_exists(leveldb_options_t* opt, uint8_t v) {
opt->rep.error_if_exists = v;
}
-void leveldb_options_set_paranoid_checks(
- leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_paranoid_checks(leveldb_options_t* opt, uint8_t v) {
opt->rep.paranoid_checks = v;
}
void leveldb_options_set_env(leveldb_options_t* opt, leveldb_env_t* env) {
- opt->rep.env = (env ? env->rep : NULL);
+ opt->rep.env = (env ? env->rep : nullptr);
}
void leveldb_options_set_info_log(leveldb_options_t* opt, leveldb_logger_t* l) {
- opt->rep.info_log = (l ? l->rep : NULL);
+ opt->rep.info_log = (l ? l->rep : nullptr);
}
void leveldb_options_set_write_buffer_size(leveldb_options_t* opt, size_t s) {
@@ -448,17 +419,18 @@ void leveldb_options_set_block_restart_interval(leveldb_options_t* opt, int n) {
opt->rep.block_restart_interval = n;
}
+void leveldb_options_set_max_file_size(leveldb_options_t* opt, size_t s) {
+ opt->rep.max_file_size = s;
+}
+
void leveldb_options_set_compression(leveldb_options_t* opt, int t) {
opt->rep.compression = static_cast<CompressionType>(t);
}
leveldb_comparator_t* leveldb_comparator_create(
- void* state,
- void (*destructor)(void*),
- int (*compare)(
- void*,
- const char* a, size_t alen,
- const char* b, size_t blen),
+ void* state, void (*destructor)(void*),
+ int (*compare)(void*, const char* a, size_t alen, const char* b,
+ size_t blen),
const char* (*name)(void*)) {
leveldb_comparator_t* result = new leveldb_comparator_t;
result->state_ = state;
@@ -468,22 +440,15 @@ leveldb_comparator_t* leveldb_comparator_create(
return result;
}
-void leveldb_comparator_destroy(leveldb_comparator_t* cmp) {
- delete cmp;
-}
+void leveldb_comparator_destroy(leveldb_comparator_t* cmp) { delete cmp; }
leveldb_filterpolicy_t* leveldb_filterpolicy_create(
- void* state,
- void (*destructor)(void*),
- char* (*create_filter)(
- void*,
- const char* const* key_array, const size_t* key_length_array,
- int num_keys,
- size_t* filter_length),
- unsigned char (*key_may_match)(
- void*,
- const char* key, size_t length,
- const char* filter, size_t filter_length),
+ void* state, void (*destructor)(void*),
+ char* (*create_filter)(void*, const char* const* key_array,
+ const size_t* key_length_array, int num_keys,
+ size_t* filter_length),
+ uint8_t (*key_may_match)(void*, const char* key, size_t length,
+ const char* filter, size_t filter_length),
const char* (*name)(void*)) {
leveldb_filterpolicy_t* result = new leveldb_filterpolicy_t;
result->state_ = state;
@@ -503,7 +468,8 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
// they delegate to a NewBloomFilterPolicy() instead of user
// supplied C functions.
struct Wrapper : public leveldb_filterpolicy_t {
- const FilterPolicy* rep_;
+ static void DoNothing(void*) {}
+
~Wrapper() { delete rep_; }
const char* Name() const { return rep_->Name(); }
void CreateFilter(const Slice* keys, int n, std::string* dst) const {
@@ -512,11 +478,12 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
bool KeyMayMatch(const Slice& key, const Slice& filter) const {
return rep_->KeyMayMatch(key, filter);
}
- static void DoNothing(void*) { }
+
+ const FilterPolicy* rep_;
};
Wrapper* wrapper = new Wrapper;
wrapper->rep_ = NewBloomFilterPolicy(bits_per_key);
- wrapper->state_ = NULL;
+ wrapper->state_ = nullptr;
wrapper->destructor_ = &Wrapper::DoNothing;
return wrapper;
}
@@ -525,37 +492,29 @@ leveldb_readoptions_t* leveldb_readoptions_create() {
return new leveldb_readoptions_t;
}
-void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) {
- delete opt;
-}
+void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) { delete opt; }
-void leveldb_readoptions_set_verify_checksums(
- leveldb_readoptions_t* opt,
- unsigned char v) {
+void leveldb_readoptions_set_verify_checksums(leveldb_readoptions_t* opt,
+ uint8_t v) {
opt->rep.verify_checksums = v;
}
-void leveldb_readoptions_set_fill_cache(
- leveldb_readoptions_t* opt, unsigned char v) {
+void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t* opt, uint8_t v) {
opt->rep.fill_cache = v;
}
-void leveldb_readoptions_set_snapshot(
- leveldb_readoptions_t* opt,
- const leveldb_snapshot_t* snap) {
- opt->rep.snapshot = (snap ? snap->rep : NULL);
+void leveldb_readoptions_set_snapshot(leveldb_readoptions_t* opt,
+ const leveldb_snapshot_t* snap) {
+ opt->rep.snapshot = (snap ? snap->rep : nullptr);
}
leveldb_writeoptions_t* leveldb_writeoptions_create() {
return new leveldb_writeoptions_t;
}
-void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) {
- delete opt;
-}
+void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) { delete opt; }
-void leveldb_writeoptions_set_sync(
- leveldb_writeoptions_t* opt, unsigned char v) {
+void leveldb_writeoptions_set_sync(leveldb_writeoptions_t* opt, uint8_t v) {
opt->rep.sync = v;
}
@@ -582,16 +541,22 @@ void leveldb_env_destroy(leveldb_env_t* env) {
delete env;
}
-void leveldb_free(void* ptr) {
- free(ptr);
-}
+char* leveldb_env_get_test_directory(leveldb_env_t* env) {
+ std::string result;
+ if (!env->rep->GetTestDirectory(&result).ok()) {
+ return nullptr;
+ }
-int leveldb_major_version() {
- return kMajorVersion;
+ char* buffer = static_cast<char*>(malloc(result.size() + 1));
+ memcpy(buffer, result.data(), result.size());
+ buffer[result.size()] = '\0';
+ return buffer;
}
-int leveldb_minor_version() {
- return kMinorVersion;
-}
+void leveldb_free(void* ptr) { free(ptr); }
+
+int leveldb_major_version() { return kMajorVersion; }
+
+int leveldb_minor_version() { return kMinorVersion; }
} // end extern "C"
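Note: besides the nullptr/uint8_t modernization, the db/c.cc hunks above add two C-API entry points, `leveldb_writebatch_append()` and `leveldb_env_get_test_directory()`. A minimal hedged usage sketch of the append call, mirroring the db/c_test.c change below (the database path is illustrative only):

```cpp
// Sketch only: merge one write batch into another via the new append call.
#include <stdio.h>
#include "leveldb/c.h"

int main() {
  char* err = nullptr;
  leveldb_options_t* options = leveldb_options_create();
  leveldb_options_set_create_if_missing(options, 1);
  leveldb_t* db = leveldb_open(options, "/tmp/leveldb_append_demo", &err);
  if (err != nullptr) { fprintf(stderr, "open: %s\n", err); leveldb_free(err); return 1; }

  leveldb_writebatch_t* wb = leveldb_writebatch_create();
  leveldb_writebatch_put(wb, "foo", 3, "bar", 3);

  leveldb_writebatch_t* wb2 = leveldb_writebatch_create();
  leveldb_writebatch_delete(wb2, "foo", 3);
  leveldb_writebatch_append(wb, wb2);  // wb now holds the put followed by the delete
  leveldb_writebatch_destroy(wb2);

  leveldb_writeoptions_t* woptions = leveldb_writeoptions_create();
  leveldb_write(db, woptions, wb, &err);
  if (err != nullptr) { fprintf(stderr, "write: %s\n", err); leveldb_free(err); }

  leveldb_writebatch_destroy(wb);
  leveldb_writeoptions_destroy(woptions);
  leveldb_close(db);
  leveldb_options_destroy(options);
  return 0;
}
```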
diff --git a/src/leveldb/db/c_test.c b/src/leveldb/db/c_test.c
index 7cd5ee0207..16c77eed6a 100644
--- a/src/leveldb/db/c_test.c
+++ b/src/leveldb/db/c_test.c
@@ -8,24 +8,14 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/types.h>
-#include <unistd.h>
const char* phase = "";
-static char dbname[200];
static void StartPhase(const char* name) {
fprintf(stderr, "=== Test %s\n", name);
phase = name;
}
-static const char* GetTempDir(void) {
- const char* ret = getenv("TEST_TMPDIR");
- if (ret == NULL || ret[0] == '\0')
- ret = "/tmp";
- return ret;
-}
-
#define CheckNoError(err) \
if ((err) != NULL) { \
fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, (err)); \
@@ -130,7 +120,7 @@ static const char* CmpName(void* arg) {
}
// Custom filter policy
-static unsigned char fake_filter_result = 1;
+static uint8_t fake_filter_result = 1;
static void FilterDestroy(void* arg) { }
static const char* FilterName(void* arg) {
return "TestFilter";
@@ -145,10 +135,8 @@ static char* FilterCreate(
memcpy(result, "fake", 4);
return result;
}
-unsigned char FilterKeyMatch(
- void* arg,
- const char* key, size_t length,
- const char* filter, size_t filter_length) {
+uint8_t FilterKeyMatch(void* arg, const char* key, size_t length,
+ const char* filter, size_t filter_length) {
CheckCondition(filter_length == 4);
CheckCondition(memcmp(filter, "fake", 4) == 0);
return fake_filter_result;
@@ -162,21 +150,19 @@ int main(int argc, char** argv) {
leveldb_options_t* options;
leveldb_readoptions_t* roptions;
leveldb_writeoptions_t* woptions;
+ char* dbname;
char* err = NULL;
int run = -1;
CheckCondition(leveldb_major_version() >= 1);
CheckCondition(leveldb_minor_version() >= 1);
- snprintf(dbname, sizeof(dbname),
- "%s/leveldb_c_test-%d",
- GetTempDir(),
- ((int) geteuid()));
-
StartPhase("create_objects");
cmp = leveldb_comparator_create(NULL, CmpDestroy, CmpCompare, CmpName);
env = leveldb_create_default_env();
cache = leveldb_cache_create_lru(100000);
+ dbname = leveldb_env_get_test_directory(env);
+ CheckCondition(dbname != NULL);
options = leveldb_options_create();
leveldb_options_set_comparator(options, cmp);
@@ -189,6 +175,7 @@ int main(int argc, char** argv) {
leveldb_options_set_max_open_files(options, 10);
leveldb_options_set_block_size(options, 1024);
leveldb_options_set_block_restart_interval(options, 8);
+ leveldb_options_set_max_file_size(options, 3 << 20);
leveldb_options_set_compression(options, leveldb_no_compression);
roptions = leveldb_readoptions_create();
@@ -239,12 +226,18 @@ int main(int argc, char** argv) {
leveldb_writebatch_clear(wb);
leveldb_writebatch_put(wb, "bar", 3, "b", 1);
leveldb_writebatch_put(wb, "box", 3, "c", 1);
- leveldb_writebatch_delete(wb, "bar", 3);
+
+ leveldb_writebatch_t* wb2 = leveldb_writebatch_create();
+ leveldb_writebatch_delete(wb2, "bar", 3);
+ leveldb_writebatch_append(wb, wb2);
+ leveldb_writebatch_destroy(wb2);
+
leveldb_write(db, woptions, wb, &err);
CheckNoError(err);
CheckGet(db, roptions, "foo", "hello");
CheckGet(db, roptions, "bar", NULL);
CheckGet(db, roptions, "box", "c");
+
int pos = 0;
leveldb_writebatch_iterate(wb, &pos, CheckPut, CheckDel);
CheckCondition(pos == 3);
@@ -381,6 +374,7 @@ int main(int argc, char** argv) {
leveldb_options_destroy(options);
leveldb_readoptions_destroy(roptions);
leveldb_writeoptions_destroy(woptions);
+ leveldb_free(dbname);
leveldb_cache_destroy(cache);
leveldb_comparator_destroy(cmp);
leveldb_env_destroy(env);
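
The updated c_test.c above also exercises leveldb_writebatch_append(), which splices one batch's operations onto another before a single write. A minimal sketch of that call, assuming an already-open leveldb_t* db and leveldb_writeoptions_t* woptions:

#include "leveldb/c.h"

void append_example(leveldb_t* db, leveldb_writeoptions_t* woptions) {
  char* err = nullptr;

  leveldb_writebatch_t* wb = leveldb_writebatch_create();
  leveldb_writebatch_put(wb, "foo", 3, "v1", 2);

  leveldb_writebatch_t* wb2 = leveldb_writebatch_create();
  leveldb_writebatch_delete(wb2, "foo", 3);

  // Append wb2's operations onto wb; wb2 is not consumed and must still be
  // destroyed by the caller.
  leveldb_writebatch_append(wb, wb2);
  leveldb_writebatch_destroy(wb2);

  leveldb_write(db, woptions, wb, &err);  // applies the put, then the delete
  leveldb_writebatch_destroy(wb);
  leveldb_free(err);  // no-op when err is null
}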
diff --git a/src/leveldb/db/corruption_test.cc b/src/leveldb/db/corruption_test.cc
index 37a484d25f..42f5237c65 100644
--- a/src/leveldb/db/corruption_test.cc
+++ b/src/leveldb/db/corruption_test.cc
@@ -2,20 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "leveldb/db.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/stat.h>
#include <sys/types.h>
-#include "leveldb/cache.h"
-#include "leveldb/env.h"
-#include "leveldb/table.h"
-#include "leveldb/write_batch.h"
+
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/log_format.h"
#include "db/version_set.h"
+#include "leveldb/cache.h"
+#include "leveldb/db.h"
+#include "leveldb/table.h"
+#include "leveldb/write_batch.h"
#include "util/logging.h"
#include "util/testharness.h"
#include "util/testutil.h"
@@ -26,44 +22,35 @@ static const int kValueSize = 1000;
class CorruptionTest {
public:
- test::ErrorEnv env_;
- std::string dbname_;
- Cache* tiny_cache_;
- Options options_;
- DB* db_;
-
- CorruptionTest() {
- tiny_cache_ = NewLRUCache(100);
+ CorruptionTest()
+ : db_(nullptr),
+ dbname_("/memenv/corruption_test"),
+ tiny_cache_(NewLRUCache(100)) {
options_.env = &env_;
options_.block_cache = tiny_cache_;
- dbname_ = test::TmpDir() + "/corruption_test";
DestroyDB(dbname_, options_);
- db_ = NULL;
options_.create_if_missing = true;
Reopen();
options_.create_if_missing = false;
}
~CorruptionTest() {
- delete db_;
- DestroyDB(dbname_, Options());
- delete tiny_cache_;
+ delete db_;
+ delete tiny_cache_;
}
Status TryReopen() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
return DB::Open(options_, dbname_, &db_);
}
- void Reopen() {
- ASSERT_OK(TryReopen());
- }
+ void Reopen() { ASSERT_OK(TryReopen()); }
void RepairDB() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
ASSERT_OK(::leveldb::RepairDB(dbname_, options_));
}
@@ -71,7 +58,7 @@ class CorruptionTest {
std::string key_space, value_space;
WriteBatch batch;
for (int i = 0; i < n; i++) {
- //if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
+ // if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
Slice key = Key(i, &key_space);
batch.Clear();
batch.Put(key, Value(i, &value_space));
@@ -100,8 +87,7 @@ class CorruptionTest {
// Ignore boundary keys.
continue;
}
- if (!ConsumeDecimalNumber(&in, &key) ||
- !in.empty() ||
+ if (!ConsumeDecimalNumber(&in, &key) || !in.empty() ||
key < next_expected) {
bad_keys++;
continue;
@@ -126,14 +112,13 @@ class CorruptionTest {
void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) {
// Pick file to corrupt
std::vector<std::string> filenames;
- ASSERT_OK(env_.GetChildren(dbname_, &filenames));
+ ASSERT_OK(env_.target()->GetChildren(dbname_, &filenames));
uint64_t number;
FileType type;
std::string fname;
int picked_number = -1;
for (size_t i = 0; i < filenames.size(); i++) {
- if (ParseFileName(filenames[i], &number, &type) &&
- type == filetype &&
+ if (ParseFileName(filenames[i], &number, &type) && type == filetype &&
int(number) > picked_number) { // Pick latest file
fname = dbname_ + "/" + filenames[i];
picked_number = number;
@@ -141,35 +126,32 @@ class CorruptionTest {
}
ASSERT_TRUE(!fname.empty()) << filetype;
- struct stat sbuf;
- if (stat(fname.c_str(), &sbuf) != 0) {
- const char* msg = strerror(errno);
- ASSERT_TRUE(false) << fname << ": " << msg;
- }
+ uint64_t file_size;
+ ASSERT_OK(env_.target()->GetFileSize(fname, &file_size));
if (offset < 0) {
// Relative to end of file; make it absolute
- if (-offset > sbuf.st_size) {
+ if (-offset > file_size) {
offset = 0;
} else {
- offset = sbuf.st_size + offset;
+ offset = file_size + offset;
}
}
- if (offset > sbuf.st_size) {
- offset = sbuf.st_size;
+ if (offset > file_size) {
+ offset = file_size;
}
- if (offset + bytes_to_corrupt > sbuf.st_size) {
- bytes_to_corrupt = sbuf.st_size - offset;
+ if (offset + bytes_to_corrupt > file_size) {
+ bytes_to_corrupt = file_size - offset;
}
// Do it
std::string contents;
- Status s = ReadFileToString(Env::Default(), fname, &contents);
+ Status s = ReadFileToString(env_.target(), fname, &contents);
ASSERT_TRUE(s.ok()) << s.ToString();
for (int i = 0; i < bytes_to_corrupt; i++) {
contents[i + offset] ^= 0x80;
}
- s = WriteStringToFile(Env::Default(), contents, fname);
+ s = WriteStringToFile(env_.target(), contents, fname);
ASSERT_TRUE(s.ok()) << s.ToString();
}
@@ -197,12 +179,20 @@ class CorruptionTest {
Random r(k);
return test::RandomString(&r, kValueSize, storage);
}
+
+ test::ErrorEnv env_;
+ Options options_;
+ DB* db_;
+
+ private:
+ std::string dbname_;
+ Cache* tiny_cache_;
};
TEST(CorruptionTest, Recovery) {
Build(100);
Check(100, 100);
- Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record
+ Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record
Corrupt(kLogFile, log::kBlockSize + 1000, 1); // Somewhere in second block
Reopen();
@@ -237,8 +227,8 @@ TEST(CorruptionTest, TableFile) {
Build(100);
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable();
- dbi->TEST_CompactRange(0, NULL, NULL);
- dbi->TEST_CompactRange(1, NULL, NULL);
+ dbi->TEST_CompactRange(0, nullptr, nullptr);
+ dbi->TEST_CompactRange(1, nullptr, nullptr);
Corrupt(kTableFile, 100, 1);
Check(90, 99);
@@ -251,8 +241,8 @@ TEST(CorruptionTest, TableFileRepair) {
Build(100);
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable();
- dbi->TEST_CompactRange(0, NULL, NULL);
- dbi->TEST_CompactRange(1, NULL, NULL);
+ dbi->TEST_CompactRange(0, nullptr, nullptr);
+ dbi->TEST_CompactRange(1, nullptr, nullptr);
Corrupt(kTableFile, 100, 1);
RepairDB();
@@ -302,7 +292,7 @@ TEST(CorruptionTest, CorruptedDescriptor) {
ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello"));
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable();
- dbi->TEST_CompactRange(0, NULL, NULL);
+ dbi->TEST_CompactRange(0, nullptr, nullptr);
Corrupt(kDescriptorFile, 0, 1000);
Status s = TryReopen();
@@ -343,7 +333,7 @@ TEST(CorruptionTest, CompactionInputErrorParanoid) {
Corrupt(kTableFile, 100, 1);
env_.SleepForMicroseconds(100000);
}
- dbi->CompactRange(NULL, NULL);
+ dbi->CompactRange(nullptr, nullptr);
// Write must fail because of corrupted table
std::string tmp1, tmp2;
@@ -369,6 +359,4 @@ TEST(CorruptionTest, UnrelatedKeys) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
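
The corruption_test.cc changes above route all file access through the leveldb::Env handle (env_.target()) instead of stat()/Env::Default(), which is what lets the test run against the in-memory /memenv environment. A condensed, illustrative sketch of that Env-only corruption pattern using the public helpers from leveldb/env.h:

#include <cstdint>
#include <string>

#include "leveldb/env.h"
#include "leveldb/status.h"

// Flip the high bit of `bytes` bytes starting at `offset` of `fname`,
// touching the file only through Env so any Env implementation works.
// Sketch only; offsets are clamped to the file size, as in the test.
leveldb::Status CorruptFile(leveldb::Env* env, const std::string& fname,
                            size_t offset, size_t bytes) {
  uint64_t file_size;
  leveldb::Status s = env->GetFileSize(fname, &file_size);
  if (!s.ok()) return s;
  if (offset > file_size) offset = static_cast<size_t>(file_size);

  std::string contents;
  s = leveldb::ReadFileToString(env, fname, &contents);
  if (!s.ok()) return s;

  for (size_t i = 0; i < bytes && offset + i < contents.size(); i++) {
    contents[offset + i] ^= 0x80;
  }
  return leveldb::WriteStringToFile(env, contents, fname);
}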
diff --git a/src/leveldb/db/db_impl.cc b/src/leveldb/db/db_impl.cc
index 3bb58e560a..65e31724bc 100644
--- a/src/leveldb/db/db_impl.cc
+++ b/src/leveldb/db/db_impl.cc
@@ -4,12 +4,15 @@
#include "db/db_impl.h"
+#include <stdint.h>
+#include <stdio.h>
+
#include <algorithm>
+#include <atomic>
#include <set>
#include <string>
-#include <stdint.h>
-#include <stdio.h>
#include <vector>
+
#include "db/builder.h"
#include "db/db_iter.h"
#include "db/dbformat.h"
@@ -39,16 +42,33 @@ const int kNumNonTableCacheFiles = 10;
// Information kept for every waiting writer
struct DBImpl::Writer {
+ explicit Writer(port::Mutex* mu)
+ : batch(nullptr), sync(false), done(false), cv(mu) {}
+
Status status;
WriteBatch* batch;
bool sync;
bool done;
port::CondVar cv;
-
- explicit Writer(port::Mutex* mu) : cv(mu) { }
};
struct DBImpl::CompactionState {
+ // Files produced by compaction
+ struct Output {
+ uint64_t number;
+ uint64_t file_size;
+ InternalKey smallest, largest;
+ };
+
+ Output* current_output() { return &outputs[outputs.size() - 1]; }
+
+ explicit CompactionState(Compaction* c)
+ : compaction(c),
+ smallest_snapshot(0),
+ outfile(nullptr),
+ builder(nullptr),
+ total_bytes(0) {}
+
Compaction* const compaction;
// Sequence numbers < smallest_snapshot are not significant since we
@@ -57,12 +77,6 @@ struct DBImpl::CompactionState {
// we can drop all entries for the same key with sequence numbers < S.
SequenceNumber smallest_snapshot;
- // Files produced by compaction
- struct Output {
- uint64_t number;
- uint64_t file_size;
- InternalKey smallest, largest;
- };
std::vector<Output> outputs;
// State kept for output being generated
@@ -70,19 +84,10 @@ struct DBImpl::CompactionState {
TableBuilder* builder;
uint64_t total_bytes;
-
- Output* current_output() { return &outputs[outputs.size()-1]; }
-
- explicit CompactionState(Compaction* c)
- : compaction(c),
- outfile(NULL),
- builder(NULL),
- total_bytes(0) {
- }
};
// Fix user-supplied options to be reasonable
-template <class T,class V>
+template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
@@ -93,27 +98,32 @@ Options SanitizeOptions(const std::string& dbname,
const Options& src) {
Options result = src;
result.comparator = icmp;
- result.filter_policy = (src.filter_policy != NULL) ? ipolicy : NULL;
- ClipToRange(&result.max_open_files, 64 + kNumNonTableCacheFiles, 50000);
- ClipToRange(&result.write_buffer_size, 64<<10, 1<<30);
- ClipToRange(&result.max_file_size, 1<<20, 1<<30);
- ClipToRange(&result.block_size, 1<<10, 4<<20);
- if (result.info_log == NULL) {
+ result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr;
+ ClipToRange(&result.max_open_files, 64 + kNumNonTableCacheFiles, 50000);
+ ClipToRange(&result.write_buffer_size, 64 << 10, 1 << 30);
+ ClipToRange(&result.max_file_size, 1 << 20, 1 << 30);
+ ClipToRange(&result.block_size, 1 << 10, 4 << 20);
+ if (result.info_log == nullptr) {
// Open a log file in the same directory as the db
src.env->CreateDir(dbname); // In case it does not exist
src.env->RenameFile(InfoLogFileName(dbname), OldInfoLogFileName(dbname));
Status s = src.env->NewLogger(InfoLogFileName(dbname), &result.info_log);
if (!s.ok()) {
// No place suitable for logging
- result.info_log = NULL;
+ result.info_log = nullptr;
}
}
- if (result.block_cache == NULL) {
+ if (result.block_cache == nullptr) {
result.block_cache = NewLRUCache(8 << 20);
}
return result;
}
+static int TableCacheSize(const Options& sanitized_options) {
+ // Reserve ten files or so for other uses and give the rest to TableCache.
+ return sanitized_options.max_open_files - kNumNonTableCacheFiles;
+}
+
DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
: env_(raw_options.env),
internal_comparator_(raw_options.comparator),
@@ -123,44 +133,39 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
owns_info_log_(options_.info_log != raw_options.info_log),
owns_cache_(options_.block_cache != raw_options.block_cache),
dbname_(dbname),
- db_lock_(NULL),
- shutting_down_(NULL),
- bg_cv_(&mutex_),
- mem_(NULL),
- imm_(NULL),
- logfile_(NULL),
+ table_cache_(new TableCache(dbname_, options_, TableCacheSize(options_))),
+ db_lock_(nullptr),
+ shutting_down_(false),
+ background_work_finished_signal_(&mutex_),
+ mem_(nullptr),
+ imm_(nullptr),
+ has_imm_(false),
+ logfile_(nullptr),
logfile_number_(0),
- log_(NULL),
+ log_(nullptr),
seed_(0),
tmp_batch_(new WriteBatch),
- bg_compaction_scheduled_(false),
- manual_compaction_(NULL) {
- has_imm_.Release_Store(NULL);
-
- // Reserve ten files or so for other uses and give the rest to TableCache.
- const int table_cache_size = options_.max_open_files - kNumNonTableCacheFiles;
- table_cache_ = new TableCache(dbname_, &options_, table_cache_size);
-
- versions_ = new VersionSet(dbname_, &options_, table_cache_,
- &internal_comparator_);
-}
+ background_compaction_scheduled_(false),
+ manual_compaction_(nullptr),
+ versions_(new VersionSet(dbname_, &options_, table_cache_,
+ &internal_comparator_)) {}
DBImpl::~DBImpl() {
- // Wait for background work to finish
+ // Wait for background work to finish.
mutex_.Lock();
- shutting_down_.Release_Store(this); // Any non-NULL value is ok
- while (bg_compaction_scheduled_) {
- bg_cv_.Wait();
+ shutting_down_.store(true, std::memory_order_release);
+ while (background_compaction_scheduled_) {
+ background_work_finished_signal_.Wait();
}
mutex_.Unlock();
- if (db_lock_ != NULL) {
+ if (db_lock_ != nullptr) {
env_->UnlockFile(db_lock_);
}
delete versions_;
- if (mem_ != NULL) mem_->Unref();
- if (imm_ != NULL) imm_->Unref();
+ if (mem_ != nullptr) mem_->Unref();
+ if (imm_ != nullptr) imm_->Unref();
delete tmp_batch_;
delete log_;
delete logfile_;
@@ -216,6 +221,8 @@ void DBImpl::MaybeIgnoreError(Status* s) const {
}
void DBImpl::DeleteObsoleteFiles() {
+ mutex_.AssertHeld();
+
if (!bg_error_.ok()) {
// After a background error, we don't know whether a new version may
// or may not have been committed, so we cannot safely garbage collect.
@@ -227,11 +234,12 @@ void DBImpl::DeleteObsoleteFiles() {
versions_->AddLiveFiles(&live);
std::vector<std::string> filenames;
- env_->GetChildren(dbname_, &filenames); // Ignoring errors on purpose
+ env_->GetChildren(dbname_, &filenames); // Ignoring errors on purpose
uint64_t number;
FileType type;
- for (size_t i = 0; i < filenames.size(); i++) {
- if (ParseFileName(filenames[i], &number, &type)) {
+ std::vector<std::string> files_to_delete;
+ for (std::string& filename : filenames) {
+ if (ParseFileName(filename, &number, &type)) {
bool keep = true;
switch (type) {
case kLogFile:
@@ -259,26 +267,34 @@ void DBImpl::DeleteObsoleteFiles() {
}
if (!keep) {
+ files_to_delete.push_back(std::move(filename));
if (type == kTableFile) {
table_cache_->Evict(number);
}
- Log(options_.info_log, "Delete type=%d #%lld\n",
- int(type),
+ Log(options_.info_log, "Delete type=%d #%lld\n", static_cast<int>(type),
static_cast<unsigned long long>(number));
- env_->DeleteFile(dbname_ + "/" + filenames[i]);
}
}
}
+
+  // While deleting all files, unblock other threads. All files being deleted
+ // have unique names which will not collide with newly created files and
+ // are therefore safe to delete while allowing other threads to proceed.
+ mutex_.Unlock();
+ for (const std::string& filename : files_to_delete) {
+ env_->DeleteFile(dbname_ + "/" + filename);
+ }
+ mutex_.Lock();
}
-Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) {
+Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
mutex_.AssertHeld();
// Ignore error from CreateDir since the creation of the DB is
// committed only when the descriptor is created, and this directory
// may already exist from a previous failed creation attempt.
env_->CreateDir(dbname_);
- assert(db_lock_ == NULL);
+ assert(db_lock_ == nullptr);
Status s = env_->LockFile(LockFileName(dbname_), &db_lock_);
if (!s.ok()) {
return s;
@@ -296,8 +312,8 @@ Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) {
}
} else {
if (options_.error_if_exists) {
- return Status::InvalidArgument(
- dbname_, "exists (error_if_exists is true)");
+ return Status::InvalidArgument(dbname_,
+ "exists (error_if_exists is true)");
}
}
@@ -369,12 +385,12 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
Env* env;
Logger* info_log;
const char* fname;
- Status* status; // NULL if options_.paranoid_checks==false
- virtual void Corruption(size_t bytes, const Status& s) {
+ Status* status; // null if options_.paranoid_checks==false
+ void Corruption(size_t bytes, const Status& s) override {
Log(info_log, "%s%s: dropping %d bytes; %s",
- (this->status == NULL ? "(ignoring error) " : ""),
- fname, static_cast<int>(bytes), s.ToString().c_str());
- if (this->status != NULL && this->status->ok()) *this->status = s;
+ (this->status == nullptr ? "(ignoring error) " : ""), fname,
+ static_cast<int>(bytes), s.ToString().c_str());
+ if (this->status != nullptr && this->status->ok()) *this->status = s;
}
};
@@ -394,32 +410,30 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
reporter.env = env_;
reporter.info_log = options_.info_log;
reporter.fname = fname.c_str();
- reporter.status = (options_.paranoid_checks ? &status : NULL);
+ reporter.status = (options_.paranoid_checks ? &status : nullptr);
// We intentionally make log::Reader do checksumming even if
// paranoid_checks==false so that corruptions cause entire commits
// to be skipped instead of propagating bad information (like overly
// large sequence numbers).
- log::Reader reader(file, &reporter, true/*checksum*/,
- 0/*initial_offset*/);
+ log::Reader reader(file, &reporter, true /*checksum*/, 0 /*initial_offset*/);
Log(options_.info_log, "Recovering log #%llu",
- (unsigned long long) log_number);
+ (unsigned long long)log_number);
// Read all the records and add to a memtable
std::string scratch;
Slice record;
WriteBatch batch;
int compactions = 0;
- MemTable* mem = NULL;
- while (reader.ReadRecord(&record, &scratch) &&
- status.ok()) {
+ MemTable* mem = nullptr;
+ while (reader.ReadRecord(&record, &scratch) && status.ok()) {
if (record.size() < 12) {
- reporter.Corruption(
- record.size(), Status::Corruption("log record too small", fname));
+ reporter.Corruption(record.size(),
+ Status::Corruption("log record too small", fname));
continue;
}
WriteBatchInternal::SetContents(&batch, record);
- if (mem == NULL) {
+ if (mem == nullptr) {
mem = new MemTable(internal_comparator_);
mem->Ref();
}
@@ -428,9 +442,8 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
if (!status.ok()) {
break;
}
- const SequenceNumber last_seq =
- WriteBatchInternal::Sequence(&batch) +
- WriteBatchInternal::Count(&batch) - 1;
+ const SequenceNumber last_seq = WriteBatchInternal::Sequence(&batch) +
+ WriteBatchInternal::Count(&batch) - 1;
if (last_seq > *max_sequence) {
*max_sequence = last_seq;
}
@@ -438,9 +451,9 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
if (mem->ApproximateMemoryUsage() > options_.write_buffer_size) {
compactions++;
*save_manifest = true;
- status = WriteLevel0Table(mem, edit, NULL);
+ status = WriteLevel0Table(mem, edit, nullptr);
mem->Unref();
- mem = NULL;
+ mem = nullptr;
if (!status.ok()) {
// Reflect errors immediately so that conditions like full
// file-systems cause the DB::Open() to fail.
@@ -453,31 +466,31 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
// See if we should keep reusing the last log file.
if (status.ok() && options_.reuse_logs && last_log && compactions == 0) {
- assert(logfile_ == NULL);
- assert(log_ == NULL);
- assert(mem_ == NULL);
+ assert(logfile_ == nullptr);
+ assert(log_ == nullptr);
+ assert(mem_ == nullptr);
uint64_t lfile_size;
if (env_->GetFileSize(fname, &lfile_size).ok() &&
env_->NewAppendableFile(fname, &logfile_).ok()) {
Log(options_.info_log, "Reusing old log %s \n", fname.c_str());
log_ = new log::Writer(logfile_, lfile_size);
logfile_number_ = log_number;
- if (mem != NULL) {
+ if (mem != nullptr) {
mem_ = mem;
- mem = NULL;
+ mem = nullptr;
} else {
- // mem can be NULL if lognum exists but was empty.
+ // mem can be nullptr if lognum exists but was empty.
mem_ = new MemTable(internal_comparator_);
mem_->Ref();
}
}
}
- if (mem != NULL) {
+ if (mem != nullptr) {
// mem did not get reused; compact it.
if (status.ok()) {
*save_manifest = true;
- status = WriteLevel0Table(mem, edit, NULL);
+ status = WriteLevel0Table(mem, edit, nullptr);
}
mem->Unref();
}
@@ -494,7 +507,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
pending_outputs_.insert(meta.number);
Iterator* iter = mem->NewIterator();
Log(options_.info_log, "Level-0 table #%llu: started",
- (unsigned long long) meta.number);
+ (unsigned long long)meta.number);
Status s;
{
@@ -504,24 +517,22 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
}
Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
- (unsigned long long) meta.number,
- (unsigned long long) meta.file_size,
+ (unsigned long long)meta.number, (unsigned long long)meta.file_size,
s.ToString().c_str());
delete iter;
pending_outputs_.erase(meta.number);
-
// Note that if file_size is zero, the file has been deleted and
// should not be added to the manifest.
int level = 0;
if (s.ok() && meta.file_size > 0) {
const Slice min_user_key = meta.smallest.user_key();
const Slice max_user_key = meta.largest.user_key();
- if (base != NULL) {
+ if (base != nullptr) {
level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
}
- edit->AddFile(level, meta.number, meta.file_size,
- meta.smallest, meta.largest);
+ edit->AddFile(level, meta.number, meta.file_size, meta.smallest,
+ meta.largest);
}
CompactionStats stats;
@@ -533,7 +544,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
void DBImpl::CompactMemTable() {
mutex_.AssertHeld();
- assert(imm_ != NULL);
+ assert(imm_ != nullptr);
// Save the contents of the memtable as a new Table
VersionEdit edit;
@@ -542,7 +553,7 @@ void DBImpl::CompactMemTable() {
Status s = WriteLevel0Table(imm_, &edit, base);
base->Unref();
- if (s.ok() && shutting_down_.Acquire_Load()) {
+ if (s.ok() && shutting_down_.load(std::memory_order_acquire)) {
s = Status::IOError("Deleting DB during memtable compaction");
}
@@ -556,8 +567,8 @@ void DBImpl::CompactMemTable() {
if (s.ok()) {
// Commit to the new state
imm_->Unref();
- imm_ = NULL;
- has_imm_.Release_Store(NULL);
+ imm_ = nullptr;
+ has_imm_.store(false, std::memory_order_release);
DeleteObsoleteFiles();
} else {
RecordBackgroundError(s);
@@ -575,13 +586,14 @@ void DBImpl::CompactRange(const Slice* begin, const Slice* end) {
}
}
}
- TEST_CompactMemTable(); // TODO(sanjay): Skip if memtable does not overlap
+ TEST_CompactMemTable(); // TODO(sanjay): Skip if memtable does not overlap
for (int level = 0; level < max_level_with_files; level++) {
TEST_CompactRange(level, begin, end);
}
}
-void DBImpl::TEST_CompactRange(int level, const Slice* begin,const Slice* end) {
+void DBImpl::TEST_CompactRange(int level, const Slice* begin,
+ const Slice* end) {
assert(level >= 0);
assert(level + 1 < config::kNumLevels);
@@ -590,44 +602,45 @@ void DBImpl::TEST_CompactRange(int level, const Slice* begin,const Slice* end) {
ManualCompaction manual;
manual.level = level;
manual.done = false;
- if (begin == NULL) {
- manual.begin = NULL;
+ if (begin == nullptr) {
+ manual.begin = nullptr;
} else {
begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek);
manual.begin = &begin_storage;
}
- if (end == NULL) {
- manual.end = NULL;
+ if (end == nullptr) {
+ manual.end = nullptr;
} else {
end_storage = InternalKey(*end, 0, static_cast<ValueType>(0));
manual.end = &end_storage;
}
MutexLock l(&mutex_);
- while (!manual.done && !shutting_down_.Acquire_Load() && bg_error_.ok()) {
- if (manual_compaction_ == NULL) { // Idle
+ while (!manual.done && !shutting_down_.load(std::memory_order_acquire) &&
+ bg_error_.ok()) {
+ if (manual_compaction_ == nullptr) { // Idle
manual_compaction_ = &manual;
MaybeScheduleCompaction();
} else { // Running either my compaction or another compaction.
- bg_cv_.Wait();
+ background_work_finished_signal_.Wait();
}
}
if (manual_compaction_ == &manual) {
// Cancel my manual compaction since we aborted early for some reason.
- manual_compaction_ = NULL;
+ manual_compaction_ = nullptr;
}
}
Status DBImpl::TEST_CompactMemTable() {
- // NULL batch means just wait for earlier writes to be done
- Status s = Write(WriteOptions(), NULL);
+ // nullptr batch means just wait for earlier writes to be done
+ Status s = Write(WriteOptions(), nullptr);
if (s.ok()) {
// Wait until the compaction completes
MutexLock l(&mutex_);
- while (imm_ != NULL && bg_error_.ok()) {
- bg_cv_.Wait();
+ while (imm_ != nullptr && bg_error_.ok()) {
+ background_work_finished_signal_.Wait();
}
- if (imm_ != NULL) {
+ if (imm_ != nullptr) {
s = bg_error_;
}
}
@@ -638,24 +651,23 @@ void DBImpl::RecordBackgroundError(const Status& s) {
mutex_.AssertHeld();
if (bg_error_.ok()) {
bg_error_ = s;
- bg_cv_.SignalAll();
+ background_work_finished_signal_.SignalAll();
}
}
void DBImpl::MaybeScheduleCompaction() {
mutex_.AssertHeld();
- if (bg_compaction_scheduled_) {
+ if (background_compaction_scheduled_) {
// Already scheduled
- } else if (shutting_down_.Acquire_Load()) {
+ } else if (shutting_down_.load(std::memory_order_acquire)) {
// DB is being deleted; no more background compactions
} else if (!bg_error_.ok()) {
// Already got an error; no more changes
- } else if (imm_ == NULL &&
- manual_compaction_ == NULL &&
+ } else if (imm_ == nullptr && manual_compaction_ == nullptr &&
!versions_->NeedsCompaction()) {
// No work to be done
} else {
- bg_compaction_scheduled_ = true;
+ background_compaction_scheduled_ = true;
env_->Schedule(&DBImpl::BGWork, this);
}
}
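
The hunks above replace port::AtomicPointer (shutting_down_, has_imm_) with std::atomic<bool> and explicit memory orders. A standalone sketch of the release/acquire flag idiom the new code relies on, in plain C++11 rather than DBImpl itself:

#include <atomic>
#include <thread>

std::atomic<bool> shutting_down{false};

void BackgroundWorker() {
  // The acquire load pairs with the release store in main(): once the flag
  // reads true, everything written before the store is visible here.
  while (!shutting_down.load(std::memory_order_acquire)) {
    // ... perform one unit of background work ...
  }
}

int main() {
  std::thread worker(BackgroundWorker);
  // ... publish whatever state the worker must observe before it stops ...
  shutting_down.store(true, std::memory_order_release);
  worker.join();
  return 0;
}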
@@ -666,8 +678,8 @@ void DBImpl::BGWork(void* db) {
void DBImpl::BackgroundCall() {
MutexLock l(&mutex_);
- assert(bg_compaction_scheduled_);
- if (shutting_down_.Acquire_Load()) {
+ assert(background_compaction_scheduled_);
+ if (shutting_down_.load(std::memory_order_acquire)) {
// No more background work when shutting down.
} else if (!bg_error_.ok()) {
// No more background work after a background error.
@@ -675,36 +687,35 @@ void DBImpl::BackgroundCall() {
BackgroundCompaction();
}
- bg_compaction_scheduled_ = false;
+ background_compaction_scheduled_ = false;
// Previous compaction may have produced too many files in a level,
// so reschedule another compaction if needed.
MaybeScheduleCompaction();
- bg_cv_.SignalAll();
+ background_work_finished_signal_.SignalAll();
}
void DBImpl::BackgroundCompaction() {
mutex_.AssertHeld();
- if (imm_ != NULL) {
+ if (imm_ != nullptr) {
CompactMemTable();
return;
}
Compaction* c;
- bool is_manual = (manual_compaction_ != NULL);
+ bool is_manual = (manual_compaction_ != nullptr);
InternalKey manual_end;
if (is_manual) {
ManualCompaction* m = manual_compaction_;
c = versions_->CompactRange(m->level, m->begin, m->end);
- m->done = (c == NULL);
- if (c != NULL) {
+ m->done = (c == nullptr);
+ if (c != nullptr) {
manual_end = c->input(0, c->num_input_files(0) - 1)->largest;
}
Log(options_.info_log,
"Manual compaction at level-%d from %s .. %s; will stop at %s\n",
- m->level,
- (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
+ m->level, (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
(m->end ? m->end->DebugString().c_str() : "(end)"),
(m->done ? "(end)" : manual_end.DebugString().c_str()));
} else {
@@ -712,26 +723,24 @@ void DBImpl::BackgroundCompaction() {
}
Status status;
- if (c == NULL) {
+ if (c == nullptr) {
// Nothing to do
} else if (!is_manual && c->IsTrivialMove()) {
// Move file to next level
assert(c->num_input_files(0) == 1);
FileMetaData* f = c->input(0, 0);
c->edit()->DeleteFile(c->level(), f->number);
- c->edit()->AddFile(c->level() + 1, f->number, f->file_size,
- f->smallest, f->largest);
+ c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest,
+ f->largest);
status = versions_->LogAndApply(c->edit(), &mutex_);
if (!status.ok()) {
RecordBackgroundError(status);
}
VersionSet::LevelSummaryStorage tmp;
Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n",
- static_cast<unsigned long long>(f->number),
- c->level() + 1,
+ static_cast<unsigned long long>(f->number), c->level() + 1,
static_cast<unsigned long long>(f->file_size),
- status.ToString().c_str(),
- versions_->LevelSummary(&tmp));
+ status.ToString().c_str(), versions_->LevelSummary(&tmp));
} else {
CompactionState* compact = new CompactionState(c);
status = DoCompactionWork(compact);
@@ -746,11 +755,10 @@ void DBImpl::BackgroundCompaction() {
if (status.ok()) {
// Done
- } else if (shutting_down_.Acquire_Load()) {
+ } else if (shutting_down_.load(std::memory_order_acquire)) {
// Ignore compaction errors found during shutting down
} else {
- Log(options_.info_log,
- "Compaction error: %s", status.ToString().c_str());
+ Log(options_.info_log, "Compaction error: %s", status.ToString().c_str());
}
if (is_manual) {
@@ -764,18 +772,18 @@ void DBImpl::BackgroundCompaction() {
m->tmp_storage = manual_end;
m->begin = &m->tmp_storage;
}
- manual_compaction_ = NULL;
+ manual_compaction_ = nullptr;
}
}
void DBImpl::CleanupCompaction(CompactionState* compact) {
mutex_.AssertHeld();
- if (compact->builder != NULL) {
+ if (compact->builder != nullptr) {
// May happen if we get a shutdown call in the middle of compaction
compact->builder->Abandon();
delete compact->builder;
} else {
- assert(compact->outfile == NULL);
+ assert(compact->outfile == nullptr);
}
delete compact->outfile;
for (size_t i = 0; i < compact->outputs.size(); i++) {
@@ -786,8 +794,8 @@ void DBImpl::CleanupCompaction(CompactionState* compact) {
}
Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
- assert(compact != NULL);
- assert(compact->builder == NULL);
+ assert(compact != nullptr);
+ assert(compact->builder == nullptr);
uint64_t file_number;
{
mutex_.Lock();
@@ -812,9 +820,9 @@ Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
Iterator* input) {
- assert(compact != NULL);
- assert(compact->outfile != NULL);
- assert(compact->builder != NULL);
+ assert(compact != nullptr);
+ assert(compact->outfile != nullptr);
+ assert(compact->builder != nullptr);
const uint64_t output_number = compact->current_output()->number;
assert(output_number != 0);
@@ -831,7 +839,7 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
compact->current_output()->file_size = current_bytes;
compact->total_bytes += current_bytes;
delete compact->builder;
- compact->builder = NULL;
+ compact->builder = nullptr;
// Finish and check for file errors
if (s.ok()) {
@@ -841,35 +849,29 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
s = compact->outfile->Close();
}
delete compact->outfile;
- compact->outfile = NULL;
+ compact->outfile = nullptr;
if (s.ok() && current_entries > 0) {
// Verify that the table is usable
- Iterator* iter = table_cache_->NewIterator(ReadOptions(),
- output_number,
- current_bytes);
+ Iterator* iter =
+ table_cache_->NewIterator(ReadOptions(), output_number, current_bytes);
s = iter->status();
delete iter;
if (s.ok()) {
- Log(options_.info_log,
- "Generated table #%llu@%d: %lld keys, %lld bytes",
- (unsigned long long) output_number,
- compact->compaction->level(),
- (unsigned long long) current_entries,
- (unsigned long long) current_bytes);
+ Log(options_.info_log, "Generated table #%llu@%d: %lld keys, %lld bytes",
+ (unsigned long long)output_number, compact->compaction->level(),
+ (unsigned long long)current_entries,
+ (unsigned long long)current_bytes);
}
}
return s;
}
-
Status DBImpl::InstallCompactionResults(CompactionState* compact) {
mutex_.AssertHeld();
- Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes",
- compact->compaction->num_input_files(0),
- compact->compaction->level(),
- compact->compaction->num_input_files(1),
- compact->compaction->level() + 1,
+ Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes",
+ compact->compaction->num_input_files(0), compact->compaction->level(),
+ compact->compaction->num_input_files(1), compact->compaction->level() + 1,
static_cast<long long>(compact->total_bytes));
// Add compaction outputs
@@ -877,9 +879,8 @@ Status DBImpl::InstallCompactionResults(CompactionState* compact) {
const int level = compact->compaction->level();
for (size_t i = 0; i < compact->outputs.size(); i++) {
const CompactionState::Output& out = compact->outputs[i];
- compact->compaction->edit()->AddFile(
- level + 1,
- out.number, out.file_size, out.smallest, out.largest);
+ compact->compaction->edit()->AddFile(level + 1, out.number, out.file_size,
+ out.smallest, out.largest);
}
return versions_->LogAndApply(compact->compaction->edit(), &mutex_);
}
@@ -888,39 +889,40 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
const uint64_t start_micros = env_->NowMicros();
int64_t imm_micros = 0; // Micros spent doing imm_ compactions
- Log(options_.info_log, "Compacting %d@%d + %d@%d files",
- compact->compaction->num_input_files(0),
- compact->compaction->level(),
+ Log(options_.info_log, "Compacting %d@%d + %d@%d files",
+ compact->compaction->num_input_files(0), compact->compaction->level(),
compact->compaction->num_input_files(1),
compact->compaction->level() + 1);
assert(versions_->NumLevelFiles(compact->compaction->level()) > 0);
- assert(compact->builder == NULL);
- assert(compact->outfile == NULL);
+ assert(compact->builder == nullptr);
+ assert(compact->outfile == nullptr);
if (snapshots_.empty()) {
compact->smallest_snapshot = versions_->LastSequence();
} else {
- compact->smallest_snapshot = snapshots_.oldest()->number_;
+ compact->smallest_snapshot = snapshots_.oldest()->sequence_number();
}
+ Iterator* input = versions_->MakeInputIterator(compact->compaction);
+
// Release mutex while we're actually doing the compaction work
mutex_.Unlock();
- Iterator* input = versions_->MakeInputIterator(compact->compaction);
input->SeekToFirst();
Status status;
ParsedInternalKey ikey;
std::string current_user_key;
bool has_current_user_key = false;
SequenceNumber last_sequence_for_key = kMaxSequenceNumber;
- for (; input->Valid() && !shutting_down_.Acquire_Load(); ) {
+ while (input->Valid() && !shutting_down_.load(std::memory_order_acquire)) {
// Prioritize immutable compaction work
- if (has_imm_.NoBarrier_Load() != NULL) {
+ if (has_imm_.load(std::memory_order_relaxed)) {
const uint64_t imm_start = env_->NowMicros();
mutex_.Lock();
- if (imm_ != NULL) {
+ if (imm_ != nullptr) {
CompactMemTable();
- bg_cv_.SignalAll(); // Wakeup MakeRoomForWrite() if necessary
+ // Wake up MakeRoomForWrite() if necessary.
+ background_work_finished_signal_.SignalAll();
}
mutex_.Unlock();
imm_micros += (env_->NowMicros() - imm_start);
@@ -928,7 +930,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
Slice key = input->key();
if (compact->compaction->ShouldStopBefore(key) &&
- compact->builder != NULL) {
+ compact->builder != nullptr) {
status = FinishCompactionOutputFile(compact, input);
if (!status.ok()) {
break;
@@ -944,8 +946,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
last_sequence_for_key = kMaxSequenceNumber;
} else {
if (!has_current_user_key ||
- user_comparator()->Compare(ikey.user_key,
- Slice(current_user_key)) != 0) {
+ user_comparator()->Compare(ikey.user_key, Slice(current_user_key)) !=
+ 0) {
// First occurrence of this user key
current_user_key.assign(ikey.user_key.data(), ikey.user_key.size());
has_current_user_key = true;
@@ -954,7 +956,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
if (last_sequence_for_key <= compact->smallest_snapshot) {
// Hidden by a newer entry for same user key
- drop = true; // (A)
+ drop = true; // (A)
} else if (ikey.type == kTypeDeletion &&
ikey.sequence <= compact->smallest_snapshot &&
compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
@@ -982,7 +984,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
if (!drop) {
// Open output file if necessary
- if (compact->builder == NULL) {
+ if (compact->builder == nullptr) {
status = OpenCompactionOutputFile(compact);
if (!status.ok()) {
break;
@@ -1007,17 +1009,17 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
input->Next();
}
- if (status.ok() && shutting_down_.Acquire_Load()) {
+ if (status.ok() && shutting_down_.load(std::memory_order_acquire)) {
status = Status::IOError("Deleting DB during compaction");
}
- if (status.ok() && compact->builder != NULL) {
+ if (status.ok() && compact->builder != nullptr) {
status = FinishCompactionOutputFile(compact, input);
}
if (status.ok()) {
status = input->status();
}
delete input;
- input = NULL;
+ input = nullptr;
CompactionStats stats;
stats.micros = env_->NowMicros() - start_micros - imm_micros;
@@ -1040,34 +1042,37 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
RecordBackgroundError(status);
}
VersionSet::LevelSummaryStorage tmp;
- Log(options_.info_log,
- "compacted to: %s", versions_->LevelSummary(&tmp));
+ Log(options_.info_log, "compacted to: %s", versions_->LevelSummary(&tmp));
return status;
}
namespace {
+
struct IterState {
- port::Mutex* mu;
- Version* version;
- MemTable* mem;
- MemTable* imm;
+ port::Mutex* const mu;
+ Version* const version GUARDED_BY(mu);
+ MemTable* const mem GUARDED_BY(mu);
+ MemTable* const imm GUARDED_BY(mu);
+
+ IterState(port::Mutex* mutex, MemTable* mem, MemTable* imm, Version* version)
+ : mu(mutex), version(version), mem(mem), imm(imm) {}
};
static void CleanupIteratorState(void* arg1, void* arg2) {
IterState* state = reinterpret_cast<IterState*>(arg1);
state->mu->Lock();
state->mem->Unref();
- if (state->imm != NULL) state->imm->Unref();
+ if (state->imm != nullptr) state->imm->Unref();
state->version->Unref();
state->mu->Unlock();
delete state;
}
-} // namespace
+
+} // anonymous namespace
Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
SequenceNumber* latest_snapshot,
uint32_t* seed) {
- IterState* cleanup = new IterState;
mutex_.Lock();
*latest_snapshot = versions_->LastSequence();
@@ -1075,7 +1080,7 @@ Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
std::vector<Iterator*> list;
list.push_back(mem_->NewIterator());
mem_->Ref();
- if (imm_ != NULL) {
+ if (imm_ != nullptr) {
list.push_back(imm_->NewIterator());
imm_->Ref();
}
@@ -1084,11 +1089,8 @@ Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
NewMergingIterator(&internal_comparator_, &list[0], list.size());
versions_->current()->Ref();
- cleanup->mu = &mutex_;
- cleanup->mem = mem_;
- cleanup->imm = imm_;
- cleanup->version = versions_->current();
- internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, NULL);
+ IterState* cleanup = new IterState(&mutex_, mem_, imm_, versions_->current());
+ internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, nullptr);
*seed = ++seed_;
mutex_.Unlock();
@@ -1106,14 +1108,14 @@ int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
return versions_->MaxNextLevelOverlappingBytes();
}
-Status DBImpl::Get(const ReadOptions& options,
- const Slice& key,
+Status DBImpl::Get(const ReadOptions& options, const Slice& key,
std::string* value) {
Status s;
MutexLock l(&mutex_);
SequenceNumber snapshot;
- if (options.snapshot != NULL) {
- snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
+ if (options.snapshot != nullptr) {
+ snapshot =
+ static_cast<const SnapshotImpl*>(options.snapshot)->sequence_number();
} else {
snapshot = versions_->LastSequence();
}
@@ -1122,7 +1124,7 @@ Status DBImpl::Get(const ReadOptions& options,
MemTable* imm = imm_;
Version* current = versions_->current();
mem->Ref();
- if (imm != NULL) imm->Ref();
+ if (imm != nullptr) imm->Ref();
current->Ref();
bool have_stat_update = false;
@@ -1135,7 +1137,7 @@ Status DBImpl::Get(const ReadOptions& options,
LookupKey lkey(key, snapshot);
if (mem->Get(lkey, value, &s)) {
// Done
- } else if (imm != NULL && imm->Get(lkey, value, &s)) {
+ } else if (imm != nullptr && imm->Get(lkey, value, &s)) {
// Done
} else {
s = current->Get(options, lkey, value, &stats);
@@ -1148,7 +1150,7 @@ Status DBImpl::Get(const ReadOptions& options,
MaybeScheduleCompaction();
}
mem->Unref();
- if (imm != NULL) imm->Unref();
+ if (imm != nullptr) imm->Unref();
current->Unref();
return s;
}
@@ -1157,12 +1159,12 @@ Iterator* DBImpl::NewIterator(const ReadOptions& options) {
SequenceNumber latest_snapshot;
uint32_t seed;
Iterator* iter = NewInternalIterator(options, &latest_snapshot, &seed);
- return NewDBIterator(
- this, user_comparator(), iter,
- (options.snapshot != NULL
- ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
- : latest_snapshot),
- seed);
+ return NewDBIterator(this, user_comparator(), iter,
+ (options.snapshot != nullptr
+ ? static_cast<const SnapshotImpl*>(options.snapshot)
+ ->sequence_number()
+ : latest_snapshot),
+ seed);
}
void DBImpl::RecordReadSample(Slice key) {
@@ -1177,9 +1179,9 @@ const Snapshot* DBImpl::GetSnapshot() {
return snapshots_.New(versions_->LastSequence());
}
-void DBImpl::ReleaseSnapshot(const Snapshot* s) {
+void DBImpl::ReleaseSnapshot(const Snapshot* snapshot) {
MutexLock l(&mutex_);
- snapshots_.Delete(reinterpret_cast<const SnapshotImpl*>(s));
+ snapshots_.Delete(static_cast<const SnapshotImpl*>(snapshot));
}
// Convenience methods
@@ -1191,9 +1193,9 @@ Status DBImpl::Delete(const WriteOptions& options, const Slice& key) {
return DB::Delete(options, key);
}
-Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
+Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
Writer w(&mutex_);
- w.batch = my_batch;
+ w.batch = updates;
w.sync = options.sync;
w.done = false;
@@ -1207,13 +1209,13 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
}
// May temporarily unlock and wait.
- Status status = MakeRoomForWrite(my_batch == NULL);
+ Status status = MakeRoomForWrite(updates == nullptr);
uint64_t last_sequence = versions_->LastSequence();
Writer* last_writer = &w;
- if (status.ok() && my_batch != NULL) { // NULL batch is for compactions
- WriteBatch* updates = BuildBatchGroup(&last_writer);
- WriteBatchInternal::SetSequence(updates, last_sequence + 1);
- last_sequence += WriteBatchInternal::Count(updates);
+ if (status.ok() && updates != nullptr) { // nullptr batch is for compactions
+ WriteBatch* write_batch = BuildBatchGroup(&last_writer);
+ WriteBatchInternal::SetSequence(write_batch, last_sequence + 1);
+ last_sequence += WriteBatchInternal::Count(write_batch);
// Add to log and apply to memtable. We can release the lock
// during this phase since &w is currently responsible for logging
@@ -1221,7 +1223,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
// into mem_.
{
mutex_.Unlock();
- status = log_->AddRecord(WriteBatchInternal::Contents(updates));
+ status = log_->AddRecord(WriteBatchInternal::Contents(write_batch));
bool sync_error = false;
if (status.ok() && options.sync) {
status = logfile_->Sync();
@@ -1230,7 +1232,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
}
}
if (status.ok()) {
- status = WriteBatchInternal::InsertInto(updates, mem_);
+ status = WriteBatchInternal::InsertInto(write_batch, mem_);
}
mutex_.Lock();
if (sync_error) {
@@ -1240,7 +1242,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
RecordBackgroundError(status);
}
}
- if (updates == tmp_batch_) tmp_batch_->Clear();
+ if (write_batch == tmp_batch_) tmp_batch_->Clear();
versions_->SetLastSequence(last_sequence);
}
@@ -1265,12 +1267,13 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
}
// REQUIRES: Writer list must be non-empty
-// REQUIRES: First writer must have a non-NULL batch
+// REQUIRES: First writer must have a non-null batch
WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
+ mutex_.AssertHeld();
assert(!writers_.empty());
Writer* first = writers_.front();
WriteBatch* result = first->batch;
- assert(result != NULL);
+ assert(result != nullptr);
size_t size = WriteBatchInternal::ByteSize(first->batch);
@@ -1278,8 +1281,8 @@ WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
// original write is small, limit the growth so we do not slow
// down the small write too much.
size_t max_size = 1 << 20;
- if (size <= (128<<10)) {
- max_size = size + (128<<10);
+ if (size <= (128 << 10)) {
+ max_size = size + (128 << 10);
}
*last_writer = first;
@@ -1292,7 +1295,7 @@ WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
break;
}
- if (w->batch != NULL) {
+ if (w->batch != nullptr) {
size += WriteBatchInternal::ByteSize(w->batch);
if (size > max_size) {
// Do not make batch too big
@@ -1325,9 +1328,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
// Yield previous error
s = bg_error_;
break;
- } else if (
- allow_delay &&
- versions_->NumLevelFiles(0) >= config::kL0_SlowdownWritesTrigger) {
+ } else if (allow_delay && versions_->NumLevelFiles(0) >=
+ config::kL0_SlowdownWritesTrigger) {
// We are getting close to hitting a hard limit on the number of
// L0 files. Rather than delaying a single write by several
// seconds when we hit the hard limit, start delaying each
@@ -1342,20 +1344,20 @@ Status DBImpl::MakeRoomForWrite(bool force) {
(mem_->ApproximateMemoryUsage() <= options_.write_buffer_size)) {
// There is room in current memtable
break;
- } else if (imm_ != NULL) {
+ } else if (imm_ != nullptr) {
// We have filled up the current memtable, but the previous
// one is still being compacted, so we wait.
Log(options_.info_log, "Current memtable full; waiting...\n");
- bg_cv_.Wait();
+ background_work_finished_signal_.Wait();
} else if (versions_->NumLevelFiles(0) >= config::kL0_StopWritesTrigger) {
// There are too many level-0 files.
Log(options_.info_log, "Too many L0 files; waiting...\n");
- bg_cv_.Wait();
+ background_work_finished_signal_.Wait();
} else {
// Attempt to switch to a new memtable and trigger compaction of old
assert(versions_->PrevLogNumber() == 0);
uint64_t new_log_number = versions_->NewFileNumber();
- WritableFile* lfile = NULL;
+ WritableFile* lfile = nullptr;
s = env_->NewWritableFile(LogFileName(dbname_, new_log_number), &lfile);
if (!s.ok()) {
// Avoid chewing through file number space in a tight loop.
@@ -1368,10 +1370,10 @@ Status DBImpl::MakeRoomForWrite(bool force) {
logfile_number_ = new_log_number;
log_ = new log::Writer(lfile);
imm_ = mem_;
- has_imm_.Release_Store(imm_);
+ has_imm_.store(true, std::memory_order_release);
mem_ = new MemTable(internal_comparator_);
mem_->Ref();
- force = false; // Do not force another compaction if have room
+ force = false; // Do not force another compaction if have room
MaybeScheduleCompaction();
}
}
@@ -1405,21 +1407,16 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
snprintf(buf, sizeof(buf),
" Compactions\n"
"Level Files Size(MB) Time(sec) Read(MB) Write(MB)\n"
- "--------------------------------------------------\n"
- );
+ "--------------------------------------------------\n");
value->append(buf);
for (int level = 0; level < config::kNumLevels; level++) {
int files = versions_->NumLevelFiles(level);
if (stats_[level].micros > 0 || files > 0) {
- snprintf(
- buf, sizeof(buf),
- "%3d %8d %8.0f %9.0f %8.0f %9.0f\n",
- level,
- files,
- versions_->NumLevelBytes(level) / 1048576.0,
- stats_[level].micros / 1e6,
- stats_[level].bytes_read / 1048576.0,
- stats_[level].bytes_written / 1048576.0);
+ snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n", level,
+ files, versions_->NumLevelBytes(level) / 1048576.0,
+ stats_[level].micros / 1e6,
+ stats_[level].bytes_read / 1048576.0,
+ stats_[level].bytes_written / 1048576.0);
value->append(buf);
}
}
@@ -1445,16 +1442,11 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
return false;
}
-void DBImpl::GetApproximateSizes(
- const Range* range, int n,
- uint64_t* sizes) {
+void DBImpl::GetApproximateSizes(const Range* range, int n, uint64_t* sizes) {
// TODO(opt): better implementation
- Version* v;
- {
- MutexLock l(&mutex_);
- versions_->current()->Ref();
- v = versions_->current();
- }
+ MutexLock l(&mutex_);
+ Version* v = versions_->current();
+ v->Ref();
for (int i = 0; i < n; i++) {
// Convert user_key into a corresponding internal key.
@@ -1465,10 +1457,7 @@ void DBImpl::GetApproximateSizes(
sizes[i] = (limit >= start ? limit - start : 0);
}
- {
- MutexLock l(&mutex_);
- v->Unref();
- }
+ v->Unref();
}
// Default implementations of convenience methods that subclasses of DB
@@ -1485,11 +1474,10 @@ Status DB::Delete(const WriteOptions& opt, const Slice& key) {
return Write(opt, &batch);
}
-DB::~DB() { }
+DB::~DB() = default;
-Status DB::Open(const Options& options, const std::string& dbname,
- DB** dbptr) {
- *dbptr = NULL;
+Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
+ *dbptr = nullptr;
DBImpl* impl = new DBImpl(options, dbname);
impl->mutex_.Lock();
@@ -1497,7 +1485,7 @@ Status DB::Open(const Options& options, const std::string& dbname,
// Recover handles create_if_missing, error_if_exists
bool save_manifest = false;
Status s = impl->Recover(&edit, &save_manifest);
- if (s.ok() && impl->mem_ == NULL) {
+ if (s.ok() && impl->mem_ == nullptr) {
// Create new log and a corresponding memtable.
uint64_t new_log_number = impl->versions_->NewFileNumber();
WritableFile* lfile;
@@ -1523,7 +1511,7 @@ Status DB::Open(const Options& options, const std::string& dbname,
}
impl->mutex_.Unlock();
if (s.ok()) {
- assert(impl->mem_ != NULL);
+ assert(impl->mem_ != nullptr);
*dbptr = impl;
} else {
delete impl;
@@ -1531,21 +1519,20 @@ Status DB::Open(const Options& options, const std::string& dbname,
return s;
}
-Snapshot::~Snapshot() {
-}
+Snapshot::~Snapshot() = default;
Status DestroyDB(const std::string& dbname, const Options& options) {
Env* env = options.env;
std::vector<std::string> filenames;
- // Ignore error in case directory does not exist
- env->GetChildren(dbname, &filenames);
- if (filenames.empty()) {
+ Status result = env->GetChildren(dbname, &filenames);
+ if (!result.ok()) {
+ // Ignore error in case directory does not exist
return Status::OK();
}
FileLock* lock;
const std::string lockname = LockFileName(dbname);
- Status result = env->LockFile(lockname, &lock);
+ result = env->LockFile(lockname, &lock);
if (result.ok()) {
uint64_t number;
FileType type;
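
One behavioral change in db_impl.cc above is that DeleteObsoleteFiles() now gathers the doomed filenames while mutex_ is held and performs the actual DeleteFile() calls with the lock released. A generic sketch of that shape, with hypothetical IsObsolete()/DeleteFromDisk() helpers standing in for the real selection logic and env_->DeleteFile():

#include <cstdio>
#include <mutex>
#include <string>
#include <vector>

// Stand-in policy and deletion; only the locking shape matters here.
bool IsObsolete(const std::string& name) { return name.rfind("tmp_", 0) == 0; }
void DeleteFromDisk(const std::string& name) { std::remove(name.c_str()); }

void DeleteObsolete(std::mutex& mu, const std::vector<std::string>& names) {
  std::vector<std::string> to_delete;
  {
    std::lock_guard<std::mutex> lock(mu);
    // Decide what to delete while the shared state is protected.
    for (const std::string& name : names) {
      if (IsObsolete(name)) to_delete.push_back(name);
    }
  }
  // The chosen names are unique and no longer referenced, so the slow
  // filesystem work can run without holding the mutex.
  for (const std::string& name : to_delete) {
    DeleteFromDisk(name);
  }
}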
diff --git a/src/leveldb/db/db_impl.h b/src/leveldb/db/db_impl.h
index 8ff323e728..685735c733 100644
--- a/src/leveldb/db/db_impl.h
+++ b/src/leveldb/db/db_impl.h
@@ -5,8 +5,11 @@
#ifndef STORAGE_LEVELDB_DB_DB_IMPL_H_
#define STORAGE_LEVELDB_DB_DB_IMPL_H_
+#include <atomic>
#include <deque>
#include <set>
+#include <string>
+
#include "db/dbformat.h"
#include "db/log_writer.h"
#include "db/snapshot.h"
@@ -26,21 +29,25 @@ class VersionSet;
class DBImpl : public DB {
public:
DBImpl(const Options& options, const std::string& dbname);
- virtual ~DBImpl();
+
+ DBImpl(const DBImpl&) = delete;
+ DBImpl& operator=(const DBImpl&) = delete;
+
+ ~DBImpl() override;
// Implementations of the DB interface
- virtual Status Put(const WriteOptions&, const Slice& key, const Slice& value);
- virtual Status Delete(const WriteOptions&, const Slice& key);
- virtual Status Write(const WriteOptions& options, WriteBatch* updates);
- virtual Status Get(const ReadOptions& options,
- const Slice& key,
- std::string* value);
- virtual Iterator* NewIterator(const ReadOptions&);
- virtual const Snapshot* GetSnapshot();
- virtual void ReleaseSnapshot(const Snapshot* snapshot);
- virtual bool GetProperty(const Slice& property, std::string* value);
- virtual void GetApproximateSizes(const Range* range, int n, uint64_t* sizes);
- virtual void CompactRange(const Slice* begin, const Slice* end);
+ Status Put(const WriteOptions&, const Slice& key,
+ const Slice& value) override;
+ Status Delete(const WriteOptions&, const Slice& key) override;
+ Status Write(const WriteOptions& options, WriteBatch* updates) override;
+ Status Get(const ReadOptions& options, const Slice& key,
+ std::string* value) override;
+ Iterator* NewIterator(const ReadOptions&) override;
+ const Snapshot* GetSnapshot() override;
+ void ReleaseSnapshot(const Snapshot* snapshot) override;
+ bool GetProperty(const Slice& property, std::string* value) override;
+ void GetApproximateSizes(const Range* range, int n, uint64_t* sizes) override;
+ void CompactRange(const Slice* begin, const Slice* end) override;
// Extra methods (for testing) that are not in the public DB interface
@@ -69,6 +76,31 @@ class DBImpl : public DB {
struct CompactionState;
struct Writer;
+ // Information for a manual compaction
+ struct ManualCompaction {
+ int level;
+ bool done;
+ const InternalKey* begin; // null means beginning of key range
+ const InternalKey* end; // null means end of key range
+ InternalKey tmp_storage; // Used to keep track of compaction progress
+ };
+
+ // Per level compaction stats. stats_[level] stores the stats for
+ // compactions that produced data for the specified "level".
+ struct CompactionStats {
+ CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}
+
+ void Add(const CompactionStats& c) {
+ this->micros += c.micros;
+ this->bytes_read += c.bytes_read;
+ this->bytes_written += c.bytes_written;
+ }
+
+ int64_t micros;
+ int64_t bytes_read;
+ int64_t bytes_written;
+ };
+
Iterator* NewInternalIterator(const ReadOptions&,
SequenceNumber* latest_snapshot,
uint32_t* seed);
@@ -84,7 +116,7 @@ class DBImpl : public DB {
void MaybeIgnoreError(Status* s) const;
// Delete any unneeded files and stale in-memory entries.
- void DeleteObsoleteFiles();
+ void DeleteObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Compact the in-memory write buffer to disk. Switches to a new
// log-file/memtable and writes a new descriptor iff successful.
@@ -100,14 +132,15 @@ class DBImpl : public DB {
Status MakeRoomForWrite(bool force /* compact even if there is room? */)
EXCLUSIVE_LOCKS_REQUIRED(mutex_);
- WriteBatch* BuildBatchGroup(Writer** last_writer);
+ WriteBatch* BuildBatchGroup(Writer** last_writer)
+ EXCLUSIVE_LOCKS_REQUIRED(mutex_);
void RecordBackgroundError(const Status& s);
void MaybeScheduleCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
static void BGWork(void* db);
void BackgroundCall();
- void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
void CleanupCompaction(CompactionState* compact)
EXCLUSIVE_LOCKS_REQUIRED(mutex_);
Status DoCompactionWork(CompactionState* compact)
@@ -118,93 +151,66 @@ class DBImpl : public DB {
Status InstallCompactionResults(CompactionState* compact)
EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ const Comparator* user_comparator() const {
+ return internal_comparator_.user_comparator();
+ }
+
// Constant after construction
Env* const env_;
const InternalKeyComparator internal_comparator_;
const InternalFilterPolicy internal_filter_policy_;
const Options options_; // options_.comparator == &internal_comparator_
- bool owns_info_log_;
- bool owns_cache_;
+ const bool owns_info_log_;
+ const bool owns_cache_;
const std::string dbname_;
// table_cache_ provides its own synchronization
- TableCache* table_cache_;
+ TableCache* const table_cache_;
- // Lock over the persistent DB state. Non-NULL iff successfully acquired.
+ // Lock over the persistent DB state. Non-null iff successfully acquired.
FileLock* db_lock_;
// State below is protected by mutex_
port::Mutex mutex_;
- port::AtomicPointer shutting_down_;
- port::CondVar bg_cv_; // Signalled when background work finishes
+ std::atomic<bool> shutting_down_;
+ port::CondVar background_work_finished_signal_ GUARDED_BY(mutex_);
MemTable* mem_;
- MemTable* imm_; // Memtable being compacted
- port::AtomicPointer has_imm_; // So bg thread can detect non-NULL imm_
+ MemTable* imm_ GUARDED_BY(mutex_); // Memtable being compacted
+ std::atomic<bool> has_imm_; // So bg thread can detect non-null imm_
WritableFile* logfile_;
- uint64_t logfile_number_;
+ uint64_t logfile_number_ GUARDED_BY(mutex_);
log::Writer* log_;
- uint32_t seed_; // For sampling.
+ uint32_t seed_ GUARDED_BY(mutex_); // For sampling.
// Queue of writers.
- std::deque<Writer*> writers_;
- WriteBatch* tmp_batch_;
+ std::deque<Writer*> writers_ GUARDED_BY(mutex_);
+ WriteBatch* tmp_batch_ GUARDED_BY(mutex_);
- SnapshotList snapshots_;
+ SnapshotList snapshots_ GUARDED_BY(mutex_);
// Set of table files to protect from deletion because they are
// part of ongoing compactions.
- std::set<uint64_t> pending_outputs_;
+ std::set<uint64_t> pending_outputs_ GUARDED_BY(mutex_);
// Has a background compaction been scheduled or is running?
- bool bg_compaction_scheduled_;
+ bool background_compaction_scheduled_ GUARDED_BY(mutex_);
- // Information for a manual compaction
- struct ManualCompaction {
- int level;
- bool done;
- const InternalKey* begin; // NULL means beginning of key range
- const InternalKey* end; // NULL means end of key range
- InternalKey tmp_storage; // Used to keep track of compaction progress
- };
- ManualCompaction* manual_compaction_;
+ ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);
- VersionSet* versions_;
+ VersionSet* const versions_ GUARDED_BY(mutex_);
// Have we encountered a background error in paranoid mode?
- Status bg_error_;
-
- // Per level compaction stats. stats_[level] stores the stats for
- // compactions that produced data for the specified "level".
- struct CompactionStats {
- int64_t micros;
- int64_t bytes_read;
- int64_t bytes_written;
-
- CompactionStats() : micros(0), bytes_read(0), bytes_written(0) { }
+ Status bg_error_ GUARDED_BY(mutex_);
- void Add(const CompactionStats& c) {
- this->micros += c.micros;
- this->bytes_read += c.bytes_read;
- this->bytes_written += c.bytes_written;
- }
- };
- CompactionStats stats_[config::kNumLevels];
-
- // No copying allowed
- DBImpl(const DBImpl&);
- void operator=(const DBImpl&);
-
- const Comparator* user_comparator() const {
- return internal_comparator_.user_comparator();
- }
+ CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_);
};
// Sanitize db options. The caller should delete result.info_log if
// it is not equal to src.info_log.
-extern Options SanitizeOptions(const std::string& db,
- const InternalKeyComparator* icmp,
- const InternalFilterPolicy* ipolicy,
- const Options& src);
+Options SanitizeOptions(const std::string& db,
+ const InternalKeyComparator* icmp,
+ const InternalFilterPolicy* ipolicy,
+ const Options& src);
} // namespace leveldb
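
The db_impl.h hunks above attach Clang thread-safety annotations to DBImpl's state: members gain GUARDED_BY(mutex_) and member functions gain EXCLUSIVE_LOCKS_REQUIRED(mutex_), so `-Wthread-safety` can flag accesses made without the lock at compile time. A minimal sketch of how these annotations behave is below; it assumes a Clang toolchain, and the Mutex/Counter names are illustrative stand-ins for port::Mutex and DBImpl (the macros mirror what port/thread_annotations.h expands to and collapse to no-ops on other compilers).

    // Illustrative sketch only; compile with: clang++ -std=c++11 -Wthread-safety
    #include <mutex>

    #if defined(__clang__)
    #define THREAD_ANNOTATION(x) __attribute__((x))
    #else
    #define THREAD_ANNOTATION(x)  // no-op elsewhere
    #endif

    #define LOCKABLE THREAD_ANNOTATION(lockable)
    #define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
    #define EXCLUSIVE_LOCKS_REQUIRED(...) \
      THREAD_ANNOTATION(exclusive_locks_required(__VA_ARGS__))
    #define EXCLUSIVE_LOCK_FUNCTION(...) \
      THREAD_ANNOTATION(exclusive_lock_function(__VA_ARGS__))
    #define UNLOCK_FUNCTION(...) THREAD_ANNOTATION(unlock_function(__VA_ARGS__))

    // Annotated mutex wrapper, analogous to leveldb's port::Mutex.
    class LOCKABLE Mutex {
     public:
      void Lock() EXCLUSIVE_LOCK_FUNCTION() { mu_.lock(); }
      void Unlock() UNLOCK_FUNCTION() { mu_.unlock(); }

     private:
      std::mutex mu_;
    };

    class Counter {
     public:
      void Increment() {
        mu_.Lock();
        IncrementLocked();  // OK: mu_ is held on this path.
        mu_.Unlock();
      }

     private:
      // Callers that do not hold mu_ get a -Wthread-safety warning.
      void IncrementLocked() EXCLUSIVE_LOCKS_REQUIRED(mu_) { ++value_; }

      Mutex mu_;
      int value_ GUARDED_BY(mu_) = 0;  // Reads and writes require mu_.
    };

The same mechanism is what lets the new DeleteObsoleteFiles() and BuildBatchGroup() signatures document, and enforce, that they may only be called with mutex_ held.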
diff --git a/src/leveldb/db/db_iter.cc b/src/leveldb/db/db_iter.cc
index 3b2035e9e3..98715a9502 100644
--- a/src/leveldb/db/db_iter.cc
+++ b/src/leveldb/db/db_iter.cc
@@ -4,9 +4,9 @@
#include "db/db_iter.h"
-#include "db/filename.h"
#include "db/db_impl.h"
#include "db/dbformat.h"
+#include "db/filename.h"
#include "leveldb/env.h"
#include "leveldb/iterator.h"
#include "port/port.h"
@@ -36,17 +36,14 @@ namespace {
// combines multiple entries for the same userkey found in the DB
// representation into a single entry while accounting for sequence
// numbers, deletion markers, overwrites, etc.
-class DBIter: public Iterator {
+class DBIter : public Iterator {
public:
// Which direction is the iterator currently moving?
// (1) When moving forward, the internal iterator is positioned at
// the exact entry that yields this->key(), this->value()
// (2) When moving backwards, the internal iterator is positioned
// just before all entries whose user key == this->key().
- enum Direction {
- kForward,
- kReverse
- };
+ enum Direction { kForward, kReverse };
DBIter(DBImpl* db, const Comparator* cmp, Iterator* iter, SequenceNumber s,
uint32_t seed)
@@ -57,21 +54,22 @@ class DBIter: public Iterator {
direction_(kForward),
valid_(false),
rnd_(seed),
- bytes_counter_(RandomPeriod()) {
- }
- virtual ~DBIter() {
- delete iter_;
- }
- virtual bool Valid() const { return valid_; }
- virtual Slice key() const {
+ bytes_until_read_sampling_(RandomCompactionPeriod()) {}
+
+ DBIter(const DBIter&) = delete;
+ DBIter& operator=(const DBIter&) = delete;
+
+ ~DBIter() override { delete iter_; }
+ bool Valid() const override { return valid_; }
+ Slice key() const override {
assert(valid_);
return (direction_ == kForward) ? ExtractUserKey(iter_->key()) : saved_key_;
}
- virtual Slice value() const {
+ Slice value() const override {
assert(valid_);
return (direction_ == kForward) ? iter_->value() : saved_value_;
}
- virtual Status status() const {
+ Status status() const override {
if (status_.ok()) {
return iter_->status();
} else {
@@ -79,11 +77,11 @@ class DBIter: public Iterator {
}
}
- virtual void Next();
- virtual void Prev();
- virtual void Seek(const Slice& target);
- virtual void SeekToFirst();
- virtual void SeekToLast();
+ void Next() override;
+ void Prev() override;
+ void Seek(const Slice& target) override;
+ void SeekToFirst() override;
+ void SeekToLast() override;
private:
void FindNextUserEntry(bool skipping, std::string* skip);
@@ -103,38 +101,35 @@ class DBIter: public Iterator {
}
}
- // Pick next gap with average value of config::kReadBytesPeriod.
- ssize_t RandomPeriod() {
- return rnd_.Uniform(2*config::kReadBytesPeriod);
+ // Picks the number of bytes that can be read until a compaction is scheduled.
+ size_t RandomCompactionPeriod() {
+ return rnd_.Uniform(2 * config::kReadBytesPeriod);
}
DBImpl* db_;
const Comparator* const user_comparator_;
Iterator* const iter_;
SequenceNumber const sequence_;
-
Status status_;
- std::string saved_key_; // == current key when direction_==kReverse
- std::string saved_value_; // == current raw value when direction_==kReverse
+ std::string saved_key_; // == current key when direction_==kReverse
+ std::string saved_value_; // == current raw value when direction_==kReverse
Direction direction_;
bool valid_;
-
Random rnd_;
- ssize_t bytes_counter_;
-
- // No copying allowed
- DBIter(const DBIter&);
- void operator=(const DBIter&);
+ size_t bytes_until_read_sampling_;
};
inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {
Slice k = iter_->key();
- ssize_t n = k.size() + iter_->value().size();
- bytes_counter_ -= n;
- while (bytes_counter_ < 0) {
- bytes_counter_ += RandomPeriod();
+
+ size_t bytes_read = k.size() + iter_->value().size();
+ while (bytes_until_read_sampling_ < bytes_read) {
+ bytes_until_read_sampling_ += RandomCompactionPeriod();
db_->RecordReadSample(k);
}
+ assert(bytes_until_read_sampling_ >= bytes_read);
+ bytes_until_read_sampling_ -= bytes_read;
+
if (!ParseInternalKey(k, ikey)) {
status_ = Status::Corruption("corrupted internal key in DBIter");
return false;
@@ -165,6 +160,15 @@ void DBIter::Next() {
} else {
// Store in saved_key_ the current key so we skip it below.
SaveKey(ExtractUserKey(iter_->key()), &saved_key_);
+
+ // iter_ is pointing to current key. We can now safely move to the next to
+ // avoid checking current key.
+ iter_->Next();
+ if (!iter_->Valid()) {
+ valid_ = false;
+ saved_key_.clear();
+ return;
+ }
}
FindNextUserEntry(true, &saved_key_);
@@ -218,8 +222,8 @@ void DBIter::Prev() {
ClearSavedValue();
return;
}
- if (user_comparator_->Compare(ExtractUserKey(iter_->key()),
- saved_key_) < 0) {
+ if (user_comparator_->Compare(ExtractUserKey(iter_->key()), saved_key_) <
+ 0) {
break;
}
}
@@ -275,8 +279,8 @@ void DBIter::Seek(const Slice& target) {
direction_ = kForward;
ClearSavedValue();
saved_key_.clear();
- AppendInternalKey(
- &saved_key_, ParsedInternalKey(target, sequence_, kValueTypeForSeek));
+ AppendInternalKey(&saved_key_,
+ ParsedInternalKey(target, sequence_, kValueTypeForSeek));
iter_->Seek(saved_key_);
if (iter_->Valid()) {
FindNextUserEntry(false, &saved_key_ /* temporary storage */);
@@ -305,12 +309,9 @@ void DBIter::SeekToLast() {
} // anonymous namespace
-Iterator* NewDBIterator(
- DBImpl* db,
- const Comparator* user_key_comparator,
- Iterator* internal_iter,
- SequenceNumber sequence,
- uint32_t seed) {
+Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator,
+ Iterator* internal_iter, SequenceNumber sequence,
+ uint32_t seed) {
return new DBIter(db, user_key_comparator, internal_iter, sequence, seed);
}
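
The largest behavioral tweak in db_iter.cc above is the read-sampling counter: the signed `bytes_counter_` that was decremented first and refilled once it went negative is replaced by an unsigned `bytes_until_read_sampling_` that is topped up before the subtraction, so it can never wrap. A standalone sketch of that countdown pattern follows (illustrative names only; `RecordReadSample` is a stub and the period constant simply mirrors config::kReadBytesPeriod):

    // Sketch of the unsigned "bytes until next read sample" countdown.
    #include <cstddef>
    #include <cstdio>
    #include <random>

    constexpr size_t kReadBytesPeriod = 1048576;  // average gap between samples

    size_t RandomCompactionPeriod(std::mt19937& rng) {
      // Uniform in [0, 2 * kReadBytesPeriod), so the mean gap is kReadBytesPeriod.
      return std::uniform_int_distribution<size_t>(0, 2 * kReadBytesPeriod - 1)(rng);
    }

    void RecordReadSample() { std::puts("read sample recorded"); }

    int main() {
      std::mt19937 rng(301);
      size_t bytes_until_sampling = RandomCompactionPeriod(rng);
      for (int i = 0; i < 200000; ++i) {
        const size_t bytes_read = 100;  // pretend each entry is 100 bytes
        // Top the counter up *before* subtracting so the unsigned value
        // cannot wrap; each top-up corresponds to one sampling event.
        while (bytes_until_sampling < bytes_read) {
          bytes_until_sampling += RandomCompactionPeriod(rng);
          RecordReadSample();
        }
        bytes_until_sampling -= bytes_read;
      }
      return 0;
    }

With roughly 20 MB of simulated reads and a 1 MB average period, the sketch records on the order of twenty samples, which is the behavior the iterator relies on to trigger seek-based compactions.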
diff --git a/src/leveldb/db/db_iter.h b/src/leveldb/db/db_iter.h
index 04927e937b..fd93e912a0 100644
--- a/src/leveldb/db/db_iter.h
+++ b/src/leveldb/db/db_iter.h
@@ -6,8 +6,9 @@
#define STORAGE_LEVELDB_DB_DB_ITER_H_
#include <stdint.h>
-#include "leveldb/db.h"
+
#include "db/dbformat.h"
+#include "leveldb/db.h"
namespace leveldb {
@@ -16,12 +17,9 @@ class DBImpl;
// Return a new iterator that converts internal keys (yielded by
// "*internal_iter") that were live at the specified "sequence" number
// into appropriate user keys.
-extern Iterator* NewDBIterator(
- DBImpl* db,
- const Comparator* user_key_comparator,
- Iterator* internal_iter,
- SequenceNumber sequence,
- uint32_t seed);
+Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator,
+ Iterator* internal_iter, SequenceNumber sequence,
+ uint32_t seed);
} // namespace leveldb
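
The db_iter.h hunk above also drops `extern` from the NewDBIterator declaration, as the db_impl.h hunk did for SanitizeOptions. The keyword is redundant on namespace-scope function declarations, which have external linkage by default, so the two declarations below (an illustrative pair, not from the diff) declare exactly the same function:

    // Equivalent declarations: `extern` adds nothing to a function declaration.
    extern int Lookup(int key);
    int Lookup(int key);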
diff --git a/src/leveldb/db/db_test.cc b/src/leveldb/db/db_test.cc
index a0b08bc19c..beb1d3bdef 100644
--- a/src/leveldb/db/db_test.cc
+++ b/src/leveldb/db/db_test.cc
@@ -3,14 +3,20 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "leveldb/db.h"
-#include "leveldb/filter_policy.h"
+
+#include <atomic>
+#include <string>
+
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "leveldb/cache.h"
#include "leveldb/env.h"
+#include "leveldb/filter_policy.h"
#include "leveldb/table.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/hash.h"
#include "util/logging.h"
#include "util/mutexlock.h"
@@ -25,83 +31,116 @@ static std::string RandomString(Random* rnd, int len) {
return r;
}
+static std::string RandomKey(Random* rnd) {
+ int len =
+ (rnd->OneIn(3) ? 1 // Short sometimes to encourage collisions
+ : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
+ return test::RandomKey(rnd, len);
+}
+
namespace {
class AtomicCounter {
- private:
- port::Mutex mu_;
- int count_;
public:
- AtomicCounter() : count_(0) { }
- void Increment() {
- IncrementBy(1);
- }
- void IncrementBy(int count) {
+ AtomicCounter() : count_(0) {}
+ void Increment() { IncrementBy(1); }
+ void IncrementBy(int count) LOCKS_EXCLUDED(mu_) {
MutexLock l(&mu_);
count_ += count;
}
- int Read() {
+ int Read() LOCKS_EXCLUDED(mu_) {
MutexLock l(&mu_);
return count_;
}
- void Reset() {
+ void Reset() LOCKS_EXCLUDED(mu_) {
MutexLock l(&mu_);
count_ = 0;
}
+
+ private:
+ port::Mutex mu_;
+ int count_ GUARDED_BY(mu_);
};
void DelayMilliseconds(int millis) {
Env::Default()->SleepForMicroseconds(millis * 1000);
}
-}
+} // namespace
+
+// Test Env to override default Env behavior for testing.
+class TestEnv : public EnvWrapper {
+ public:
+ explicit TestEnv(Env* base) : EnvWrapper(base), ignore_dot_files_(false) {}
+
+ void SetIgnoreDotFiles(bool ignored) { ignore_dot_files_ = ignored; }
+
+ Status GetChildren(const std::string& dir,
+ std::vector<std::string>* result) override {
+ Status s = target()->GetChildren(dir, result);
+ if (!s.ok() || !ignore_dot_files_) {
+ return s;
+ }
+
+ std::vector<std::string>::iterator it = result->begin();
+ while (it != result->end()) {
+ if ((*it == ".") || (*it == "..")) {
+ it = result->erase(it);
+ } else {
+ ++it;
+ }
+ }
+
+ return s;
+ }
+
+ private:
+ bool ignore_dot_files_;
+};
-// Special Env used to delay background operations
+// Special Env used to delay background operations.
class SpecialEnv : public EnvWrapper {
public:
- // sstable/log Sync() calls are blocked while this pointer is non-NULL.
- port::AtomicPointer delay_data_sync_;
+ // sstable/log Sync() calls are blocked while this pointer is non-null.
+ std::atomic<bool> delay_data_sync_;
// sstable/log Sync() calls return an error.
- port::AtomicPointer data_sync_error_;
+ std::atomic<bool> data_sync_error_;
- // Simulate no-space errors while this pointer is non-NULL.
- port::AtomicPointer no_space_;
+ // Simulate no-space errors while this pointer is non-null.
+ std::atomic<bool> no_space_;
- // Simulate non-writable file system while this pointer is non-NULL
- port::AtomicPointer non_writable_;
+ // Simulate non-writable file system while this pointer is non-null.
+ std::atomic<bool> non_writable_;
- // Force sync of manifest files to fail while this pointer is non-NULL
- port::AtomicPointer manifest_sync_error_;
+ // Force sync of manifest files to fail while this pointer is non-null.
+ std::atomic<bool> manifest_sync_error_;
- // Force write to manifest files to fail while this pointer is non-NULL
- port::AtomicPointer manifest_write_error_;
+ // Force write to manifest files to fail while this pointer is non-null.
+ std::atomic<bool> manifest_write_error_;
bool count_random_reads_;
AtomicCounter random_read_counter_;
- explicit SpecialEnv(Env* base) : EnvWrapper(base) {
- delay_data_sync_.Release_Store(NULL);
- data_sync_error_.Release_Store(NULL);
- no_space_.Release_Store(NULL);
- non_writable_.Release_Store(NULL);
- count_random_reads_ = false;
- manifest_sync_error_.Release_Store(NULL);
- manifest_write_error_.Release_Store(NULL);
- }
+ explicit SpecialEnv(Env* base)
+ : EnvWrapper(base),
+ delay_data_sync_(false),
+ data_sync_error_(false),
+ no_space_(false),
+ non_writable_(false),
+ manifest_sync_error_(false),
+ manifest_write_error_(false),
+ count_random_reads_(false) {}
Status NewWritableFile(const std::string& f, WritableFile** r) {
class DataFile : public WritableFile {
private:
- SpecialEnv* env_;
- WritableFile* base_;
+ SpecialEnv* const env_;
+ WritableFile* const base_;
public:
- DataFile(SpecialEnv* env, WritableFile* base)
- : env_(env),
- base_(base) {
- }
+ DataFile(SpecialEnv* env, WritableFile* base) : env_(env), base_(base) {}
~DataFile() { delete base_; }
Status Append(const Slice& data) {
- if (env_->no_space_.Acquire_Load() != NULL) {
+ if (env_->no_space_.load(std::memory_order_acquire)) {
// Drop writes on the floor
return Status::OK();
} else {
@@ -111,24 +150,26 @@ class SpecialEnv : public EnvWrapper {
Status Close() { return base_->Close(); }
Status Flush() { return base_->Flush(); }
Status Sync() {
- if (env_->data_sync_error_.Acquire_Load() != NULL) {
+ if (env_->data_sync_error_.load(std::memory_order_acquire)) {
return Status::IOError("simulated data sync error");
}
- while (env_->delay_data_sync_.Acquire_Load() != NULL) {
+ while (env_->delay_data_sync_.load(std::memory_order_acquire)) {
DelayMilliseconds(100);
}
return base_->Sync();
}
+ std::string GetName() const override { return ""; }
};
class ManifestFile : public WritableFile {
private:
SpecialEnv* env_;
WritableFile* base_;
+
public:
- ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { }
+ ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) {}
~ManifestFile() { delete base_; }
Status Append(const Slice& data) {
- if (env_->manifest_write_error_.Acquire_Load() != NULL) {
+ if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
return Status::IOError("simulated writer error");
} else {
return base_->Append(data);
@@ -137,24 +178,25 @@ class SpecialEnv : public EnvWrapper {
Status Close() { return base_->Close(); }
Status Flush() { return base_->Flush(); }
Status Sync() {
- if (env_->manifest_sync_error_.Acquire_Load() != NULL) {
+ if (env_->manifest_sync_error_.load(std::memory_order_acquire)) {
return Status::IOError("simulated sync error");
} else {
return base_->Sync();
}
}
+ std::string GetName() const override { return ""; }
};
- if (non_writable_.Acquire_Load() != NULL) {
+ if (non_writable_.load(std::memory_order_acquire)) {
return Status::IOError("simulated write error");
}
Status s = target()->NewWritableFile(f, r);
if (s.ok()) {
- if (strstr(f.c_str(), ".ldb") != NULL ||
- strstr(f.c_str(), ".log") != NULL) {
+ if (strstr(f.c_str(), ".ldb") != nullptr ||
+ strstr(f.c_str(), ".log") != nullptr) {
*r = new DataFile(this, *r);
- } else if (strstr(f.c_str(), "MANIFEST") != NULL) {
+ } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
*r = new ManifestFile(this, *r);
}
}
@@ -166,16 +208,17 @@ class SpecialEnv : public EnvWrapper {
private:
RandomAccessFile* target_;
AtomicCounter* counter_;
+
public:
CountingFile(RandomAccessFile* target, AtomicCounter* counter)
- : target_(target), counter_(counter) {
- }
- virtual ~CountingFile() { delete target_; }
- virtual Status Read(uint64_t offset, size_t n, Slice* result,
- char* scratch) const {
+ : target_(target), counter_(counter) {}
+ ~CountingFile() override { delete target_; }
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
counter_->Increment();
return target_->Read(offset, n, result, scratch);
}
+ std::string GetName() const override { return ""; }
};
Status s = target()->NewRandomAccessFile(f, r);
@@ -187,19 +230,6 @@ class SpecialEnv : public EnvWrapper {
};
class DBTest {
- private:
- const FilterPolicy* filter_policy_;
-
- // Sequence of option configurations to try
- enum OptionConfig {
- kDefault,
- kReuse,
- kFilter,
- kUncompressed,
- kEnd
- };
- int option_config_;
-
public:
std::string dbname_;
SpecialEnv* env_;
@@ -207,12 +237,11 @@ class DBTest {
Options last_options_;
- DBTest() : option_config_(kDefault),
- env_(new SpecialEnv(Env::Default())) {
+ DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) {
filter_policy_ = NewBloomFilterPolicy(10);
dbname_ = test::TmpDir() + "/db_test";
DestroyDB(dbname_, Options());
- db_ = NULL;
+ db_ = nullptr;
Reopen();
}
@@ -255,31 +284,27 @@ class DBTest {
return options;
}
- DBImpl* dbfull() {
- return reinterpret_cast<DBImpl*>(db_);
- }
+ DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }
- void Reopen(Options* options = NULL) {
- ASSERT_OK(TryReopen(options));
- }
+ void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); }
void Close() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
}
- void DestroyAndReopen(Options* options = NULL) {
+ void DestroyAndReopen(Options* options = nullptr) {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
DestroyDB(dbname_, Options());
ASSERT_OK(TryReopen(options));
}
Status TryReopen(Options* options) {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
Options opts;
- if (options != NULL) {
+ if (options != nullptr) {
opts = *options;
} else {
opts = CurrentOptions();
@@ -294,11 +319,9 @@ class DBTest {
return db_->Put(WriteOptions(), k, v);
}
- Status Delete(const std::string& k) {
- return db_->Delete(WriteOptions(), k);
- }
+ Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }
- std::string Get(const std::string& k, const Snapshot* snapshot = NULL) {
+ std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
ReadOptions options;
options.snapshot = snapshot;
std::string result;
@@ -382,10 +405,9 @@ class DBTest {
int NumTableFilesAtLevel(int level) {
std::string property;
- ASSERT_TRUE(
- db_->GetProperty("leveldb.num-files-at-level" + NumberToString(level),
- &property));
- return atoi(property.c_str());
+ ASSERT_TRUE(db_->GetProperty(
+ "leveldb.num-files-at-level" + NumberToString(level), &property));
+ return std::stoi(property);
}
int TotalTableFiles() {
@@ -431,11 +453,12 @@ class DBTest {
}
// Do n memtable compactions, each of which produces an sstable
- // covering the range [small,large].
- void MakeTables(int n, const std::string& small, const std::string& large) {
+ // covering the range [small_key,large_key].
+ void MakeTables(int n, const std::string& small_key,
+ const std::string& large_key) {
for (int i = 0; i < n; i++) {
- Put(small, "begin");
- Put(large, "end");
+ Put(small_key, "begin");
+ Put(large_key, "end");
dbfull()->TEST_CompactMemTable();
}
}
@@ -448,9 +471,9 @@ class DBTest {
void DumpFileCounts(const char* label) {
fprintf(stderr, "---\n%s:\n", label);
- fprintf(stderr, "maxoverlap: %lld\n",
- static_cast<long long>(
- dbfull()->TEST_MaxNextLevelOverlappingBytes()));
+ fprintf(
+ stderr, "maxoverlap: %lld\n",
+ static_cast<long long>(dbfull()->TEST_MaxNextLevelOverlappingBytes()));
for (int level = 0; level < config::kNumLevels; level++) {
int num = NumTableFilesAtLevel(level);
if (num > 0) {
@@ -506,15 +529,42 @@ class DBTest {
}
return files_renamed;
}
+
+ private:
+ // Sequence of option configurations to try
+ enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };
+
+ const FilterPolicy* filter_policy_;
+ int option_config_;
};
TEST(DBTest, Empty) {
do {
- ASSERT_TRUE(db_ != NULL);
+ ASSERT_TRUE(db_ != nullptr);
ASSERT_EQ("NOT_FOUND", Get("foo"));
} while (ChangeOptions());
}
+TEST(DBTest, EmptyKey) {
+ do {
+ ASSERT_OK(Put("", "v1"));
+ ASSERT_EQ("v1", Get(""));
+ ASSERT_OK(Put("", "v2"));
+ ASSERT_EQ("v2", Get(""));
+ } while (ChangeOptions());
+}
+
+TEST(DBTest, EmptyValue) {
+ do {
+ ASSERT_OK(Put("key", "v1"));
+ ASSERT_EQ("v1", Get("key"));
+ ASSERT_OK(Put("key", ""));
+ ASSERT_EQ("", Get("key"));
+ ASSERT_OK(Put("key", "v2"));
+ ASSERT_EQ("v2", Get("key"));
+ } while (ChangeOptions());
+}
+
TEST(DBTest, ReadWrite) {
do {
ASSERT_OK(Put("foo", "v1"));
@@ -547,11 +597,13 @@ TEST(DBTest, GetFromImmutableLayer) {
ASSERT_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
- env_->delay_data_sync_.Release_Store(env_); // Block sync calls
- Put("k1", std::string(100000, 'x')); // Fill memtable
- Put("k2", std::string(100000, 'y')); // Trigger compaction
+ // Block sync calls.
+ env_->delay_data_sync_.store(true, std::memory_order_release);
+ Put("k1", std::string(100000, 'x')); // Fill memtable.
+ Put("k2", std::string(100000, 'y')); // Trigger compaction.
ASSERT_EQ("v1", Get("foo"));
- env_->delay_data_sync_.Release_Store(NULL); // Release sync calls
+ // Release sync calls.
+ env_->delay_data_sync_.store(false, std::memory_order_release);
} while (ChangeOptions());
}
@@ -568,9 +620,9 @@ TEST(DBTest, GetMemUsage) {
ASSERT_OK(Put("foo", "v1"));
std::string val;
ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
- int mem_usage = atoi(val.c_str());
+ int mem_usage = std::stoi(val);
ASSERT_GT(mem_usage, 0);
- ASSERT_LT(mem_usage, 5*1024*1024);
+ ASSERT_LT(mem_usage, 5 * 1024 * 1024);
} while (ChangeOptions());
}
@@ -592,6 +644,55 @@ TEST(DBTest, GetSnapshot) {
} while (ChangeOptions());
}
+TEST(DBTest, GetIdenticalSnapshots) {
+ do {
+ // Try with both a short key and a long key
+ for (int i = 0; i < 2; i++) {
+ std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
+ ASSERT_OK(Put(key, "v1"));
+ const Snapshot* s1 = db_->GetSnapshot();
+ const Snapshot* s2 = db_->GetSnapshot();
+ const Snapshot* s3 = db_->GetSnapshot();
+ ASSERT_OK(Put(key, "v2"));
+ ASSERT_EQ("v2", Get(key));
+ ASSERT_EQ("v1", Get(key, s1));
+ ASSERT_EQ("v1", Get(key, s2));
+ ASSERT_EQ("v1", Get(key, s3));
+ db_->ReleaseSnapshot(s1);
+ dbfull()->TEST_CompactMemTable();
+ ASSERT_EQ("v2", Get(key));
+ ASSERT_EQ("v1", Get(key, s2));
+ db_->ReleaseSnapshot(s2);
+ ASSERT_EQ("v1", Get(key, s3));
+ db_->ReleaseSnapshot(s3);
+ }
+ } while (ChangeOptions());
+}
+
+TEST(DBTest, IterateOverEmptySnapshot) {
+ do {
+ const Snapshot* snapshot = db_->GetSnapshot();
+ ReadOptions read_options;
+ read_options.snapshot = snapshot;
+ ASSERT_OK(Put("foo", "v1"));
+ ASSERT_OK(Put("foo", "v2"));
+
+ Iterator* iterator1 = db_->NewIterator(read_options);
+ iterator1->SeekToFirst();
+ ASSERT_TRUE(!iterator1->Valid());
+ delete iterator1;
+
+ dbfull()->TEST_CompactMemTable();
+
+ Iterator* iterator2 = db_->NewIterator(read_options);
+ iterator2->SeekToFirst();
+ ASSERT_TRUE(!iterator2->Valid());
+ delete iterator2;
+
+ db_->ReleaseSnapshot(snapshot);
+ } while (ChangeOptions());
+}
+
TEST(DBTest, GetLevel0Ordering) {
do {
// Check that we process level-0 files in correct order. The code
@@ -646,8 +747,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
// Step 1: First place sstables in levels 0 and 2
int compaction_count = 0;
- while (NumTableFilesAtLevel(0) == 0 ||
- NumTableFilesAtLevel(2) == 0) {
+ while (NumTableFilesAtLevel(0) == 0 || NumTableFilesAtLevel(2) == 0) {
ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
compaction_count++;
Put("a", "begin");
@@ -656,7 +756,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
}
// Step 2: clear level 1 if necessary.
- dbfull()->TEST_CompactRange(1, NULL, NULL);
+ dbfull()->TEST_CompactRange(1, nullptr, nullptr);
ASSERT_EQ(NumTableFilesAtLevel(0), 1);
ASSERT_EQ(NumTableFilesAtLevel(1), 0);
ASSERT_EQ(NumTableFilesAtLevel(2), 1);
@@ -784,10 +884,10 @@ TEST(DBTest, IterMulti) {
ASSERT_EQ(IterStatus(iter), "b->vb");
// Make sure iter stays at snapshot
- ASSERT_OK(Put("a", "va2"));
+ ASSERT_OK(Put("a", "va2"));
ASSERT_OK(Put("a2", "va3"));
- ASSERT_OK(Put("b", "vb2"));
- ASSERT_OK(Put("c", "vc2"));
+ ASSERT_OK(Put("b", "vb2"));
+ ASSERT_OK(Put("c", "vc2"));
ASSERT_OK(Delete("b"));
iter->SeekToFirst();
ASSERT_EQ(IterStatus(iter), "a->va");
@@ -978,7 +1078,7 @@ TEST(DBTest, RecoverWithLargeLog) {
TEST(DBTest, CompactionsGenerateMultipleFiles) {
Options options = CurrentOptions();
- options.write_buffer_size = 100000000; // Large write buffer
+ options.write_buffer_size = 100000000; // Large write buffer
Reopen(&options);
Random rnd(301);
@@ -993,7 +1093,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) {
// Reopening moves updates to level-0
Reopen(&options);
- dbfull()->TEST_CompactRange(0, NULL, NULL);
+ dbfull()->TEST_CompactRange(0, nullptr, nullptr);
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
ASSERT_GT(NumTableFilesAtLevel(1), 1);
@@ -1017,7 +1117,7 @@ TEST(DBTest, RepeatedWritesToSameKey) {
for (int i = 0; i < 5 * kMaxFiles; i++) {
Put("key", value);
ASSERT_LE(TotalTableFiles(), kMaxFiles);
- fprintf(stderr, "after %d: %d files\n", int(i+1), TotalTableFiles());
+ fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles());
}
}
@@ -1044,29 +1144,28 @@ TEST(DBTest, SparseMerge) {
}
Put("C", "vc");
dbfull()->TEST_CompactMemTable();
- dbfull()->TEST_CompactRange(0, NULL, NULL);
+ dbfull()->TEST_CompactRange(0, nullptr, nullptr);
// Make sparse update
- Put("A", "va2");
+ Put("A", "va2");
Put("B100", "bvalue2");
- Put("C", "vc2");
+ Put("C", "vc2");
dbfull()->TEST_CompactMemTable();
// Compactions should not cause us to create a situation where
// a file overlaps too much data at the next level.
- ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
- dbfull()->TEST_CompactRange(0, NULL, NULL);
- ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
- dbfull()->TEST_CompactRange(1, NULL, NULL);
- ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+ ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
+ dbfull()->TEST_CompactRange(0, nullptr, nullptr);
+ ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
+ dbfull()->TEST_CompactRange(1, nullptr, nullptr);
+ ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
}
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
bool result = (val >= low) && (val <= high);
if (!result) {
fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
- (unsigned long long)(val),
- (unsigned long long)(low),
+ (unsigned long long)(val), (unsigned long long)(low),
(unsigned long long)(high));
}
return result;
@@ -1075,7 +1174,7 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
TEST(DBTest, ApproximateSizes) {
do {
Options options = CurrentOptions();
- options.write_buffer_size = 100000000; // Large write buffer
+ options.write_buffer_size = 100000000; // Large write buffer
options.compression = kNoCompression;
DestroyAndReopen();
@@ -1110,12 +1209,13 @@ TEST(DBTest, ApproximateSizes) {
for (int compact_start = 0; compact_start < N; compact_start += 10) {
for (int i = 0; i < N; i += 10) {
- ASSERT_TRUE(Between(Size("", Key(i)), S1*i, S2*i));
- ASSERT_TRUE(Between(Size("", Key(i)+".suffix"), S1*(i+1), S2*(i+1)));
- ASSERT_TRUE(Between(Size(Key(i), Key(i+10)), S1*10, S2*10));
+ ASSERT_TRUE(Between(Size("", Key(i)), S1 * i, S2 * i));
+ ASSERT_TRUE(Between(Size("", Key(i) + ".suffix"), S1 * (i + 1),
+ S2 * (i + 1)));
+ ASSERT_TRUE(Between(Size(Key(i), Key(i + 10)), S1 * 10, S2 * 10));
}
- ASSERT_TRUE(Between(Size("", Key(50)), S1*50, S2*50));
- ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), S1*50, S2*50));
+ ASSERT_TRUE(Between(Size("", Key(50)), S1 * 50, S2 * 50));
+ ASSERT_TRUE(Between(Size("", Key(50) + ".suffix"), S1 * 50, S2 * 50));
std::string cstart_str = Key(compact_start);
std::string cend_str = Key(compact_start + 9);
@@ -1168,7 +1268,7 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));
- dbfull()->TEST_CompactRange(0, NULL, NULL);
+ dbfull()->TEST_CompactRange(0, nullptr, nullptr);
}
} while (ChangeOptions());
}
@@ -1182,7 +1282,7 @@ TEST(DBTest, IteratorPinsRef) {
// Write to force compactions
Put("foo", "newvalue1");
for (int i = 0; i < 100; i++) {
- ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
+ ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
}
Put("foo", "newvalue2");
@@ -1234,7 +1334,7 @@ TEST(DBTest, HiddenValuesAreRemoved) {
Put("pastfoo", "v");
const Snapshot* snapshot = db_->GetSnapshot();
Put("foo", "tiny");
- Put("pastfoo2", "v2"); // Advance sequence number one more
+ Put("pastfoo2", "v2"); // Advance sequence number one more
ASSERT_OK(dbfull()->TEST_CompactMemTable());
ASSERT_GT(NumTableFilesAtLevel(0), 0);
@@ -1244,11 +1344,11 @@ TEST(DBTest, HiddenValuesAreRemoved) {
db_->ReleaseSnapshot(snapshot);
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]");
Slice x("x");
- dbfull()->TEST_CompactRange(0, NULL, &x);
+ dbfull()->TEST_CompactRange(0, nullptr, &x);
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
ASSERT_GE(NumTableFilesAtLevel(1), 1);
- dbfull()->TEST_CompactRange(1, NULL, &x);
+ dbfull()->TEST_CompactRange(1, nullptr, &x);
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));
@@ -1259,14 +1359,14 @@ TEST(DBTest, DeletionMarkers1) {
Put("foo", "v1");
ASSERT_OK(dbfull()->TEST_CompactMemTable());
const int last = config::kMaxMemCompactLevel;
- ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
+ ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
// Place a table at level last-1 to prevent merging with preceding mutation
Put("a", "begin");
Put("z", "end");
dbfull()->TEST_CompactMemTable();
ASSERT_EQ(NumTableFilesAtLevel(last), 1);
- ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
+ ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);
Delete("foo");
Put("foo", "v2");
@@ -1274,11 +1374,11 @@ TEST(DBTest, DeletionMarkers1) {
ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
Slice z("z");
- dbfull()->TEST_CompactRange(last-2, NULL, &z);
+ dbfull()->TEST_CompactRange(last - 2, nullptr, &z);
// DEL eliminated, but v1 remains because we aren't compacting that level
// (DEL can be eliminated because v2 hides v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
- dbfull()->TEST_CompactRange(last-1, NULL, NULL);
+ dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
// Merging last-1 w/ last, so we are the base level for "foo", so
// DEL is removed. (as is v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
@@ -1288,23 +1388,23 @@ TEST(DBTest, DeletionMarkers2) {
Put("foo", "v1");
ASSERT_OK(dbfull()->TEST_CompactMemTable());
const int last = config::kMaxMemCompactLevel;
- ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
+ ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
// Place a table at level last-1 to prevent merging with preceding mutation
Put("a", "begin");
Put("z", "end");
dbfull()->TEST_CompactMemTable();
ASSERT_EQ(NumTableFilesAtLevel(last), 1);
- ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
+ ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);
Delete("foo");
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
- dbfull()->TEST_CompactRange(last-2, NULL, NULL);
+ dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr);
// DEL kept: "last" file overlaps
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
- dbfull()->TEST_CompactRange(last-1, NULL, NULL);
+ dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
// Merging last-1 w/ last, so we are the base level for "foo", so
// DEL is removed. (as is v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
@@ -1314,7 +1414,8 @@ TEST(DBTest, OverlapInLevel0) {
do {
ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";
- // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
+ // Fill levels 1 and 2 to disable the pushing of new memtables to levels >
+ // 0.
ASSERT_OK(Put("100", "v100"));
ASSERT_OK(Put("999", "v999"));
dbfull()->TEST_CompactMemTable();
@@ -1337,8 +1438,8 @@ TEST(DBTest, OverlapInLevel0) {
ASSERT_EQ("2,1,1", FilesPerLevel());
// Compact away the placeholder files we created initially
- dbfull()->TEST_CompactRange(1, NULL, NULL);
- dbfull()->TEST_CompactRange(2, NULL, NULL);
+ dbfull()->TEST_CompactRange(1, nullptr, nullptr);
+ dbfull()->TEST_CompactRange(2, nullptr, nullptr);
ASSERT_EQ("2", FilesPerLevel());
// Do a memtable compaction. Before bug-fix, the compaction would
@@ -1370,21 +1471,21 @@ TEST(DBTest, L0_CompactionBug_Issue44_a) {
TEST(DBTest, L0_CompactionBug_Issue44_b) {
Reopen();
- Put("","");
+ Put("", "");
Reopen();
Delete("e");
- Put("","");
+ Put("", "");
Reopen();
Put("c", "cv");
Reopen();
- Put("","");
+ Put("", "");
Reopen();
- Put("","");
+ Put("", "");
DelayMilliseconds(1000); // Wait for compaction to finish
Reopen();
- Put("d","dv");
+ Put("d", "dv");
Reopen();
- Put("","");
+ Put("", "");
Reopen();
Delete("d");
Delete("b");
@@ -1394,17 +1495,26 @@ TEST(DBTest, L0_CompactionBug_Issue44_b) {
ASSERT_EQ("(->)(c->cv)", Contents());
}
+TEST(DBTest, Fflush_Issue474) {
+ static const int kNum = 100000;
+ Random rnd(test::RandomSeed());
+ for (int i = 0; i < kNum; i++) {
+ fflush(nullptr);
+ ASSERT_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100)));
+ }
+}
+
TEST(DBTest, ComparatorCheck) {
class NewComparator : public Comparator {
public:
- virtual const char* Name() const { return "leveldb.NewComparator"; }
- virtual int Compare(const Slice& a, const Slice& b) const {
+ const char* Name() const override { return "leveldb.NewComparator"; }
+ int Compare(const Slice& a, const Slice& b) const override {
return BytewiseComparator()->Compare(a, b);
}
- virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
+ void FindShortestSeparator(std::string* s, const Slice& l) const override {
BytewiseComparator()->FindShortestSeparator(s, l);
}
- virtual void FindShortSuccessor(std::string* key) const {
+ void FindShortSuccessor(std::string* key) const override {
BytewiseComparator()->FindShortSuccessor(key);
}
};
@@ -1420,21 +1530,22 @@ TEST(DBTest, ComparatorCheck) {
TEST(DBTest, CustomComparator) {
class NumberComparator : public Comparator {
public:
- virtual const char* Name() const { return "test.NumberComparator"; }
- virtual int Compare(const Slice& a, const Slice& b) const {
+ const char* Name() const override { return "test.NumberComparator"; }
+ int Compare(const Slice& a, const Slice& b) const override {
return ToNumber(a) - ToNumber(b);
}
- virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
- ToNumber(*s); // Check format
- ToNumber(l); // Check format
+ void FindShortestSeparator(std::string* s, const Slice& l) const override {
+ ToNumber(*s); // Check format
+ ToNumber(l); // Check format
}
- virtual void FindShortSuccessor(std::string* key) const {
- ToNumber(*key); // Check format
+ void FindShortSuccessor(std::string* key) const override {
+ ToNumber(*key); // Check format
}
+
private:
static int ToNumber(const Slice& x) {
// Check that there are no extra characters.
- ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size()-1] == ']')
+ ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
<< EscapeString(x);
int val;
char ignored;
@@ -1447,7 +1558,7 @@ TEST(DBTest, CustomComparator) {
Options new_options = CurrentOptions();
new_options.create_if_missing = true;
new_options.comparator = &cmp;
- new_options.filter_policy = NULL; // Cannot use bloom filters
+ new_options.filter_policy = nullptr; // Cannot use bloom filters
new_options.write_buffer_size = 1000; // Compact more often
DestroyAndReopen(&new_options);
ASSERT_OK(Put("[10]", "ten"));
@@ -1465,7 +1576,7 @@ TEST(DBTest, CustomComparator) {
for (int run = 0; run < 2; run++) {
for (int i = 0; i < 1000; i++) {
char buf[100];
- snprintf(buf, sizeof(buf), "[%d]", i*10);
+ snprintf(buf, sizeof(buf), "[%d]", i * 10);
ASSERT_OK(Put(buf, buf));
}
Compact("[0]", "[1000000]");
@@ -1502,7 +1613,7 @@ TEST(DBTest, ManualCompaction) {
// Compact all
MakeTables(1, "a", "z");
ASSERT_EQ("0,1,2", FilesPerLevel());
- db_->CompactRange(NULL, NULL);
+ db_->CompactRange(nullptr, nullptr);
ASSERT_EQ("0,0,1", FilesPerLevel());
}
@@ -1511,42 +1622,94 @@ TEST(DBTest, DBOpen_Options) {
DestroyDB(dbname, Options());
// Does not exist, and create_if_missing == false: error
- DB* db = NULL;
+ DB* db = nullptr;
Options opts;
opts.create_if_missing = false;
Status s = DB::Open(opts, dbname, &db);
- ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != NULL);
- ASSERT_TRUE(db == NULL);
+ ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
+ ASSERT_TRUE(db == nullptr);
// Does not exist, and create_if_missing == true: OK
opts.create_if_missing = true;
s = DB::Open(opts, dbname, &db);
ASSERT_OK(s);
- ASSERT_TRUE(db != NULL);
+ ASSERT_TRUE(db != nullptr);
delete db;
- db = NULL;
+ db = nullptr;
// Does exist, and error_if_exists == true: error
opts.create_if_missing = false;
opts.error_if_exists = true;
s = DB::Open(opts, dbname, &db);
- ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != NULL);
- ASSERT_TRUE(db == NULL);
+ ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
+ ASSERT_TRUE(db == nullptr);
// Does exist, and error_if_exists == false: OK
opts.create_if_missing = true;
opts.error_if_exists = false;
s = DB::Open(opts, dbname, &db);
ASSERT_OK(s);
- ASSERT_TRUE(db != NULL);
+ ASSERT_TRUE(db != nullptr);
+
+ delete db;
+ db = nullptr;
+}
+
+TEST(DBTest, DestroyEmptyDir) {
+ std::string dbname = test::TmpDir() + "/db_empty_dir";
+ TestEnv env(Env::Default());
+ env.DeleteDir(dbname);
+ ASSERT_TRUE(!env.FileExists(dbname));
+
+ Options opts;
+ opts.env = &env;
+
+ ASSERT_OK(env.CreateDir(dbname));
+ ASSERT_TRUE(env.FileExists(dbname));
+ std::vector<std::string> children;
+ ASSERT_OK(env.GetChildren(dbname, &children));
+ // The stock Env's do not filter out '.' and '..' special files.
+ ASSERT_EQ(2, children.size());
+ ASSERT_OK(DestroyDB(dbname, opts));
+ ASSERT_TRUE(!env.FileExists(dbname));
+
+ // Should also be destroyed if Env is filtering out dot files.
+ env.SetIgnoreDotFiles(true);
+ ASSERT_OK(env.CreateDir(dbname));
+ ASSERT_TRUE(env.FileExists(dbname));
+ ASSERT_OK(env.GetChildren(dbname, &children));
+ ASSERT_EQ(0, children.size());
+ ASSERT_OK(DestroyDB(dbname, opts));
+ ASSERT_TRUE(!env.FileExists(dbname));
+}
+
+TEST(DBTest, DestroyOpenDB) {
+ std::string dbname = test::TmpDir() + "/open_db_dir";
+ env_->DeleteDir(dbname);
+ ASSERT_TRUE(!env_->FileExists(dbname));
+
+ Options opts;
+ opts.create_if_missing = true;
+ DB* db = nullptr;
+ ASSERT_OK(DB::Open(opts, dbname, &db));
+ ASSERT_TRUE(db != nullptr);
+
+ // Must fail to destroy an open db.
+ ASSERT_TRUE(env_->FileExists(dbname));
+ ASSERT_TRUE(!DestroyDB(dbname, Options()).ok());
+ ASSERT_TRUE(env_->FileExists(dbname));
delete db;
- db = NULL;
+ db = nullptr;
+
+ // Should succeed destroying a closed db.
+ ASSERT_OK(DestroyDB(dbname, Options()));
+ ASSERT_TRUE(!env_->FileExists(dbname));
}
TEST(DBTest, Locking) {
- DB* db2 = NULL;
+ DB* db2 = nullptr;
Status s = DB::Open(CurrentOptions(), dbname_, &db2);
ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db";
}
@@ -1561,13 +1724,14 @@ TEST(DBTest, NoSpace) {
ASSERT_EQ("v1", Get("foo"));
Compact("a", "z");
const int num_files = CountFiles();
- env_->no_space_.Release_Store(env_); // Force out-of-space errors
+ // Force out-of-space errors.
+ env_->no_space_.store(true, std::memory_order_release);
for (int i = 0; i < 10; i++) {
- for (int level = 0; level < config::kNumLevels-1; level++) {
- dbfull()->TEST_CompactRange(level, NULL, NULL);
+ for (int level = 0; level < config::kNumLevels - 1; level++) {
+ dbfull()->TEST_CompactRange(level, nullptr, nullptr);
}
}
- env_->no_space_.Release_Store(NULL);
+ env_->no_space_.store(false, std::memory_order_release);
ASSERT_LT(CountFiles(), num_files + 3);
}
@@ -1577,7 +1741,8 @@ TEST(DBTest, NonWritableFileSystem) {
options.env = env_;
Reopen(&options);
ASSERT_OK(Put("foo", "v1"));
- env_->non_writable_.Release_Store(env_); // Force errors for new files
+ // Force errors for new files.
+ env_->non_writable_.store(true, std::memory_order_release);
std::string big(100000, 'x');
int errors = 0;
for (int i = 0; i < 20; i++) {
@@ -1588,7 +1753,7 @@ TEST(DBTest, NonWritableFileSystem) {
}
}
ASSERT_GT(errors, 0);
- env_->non_writable_.Release_Store(NULL);
+ env_->non_writable_.store(false, std::memory_order_release);
}
TEST(DBTest, WriteSyncError) {
@@ -1598,7 +1763,7 @@ TEST(DBTest, WriteSyncError) {
Options options = CurrentOptions();
options.env = env_;
Reopen(&options);
- env_->data_sync_error_.Release_Store(env_);
+ env_->data_sync_error_.store(true, std::memory_order_release);
// (b) Normal write should succeed
WriteOptions w;
@@ -1612,7 +1777,7 @@ TEST(DBTest, WriteSyncError) {
ASSERT_EQ("NOT_FOUND", Get("k2"));
// (d) make sync behave normally
- env_->data_sync_error_.Release_Store(NULL);
+ env_->data_sync_error_.store(false, std::memory_order_release);
// (e) Do a non-sync write; should fail
w.sync = false;
@@ -1632,9 +1797,8 @@ TEST(DBTest, ManifestWriteError) {
// We iterate twice. In the second iteration, everything is the
// same except the log record never makes it to the MANIFEST file.
for (int iter = 0; iter < 2; iter++) {
- port::AtomicPointer* error_type = (iter == 0)
- ? &env_->manifest_sync_error_
- : &env_->manifest_write_error_;
+ std::atomic<bool>* error_type = (iter == 0) ? &env_->manifest_sync_error_
+ : &env_->manifest_write_error_;
// Insert foo=>bar mapping
Options options = CurrentOptions();
@@ -1649,15 +1813,15 @@ TEST(DBTest, ManifestWriteError) {
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("bar", Get("foo"));
const int last = config::kMaxMemCompactLevel;
- ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo=>bar is now in last level
+ ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo=>bar is now in last level
// Merging compaction (will fail)
- error_type->Release_Store(env_);
- dbfull()->TEST_CompactRange(last, NULL, NULL); // Should fail
+ error_type->store(true, std::memory_order_release);
+ dbfull()->TEST_CompactRange(last, nullptr, nullptr); // Should fail
ASSERT_EQ("bar", Get("foo"));
// Recovery: should not lose data
- error_type->Release_Store(NULL);
+ error_type->store(false, std::memory_order_release);
Reopen(&options);
ASSERT_EQ("bar", Get("foo"));
}
@@ -1677,8 +1841,7 @@ TEST(DBTest, MissingSSTFile) {
options.paranoid_checks = true;
Status s = TryReopen(&options);
ASSERT_TRUE(!s.ok());
- ASSERT_TRUE(s.ToString().find("issing") != std::string::npos)
- << s.ToString();
+ ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString();
}
TEST(DBTest, StillReadSST) {
@@ -1728,7 +1891,7 @@ TEST(DBTest, BloomFilter) {
dbfull()->TEST_CompactMemTable();
// Prevent auto compactions triggered by seeks
- env_->delay_data_sync_.Release_Store(env_);
+ env_->delay_data_sync_.store(true, std::memory_order_release);
// Lookup present keys. Should rarely read from small sstable.
env_->random_read_counter_.Reset();
@@ -1738,7 +1901,7 @@ TEST(DBTest, BloomFilter) {
int reads = env_->random_read_counter_.Read();
fprintf(stderr, "%d present => %d reads\n", N, reads);
ASSERT_GE(reads, N);
- ASSERT_LE(reads, N + 2*N/100);
+ ASSERT_LE(reads, N + 2 * N / 100);
// Lookup present keys. Should rarely read from either sstable.
env_->random_read_counter_.Reset();
@@ -1747,9 +1910,9 @@ TEST(DBTest, BloomFilter) {
}
reads = env_->random_read_counter_.Read();
fprintf(stderr, "%d missing => %d reads\n", N, reads);
- ASSERT_LE(reads, 3*N/100);
+ ASSERT_LE(reads, 3 * N / 100);
- env_->delay_data_sync_.Release_Store(NULL);
+ env_->delay_data_sync_.store(false, std::memory_order_release);
Close();
delete options.block_cache;
delete options.filter_policy;
@@ -1764,9 +1927,9 @@ static const int kNumKeys = 1000;
struct MTState {
DBTest* test;
- port::AtomicPointer stop;
- port::AtomicPointer counter[kNumThreads];
- port::AtomicPointer thread_done[kNumThreads];
+ std::atomic<bool> stop;
+ std::atomic<int> counter[kNumThreads];
+ std::atomic<bool> thread_done[kNumThreads];
};
struct MTThread {
@@ -1778,13 +1941,13 @@ static void MTThreadBody(void* arg) {
MTThread* t = reinterpret_cast<MTThread*>(arg);
int id = t->id;
DB* db = t->state->test->db_;
- uintptr_t counter = 0;
+ int counter = 0;
fprintf(stderr, "... starting thread %d\n", id);
Random rnd(1000 + id);
std::string value;
char valbuf[1500];
- while (t->state->stop.Acquire_Load() == NULL) {
- t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter));
+ while (!t->state->stop.load(std::memory_order_acquire)) {
+ t->state->counter[id].store(counter, std::memory_order_release);
int key = rnd.Uniform(kNumKeys);
char keybuf[20];
@@ -1793,8 +1956,8 @@ static void MTThreadBody(void* arg) {
if (rnd.OneIn(2)) {
// Write values of the form <key, my id, counter>.
// We add some padding for force compactions.
- snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d",
- key, id, static_cast<int>(counter));
+ snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
+ static_cast<int>(counter));
ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
} else {
// Read a value and verify that it matches the pattern written above.
@@ -1809,14 +1972,13 @@ static void MTThreadBody(void* arg) {
ASSERT_EQ(k, key);
ASSERT_GE(w, 0);
ASSERT_LT(w, kNumThreads);
- ASSERT_LE(static_cast<uintptr_t>(c), reinterpret_cast<uintptr_t>(
- t->state->counter[w].Acquire_Load()));
+ ASSERT_LE(c, t->state->counter[w].load(std::memory_order_acquire));
}
}
counter++;
}
- t->state->thread_done[id].Release_Store(t);
- fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
+ t->state->thread_done[id].store(true, std::memory_order_release);
+ fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter);
}
} // namespace
@@ -1826,10 +1988,10 @@ TEST(DBTest, MultiThreaded) {
// Initialize state
MTState mt;
mt.test = this;
- mt.stop.Release_Store(0);
+ mt.stop.store(false, std::memory_order_release);
for (int id = 0; id < kNumThreads; id++) {
- mt.counter[id].Release_Store(0);
- mt.thread_done[id].Release_Store(0);
+ mt.counter[id].store(false, std::memory_order_release);
+ mt.thread_done[id].store(false, std::memory_order_release);
}
// Start threads
@@ -1844,9 +2006,9 @@ TEST(DBTest, MultiThreaded) {
DelayMilliseconds(kTestSeconds * 1000);
// Stop the threads and wait for them to finish
- mt.stop.Release_Store(&mt);
+ mt.stop.store(true, std::memory_order_release);
for (int id = 0; id < kNumThreads; id++) {
- while (mt.thread_done[id].Acquire_Load() == NULL) {
+ while (!mt.thread_done[id].load(std::memory_order_acquire)) {
DelayMilliseconds(100);
}
}
@@ -1857,28 +2019,28 @@ namespace {
typedef std::map<std::string, std::string> KVMap;
}
-class ModelDB: public DB {
+class ModelDB : public DB {
public:
class ModelSnapshot : public Snapshot {
public:
KVMap map_;
};
- explicit ModelDB(const Options& options): options_(options) { }
- ~ModelDB() { }
- virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
+ explicit ModelDB(const Options& options) : options_(options) {}
+ ~ModelDB() override = default;
+ Status Put(const WriteOptions& o, const Slice& k, const Slice& v) override {
return DB::Put(o, k, v);
}
- virtual Status Delete(const WriteOptions& o, const Slice& key) {
+ Status Delete(const WriteOptions& o, const Slice& key) override {
return DB::Delete(o, key);
}
- virtual Status Get(const ReadOptions& options,
- const Slice& key, std::string* value) {
- assert(false); // Not implemented
+ Status Get(const ReadOptions& options, const Slice& key,
+ std::string* value) override {
+ assert(false); // Not implemented
return Status::NotFound(key);
}
- virtual Iterator* NewIterator(const ReadOptions& options) {
- if (options.snapshot == NULL) {
+ Iterator* NewIterator(const ReadOptions& options) override {
+ if (options.snapshot == nullptr) {
KVMap* saved = new KVMap;
*saved = map_;
return new ModelIter(saved, true);
@@ -1888,68 +2050,65 @@ class ModelDB: public DB {
return new ModelIter(snapshot_state, false);
}
}
- virtual const Snapshot* GetSnapshot() {
+ const Snapshot* GetSnapshot() override {
ModelSnapshot* snapshot = new ModelSnapshot;
snapshot->map_ = map_;
return snapshot;
}
- virtual void ReleaseSnapshot(const Snapshot* snapshot) {
+ void ReleaseSnapshot(const Snapshot* snapshot) override {
delete reinterpret_cast<const ModelSnapshot*>(snapshot);
}
- virtual Status Write(const WriteOptions& options, WriteBatch* batch) {
+ Status Write(const WriteOptions& options, WriteBatch* batch) override {
class Handler : public WriteBatch::Handler {
public:
KVMap* map_;
- virtual void Put(const Slice& key, const Slice& value) {
+ void Put(const Slice& key, const Slice& value) override {
(*map_)[key.ToString()] = value.ToString();
}
- virtual void Delete(const Slice& key) {
- map_->erase(key.ToString());
- }
+ void Delete(const Slice& key) override { map_->erase(key.ToString()); }
};
Handler handler;
handler.map_ = &map_;
return batch->Iterate(&handler);
}
- virtual bool GetProperty(const Slice& property, std::string* value) {
+ bool GetProperty(const Slice& property, std::string* value) override {
return false;
}
- virtual void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) {
+ void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) override {
for (int i = 0; i < n; i++) {
sizes[i] = 0;
}
}
- virtual void CompactRange(const Slice* start, const Slice* end) {
- }
+ void CompactRange(const Slice* start, const Slice* end) override {}
private:
- class ModelIter: public Iterator {
+ class ModelIter : public Iterator {
public:
ModelIter(const KVMap* map, bool owned)
- : map_(map), owned_(owned), iter_(map_->end()) {
- }
- ~ModelIter() {
+ : map_(map), owned_(owned), iter_(map_->end()) {}
+ ~ModelIter() override {
if (owned_) delete map_;
}
- virtual bool Valid() const { return iter_ != map_->end(); }
- virtual void SeekToFirst() { iter_ = map_->begin(); }
- virtual void SeekToLast() {
+ bool Valid() const override { return iter_ != map_->end(); }
+ void SeekToFirst() override { iter_ = map_->begin(); }
+ void SeekToLast() override {
if (map_->empty()) {
iter_ = map_->end();
} else {
iter_ = map_->find(map_->rbegin()->first);
}
}
- virtual void Seek(const Slice& k) {
+ void Seek(const Slice& k) override {
iter_ = map_->lower_bound(k.ToString());
}
- virtual void Next() { ++iter_; }
- virtual void Prev() { --iter_; }
- virtual Slice key() const { return iter_->first; }
- virtual Slice value() const { return iter_->second; }
- virtual Status status() const { return Status::OK(); }
+ void Next() override { ++iter_; }
+ void Prev() override { --iter_; }
+ Slice key() const override { return iter_->first; }
+ Slice value() const override { return iter_->second; }
+ Status status() const override { return Status::OK(); }
+
private:
const KVMap* const map_;
const bool owned_; // Do we own map_
@@ -1959,16 +2118,7 @@ class ModelDB: public DB {
KVMap map_;
};
-static std::string RandomKey(Random* rnd) {
- int len = (rnd->OneIn(3)
- ? 1 // Short sometimes to encourage collisions
- : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
- return test::RandomKey(rnd, len);
-}
-
-static bool CompareIterators(int step,
- DB* model,
- DB* db,
+static bool CompareIterators(int step, DB* model, DB* db,
const Snapshot* model_snap,
const Snapshot* db_snap) {
ReadOptions options;
@@ -1979,12 +2129,10 @@ static bool CompareIterators(int step,
bool ok = true;
int count = 0;
for (miter->SeekToFirst(), dbiter->SeekToFirst();
- ok && miter->Valid() && dbiter->Valid();
- miter->Next(), dbiter->Next()) {
+ ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
count++;
if (miter->key().compare(dbiter->key()) != 0) {
- fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n",
- step,
+ fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
EscapeString(miter->key()).c_str(),
EscapeString(dbiter->key()).c_str());
ok = false;
@@ -1993,8 +2141,7 @@ static bool CompareIterators(int step,
if (miter->value().compare(dbiter->value()) != 0) {
fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
- step,
- EscapeString(miter->key()).c_str(),
+ step, EscapeString(miter->key()).c_str(),
EscapeString(miter->value()).c_str(),
EscapeString(miter->value()).c_str());
ok = false;
@@ -2019,8 +2166,8 @@ TEST(DBTest, Randomized) {
do {
ModelDB model(CurrentOptions());
const int N = 10000;
- const Snapshot* model_snap = NULL;
- const Snapshot* db_snap = NULL;
+ const Snapshot* model_snap = nullptr;
+ const Snapshot* db_snap = nullptr;
std::string k, v;
for (int step = 0; step < N; step++) {
if (step % 100 == 0) {
@@ -2028,22 +2175,19 @@ TEST(DBTest, Randomized) {
}
// TODO(sanjay): Test Get() works
int p = rnd.Uniform(100);
- if (p < 45) { // Put
+ if (p < 45) { // Put
k = RandomKey(&rnd);
- v = RandomString(&rnd,
- rnd.OneIn(20)
- ? 100 + rnd.Uniform(100)
- : rnd.Uniform(8));
+ v = RandomString(
+ &rnd, rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
ASSERT_OK(model.Put(WriteOptions(), k, v));
ASSERT_OK(db_->Put(WriteOptions(), k, v));
- } else if (p < 90) { // Delete
+ } else if (p < 90) { // Delete
k = RandomKey(&rnd);
ASSERT_OK(model.Delete(WriteOptions(), k));
ASSERT_OK(db_->Delete(WriteOptions(), k));
-
- } else { // Multi-element batch
+ } else { // Multi-element batch
WriteBatch b;
const int num = rnd.Uniform(8);
for (int i = 0; i < num; i++) {
@@ -2065,23 +2209,23 @@ TEST(DBTest, Randomized) {
}
if ((step % 100) == 0) {
- ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL));
+ ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
// Save a snapshot from each DB this time that we'll use next
// time we compare things, to make sure the current state is
// preserved with the snapshot
- if (model_snap != NULL) model.ReleaseSnapshot(model_snap);
- if (db_snap != NULL) db_->ReleaseSnapshot(db_snap);
+ if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
+ if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
Reopen();
- ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL));
+ ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
model_snap = model.GetSnapshot();
db_snap = db_->GetSnapshot();
}
}
- if (model_snap != NULL) model.ReleaseSnapshot(model_snap);
- if (db_snap != NULL) db_->ReleaseSnapshot(db_snap);
+ if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
+ if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
} while (ChangeOptions());
}
@@ -2095,15 +2239,15 @@ void BM_LogAndApply(int iters, int num_base_files) {
std::string dbname = test::TmpDir() + "/leveldb_test_benchmark";
DestroyDB(dbname, Options());
- DB* db = NULL;
+ DB* db = nullptr;
Options opts;
opts.create_if_missing = true;
Status s = DB::Open(opts, dbname, &db);
ASSERT_OK(s);
- ASSERT_TRUE(db != NULL);
+ ASSERT_TRUE(db != nullptr);
delete db;
- db = NULL;
+ db = nullptr;
Env* env = Env::Default();
@@ -2112,14 +2256,14 @@ void BM_LogAndApply(int iters, int num_base_files) {
InternalKeyComparator cmp(BytewiseComparator());
Options options;
- VersionSet vset(dbname, &options, NULL, &cmp);
+ VersionSet vset(dbname, &options, nullptr, &cmp);
bool save_manifest;
ASSERT_OK(vset.Recover(&save_manifest));
VersionEdit vbase;
uint64_t fnum = 1;
for (int i = 0; i < num_base_files; i++) {
- InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
- InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
+ InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+ InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
}
ASSERT_OK(vset.LogAndApply(&vbase, &mu));
@@ -2129,8 +2273,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
for (int i = 0; i < iters; i++) {
VersionEdit vedit;
vedit.DeleteFile(2, fnum);
- InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
- InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
+ InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+ InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
vset.LogAndApply(&vedit, &mu);
}
@@ -2139,8 +2283,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
char buf[16];
snprintf(buf, sizeof(buf), "%d", num_base_files);
fprintf(stderr,
- "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n",
- buf, iters, us, ((float)us) / iters);
+ "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n", buf,
+ iters, us, ((float)us) / iters);
}
} // namespace leveldb
diff --git a/src/leveldb/db/dbformat.cc b/src/leveldb/db/dbformat.cc
index 20a7ca4462..459eddf5b1 100644
--- a/src/leveldb/db/dbformat.cc
+++ b/src/leveldb/db/dbformat.cc
@@ -2,8 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include <stdio.h>
#include "db/dbformat.h"
+
+#include <stdio.h>
+
+#include <sstream>
+
#include "port/port.h"
#include "util/coding.h"
@@ -21,26 +25,20 @@ void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {
}
std::string ParsedInternalKey::DebugString() const {
- char buf[50];
- snprintf(buf, sizeof(buf), "' @ %llu : %d",
- (unsigned long long) sequence,
- int(type));
- std::string result = "'";
- result += EscapeString(user_key.ToString());
- result += buf;
- return result;
+ std::ostringstream ss;
+ ss << '\'' << EscapeString(user_key.ToString()) << "' @ " << sequence << " : "
+ << static_cast<int>(type);
+ return ss.str();
}
std::string InternalKey::DebugString() const {
- std::string result;
ParsedInternalKey parsed;
if (ParseInternalKey(rep_, &parsed)) {
- result = parsed.DebugString();
- } else {
- result = "(bad)";
- result.append(EscapeString(rep_));
+ return parsed.DebugString();
}
- return result;
+ std::ostringstream ss;
+ ss << "(bad)" << EscapeString(rep_);
+ return ss.str();
}
const char* InternalKeyComparator::Name() const {
@@ -65,9 +63,8 @@ int InternalKeyComparator::Compare(const Slice& akey, const Slice& bkey) const {
return r;
}
-void InternalKeyComparator::FindShortestSeparator(
- std::string* start,
- const Slice& limit) const {
+void InternalKeyComparator::FindShortestSeparator(std::string* start,
+ const Slice& limit) const {
// Attempt to shorten the user portion of the key
Slice user_start = ExtractUserKey(*start);
Slice user_limit = ExtractUserKey(limit);
@@ -77,7 +74,8 @@ void InternalKeyComparator::FindShortestSeparator(
user_comparator_->Compare(user_start, tmp) < 0) {
// User key has become shorter physically, but larger logically.
// Tack on the earliest possible number to the shortened user key.
- PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
+ PutFixed64(&tmp,
+ PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
assert(this->Compare(*start, tmp) < 0);
assert(this->Compare(tmp, limit) < 0);
start->swap(tmp);
@@ -92,15 +90,14 @@ void InternalKeyComparator::FindShortSuccessor(std::string* key) const {
user_comparator_->Compare(user_key, tmp) < 0) {
// User key has become shorter physically, but larger logically.
// Tack on the earliest possible number to the shortened user key.
- PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
+ PutFixed64(&tmp,
+ PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
assert(this->Compare(*key, tmp) < 0);
key->swap(tmp);
}
}
-const char* InternalFilterPolicy::Name() const {
- return user_policy_->Name();
-}
+const char* InternalFilterPolicy::Name() const { return user_policy_->Name(); }
void InternalFilterPolicy::CreateFilter(const Slice* keys, int n,
std::string* dst) const {
diff --git a/src/leveldb/db/dbformat.h b/src/leveldb/db/dbformat.h
index ea897b13c0..a1c30ed88c 100644
--- a/src/leveldb/db/dbformat.h
+++ b/src/leveldb/db/dbformat.h
@@ -5,7 +5,10 @@
#ifndef STORAGE_LEVELDB_DB_DBFORMAT_H_
#define STORAGE_LEVELDB_DB_DBFORMAT_H_
-#include <stdio.h>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+
#include "leveldb/comparator.h"
#include "leveldb/db.h"
#include "leveldb/filter_policy.h"
@@ -48,10 +51,7 @@ class InternalKey;
// Value types encoded as the last component of internal keys.
// DO NOT CHANGE THESE ENUM VALUES: they are embedded in the on-disk
// data structures.
-enum ValueType {
- kTypeDeletion = 0x0,
- kTypeValue = 0x1
-};
+enum ValueType { kTypeDeletion = 0x0, kTypeValue = 0x1 };
// kValueTypeForSeek defines the ValueType that should be passed when
// constructing a ParsedInternalKey object for seeking to a particular
// sequence number (since we sort sequence numbers in decreasing order
@@ -64,17 +64,16 @@ typedef uint64_t SequenceNumber;
// We leave eight bits empty at the bottom so a type and sequence#
// can be packed together into 64-bits.
-static const SequenceNumber kMaxSequenceNumber =
- ((0x1ull << 56) - 1);
+static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1);
struct ParsedInternalKey {
Slice user_key;
SequenceNumber sequence;
ValueType type;
- ParsedInternalKey() { } // Intentionally left uninitialized (for speed)
+ ParsedInternalKey() {} // Intentionally left uninitialized (for speed)
ParsedInternalKey(const Slice& u, const SequenceNumber& seq, ValueType t)
- : user_key(u), sequence(seq), type(t) { }
+ : user_key(u), sequence(seq), type(t) {}
std::string DebugString() const;
};
@@ -84,15 +83,13 @@ inline size_t InternalKeyEncodingLength(const ParsedInternalKey& key) {
}
// Append the serialization of "key" to *result.
-extern void AppendInternalKey(std::string* result,
- const ParsedInternalKey& key);
+void AppendInternalKey(std::string* result, const ParsedInternalKey& key);
// Attempt to parse an internal key from "internal_key". On success,
// stores the parsed data in "*result", and returns true.
//
// On error, returns false, leaves "*result" in an undefined state.
-extern bool ParseInternalKey(const Slice& internal_key,
- ParsedInternalKey* result);
+bool ParseInternalKey(const Slice& internal_key, ParsedInternalKey* result);
// Returns the user key portion of an internal key.
inline Slice ExtractUserKey(const Slice& internal_key) {
@@ -100,27 +97,19 @@ inline Slice ExtractUserKey(const Slice& internal_key) {
return Slice(internal_key.data(), internal_key.size() - 8);
}
-inline ValueType ExtractValueType(const Slice& internal_key) {
- assert(internal_key.size() >= 8);
- const size_t n = internal_key.size();
- uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
- unsigned char c = num & 0xff;
- return static_cast<ValueType>(c);
-}
-
// A comparator for internal keys that uses a specified comparator for
// the user key portion and breaks ties by decreasing sequence number.
class InternalKeyComparator : public Comparator {
private:
const Comparator* user_comparator_;
+
public:
- explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) { }
- virtual const char* Name() const;
- virtual int Compare(const Slice& a, const Slice& b) const;
- virtual void FindShortestSeparator(
- std::string* start,
- const Slice& limit) const;
- virtual void FindShortSuccessor(std::string* key) const;
+ explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) {}
+ const char* Name() const override;
+ int Compare(const Slice& a, const Slice& b) const override;
+ void FindShortestSeparator(std::string* start,
+ const Slice& limit) const override;
+ void FindShortSuccessor(std::string* key) const override;
const Comparator* user_comparator() const { return user_comparator_; }
@@ -131,11 +120,12 @@ class InternalKeyComparator : public Comparator {
class InternalFilterPolicy : public FilterPolicy {
private:
const FilterPolicy* const user_policy_;
+
public:
- explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) { }
- virtual const char* Name() const;
- virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const;
- virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const;
+ explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) {}
+ const char* Name() const override;
+ void CreateFilter(const Slice* keys, int n, std::string* dst) const override;
+ bool KeyMayMatch(const Slice& key, const Slice& filter) const override;
};
// Modules in this directory should keep internal keys wrapped inside
@@ -144,13 +134,18 @@ class InternalFilterPolicy : public FilterPolicy {
class InternalKey {
private:
std::string rep_;
+
public:
- InternalKey() { } // Leave rep_ as empty to indicate it is invalid
+ InternalKey() {} // Leave rep_ as empty to indicate it is invalid
InternalKey(const Slice& user_key, SequenceNumber s, ValueType t) {
AppendInternalKey(&rep_, ParsedInternalKey(user_key, s, t));
}
- void DecodeFrom(const Slice& s) { rep_.assign(s.data(), s.size()); }
+ bool DecodeFrom(const Slice& s) {
+ rep_.assign(s.data(), s.size());
+ return !rep_.empty();
+ }
+
Slice Encode() const {
assert(!rep_.empty());
return rep_;
@@ -168,8 +163,8 @@ class InternalKey {
std::string DebugString() const;
};
-inline int InternalKeyComparator::Compare(
- const InternalKey& a, const InternalKey& b) const {
+inline int InternalKeyComparator::Compare(const InternalKey& a,
+ const InternalKey& b) const {
return Compare(a.Encode(), b.Encode());
}
@@ -178,11 +173,11 @@ inline bool ParseInternalKey(const Slice& internal_key,
const size_t n = internal_key.size();
if (n < 8) return false;
uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
- unsigned char c = num & 0xff;
+ uint8_t c = num & 0xff;
result->sequence = num >> 8;
result->type = static_cast<ValueType>(c);
result->user_key = Slice(internal_key.data(), n - 8);
- return (c <= static_cast<unsigned char>(kTypeValue));
+ return (c <= static_cast<uint8_t>(kTypeValue));
}
// A helper class useful for DBImpl::Get()
@@ -192,6 +187,9 @@ class LookupKey {
// the specified sequence number.
LookupKey(const Slice& user_key, SequenceNumber sequence);
+ LookupKey(const LookupKey&) = delete;
+ LookupKey& operator=(const LookupKey&) = delete;
+
~LookupKey();
// Return a key suitable for lookup in a MemTable.
@@ -214,11 +212,7 @@ class LookupKey {
const char* start_;
const char* kstart_;
const char* end_;
- char space_[200]; // Avoid allocation for short keys
-
- // No copying allowed
- LookupKey(const LookupKey&);
- void operator=(const LookupKey&);
+ char space_[200]; // Avoid allocation for short keys
};
inline LookupKey::~LookupKey() {
diff --git a/src/leveldb/db/dbformat_test.cc b/src/leveldb/db/dbformat_test.cc
index 5d82f5d313..1209369c31 100644
--- a/src/leveldb/db/dbformat_test.cc
+++ b/src/leveldb/db/dbformat_test.cc
@@ -8,8 +8,7 @@
namespace leveldb {
-static std::string IKey(const std::string& user_key,
- uint64_t seq,
+static std::string IKey(const std::string& user_key, uint64_t seq,
ValueType vt) {
std::string encoded;
AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt));
@@ -28,9 +27,7 @@ static std::string ShortSuccessor(const std::string& s) {
return result;
}
-static void TestKey(const std::string& key,
- uint64_t seq,
- ValueType vt) {
+static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {
std::string encoded = IKey(key, seq, vt);
Slice in(encoded);
@@ -44,16 +41,22 @@ static void TestKey(const std::string& key,
ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
}
-class FormatTest { };
+class FormatTest {};
TEST(FormatTest, InternalKey_EncodeDecode) {
- const char* keys[] = { "", "k", "hello", "longggggggggggggggggggggg" };
- const uint64_t seq[] = {
- 1, 2, 3,
- (1ull << 8) - 1, 1ull << 8, (1ull << 8) + 1,
- (1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1,
- (1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1
- };
+ const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"};
+ const uint64_t seq[] = {1,
+ 2,
+ 3,
+ (1ull << 8) - 1,
+ 1ull << 8,
+ (1ull << 8) + 1,
+ (1ull << 16) - 1,
+ 1ull << 16,
+ (1ull << 16) + 1,
+ (1ull << 32) - 1,
+ 1ull << 32,
+ (1ull << 32) + 1};
for (int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
for (int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
TestKey(keys[k], seq[s], kTypeValue);
@@ -62,40 +65,44 @@ TEST(FormatTest, InternalKey_EncodeDecode) {
}
}
+TEST(FormatTest, InternalKey_DecodeFromEmpty) {
+ InternalKey internal_key;
+
+ ASSERT_TRUE(!internal_key.DecodeFrom(""));
+}
+
TEST(FormatTest, InternalKeyShortSeparator) {
// When user keys are same
ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foo", 99, kTypeValue)));
- ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foo", 101, kTypeValue)));
- ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foo", 100, kTypeValue)));
- ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foo", 100, kTypeDeletion)));
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 99, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foo", 100, kTypeValue),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 101, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foo", 100, kTypeValue),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foo", 100, kTypeValue),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeDeletion)));
// When user keys are misordered
ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("bar", 99, kTypeValue)));
+ Shorten(IKey("foo", 100, kTypeValue), IKey("bar", 99, kTypeValue)));
// When user keys are different, but correctly ordered
- ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("hello", 200, kTypeValue)));
+ ASSERT_EQ(
+ IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("hello", 200, kTypeValue)));
// When start user key is prefix of limit user key
- ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foobar", 200, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foo", 100, kTypeValue),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foobar", 200, kTypeValue)));
// When limit user key is prefix of start user key
- ASSERT_EQ(IKey("foobar", 100, kTypeValue),
- Shorten(IKey("foobar", 100, kTypeValue),
- IKey("foo", 200, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foobar", 100, kTypeValue),
+ Shorten(IKey("foobar", 100, kTypeValue), IKey("foo", 200, kTypeValue)));
}
TEST(FormatTest, InternalKeyShortestSuccessor) {
@@ -105,8 +112,20 @@ TEST(FormatTest, InternalKeyShortestSuccessor) {
ShortSuccessor(IKey("\xff\xff", 100, kTypeValue)));
}
-} // namespace leveldb
+TEST(FormatTest, ParsedInternalKeyDebugString) {
+ ParsedInternalKey key("The \"key\" in 'single quotes'", 42, kTypeValue);
+
+ ASSERT_EQ("'The \"key\" in 'single quotes'' @ 42 : 1", key.DebugString());
+}
+
+TEST(FormatTest, InternalKeyDebugString) {
+ InternalKey key("The \"key\" in 'single quotes'", 42, kTypeValue);
+ ASSERT_EQ("'The \"key\" in 'single quotes'' @ 42 : 1", key.DebugString());
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
+ InternalKey invalid_key;
+ ASSERT_EQ("(bad)", invalid_key.DebugString());
}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/dumpfile.cc b/src/leveldb/db/dumpfile.cc
index 61c47c2ff9..77d59003cf 100644
--- a/src/leveldb/db/dumpfile.cc
+++ b/src/leveldb/db/dumpfile.cc
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "leveldb/dumpfile.h"
+
#include <stdio.h>
+
#include "db/dbformat.h"
#include "db/filename.h"
#include "db/log_reader.h"
@@ -35,8 +38,7 @@ bool GuessType(const std::string& fname, FileType* type) {
// Notified when log reader encounters corruption.
class CorruptionReporter : public log::Reader::Reporter {
public:
- WritableFile* dst_;
- virtual void Corruption(size_t bytes, const Status& status) {
+ void Corruption(size_t bytes, const Status& status) override {
std::string r = "corruption: ";
AppendNumberTo(&r, bytes);
r += " bytes; ";
@@ -44,6 +46,8 @@ class CorruptionReporter : public log::Reader::Reporter {
r.push_back('\n');
dst_->Append(r);
}
+
+ WritableFile* dst_;
};
// Print contents of a log file. (*func)() is called on every record.
@@ -70,8 +74,7 @@ Status PrintLogContents(Env* env, const std::string& fname,
// Called on every item found in a WriteBatch.
class WriteBatchItemPrinter : public WriteBatch::Handler {
public:
- WritableFile* dst_;
- virtual void Put(const Slice& key, const Slice& value) {
+ void Put(const Slice& key, const Slice& value) override {
std::string r = " put '";
AppendEscapedStringTo(&r, key);
r += "' '";
@@ -79,14 +82,15 @@ class WriteBatchItemPrinter : public WriteBatch::Handler {
r += "'\n";
dst_->Append(r);
}
- virtual void Delete(const Slice& key) {
+ void Delete(const Slice& key) override {
std::string r = " del '";
AppendEscapedStringTo(&r, key);
r += "'\n";
dst_->Append(r);
}
-};
+ WritableFile* dst_;
+};
// Called on every log record (each one of which is a WriteBatch)
// found in a kLogFile.
@@ -142,8 +146,8 @@ Status DumpDescriptor(Env* env, const std::string& fname, WritableFile* dst) {
Status DumpTable(Env* env, const std::string& fname, WritableFile* dst) {
uint64_t file_size;
- RandomAccessFile* file = NULL;
- Table* table = NULL;
+ RandomAccessFile* file = nullptr;
+ Table* table = nullptr;
Status s = env->GetFileSize(fname, &file_size);
if (s.ok()) {
s = env->NewRandomAccessFile(fname, &file);
@@ -213,9 +217,12 @@ Status DumpFile(Env* env, const std::string& fname, WritableFile* dst) {
return Status::InvalidArgument(fname + ": unknown file type");
}
switch (ftype) {
- case kLogFile: return DumpLog(env, fname, dst);
- case kDescriptorFile: return DumpDescriptor(env, fname, dst);
- case kTableFile: return DumpTable(env, fname, dst);
+ case kLogFile:
+ return DumpLog(env, fname, dst);
+ case kDescriptorFile:
+ return DumpDescriptor(env, fname, dst);
+ case kTableFile:
+ return DumpTable(env, fname, dst);
default:
break;
}
diff --git a/src/leveldb/db/fault_injection_test.cc b/src/leveldb/db/fault_injection_test.cc
index 875dfe81ee..bf705cb60f 100644
--- a/src/leveldb/db/fault_injection_test.cc
+++ b/src/leveldb/db/fault_injection_test.cc
@@ -6,18 +6,20 @@
// the last "sync". It then checks for data loss errors by purposely dropping
// file data (or entire files) not protected by a "sync".
-#include "leveldb/db.h"
-
#include <map>
#include <set>
+
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/log_format.h"
#include "db/version_set.h"
#include "leveldb/cache.h"
+#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/table.h"
#include "leveldb/write_batch.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/testharness.h"
@@ -34,7 +36,7 @@ class FaultInjectionTestEnv;
namespace {
// Assume a filename, and not a directory name like "/foo/bar/"
-static std::string GetDirName(const std::string filename) {
+static std::string GetDirName(const std::string& filename) {
size_t found = filename.find_last_of("/\\");
if (found == std::string::npos) {
return "";
@@ -54,8 +56,7 @@ Status Truncate(const std::string& filename, uint64_t length) {
SequentialFile* orig_file;
Status s = env->NewSequentialFile(filename, &orig_file);
- if (!s.ok())
- return s;
+ if (!s.ok()) return s;
char* scratch = new char[length];
leveldb::Slice result;
@@ -83,15 +84,15 @@ Status Truncate(const std::string& filename, uint64_t length) {
struct FileState {
std::string filename_;
- ssize_t pos_;
- ssize_t pos_at_last_sync_;
- ssize_t pos_at_last_flush_;
+ int64_t pos_;
+ int64_t pos_at_last_sync_;
+ int64_t pos_at_last_flush_;
FileState(const std::string& filename)
: filename_(filename),
pos_(-1),
pos_at_last_sync_(-1),
- pos_at_last_flush_(-1) { }
+ pos_at_last_flush_(-1) {}
FileState() : pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {}
@@ -106,14 +107,14 @@ struct FileState {
// is written to or sync'ed.
class TestWritableFile : public WritableFile {
public:
- TestWritableFile(const FileState& state,
- WritableFile* f,
+ TestWritableFile(const FileState& state, WritableFile* f,
FaultInjectionTestEnv* env);
- virtual ~TestWritableFile();
- virtual Status Append(const Slice& data);
- virtual Status Close();
- virtual Status Flush();
- virtual Status Sync();
+ ~TestWritableFile() override;
+ Status Append(const Slice& data) override;
+ Status Close() override;
+ Status Flush() override;
+ Status Sync() override;
+ std::string GetName() const override { return ""; }
private:
FileState state_;
@@ -126,14 +127,15 @@ class TestWritableFile : public WritableFile {
class FaultInjectionTestEnv : public EnvWrapper {
public:
- FaultInjectionTestEnv() : EnvWrapper(Env::Default()), filesystem_active_(true) {}
- virtual ~FaultInjectionTestEnv() { }
- virtual Status NewWritableFile(const std::string& fname,
- WritableFile** result);
- virtual Status NewAppendableFile(const std::string& fname,
- WritableFile** result);
- virtual Status DeleteFile(const std::string& f);
- virtual Status RenameFile(const std::string& s, const std::string& t);
+ FaultInjectionTestEnv()
+ : EnvWrapper(Env::Default()), filesystem_active_(true) {}
+ ~FaultInjectionTestEnv() override = default;
+ Status NewWritableFile(const std::string& fname,
+ WritableFile** result) override;
+ Status NewAppendableFile(const std::string& fname,
+ WritableFile** result) override;
+ Status DeleteFile(const std::string& f) override;
+ Status RenameFile(const std::string& s, const std::string& t) override;
void WritableFileClosed(const FileState& state);
Status DropUnsyncedFileData();
@@ -146,24 +148,26 @@ class FaultInjectionTestEnv : public EnvWrapper {
// system reset. Setting to inactive will freeze our saved filesystem state so
// that it will stop being recorded. It can then be reset back to the state at
// the time of the reset.
- bool IsFilesystemActive() const { return filesystem_active_; }
- void SetFilesystemActive(bool active) { filesystem_active_ = active; }
+ bool IsFilesystemActive() LOCKS_EXCLUDED(mutex_) {
+ MutexLock l(&mutex_);
+ return filesystem_active_;
+ }
+ void SetFilesystemActive(bool active) LOCKS_EXCLUDED(mutex_) {
+ MutexLock l(&mutex_);
+ filesystem_active_ = active;
+ }
private:
port::Mutex mutex_;
- std::map<std::string, FileState> db_file_state_;
- std::set<std::string> new_files_since_last_dir_sync_;
- bool filesystem_active_; // Record flushes, syncs, writes
+ std::map<std::string, FileState> db_file_state_ GUARDED_BY(mutex_);
+ std::set<std::string> new_files_since_last_dir_sync_ GUARDED_BY(mutex_);
+ bool filesystem_active_ GUARDED_BY(mutex_); // Record flushes, syncs, writes
};
-TestWritableFile::TestWritableFile(const FileState& state,
- WritableFile* f,
+TestWritableFile::TestWritableFile(const FileState& state, WritableFile* f,
FaultInjectionTestEnv* env)
- : state_(state),
- target_(f),
- writable_file_opened_(true),
- env_(env) {
- assert(f != NULL);
+ : state_(state), target_(f), writable_file_opened_(true), env_(env) {
+ assert(f != nullptr);
}
TestWritableFile::~TestWritableFile() {
@@ -265,10 +269,11 @@ Status FaultInjectionTestEnv::NewAppendableFile(const std::string& fname,
Status FaultInjectionTestEnv::DropUnsyncedFileData() {
Status s;
MutexLock l(&mutex_);
- for (std::map<std::string, FileState>::const_iterator it =
- db_file_state_.begin();
- s.ok() && it != db_file_state_.end(); ++it) {
- const FileState& state = it->second;
+ for (const auto& kvp : db_file_state_) {
+ if (!s.ok()) {
+ break;
+ }
+ const FileState& state = kvp.second;
if (!state.IsFullySynced()) {
s = state.DropUnsyncedData();
}
@@ -328,7 +333,6 @@ void FaultInjectionTestEnv::ResetState() {
// Since we are not destroying the database, the existing files
// should keep their recorded synced/flushed state. Therefore
// we do not reset db_file_state_ and new_files_since_last_dir_sync_.
- MutexLock l(&mutex_);
SetFilesystemActive(true);
}
@@ -338,12 +342,14 @@ Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() {
std::set<std::string> new_files(new_files_since_last_dir_sync_.begin(),
new_files_since_last_dir_sync_.end());
mutex_.Unlock();
- Status s;
- std::set<std::string>::const_iterator it;
- for (it = new_files.begin(); s.ok() && it != new_files.end(); ++it) {
- s = DeleteFile(*it);
+ Status status;
+ for (const auto& new_file : new_files) {
+ Status delete_status = DeleteFile(new_file);
+ if (!delete_status.ok() && status.ok()) {
+ status = std::move(delete_status);
+ }
}
- return s;
+ return status;
}
void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) {
@@ -352,7 +358,7 @@ void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) {
}
Status FileState::DropUnsyncedData() const {
- ssize_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
+ int64_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
return Truncate(filename_, sync_pos);
}
@@ -370,7 +376,7 @@ class FaultInjectionTest {
FaultInjectionTest()
: env_(new FaultInjectionTestEnv),
tiny_cache_(NewLRUCache(100)),
- db_(NULL) {
+ db_(nullptr) {
dbname_ = test::TmpDir() + "/fault_test";
DestroyDB(dbname_, Options()); // Destroy any db from earlier run
options_.reuse_logs = true;
@@ -387,9 +393,7 @@ class FaultInjectionTest {
delete env_;
}
- void ReuseLogs(bool reuse) {
- options_.reuse_logs = reuse;
- }
+ void ReuseLogs(bool reuse) { options_.reuse_logs = reuse; }
void Build(int start_idx, int num_vals) {
std::string key_space, value_space;
@@ -449,19 +453,18 @@ class FaultInjectionTest {
Status OpenDB() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
env_->ResetState();
return DB::Open(options_, dbname_, &db_);
}
void CloseDB() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
}
void DeleteAllData() {
Iterator* iter = db_->NewIterator(ReadOptions());
- WriteOptions options;
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ASSERT_OK(db_->Delete(WriteOptions(), iter->key()));
}
@@ -485,23 +488,22 @@ class FaultInjectionTest {
void PartialCompactTestPreFault(int num_pre_sync, int num_post_sync) {
DeleteAllData();
Build(0, num_pre_sync);
- db_->CompactRange(NULL, NULL);
+ db_->CompactRange(nullptr, nullptr);
Build(num_pre_sync, num_post_sync);
}
void PartialCompactTestReopenWithFault(ResetMethod reset_method,
- int num_pre_sync,
- int num_post_sync) {
+ int num_pre_sync, int num_post_sync) {
env_->SetFilesystemActive(false);
CloseDB();
ResetDBState(reset_method);
ASSERT_OK(OpenDB());
ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR));
- ASSERT_OK(Verify(num_pre_sync, num_post_sync, FaultInjectionTest::VAL_EXPECT_ERROR));
+ ASSERT_OK(Verify(num_pre_sync, num_post_sync,
+ FaultInjectionTest::VAL_EXPECT_ERROR));
}
- void NoWriteTestPreFault() {
- }
+ void NoWriteTestPreFault() {}
void NoWriteTestReopenWithFault(ResetMethod reset_method) {
CloseDB();
@@ -517,8 +519,7 @@ class FaultInjectionTest {
int num_post_sync = rnd.Uniform(kMaxNumValues);
PartialCompactTestPreFault(num_pre_sync, num_post_sync);
- PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA,
- num_pre_sync,
+ PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA, num_pre_sync,
num_post_sync);
NoWriteTestPreFault();
@@ -528,8 +529,7 @@ class FaultInjectionTest {
// No new files created so we expect all values since no files will be
// dropped.
PartialCompactTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES,
- num_pre_sync + num_post_sync,
- 0);
+ num_pre_sync + num_post_sync, 0);
NoWriteTestPreFault();
NoWriteTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES);
@@ -549,6 +549,4 @@ TEST(FaultInjectionTest, FaultTestWithLogReuse) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/filename.cc b/src/leveldb/db/filename.cc
index da32946d99..85de45c507 100644
--- a/src/leveldb/db/filename.cc
+++ b/src/leveldb/db/filename.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "db/filename.h"
+
#include <ctype.h>
#include <stdio.h>
-#include "db/filename.h"
+
#include "db/dbformat.h"
#include "leveldb/env.h"
#include "util/logging.h"
@@ -12,31 +14,30 @@
namespace leveldb {
// A utility routine: write "data" to the named file and Sync() it.
-extern Status WriteStringToFileSync(Env* env, const Slice& data,
- const std::string& fname);
+Status WriteStringToFileSync(Env* env, const Slice& data,
+ const std::string& fname);
-static std::string MakeFileName(const std::string& name, uint64_t number,
+static std::string MakeFileName(const std::string& dbname, uint64_t number,
const char* suffix) {
char buf[100];
snprintf(buf, sizeof(buf), "/%06llu.%s",
- static_cast<unsigned long long>(number),
- suffix);
- return name + buf;
+ static_cast<unsigned long long>(number), suffix);
+ return dbname + buf;
}
-std::string LogFileName(const std::string& name, uint64_t number) {
+std::string LogFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
- return MakeFileName(name, number, "log");
+ return MakeFileName(dbname, number, "log");
}
-std::string TableFileName(const std::string& name, uint64_t number) {
+std::string TableFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
- return MakeFileName(name, number, "ldb");
+ return MakeFileName(dbname, number, "ldb");
}
-std::string SSTTableFileName(const std::string& name, uint64_t number) {
+std::string SSTTableFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
- return MakeFileName(name, number, "sst");
+ return MakeFileName(dbname, number, "sst");
}
std::string DescriptorFileName(const std::string& dbname, uint64_t number) {
@@ -51,9 +52,7 @@ std::string CurrentFileName(const std::string& dbname) {
return dbname + "/CURRENT";
}
-std::string LockFileName(const std::string& dbname) {
- return dbname + "/LOCK";
-}
+std::string LockFileName(const std::string& dbname) { return dbname + "/LOCK"; }
std::string TempFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
@@ -69,7 +68,6 @@ std::string OldInfoLogFileName(const std::string& dbname) {
return dbname + "/LOG.old";
}
-
// Owned filenames have the form:
// dbname/CURRENT
// dbname/LOCK
@@ -77,10 +75,9 @@ std::string OldInfoLogFileName(const std::string& dbname) {
// dbname/LOG.old
// dbname/MANIFEST-[0-9]+
// dbname/[0-9]+.(log|sst|ldb)
-bool ParseFileName(const std::string& fname,
- uint64_t* number,
+bool ParseFileName(const std::string& filename, uint64_t* number,
FileType* type) {
- Slice rest(fname);
+ Slice rest(filename);
if (rest == "CURRENT") {
*number = 0;
*type = kCurrentFile;
diff --git a/src/leveldb/db/filename.h b/src/leveldb/db/filename.h
index 87a752605d..524e813c06 100644
--- a/src/leveldb/db/filename.h
+++ b/src/leveldb/db/filename.h
@@ -8,7 +8,9 @@
#define STORAGE_LEVELDB_DB_FILENAME_H_
#include <stdint.h>
+
#include <string>
+
#include "leveldb/slice.h"
#include "leveldb/status.h"
#include "port/port.h"
@@ -30,55 +32,52 @@ enum FileType {
// Return the name of the log file with the specified number
// in the db named by "dbname". The result will be prefixed with
// "dbname".
-extern std::string LogFileName(const std::string& dbname, uint64_t number);
+std::string LogFileName(const std::string& dbname, uint64_t number);
// Return the name of the sstable with the specified number
// in the db named by "dbname". The result will be prefixed with
// "dbname".
-extern std::string TableFileName(const std::string& dbname, uint64_t number);
+std::string TableFileName(const std::string& dbname, uint64_t number);
// Return the legacy file name for an sstable with the specified number
// in the db named by "dbname". The result will be prefixed with
// "dbname".
-extern std::string SSTTableFileName(const std::string& dbname, uint64_t number);
+std::string SSTTableFileName(const std::string& dbname, uint64_t number);
// Return the name of the descriptor file for the db named by
// "dbname" and the specified incarnation number. The result will be
// prefixed with "dbname".
-extern std::string DescriptorFileName(const std::string& dbname,
- uint64_t number);
+std::string DescriptorFileName(const std::string& dbname, uint64_t number);
// Return the name of the current file. This file contains the name
// of the current manifest file. The result will be prefixed with
// "dbname".
-extern std::string CurrentFileName(const std::string& dbname);
+std::string CurrentFileName(const std::string& dbname);
// Return the name of the lock file for the db named by
// "dbname". The result will be prefixed with "dbname".
-extern std::string LockFileName(const std::string& dbname);
+std::string LockFileName(const std::string& dbname);
// Return the name of a temporary file owned by the db named "dbname".
// The result will be prefixed with "dbname".
-extern std::string TempFileName(const std::string& dbname, uint64_t number);
+std::string TempFileName(const std::string& dbname, uint64_t number);
// Return the name of the info log file for "dbname".
-extern std::string InfoLogFileName(const std::string& dbname);
+std::string InfoLogFileName(const std::string& dbname);
// Return the name of the old info log file for "dbname".
-extern std::string OldInfoLogFileName(const std::string& dbname);
+std::string OldInfoLogFileName(const std::string& dbname);
// If filename is a leveldb file, store the type of the file in *type.
// The number encoded in the filename is stored in *number. If the
// filename was successfully parsed, returns true. Else return false.
-extern bool ParseFileName(const std::string& filename,
- uint64_t* number,
- FileType* type);
+bool ParseFileName(const std::string& filename, uint64_t* number,
+ FileType* type);
// Make the CURRENT file point to the descriptor file with the
// specified number.
-extern Status SetCurrentFile(Env* env, const std::string& dbname,
- uint64_t descriptor_number);
-
+Status SetCurrentFile(Env* env, const std::string& dbname,
+ uint64_t descriptor_number);
} // namespace leveldb
diff --git a/src/leveldb/db/filename_test.cc b/src/leveldb/db/filename_test.cc
index a32556deaf..952f32008e 100644
--- a/src/leveldb/db/filename_test.cc
+++ b/src/leveldb/db/filename_test.cc
@@ -11,7 +11,7 @@
namespace leveldb {
-class FileNameTest { };
+class FileNameTest {};
TEST(FileNameTest, Parse) {
Slice db;
@@ -24,17 +24,17 @@ TEST(FileNameTest, Parse) {
uint64_t number;
FileType type;
} cases[] = {
- { "100.log", 100, kLogFile },
- { "0.log", 0, kLogFile },
- { "0.sst", 0, kTableFile },
- { "0.ldb", 0, kTableFile },
- { "CURRENT", 0, kCurrentFile },
- { "LOCK", 0, kDBLockFile },
- { "MANIFEST-2", 2, kDescriptorFile },
- { "MANIFEST-7", 7, kDescriptorFile },
- { "LOG", 0, kInfoLogFile },
- { "LOG.old", 0, kInfoLogFile },
- { "18446744073709551615.log", 18446744073709551615ull, kLogFile },
+ {"100.log", 100, kLogFile},
+ {"0.log", 0, kLogFile},
+ {"0.sst", 0, kTableFile},
+ {"0.ldb", 0, kTableFile},
+ {"CURRENT", 0, kCurrentFile},
+ {"LOCK", 0, kDBLockFile},
+ {"MANIFEST-2", 2, kDescriptorFile},
+ {"MANIFEST-7", 7, kDescriptorFile},
+ {"LOG", 0, kInfoLogFile},
+ {"LOG.old", 0, kInfoLogFile},
+ {"18446744073709551615.log", 18446744073709551615ull, kLogFile},
};
for (int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
std::string f = cases[i].fname;
@@ -44,30 +44,28 @@ TEST(FileNameTest, Parse) {
}
// Errors
- static const char* errors[] = {
- "",
- "foo",
- "foo-dx-100.log",
- ".log",
- "",
- "manifest",
- "CURREN",
- "CURRENTX",
- "MANIFES",
- "MANIFEST",
- "MANIFEST-",
- "XMANIFEST-3",
- "MANIFEST-3x",
- "LOC",
- "LOCKx",
- "LO",
- "LOGx",
- "18446744073709551616.log",
- "184467440737095516150.log",
- "100",
- "100.",
- "100.lop"
- };
+ static const char* errors[] = {"",
+ "foo",
+ "foo-dx-100.log",
+ ".log",
+ "",
+ "manifest",
+ "CURREN",
+ "CURRENTX",
+ "MANIFES",
+ "MANIFEST",
+ "MANIFEST-",
+ "XMANIFEST-3",
+ "MANIFEST-3x",
+ "LOC",
+ "LOCKx",
+ "LO",
+ "LOGx",
+ "18446744073709551616.log",
+ "184467440737095516150.log",
+ "100",
+ "100.",
+ "100.lop"};
for (int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) {
std::string f = errors[i];
ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f;
@@ -114,10 +112,20 @@ TEST(FileNameTest, Construction) {
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(999, number);
ASSERT_EQ(kTempFile, type);
+
+ fname = InfoLogFileName("foo");
+ ASSERT_EQ("foo/", std::string(fname.data(), 4));
+ ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
+ ASSERT_EQ(0, number);
+ ASSERT_EQ(kInfoLogFile, type);
+
+ fname = OldInfoLogFileName("foo");
+ ASSERT_EQ("foo/", std::string(fname.data(), 4));
+ ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
+ ASSERT_EQ(0, number);
+ ASSERT_EQ(kInfoLogFile, type);
}
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/leveldbutil.cc b/src/leveldb/db/leveldbutil.cc
index d06d64d640..9ed9667d37 100644
--- a/src/leveldb/db/leveldbutil.cc
+++ b/src/leveldb/db/leveldbutil.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <stdio.h>
+
#include "leveldb/dumpfile.h"
#include "leveldb/env.h"
#include "leveldb/status.h"
@@ -12,14 +13,14 @@ namespace {
class StdoutPrinter : public WritableFile {
public:
- virtual Status Append(const Slice& data) {
+ Status Append(const Slice& data) override {
fwrite(data.data(), 1, data.size(), stdout);
return Status::OK();
}
- virtual Status Close() { return Status::OK(); }
- virtual Status Flush() { return Status::OK(); }
- virtual Status Sync() { return Status::OK(); }
- virtual std::string GetName() const { return "[stdout]"; }
+ Status Close() override { return Status::OK(); }
+ Status Flush() override { return Status::OK(); }
+ Status Sync() override { return Status::OK(); }
+ std::string GetName() const override { return "[stdout]"; }
};
bool HandleDumpCommand(Env* env, char** files, int num) {
@@ -39,11 +40,9 @@ bool HandleDumpCommand(Env* env, char** files, int num) {
} // namespace leveldb
static void Usage() {
- fprintf(
- stderr,
- "Usage: leveldbutil command...\n"
- " dump files... -- dump contents of specified files\n"
- );
+ fprintf(stderr,
+ "Usage: leveldbutil command...\n"
+ " dump files... -- dump contents of specified files\n");
}
int main(int argc, char** argv) {
@@ -55,7 +54,7 @@ int main(int argc, char** argv) {
} else {
std::string command = argv[1];
if (command == "dump") {
- ok = leveldb::HandleDumpCommand(env, argv+2, argc-2);
+ ok = leveldb::HandleDumpCommand(env, argv + 2, argc - 2);
} else {
Usage();
ok = false;
diff --git a/src/leveldb/db/log_reader.cc b/src/leveldb/db/log_reader.cc
index 8b6ad136d7..1ccfb7b34a 100644
--- a/src/leveldb/db/log_reader.cc
+++ b/src/leveldb/db/log_reader.cc
@@ -5,6 +5,7 @@
#include "db/log_reader.h"
#include <stdio.h>
+
#include "leveldb/env.h"
#include "util/coding.h"
#include "util/crc32c.h"
@@ -12,8 +13,7 @@
namespace leveldb {
namespace log {
-Reader::Reporter::~Reporter() {
-}
+Reader::Reporter::~Reporter() = default;
Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
uint64_t initial_offset)
@@ -26,20 +26,16 @@ Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
last_record_offset_(0),
end_of_buffer_offset_(0),
initial_offset_(initial_offset),
- resyncing_(initial_offset > 0) {
-}
+ resyncing_(initial_offset > 0) {}
-Reader::~Reader() {
- delete[] backing_store_;
-}
+Reader::~Reader() { delete[] backing_store_; }
bool Reader::SkipToInitialBlock() {
- size_t offset_in_block = initial_offset_ % kBlockSize;
+ const size_t offset_in_block = initial_offset_ % kBlockSize;
uint64_t block_start_location = initial_offset_ - offset_in_block;
// Don't search a block if we'd be in the trailer
if (offset_in_block > kBlockSize - 6) {
- offset_in_block = 0;
block_start_location += kBlockSize;
}
@@ -99,9 +95,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
// it could emit an empty kFirstType record at the tail end
// of a block followed by a kFullType or kFirstType record
// at the beginning of the next block.
- if (scratch->empty()) {
- in_fragmented_record = false;
- } else {
+ if (!scratch->empty()) {
ReportCorruption(scratch->size(), "partial record without end(1)");
}
}
@@ -117,9 +111,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
// it could emit an empty kFirstType record at the tail end
// of a block followed by a kFullType or kFirstType record
// at the beginning of the next block.
- if (scratch->empty()) {
- in_fragmented_record = false;
- } else {
+ if (!scratch->empty()) {
ReportCorruption(scratch->size(), "partial record without end(2)");
}
}
@@ -181,16 +173,14 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
return false;
}
-uint64_t Reader::LastRecordOffset() {
- return last_record_offset_;
-}
+uint64_t Reader::LastRecordOffset() { return last_record_offset_; }
void Reader::ReportCorruption(uint64_t bytes, const char* reason) {
ReportDrop(bytes, Status::Corruption(reason, file_->GetName()));
}
void Reader::ReportDrop(uint64_t bytes, const Status& reason) {
- if (reporter_ != NULL &&
+ if (reporter_ != nullptr &&
end_of_buffer_offset_ - buffer_.size() - bytes >= initial_offset_) {
reporter_->Corruption(static_cast<size_t>(bytes), reason);
}
diff --git a/src/leveldb/db/log_reader.h b/src/leveldb/db/log_reader.h
index 8389d61f8f..001da8948a 100644
--- a/src/leveldb/db/log_reader.h
+++ b/src/leveldb/db/log_reader.h
@@ -32,7 +32,7 @@ class Reader {
// Create a reader that will return log records from "*file".
// "*file" must remain live while this Reader is in use.
//
- // If "reporter" is non-NULL, it is notified whenever some data is
+ // If "reporter" is non-null, it is notified whenever some data is
// dropped due to a detected corruption. "*reporter" must remain
// live while this Reader is in use.
//
@@ -43,6 +43,9 @@ class Reader {
Reader(SequentialFile* file, Reporter* reporter, bool checksum,
uint64_t initial_offset);
+ Reader(const Reader&) = delete;
+ Reader& operator=(const Reader&) = delete;
+
~Reader();
// Read the next record into *record. Returns true if read
@@ -58,26 +61,6 @@ class Reader {
uint64_t LastRecordOffset();
private:
- SequentialFile* const file_;
- Reporter* const reporter_;
- bool const checksum_;
- char* const backing_store_;
- Slice buffer_;
- bool eof_; // Last Read() indicated EOF by returning < kBlockSize
-
- // Offset of the last record returned by ReadRecord.
- uint64_t last_record_offset_;
- // Offset of the first location past the end of buffer_.
- uint64_t end_of_buffer_offset_;
-
- // Offset at which to start looking for the first record to return
- uint64_t const initial_offset_;
-
- // True if we are resynchronizing after a seek (initial_offset_ > 0). In
- // particular, a run of kMiddleType and kLastType records can be silently
- // skipped in this mode
- bool resyncing_;
-
// Extend record types with the following special values
enum {
kEof = kMaxRecordType + 1,
@@ -102,9 +85,25 @@ class Reader {
void ReportCorruption(uint64_t bytes, const char* reason);
void ReportDrop(uint64_t bytes, const Status& reason);
- // No copying allowed
- Reader(const Reader&);
- void operator=(const Reader&);
+ SequentialFile* const file_;
+ Reporter* const reporter_;
+ bool const checksum_;
+ char* const backing_store_;
+ Slice buffer_;
+ bool eof_; // Last Read() indicated EOF by returning < kBlockSize
+
+ // Offset of the last record returned by ReadRecord.
+ uint64_t last_record_offset_;
+ // Offset of the first location past the end of buffer_.
+ uint64_t end_of_buffer_offset_;
+
+ // Offset at which to start looking for the first record to return
+ uint64_t const initial_offset_;
+
+ // True if we are resynchronizing after a seek (initial_offset_ > 0). In
+ // particular, a run of kMiddleType and kLastType records can be silently
+ // skipped in this mode
+ bool resyncing_;
};
} // namespace log
diff --git a/src/leveldb/db/log_test.cc b/src/leveldb/db/log_test.cc
index 48a5928657..41fc043068 100644
--- a/src/leveldb/db/log_test.cc
+++ b/src/leveldb/db/log_test.cc
@@ -37,87 +37,12 @@ static std::string RandomSkewedString(int i, Random* rnd) {
}
class LogTest {
- private:
- class StringDest : public WritableFile {
- public:
- std::string contents_;
-
- virtual Status Close() { return Status::OK(); }
- virtual Status Flush() { return Status::OK(); }
- virtual Status Sync() { return Status::OK(); }
- virtual Status Append(const Slice& slice) {
- contents_.append(slice.data(), slice.size());
- return Status::OK();
- }
- };
-
- class StringSource : public SequentialFile {
- public:
- Slice contents_;
- bool force_error_;
- bool returned_partial_;
- StringSource() : force_error_(false), returned_partial_(false) { }
-
- virtual Status Read(size_t n, Slice* result, char* scratch) {
- ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
-
- if (force_error_) {
- force_error_ = false;
- returned_partial_ = true;
- return Status::Corruption("read error");
- }
-
- if (contents_.size() < n) {
- n = contents_.size();
- returned_partial_ = true;
- }
- *result = Slice(contents_.data(), n);
- contents_.remove_prefix(n);
- return Status::OK();
- }
-
- virtual Status Skip(uint64_t n) {
- if (n > contents_.size()) {
- contents_.clear();
- return Status::NotFound("in-memory file skipped past end");
- }
-
- contents_.remove_prefix(n);
-
- return Status::OK();
- }
- };
-
- class ReportCollector : public Reader::Reporter {
- public:
- size_t dropped_bytes_;
- std::string message_;
-
- ReportCollector() : dropped_bytes_(0) { }
- virtual void Corruption(size_t bytes, const Status& status) {
- dropped_bytes_ += bytes;
- message_.append(status.ToString());
- }
- };
-
- StringDest dest_;
- StringSource source_;
- ReportCollector report_;
- bool reading_;
- Writer* writer_;
- Reader* reader_;
-
- // Record metadata for testing initial offset functionality
- static size_t initial_offset_record_sizes_[];
- static uint64_t initial_offset_last_record_offsets_[];
- static int num_initial_offset_records_;
-
public:
- LogTest() : reading_(false),
- writer_(new Writer(&dest_)),
- reader_(new Reader(&source_, &report_, true/*checksum*/,
- 0/*initial_offset*/)) {
- }
+ LogTest()
+ : reading_(false),
+ writer_(new Writer(&dest_)),
+ reader_(new Reader(&source_, &report_, true /*checksum*/,
+ 0 /*initial_offset*/)) {}
~LogTest() {
delete writer_;
@@ -134,9 +59,7 @@ class LogTest {
writer_->AddRecord(Slice(msg));
}
- size_t WrittenBytes() const {
- return dest_.contents_.size();
- }
+ size_t WrittenBytes() const { return dest_.contents_.size(); }
std::string Read() {
if (!reading_) {
@@ -166,22 +89,16 @@ class LogTest {
void FixChecksum(int header_offset, int len) {
// Compute crc of type/len/data
- uint32_t crc = crc32c::Value(&dest_.contents_[header_offset+6], 1 + len);
+ uint32_t crc = crc32c::Value(&dest_.contents_[header_offset + 6], 1 + len);
crc = crc32c::Mask(crc);
EncodeFixed32(&dest_.contents_[header_offset], crc);
}
- void ForceError() {
- source_.force_error_ = true;
- }
+ void ForceError() { source_.force_error_ = true; }
- size_t DroppedBytes() const {
- return report_.dropped_bytes_;
- }
+ size_t DroppedBytes() const { return report_.dropped_bytes_; }
- std::string ReportMessage() const {
- return report_.message_;
- }
+ std::string ReportMessage() const { return report_.message_; }
// Returns OK iff recorded error message contains "msg"
std::string MatchError(const std::string& msg) const {
@@ -202,14 +119,14 @@ class LogTest {
void StartReadingAt(uint64_t initial_offset) {
delete reader_;
- reader_ = new Reader(&source_, &report_, true/*checksum*/, initial_offset);
+ reader_ = new Reader(&source_, &report_, true /*checksum*/, initial_offset);
}
void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
WriteInitialOffsetLog();
reading_ = true;
source_.contents_ = Slice(dest_.contents_);
- Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
+ Reader* offset_reader = new Reader(&source_, &report_, true /*checksum*/,
WrittenBytes() + offset_past_end);
Slice record;
std::string scratch;
@@ -222,8 +139,8 @@ class LogTest {
WriteInitialOffsetLog();
reading_ = true;
source_.contents_ = Slice(dest_.contents_);
- Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
- initial_offset);
+ Reader* offset_reader =
+ new Reader(&source_, &report_, true /*checksum*/, initial_offset);
// Read all records from expected_record_offset through the last one.
ASSERT_LT(expected_record_offset, num_initial_offset_records_);
@@ -240,36 +157,110 @@ class LogTest {
}
delete offset_reader;
}
+
+ private:
+ class StringDest : public WritableFile {
+ public:
+ Status Close() override { return Status::OK(); }
+ Status Flush() override { return Status::OK(); }
+ Status Sync() override { return Status::OK(); }
+ Status Append(const Slice& slice) override {
+ contents_.append(slice.data(), slice.size());
+ return Status::OK();
+ }
+ std::string GetName() const override { return ""; }
+
+ std::string contents_;
+ };
+
+ class StringSource : public SequentialFile {
+ public:
+ StringSource() : force_error_(false), returned_partial_(false) {}
+
+ Status Read(size_t n, Slice* result, char* scratch) override {
+ ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
+
+ if (force_error_) {
+ force_error_ = false;
+ returned_partial_ = true;
+ return Status::Corruption("read error");
+ }
+
+ if (contents_.size() < n) {
+ n = contents_.size();
+ returned_partial_ = true;
+ }
+ *result = Slice(contents_.data(), n);
+ contents_.remove_prefix(n);
+ return Status::OK();
+ }
+
+ Status Skip(uint64_t n) override {
+ if (n > contents_.size()) {
+ contents_.clear();
+ return Status::NotFound("in-memory file skipped past end");
+ }
+
+ contents_.remove_prefix(n);
+
+ return Status::OK();
+ }
+ std::string GetName() const { return ""; }
+
+ Slice contents_;
+ bool force_error_;
+ bool returned_partial_;
+ };
+
+ class ReportCollector : public Reader::Reporter {
+ public:
+ ReportCollector() : dropped_bytes_(0) {}
+ void Corruption(size_t bytes, const Status& status) override {
+ dropped_bytes_ += bytes;
+ message_.append(status.ToString());
+ }
+
+ size_t dropped_bytes_;
+ std::string message_;
+ };
+
+ // Record metadata for testing initial offset functionality
+ static size_t initial_offset_record_sizes_[];
+ static uint64_t initial_offset_last_record_offsets_[];
+ static int num_initial_offset_records_;
+
+ StringDest dest_;
+ StringSource source_;
+ ReportCollector report_;
+ bool reading_;
+ Writer* writer_;
+ Reader* reader_;
+};
+
+size_t LogTest::initial_offset_record_sizes_[] = {
+ 10000, // Two sizable records in first block
+ 10000,
+ 2 * log::kBlockSize - 1000, // Span three blocks
+ 1,
+ 13716, // Consume all but two bytes of block 3.
+ log::kBlockSize - kHeaderSize, // Consume the entirety of block 4.
};
-size_t LogTest::initial_offset_record_sizes_[] =
- {10000, // Two sizable records in first block
- 10000,
- 2 * log::kBlockSize - 1000, // Span three blocks
- 1,
- 13716, // Consume all but two bytes of block 3.
- log::kBlockSize - kHeaderSize, // Consume the entirety of block 4.
- };
-
-uint64_t LogTest::initial_offset_last_record_offsets_[] =
- {0,
- kHeaderSize + 10000,
- 2 * (kHeaderSize + 10000),
- 2 * (kHeaderSize + 10000) +
- (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
- 2 * (kHeaderSize + 10000) +
- (2 * log::kBlockSize - 1000) + 3 * kHeaderSize
- + kHeaderSize + 1,
- 3 * log::kBlockSize,
- };
+uint64_t LogTest::initial_offset_last_record_offsets_[] = {
+ 0,
+ kHeaderSize + 10000,
+ 2 * (kHeaderSize + 10000),
+ 2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
+ 2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize +
+ kHeaderSize + 1,
+ 3 * log::kBlockSize,
+};
// LogTest::initial_offset_last_record_offsets_ must be defined before this.
int LogTest::num_initial_offset_records_ =
- sizeof(LogTest::initial_offset_last_record_offsets_)/sizeof(uint64_t);
+ sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t);
-TEST(LogTest, Empty) {
- ASSERT_EQ("EOF", Read());
-}
+TEST(LogTest, Empty) { ASSERT_EQ("EOF", Read()); }
TEST(LogTest, ReadWrite) {
Write("foo");
@@ -306,7 +297,7 @@ TEST(LogTest, Fragmentation) {
TEST(LogTest, MarginalTrailer) {
// Make a trailer that is exactly the same length as an empty record.
- const int n = kBlockSize - 2*kHeaderSize;
+ const int n = kBlockSize - 2 * kHeaderSize;
Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
Write("");
@@ -319,7 +310,7 @@ TEST(LogTest, MarginalTrailer) {
TEST(LogTest, MarginalTrailer2) {
// Make a trailer that is exactly the same length as an empty record.
- const int n = kBlockSize - 2*kHeaderSize;
+ const int n = kBlockSize - 2 * kHeaderSize;
Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
Write("bar");
@@ -331,7 +322,7 @@ TEST(LogTest, MarginalTrailer2) {
}
TEST(LogTest, ShortTrailer) {
- const int n = kBlockSize - 2*kHeaderSize + 4;
+ const int n = kBlockSize - 2 * kHeaderSize + 4;
Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
Write("");
@@ -343,7 +334,7 @@ TEST(LogTest, ShortTrailer) {
}
TEST(LogTest, AlignedEof) {
- const int n = kBlockSize - 2*kHeaderSize + 4;
+ const int n = kBlockSize - 2 * kHeaderSize + 4;
Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
ASSERT_EQ(BigString("foo", n), Read());
@@ -394,7 +385,7 @@ TEST(LogTest, BadRecordType) {
TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
Write("foo");
- ShrinkSize(4); // Drop all payload as well as a header byte
+ ShrinkSize(4); // Drop all payload as well as a header byte
ASSERT_EQ("EOF", Read());
// Truncated last record is ignored, not treated as an error.
ASSERT_EQ(0, DroppedBytes());
@@ -492,7 +483,7 @@ TEST(LogTest, SkipIntoMultiRecord) {
// If initial_offset points to a record after first(R1) but before first(R2)
// incomplete fragment errors are not actual errors, and must be suppressed
// until a new first or full record is encountered.
- Write(BigString("foo", 3*kBlockSize));
+ Write(BigString("foo", 3 * kBlockSize));
Write("correct");
StartReadingAt(kBlockSize);
@@ -514,44 +505,30 @@ TEST(LogTest, ErrorJoinsRecords) {
Write("correct");
// Wipe the middle block
- for (int offset = kBlockSize; offset < 2*kBlockSize; offset++) {
+ for (int offset = kBlockSize; offset < 2 * kBlockSize; offset++) {
SetByte(offset, 'x');
}
ASSERT_EQ("correct", Read());
ASSERT_EQ("EOF", Read());
const size_t dropped = DroppedBytes();
- ASSERT_LE(dropped, 2*kBlockSize + 100);
- ASSERT_GE(dropped, 2*kBlockSize);
+ ASSERT_LE(dropped, 2 * kBlockSize + 100);
+ ASSERT_GE(dropped, 2 * kBlockSize);
}
-TEST(LogTest, ReadStart) {
- CheckInitialOffsetRecord(0, 0);
-}
+TEST(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); }
-TEST(LogTest, ReadSecondOneOff) {
- CheckInitialOffsetRecord(1, 1);
-}
+TEST(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); }
-TEST(LogTest, ReadSecondTenThousand) {
- CheckInitialOffsetRecord(10000, 1);
-}
+TEST(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); }
-TEST(LogTest, ReadSecondStart) {
- CheckInitialOffsetRecord(10007, 1);
-}
+TEST(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); }
-TEST(LogTest, ReadThirdOneOff) {
- CheckInitialOffsetRecord(10008, 2);
-}
+TEST(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); }
-TEST(LogTest, ReadThirdStart) {
- CheckInitialOffsetRecord(20014, 2);
-}
+TEST(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); }
-TEST(LogTest, ReadFourthOneOff) {
- CheckInitialOffsetRecord(20015, 3);
-}
+TEST(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); }
TEST(LogTest, ReadFourthFirstBlockTrailer) {
CheckInitialOffsetRecord(log::kBlockSize - 4, 3);
@@ -575,17 +552,11 @@ TEST(LogTest, ReadInitialOffsetIntoBlockPadding) {
CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5);
}
-TEST(LogTest, ReadEnd) {
- CheckOffsetPastEndReturnsNoRecords(0);
-}
+TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); }
-TEST(LogTest, ReadPastEnd) {
- CheckOffsetPastEndReturnsNoRecords(5);
-}
+TEST(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
} // namespace log
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/log_writer.cc b/src/leveldb/db/log_writer.cc
index 74a03270da..bfb16fb486 100644
--- a/src/leveldb/db/log_writer.cc
+++ b/src/leveldb/db/log_writer.cc
@@ -5,6 +5,7 @@
#include "db/log_writer.h"
#include <stdint.h>
+
#include "leveldb/env.h"
#include "util/coding.h"
#include "util/crc32c.h"
@@ -19,9 +20,7 @@ static void InitTypeCrc(uint32_t* type_crc) {
}
}
-Writer::Writer(WritableFile* dest)
- : dest_(dest),
- block_offset_(0) {
+Writer::Writer(WritableFile* dest) : dest_(dest), block_offset_(0) {
InitTypeCrc(type_crc_);
}
@@ -30,8 +29,7 @@ Writer::Writer(WritableFile* dest, uint64_t dest_length)
InitTypeCrc(type_crc_);
}
-Writer::~Writer() {
-}
+Writer::~Writer() = default;
Status Writer::AddRecord(const Slice& slice) {
const char* ptr = slice.data();
@@ -49,7 +47,7 @@ Status Writer::AddRecord(const Slice& slice) {
// Switch to a new block
if (leftover > 0) {
// Fill the trailer (literal below relies on kHeaderSize being 7)
- assert(kHeaderSize == 7);
+ static_assert(kHeaderSize == 7, "");
dest_->Append(Slice("\x00\x00\x00\x00\x00\x00", leftover));
}
block_offset_ = 0;
@@ -81,30 +79,31 @@ Status Writer::AddRecord(const Slice& slice) {
return s;
}
-Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr, size_t n) {
- assert(n <= 0xffff); // Must fit in two bytes
- assert(block_offset_ + kHeaderSize + n <= kBlockSize);
+Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr,
+ size_t length) {
+ assert(length <= 0xffff); // Must fit in two bytes
+ assert(block_offset_ + kHeaderSize + length <= kBlockSize);
// Format the header
char buf[kHeaderSize];
- buf[4] = static_cast<char>(n & 0xff);
- buf[5] = static_cast<char>(n >> 8);
+ buf[4] = static_cast<char>(length & 0xff);
+ buf[5] = static_cast<char>(length >> 8);
buf[6] = static_cast<char>(t);
// Compute the crc of the record type and the payload.
- uint32_t crc = crc32c::Extend(type_crc_[t], ptr, n);
- crc = crc32c::Mask(crc); // Adjust for storage
+ uint32_t crc = crc32c::Extend(type_crc_[t], ptr, length);
+ crc = crc32c::Mask(crc); // Adjust for storage
EncodeFixed32(buf, crc);
// Write the header and the payload
Status s = dest_->Append(Slice(buf, kHeaderSize));
if (s.ok()) {
- s = dest_->Append(Slice(ptr, n));
+ s = dest_->Append(Slice(ptr, length));
if (s.ok()) {
s = dest_->Flush();
}
}
- block_offset_ += kHeaderSize + n;
+ block_offset_ += kHeaderSize + length;
return s;
}
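// Illustrative aside (not part of this diff): EmitPhysicalRecord above packs a
// 7-byte header as checksum(4, little-endian) | length(2, little-endian) |
// type(1). A self-contained sketch of that layout; the checksum argument is a
// placeholder here, whereas the real code derives it with crc32c::Extend over
// the type byte and payload and then crc32c::Mask before storage.
#include <cstddef>
#include <cstdint>

void PackHeader(char* buf, uint32_t masked_crc, size_t length, char type) {
  // Fixed32 little-endian checksum, as util/coding's EncodeFixed32 writes it.
  buf[0] = static_cast<char>(masked_crc & 0xff);
  buf[1] = static_cast<char>((masked_crc >> 8) & 0xff);
  buf[2] = static_cast<char>((masked_crc >> 16) & 0xff);
  buf[3] = static_cast<char>((masked_crc >> 24) & 0xff);
  // Two-byte payload length (length <= 0xffff is asserted by the caller).
  buf[4] = static_cast<char>(length & 0xff);
  buf[5] = static_cast<char>(length >> 8);
  // One-byte record type (full / first / middle / last fragment).
  buf[6] = type;
}

int main() {
  char buf[7];
  PackHeader(buf, /*masked_crc=*/0u, /*length=*/3, /*type=*/1);
  return (buf[4] == 3 && buf[5] == 0 && buf[6] == 1) ? 0 : 1;
}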
diff --git a/src/leveldb/db/log_writer.h b/src/leveldb/db/log_writer.h
index 9e7cc4705b..c0a21147ee 100644
--- a/src/leveldb/db/log_writer.h
+++ b/src/leveldb/db/log_writer.h
@@ -6,6 +6,7 @@
#define STORAGE_LEVELDB_DB_LOG_WRITER_H_
#include <stdint.h>
+
#include "db/log_format.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"
@@ -28,24 +29,23 @@ class Writer {
// "*dest" must remain live while this Writer is in use.
Writer(WritableFile* dest, uint64_t dest_length);
+ Writer(const Writer&) = delete;
+ Writer& operator=(const Writer&) = delete;
+
~Writer();
Status AddRecord(const Slice& slice);
private:
+ Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
+
WritableFile* dest_;
- int block_offset_; // Current offset in block
+ int block_offset_; // Current offset in block
// crc32c values for all supported record types. These are
// pre-computed to reduce the overhead of computing the crc of the
// record type stored in the header.
uint32_t type_crc_[kMaxRecordType + 1];
-
- Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
-
- // No copying allowed
- Writer(const Writer&);
- void operator=(const Writer&);
};
} // namespace log
diff --git a/src/leveldb/db/memtable.cc b/src/leveldb/db/memtable.cc
index 287afdbdcb..00931d4671 100644
--- a/src/leveldb/db/memtable.cc
+++ b/src/leveldb/db/memtable.cc
@@ -18,20 +18,15 @@ static Slice GetLengthPrefixedSlice(const char* data) {
return Slice(p, len);
}
-MemTable::MemTable(const InternalKeyComparator& cmp)
- : comparator_(cmp),
- refs_(0),
- table_(comparator_, &arena_) {
-}
+MemTable::MemTable(const InternalKeyComparator& comparator)
+ : comparator_(comparator), refs_(0), table_(comparator_, &arena_) {}
-MemTable::~MemTable() {
- assert(refs_ == 0);
-}
+MemTable::~MemTable() { assert(refs_ == 0); }
size_t MemTable::ApproximateMemoryUsage() { return arena_.MemoryUsage(); }
-int MemTable::KeyComparator::operator()(const char* aptr, const char* bptr)
- const {
+int MemTable::KeyComparator::operator()(const char* aptr,
+ const char* bptr) const {
// Internal keys are encoded as length-prefixed strings.
Slice a = GetLengthPrefixedSlice(aptr);
Slice b = GetLengthPrefixedSlice(bptr);
@@ -48,39 +43,37 @@ static const char* EncodeKey(std::string* scratch, const Slice& target) {
return scratch->data();
}
-class MemTableIterator: public Iterator {
+class MemTableIterator : public Iterator {
public:
- explicit MemTableIterator(MemTable::Table* table) : iter_(table) { }
-
- virtual bool Valid() const { return iter_.Valid(); }
- virtual void Seek(const Slice& k) { iter_.Seek(EncodeKey(&tmp_, k)); }
- virtual void SeekToFirst() { iter_.SeekToFirst(); }
- virtual void SeekToLast() { iter_.SeekToLast(); }
- virtual void Next() { iter_.Next(); }
- virtual void Prev() { iter_.Prev(); }
- virtual Slice key() const { return GetLengthPrefixedSlice(iter_.key()); }
- virtual Slice value() const {
+ explicit MemTableIterator(MemTable::Table* table) : iter_(table) {}
+
+ MemTableIterator(const MemTableIterator&) = delete;
+ MemTableIterator& operator=(const MemTableIterator&) = delete;
+
+ ~MemTableIterator() override = default;
+
+ bool Valid() const override { return iter_.Valid(); }
+ void Seek(const Slice& k) override { iter_.Seek(EncodeKey(&tmp_, k)); }
+ void SeekToFirst() override { iter_.SeekToFirst(); }
+ void SeekToLast() override { iter_.SeekToLast(); }
+ void Next() override { iter_.Next(); }
+ void Prev() override { iter_.Prev(); }
+ Slice key() const override { return GetLengthPrefixedSlice(iter_.key()); }
+ Slice value() const override {
Slice key_slice = GetLengthPrefixedSlice(iter_.key());
return GetLengthPrefixedSlice(key_slice.data() + key_slice.size());
}
- virtual Status status() const { return Status::OK(); }
+ Status status() const override { return Status::OK(); }
private:
MemTable::Table::Iterator iter_;
- std::string tmp_; // For passing to EncodeKey
-
- // No copying allowed
- MemTableIterator(const MemTableIterator&);
- void operator=(const MemTableIterator&);
+ std::string tmp_; // For passing to EncodeKey
};
-Iterator* MemTable::NewIterator() {
- return new MemTableIterator(&table_);
-}
+Iterator* MemTable::NewIterator() { return new MemTableIterator(&table_); }
-void MemTable::Add(SequenceNumber s, ValueType type,
- const Slice& key,
+void MemTable::Add(SequenceNumber s, ValueType type, const Slice& key,
const Slice& value) {
// Format of an entry is concatenation of:
// key_size : varint32 of internal_key.size()
@@ -90,9 +83,9 @@ void MemTable::Add(SequenceNumber s, ValueType type,
size_t key_size = key.size();
size_t val_size = value.size();
size_t internal_key_size = key_size + 8;
- const size_t encoded_len =
- VarintLength(internal_key_size) + internal_key_size +
- VarintLength(val_size) + val_size;
+ const size_t encoded_len = VarintLength(internal_key_size) +
+ internal_key_size + VarintLength(val_size) +
+ val_size;
char* buf = arena_.Allocate(encoded_len);
char* p = EncodeVarint32(buf, internal_key_size);
memcpy(p, key.data(), key_size);
@@ -121,10 +114,9 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s) {
// all entries with overly large sequence numbers.
const char* entry = iter.key();
uint32_t key_length;
- const char* key_ptr = GetVarint32Ptr(entry, entry+5, &key_length);
+ const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
if (comparator_.comparator.user_comparator()->Compare(
- Slice(key_ptr, key_length - 8),
- key.user_key()) == 0) {
+ Slice(key_ptr, key_length - 8), key.user_key()) == 0) {
// Correct user key
const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
switch (static_cast<ValueType>(tag & 0xff)) {
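// Illustrative aside (not part of this diff): MemTable entries are stored as
// varint32 length prefixes followed by the raw bytes (internal key, then
// value), which is why Get() decodes with GetVarint32Ptr(entry, entry + 5, ...)
// -- a varint32 occupies at most 5 bytes. A minimal self-contained sketch of
// that length-prefixed layout; the local helpers stand in for util/coding.
#include <cassert>
#include <cstdint>
#include <string>

static void PutVarint32(std::string* dst, uint32_t v) {
  while (v >= 0x80) {
    dst->push_back(static_cast<char>((v & 0x7f) | 0x80));
    v >>= 7;
  }
  dst->push_back(static_cast<char>(v));
}

static const char* GetVarint32Ptr(const char* p, const char* limit,
                                  uint32_t* value) {
  uint32_t result = 0;
  for (int shift = 0; shift <= 28 && p < limit; shift += 7) {
    uint32_t byte = static_cast<unsigned char>(*p++);
    result |= (byte & 0x7f) << shift;
    if ((byte & 0x80) == 0) {
      *value = result;
      return p;
    }
  }
  return nullptr;  // Malformed varint.
}

int main() {
  std::string buf;
  const std::string key = "user_key_plus_8_byte_tag";
  PutVarint32(&buf, static_cast<uint32_t>(key.size()));
  buf.append(key);

  uint32_t len = 0;
  const char* p = GetVarint32Ptr(buf.data(), buf.data() + 5, &len);
  assert(p != nullptr && len == key.size());
  assert(std::string(p, len) == key);
  return 0;
}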
diff --git a/src/leveldb/db/memtable.h b/src/leveldb/db/memtable.h
index 9f41567cde..9d986b1070 100644
--- a/src/leveldb/db/memtable.h
+++ b/src/leveldb/db/memtable.h
@@ -6,15 +6,15 @@
#define STORAGE_LEVELDB_DB_MEMTABLE_H_
#include <string>
-#include "leveldb/db.h"
+
#include "db/dbformat.h"
#include "db/skiplist.h"
+#include "leveldb/db.h"
#include "util/arena.h"
namespace leveldb {
class InternalKeyComparator;
-class Mutex;
class MemTableIterator;
class MemTable {
@@ -23,6 +23,9 @@ class MemTable {
// is zero and the caller must call Ref() at least once.
explicit MemTable(const InternalKeyComparator& comparator);
+ MemTable(const MemTable&) = delete;
+ MemTable& operator=(const MemTable&) = delete;
+
// Increase reference count.
void Ref() { ++refs_; }
@@ -50,8 +53,7 @@ class MemTable {
// Add an entry into memtable that maps key to value at the
// specified sequence number and with the specified type.
// Typically value will be empty if type==kTypeDeletion.
- void Add(SequenceNumber seq, ValueType type,
- const Slice& key,
+ void Add(SequenceNumber seq, ValueType type, const Slice& key,
const Slice& value);
// If memtable contains a value for key, store it in *value and return true.
@@ -61,26 +63,23 @@ class MemTable {
bool Get(const LookupKey& key, std::string* value, Status* s);
private:
- ~MemTable(); // Private since only Unref() should be used to delete it
+ friend class MemTableIterator;
+ friend class MemTableBackwardIterator;
struct KeyComparator {
const InternalKeyComparator comparator;
- explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) { }
+ explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) {}
int operator()(const char* a, const char* b) const;
};
- friend class MemTableIterator;
- friend class MemTableBackwardIterator;
typedef SkipList<const char*, KeyComparator> Table;
+ ~MemTable(); // Private since only Unref() should be used to delete it
+
KeyComparator comparator_;
int refs_;
Arena arena_;
Table table_;
-
- // No copying allowed
- MemTable(const MemTable&);
- void operator=(const MemTable&);
};
} // namespace leveldb
diff --git a/src/leveldb/db/recovery_test.cc b/src/leveldb/db/recovery_test.cc
index 9596f4288a..547a9591ea 100644
--- a/src/leveldb/db/recovery_test.cc
+++ b/src/leveldb/db/recovery_test.cc
@@ -17,7 +17,7 @@ namespace leveldb {
class RecoveryTest {
public:
- RecoveryTest() : env_(Env::Default()), db_(NULL) {
+ RecoveryTest() : env_(Env::Default()), db_(nullptr) {
dbname_ = test::TmpDir() + "/recovery_test";
DestroyDB(dbname_, Options());
Open();
@@ -44,22 +44,26 @@ class RecoveryTest {
void Close() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
}
- void Open(Options* options = NULL) {
+ Status OpenWithStatus(Options* options = nullptr) {
Close();
Options opts;
- if (options != NULL) {
+ if (options != nullptr) {
opts = *options;
} else {
opts.reuse_logs = true; // TODO(sanjay): test both ways
opts.create_if_missing = true;
}
- if (opts.env == NULL) {
+ if (opts.env == nullptr) {
opts.env = env_;
}
- ASSERT_OK(DB::Open(opts, dbname_, &db_));
+ return DB::Open(opts, dbname_, &db_);
+ }
+
+ void Open(Options* options = nullptr) {
+ ASSERT_OK(OpenWithStatus(options));
ASSERT_EQ(1, NumLogs());
}
@@ -67,7 +71,7 @@ class RecoveryTest {
return db_->Put(WriteOptions(), k, v);
}
- std::string Get(const std::string& k, const Snapshot* snapshot = NULL) {
+ std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
std::string result;
Status s = db_->Get(ReadOptions(), k, &result);
if (s.IsNotFound()) {
@@ -82,17 +86,18 @@ class RecoveryTest {
std::string current;
ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), &current));
size_t len = current.size();
- if (len > 0 && current[len-1] == '\n') {
+ if (len > 0 && current[len - 1] == '\n') {
current.resize(len - 1);
}
return dbname_ + "/" + current;
}
- std::string LogName(uint64_t number) {
- return LogFileName(dbname_, number);
- }
+ std::string LogName(uint64_t number) { return LogFileName(dbname_, number); }
size_t DeleteLogFiles() {
+ // Linux allows unlinking open files, but Windows does not.
+ // Closing the db allows for file deletion.
+ Close();
std::vector<uint64_t> logs = GetFiles(kLogFile);
for (size_t i = 0; i < logs.size(); i++) {
ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]);
@@ -100,9 +105,9 @@ class RecoveryTest {
return logs.size();
}
- uint64_t FirstLogFile() {
- return GetFiles(kLogFile)[0];
- }
+ void DeleteManifestFile() { ASSERT_OK(env_->DeleteFile(ManifestFileName())); }
+
+ uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; }
std::vector<uint64_t> GetFiles(FileType t) {
std::vector<std::string> filenames;
@@ -118,13 +123,9 @@ class RecoveryTest {
return result;
}
- int NumLogs() {
- return GetFiles(kLogFile).size();
- }
+ int NumLogs() { return GetFiles(kLogFile).size(); }
- int NumTables() {
- return GetFiles(kTableFile).size();
- }
+ int NumTables() { return GetFiles(kTableFile).size(); }
uint64_t FileSize(const std::string& fname) {
uint64_t result;
@@ -132,9 +133,7 @@ class RecoveryTest {
return result;
}
- void CompactMemTable() {
- dbfull()->TEST_CompactMemTable();
- }
+ void CompactMemTable() { dbfull()->TEST_CompactMemTable(); }
// Directly construct a log file that sets key to val.
void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
@@ -186,7 +185,7 @@ TEST(RecoveryTest, LargeManifestCompacted) {
uint64_t len = FileSize(old_manifest);
WritableFile* file;
ASSERT_OK(env()->NewAppendableFile(old_manifest, &file));
- std::string zeroes(3*1048576 - static_cast<size_t>(len), 0);
+ std::string zeroes(3 * 1048576 - static_cast<size_t>(len), 0);
ASSERT_OK(file->Append(zeroes));
ASSERT_OK(file->Flush());
delete file;
@@ -259,7 +258,7 @@ TEST(RecoveryTest, MultipleMemTables) {
// Force creation of multiple memtables by reducing the write buffer size.
Options opt;
opt.reuse_logs = true;
- opt.write_buffer_size = (kNum*100) / 2;
+ opt.write_buffer_size = (kNum * 100) / 2;
Open(&opt);
ASSERT_LE(2, NumTables());
ASSERT_EQ(1, NumLogs());
@@ -278,16 +277,16 @@ TEST(RecoveryTest, MultipleLogFiles) {
// Make a bunch of uncompacted log files.
uint64_t old_log = FirstLogFile();
- MakeLogFile(old_log+1, 1000, "hello", "world");
- MakeLogFile(old_log+2, 1001, "hi", "there");
- MakeLogFile(old_log+3, 1002, "foo", "bar2");
+ MakeLogFile(old_log + 1, 1000, "hello", "world");
+ MakeLogFile(old_log + 2, 1001, "hi", "there");
+ MakeLogFile(old_log + 3, 1002, "foo", "bar2");
// Recover and check that all log files were processed.
Open();
ASSERT_LE(1, NumTables());
ASSERT_EQ(1, NumLogs());
uint64_t new_log = FirstLogFile();
- ASSERT_LE(old_log+3, new_log);
+ ASSERT_LE(old_log + 3, new_log);
ASSERT_EQ("bar2", Get("foo"));
ASSERT_EQ("world", Get("hello"));
ASSERT_EQ("there", Get("hi"));
@@ -305,7 +304,7 @@ TEST(RecoveryTest, MultipleLogFiles) {
// Check that introducing an older log file does not cause it to be re-read.
Close();
- MakeLogFile(old_log+1, 2000, "hello", "stale write");
+ MakeLogFile(old_log + 1, 2000, "hello", "stale write");
Open();
ASSERT_LE(1, NumTables());
ASSERT_EQ(1, NumLogs());
@@ -317,8 +316,15 @@ TEST(RecoveryTest, MultipleLogFiles) {
ASSERT_EQ("there", Get("hi"));
}
-} // namespace leveldb
+TEST(RecoveryTest, ManifestMissing) {
+ ASSERT_OK(Put("foo", "bar"));
+ Close();
+ DeleteManifestFile();
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
+ Status status = OpenWithStatus();
+ ASSERT_TRUE(status.IsCorruption());
}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
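// Illustrative aside (not part of this diff): ManifestFileName() above resolves
// the live manifest by reading the one-line CURRENT file, trimming the trailing
// newline, and prefixing the database directory. A minimal standalone sketch of
// that parsing step, with the file I/O stubbed out and an example manifest name
// chosen purely for illustration:
#include <cassert>
#include <string>

std::string ManifestFromCurrent(const std::string& dbname,
                                std::string current /* CURRENT contents */) {
  // CURRENT normally ends with '\n'; strip it before building the path.
  if (!current.empty() && current.back() == '\n') {
    current.pop_back();
  }
  return dbname + "/" + current;
}

int main() {
  assert(ManifestFromCurrent("/tmp/recovery_test", "MANIFEST-000002\n") ==
         "/tmp/recovery_test/MANIFEST-000002");
  return 0;
}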
diff --git a/src/leveldb/db/repair.cc b/src/leveldb/db/repair.cc
index 7281e3d345..04847c3bbf 100644
--- a/src/leveldb/db/repair.cc
+++ b/src/leveldb/db/repair.cc
@@ -54,7 +54,7 @@ class Repairer {
owns_cache_(options_.block_cache != options.block_cache),
next_file_number_(1) {
// TableCache can be small since we expect each table to be opened once.
- table_cache_ = new TableCache(dbname_, &options_, 10);
+ table_cache_ = new TableCache(dbname_, options_, 10);
}
~Repairer() {
@@ -84,9 +84,7 @@ class Repairer {
"recovered %d files; %llu bytes. "
"Some data may have been lost. "
"****",
- dbname_.c_str(),
- static_cast<int>(tables_.size()),
- bytes);
+ dbname_.c_str(), static_cast<int>(tables_.size()), bytes);
}
return status;
}
@@ -97,22 +95,6 @@ class Repairer {
SequenceNumber max_sequence;
};
- std::string const dbname_;
- Env* const env_;
- InternalKeyComparator const icmp_;
- InternalFilterPolicy const ipolicy_;
- Options const options_;
- bool owns_info_log_;
- bool owns_cache_;
- TableCache* table_cache_;
- VersionEdit edit_;
-
- std::vector<std::string> manifests_;
- std::vector<uint64_t> table_numbers_;
- std::vector<uint64_t> logs_;
- std::vector<TableInfo> tables_;
- uint64_t next_file_number_;
-
Status FindFiles() {
std::vector<std::string> filenames;
Status status = env_->GetChildren(dbname_, &filenames);
@@ -152,8 +134,7 @@ class Repairer {
Status status = ConvertLogToTable(logs_[i]);
if (!status.ok()) {
Log(options_.info_log, "Log #%llu: ignoring conversion error: %s",
- (unsigned long long) logs_[i],
- status.ToString().c_str());
+ (unsigned long long)logs_[i], status.ToString().c_str());
}
ArchiveFile(logname);
}
@@ -164,11 +145,10 @@ class Repairer {
Env* env;
Logger* info_log;
uint64_t lognum;
- virtual void Corruption(size_t bytes, const Status& s) {
+ void Corruption(size_t bytes, const Status& s) override {
// We print error messages for corruption, but continue repairing.
Log(info_log, "Log #%llu: dropping %d bytes; %s",
- (unsigned long long) lognum,
- static_cast<int>(bytes),
+ (unsigned long long)lognum, static_cast<int>(bytes),
s.ToString().c_str());
}
};
@@ -190,8 +170,8 @@ class Repairer {
// corruptions cause entire commits to be skipped instead of
// propagating bad information (like overly large sequence
// numbers).
- log::Reader reader(lfile, &reporter, false/*do not checksum*/,
- 0/*initial_offset*/);
+ log::Reader reader(lfile, &reporter, false /*do not checksum*/,
+ 0 /*initial_offset*/);
// Read all the records and add to a memtable
std::string scratch;
@@ -202,8 +182,8 @@ class Repairer {
int counter = 0;
while (reader.ReadRecord(&record, &scratch)) {
if (record.size() < 12) {
- reporter.Corruption(
- record.size(), Status::Corruption("log record too small", logname));
+ reporter.Corruption(record.size(),
+ Status::Corruption("log record too small", logname));
continue;
}
WriteBatchInternal::SetContents(&batch, record);
@@ -212,8 +192,7 @@ class Repairer {
counter += WriteBatchInternal::Count(&batch);
} else {
Log(options_.info_log, "Log #%llu: ignoring %s",
- (unsigned long long) log,
- status.ToString().c_str());
+ (unsigned long long)log, status.ToString().c_str());
status = Status::OK(); // Keep going with rest of file
}
}
@@ -227,16 +206,14 @@ class Repairer {
status = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta);
delete iter;
mem->Unref();
- mem = NULL;
+ mem = nullptr;
if (status.ok()) {
if (meta.file_size > 0) {
table_numbers_.push_back(meta.number);
}
}
Log(options_.info_log, "Log #%llu: %d ops saved to Table #%llu %s",
- (unsigned long long) log,
- counter,
- (unsigned long long) meta.number,
+ (unsigned long long)log, counter, (unsigned long long)meta.number,
status.ToString().c_str());
return status;
}
@@ -272,8 +249,7 @@ class Repairer {
ArchiveFile(TableFileName(dbname_, number));
ArchiveFile(SSTTableFileName(dbname_, number));
Log(options_.info_log, "Table #%llu: dropped: %s",
- (unsigned long long) t.meta.number,
- status.ToString().c_str());
+ (unsigned long long)t.meta.number, status.ToString().c_str());
return;
}
@@ -287,8 +263,7 @@ class Repairer {
Slice key = iter->key();
if (!ParseInternalKey(key, &parsed)) {
Log(options_.info_log, "Table #%llu: unparsable key %s",
- (unsigned long long) t.meta.number,
- EscapeString(key).c_str());
+ (unsigned long long)t.meta.number, EscapeString(key).c_str());
continue;
}
@@ -307,9 +282,7 @@ class Repairer {
}
delete iter;
Log(options_.info_log, "Table #%llu: %d entries %s",
- (unsigned long long) t.meta.number,
- counter,
- status.ToString().c_str());
+ (unsigned long long)t.meta.number, counter, status.ToString().c_str());
if (status.ok()) {
tables_.push_back(t);
@@ -350,20 +323,20 @@ class Repairer {
}
}
delete builder;
- builder = NULL;
+ builder = nullptr;
if (s.ok()) {
s = file->Close();
}
delete file;
- file = NULL;
+ file = nullptr;
if (counter > 0 && s.ok()) {
std::string orig = TableFileName(dbname_, t.meta.number);
s = env_->RenameFile(copy, orig);
if (s.ok()) {
Log(options_.info_log, "Table #%llu: %d entries repaired",
- (unsigned long long) t.meta.number, counter);
+ (unsigned long long)t.meta.number, counter);
tables_.push_back(t);
}
}
@@ -395,11 +368,11 @@ class Repairer {
for (size_t i = 0; i < tables_.size(); i++) {
// TODO(opt): separate out into multiple levels
const TableInfo& t = tables_[i];
- edit_.AddFile(0, t.meta.number, t.meta.file_size,
- t.meta.smallest, t.meta.largest);
+ edit_.AddFile(0, t.meta.number, t.meta.file_size, t.meta.smallest,
+ t.meta.largest);
}
- //fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
+ // fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
{
log::Writer log(file);
std::string record;
@@ -410,7 +383,7 @@ class Repairer {
status = file->Close();
}
delete file;
- file = NULL;
+ file = nullptr;
if (!status.ok()) {
env_->DeleteFile(tmp);
@@ -438,18 +411,34 @@ class Repairer {
// dir/lost/foo
const char* slash = strrchr(fname.c_str(), '/');
std::string new_dir;
- if (slash != NULL) {
+ if (slash != nullptr) {
new_dir.assign(fname.data(), slash - fname.data());
}
new_dir.append("/lost");
env_->CreateDir(new_dir); // Ignore error
std::string new_file = new_dir;
new_file.append("/");
- new_file.append((slash == NULL) ? fname.c_str() : slash + 1);
+ new_file.append((slash == nullptr) ? fname.c_str() : slash + 1);
Status s = env_->RenameFile(fname, new_file);
- Log(options_.info_log, "Archiving %s: %s\n",
- fname.c_str(), s.ToString().c_str());
+ Log(options_.info_log, "Archiving %s: %s\n", fname.c_str(),
+ s.ToString().c_str());
}
+
+ const std::string dbname_;
+ Env* const env_;
+ InternalKeyComparator const icmp_;
+ InternalFilterPolicy const ipolicy_;
+ const Options options_;
+ bool owns_info_log_;
+ bool owns_cache_;
+ TableCache* table_cache_;
+ VersionEdit edit_;
+
+ std::vector<std::string> manifests_;
+ std::vector<uint64_t> table_numbers_;
+ std::vector<uint64_t> logs_;
+ std::vector<TableInfo> tables_;
+ uint64_t next_file_number_;
};
} // namespace
diff --git a/src/leveldb/db/skiplist.h b/src/leveldb/db/skiplist.h
index 8bd77764d8..a59b45b380 100644
--- a/src/leveldb/db/skiplist.h
+++ b/src/leveldb/db/skiplist.h
@@ -27,9 +27,10 @@
//
// ... prev vs. next pointer ordering ...
-#include <assert.h>
-#include <stdlib.h>
-#include "port/port.h"
+#include <atomic>
+#include <cassert>
+#include <cstdlib>
+
#include "util/arena.h"
#include "util/random.h"
@@ -37,7 +38,7 @@ namespace leveldb {
class Arena;
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
class SkipList {
private:
struct Node;
@@ -48,6 +49,9 @@ class SkipList {
// must remain allocated for the lifetime of the skiplist object.
explicit SkipList(Comparator cmp, Arena* arena);
+ SkipList(const SkipList&) = delete;
+ SkipList& operator=(const SkipList&) = delete;
+
// Insert key into the list.
// REQUIRES: nothing that compares equal to key is currently in the list.
void Insert(const Key& key);
@@ -97,24 +101,10 @@ class SkipList {
private:
enum { kMaxHeight = 12 };
- // Immutable after construction
- Comparator const compare_;
- Arena* const arena_; // Arena used for allocations of nodes
-
- Node* const head_;
-
- // Modified only by Insert(). Read racily by readers, but stale
- // values are ok.
- port::AtomicPointer max_height_; // Height of the entire list
-
inline int GetMaxHeight() const {
- return static_cast<int>(
- reinterpret_cast<intptr_t>(max_height_.NoBarrier_Load()));
+ return max_height_.load(std::memory_order_relaxed);
}
- // Read/written only by Insert().
- Random rnd_;
-
Node* NewNode(const Key& key, int height);
int RandomHeight();
bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
@@ -123,9 +113,9 @@ class SkipList {
bool KeyIsAfterNode(const Key& key, Node* n) const;
// Return the earliest node that comes at or after key.
- // Return NULL if there is no such node.
+ // Return nullptr if there is no such node.
//
- // If prev is non-NULL, fills prev[level] with pointer to previous
+ // If prev is non-null, fills prev[level] with pointer to previous
// node at "level" for every level in [0..max_height_-1].
Node* FindGreaterOrEqual(const Key& key, Node** prev) const;
@@ -137,15 +127,24 @@ class SkipList {
// Return head_ if list is empty.
Node* FindLast() const;
- // No copying allowed
- SkipList(const SkipList&);
- void operator=(const SkipList&);
+ // Immutable after construction
+ Comparator const compare_;
+ Arena* const arena_; // Arena used for allocations of nodes
+
+ Node* const head_;
+
+ // Modified only by Insert(). Read racily by readers, but stale
+ // values are ok.
+ std::atomic<int> max_height_; // Height of the entire list
+
+ // Read/written only by Insert().
+ Random rnd_;
};
// Implementation details follow
-template<typename Key, class Comparator>
-struct SkipList<Key,Comparator>::Node {
- explicit Node(const Key& k) : key(k) { }
+template <typename Key, class Comparator>
+struct SkipList<Key, Comparator>::Node {
+ explicit Node(const Key& k) : key(k) {}
Key const key;
@@ -155,92 +154,92 @@ struct SkipList<Key,Comparator>::Node {
assert(n >= 0);
// Use an 'acquire load' so that we observe a fully initialized
// version of the returned Node.
- return reinterpret_cast<Node*>(next_[n].Acquire_Load());
+ return next_[n].load(std::memory_order_acquire);
}
void SetNext(int n, Node* x) {
assert(n >= 0);
// Use a 'release store' so that anybody who reads through this
// pointer observes a fully initialized version of the inserted node.
- next_[n].Release_Store(x);
+ next_[n].store(x, std::memory_order_release);
}
// No-barrier variants that can be safely used in a few locations.
Node* NoBarrier_Next(int n) {
assert(n >= 0);
- return reinterpret_cast<Node*>(next_[n].NoBarrier_Load());
+ return next_[n].load(std::memory_order_relaxed);
}
void NoBarrier_SetNext(int n, Node* x) {
assert(n >= 0);
- next_[n].NoBarrier_Store(x);
+ next_[n].store(x, std::memory_order_relaxed);
}
private:
// Array of length equal to the node height. next_[0] is lowest level link.
- port::AtomicPointer next_[1];
+ std::atomic<Node*> next_[1];
};
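// Illustrative aside (not part of this diff): Next()/SetNext() above pair a
// release store with an acquire load so a reader that observes the new pointer
// also observes the fully constructed Node it points to. A minimal sketch of
// that publication pattern with std::atomic, simplified to a single link:
#include <atomic>
#include <cassert>
#include <thread>

struct Payload {
  int value = 0;
};

std::atomic<Payload*> published{nullptr};

void Writer() {
  Payload* p = new Payload;
  p->value = 42;                                  // Initialize first...
  published.store(p, std::memory_order_release);  // ...then publish.
}

void Reader() {
  Payload* p;
  while ((p = published.load(std::memory_order_acquire)) == nullptr) {
    // Spin until the pointer is published.
  }
  assert(p->value == 42);  // Visible thanks to the acquire/release pairing.
  delete p;
}

int main() {
  std::thread w(Writer), r(Reader);
  w.join();
  r.join();
  return 0;
}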
-template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node*
-SkipList<Key,Comparator>::NewNode(const Key& key, int height) {
- char* mem = arena_->AllocateAligned(
- sizeof(Node) + sizeof(port::AtomicPointer) * (height - 1));
- return new (mem) Node(key);
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::NewNode(
+ const Key& key, int height) {
+ char* const node_memory = arena_->AllocateAligned(
+ sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
+ return new (node_memory) Node(key);
}
-template<typename Key, class Comparator>
-inline SkipList<Key,Comparator>::Iterator::Iterator(const SkipList* list) {
+template <typename Key, class Comparator>
+inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
list_ = list;
- node_ = NULL;
+ node_ = nullptr;
}
-template<typename Key, class Comparator>
-inline bool SkipList<Key,Comparator>::Iterator::Valid() const {
- return node_ != NULL;
+template <typename Key, class Comparator>
+inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
+ return node_ != nullptr;
}
-template<typename Key, class Comparator>
-inline const Key& SkipList<Key,Comparator>::Iterator::key() const {
+template <typename Key, class Comparator>
+inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
assert(Valid());
return node_->key;
}
-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Next() {
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::Next() {
assert(Valid());
node_ = node_->Next(0);
}
-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Prev() {
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::Prev() {
// Instead of using explicit "prev" links, we just search for the
// last node that falls before key.
assert(Valid());
node_ = list_->FindLessThan(node_->key);
if (node_ == list_->head_) {
- node_ = NULL;
+ node_ = nullptr;
}
}
-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Seek(const Key& target) {
- node_ = list_->FindGreaterOrEqual(target, NULL);
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
+ node_ = list_->FindGreaterOrEqual(target, nullptr);
}
-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::SeekToFirst() {
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
node_ = list_->head_->Next(0);
}
-template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::SeekToLast() {
+template <typename Key, class Comparator>
+inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
node_ = list_->FindLast();
if (node_ == list_->head_) {
- node_ = NULL;
+ node_ = nullptr;
}
}
-template<typename Key, class Comparator>
-int SkipList<Key,Comparator>::RandomHeight() {
+template <typename Key, class Comparator>
+int SkipList<Key, Comparator>::RandomHeight() {
// Increase height with probability 1 in kBranching
static const unsigned int kBranching = 4;
int height = 1;
@@ -252,15 +251,16 @@ int SkipList<Key,Comparator>::RandomHeight() {
return height;
}
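// Illustrative aside (not part of this diff): with kBranching == 4 the node
// heights follow a geometric distribution capped at kMaxHeight == 12 -- each
// extra level is taken with probability 1/4, so the expected number of forward
// pointers per node is roughly 1 / (1 - 1/4) = 4/3. A standalone sketch of the
// same sampling rule; the promotion loop itself is elided above, so its exact
// form here is an assumption based on the comment.
#include <cstdio>
#include <random>

int RandomHeight(std::mt19937& rng) {
  constexpr int kBranching = 4;
  constexpr int kMaxHeight = 12;
  std::uniform_int_distribution<int> one_in_branching(0, kBranching - 1);
  int height = 1;
  while (height < kMaxHeight && one_in_branching(rng) == 0) {
    ++height;
  }
  return height;
}

int main() {
  std::mt19937 rng(0xdeadbeef);
  double total = 0;
  constexpr int kSamples = 1000000;
  for (int i = 0; i < kSamples; ++i) total += RandomHeight(rng);
  std::printf("average height ~ %.3f (expected ~ 4/3)\n", total / kSamples);
  return 0;
}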
-template<typename Key, class Comparator>
-bool SkipList<Key,Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
- // NULL n is considered infinite
- return (n != NULL) && (compare_(n->key, key) < 0);
+template <typename Key, class Comparator>
+bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
+ // null n is considered infinite
+ return (n != nullptr) && (compare_(n->key, key) < 0);
}
-template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOrEqual(const Key& key, Node** prev)
- const {
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node*
+SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
+ Node** prev) const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
@@ -269,7 +269,7 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOr
// Keep searching in this list
x = next;
} else {
- if (prev != NULL) prev[level] = x;
+ if (prev != nullptr) prev[level] = x;
if (level == 0) {
return next;
} else {
@@ -280,15 +280,15 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOr
}
}
-template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node*
-SkipList<Key,Comparator>::FindLessThan(const Key& key) const {
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node*
+SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
assert(x == head_ || compare_(x->key, key) < 0);
Node* next = x->Next(level);
- if (next == NULL || compare_(next->key, key) >= 0) {
+ if (next == nullptr || compare_(next->key, key) >= 0) {
if (level == 0) {
return x;
} else {
@@ -301,14 +301,14 @@ SkipList<Key,Comparator>::FindLessThan(const Key& key) const {
}
}
-template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindLast()
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
Node* next = x->Next(level);
- if (next == NULL) {
+ if (next == nullptr) {
if (level == 0) {
return x;
} else {
@@ -321,43 +321,41 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindLast()
}
}
-template<typename Key, class Comparator>
-SkipList<Key,Comparator>::SkipList(Comparator cmp, Arena* arena)
+template <typename Key, class Comparator>
+SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
: compare_(cmp),
arena_(arena),
head_(NewNode(0 /* any key will do */, kMaxHeight)),
- max_height_(reinterpret_cast<void*>(1)),
+ max_height_(1),
rnd_(0xdeadbeef) {
for (int i = 0; i < kMaxHeight; i++) {
- head_->SetNext(i, NULL);
+ head_->SetNext(i, nullptr);
}
}
-template<typename Key, class Comparator>
-void SkipList<Key,Comparator>::Insert(const Key& key) {
+template <typename Key, class Comparator>
+void SkipList<Key, Comparator>::Insert(const Key& key) {
// TODO(opt): We can use a barrier-free variant of FindGreaterOrEqual()
// here since Insert() is externally synchronized.
Node* prev[kMaxHeight];
Node* x = FindGreaterOrEqual(key, prev);
// Our data structure does not allow duplicate insertion
- assert(x == NULL || !Equal(key, x->key));
+ assert(x == nullptr || !Equal(key, x->key));
int height = RandomHeight();
if (height > GetMaxHeight()) {
for (int i = GetMaxHeight(); i < height; i++) {
prev[i] = head_;
}
- //fprintf(stderr, "Change height from %d to %d\n", max_height_, height);
-
// It is ok to mutate max_height_ without any synchronization
// with concurrent readers. A concurrent reader that observes
// the new value of max_height_ will see either the old value of
- // new level pointers from head_ (NULL), or a new value set in
+ // new level pointers from head_ (nullptr), or a new value set in
// the loop below. In the former case the reader will
- // immediately drop to the next level since NULL sorts after all
+ // immediately drop to the next level since nullptr sorts after all
// keys. In the latter case the reader will use the new node.
- max_height_.NoBarrier_Store(reinterpret_cast<void*>(height));
+ max_height_.store(height, std::memory_order_relaxed);
}
x = NewNode(key, height);
@@ -369,10 +367,10 @@ void SkipList<Key,Comparator>::Insert(const Key& key) {
}
}
-template<typename Key, class Comparator>
-bool SkipList<Key,Comparator>::Contains(const Key& key) const {
- Node* x = FindGreaterOrEqual(key, NULL);
- if (x != NULL && Equal(key, x->key)) {
+template <typename Key, class Comparator>
+bool SkipList<Key, Comparator>::Contains(const Key& key) const {
+ Node* x = FindGreaterOrEqual(key, nullptr);
+ if (x != nullptr && Equal(key, x->key)) {
return true;
} else {
return false;
diff --git a/src/leveldb/db/skiplist_test.cc b/src/leveldb/db/skiplist_test.cc
index aee1461e1b..9fa2d96829 100644
--- a/src/leveldb/db/skiplist_test.cc
+++ b/src/leveldb/db/skiplist_test.cc
@@ -3,8 +3,13 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/skiplist.h"
+
+#include <atomic>
#include <set>
+
#include "leveldb/env.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/arena.h"
#include "util/hash.h"
#include "util/random.h"
@@ -26,7 +31,7 @@ struct Comparator {
}
};
-class SkipTest { };
+class SkipTest {};
TEST(SkipTest, Empty) {
Arena arena;
@@ -112,8 +117,7 @@ TEST(SkipTest, InsertAndLookup) {
// Compare against model iterator
for (std::set<Key>::reverse_iterator model_iter = keys.rbegin();
- model_iter != keys.rend();
- ++model_iter) {
+ model_iter != keys.rend(); ++model_iter) {
ASSERT_TRUE(iter.Valid());
ASSERT_EQ(*model_iter, iter.key());
iter.Prev();
@@ -126,7 +130,7 @@ TEST(SkipTest, InsertAndLookup) {
// concurrent readers (with no synchronization other than when a
// reader's iterator is created), the reader always observes all the
// data that was present in the skip list when the iterator was
-// constructor. Because insertions are happening concurrently, we may
+// constructed. Because insertions are happening concurrently, we may
// also observe new values that were inserted since the iterator was
// constructed, but we should never miss any values that were present
// at iterator construction time.
@@ -155,12 +159,12 @@ class ConcurrentTest {
static uint64_t hash(Key key) { return key & 0xff; }
static uint64_t HashNumbers(uint64_t k, uint64_t g) {
- uint64_t data[2] = { k, g };
+ uint64_t data[2] = {k, g};
return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
}
static Key MakeKey(uint64_t k, uint64_t g) {
- assert(sizeof(Key) == sizeof(uint64_t));
+ static_assert(sizeof(Key) == sizeof(uint64_t), "");
assert(k <= K); // We sometimes pass K to seek to the end of the skiplist
assert(g <= 0xffffffffu);
return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
@@ -186,13 +190,11 @@ class ConcurrentTest {
// Per-key generation
struct State {
- port::AtomicPointer generation[K];
- void Set(int k, intptr_t v) {
- generation[k].Release_Store(reinterpret_cast<void*>(v));
- }
- intptr_t Get(int k) {
- return reinterpret_cast<intptr_t>(generation[k].Acquire_Load());
+ std::atomic<int> generation[K];
+ void Set(int k, int v) {
+ generation[k].store(v, std::memory_order_release);
}
+ int Get(int k) { return generation[k].load(std::memory_order_acquire); }
State() {
for (int k = 0; k < K; k++) {
@@ -211,7 +213,7 @@ class ConcurrentTest {
SkipList<Key, Comparator> list_;
public:
- ConcurrentTest() : list_(Comparator(), &arena_) { }
+ ConcurrentTest() : list_(Comparator(), &arena_) {}
// REQUIRES: External synchronization
void WriteStep(Random* rnd) {
@@ -250,11 +252,9 @@ class ConcurrentTest {
// Note that generation 0 is never inserted, so it is ok if
// <*,0,*> is missing.
ASSERT_TRUE((gen(pos) == 0) ||
- (gen(pos) > static_cast<Key>(initial_state.Get(key(pos))))
- ) << "key: " << key(pos)
- << "; gen: " << gen(pos)
- << "; initgen: "
- << initial_state.Get(key(pos));
+ (gen(pos) > static_cast<Key>(initial_state.Get(key(pos)))))
+ << "key: " << key(pos) << "; gen: " << gen(pos)
+ << "; initgen: " << initial_state.Get(key(pos));
// Advance to next key in the valid key space
if (key(pos) < key(current)) {
@@ -298,21 +298,14 @@ class TestState {
public:
ConcurrentTest t_;
int seed_;
- port::AtomicPointer quit_flag_;
+ std::atomic<bool> quit_flag_;
- enum ReaderState {
- STARTING,
- RUNNING,
- DONE
- };
+ enum ReaderState { STARTING, RUNNING, DONE };
explicit TestState(int s)
- : seed_(s),
- quit_flag_(NULL),
- state_(STARTING),
- state_cv_(&mu_) {}
+ : seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {}
- void Wait(ReaderState s) {
+ void Wait(ReaderState s) LOCKS_EXCLUDED(mu_) {
mu_.Lock();
while (state_ != s) {
state_cv_.Wait();
@@ -320,7 +313,7 @@ class TestState {
mu_.Unlock();
}
- void Change(ReaderState s) {
+ void Change(ReaderState s) LOCKS_EXCLUDED(mu_) {
mu_.Lock();
state_ = s;
state_cv_.Signal();
@@ -329,8 +322,8 @@ class TestState {
private:
port::Mutex mu_;
- ReaderState state_;
- port::CondVar state_cv_;
+ ReaderState state_ GUARDED_BY(mu_);
+ port::CondVar state_cv_ GUARDED_BY(mu_);
};
static void ConcurrentReader(void* arg) {
@@ -338,7 +331,7 @@ static void ConcurrentReader(void* arg) {
Random rnd(state->seed_);
int64_t reads = 0;
state->Change(TestState::RUNNING);
- while (!state->quit_flag_.Acquire_Load()) {
+ while (!state->quit_flag_.load(std::memory_order_acquire)) {
state->t_.ReadStep(&rnd);
++reads;
}
@@ -360,7 +353,7 @@ static void RunConcurrent(int run) {
for (int i = 0; i < kSize; i++) {
state.t_.WriteStep(&rnd);
}
- state.quit_flag_.Release_Store(&state); // Any non-NULL arg will do
+ state.quit_flag_.store(true, std::memory_order_release);
state.Wait(TestState::DONE);
}
}
@@ -373,6 +366,4 @@ TEST(SkipTest, Concurrent5) { RunConcurrent(5); }
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/snapshot.h b/src/leveldb/db/snapshot.h
index 6ed413c42d..9f1d66491d 100644
--- a/src/leveldb/db/snapshot.h
+++ b/src/leveldb/db/snapshot.h
@@ -16,50 +16,78 @@ class SnapshotList;
// Each SnapshotImpl corresponds to a particular sequence number.
class SnapshotImpl : public Snapshot {
public:
- SequenceNumber number_; // const after creation
+ SnapshotImpl(SequenceNumber sequence_number)
+ : sequence_number_(sequence_number) {}
+
+ SequenceNumber sequence_number() const { return sequence_number_; }
private:
friend class SnapshotList;
- // SnapshotImpl is kept in a doubly-linked circular list
+ // SnapshotImpl is kept in a doubly-linked circular list. The SnapshotList
+ // implementation operates on the next/previous fields direcly.
+  // implementation operates on the next/previous fields directly.
SnapshotImpl* prev_;
SnapshotImpl* next_;
- SnapshotList* list_; // just for sanity checks
+ const SequenceNumber sequence_number_;
+
+#if !defined(NDEBUG)
+ SnapshotList* list_ = nullptr;
+#endif // !defined(NDEBUG)
};
class SnapshotList {
public:
- SnapshotList() {
- list_.prev_ = &list_;
- list_.next_ = &list_;
+ SnapshotList() : head_(0) {
+ head_.prev_ = &head_;
+ head_.next_ = &head_;
+ }
+
+ bool empty() const { return head_.next_ == &head_; }
+ SnapshotImpl* oldest() const {
+ assert(!empty());
+ return head_.next_;
}
+ SnapshotImpl* newest() const {
+ assert(!empty());
+ return head_.prev_;
+ }
+
+ // Creates a SnapshotImpl and appends it to the end of the list.
+ SnapshotImpl* New(SequenceNumber sequence_number) {
+ assert(empty() || newest()->sequence_number_ <= sequence_number);
+
+ SnapshotImpl* snapshot = new SnapshotImpl(sequence_number);
- bool empty() const { return list_.next_ == &list_; }
- SnapshotImpl* oldest() const { assert(!empty()); return list_.next_; }
- SnapshotImpl* newest() const { assert(!empty()); return list_.prev_; }
-
- const SnapshotImpl* New(SequenceNumber seq) {
- SnapshotImpl* s = new SnapshotImpl;
- s->number_ = seq;
- s->list_ = this;
- s->next_ = &list_;
- s->prev_ = list_.prev_;
- s->prev_->next_ = s;
- s->next_->prev_ = s;
- return s;
+#if !defined(NDEBUG)
+ snapshot->list_ = this;
+#endif // !defined(NDEBUG)
+ snapshot->next_ = &head_;
+ snapshot->prev_ = head_.prev_;
+ snapshot->prev_->next_ = snapshot;
+ snapshot->next_->prev_ = snapshot;
+ return snapshot;
}
- void Delete(const SnapshotImpl* s) {
- assert(s->list_ == this);
- s->prev_->next_ = s->next_;
- s->next_->prev_ = s->prev_;
- delete s;
+ // Removes a SnapshotImpl from this list.
+ //
+ // The snapshot must have been created by calling New() on this list.
+ //
+ // The snapshot pointer should not be const, because its memory is
+ // deallocated. However, that would force us to change DB::ReleaseSnapshot(),
+ // which is in the API, and currently takes a const Snapshot.
+ void Delete(const SnapshotImpl* snapshot) {
+#if !defined(NDEBUG)
+ assert(snapshot->list_ == this);
+#endif // !defined(NDEBUG)
+ snapshot->prev_->next_ = snapshot->next_;
+ snapshot->next_->prev_ = snapshot->prev_;
+ delete snapshot;
}
private:
// Dummy head of doubly-linked list of snapshots
- SnapshotImpl list_;
+ SnapshotImpl head_;
};
} // namespace leveldb
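// Illustrative aside (not part of this diff): SnapshotList keeps snapshots in a
// circular doubly-linked list anchored by a dummy head, so appending the newest
// snapshot and unlinking an arbitrary one are both O(1) pointer swaps with no
// null checks. A minimal standalone sketch of that structure, mirroring New()
// and Delete() above:
#include <cassert>
#include <cstdint>

struct Node {
  explicit Node(uint64_t seq) : sequence(seq), prev(this), next(this) {}
  uint64_t sequence;
  Node* prev;
  Node* next;
};

struct List {
  List() : head(0) {}  // Dummy head links to itself: the empty list.
  bool empty() const { return head.next == &head; }

  Node* Append(uint64_t seq) {  // Mirrors SnapshotList::New().
    Node* n = new Node(seq);
    n->next = &head;
    n->prev = head.prev;
    n->prev->next = n;
    n->next->prev = n;
    return n;
  }

  void Remove(Node* n) {  // Mirrors SnapshotList::Delete().
    n->prev->next = n->next;
    n->next->prev = n->prev;
    delete n;
  }

  Node head;
};

int main() {
  List list;
  Node* a = list.Append(1);
  Node* b = list.Append(2);
  assert(list.head.next == a && list.head.prev == b);  // oldest == a, newest == b
  list.Remove(a);
  assert(list.head.next == b);
  list.Remove(b);
  assert(list.empty());
  return 0;
}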
diff --git a/src/leveldb/db/table_cache.cc b/src/leveldb/db/table_cache.cc
index e3d82cd3ea..73f05fd7b1 100644
--- a/src/leveldb/db/table_cache.cc
+++ b/src/leveldb/db/table_cache.cc
@@ -29,18 +29,14 @@ static void UnrefEntry(void* arg1, void* arg2) {
cache->Release(h);
}
-TableCache::TableCache(const std::string& dbname,
- const Options* options,
+TableCache::TableCache(const std::string& dbname, const Options& options,
int entries)
- : env_(options->env),
+ : env_(options.env),
dbname_(dbname),
options_(options),
- cache_(NewLRUCache(entries)) {
-}
+ cache_(NewLRUCache(entries)) {}
-TableCache::~TableCache() {
- delete cache_;
-}
+TableCache::~TableCache() { delete cache_; }
Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
Cache::Handle** handle) {
@@ -49,10 +45,10 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
EncodeFixed64(buf, file_number);
Slice key(buf, sizeof(buf));
*handle = cache_->Lookup(key);
- if (*handle == NULL) {
+ if (*handle == nullptr) {
std::string fname = TableFileName(dbname_, file_number);
- RandomAccessFile* file = NULL;
- Table* table = NULL;
+ RandomAccessFile* file = nullptr;
+ Table* table = nullptr;
s = env_->NewRandomAccessFile(fname, &file);
if (!s.ok()) {
std::string old_fname = SSTTableFileName(dbname_, file_number);
@@ -61,11 +57,11 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
}
}
if (s.ok()) {
- s = Table::Open(*options_, file, file_size, &table);
+ s = Table::Open(options_, file, file_size, &table);
}
if (!s.ok()) {
- assert(table == NULL);
+ assert(table == nullptr);
delete file;
// We do not cache error results so that if the error is transient,
// or somebody repairs the file, we recover automatically.
@@ -80,14 +76,13 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
}
Iterator* TableCache::NewIterator(const ReadOptions& options,
- uint64_t file_number,
- uint64_t file_size,
+ uint64_t file_number, uint64_t file_size,
Table** tableptr) {
- if (tableptr != NULL) {
- *tableptr = NULL;
+ if (tableptr != nullptr) {
+ *tableptr = nullptr;
}
- Cache::Handle* handle = NULL;
+ Cache::Handle* handle = nullptr;
Status s = FindTable(file_number, file_size, &handle);
if (!s.ok()) {
return NewErrorIterator(s);
@@ -96,23 +91,21 @@ Iterator* TableCache::NewIterator(const ReadOptions& options,
Table* table = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
Iterator* result = table->NewIterator(options);
result->RegisterCleanup(&UnrefEntry, cache_, handle);
- if (tableptr != NULL) {
+ if (tableptr != nullptr) {
*tableptr = table;
}
return result;
}
-Status TableCache::Get(const ReadOptions& options,
- uint64_t file_number,
- uint64_t file_size,
- const Slice& k,
- void* arg,
- void (*saver)(void*, const Slice&, const Slice&)) {
- Cache::Handle* handle = NULL;
+Status TableCache::Get(const ReadOptions& options, uint64_t file_number,
+ uint64_t file_size, const Slice& k, void* arg,
+ void (*handle_result)(void*, const Slice&,
+ const Slice&)) {
+ Cache::Handle* handle = nullptr;
Status s = FindTable(file_number, file_size, &handle);
if (s.ok()) {
Table* t = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
- s = t->InternalGet(options, k, arg, saver);
+ s = t->InternalGet(options, k, arg, handle_result);
cache_->Release(handle);
}
return s;
diff --git a/src/leveldb/db/table_cache.h b/src/leveldb/db/table_cache.h
index 8cf4aaf12d..93069c8844 100644
--- a/src/leveldb/db/table_cache.h
+++ b/src/leveldb/db/table_cache.h
@@ -7,8 +7,10 @@
#ifndef STORAGE_LEVELDB_DB_TABLE_CACHE_H_
#define STORAGE_LEVELDB_DB_TABLE_CACHE_H_
-#include <string>
#include <stdint.h>
+
+#include <string>
+
#include "db/dbformat.h"
#include "leveldb/cache.h"
#include "leveldb/table.h"
@@ -20,40 +22,35 @@ class Env;
class TableCache {
public:
- TableCache(const std::string& dbname, const Options* options, int entries);
+ TableCache(const std::string& dbname, const Options& options, int entries);
~TableCache();
// Return an iterator for the specified file number (the corresponding
// file length must be exactly "file_size" bytes). If "tableptr" is
- // non-NULL, also sets "*tableptr" to point to the Table object
- // underlying the returned iterator, or NULL if no Table object underlies
- // the returned iterator. The returned "*tableptr" object is owned by
- // the cache and should not be deleted, and is valid for as long as the
+ // non-null, also sets "*tableptr" to point to the Table object
+ // underlying the returned iterator, or to nullptr if no Table object
+ // underlies the returned iterator. The returned "*tableptr" object is owned
+ // by the cache and should not be deleted, and is valid for as long as the
// returned iterator is live.
- Iterator* NewIterator(const ReadOptions& options,
- uint64_t file_number,
- uint64_t file_size,
- Table** tableptr = NULL);
+ Iterator* NewIterator(const ReadOptions& options, uint64_t file_number,
+ uint64_t file_size, Table** tableptr = nullptr);
// If a seek to internal key "k" in specified file finds an entry,
// call (*handle_result)(arg, found_key, found_value).
- Status Get(const ReadOptions& options,
- uint64_t file_number,
- uint64_t file_size,
- const Slice& k,
- void* arg,
+ Status Get(const ReadOptions& options, uint64_t file_number,
+ uint64_t file_size, const Slice& k, void* arg,
void (*handle_result)(void*, const Slice&, const Slice&));
// Evict any entry for the specified file number
void Evict(uint64_t file_number);
private:
+ Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
+
Env* const env_;
const std::string dbname_;
- const Options* options_;
+ const Options& options_;
Cache* cache_;
-
- Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
};
} // namespace leveldb
diff --git a/src/leveldb/db/version_edit.cc b/src/leveldb/db/version_edit.cc
index f10a2d58b2..cd770ef12d 100644
--- a/src/leveldb/db/version_edit.cc
+++ b/src/leveldb/db/version_edit.cc
@@ -12,15 +12,15 @@ namespace leveldb {
// Tag numbers for serialized VersionEdit. These numbers are written to
// disk and should not be changed.
enum Tag {
- kComparator = 1,
- kLogNumber = 2,
- kNextFileNumber = 3,
- kLastSequence = 4,
- kCompactPointer = 5,
- kDeletedFile = 6,
- kNewFile = 7,
+ kComparator = 1,
+ kLogNumber = 2,
+ kNextFileNumber = 3,
+ kLastSequence = 4,
+ kCompactPointer = 5,
+ kDeletedFile = 6,
+ kNewFile = 7,
// 8 was used for large value refs
- kPrevLogNumber = 9
+ kPrevLogNumber = 9
};
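// Illustrative aside (not part of this diff): EncodeTo() below writes each
// deleted file as a tag-prefixed varint triple -- kDeletedFile, then the level,
// then the file number -- which is what lets DecodeFrom() dispatch on the tag
// and reject unknown records. A compact standalone sketch of that framing; the
// local varint helper stands in for util/coding's PutVarint32/PutVarint64.
#include <cstdint>
#include <string>

static void PutVarint64(std::string* dst, uint64_t v) {
  while (v >= 0x80) {
    dst->push_back(static_cast<char>((v & 0x7f) | 0x80));
    v >>= 7;
  }
  dst->push_back(static_cast<char>(v));
}

std::string EncodeDeletedFile(int level, uint64_t file_number) {
  const uint64_t kDeletedFile = 6;  // Tag value from the enum above.
  std::string record;
  PutVarint64(&record, kDeletedFile);  // Tag.
  PutVarint64(&record, level);         // Level (small; fits in one byte).
  PutVarint64(&record, file_number);   // File number.
  return record;
}

int main() {
  std::string rec = EncodeDeletedFile(/*level=*/2, /*file_number=*/123456);
  // The first byte is the tag (6 < 128, so a single varint byte).
  return rec[0] == 6 ? 0 : 1;
}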
void VersionEdit::Clear() {
@@ -66,12 +66,10 @@ void VersionEdit::EncodeTo(std::string* dst) const {
PutLengthPrefixedSlice(dst, compact_pointers_[i].second.Encode());
}
- for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
- iter != deleted_files_.end();
- ++iter) {
+ for (const auto& deleted_file_kvp : deleted_files_) {
PutVarint32(dst, kDeletedFile);
- PutVarint32(dst, iter->first); // level
- PutVarint64(dst, iter->second); // file number
+ PutVarint32(dst, deleted_file_kvp.first); // level
+ PutVarint64(dst, deleted_file_kvp.second); // file number
}
for (size_t i = 0; i < new_files_.size(); i++) {
@@ -88,8 +86,7 @@ void VersionEdit::EncodeTo(std::string* dst) const {
static bool GetInternalKey(Slice* input, InternalKey* dst) {
Slice str;
if (GetLengthPrefixedSlice(input, &str)) {
- dst->DecodeFrom(str);
- return true;
+ return dst->DecodeFrom(str);
} else {
return false;
}
@@ -97,8 +94,7 @@ static bool GetInternalKey(Slice* input, InternalKey* dst) {
static bool GetLevel(Slice* input, int* level) {
uint32_t v;
- if (GetVarint32(input, &v) &&
- v < config::kNumLevels) {
+ if (GetVarint32(input, &v) && v < config::kNumLevels) {
*level = v;
return true;
} else {
@@ -109,7 +105,7 @@ static bool GetLevel(Slice* input, int* level) {
Status VersionEdit::DecodeFrom(const Slice& src) {
Clear();
Slice input = src;
- const char* msg = NULL;
+ const char* msg = nullptr;
uint32_t tag;
// Temporary storage for parsing
@@ -119,7 +115,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
Slice str;
InternalKey key;
- while (msg == NULL && GetVarint32(&input, &tag)) {
+ while (msg == nullptr && GetVarint32(&input, &tag)) {
switch (tag) {
case kComparator:
if (GetLengthPrefixedSlice(&input, &str)) {
@@ -163,8 +159,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
break;
case kCompactPointer:
- if (GetLevel(&input, &level) &&
- GetInternalKey(&input, &key)) {
+ if (GetLevel(&input, &level) && GetInternalKey(&input, &key)) {
compact_pointers_.push_back(std::make_pair(level, key));
} else {
msg = "compaction pointer";
@@ -172,8 +167,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
break;
case kDeletedFile:
- if (GetLevel(&input, &level) &&
- GetVarint64(&input, &number)) {
+ if (GetLevel(&input, &level) && GetVarint64(&input, &number)) {
deleted_files_.insert(std::make_pair(level, number));
} else {
msg = "deleted file";
@@ -181,8 +175,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
break;
case kNewFile:
- if (GetLevel(&input, &level) &&
- GetVarint64(&input, &f.number) &&
+ if (GetLevel(&input, &level) && GetVarint64(&input, &f.number) &&
GetVarint64(&input, &f.file_size) &&
GetInternalKey(&input, &f.smallest) &&
GetInternalKey(&input, &f.largest)) {
@@ -198,12 +191,12 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
}
}
- if (msg == NULL && !input.empty()) {
+ if (msg == nullptr && !input.empty()) {
msg = "invalid tag";
}
Status result;
- if (msg != NULL) {
+ if (msg != nullptr) {
result = Status::Corruption("VersionEdit", msg);
}
return result;
@@ -238,13 +231,11 @@ std::string VersionEdit::DebugString() const {
r.append(" ");
r.append(compact_pointers_[i].second.DebugString());
}
- for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
- iter != deleted_files_.end();
- ++iter) {
+ for (const auto& deleted_files_kvp : deleted_files_) {
r.append("\n DeleteFile: ");
- AppendNumberTo(&r, iter->first);
+ AppendNumberTo(&r, deleted_files_kvp.first);
r.append(" ");
- AppendNumberTo(&r, iter->second);
+ AppendNumberTo(&r, deleted_files_kvp.second);
}
for (size_t i = 0; i < new_files_.size(); i++) {
const FileMetaData& f = new_files_[i].second;
diff --git a/src/leveldb/db/version_edit.h b/src/leveldb/db/version_edit.h
index eaef77b327..0de4531773 100644
--- a/src/leveldb/db/version_edit.h
+++ b/src/leveldb/db/version_edit.h
@@ -8,6 +8,7 @@
#include <set>
#include <utility>
#include <vector>
+
#include "db/dbformat.h"
namespace leveldb {
@@ -15,20 +16,20 @@ namespace leveldb {
class VersionSet;
struct FileMetaData {
+ FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {}
+
int refs;
- int allowed_seeks; // Seeks allowed until compaction
+ int allowed_seeks; // Seeks allowed until compaction
uint64_t number;
- uint64_t file_size; // File size in bytes
- InternalKey smallest; // Smallest internal key served by table
- InternalKey largest; // Largest internal key served by table
-
- FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) { }
+ uint64_t file_size; // File size in bytes
+ InternalKey smallest; // Smallest internal key served by table
+ InternalKey largest; // Largest internal key served by table
};
class VersionEdit {
public:
VersionEdit() { Clear(); }
- ~VersionEdit() { }
+ ~VersionEdit() = default;
void Clear();
@@ -59,10 +60,8 @@ class VersionEdit {
// Add the specified file at the specified number.
// REQUIRES: This version has not been saved (see VersionSet::SaveTo)
// REQUIRES: "smallest" and "largest" are smallest and largest keys in file
- void AddFile(int level, uint64_t file,
- uint64_t file_size,
- const InternalKey& smallest,
- const InternalKey& largest) {
+ void AddFile(int level, uint64_t file, uint64_t file_size,
+ const InternalKey& smallest, const InternalKey& largest) {
FileMetaData f;
f.number = file;
f.file_size = file_size;
@@ -84,7 +83,7 @@ class VersionEdit {
private:
friend class VersionSet;
- typedef std::set< std::pair<int, uint64_t> > DeletedFileSet;
+ typedef std::set<std::pair<int, uint64_t>> DeletedFileSet;
std::string comparator_;
uint64_t log_number_;
@@ -97,9 +96,9 @@ class VersionEdit {
bool has_next_file_number_;
bool has_last_sequence_;
- std::vector< std::pair<int, InternalKey> > compact_pointers_;
+ std::vector<std::pair<int, InternalKey>> compact_pointers_;
DeletedFileSet deleted_files_;
- std::vector< std::pair<int, FileMetaData> > new_files_;
+ std::vector<std::pair<int, FileMetaData>> new_files_;
};
} // namespace leveldb
diff --git a/src/leveldb/db/version_edit_test.cc b/src/leveldb/db/version_edit_test.cc
index 280310b49d..0b7cda8854 100644
--- a/src/leveldb/db/version_edit_test.cc
+++ b/src/leveldb/db/version_edit_test.cc
@@ -17,7 +17,7 @@ static void TestEncodeDecode(const VersionEdit& edit) {
ASSERT_EQ(encoded, encoded2);
}
-class VersionEditTest { };
+class VersionEditTest {};
TEST(VersionEditTest, EncodeDecode) {
static const uint64_t kBig = 1ull << 50;
@@ -41,6 +41,4 @@ TEST(VersionEditTest, EncodeDecode) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/version_set.cc b/src/leveldb/db/version_set.cc
index 2cb6d80ed3..cd07346ea8 100644
--- a/src/leveldb/db/version_set.cc
+++ b/src/leveldb/db/version_set.cc
@@ -4,8 +4,10 @@
#include "db/version_set.h"
-#include <algorithm>
#include <stdio.h>
+
+#include <algorithm>
+
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
@@ -84,8 +86,7 @@ Version::~Version() {
}
int FindFile(const InternalKeyComparator& icmp,
- const std::vector<FileMetaData*>& files,
- const Slice& key) {
+ const std::vector<FileMetaData*>& files, const Slice& key) {
uint32_t left = 0;
uint32_t right = files.size();
while (left < right) {
@@ -104,26 +105,25 @@ int FindFile(const InternalKeyComparator& icmp,
return right;
}
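// Illustrative aside (not part of this diff): FindFile() is a lower-bound style
// binary search over files sorted by largest key -- it returns the index of the
// earliest file whose largest key is >= the target (or files.size() if none),
// which SomeFileOverlapsRange() then refines against the smallest key. A
// standalone sketch using integer keys as an assumption; the real code compares
// InternalKeys through icmp.
#include <cassert>
#include <cstdint>
#include <vector>

struct FileRange {
  uint64_t smallest;
  uint64_t largest;
};

size_t FindFile(const std::vector<FileRange>& files, uint64_t key) {
  size_t left = 0;
  size_t right = files.size();
  while (left < right) {
    size_t mid = (left + right) / 2;
    if (files[mid].largest < key) {
      left = mid + 1;  // Everything at or before mid ends before key.
    } else {
      right = mid;     // files[mid] may contain key; keep searching left.
    }
  }
  return right;
}

int main() {
  std::vector<FileRange> files = {{0, 10}, {20, 30}, {40, 50}};
  assert(FindFile(files, 5) == 0);   // Falls inside the first file.
  assert(FindFile(files, 15) == 1);  // Between files: the next file is returned.
  assert(FindFile(files, 60) == 3);  // Past the end: files.size().
  return 0;
}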
-static bool AfterFile(const Comparator* ucmp,
- const Slice* user_key, const FileMetaData* f) {
- // NULL user_key occurs before all keys and is therefore never after *f
- return (user_key != NULL &&
+static bool AfterFile(const Comparator* ucmp, const Slice* user_key,
+ const FileMetaData* f) {
+ // null user_key occurs before all keys and is therefore never after *f
+ return (user_key != nullptr &&
ucmp->Compare(*user_key, f->largest.user_key()) > 0);
}
-static bool BeforeFile(const Comparator* ucmp,
- const Slice* user_key, const FileMetaData* f) {
- // NULL user_key occurs after all keys and is therefore never before *f
- return (user_key != NULL &&
+static bool BeforeFile(const Comparator* ucmp, const Slice* user_key,
+ const FileMetaData* f) {
+ // null user_key occurs after all keys and is therefore never before *f
+ return (user_key != nullptr &&
ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
}
-bool SomeFileOverlapsRange(
- const InternalKeyComparator& icmp,
- bool disjoint_sorted_files,
- const std::vector<FileMetaData*>& files,
- const Slice* smallest_user_key,
- const Slice* largest_user_key) {
+bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
+ bool disjoint_sorted_files,
+ const std::vector<FileMetaData*>& files,
+ const Slice* smallest_user_key,
+ const Slice* largest_user_key) {
const Comparator* ucmp = icmp.user_comparator();
if (!disjoint_sorted_files) {
// Need to check against all files
@@ -141,10 +141,11 @@ bool SomeFileOverlapsRange(
// Binary search over file list
uint32_t index = 0;
- if (smallest_user_key != NULL) {
+ if (smallest_user_key != nullptr) {
// Find the earliest possible internal key for smallest_user_key
- InternalKey small(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek);
- index = FindFile(icmp, files, small.Encode());
+ InternalKey small_key(*smallest_user_key, kMaxSequenceNumber,
+ kValueTypeForSeek);
+ index = FindFile(icmp, files, small_key.Encode());
}
if (index >= files.size()) {
@@ -164,25 +165,21 @@ class Version::LevelFileNumIterator : public Iterator {
public:
LevelFileNumIterator(const InternalKeyComparator& icmp,
const std::vector<FileMetaData*>* flist)
- : icmp_(icmp),
- flist_(flist),
- index_(flist->size()) { // Marks as invalid
- }
- virtual bool Valid() const {
- return index_ < flist_->size();
+ : icmp_(icmp), flist_(flist), index_(flist->size()) { // Marks as invalid
}
- virtual void Seek(const Slice& target) {
+ bool Valid() const override { return index_ < flist_->size(); }
+ void Seek(const Slice& target) override {
index_ = FindFile(icmp_, *flist_, target);
}
- virtual void SeekToFirst() { index_ = 0; }
- virtual void SeekToLast() {
+ void SeekToFirst() override { index_ = 0; }
+ void SeekToLast() override {
index_ = flist_->empty() ? 0 : flist_->size() - 1;
}
- virtual void Next() {
+ void Next() override {
assert(Valid());
index_++;
}
- virtual void Prev() {
+ void Prev() override {
assert(Valid());
if (index_ == 0) {
index_ = flist_->size(); // Marks as invalid
@@ -190,17 +187,18 @@ class Version::LevelFileNumIterator : public Iterator {
index_--;
}
}
- Slice key() const {
+ Slice key() const override {
assert(Valid());
return (*flist_)[index_]->largest.Encode();
}
- Slice value() const {
+ Slice value() const override {
assert(Valid());
EncodeFixed64(value_buf_, (*flist_)[index_]->number);
- EncodeFixed64(value_buf_+8, (*flist_)[index_]->file_size);
+ EncodeFixed64(value_buf_ + 8, (*flist_)[index_]->file_size);
return Slice(value_buf_, sizeof(value_buf_));
}
- virtual Status status() const { return Status::OK(); }
+ Status status() const override { return Status::OK(); }
+
private:
const InternalKeyComparator icmp_;
const std::vector<FileMetaData*>* const flist_;
@@ -210,16 +208,14 @@ class Version::LevelFileNumIterator : public Iterator {
mutable char value_buf_[16];
};
-static Iterator* GetFileIterator(void* arg,
- const ReadOptions& options,
+static Iterator* GetFileIterator(void* arg, const ReadOptions& options,
const Slice& file_value) {
TableCache* cache = reinterpret_cast<TableCache*>(arg);
if (file_value.size() != 16) {
return NewErrorIterator(
Status::Corruption("FileReader invoked with unexpected value"));
} else {
- return cache->NewIterator(options,
- DecodeFixed64(file_value.data()),
+ return cache->NewIterator(options, DecodeFixed64(file_value.data()),
DecodeFixed64(file_value.data() + 8));
}
}
@@ -227,17 +223,16 @@ static Iterator* GetFileIterator(void* arg,
Iterator* Version::NewConcatenatingIterator(const ReadOptions& options,
int level) const {
return NewTwoLevelIterator(
- new LevelFileNumIterator(vset_->icmp_, &files_[level]),
- &GetFileIterator, vset_->table_cache_, options);
+ new LevelFileNumIterator(vset_->icmp_, &files_[level]), &GetFileIterator,
+ vset_->table_cache_, options);
}
void Version::AddIterators(const ReadOptions& options,
std::vector<Iterator*>* iters) {
// Merge all level zero files together since they may overlap
for (size_t i = 0; i < files_[0].size(); i++) {
- iters->push_back(
- vset_->table_cache_->NewIterator(
- options, files_[0][i]->number, files_[0][i]->file_size));
+ iters->push_back(vset_->table_cache_->NewIterator(
+ options, files_[0][i]->number, files_[0][i]->file_size));
}
// For levels > 0, we can use a concatenating iterator that sequentially
@@ -264,7 +259,7 @@ struct Saver {
Slice user_key;
std::string* value;
};
-}
+} // namespace
static void SaveValue(void* arg, const Slice& ikey, const Slice& v) {
Saver* s = reinterpret_cast<Saver*>(arg);
ParsedInternalKey parsed_key;
@@ -284,10 +279,8 @@ static bool NewestFirst(FileMetaData* a, FileMetaData* b) {
return a->number > b->number;
}
-void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
- void* arg,
+void Version::ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
bool (*func)(void*, int, FileMetaData*)) {
- // TODO(sanjay): Change Version::Get() to use this function.
const Comparator* ucmp = vset_->icmp_.user_comparator();
// Search level-0 in order from newest to oldest.
@@ -329,110 +322,89 @@ void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
}
}
-Status Version::Get(const ReadOptions& options,
- const LookupKey& k,
- std::string* value,
- GetStats* stats) {
- Slice ikey = k.internal_key();
- Slice user_key = k.user_key();
- const Comparator* ucmp = vset_->icmp_.user_comparator();
- Status s;
-
- stats->seek_file = NULL;
+Status Version::Get(const ReadOptions& options, const LookupKey& k,
+ std::string* value, GetStats* stats) {
+ stats->seek_file = nullptr;
stats->seek_file_level = -1;
- FileMetaData* last_file_read = NULL;
- int last_file_read_level = -1;
- // We can search level-by-level since entries never hop across
- // levels. Therefore we are guaranteed that if we find data
- // in an smaller level, later levels are irrelevant.
- std::vector<FileMetaData*> tmp;
- FileMetaData* tmp2;
- for (int level = 0; level < config::kNumLevels; level++) {
- size_t num_files = files_[level].size();
- if (num_files == 0) continue;
+ struct State {
+ Saver saver;
+ GetStats* stats;
+ const ReadOptions* options;
+ Slice ikey;
+ FileMetaData* last_file_read;
+ int last_file_read_level;
- // Get the list of files to search in this level
- FileMetaData* const* files = &files_[level][0];
- if (level == 0) {
- // Level-0 files may overlap each other. Find all files that
- // overlap user_key and process them in order from newest to oldest.
- tmp.reserve(num_files);
- for (uint32_t i = 0; i < num_files; i++) {
- FileMetaData* f = files[i];
- if (ucmp->Compare(user_key, f->smallest.user_key()) >= 0 &&
- ucmp->Compare(user_key, f->largest.user_key()) <= 0) {
- tmp.push_back(f);
- }
- }
- if (tmp.empty()) continue;
+ VersionSet* vset;
+ Status s;
+ bool found;
- std::sort(tmp.begin(), tmp.end(), NewestFirst);
- files = &tmp[0];
- num_files = tmp.size();
- } else {
- // Binary search to find earliest index whose largest key >= ikey.
- uint32_t index = FindFile(vset_->icmp_, files_[level], ikey);
- if (index >= num_files) {
- files = NULL;
- num_files = 0;
- } else {
- tmp2 = files[index];
- if (ucmp->Compare(user_key, tmp2->smallest.user_key()) < 0) {
- // All of "tmp2" is past any data for user_key
- files = NULL;
- num_files = 0;
- } else {
- files = &tmp2;
- num_files = 1;
- }
- }
- }
+ static bool Match(void* arg, int level, FileMetaData* f) {
+ State* state = reinterpret_cast<State*>(arg);
- for (uint32_t i = 0; i < num_files; ++i) {
- if (last_file_read != NULL && stats->seek_file == NULL) {
+ if (state->stats->seek_file == nullptr &&
+ state->last_file_read != nullptr) {
// We have had more than one seek for this read. Charge the 1st file.
- stats->seek_file = last_file_read;
- stats->seek_file_level = last_file_read_level;
+ state->stats->seek_file = state->last_file_read;
+ state->stats->seek_file_level = state->last_file_read_level;
}
- FileMetaData* f = files[i];
- last_file_read = f;
- last_file_read_level = level;
-
- Saver saver;
- saver.state = kNotFound;
- saver.ucmp = ucmp;
- saver.user_key = user_key;
- saver.value = value;
- s = vset_->table_cache_->Get(options, f->number, f->file_size,
- ikey, &saver, SaveValue);
- if (!s.ok()) {
- return s;
+ state->last_file_read = f;
+ state->last_file_read_level = level;
+
+ state->s = state->vset->table_cache_->Get(*state->options, f->number,
+ f->file_size, state->ikey,
+ &state->saver, SaveValue);
+ if (!state->s.ok()) {
+ state->found = true;
+ return false;
}
- switch (saver.state) {
+ switch (state->saver.state) {
case kNotFound:
- break; // Keep searching in other files
+ return true; // Keep searching in other files
case kFound:
- return s;
+ state->found = true;
+ return false;
case kDeleted:
- s = Status::NotFound(Slice()); // Use empty error message for speed
- return s;
+ return false;
case kCorrupt:
- s = Status::Corruption("corrupted key for ", user_key);
- return s;
+ state->s =
+ Status::Corruption("corrupted key for ", state->saver.user_key);
+ state->found = true;
+ return false;
}
+
+ // Not reached. Added to avoid false compilation warnings of
+ // "control reaches end of non-void function".
+ return false;
}
- }
+ };
+
+ State state;
+ state.found = false;
+ state.stats = stats;
+ state.last_file_read = nullptr;
+ state.last_file_read_level = -1;
- return Status::NotFound(Slice()); // Use an empty error message for speed
+ state.options = &options;
+ state.ikey = k.internal_key();
+ state.vset = vset_;
+
+ state.saver.state = kNotFound;
+ state.saver.ucmp = vset_->icmp_.user_comparator();
+ state.saver.user_key = k.user_key();
+ state.saver.value = value;
+
+ ForEachOverlapping(state.saver.user_key, state.ikey, &state, &State::Match);
+
+ return state.found ? state.s : Status::NotFound(Slice());
}
bool Version::UpdateStats(const GetStats& stats) {
FileMetaData* f = stats.seek_file;
- if (f != NULL) {
+ if (f != nullptr) {
f->allowed_seeks--;
- if (f->allowed_seeks <= 0 && file_to_compact_ == NULL) {
+ if (f->allowed_seeks <= 0 && file_to_compact_ == nullptr) {
file_to_compact_ = f;
file_to_compact_level_ = stats.seek_file_level;
return true;
@@ -479,9 +451,7 @@ bool Version::RecordReadSample(Slice internal_key) {
return false;
}
-void Version::Ref() {
- ++refs_;
-}
+void Version::Ref() { ++refs_; }
void Version::Unref() {
assert(this != &vset_->dummy_versions_);
@@ -492,16 +462,14 @@ void Version::Unref() {
}
}
-bool Version::OverlapInLevel(int level,
- const Slice* smallest_user_key,
+bool Version::OverlapInLevel(int level, const Slice* smallest_user_key,
const Slice* largest_user_key) {
return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level],
smallest_user_key, largest_user_key);
}
-int Version::PickLevelForMemTableOutput(
- const Slice& smallest_user_key,
- const Slice& largest_user_key) {
+int Version::PickLevelForMemTableOutput(const Slice& smallest_user_key,
+ const Slice& largest_user_key) {
int level = 0;
if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
// Push to next level if there is no overlap in next level,
@@ -528,40 +496,39 @@ int Version::PickLevelForMemTableOutput(
}
// Store in "*inputs" all files in "level" that overlap [begin,end]
-void Version::GetOverlappingInputs(
- int level,
- const InternalKey* begin,
- const InternalKey* end,
- std::vector<FileMetaData*>* inputs) {
+void Version::GetOverlappingInputs(int level, const InternalKey* begin,
+ const InternalKey* end,
+ std::vector<FileMetaData*>* inputs) {
assert(level >= 0);
assert(level < config::kNumLevels);
inputs->clear();
Slice user_begin, user_end;
- if (begin != NULL) {
+ if (begin != nullptr) {
user_begin = begin->user_key();
}
- if (end != NULL) {
+ if (end != nullptr) {
user_end = end->user_key();
}
const Comparator* user_cmp = vset_->icmp_.user_comparator();
- for (size_t i = 0; i < files_[level].size(); ) {
+ for (size_t i = 0; i < files_[level].size();) {
FileMetaData* f = files_[level][i++];
const Slice file_start = f->smallest.user_key();
const Slice file_limit = f->largest.user_key();
- if (begin != NULL && user_cmp->Compare(file_limit, user_begin) < 0) {
+ if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) {
// "f" is completely before specified range; skip it
- } else if (end != NULL && user_cmp->Compare(file_start, user_end) > 0) {
+ } else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) {
// "f" is completely after specified range; skip it
} else {
inputs->push_back(f);
if (level == 0) {
// Level-0 files may overlap each other. So check if the newly
// added file has expanded the range. If so, restart search.
- if (begin != NULL && user_cmp->Compare(file_start, user_begin) < 0) {
+ if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) {
user_begin = file_start;
inputs->clear();
i = 0;
- } else if (end != NULL && user_cmp->Compare(file_limit, user_end) > 0) {
+ } else if (end != nullptr &&
+ user_cmp->Compare(file_limit, user_end) > 0) {
user_end = file_limit;
inputs->clear();
i = 0;
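The restart in the level-0 branch above is easier to see with concrete numbers. Below is a minimal standalone sketch (integer user keys and made-up file ranges, not part of the patch) of the same expand-and-restart loop: pulling one overlapping file into the input set can widen [user_begin, user_end], so the scan restarts and previously skipped files are reconsidered.

    // Standalone illustration of the level-0 range-expansion restart
    // in Version::GetOverlappingInputs (hypothetical data).
    #include <cstdio>
    #include <vector>

    struct Range { int smallest, largest; };

    int main() {
      // Hypothetical, mutually overlapping level-0 files.
      std::vector<Range> files = {{100, 200}, {150, 600}, {400, 500}};
      int user_begin = 180, user_end = 190;
      std::vector<Range> inputs;
      for (size_t i = 0; i < files.size();) {
        Range f = files[i++];
        if (f.largest < user_begin || f.smallest > user_end) continue;  // disjoint; skip
        inputs.push_back(f);
        if (f.smallest < user_begin) {        // pulled-in file widened the low end
          user_begin = f.smallest; inputs.clear(); i = 0;
        } else if (f.largest > user_end) {    // ...or the high end: restart the scan
          user_end = f.largest; inputs.clear(); i = 0;
        }
      }
      std::printf("final range [%d,%d] covers %zu files\n",
                  user_begin, user_end, inputs.size());  // [100,600], 3 files
    }

Starting from the query range [180,190], the first file widens it to [100,200], which in turn pulls in [150,600]; each widening restarts the scan, so the loop terminates only once the range stops growing.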
@@ -629,9 +596,7 @@ class VersionSet::Builder {
public:
// Initialize a builder with the files from *base and other info from *vset
- Builder(VersionSet* vset, Version* base)
- : vset_(vset),
- base_(base) {
+ Builder(VersionSet* vset, Version* base) : vset_(vset), base_(base) {
base_->Ref();
BySmallestKey cmp;
cmp.internal_comparator = &vset_->icmp_;
@@ -645,8 +610,8 @@ class VersionSet::Builder {
const FileSet* added = levels_[level].added_files;
std::vector<FileMetaData*> to_unref;
to_unref.reserve(added->size());
- for (FileSet::const_iterator it = added->begin();
- it != added->end(); ++it) {
+ for (FileSet::const_iterator it = added->begin(); it != added->end();
+ ++it) {
to_unref.push_back(*it);
}
delete added;
@@ -671,12 +636,9 @@ class VersionSet::Builder {
}
// Delete files
- const VersionEdit::DeletedFileSet& del = edit->deleted_files_;
- for (VersionEdit::DeletedFileSet::const_iterator iter = del.begin();
- iter != del.end();
- ++iter) {
- const int level = iter->first;
- const uint64_t number = iter->second;
+ for (const auto& deleted_file_set_kvp : edit->deleted_files_) {
+ const int level = deleted_file_set_kvp.first;
+ const uint64_t number = deleted_file_set_kvp.second;
levels_[level].deleted_files.insert(number);
}
@@ -699,7 +661,7 @@ class VersionSet::Builder {
// same as the compaction of 40KB of data. We are a little
// conservative and allow approximately one seek for every 16KB
// of data before triggering a compaction.
- f->allowed_seeks = (f->file_size / 16384);
+ f->allowed_seeks = static_cast<int>((f->file_size / 16384U));
if (f->allowed_seeks < 100) f->allowed_seeks = 100;
levels_[level].deleted_files.erase(f->number);
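As a quick sanity check of the seek budget computed above — one seek charged as roughly 16 KiB of compaction work, floored at 100 — here is a small standalone sketch; the helper name and file sizes are illustrative only, not part of the patch.

    #include <cstdint>
    #include <cstdio>

    // Same arithmetic as the hunk above, wrapped in a helper for illustration.
    static int AllowedSeeks(uint64_t file_size) {
      int allowed_seeks = static_cast<int>(file_size / 16384U);  // ~16 KiB of work per seek
      if (allowed_seeks < 100) allowed_seeks = 100;              // floor for small files
      return allowed_seeks;
    }

    int main() {
      std::printf("1 MiB  table -> %d seeks\n", AllowedSeeks(1ULL << 20));   // 64, floored to 100
      std::printf("2 MiB  table -> %d seeks\n", AllowedSeeks(2ULL << 20));   // 128
      std::printf("64 MiB table -> %d seeks\n", AllowedSeeks(64ULL << 20));  // 4096
    }

So a small table is allowed at least 100 seeks before it becomes a seek-compaction candidate, while a 64 MiB table gets a budget of 4096.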
@@ -717,20 +679,17 @@ class VersionSet::Builder {
const std::vector<FileMetaData*>& base_files = base_->files_[level];
std::vector<FileMetaData*>::const_iterator base_iter = base_files.begin();
std::vector<FileMetaData*>::const_iterator base_end = base_files.end();
- const FileSet* added = levels_[level].added_files;
- v->files_[level].reserve(base_files.size() + added->size());
- for (FileSet::const_iterator added_iter = added->begin();
- added_iter != added->end();
- ++added_iter) {
+ const FileSet* added_files = levels_[level].added_files;
+ v->files_[level].reserve(base_files.size() + added_files->size());
+ for (const auto& added_file : *added_files) {
// Add all smaller files listed in base_
- for (std::vector<FileMetaData*>::const_iterator bpos
- = std::upper_bound(base_iter, base_end, *added_iter, cmp);
- base_iter != bpos;
- ++base_iter) {
+ for (std::vector<FileMetaData*>::const_iterator bpos =
+ std::upper_bound(base_iter, base_end, added_file, cmp);
+ base_iter != bpos; ++base_iter) {
MaybeAddFile(v, level, *base_iter);
}
- MaybeAddFile(v, level, *added_iter);
+ MaybeAddFile(v, level, added_file);
}
// Add remaining base files
@@ -742,7 +701,7 @@ class VersionSet::Builder {
// Make sure there is no overlap in levels > 0
if (level > 0) {
for (uint32_t i = 1; i < v->files_[level].size(); i++) {
- const InternalKey& prev_end = v->files_[level][i-1]->largest;
+ const InternalKey& prev_end = v->files_[level][i - 1]->largest;
const InternalKey& this_begin = v->files_[level][i]->smallest;
if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
@@ -763,7 +722,7 @@ class VersionSet::Builder {
std::vector<FileMetaData*>* files = &v->files_[level];
if (level > 0 && !files->empty()) {
// Must not overlap
- assert(vset_->icmp_.Compare((*files)[files->size()-1]->largest,
+ assert(vset_->icmp_.Compare((*files)[files->size() - 1]->largest,
f->smallest) < 0);
}
f->refs++;
@@ -772,8 +731,7 @@ class VersionSet::Builder {
}
};
-VersionSet::VersionSet(const std::string& dbname,
- const Options* options,
+VersionSet::VersionSet(const std::string& dbname, const Options* options,
TableCache* table_cache,
const InternalKeyComparator* cmp)
: env_(options->env),
@@ -786,10 +744,10 @@ VersionSet::VersionSet(const std::string& dbname,
last_sequence_(0),
log_number_(0),
prev_log_number_(0),
- descriptor_file_(NULL),
- descriptor_log_(NULL),
+ descriptor_file_(nullptr),
+ descriptor_log_(nullptr),
dummy_versions_(this),
- current_(NULL) {
+ current_(nullptr) {
AppendVersion(new Version(this));
}
@@ -804,7 +762,7 @@ void VersionSet::AppendVersion(Version* v) {
// Make "v" current
assert(v->refs_ == 0);
assert(v != current_);
- if (current_ != NULL) {
+ if (current_ != nullptr) {
current_->Unref();
}
current_ = v;
@@ -844,10 +802,10 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
// a temporary file that contains a snapshot of the current version.
std::string new_manifest_file;
Status s;
- if (descriptor_log_ == NULL) {
+ if (descriptor_log_ == nullptr) {
// No reason to unlock *mu here since we only hit this path in the
// first call to LogAndApply (when opening the database).
- assert(descriptor_file_ == NULL);
+ assert(descriptor_file_ == nullptr);
new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_);
edit->SetNextFile(next_file_number_);
s = env_->NewWritableFile(new_manifest_file, &descriptor_file_);
@@ -893,8 +851,8 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
if (!new_manifest_file.empty()) {
delete descriptor_log_;
delete descriptor_file_;
- descriptor_log_ = NULL;
- descriptor_file_ = NULL;
+ descriptor_log_ = nullptr;
+ descriptor_file_ = nullptr;
env_->DeleteFile(new_manifest_file);
}
}
@@ -902,10 +860,10 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
return s;
}
-Status VersionSet::Recover(bool *save_manifest) {
+Status VersionSet::Recover(bool* save_manifest) {
struct LogReporter : public log::Reader::Reporter {
Status* status;
- virtual void Corruption(size_t bytes, const Status& s) {
+ void Corruption(size_t bytes, const Status& s) override {
if (this->status->ok()) *this->status = s;
}
};
@@ -916,7 +874,7 @@ Status VersionSet::Recover(bool *save_manifest) {
if (!s.ok()) {
return s;
}
- if (current.empty() || current[current.size()-1] != '\n') {
+ if (current.empty() || current[current.size() - 1] != '\n') {
return Status::Corruption("CURRENT file does not end with newline");
}
current.resize(current.size() - 1);
@@ -925,6 +883,10 @@ Status VersionSet::Recover(bool *save_manifest) {
SequentialFile* file;
s = env_->NewSequentialFile(dscname, &file);
if (!s.ok()) {
+ if (s.IsNotFound()) {
+ return Status::Corruption("CURRENT points to a non-existent file",
+ s.ToString());
+ }
return s;
}
@@ -941,7 +903,8 @@ Status VersionSet::Recover(bool *save_manifest) {
{
LogReporter reporter;
reporter.status = &s;
- log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/);
+ log::Reader reader(file, &reporter, true /*checksum*/,
+ 0 /*initial_offset*/);
Slice record;
std::string scratch;
while (reader.ReadRecord(&record, &scratch) && s.ok()) {
@@ -982,7 +945,7 @@ Status VersionSet::Recover(bool *save_manifest) {
}
}
delete file;
- file = NULL;
+ file = nullptr;
if (s.ok()) {
if (!have_next_file) {
@@ -1040,12 +1003,12 @@ bool VersionSet::ReuseManifest(const std::string& dscname,
return false;
}
- assert(descriptor_file_ == NULL);
- assert(descriptor_log_ == NULL);
+ assert(descriptor_file_ == nullptr);
+ assert(descriptor_log_ == nullptr);
Status r = env_->NewAppendableFile(dscname, &descriptor_file_);
if (!r.ok()) {
Log(options_->info_log, "Reuse MANIFEST: %s\n", r.ToString().c_str());
- assert(descriptor_file_ == NULL);
+ assert(descriptor_file_ == nullptr);
return false;
}
@@ -1066,7 +1029,7 @@ void VersionSet::Finalize(Version* v) {
int best_level = -1;
double best_score = -1;
- for (int level = 0; level < config::kNumLevels-1; level++) {
+ for (int level = 0; level < config::kNumLevels - 1; level++) {
double score;
if (level == 0) {
// We treat level-0 specially by bounding the number of files
@@ -1081,7 +1044,7 @@ void VersionSet::Finalize(Version* v) {
// setting, or very high compression ratios, or lots of
// overwrites/deletions).
score = v->files_[level].size() /
- static_cast<double>(config::kL0_CompactionTrigger);
+ static_cast<double>(config::kL0_CompactionTrigger);
} else {
// Compute the ratio of current size to size limit.
const uint64_t level_bytes = TotalFileSize(v->files_[level]);
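A minimal standalone sketch of the two scoring rules in this hunk, assuming the usual kL0_CompactionTrigger of 4 and a roughly 10 MiB byte limit for level 1 (both values are assumptions here, not taken from the patch). A score of 1 or more marks the level as a compaction candidate, which matches the compaction_score_ >= 1 check used elsewhere.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Level 0 is scored by file count: every level-0 file may have to be
      // consulted on a read, so more files means more expensive reads.
      const int kL0_CompactionTrigger = 4;        // assumed default
      const int level0_files = 5;
      const double level0_score =
          level0_files / static_cast<double>(kL0_CompactionTrigger);  // 1.25

      // Levels >= 1 are scored by total bytes relative to the level's size limit.
      const uint64_t level1_bytes = 12ULL << 20;  // 12 MiB of table data
      const double level1_limit = 10.0 * 1048576.0;  // assumed ~10 MiB limit
      const double level1_score = level1_bytes / level1_limit;        // 1.2

      std::printf("level-0 score %.2f, level-1 score %.2f\n",
                  level0_score, level1_score);
    }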
@@ -1137,16 +1100,12 @@ int VersionSet::NumLevelFiles(int level) const {
const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
// Update code if kNumLevels changes
- assert(config::kNumLevels == 7);
+ static_assert(config::kNumLevels == 7, "");
snprintf(scratch->buffer, sizeof(scratch->buffer),
- "files[ %d %d %d %d %d %d %d ]",
- int(current_->files_[0].size()),
- int(current_->files_[1].size()),
- int(current_->files_[2].size()),
- int(current_->files_[3].size()),
- int(current_->files_[4].size()),
- int(current_->files_[5].size()),
- int(current_->files_[6].size()));
+ "files[ %d %d %d %d %d %d %d ]", int(current_->files_[0].size()),
+ int(current_->files_[1].size()), int(current_->files_[2].size()),
+ int(current_->files_[3].size()), int(current_->files_[4].size()),
+ int(current_->files_[5].size()), int(current_->files_[6].size()));
return scratch->buffer;
}
@@ -1172,7 +1131,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
Table* tableptr;
Iterator* iter = table_cache_->NewIterator(
ReadOptions(), files[i]->number, files[i]->file_size, &tableptr);
- if (tableptr != NULL) {
+ if (tableptr != nullptr) {
result += tableptr->ApproximateOffsetOf(ikey.Encode());
}
delete iter;
@@ -1183,8 +1142,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
}
void VersionSet::AddLiveFiles(std::set<uint64_t>* live) {
- for (Version* v = dummy_versions_.next_;
- v != &dummy_versions_;
+ for (Version* v = dummy_versions_.next_; v != &dummy_versions_;
v = v->next_) {
for (int level = 0; level < config::kNumLevels; level++) {
const std::vector<FileMetaData*>& files = v->files_[level];
@@ -1207,7 +1165,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() {
for (int level = 1; level < config::kNumLevels - 1; level++) {
for (size_t i = 0; i < current_->files_[level].size(); i++) {
const FileMetaData* f = current_->files_[level][i];
- current_->GetOverlappingInputs(level+1, &f->smallest, &f->largest,
+ current_->GetOverlappingInputs(level + 1, &f->smallest, &f->largest,
&overlaps);
const int64_t sum = TotalFileSize(overlaps);
if (sum > result) {
@@ -1222,8 +1180,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() {
// *smallest, *largest.
// REQUIRES: inputs is not empty
void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
- InternalKey* smallest,
- InternalKey* largest) {
+ InternalKey* smallest, InternalKey* largest) {
assert(!inputs.empty());
smallest->Clear();
largest->Clear();
@@ -1248,8 +1205,7 @@ void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
// REQUIRES: inputs is not empty
void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1,
const std::vector<FileMetaData*>& inputs2,
- InternalKey* smallest,
- InternalKey* largest) {
+ InternalKey* smallest, InternalKey* largest) {
std::vector<FileMetaData*> all = inputs1;
all.insert(all.end(), inputs2.begin(), inputs2.end());
GetRange(all, smallest, largest);
@@ -1271,8 +1227,8 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
if (c->level() + which == 0) {
const std::vector<FileMetaData*>& files = c->inputs_[which];
for (size_t i = 0; i < files.size(); i++) {
- list[num++] = table_cache_->NewIterator(
- options, files[i]->number, files[i]->file_size);
+ list[num++] = table_cache_->NewIterator(options, files[i]->number,
+ files[i]->file_size);
}
} else {
// Create concatenating iterator for the files from this level
@@ -1295,11 +1251,11 @@ Compaction* VersionSet::PickCompaction() {
// We prefer compactions triggered by too much data in a level over
// the compactions triggered by seeks.
const bool size_compaction = (current_->compaction_score_ >= 1);
- const bool seek_compaction = (current_->file_to_compact_ != NULL);
+ const bool seek_compaction = (current_->file_to_compact_ != nullptr);
if (size_compaction) {
level = current_->compaction_level_;
assert(level >= 0);
- assert(level+1 < config::kNumLevels);
+ assert(level + 1 < config::kNumLevels);
c = new Compaction(options_, level);
// Pick the first file that comes after compact_pointer_[level]
@@ -1320,7 +1276,7 @@ Compaction* VersionSet::PickCompaction() {
c = new Compaction(options_, level);
c->inputs_[0].push_back(current_->file_to_compact_);
} else {
- return NULL;
+ return nullptr;
}
c->input_version_ = current_;
@@ -1342,12 +1298,94 @@ Compaction* VersionSet::PickCompaction() {
return c;
}
+// Finds the largest key in a vector of files. Returns true if files is not
+// empty.
+bool FindLargestKey(const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& files,
+ InternalKey* largest_key) {
+ if (files.empty()) {
+ return false;
+ }
+ *largest_key = files[0]->largest;
+ for (size_t i = 1; i < files.size(); ++i) {
+ FileMetaData* f = files[i];
+ if (icmp.Compare(f->largest, *largest_key) > 0) {
+ *largest_key = f->largest;
+ }
+ }
+ return true;
+}
+
+// Finds the minimum file b2=(l2, u2) in level_files for which l2 > u1 and
+// user_key(l2) = user_key(u1)
+FileMetaData* FindSmallestBoundaryFile(
+ const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& level_files,
+ const InternalKey& largest_key) {
+ const Comparator* user_cmp = icmp.user_comparator();
+ FileMetaData* smallest_boundary_file = nullptr;
+ for (size_t i = 0; i < level_files.size(); ++i) {
+ FileMetaData* f = level_files[i];
+ if (icmp.Compare(f->smallest, largest_key) > 0 &&
+ user_cmp->Compare(f->smallest.user_key(), largest_key.user_key()) ==
+ 0) {
+ if (smallest_boundary_file == nullptr ||
+ icmp.Compare(f->smallest, smallest_boundary_file->smallest) < 0) {
+ smallest_boundary_file = f;
+ }
+ }
+ }
+ return smallest_boundary_file;
+}
+
+// Extracts the largest file b1 from |compaction_files| and then searches for a
+// b2 in |level_files| for which user_key(u1) = user_key(l2). If it finds such a
+// file b2 (known as a boundary file) it adds it to |compaction_files| and then
+// searches again using this new upper bound.
+//
+// If there are two blocks, b1=(l1, u1) and b2=(l2, u2) and
+// user_key(u1) = user_key(l2), and if we compact b1 but not b2 then a
+// subsequent get operation will yield an incorrect result because it will
+// return the record from b2 in level i rather than from b1 because it searches
+// level by level for records matching the supplied user key.
+//
+// parameters:
+// in level_files: List of files to search for boundary files.
+// in/out compaction_files: List of files to extend by adding boundary files.
+void AddBoundaryInputs(const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& level_files,
+ std::vector<FileMetaData*>* compaction_files) {
+ InternalKey largest_key;
+
+ // Quick return if compaction_files is empty.
+ if (!FindLargestKey(icmp, *compaction_files, &largest_key)) {
+ return;
+ }
+
+ bool continue_searching = true;
+ while (continue_searching) {
+ FileMetaData* smallest_boundary_file =
+ FindSmallestBoundaryFile(icmp, level_files, largest_key);
+
+ // If a boundary file was found advance largest_key, otherwise we're done.
+ if (smallest_boundary_file != NULL) {
+ compaction_files->push_back(smallest_boundary_file);
+ largest_key = smallest_boundary_file->largest;
+ } else {
+ continue_searching = false;
+ }
+ }
+}
+
void VersionSet::SetupOtherInputs(Compaction* c) {
const int level = c->level();
InternalKey smallest, largest;
+
+ AddBoundaryInputs(icmp_, current_->files_[level], &c->inputs_[0]);
GetRange(c->inputs_[0], &smallest, &largest);
- current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1]);
+ current_->GetOverlappingInputs(level + 1, &smallest, &largest,
+ &c->inputs_[1]);
// Get entire range covered by compaction
InternalKey all_start, all_limit;
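The scenario described by the AddBoundaryInputs comment above can be modeled with plain structs. In the sketch below (simplified internal keys and hypothetical data, not part of the patch), b1's largest key and b2's smallest key share the user key "100", so b2 must join the compaction; otherwise a later Get would find b2's stale record at the original level before reaching the compacted copy of b1. The real function repeats the search with the updated largest key until no boundary file remains; this sketch does a single pass.

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    struct IKey { std::string user_key; uint64_t seq; };  // higher seq == newer
    struct File { IKey smallest, largest; };

    // Internal-key order: by user key, then by descending sequence number.
    static int Compare(const IKey& a, const IKey& b) {
      if (a.user_key != b.user_key) return a.user_key < b.user_key ? -1 : 1;
      if (a.seq != b.seq) return a.seq > b.seq ? -1 : 1;
      return 0;
    }

    int main() {
      // b1 holds the newer versions of "100"; b2 begins with an older "100".
      File b1{{"100", 3}, {"100", 2}};
      File b2{{"100", 1}, {"200", 3}};
      std::vector<File*> level_files = {&b2, &b1};
      std::vector<File*> compaction_files = {&b1};

      // Boundary search: a file whose smallest key sorts after b1's largest key
      // but shares the same user key must be compacted together with b1.
      IKey largest = compaction_files.back()->largest;
      for (File* f : level_files) {
        if (Compare(f->smallest, largest) > 0 &&
            f->smallest.user_key == largest.user_key) {
          compaction_files.push_back(f);
        }
      }
      std::printf("compaction inputs: %zu files\n", compaction_files.size());  // 2
    }

The TestOneBoundaryFiles case added to version_set_test.cc below encodes the same situation against the real FileMetaData and InternalKey types.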
@@ -1358,6 +1396,7 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
if (!c->inputs_[1].empty()) {
std::vector<FileMetaData*> expanded0;
current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0);
+ AddBoundaryInputs(icmp_, current_->files_[level], &expanded0);
const int64_t inputs0_size = TotalFileSize(c->inputs_[0]);
const int64_t inputs1_size = TotalFileSize(c->inputs_[1]);
const int64_t expanded0_size = TotalFileSize(expanded0);
@@ -1367,18 +1406,14 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
InternalKey new_start, new_limit;
GetRange(expanded0, &new_start, &new_limit);
std::vector<FileMetaData*> expanded1;
- current_->GetOverlappingInputs(level+1, &new_start, &new_limit,
+ current_->GetOverlappingInputs(level + 1, &new_start, &new_limit,
&expanded1);
if (expanded1.size() == c->inputs_[1].size()) {
Log(options_->info_log,
"Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
- level,
- int(c->inputs_[0].size()),
- int(c->inputs_[1].size()),
- long(inputs0_size), long(inputs1_size),
- int(expanded0.size()),
- int(expanded1.size()),
- long(expanded0_size), long(inputs1_size));
+ level, int(c->inputs_[0].size()), int(c->inputs_[1].size()),
+ long(inputs0_size), long(inputs1_size), int(expanded0.size()),
+ int(expanded1.size()), long(expanded0_size), long(inputs1_size));
smallest = new_start;
largest = new_limit;
c->inputs_[0] = expanded0;
@@ -1395,13 +1430,6 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
&c->grandparents_);
}
- if (false) {
- Log(options_->info_log, "Compacting %d '%s' .. '%s'",
- level,
- smallest.DebugString().c_str(),
- largest.DebugString().c_str());
- }
-
// Update the place where we will do the next compaction for this level.
// We update this immediately instead of waiting for the VersionEdit
// to be applied so that if the compaction fails, we will try a different
@@ -1410,14 +1438,12 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
c->edit_.SetCompactPointer(level, largest);
}
-Compaction* VersionSet::CompactRange(
- int level,
- const InternalKey* begin,
- const InternalKey* end) {
+Compaction* VersionSet::CompactRange(int level, const InternalKey* begin,
+ const InternalKey* end) {
std::vector<FileMetaData*> inputs;
current_->GetOverlappingInputs(level, begin, end, &inputs);
if (inputs.empty()) {
- return NULL;
+ return nullptr;
}
// Avoid compacting too much in one shot in case the range is large.
@@ -1448,7 +1474,7 @@ Compaction* VersionSet::CompactRange(
Compaction::Compaction(const Options* options, int level)
: level_(level),
max_output_file_size_(MaxFileSizeForLevel(options, level)),
- input_version_(NULL),
+ input_version_(nullptr),
grandparent_index_(0),
seen_key_(false),
overlapped_bytes_(0) {
@@ -1458,7 +1484,7 @@ Compaction::Compaction(const Options* options, int level)
}
Compaction::~Compaction() {
- if (input_version_ != NULL) {
+ if (input_version_ != nullptr) {
input_version_->Unref();
}
}
@@ -1486,7 +1512,7 @@ bool Compaction::IsBaseLevelForKey(const Slice& user_key) {
const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) {
const std::vector<FileMetaData*>& files = input_version_->files_[lvl];
- for (; level_ptrs_[lvl] < files.size(); ) {
+ while (level_ptrs_[lvl] < files.size()) {
FileMetaData* f = files[level_ptrs_[lvl]];
if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
// We've advanced far enough
@@ -1507,8 +1533,9 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {
// Scan to find earliest grandparent file that contains key.
const InternalKeyComparator* icmp = &vset->icmp_;
while (grandparent_index_ < grandparents_.size() &&
- icmp->Compare(internal_key,
- grandparents_[grandparent_index_]->largest.Encode()) > 0) {
+ icmp->Compare(internal_key,
+ grandparents_[grandparent_index_]->largest.Encode()) >
+ 0) {
if (seen_key_) {
overlapped_bytes_ += grandparents_[grandparent_index_]->file_size;
}
@@ -1526,9 +1553,9 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {
}
void Compaction::ReleaseInputs() {
- if (input_version_ != NULL) {
+ if (input_version_ != nullptr) {
input_version_->Unref();
- input_version_ = NULL;
+ input_version_ = nullptr;
}
}
diff --git a/src/leveldb/db/version_set.h b/src/leveldb/db/version_set.h
index 7935a965a7..69f3d70133 100644
--- a/src/leveldb/db/version_set.h
+++ b/src/leveldb/db/version_set.h
@@ -18,6 +18,7 @@
#include <map>
#include <set>
#include <vector>
+
#include "db/dbformat.h"
#include "db/version_edit.h"
#include "port/port.h"
@@ -25,7 +26,9 @@
namespace leveldb {
-namespace log { class Writer; }
+namespace log {
+class Writer;
+}
class Compaction;
class Iterator;
@@ -39,30 +42,23 @@ class WritableFile;
// Return the smallest index i such that files[i]->largest >= key.
// Return files.size() if there is no such file.
// REQUIRES: "files" contains a sorted list of non-overlapping files.
-extern int FindFile(const InternalKeyComparator& icmp,
- const std::vector<FileMetaData*>& files,
- const Slice& key);
+int FindFile(const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& files, const Slice& key);
// Returns true iff some file in "files" overlaps the user key range
// [*smallest,*largest].
-// smallest==NULL represents a key smaller than all keys in the DB.
-// largest==NULL represents a key largest than all keys in the DB.
+// smallest==nullptr represents a key smaller than all keys in the DB.
+// largest==nullptr represents a key larger than all keys in the DB.
// REQUIRES: If disjoint_sorted_files, files[] contains disjoint ranges
// in sorted order.
-extern bool SomeFileOverlapsRange(
- const InternalKeyComparator& icmp,
- bool disjoint_sorted_files,
- const std::vector<FileMetaData*>& files,
- const Slice* smallest_user_key,
- const Slice* largest_user_key);
+bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
+ bool disjoint_sorted_files,
+ const std::vector<FileMetaData*>& files,
+ const Slice* smallest_user_key,
+ const Slice* largest_user_key);
class Version {
public:
- // Append to *iters a sequence of iterators that will
- // yield the contents of this Version when merged together.
- // REQUIRES: This version has been saved (see VersionSet::SaveTo)
- void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
-
// Lookup the value for key. If found, store it in *val and
// return OK. Else return a non-OK status. Fills *stats.
// REQUIRES: lock is not held
@@ -70,6 +66,12 @@ class Version {
FileMetaData* seek_file;
int seek_file_level;
};
+
+ // Append to *iters a sequence of iterators that will
+ // yield the contents of this Version when merged together.
+ // REQUIRES: This version has been saved (see VersionSet::SaveTo)
+ void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
+
Status Get(const ReadOptions&, const LookupKey& key, std::string* val,
GetStats* stats);
@@ -91,16 +93,15 @@ class Version {
void GetOverlappingInputs(
int level,
- const InternalKey* begin, // NULL means before all keys
- const InternalKey* end, // NULL means after all keys
+ const InternalKey* begin, // nullptr means before all keys
+ const InternalKey* end, // nullptr means after all keys
std::vector<FileMetaData*>* inputs);
// Returns true iff some file in the specified level overlaps
// some part of [*smallest_user_key,*largest_user_key].
- // smallest_user_key==NULL represents a key smaller than all keys in the DB.
- // largest_user_key==NULL represents a key largest than all keys in the DB.
- bool OverlapInLevel(int level,
- const Slice* smallest_user_key,
+ // smallest_user_key==nullptr represents a key smaller than all the DB's keys.
+  // largest_user_key==nullptr represents a key larger than all the DB's keys.
+ bool OverlapInLevel(int level, const Slice* smallest_user_key,
const Slice* largest_user_key);
// Return the level at which we should place a new memtable compaction
@@ -118,6 +119,22 @@ class Version {
friend class VersionSet;
class LevelFileNumIterator;
+
+ explicit Version(VersionSet* vset)
+ : vset_(vset),
+ next_(this),
+ prev_(this),
+ refs_(0),
+ file_to_compact_(nullptr),
+ file_to_compact_level_(-1),
+ compaction_score_(-1),
+ compaction_level_(-1) {}
+
+ Version(const Version&) = delete;
+ Version& operator=(const Version&) = delete;
+
+ ~Version();
+
Iterator* NewConcatenatingIterator(const ReadOptions&, int level) const;
// Call func(arg, level, f) for every file that overlaps user_key in
@@ -125,14 +142,13 @@ class Version {
// false, makes no more calls.
//
// REQUIRES: user portion of internal_key == user_key.
- void ForEachOverlapping(Slice user_key, Slice internal_key,
- void* arg,
+ void ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
bool (*func)(void*, int, FileMetaData*));
- VersionSet* vset_; // VersionSet to which this Version belongs
- Version* next_; // Next version in linked list
- Version* prev_; // Previous version in linked list
- int refs_; // Number of live refs to this version
+ VersionSet* vset_; // VersionSet to which this Version belongs
+ Version* next_; // Next version in linked list
+ Version* prev_; // Previous version in linked list
+ int refs_; // Number of live refs to this version
// List of files per level
std::vector<FileMetaData*> files_[config::kNumLevels];
@@ -146,28 +162,15 @@ class Version {
// are initialized by Finalize().
double compaction_score_;
int compaction_level_;
-
- explicit Version(VersionSet* vset)
- : vset_(vset), next_(this), prev_(this), refs_(0),
- file_to_compact_(NULL),
- file_to_compact_level_(-1),
- compaction_score_(-1),
- compaction_level_(-1) {
- }
-
- ~Version();
-
- // No copying allowed
- Version(const Version&);
- void operator=(const Version&);
};
class VersionSet {
public:
- VersionSet(const std::string& dbname,
- const Options* options,
- TableCache* table_cache,
- const InternalKeyComparator*);
+ VersionSet(const std::string& dbname, const Options* options,
+ TableCache* table_cache, const InternalKeyComparator*);
+ VersionSet(const VersionSet&) = delete;
+ VersionSet& operator=(const VersionSet&) = delete;
+
~VersionSet();
// Apply *edit to the current version to form a new descriptor that
@@ -179,7 +182,7 @@ class VersionSet {
EXCLUSIVE_LOCKS_REQUIRED(mu);
// Recover the last saved descriptor from persistent storage.
- Status Recover(bool *save_manifest);
+ Status Recover(bool* save_manifest);
// Return the current version.
Version* current() const { return current_; }
@@ -225,19 +228,17 @@ class VersionSet {
uint64_t PrevLogNumber() const { return prev_log_number_; }
// Pick level and inputs for a new compaction.
- // Returns NULL if there is no compaction to be done.
+ // Returns nullptr if there is no compaction to be done.
// Otherwise returns a pointer to a heap-allocated object that
// describes the compaction. Caller should delete the result.
Compaction* PickCompaction();
// Return a compaction object for compacting the range [begin,end] in
- // the specified level. Returns NULL if there is nothing in that
+ // the specified level. Returns nullptr if there is nothing in that
// level that overlaps the specified range. Caller should delete
// the result.
- Compaction* CompactRange(
- int level,
- const InternalKey* begin,
- const InternalKey* end);
+ Compaction* CompactRange(int level, const InternalKey* begin,
+ const InternalKey* end);
// Return the maximum overlapping data (in bytes) at next level for any
// file at a level >= 1.
@@ -250,7 +251,7 @@ class VersionSet {
// Returns true iff some level needs a compaction.
bool NeedsCompaction() const {
Version* v = current_;
- return (v->compaction_score_ >= 1) || (v->file_to_compact_ != NULL);
+ return (v->compaction_score_ >= 1) || (v->file_to_compact_ != nullptr);
}
// Add all files listed in any live version to *live.
@@ -278,14 +279,12 @@ class VersionSet {
void Finalize(Version* v);
- void GetRange(const std::vector<FileMetaData*>& inputs,
- InternalKey* smallest,
+ void GetRange(const std::vector<FileMetaData*>& inputs, InternalKey* smallest,
InternalKey* largest);
void GetRange2(const std::vector<FileMetaData*>& inputs1,
const std::vector<FileMetaData*>& inputs2,
- InternalKey* smallest,
- InternalKey* largest);
+ InternalKey* smallest, InternalKey* largest);
void SetupOtherInputs(Compaction* c);
@@ -314,10 +313,6 @@ class VersionSet {
// Per-level key at which the next compaction at that level should start.
// Either an empty string, or a valid InternalKey.
std::string compact_pointer_[config::kNumLevels];
-
- // No copying allowed
- VersionSet(const VersionSet&);
- void operator=(const VersionSet&);
};
// A Compaction encapsulates information about a compaction.
@@ -374,7 +369,7 @@ class Compaction {
VersionEdit edit_;
// Each compaction reads inputs from "level_" and "level_+1"
- std::vector<FileMetaData*> inputs_[2]; // The two sets of inputs
+ std::vector<FileMetaData*> inputs_[2]; // The two sets of inputs
// State used to check for number of overlapping grandparent files
// (parent == level_ + 1, grandparent == level_ + 2)
diff --git a/src/leveldb/db/version_set_test.cc b/src/leveldb/db/version_set_test.cc
index 501e34d133..c1056a1e7d 100644
--- a/src/leveldb/db/version_set_test.cc
+++ b/src/leveldb/db/version_set_test.cc
@@ -11,10 +11,7 @@ namespace leveldb {
class FindFileTest {
public:
- std::vector<FileMetaData*> files_;
- bool disjoint_sorted_files_;
-
- FindFileTest() : disjoint_sorted_files_(true) { }
+ FindFileTest() : disjoint_sorted_files_(true) {}
~FindFileTest() {
for (int i = 0; i < files_.size(); i++) {
@@ -40,20 +37,25 @@ class FindFileTest {
bool Overlaps(const char* smallest, const char* largest) {
InternalKeyComparator cmp(BytewiseComparator());
- Slice s(smallest != NULL ? smallest : "");
- Slice l(largest != NULL ? largest : "");
+ Slice s(smallest != nullptr ? smallest : "");
+ Slice l(largest != nullptr ? largest : "");
return SomeFileOverlapsRange(cmp, disjoint_sorted_files_, files_,
- (smallest != NULL ? &s : NULL),
- (largest != NULL ? &l : NULL));
+ (smallest != nullptr ? &s : nullptr),
+ (largest != nullptr ? &l : nullptr));
}
+
+ bool disjoint_sorted_files_;
+
+ private:
+ std::vector<FileMetaData*> files_;
};
TEST(FindFileTest, Empty) {
ASSERT_EQ(0, Find("foo"));
- ASSERT_TRUE(! Overlaps("a", "z"));
- ASSERT_TRUE(! Overlaps(NULL, "z"));
- ASSERT_TRUE(! Overlaps("a", NULL));
- ASSERT_TRUE(! Overlaps(NULL, NULL));
+ ASSERT_TRUE(!Overlaps("a", "z"));
+ ASSERT_TRUE(!Overlaps(nullptr, "z"));
+ ASSERT_TRUE(!Overlaps("a", nullptr));
+ ASSERT_TRUE(!Overlaps(nullptr, nullptr));
}
TEST(FindFileTest, Single) {
@@ -65,8 +67,8 @@ TEST(FindFileTest, Single) {
ASSERT_EQ(1, Find("q1"));
ASSERT_EQ(1, Find("z"));
- ASSERT_TRUE(! Overlaps("a", "b"));
- ASSERT_TRUE(! Overlaps("z1", "z2"));
+ ASSERT_TRUE(!Overlaps("a", "b"));
+ ASSERT_TRUE(!Overlaps("z1", "z2"));
ASSERT_TRUE(Overlaps("a", "p"));
ASSERT_TRUE(Overlaps("a", "q"));
ASSERT_TRUE(Overlaps("a", "z"));
@@ -78,15 +80,14 @@ TEST(FindFileTest, Single) {
ASSERT_TRUE(Overlaps("q", "q"));
ASSERT_TRUE(Overlaps("q", "q1"));
- ASSERT_TRUE(! Overlaps(NULL, "j"));
- ASSERT_TRUE(! Overlaps("r", NULL));
- ASSERT_TRUE(Overlaps(NULL, "p"));
- ASSERT_TRUE(Overlaps(NULL, "p1"));
- ASSERT_TRUE(Overlaps("q", NULL));
- ASSERT_TRUE(Overlaps(NULL, NULL));
+ ASSERT_TRUE(!Overlaps(nullptr, "j"));
+ ASSERT_TRUE(!Overlaps("r", nullptr));
+ ASSERT_TRUE(Overlaps(nullptr, "p"));
+ ASSERT_TRUE(Overlaps(nullptr, "p1"));
+ ASSERT_TRUE(Overlaps("q", nullptr));
+ ASSERT_TRUE(Overlaps(nullptr, nullptr));
}
-
TEST(FindFileTest, Multiple) {
Add("150", "200");
Add("200", "250");
@@ -110,10 +111,10 @@ TEST(FindFileTest, Multiple) {
ASSERT_EQ(3, Find("450"));
ASSERT_EQ(4, Find("451"));
- ASSERT_TRUE(! Overlaps("100", "149"));
- ASSERT_TRUE(! Overlaps("251", "299"));
- ASSERT_TRUE(! Overlaps("451", "500"));
- ASSERT_TRUE(! Overlaps("351", "399"));
+ ASSERT_TRUE(!Overlaps("100", "149"));
+ ASSERT_TRUE(!Overlaps("251", "299"));
+ ASSERT_TRUE(!Overlaps("451", "500"));
+ ASSERT_TRUE(!Overlaps("351", "399"));
ASSERT_TRUE(Overlaps("100", "150"));
ASSERT_TRUE(Overlaps("100", "200"));
@@ -130,25 +131,25 @@ TEST(FindFileTest, MultipleNullBoundaries) {
Add("200", "250");
Add("300", "350");
Add("400", "450");
- ASSERT_TRUE(! Overlaps(NULL, "149"));
- ASSERT_TRUE(! Overlaps("451", NULL));
- ASSERT_TRUE(Overlaps(NULL, NULL));
- ASSERT_TRUE(Overlaps(NULL, "150"));
- ASSERT_TRUE(Overlaps(NULL, "199"));
- ASSERT_TRUE(Overlaps(NULL, "200"));
- ASSERT_TRUE(Overlaps(NULL, "201"));
- ASSERT_TRUE(Overlaps(NULL, "400"));
- ASSERT_TRUE(Overlaps(NULL, "800"));
- ASSERT_TRUE(Overlaps("100", NULL));
- ASSERT_TRUE(Overlaps("200", NULL));
- ASSERT_TRUE(Overlaps("449", NULL));
- ASSERT_TRUE(Overlaps("450", NULL));
+ ASSERT_TRUE(!Overlaps(nullptr, "149"));
+ ASSERT_TRUE(!Overlaps("451", nullptr));
+ ASSERT_TRUE(Overlaps(nullptr, nullptr));
+ ASSERT_TRUE(Overlaps(nullptr, "150"));
+ ASSERT_TRUE(Overlaps(nullptr, "199"));
+ ASSERT_TRUE(Overlaps(nullptr, "200"));
+ ASSERT_TRUE(Overlaps(nullptr, "201"));
+ ASSERT_TRUE(Overlaps(nullptr, "400"));
+ ASSERT_TRUE(Overlaps(nullptr, "800"));
+ ASSERT_TRUE(Overlaps("100", nullptr));
+ ASSERT_TRUE(Overlaps("200", nullptr));
+ ASSERT_TRUE(Overlaps("449", nullptr));
+ ASSERT_TRUE(Overlaps("450", nullptr));
}
TEST(FindFileTest, OverlapSequenceChecks) {
Add("200", "200", 5000, 3000);
- ASSERT_TRUE(! Overlaps("199", "199"));
- ASSERT_TRUE(! Overlaps("201", "300"));
+ ASSERT_TRUE(!Overlaps("199", "199"));
+ ASSERT_TRUE(!Overlaps("201", "300"));
ASSERT_TRUE(Overlaps("200", "200"));
ASSERT_TRUE(Overlaps("190", "200"));
ASSERT_TRUE(Overlaps("200", "210"));
@@ -158,8 +159,8 @@ TEST(FindFileTest, OverlappingFiles) {
Add("150", "600");
Add("400", "500");
disjoint_sorted_files_ = false;
- ASSERT_TRUE(! Overlaps("100", "149"));
- ASSERT_TRUE(! Overlaps("601", "700"));
+ ASSERT_TRUE(!Overlaps("100", "149"));
+ ASSERT_TRUE(!Overlaps("601", "700"));
ASSERT_TRUE(Overlaps("100", "150"));
ASSERT_TRUE(Overlaps("100", "200"));
ASSERT_TRUE(Overlaps("100", "300"));
@@ -172,8 +173,160 @@ TEST(FindFileTest, OverlappingFiles) {
ASSERT_TRUE(Overlaps("600", "700"));
}
-} // namespace leveldb
+void AddBoundaryInputs(const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& level_files,
+ std::vector<FileMetaData*>* compaction_files);
+
+class AddBoundaryInputsTest {
+ public:
+ std::vector<FileMetaData*> level_files_;
+ std::vector<FileMetaData*> compaction_files_;
+ std::vector<FileMetaData*> all_files_;
+ InternalKeyComparator icmp_;
+
+ AddBoundaryInputsTest() : icmp_(BytewiseComparator()) {}
+
+ ~AddBoundaryInputsTest() {
+ for (size_t i = 0; i < all_files_.size(); ++i) {
+ delete all_files_[i];
+ }
+ all_files_.clear();
+ }
+
+ FileMetaData* CreateFileMetaData(uint64_t number, InternalKey smallest,
+ InternalKey largest) {
+ FileMetaData* f = new FileMetaData();
+ f->number = number;
+ f->smallest = smallest;
+ f->largest = largest;
+ all_files_.push_back(f);
+ return f;
+ }
+};
+
+TEST(AddBoundaryInputsTest, TestEmptyFileSets) {
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_TRUE(compaction_files_.empty());
+ ASSERT_TRUE(level_files_.empty());
+}
+
+TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("100", 1, kTypeValue)));
+ compaction_files_.push_back(f1);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(1, compaction_files_.size());
+ ASSERT_EQ(f1, compaction_files_[0]);
+ ASSERT_TRUE(level_files_.empty());
+}
+
+TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("100", 1, kTypeValue)));
+ level_files_.push_back(f1);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_TRUE(compaction_files_.empty());
+ ASSERT_EQ(1, level_files_.size());
+ ASSERT_EQ(f1, level_files_[0]);
+}
+
+TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("100", 1, kTypeValue)));
+ FileMetaData* f2 =
+ CreateFileMetaData(1, InternalKey("200", 2, kTypeValue),
+ InternalKey(InternalKey("200", 1, kTypeValue)));
+ FileMetaData* f3 =
+ CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
+ InternalKey(InternalKey("300", 1, kTypeValue)));
+
+ level_files_.push_back(f3);
+ level_files_.push_back(f2);
+ level_files_.push_back(f1);
+ compaction_files_.push_back(f2);
+ compaction_files_.push_back(f3);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(2, compaction_files_.size());
+}
+
+TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 3, kTypeValue),
+ InternalKey(InternalKey("100", 2, kTypeValue)));
+ FileMetaData* f2 =
+ CreateFileMetaData(1, InternalKey("100", 1, kTypeValue),
+ InternalKey(InternalKey("200", 3, kTypeValue)));
+ FileMetaData* f3 =
+ CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
+ InternalKey(InternalKey("300", 1, kTypeValue)));
+
+ level_files_.push_back(f3);
+ level_files_.push_back(f2);
+ level_files_.push_back(f1);
+ compaction_files_.push_back(f1);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(2, compaction_files_.size());
+ ASSERT_EQ(f1, compaction_files_[0]);
+ ASSERT_EQ(f2, compaction_files_[1]);
+}
+
+TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+ InternalKey(InternalKey("100", 5, kTypeValue)));
+ FileMetaData* f2 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("300", 1, kTypeValue)));
+ FileMetaData* f3 =
+ CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
+ InternalKey(InternalKey("100", 3, kTypeValue)));
+
+ level_files_.push_back(f2);
+ level_files_.push_back(f3);
+ level_files_.push_back(f1);
+ compaction_files_.push_back(f1);
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(3, compaction_files_.size());
+ ASSERT_EQ(f1, compaction_files_[0]);
+ ASSERT_EQ(f3, compaction_files_[1]);
+ ASSERT_EQ(f2, compaction_files_[2]);
}
+
+TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+ InternalKey(InternalKey("100", 5, kTypeValue)));
+ FileMetaData* f2 =
+ CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+ InternalKey(InternalKey("100", 5, kTypeValue)));
+ FileMetaData* f3 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("300", 1, kTypeValue)));
+ FileMetaData* f4 =
+ CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
+ InternalKey(InternalKey("100", 3, kTypeValue)));
+
+ level_files_.push_back(f2);
+ level_files_.push_back(f3);
+ level_files_.push_back(f4);
+
+ compaction_files_.push_back(f1);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(3, compaction_files_.size());
+ ASSERT_EQ(f1, compaction_files_[0]);
+ ASSERT_EQ(f4, compaction_files_[1]);
+ ASSERT_EQ(f3, compaction_files_[2]);
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/write_batch.cc b/src/leveldb/db/write_batch.cc
index 33f4a4257e..b54313c35e 100644
--- a/src/leveldb/db/write_batch.cc
+++ b/src/leveldb/db/write_batch.cc
@@ -15,10 +15,10 @@
#include "leveldb/write_batch.h"
-#include "leveldb/db.h"
#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
+#include "leveldb/db.h"
#include "util/coding.h"
namespace leveldb {
@@ -26,19 +26,19 @@ namespace leveldb {
// WriteBatch header has an 8-byte sequence number followed by a 4-byte count.
static const size_t kHeader = 12;
-WriteBatch::WriteBatch() {
- Clear();
-}
+WriteBatch::WriteBatch() { Clear(); }
-WriteBatch::~WriteBatch() { }
+WriteBatch::~WriteBatch() = default;
-WriteBatch::Handler::~Handler() { }
+WriteBatch::Handler::~Handler() = default;
void WriteBatch::Clear() {
rep_.clear();
rep_.resize(kHeader);
}
+size_t WriteBatch::ApproximateSize() const { return rep_.size(); }
+
Status WriteBatch::Iterate(Handler* handler) const {
Slice input(rep_);
if (input.size() < kHeader) {
@@ -108,25 +108,28 @@ void WriteBatch::Delete(const Slice& key) {
PutLengthPrefixedSlice(&rep_, key);
}
+void WriteBatch::Append(const WriteBatch& source) {
+ WriteBatchInternal::Append(this, &source);
+}
+
namespace {
class MemTableInserter : public WriteBatch::Handler {
public:
SequenceNumber sequence_;
MemTable* mem_;
- virtual void Put(const Slice& key, const Slice& value) {
+ void Put(const Slice& key, const Slice& value) override {
mem_->Add(sequence_, kTypeValue, key, value);
sequence_++;
}
- virtual void Delete(const Slice& key) {
+ void Delete(const Slice& key) override {
mem_->Add(sequence_, kTypeDeletion, key, Slice());
sequence_++;
}
};
} // namespace
-Status WriteBatchInternal::InsertInto(const WriteBatch* b,
- MemTable* memtable) {
+Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) {
MemTableInserter inserter;
inserter.sequence_ = WriteBatchInternal::Sequence(b);
inserter.mem_ = memtable;
diff --git a/src/leveldb/db/write_batch_internal.h b/src/leveldb/db/write_batch_internal.h
index 9448ef7b21..fce86e3f1f 100644
--- a/src/leveldb/db/write_batch_internal.h
+++ b/src/leveldb/db/write_batch_internal.h
@@ -29,13 +29,9 @@ class WriteBatchInternal {
// this batch.
static void SetSequence(WriteBatch* batch, SequenceNumber seq);
- static Slice Contents(const WriteBatch* batch) {
- return Slice(batch->rep_);
- }
+ static Slice Contents(const WriteBatch* batch) { return Slice(batch->rep_); }
- static size_t ByteSize(const WriteBatch* batch) {
- return batch->rep_.size();
- }
+ static size_t ByteSize(const WriteBatch* batch) { return batch->rep_.size(); }
static void SetContents(WriteBatch* batch, const Slice& contents);
@@ -46,5 +42,4 @@ class WriteBatchInternal {
} // namespace leveldb
-
#endif // STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_
diff --git a/src/leveldb/db/write_batch_test.cc b/src/leveldb/db/write_batch_test.cc
index 9064e3d85e..c32317fb5e 100644
--- a/src/leveldb/db/write_batch_test.cc
+++ b/src/leveldb/db/write_batch_test.cc
@@ -52,7 +52,7 @@ static std::string PrintContents(WriteBatch* b) {
return state;
}
-class WriteBatchTest { };
+class WriteBatchTest {};
TEST(WriteBatchTest, Empty) {
WriteBatch batch;
@@ -68,10 +68,11 @@ TEST(WriteBatchTest, Multiple) {
WriteBatchInternal::SetSequence(&batch, 100);
ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch));
ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
- ASSERT_EQ("Put(baz, boo)@102"
- "Delete(box)@101"
- "Put(foo, bar)@100",
- PrintContents(&batch));
+ ASSERT_EQ(
+ "Put(baz, boo)@102"
+ "Delete(box)@101"
+ "Put(foo, bar)@100",
+ PrintContents(&batch));
}
TEST(WriteBatchTest, Corruption) {
@@ -81,40 +82,56 @@ TEST(WriteBatchTest, Corruption) {
WriteBatchInternal::SetSequence(&batch, 200);
Slice contents = WriteBatchInternal::Contents(&batch);
WriteBatchInternal::SetContents(&batch,
- Slice(contents.data(),contents.size()-1));
- ASSERT_EQ("Put(foo, bar)@200"
- "ParseError()",
- PrintContents(&batch));
+ Slice(contents.data(), contents.size() - 1));
+ ASSERT_EQ(
+ "Put(foo, bar)@200"
+ "ParseError()",
+ PrintContents(&batch));
}
TEST(WriteBatchTest, Append) {
WriteBatch b1, b2;
WriteBatchInternal::SetSequence(&b1, 200);
WriteBatchInternal::SetSequence(&b2, 300);
- WriteBatchInternal::Append(&b1, &b2);
- ASSERT_EQ("",
- PrintContents(&b1));
+ b1.Append(b2);
+ ASSERT_EQ("", PrintContents(&b1));
b2.Put("a", "va");
- WriteBatchInternal::Append(&b1, &b2);
- ASSERT_EQ("Put(a, va)@200",
- PrintContents(&b1));
+ b1.Append(b2);
+ ASSERT_EQ("Put(a, va)@200", PrintContents(&b1));
b2.Clear();
b2.Put("b", "vb");
- WriteBatchInternal::Append(&b1, &b2);
- ASSERT_EQ("Put(a, va)@200"
- "Put(b, vb)@201",
- PrintContents(&b1));
+ b1.Append(b2);
+ ASSERT_EQ(
+ "Put(a, va)@200"
+ "Put(b, vb)@201",
+ PrintContents(&b1));
b2.Delete("foo");
- WriteBatchInternal::Append(&b1, &b2);
- ASSERT_EQ("Put(a, va)@200"
- "Put(b, vb)@202"
- "Put(b, vb)@201"
- "Delete(foo)@203",
- PrintContents(&b1));
+ b1.Append(b2);
+ ASSERT_EQ(
+ "Put(a, va)@200"
+ "Put(b, vb)@202"
+ "Put(b, vb)@201"
+ "Delete(foo)@203",
+ PrintContents(&b1));
}
-} // namespace leveldb
+TEST(WriteBatchTest, ApproximateSize) {
+ WriteBatch batch;
+ size_t empty_size = batch.ApproximateSize();
+
+ batch.Put(Slice("foo"), Slice("bar"));
+ size_t one_key_size = batch.ApproximateSize();
+ ASSERT_LT(empty_size, one_key_size);
+
+ batch.Put(Slice("baz"), Slice("boo"));
+ size_t two_keys_size = batch.ApproximateSize();
+ ASSERT_LT(one_key_size, two_keys_size);
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
+ batch.Delete(Slice("box"));
+ size_t post_delete_size = batch.ApproximateSize();
+ ASSERT_LT(two_keys_size, post_delete_size);
}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
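The write_batch hunks above introduce two public entry points, `WriteBatch::ApproximateSize()` and `WriteBatch::Append()`. A non-authoritative sketch of how they compose, using only the signatures visible in the diff (keys and values are made up):

```c++
// Sketch only: exercises the new ApproximateSize()/Append() API surface.
#include <cstdio>

#include "leveldb/slice.h"
#include "leveldb/write_batch.h"

int main() {
  leveldb::WriteBatch batch;
  std::printf("empty: %zu bytes\n", batch.ApproximateSize());

  batch.Put(leveldb::Slice("k1"), leveldb::Slice("v1"));
  batch.Delete(leveldb::Slice("k2"));
  std::printf("two ops: %zu bytes\n", batch.ApproximateSize());

  // Append() folds another batch in without touching WriteBatchInternal,
  // which is what the rewritten Append test above relies on.
  leveldb::WriteBatch other;
  other.Put(leveldb::Slice("k3"), leveldb::Slice("v3"));
  batch.Append(other);
  std::printf("after append: %zu bytes\n", batch.ApproximateSize());
  return 0;
}
```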
diff --git a/src/leveldb/doc/benchmark.html b/src/leveldb/doc/benchmark.html
index c4639772c1..f3fd77144c 100644
--- a/src/leveldb/doc/benchmark.html
+++ b/src/leveldb/doc/benchmark.html
@@ -90,9 +90,9 @@ div.bsql {
<h4>Benchmark Source Code</h4>
<p>We wrote benchmark tools for SQLite and Kyoto TreeDB based on LevelDB's <span class="code">db_bench</span>. The code for each of the benchmarks resides here:</p>
<ul>
- <li> <b>LevelDB:</b> <a href="http://code.google.com/p/leveldb/source/browse/trunk/db/db_bench.cc">db/db_bench.cc</a>.</li>
- <li> <b>SQLite:</b> <a href="http://code.google.com/p/leveldb/source/browse/#svn%2Ftrunk%2Fdoc%2Fbench%2Fdb_bench_sqlite3.cc">doc/bench/db_bench_sqlite3.cc</a>.</li>
- <li> <b>Kyoto TreeDB:</b> <a href="http://code.google.com/p/leveldb/source/browse/#svn%2Ftrunk%2Fdoc%2Fbench%2Fdb_bench_tree_db.cc">doc/bench/db_bench_tree_db.cc</a>.</li>
+ <li> <b>LevelDB:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench.cc">benchmarks/db_bench.cc</a>.</li>
+ <li> <b>SQLite:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench_sqlite3.cc">benchmarks/db_bench_sqlite3.cc</a>.</li>
+ <li> <b>Kyoto TreeDB:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench_tree_db.cc">benchmarks/db_bench_tree_db.cc</a>.</li>
</ul>
<h4>Custom Build Specifications</h4>
diff --git a/src/leveldb/doc/impl.md b/src/leveldb/doc/impl.md
index 4b13f2a6ba..cacabb96fc 100644
--- a/src/leveldb/doc/impl.md
+++ b/src/leveldb/doc/impl.md
@@ -64,13 +64,15 @@ Other files used for miscellaneous purposes may also be present (LOCK, *.dbtmp).
## Level 0
-When the log file grows above a certain size (1MB by default):
-Create a brand new memtable and log file and direct future updates here
+When the log file grows above a certain size (4MB by default):
+Create a brand new memtable and log file and direct future updates here.
+
In the background:
-Write the contents of the previous memtable to an sstable
-Discard the memtable
-Delete the old log file and the old memtable
-Add the new sstable to the young (level-0) level.
+
+1. Write the contents of the previous memtable to an sstable.
+2. Discard the memtable.
+3. Delete the old log file and the old memtable.
+4. Add the new sstable to the young (level-0) level.
## Compactions
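The impl.md hunk above bumps the documented level-0 flush trigger from 1MB to 4MB. That trigger corresponds to the memtable/write-buffer size, which callers can tune through `Options`. A minimal sketch, assuming the stock `write_buffer_size` and `create_if_missing` fields from `include/leveldb/options.h` (not shown in this diff); the helper name and the 16MB figure are illustrative only:

```c++
// Sketch: raise the memtable flush threshold described above.
#include <string>

#include "leveldb/db.h"
#include "leveldb/options.h"

leveldb::DB* OpenWithLargerMemtable(const std::string& path) {
  leveldb::Options options;
  options.create_if_missing = true;
  options.write_buffer_size = 16 * 1024 * 1024;  // flush to level 0 at ~16MB
  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, path, &db);
  return s.ok() ? db : nullptr;  // caller owns and deletes db
}
```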
diff --git a/src/leveldb/doc/index.md b/src/leveldb/doc/index.md
index be8569692b..3d9a25805b 100644
--- a/src/leveldb/doc/index.md
+++ b/src/leveldb/doc/index.md
@@ -307,7 +307,7 @@ version numbers found in the keys to decide how to interpret them.
## Performance
Performance can be tuned by changing the default values of the types defined in
-`include/leveldb/options.h`.
+`include/options.h`.
### Block size
@@ -338,19 +338,19 @@ options.compression = leveldb::kNoCompression;
### Cache
The contents of the database are stored in a set of files in the filesystem and
-each file stores a sequence of compressed blocks. If options.cache is non-NULL,
-it is used to cache frequently used uncompressed block contents.
+each file stores a sequence of compressed blocks. If options.block_cache is
+non-NULL, it is used to cache frequently used uncompressed block contents.
```c++
#include "leveldb/cache.h"
leveldb::Options options;
-options.cache = leveldb::NewLRUCache(100 * 1048576); // 100MB cache
+options.block_cache = leveldb::NewLRUCache(100 * 1048576); // 100MB cache
leveldb::DB* db;
leveldb::DB::Open(options, name, &db);
... use the db ...
delete db
-delete options.cache;
+delete options.block_cache;
```
Note that the cache holds uncompressed data, and therefore it should be sized
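Beyond renaming `options.cache` to `options.block_cache`, the read path can also opt out of populating that cache. A hedged sketch assuming the usual `ReadOptions::fill_cache` flag (surfaced in the C header later in this diff as `leveldb_readoptions_set_fill_cache`) and an already-open `db` handle:

```c++
// Sketch: scan without populating the block cache discussed above.
#include <cstdio>

#include "leveldb/db.h"
#include "leveldb/iterator.h"
#include "leveldb/options.h"

void ScanAll(leveldb::DB* db) {
  leveldb::ReadOptions read_options;
  read_options.fill_cache = false;  // keep bulk reads out of block_cache
  leveldb::Iterator* it = db->NewIterator(read_options);
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    std::printf("%s\n", it->key().ToString().c_str());
  }
  delete it;  // iterators must be deleted before the DB
}
```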
diff --git a/src/leveldb/helpers/memenv/memenv.cc b/src/leveldb/helpers/memenv/memenv.cc
index 68c0614a59..47e4481f7c 100644
--- a/src/leveldb/helpers/memenv/memenv.cc
+++ b/src/leveldb/helpers/memenv/memenv.cc
@@ -4,14 +4,18 @@
#include "helpers/memenv/memenv.h"
+#include <string.h>
+
+#include <limits>
+#include <map>
+#include <string>
+#include <vector>
+
#include "leveldb/env.h"
#include "leveldb/status.h"
#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/mutexlock.h"
-#include <map>
-#include <string.h>
-#include <string>
-#include <vector>
namespace leveldb {
@@ -23,6 +27,10 @@ class FileState {
// and the caller must call Ref() at least once.
FileState() : refs_(0), size_(0) {}
+ // No copying allowed.
+ FileState(const FileState&) = delete;
+ FileState& operator=(const FileState&) = delete;
+
// Increase the reference count.
void Ref() {
MutexLock lock(&refs_mutex_);
@@ -47,9 +55,22 @@ class FileState {
}
}
- uint64_t Size() const { return size_; }
+ uint64_t Size() const {
+ MutexLock lock(&blocks_mutex_);
+ return size_;
+ }
+
+ void Truncate() {
+ MutexLock lock(&blocks_mutex_);
+ for (char*& block : blocks_) {
+ delete[] block;
+ }
+ blocks_.clear();
+ size_ = 0;
+ }
Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
+ MutexLock lock(&blocks_mutex_);
if (offset > size_) {
return Status::IOError("Offset greater than file size.");
}
@@ -62,16 +83,9 @@ class FileState {
return Status::OK();
}
- assert(offset / kBlockSize <= SIZE_MAX);
+ assert(offset / kBlockSize <= std::numeric_limits<size_t>::max());
size_t block = static_cast<size_t>(offset / kBlockSize);
size_t block_offset = offset % kBlockSize;
-
- if (n <= kBlockSize - block_offset) {
- // The requested bytes are all in the first block.
- *result = Slice(blocks_[block] + block_offset, n);
- return Status::OK();
- }
-
size_t bytes_to_copy = n;
char* dst = scratch;
@@ -96,6 +110,7 @@ class FileState {
const char* src = data.data();
size_t src_len = data.size();
+ MutexLock lock(&blocks_mutex_);
while (src_len > 0) {
size_t avail;
size_t offset = size_ % kBlockSize;
@@ -122,28 +137,17 @@ class FileState {
}
private:
- // Private since only Unref() should be used to delete it.
- ~FileState() {
- for (std::vector<char*>::iterator i = blocks_.begin(); i != blocks_.end();
- ++i) {
- delete [] *i;
- }
- }
+ enum { kBlockSize = 8 * 1024 };
- // No copying allowed.
- FileState(const FileState&);
- void operator=(const FileState&);
+ // Private since only Unref() should be used to delete it.
+ ~FileState() { Truncate(); }
port::Mutex refs_mutex_;
- int refs_; // Protected by refs_mutex_;
+ int refs_ GUARDED_BY(refs_mutex_);
- // The following fields are not protected by any mutex. They are only mutable
- // while the file is being written, and concurrent access is not allowed
- // to writable files.
- std::vector<char*> blocks_;
- uint64_t size_;
-
- enum { kBlockSize = 8 * 1024 };
+ mutable port::Mutex blocks_mutex_;
+ std::vector<char*> blocks_ GUARDED_BY(blocks_mutex_);
+ uint64_t size_ GUARDED_BY(blocks_mutex_);
};
class SequentialFileImpl : public SequentialFile {
@@ -152,11 +156,9 @@ class SequentialFileImpl : public SequentialFile {
file_->Ref();
}
- ~SequentialFileImpl() {
- file_->Unref();
- }
+ ~SequentialFileImpl() override { file_->Unref(); }
- virtual Status Read(size_t n, Slice* result, char* scratch) {
+ Status Read(size_t n, Slice* result, char* scratch) override {
Status s = file_->Read(pos_, n, result, scratch);
if (s.ok()) {
pos_ += result->size();
@@ -164,7 +166,7 @@ class SequentialFileImpl : public SequentialFile {
return s;
}
- virtual Status Skip(uint64_t n) {
+ Status Skip(uint64_t n) override {
if (pos_ > file_->Size()) {
return Status::IOError("pos_ > file_->Size()");
}
@@ -176,7 +178,7 @@ class SequentialFileImpl : public SequentialFile {
return Status::OK();
}
- virtual std::string GetName() const { return "[memenv]"; }
+ virtual std::string GetName() const override { return "[memenv]"; }
private:
FileState* file_;
uint64_t pos_;
@@ -184,68 +186,58 @@ class SequentialFileImpl : public SequentialFile {
class RandomAccessFileImpl : public RandomAccessFile {
public:
- explicit RandomAccessFileImpl(FileState* file) : file_(file) {
- file_->Ref();
- }
+ explicit RandomAccessFileImpl(FileState* file) : file_(file) { file_->Ref(); }
- ~RandomAccessFileImpl() {
- file_->Unref();
- }
+ ~RandomAccessFileImpl() override { file_->Unref(); }
- virtual Status Read(uint64_t offset, size_t n, Slice* result,
- char* scratch) const {
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
return file_->Read(offset, n, result, scratch);
}
- virtual std::string GetName() const { return "[memenv]"; }
+ virtual std::string GetName() const override { return "[memenv]"; }
private:
FileState* file_;
};
class WritableFileImpl : public WritableFile {
public:
- WritableFileImpl(FileState* file) : file_(file) {
- file_->Ref();
- }
+ WritableFileImpl(FileState* file) : file_(file) { file_->Ref(); }
- ~WritableFileImpl() {
- file_->Unref();
- }
+ ~WritableFileImpl() override { file_->Unref(); }
- virtual Status Append(const Slice& data) {
- return file_->Append(data);
- }
+ Status Append(const Slice& data) override { return file_->Append(data); }
- virtual Status Close() { return Status::OK(); }
- virtual Status Flush() { return Status::OK(); }
- virtual Status Sync() { return Status::OK(); }
+ Status Close() override { return Status::OK(); }
+ Status Flush() override { return Status::OK(); }
+ Status Sync() override { return Status::OK(); }
- virtual std::string GetName() const { return "[memenv]"; }
+ virtual std::string GetName() const override { return "[memenv]"; }
private:
FileState* file_;
};
class NoOpLogger : public Logger {
public:
- virtual void Logv(const char* format, va_list ap) { }
+ void Logv(const char* format, va_list ap) override {}
};
class InMemoryEnv : public EnvWrapper {
public:
- explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) { }
+ explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) {}
- virtual ~InMemoryEnv() {
- for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){
- i->second->Unref();
+ ~InMemoryEnv() override {
+ for (const auto& kvp : file_map_) {
+ kvp.second->Unref();
}
}
// Partial implementation of the Env interface.
- virtual Status NewSequentialFile(const std::string& fname,
- SequentialFile** result) {
+ Status NewSequentialFile(const std::string& fname,
+ SequentialFile** result) override {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
- *result = NULL;
+ *result = nullptr;
return Status::IOError(fname, "File not found");
}
@@ -253,11 +245,11 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
- virtual Status NewRandomAccessFile(const std::string& fname,
- RandomAccessFile** result) {
+ Status NewRandomAccessFile(const std::string& fname,
+ RandomAccessFile** result) override {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
- *result = NULL;
+ *result = nullptr;
return Status::IOError(fname, "File not found");
}
@@ -265,27 +257,32 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
- virtual Status NewWritableFile(const std::string& fname,
- WritableFile** result) {
+ Status NewWritableFile(const std::string& fname,
+ WritableFile** result) override {
MutexLock lock(&mutex_);
- if (file_map_.find(fname) != file_map_.end()) {
- DeleteFileInternal(fname);
- }
+ FileSystem::iterator it = file_map_.find(fname);
- FileState* file = new FileState();
- file->Ref();
- file_map_[fname] = file;
+ FileState* file;
+ if (it == file_map_.end()) {
+ // File is not currently open.
+ file = new FileState();
+ file->Ref();
+ file_map_[fname] = file;
+ } else {
+ file = it->second;
+ file->Truncate();
+ }
*result = new WritableFileImpl(file);
return Status::OK();
}
- virtual Status NewAppendableFile(const std::string& fname,
- WritableFile** result) {
+ Status NewAppendableFile(const std::string& fname,
+ WritableFile** result) override {
MutexLock lock(&mutex_);
FileState** sptr = &file_map_[fname];
FileState* file = *sptr;
- if (file == NULL) {
+ if (file == nullptr) {
file = new FileState();
file->Ref();
}
@@ -293,18 +290,18 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
- virtual bool FileExists(const std::string& fname) {
+ bool FileExists(const std::string& fname) override {
MutexLock lock(&mutex_);
return file_map_.find(fname) != file_map_.end();
}
- virtual Status GetChildren(const std::string& dir,
- std::vector<std::string>* result) {
+ Status GetChildren(const std::string& dir,
+ std::vector<std::string>* result) override {
MutexLock lock(&mutex_);
result->clear();
- for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){
- const std::string& filename = i->first;
+ for (const auto& kvp : file_map_) {
+ const std::string& filename = kvp.first;
if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' &&
Slice(filename).starts_with(Slice(dir))) {
@@ -315,7 +312,8 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
- void DeleteFileInternal(const std::string& fname) {
+ void DeleteFileInternal(const std::string& fname)
+ EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
if (file_map_.find(fname) == file_map_.end()) {
return;
}
@@ -324,7 +322,7 @@ class InMemoryEnv : public EnvWrapper {
file_map_.erase(fname);
}
- virtual Status DeleteFile(const std::string& fname) {
+ Status DeleteFile(const std::string& fname) override {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
return Status::IOError(fname, "File not found");
@@ -334,15 +332,11 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
- virtual Status CreateDir(const std::string& dirname) {
- return Status::OK();
- }
+ Status CreateDir(const std::string& dirname) override { return Status::OK(); }
- virtual Status DeleteDir(const std::string& dirname) {
- return Status::OK();
- }
+ Status DeleteDir(const std::string& dirname) override { return Status::OK(); }
- virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) {
+ Status GetFileSize(const std::string& fname, uint64_t* file_size) override {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
return Status::IOError(fname, "File not found");
@@ -352,8 +346,8 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
- virtual Status RenameFile(const std::string& src,
- const std::string& target) {
+ Status RenameFile(const std::string& src,
+ const std::string& target) override {
MutexLock lock(&mutex_);
if (file_map_.find(src) == file_map_.end()) {
return Status::IOError(src, "File not found");
@@ -365,22 +359,22 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK();
}
- virtual Status LockFile(const std::string& fname, FileLock** lock) {
+ Status LockFile(const std::string& fname, FileLock** lock) override {
*lock = new FileLock;
return Status::OK();
}
- virtual Status UnlockFile(FileLock* lock) {
+ Status UnlockFile(FileLock* lock) override {
delete lock;
return Status::OK();
}
- virtual Status GetTestDirectory(std::string* path) {
+ Status GetTestDirectory(std::string* path) override {
*path = "/test";
return Status::OK();
}
- virtual Status NewLogger(const std::string& fname, Logger** result) {
+ Status NewLogger(const std::string& fname, Logger** result) override {
*result = new NoOpLogger;
return Status::OK();
}
@@ -388,14 +382,13 @@ class InMemoryEnv : public EnvWrapper {
private:
// Map from filenames to FileState objects, representing a simple file system.
typedef std::map<std::string, FileState*> FileSystem;
+
port::Mutex mutex_;
- FileSystem file_map_; // Protected by mutex_.
+ FileSystem file_map_ GUARDED_BY(mutex_);
};
} // namespace
-Env* NewMemEnv(Env* base_env) {
- return new InMemoryEnv(base_env);
-}
+Env* NewMemEnv(Env* base_env) { return new InMemoryEnv(base_env); }
} // namespace leveldb
diff --git a/src/leveldb/helpers/memenv/memenv.h b/src/leveldb/helpers/memenv/memenv.h
index 03b88de761..3d929e4c4e 100644
--- a/src/leveldb/helpers/memenv/memenv.h
+++ b/src/leveldb/helpers/memenv/memenv.h
@@ -5,6 +5,8 @@
#ifndef STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_
#define STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_
+#include "leveldb/export.h"
+
namespace leveldb {
class Env;
@@ -13,7 +15,7 @@ class Env;
// all non-file-storage tasks to base_env. The caller must delete the result
// when it is no longer needed.
// *base_env must remain live while the result is in use.
-Env* NewMemEnv(Env* base_env);
+LEVELDB_EXPORT Env* NewMemEnv(Env* base_env);
} // namespace leveldb
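A minimal sketch of the exported `NewMemEnv()` entry point, mirroring what the MemEnvTest fixture below does: wrap the default Env, point `Options::env` at it, and keep the base Env alive while the wrapper is in use. The database name is arbitrary since nothing touches disk:

```c++
// Sketch: run a throwaway database entirely in memory via NewMemEnv().
#include "helpers/memenv/memenv.h"
#include "leveldb/db.h"
#include "leveldb/env.h"

int main() {
  leveldb::Env* mem_env = leveldb::NewMemEnv(leveldb::Env::Default());

  leveldb::Options options;
  options.env = mem_env;
  options.create_if_missing = true;

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/memdir/db", &db);
  if (s.ok()) {
    s = db->Put(leveldb::WriteOptions(), "key", "value");
    delete db;  // close the DB before tearing down its Env
  }
  delete mem_env;  // caller owns the memory Env
  return s.ok() ? 0 : 1;
}
```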
diff --git a/src/leveldb/helpers/memenv/memenv_test.cc b/src/leveldb/helpers/memenv/memenv_test.cc
index 5cff77613f..94ad06be68 100644
--- a/src/leveldb/helpers/memenv/memenv_test.cc
+++ b/src/leveldb/helpers/memenv/memenv_test.cc
@@ -4,25 +4,22 @@
#include "helpers/memenv/memenv.h"
+#include <string>
+#include <vector>
+
#include "db/db_impl.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "util/testharness.h"
-#include <string>
-#include <vector>
namespace leveldb {
class MemEnvTest {
public:
- Env* env_;
+ MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
+ ~MemEnvTest() { delete env_; }
- MemEnvTest()
- : env_(NewMemEnv(Env::Default())) {
- }
- ~MemEnvTest() {
- delete env_;
- }
+ Env* env_;
};
TEST(MemEnvTest, Basics) {
@@ -109,25 +106,25 @@ TEST(MemEnvTest, ReadWrite) {
// Read sequentially.
ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
- ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello".
+ ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello".
ASSERT_EQ(0, result.compare("hello"));
ASSERT_OK(seq_file->Skip(1));
- ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world".
+ ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world".
ASSERT_EQ(0, result.compare("world"));
- ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF.
+ ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF.
ASSERT_EQ(0, result.size());
- ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file.
+ ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file.
ASSERT_OK(seq_file->Read(1000, &result, scratch));
ASSERT_EQ(0, result.size());
delete seq_file;
// Random reads.
ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
- ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world".
+ ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world".
ASSERT_EQ(0, result.compare("world"));
- ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello".
+ ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello".
ASSERT_EQ(0, result.compare("hello"));
- ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d".
+ ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d".
ASSERT_EQ(0, result.compare("d"));
// Too high offset.
@@ -176,7 +173,7 @@ TEST(MemEnvTest, LargeWrite) {
SequentialFile* seq_file;
Slice result;
ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
- ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo".
+ ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo".
ASSERT_EQ(0, result.compare("foo"));
size_t read = 0;
@@ -188,7 +185,30 @@ TEST(MemEnvTest, LargeWrite) {
}
ASSERT_TRUE(write_data == read_data);
delete seq_file;
- delete [] scratch;
+ delete[] scratch;
+}
+
+TEST(MemEnvTest, OverwriteOpenFile) {
+ const char kWrite1Data[] = "Write #1 data";
+ const size_t kFileDataLen = sizeof(kWrite1Data) - 1;
+ const std::string kTestFileName = test::TmpDir() + "/leveldb-TestFile.dat";
+
+ ASSERT_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName));
+
+ RandomAccessFile* rand_file;
+ ASSERT_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file));
+
+ const char kWrite2Data[] = "Write #2 data";
+ ASSERT_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName));
+
+  // Verify that overwriting an open file makes the new file data visible
+  // through file handles that were opened before the write.
+ Slice result;
+ char scratch[kFileDataLen];
+ ASSERT_OK(rand_file->Read(0, kFileDataLen, &result, scratch));
+ ASSERT_EQ(0, result.compare(kWrite2Data));
+
+ delete rand_file;
}
TEST(MemEnvTest, DBTest) {
@@ -236,6 +256,4 @@ TEST(MemEnvTest, DBTest) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/include/leveldb/c.h b/src/leveldb/include/leveldb/c.h
index 1048fe3b86..02c79ba72e 100644
--- a/src/leveldb/include/leveldb/c.h
+++ b/src/leveldb/include/leveldb/c.h
@@ -32,7 +32,7 @@
On failure, leveldb frees the old value of *errptr and
set *errptr to a malloc()ed error message.
- (4) Bools have the type unsigned char (0 == false; rest == true)
+ (4) Bools have the type uint8_t (0 == false; rest == true)
(5) All of the pointer arguments must be non-NULL.
*/
@@ -48,225 +48,205 @@ extern "C" {
#include <stddef.h>
#include <stdint.h>
+#include "leveldb/export.h"
+
/* Exported types */
-typedef struct leveldb_t leveldb_t;
-typedef struct leveldb_cache_t leveldb_cache_t;
-typedef struct leveldb_comparator_t leveldb_comparator_t;
-typedef struct leveldb_env_t leveldb_env_t;
-typedef struct leveldb_filelock_t leveldb_filelock_t;
-typedef struct leveldb_filterpolicy_t leveldb_filterpolicy_t;
-typedef struct leveldb_iterator_t leveldb_iterator_t;
-typedef struct leveldb_logger_t leveldb_logger_t;
-typedef struct leveldb_options_t leveldb_options_t;
-typedef struct leveldb_randomfile_t leveldb_randomfile_t;
-typedef struct leveldb_readoptions_t leveldb_readoptions_t;
-typedef struct leveldb_seqfile_t leveldb_seqfile_t;
-typedef struct leveldb_snapshot_t leveldb_snapshot_t;
-typedef struct leveldb_writablefile_t leveldb_writablefile_t;
-typedef struct leveldb_writebatch_t leveldb_writebatch_t;
-typedef struct leveldb_writeoptions_t leveldb_writeoptions_t;
+typedef struct leveldb_t leveldb_t;
+typedef struct leveldb_cache_t leveldb_cache_t;
+typedef struct leveldb_comparator_t leveldb_comparator_t;
+typedef struct leveldb_env_t leveldb_env_t;
+typedef struct leveldb_filelock_t leveldb_filelock_t;
+typedef struct leveldb_filterpolicy_t leveldb_filterpolicy_t;
+typedef struct leveldb_iterator_t leveldb_iterator_t;
+typedef struct leveldb_logger_t leveldb_logger_t;
+typedef struct leveldb_options_t leveldb_options_t;
+typedef struct leveldb_randomfile_t leveldb_randomfile_t;
+typedef struct leveldb_readoptions_t leveldb_readoptions_t;
+typedef struct leveldb_seqfile_t leveldb_seqfile_t;
+typedef struct leveldb_snapshot_t leveldb_snapshot_t;
+typedef struct leveldb_writablefile_t leveldb_writablefile_t;
+typedef struct leveldb_writebatch_t leveldb_writebatch_t;
+typedef struct leveldb_writeoptions_t leveldb_writeoptions_t;
/* DB operations */
-extern leveldb_t* leveldb_open(
- const leveldb_options_t* options,
- const char* name,
- char** errptr);
+LEVELDB_EXPORT leveldb_t* leveldb_open(const leveldb_options_t* options,
+ const char* name, char** errptr);
-extern void leveldb_close(leveldb_t* db);
+LEVELDB_EXPORT void leveldb_close(leveldb_t* db);
-extern void leveldb_put(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- const char* key, size_t keylen,
- const char* val, size_t vallen,
- char** errptr);
+LEVELDB_EXPORT void leveldb_put(leveldb_t* db,
+ const leveldb_writeoptions_t* options,
+ const char* key, size_t keylen, const char* val,
+ size_t vallen, char** errptr);
-extern void leveldb_delete(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- const char* key, size_t keylen,
- char** errptr);
+LEVELDB_EXPORT void leveldb_delete(leveldb_t* db,
+ const leveldb_writeoptions_t* options,
+ const char* key, size_t keylen,
+ char** errptr);
-extern void leveldb_write(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- leveldb_writebatch_t* batch,
- char** errptr);
+LEVELDB_EXPORT void leveldb_write(leveldb_t* db,
+ const leveldb_writeoptions_t* options,
+ leveldb_writebatch_t* batch, char** errptr);
/* Returns NULL if not found. A malloc()ed array otherwise.
Stores the length of the array in *vallen. */
-extern char* leveldb_get(
- leveldb_t* db,
- const leveldb_readoptions_t* options,
- const char* key, size_t keylen,
- size_t* vallen,
- char** errptr);
+LEVELDB_EXPORT char* leveldb_get(leveldb_t* db,
+ const leveldb_readoptions_t* options,
+ const char* key, size_t keylen, size_t* vallen,
+ char** errptr);
-extern leveldb_iterator_t* leveldb_create_iterator(
- leveldb_t* db,
- const leveldb_readoptions_t* options);
+LEVELDB_EXPORT leveldb_iterator_t* leveldb_create_iterator(
+ leveldb_t* db, const leveldb_readoptions_t* options);
-extern const leveldb_snapshot_t* leveldb_create_snapshot(
- leveldb_t* db);
+LEVELDB_EXPORT const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db);
-extern void leveldb_release_snapshot(
- leveldb_t* db,
- const leveldb_snapshot_t* snapshot);
+LEVELDB_EXPORT void leveldb_release_snapshot(
+ leveldb_t* db, const leveldb_snapshot_t* snapshot);
/* Returns NULL if property name is unknown.
Else returns a pointer to a malloc()-ed null-terminated value. */
-extern char* leveldb_property_value(
- leveldb_t* db,
- const char* propname);
-
-extern void leveldb_approximate_sizes(
- leveldb_t* db,
- int num_ranges,
- const char* const* range_start_key, const size_t* range_start_key_len,
- const char* const* range_limit_key, const size_t* range_limit_key_len,
- uint64_t* sizes);
-
-extern void leveldb_compact_range(
- leveldb_t* db,
- const char* start_key, size_t start_key_len,
- const char* limit_key, size_t limit_key_len);
+LEVELDB_EXPORT char* leveldb_property_value(leveldb_t* db,
+ const char* propname);
+
+LEVELDB_EXPORT void leveldb_approximate_sizes(
+ leveldb_t* db, int num_ranges, const char* const* range_start_key,
+ const size_t* range_start_key_len, const char* const* range_limit_key,
+ const size_t* range_limit_key_len, uint64_t* sizes);
+
+LEVELDB_EXPORT void leveldb_compact_range(leveldb_t* db, const char* start_key,
+ size_t start_key_len,
+ const char* limit_key,
+ size_t limit_key_len);
/* Management operations */
-extern void leveldb_destroy_db(
- const leveldb_options_t* options,
- const char* name,
- char** errptr);
+LEVELDB_EXPORT void leveldb_destroy_db(const leveldb_options_t* options,
+ const char* name, char** errptr);
-extern void leveldb_repair_db(
- const leveldb_options_t* options,
- const char* name,
- char** errptr);
+LEVELDB_EXPORT void leveldb_repair_db(const leveldb_options_t* options,
+ const char* name, char** errptr);
/* Iterator */
-extern void leveldb_iter_destroy(leveldb_iterator_t*);
-extern unsigned char leveldb_iter_valid(const leveldb_iterator_t*);
-extern void leveldb_iter_seek_to_first(leveldb_iterator_t*);
-extern void leveldb_iter_seek_to_last(leveldb_iterator_t*);
-extern void leveldb_iter_seek(leveldb_iterator_t*, const char* k, size_t klen);
-extern void leveldb_iter_next(leveldb_iterator_t*);
-extern void leveldb_iter_prev(leveldb_iterator_t*);
-extern const char* leveldb_iter_key(const leveldb_iterator_t*, size_t* klen);
-extern const char* leveldb_iter_value(const leveldb_iterator_t*, size_t* vlen);
-extern void leveldb_iter_get_error(const leveldb_iterator_t*, char** errptr);
+LEVELDB_EXPORT void leveldb_iter_destroy(leveldb_iterator_t*);
+LEVELDB_EXPORT uint8_t leveldb_iter_valid(const leveldb_iterator_t*);
+LEVELDB_EXPORT void leveldb_iter_seek_to_first(leveldb_iterator_t*);
+LEVELDB_EXPORT void leveldb_iter_seek_to_last(leveldb_iterator_t*);
+LEVELDB_EXPORT void leveldb_iter_seek(leveldb_iterator_t*, const char* k,
+ size_t klen);
+LEVELDB_EXPORT void leveldb_iter_next(leveldb_iterator_t*);
+LEVELDB_EXPORT void leveldb_iter_prev(leveldb_iterator_t*);
+LEVELDB_EXPORT const char* leveldb_iter_key(const leveldb_iterator_t*,
+ size_t* klen);
+LEVELDB_EXPORT const char* leveldb_iter_value(const leveldb_iterator_t*,
+ size_t* vlen);
+LEVELDB_EXPORT void leveldb_iter_get_error(const leveldb_iterator_t*,
+ char** errptr);
/* Write batch */
-extern leveldb_writebatch_t* leveldb_writebatch_create();
-extern void leveldb_writebatch_destroy(leveldb_writebatch_t*);
-extern void leveldb_writebatch_clear(leveldb_writebatch_t*);
-extern void leveldb_writebatch_put(
- leveldb_writebatch_t*,
- const char* key, size_t klen,
- const char* val, size_t vlen);
-extern void leveldb_writebatch_delete(
- leveldb_writebatch_t*,
- const char* key, size_t klen);
-extern void leveldb_writebatch_iterate(
- leveldb_writebatch_t*,
- void* state,
+LEVELDB_EXPORT leveldb_writebatch_t* leveldb_writebatch_create(void);
+LEVELDB_EXPORT void leveldb_writebatch_destroy(leveldb_writebatch_t*);
+LEVELDB_EXPORT void leveldb_writebatch_clear(leveldb_writebatch_t*);
+LEVELDB_EXPORT void leveldb_writebatch_put(leveldb_writebatch_t*,
+ const char* key, size_t klen,
+ const char* val, size_t vlen);
+LEVELDB_EXPORT void leveldb_writebatch_delete(leveldb_writebatch_t*,
+ const char* key, size_t klen);
+LEVELDB_EXPORT void leveldb_writebatch_iterate(
+ const leveldb_writebatch_t*, void* state,
void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
void (*deleted)(void*, const char* k, size_t klen));
+LEVELDB_EXPORT void leveldb_writebatch_append(
+ leveldb_writebatch_t* destination, const leveldb_writebatch_t* source);
/* Options */
-extern leveldb_options_t* leveldb_options_create();
-extern void leveldb_options_destroy(leveldb_options_t*);
-extern void leveldb_options_set_comparator(
- leveldb_options_t*,
- leveldb_comparator_t*);
-extern void leveldb_options_set_filter_policy(
- leveldb_options_t*,
- leveldb_filterpolicy_t*);
-extern void leveldb_options_set_create_if_missing(
- leveldb_options_t*, unsigned char);
-extern void leveldb_options_set_error_if_exists(
- leveldb_options_t*, unsigned char);
-extern void leveldb_options_set_paranoid_checks(
- leveldb_options_t*, unsigned char);
-extern void leveldb_options_set_env(leveldb_options_t*, leveldb_env_t*);
-extern void leveldb_options_set_info_log(leveldb_options_t*, leveldb_logger_t*);
-extern void leveldb_options_set_write_buffer_size(leveldb_options_t*, size_t);
-extern void leveldb_options_set_max_open_files(leveldb_options_t*, int);
-extern void leveldb_options_set_cache(leveldb_options_t*, leveldb_cache_t*);
-extern void leveldb_options_set_block_size(leveldb_options_t*, size_t);
-extern void leveldb_options_set_block_restart_interval(leveldb_options_t*, int);
-
-enum {
- leveldb_no_compression = 0,
- leveldb_snappy_compression = 1
-};
-extern void leveldb_options_set_compression(leveldb_options_t*, int);
+LEVELDB_EXPORT leveldb_options_t* leveldb_options_create(void);
+LEVELDB_EXPORT void leveldb_options_destroy(leveldb_options_t*);
+LEVELDB_EXPORT void leveldb_options_set_comparator(leveldb_options_t*,
+ leveldb_comparator_t*);
+LEVELDB_EXPORT void leveldb_options_set_filter_policy(leveldb_options_t*,
+ leveldb_filterpolicy_t*);
+LEVELDB_EXPORT void leveldb_options_set_create_if_missing(leveldb_options_t*,
+ uint8_t);
+LEVELDB_EXPORT void leveldb_options_set_error_if_exists(leveldb_options_t*,
+ uint8_t);
+LEVELDB_EXPORT void leveldb_options_set_paranoid_checks(leveldb_options_t*,
+ uint8_t);
+LEVELDB_EXPORT void leveldb_options_set_env(leveldb_options_t*, leveldb_env_t*);
+LEVELDB_EXPORT void leveldb_options_set_info_log(leveldb_options_t*,
+ leveldb_logger_t*);
+LEVELDB_EXPORT void leveldb_options_set_write_buffer_size(leveldb_options_t*,
+ size_t);
+LEVELDB_EXPORT void leveldb_options_set_max_open_files(leveldb_options_t*, int);
+LEVELDB_EXPORT void leveldb_options_set_cache(leveldb_options_t*,
+ leveldb_cache_t*);
+LEVELDB_EXPORT void leveldb_options_set_block_size(leveldb_options_t*, size_t);
+LEVELDB_EXPORT void leveldb_options_set_block_restart_interval(
+ leveldb_options_t*, int);
+LEVELDB_EXPORT void leveldb_options_set_max_file_size(leveldb_options_t*,
+ size_t);
+
+enum { leveldb_no_compression = 0, leveldb_snappy_compression = 1 };
+LEVELDB_EXPORT void leveldb_options_set_compression(leveldb_options_t*, int);
/* Comparator */
-extern leveldb_comparator_t* leveldb_comparator_create(
- void* state,
- void (*destructor)(void*),
- int (*compare)(
- void*,
- const char* a, size_t alen,
- const char* b, size_t blen),
+LEVELDB_EXPORT leveldb_comparator_t* leveldb_comparator_create(
+ void* state, void (*destructor)(void*),
+ int (*compare)(void*, const char* a, size_t alen, const char* b,
+ size_t blen),
const char* (*name)(void*));
-extern void leveldb_comparator_destroy(leveldb_comparator_t*);
+LEVELDB_EXPORT void leveldb_comparator_destroy(leveldb_comparator_t*);
/* Filter policy */
-extern leveldb_filterpolicy_t* leveldb_filterpolicy_create(
- void* state,
- void (*destructor)(void*),
- char* (*create_filter)(
- void*,
- const char* const* key_array, const size_t* key_length_array,
- int num_keys,
- size_t* filter_length),
- unsigned char (*key_may_match)(
- void*,
- const char* key, size_t length,
- const char* filter, size_t filter_length),
+LEVELDB_EXPORT leveldb_filterpolicy_t* leveldb_filterpolicy_create(
+ void* state, void (*destructor)(void*),
+ char* (*create_filter)(void*, const char* const* key_array,
+ const size_t* key_length_array, int num_keys,
+ size_t* filter_length),
+ uint8_t (*key_may_match)(void*, const char* key, size_t length,
+ const char* filter, size_t filter_length),
const char* (*name)(void*));
-extern void leveldb_filterpolicy_destroy(leveldb_filterpolicy_t*);
+LEVELDB_EXPORT void leveldb_filterpolicy_destroy(leveldb_filterpolicy_t*);
-extern leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(
+LEVELDB_EXPORT leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(
int bits_per_key);
/* Read options */
-extern leveldb_readoptions_t* leveldb_readoptions_create();
-extern void leveldb_readoptions_destroy(leveldb_readoptions_t*);
-extern void leveldb_readoptions_set_verify_checksums(
- leveldb_readoptions_t*,
- unsigned char);
-extern void leveldb_readoptions_set_fill_cache(
- leveldb_readoptions_t*, unsigned char);
-extern void leveldb_readoptions_set_snapshot(
- leveldb_readoptions_t*,
- const leveldb_snapshot_t*);
+LEVELDB_EXPORT leveldb_readoptions_t* leveldb_readoptions_create(void);
+LEVELDB_EXPORT void leveldb_readoptions_destroy(leveldb_readoptions_t*);
+LEVELDB_EXPORT void leveldb_readoptions_set_verify_checksums(
+ leveldb_readoptions_t*, uint8_t);
+LEVELDB_EXPORT void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t*,
+ uint8_t);
+LEVELDB_EXPORT void leveldb_readoptions_set_snapshot(leveldb_readoptions_t*,
+ const leveldb_snapshot_t*);
/* Write options */
-extern leveldb_writeoptions_t* leveldb_writeoptions_create();
-extern void leveldb_writeoptions_destroy(leveldb_writeoptions_t*);
-extern void leveldb_writeoptions_set_sync(
- leveldb_writeoptions_t*, unsigned char);
+LEVELDB_EXPORT leveldb_writeoptions_t* leveldb_writeoptions_create(void);
+LEVELDB_EXPORT void leveldb_writeoptions_destroy(leveldb_writeoptions_t*);
+LEVELDB_EXPORT void leveldb_writeoptions_set_sync(leveldb_writeoptions_t*,
+ uint8_t);
/* Cache */
-extern leveldb_cache_t* leveldb_cache_create_lru(size_t capacity);
-extern void leveldb_cache_destroy(leveldb_cache_t* cache);
+LEVELDB_EXPORT leveldb_cache_t* leveldb_cache_create_lru(size_t capacity);
+LEVELDB_EXPORT void leveldb_cache_destroy(leveldb_cache_t* cache);
/* Env */
-extern leveldb_env_t* leveldb_create_default_env();
-extern void leveldb_env_destroy(leveldb_env_t*);
+LEVELDB_EXPORT leveldb_env_t* leveldb_create_default_env(void);
+LEVELDB_EXPORT void leveldb_env_destroy(leveldb_env_t*);
+
+/* If not NULL, the returned buffer must be released using leveldb_free(). */
+LEVELDB_EXPORT char* leveldb_env_get_test_directory(leveldb_env_t*);
/* Utility */
@@ -275,16 +255,16 @@ extern void leveldb_env_destroy(leveldb_env_t*);
in this file. Note that in certain cases (typically on Windows), you
may need to call this routine instead of free(ptr) to dispose of
malloc()-ed memory returned by this library. */
-extern void leveldb_free(void* ptr);
+LEVELDB_EXPORT void leveldb_free(void* ptr);
/* Return the major version number for this release. */
-extern int leveldb_major_version();
+LEVELDB_EXPORT int leveldb_major_version(void);
/* Return the minor version number for this release. */
-extern int leveldb_minor_version();
+LEVELDB_EXPORT int leveldb_minor_version(void);
#ifdef __cplusplus
-} /* end extern "C" */
+} /* end extern "C" */
#endif
-#endif /* STORAGE_LEVELDB_INCLUDE_C_H_ */
+#endif /* STORAGE_LEVELDB_INCLUDE_C_H_ */
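The C header now tags everything with `LEVELDB_EXPORT` and adds `leveldb_writebatch_append()`. A hedged sketch of the batch-append path through the C API, using only signatures visible in the hunk above; the database path is a placeholder and error handling is trimmed to the essentials:

```c++
// Sketch: append one C-API write batch onto another, then commit it.
#include <stdio.h>

#include "leveldb/c.h"

int main() {
  char* err = nullptr;

  leveldb_options_t* options = leveldb_options_create();
  leveldb_options_set_create_if_missing(options, 1);
  leveldb_t* db = leveldb_open(options, "/tmp/leveldb-c-demo", &err);
  if (err != nullptr) {
    fprintf(stderr, "open failed: %s\n", err);
    leveldb_free(err);
    leveldb_options_destroy(options);
    return 1;
  }

  leveldb_writebatch_t* batch = leveldb_writebatch_create();
  leveldb_writebatch_put(batch, "k1", 2, "v1", 2);

  leveldb_writebatch_t* extra = leveldb_writebatch_create();
  leveldb_writebatch_put(extra, "k2", 2, "v2", 2);
  leveldb_writebatch_append(batch, extra);  // new in this header revision

  leveldb_writeoptions_t* woptions = leveldb_writeoptions_create();
  leveldb_write(db, woptions, batch, &err);
  if (err != nullptr) {
    fprintf(stderr, "write failed: %s\n", err);
    leveldb_free(err);
  }

  leveldb_writeoptions_destroy(woptions);
  leveldb_writebatch_destroy(extra);
  leveldb_writebatch_destroy(batch);
  leveldb_close(db);
  leveldb_options_destroy(options);
  return 0;
}
```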
diff --git a/src/leveldb/include/leveldb/cache.h b/src/leveldb/include/leveldb/cache.h
index 6819d5bc49..7d1a221193 100644
--- a/src/leveldb/include/leveldb/cache.h
+++ b/src/leveldb/include/leveldb/cache.h
@@ -19,26 +19,31 @@
#define STORAGE_LEVELDB_INCLUDE_CACHE_H_
#include <stdint.h>
+
+#include "leveldb/export.h"
#include "leveldb/slice.h"
namespace leveldb {
-class Cache;
+class LEVELDB_EXPORT Cache;
// Create a new cache with a fixed size capacity. This implementation
// of Cache uses a least-recently-used eviction policy.
-extern Cache* NewLRUCache(size_t capacity);
+LEVELDB_EXPORT Cache* NewLRUCache(size_t capacity);
-class Cache {
+class LEVELDB_EXPORT Cache {
public:
- Cache() { }
+ Cache() = default;
+
+ Cache(const Cache&) = delete;
+ Cache& operator=(const Cache&) = delete;
// Destroys all existing entries by calling the "deleter"
// function that was passed to the constructor.
virtual ~Cache();
// Opaque handle to an entry stored in the cache.
- struct Handle { };
+ struct Handle {};
// Insert a mapping from key->value into the cache and assign it
// the specified charge against the total cache capacity.
@@ -52,7 +57,7 @@ class Cache {
virtual Handle* Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value)) = 0;
- // If the cache has no mapping for "key", returns NULL.
+ // If the cache has no mapping for "key", returns nullptr.
//
// Else return a handle that corresponds to the mapping. The caller
// must call this->Release(handle) when the returned mapping is no
@@ -99,10 +104,6 @@ class Cache {
struct Rep;
Rep* rep_;
-
- // No copying allowed
- Cache(const Cache&);
- void operator=(const Cache&);
};
} // namespace leveldb
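A small sketch of the `Cache` interface shown above. `Insert()` and its deleter signature come straight from the hunk; `Lookup()`, `Value()`, and `Release()` are the companion accessors assumed to exist elsewhere in this header, and the charge and capacity values are toy numbers:

```c++
// Sketch: insert and look up a single entry in an LRU cache.
#include <cstdio>
#include <string>

#include "leveldb/cache.h"
#include "leveldb/slice.h"

static void DeleteString(const leveldb::Slice& /*key*/, void* value) {
  delete static_cast<std::string*>(value);
}

int main() {
  leveldb::Cache* cache = leveldb::NewLRUCache(8 * 1024 * 1024);  // 8MB cap

  auto* value = new std::string("cached payload");
  leveldb::Cache::Handle* handle =
      cache->Insert(leveldb::Slice("key"), value, value->size(), &DeleteString);
  cache->Release(handle);  // drop our reference; entry stays until evicted

  if (leveldb::Cache::Handle* found = cache->Lookup(leveldb::Slice("key"))) {
    std::printf("%s\n",
                static_cast<std::string*>(cache->Value(found))->c_str());
    cache->Release(found);
  }

  delete cache;  // evicts remaining entries, invoking the deleter
  return 0;
}
```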
diff --git a/src/leveldb/include/leveldb/comparator.h b/src/leveldb/include/leveldb/comparator.h
index 556b984c76..a85b51ebd8 100644
--- a/src/leveldb/include/leveldb/comparator.h
+++ b/src/leveldb/include/leveldb/comparator.h
@@ -7,6 +7,8 @@
#include <string>
+#include "leveldb/export.h"
+
namespace leveldb {
class Slice;
@@ -15,7 +17,7 @@ class Slice;
// used as keys in an sstable or a database. A Comparator implementation
// must be thread-safe since leveldb may invoke its methods concurrently
// from multiple threads.
-class Comparator {
+class LEVELDB_EXPORT Comparator {
public:
virtual ~Comparator();
@@ -43,9 +45,8 @@ class Comparator {
// If *start < limit, changes *start to a short string in [start,limit).
// Simple comparator implementations may return with *start unchanged,
// i.e., an implementation of this method that does nothing is correct.
- virtual void FindShortestSeparator(
- std::string* start,
- const Slice& limit) const = 0;
+ virtual void FindShortestSeparator(std::string* start,
+ const Slice& limit) const = 0;
// Changes *key to a short string >= *key.
// Simple comparator implementations may return with *key unchanged,
@@ -56,7 +57,7 @@ class Comparator {
// Return a builtin comparator that uses lexicographic byte-wise
// ordering. The result remains the property of this module and
// must not be deleted.
-extern const Comparator* BytewiseComparator();
+LEVELDB_EXPORT const Comparator* BytewiseComparator();
} // namespace leveldb
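A hedged sketch of a user-defined `Comparator`. `FindShortestSeparator()` appears in the hunk above; `Compare()`, `Name()`, and `FindShortSuccessor()` are the remaining pure virtuals assumed from the rest of this header, and the ordering itself is invented for illustration:

```c++
// Sketch: a comparator that orders keys by length, then bytewise.
#include <string>

#include "leveldb/comparator.h"
#include "leveldb/slice.h"

class LengthFirstComparator : public leveldb::Comparator {
 public:
  int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const override {
    if (a.size() != b.size()) return a.size() < b.size() ? -1 : 1;
    return a.compare(b);
  }
  // Must change whenever the ordering changes; the name is persisted.
  const char* Name() const override { return "demo.LengthFirstComparator"; }
  // Leaving these as no-ops is explicitly allowed by the header comments.
  void FindShortestSeparator(std::string* /*start*/,
                             const leveldb::Slice& /*limit*/) const override {}
  void FindShortSuccessor(std::string* /*key*/) const override {}
};
```

The instance would then be supplied through `Options::comparator` when opening the database and must outlive the DB object.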
diff --git a/src/leveldb/include/leveldb/db.h b/src/leveldb/include/leveldb/db.h
index bfab10a0b7..b73014a221 100644
--- a/src/leveldb/include/leveldb/db.h
+++ b/src/leveldb/include/leveldb/db.h
@@ -7,14 +7,16 @@
#include <stdint.h>
#include <stdio.h>
+
+#include "leveldb/export.h"
#include "leveldb/iterator.h"
#include "leveldb/options.h"
namespace leveldb {
-// Update Makefile if you change these
+// Update CMakeLists.txt if you change these
static const int kMajorVersion = 1;
-static const int kMinorVersion = 20;
+static const int kMinorVersion = 22;
struct Options;
struct ReadOptions;
@@ -24,42 +26,44 @@ class WriteBatch;
// Abstract handle to particular state of a DB.
// A Snapshot is an immutable object and can therefore be safely
// accessed from multiple threads without any external synchronization.
-class Snapshot {
+class LEVELDB_EXPORT Snapshot {
protected:
virtual ~Snapshot();
};
// A range of keys
-struct Range {
- Slice start; // Included in the range
- Slice limit; // Not included in the range
+struct LEVELDB_EXPORT Range {
+ Range() = default;
+ Range(const Slice& s, const Slice& l) : start(s), limit(l) {}
- Range() { }
- Range(const Slice& s, const Slice& l) : start(s), limit(l) { }
+ Slice start; // Included in the range
+ Slice limit; // Not included in the range
};
// A DB is a persistent ordered map from keys to values.
// A DB is safe for concurrent access from multiple threads without
// any external synchronization.
-class DB {
+class LEVELDB_EXPORT DB {
public:
// Open the database with the specified "name".
// Stores a pointer to a heap-allocated database in *dbptr and returns
// OK on success.
- // Stores NULL in *dbptr and returns a non-OK status on error.
+ // Stores nullptr in *dbptr and returns a non-OK status on error.
// Caller should delete *dbptr when it is no longer needed.
- static Status Open(const Options& options,
- const std::string& name,
+ static Status Open(const Options& options, const std::string& name,
DB** dbptr);
- DB() { }
+ DB() = default;
+
+ DB(const DB&) = delete;
+ DB& operator=(const DB&) = delete;
+
virtual ~DB();
// Set the database entry for "key" to "value". Returns OK on success,
// and a non-OK status on error.
// Note: consider setting options.sync = true.
- virtual Status Put(const WriteOptions& options,
- const Slice& key,
+ virtual Status Put(const WriteOptions& options, const Slice& key,
const Slice& value) = 0;
// Remove the database entry (if any) for "key". Returns OK on
@@ -80,8 +84,8 @@ class DB {
// a status for which Status::IsNotFound() returns true.
//
// May return some other Status on an error.
- virtual Status Get(const ReadOptions& options,
- const Slice& key, std::string* value) = 0;
+ virtual Status Get(const ReadOptions& options, const Slice& key,
+ std::string* value) = 0;
// Return a heap-allocated iterator over the contents of the database.
// The result of NewIterator() is initially invalid (caller must
@@ -136,27 +140,27 @@ class DB {
// needed to access the data. This operation should typically only
// be invoked by users who understand the underlying implementation.
//
- // begin==NULL is treated as a key before all keys in the database.
- // end==NULL is treated as a key after all keys in the database.
+ // begin==nullptr is treated as a key before all keys in the database.
+ // end==nullptr is treated as a key after all keys in the database.
// Therefore the following call will compact the entire database:
- // db->CompactRange(NULL, NULL);
+ // db->CompactRange(nullptr, nullptr);
virtual void CompactRange(const Slice* begin, const Slice* end) = 0;
-
- private:
- // No copying allowed
- DB(const DB&);
- void operator=(const DB&);
};
// Destroy the contents of the specified database.
// Be very careful using this method.
-Status DestroyDB(const std::string& name, const Options& options);
+//
+// Note: For backwards compatibility, if DestroyDB is unable to list the
+// database files, Status::OK() will still be returned masking this failure.
+LEVELDB_EXPORT Status DestroyDB(const std::string& name,
+ const Options& options);
// If a DB cannot be opened, you may attempt to call this method to
// resurrect as much of the contents of the database as possible.
// Some data may be lost, so be careful when calling this function
// on a database that contains important information.
-Status RepairDB(const std::string& dbname, const Options& options);
+LEVELDB_EXPORT Status RepairDB(const std::string& dbname,
+ const Options& options);
} // namespace leveldb
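To tie the `nullptr` comment updates above to actual calls, here is a sketch that opens a database, does a Put/Get round trip, and requests a whole-range compaction; the path and key are placeholders:

```c++
// Sketch: round-trip a key and compact the whole key range.
#include <cassert>
#include <string>

#include "leveldb/db.h"

int main() {
  leveldb::Options options;
  options.create_if_missing = true;

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/leveldb-demo", &db);
  assert(s.ok());

  s = db->Put(leveldb::WriteOptions(), "answer", "42");
  assert(s.ok());

  std::string value;
  s = db->Get(leveldb::ReadOptions(), "answer", &value);
  assert(s.ok() && value == "42");

  // Per the updated comment: nullptr bounds mean "before/after all keys".
  db->CompactRange(nullptr, nullptr);

  delete db;
  return 0;
}
```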
diff --git a/src/leveldb/include/leveldb/dumpfile.h b/src/leveldb/include/leveldb/dumpfile.h
index 3f97fda16b..a58bc6b36c 100644
--- a/src/leveldb/include/leveldb/dumpfile.h
+++ b/src/leveldb/include/leveldb/dumpfile.h
@@ -6,7 +6,9 @@
#define STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_
#include <string>
+
#include "leveldb/env.h"
+#include "leveldb/export.h"
#include "leveldb/status.h"
namespace leveldb {
@@ -18,7 +20,8 @@ namespace leveldb {
//
// Returns a non-OK result if fname does not name a leveldb storage
// file, or if the file cannot be read.
-Status DumpFile(Env* env, const std::string& fname, WritableFile* dst);
+LEVELDB_EXPORT Status DumpFile(Env* env, const std::string& fname,
+ WritableFile* dst);
} // namespace leveldb
diff --git a/src/leveldb/include/leveldb/env.h b/src/leveldb/include/leveldb/env.h
index 275d441eae..96c21b3966 100644
--- a/src/leveldb/include/leveldb/env.h
+++ b/src/leveldb/include/leveldb/env.h
@@ -13,12 +13,36 @@
#ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_
#define STORAGE_LEVELDB_INCLUDE_ENV_H_
-#include <string>
-#include <vector>
#include <stdarg.h>
#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "leveldb/export.h"
#include "leveldb/status.h"
+#if defined(_WIN32)
+// The leveldb::Env class below contains a DeleteFile method.
+// At the same time, <windows.h>, a fairly popular header
+// file for Windows applications, defines a DeleteFile macro.
+//
+// Without any intervention on our part, the result of this
+// unfortunate coincidence is that the name of the
+// leveldb::Env::DeleteFile method seen by the compiler depends on
+// whether <windows.h> was included before or after the LevelDB
+// headers.
+//
+// To avoid headaches, we undefined DeleteFile (if defined) and
+// redefine it at the bottom of this file. This way <windows.h>
+// can be included before this file (or not at all) and the
+// exported method will always be leveldb::Env::DeleteFile.
+#if defined(DeleteFile)
+#undef DeleteFile
+#define LEVELDB_DELETEFILE_UNDEFINED
+#endif // defined(DeleteFile)
+#endif // defined(_WIN32)
+
namespace leveldb {
class FileLock;
@@ -28,9 +52,13 @@ class SequentialFile;
class Slice;
class WritableFile;
-class Env {
+class LEVELDB_EXPORT Env {
public:
- Env() { }
+ Env() = default;
+
+ Env(const Env&) = delete;
+ Env& operator=(const Env&) = delete;
+
virtual ~Env();
// Return a default environment suitable for the current operating
@@ -40,20 +68,22 @@ class Env {
// The result of Default() belongs to leveldb and must never be deleted.
static Env* Default();
- // Create a brand new sequentially-readable file with the specified name.
+ // Create an object that sequentially reads the file with the specified name.
// On success, stores a pointer to the new file in *result and returns OK.
- // On failure stores NULL in *result and returns non-OK. If the file does
- // not exist, returns a non-OK status.
+ // On failure stores nullptr in *result and returns non-OK. If the file does
+ // not exist, returns a non-OK status. Implementations should return a
+ // NotFound status when the file does not exist.
//
// The returned file will only be accessed by one thread at a time.
virtual Status NewSequentialFile(const std::string& fname,
SequentialFile** result) = 0;
- // Create a brand new random access read-only file with the
+ // Create an object supporting random-access reads from the file with the
// specified name. On success, stores a pointer to the new file in
- // *result and returns OK. On failure stores NULL in *result and
+ // *result and returns OK. On failure stores nullptr in *result and
// returns non-OK. If the file does not exist, returns a non-OK
- // status.
+ // status. Implementations should return a NotFound status when the file does
+ // not exist.
//
// The returned file may be concurrently accessed by multiple threads.
virtual Status NewRandomAccessFile(const std::string& fname,
@@ -62,7 +92,7 @@ class Env {
// Create an object that writes to a new file with the specified
// name. Deletes any existing file with the same name and creates a
// new file. On success, stores a pointer to the new file in
- // *result and returns OK. On failure stores NULL in *result and
+ // *result and returns OK. On failure stores nullptr in *result and
// returns non-OK.
//
// The returned file will only be accessed by one thread at a time.
@@ -72,7 +102,7 @@ class Env {
// Create an object that either appends to an existing file, or
// writes to a new file (if the file does not exist to begin with).
// On success, stores a pointer to the new file in *result and
- // returns OK. On failure stores NULL in *result and returns
+ // returns OK. On failure stores nullptr in *result and returns
// non-OK.
//
// The returned file will only be accessed by one thread at a time.
@@ -110,7 +140,7 @@ class Env {
const std::string& target) = 0;
// Lock the specified file. Used to prevent concurrent access to
- // the same db by multiple processes. On failure, stores NULL in
+ // the same db by multiple processes. On failure, stores nullptr in
// *lock and returns non-OK.
//
// On success, stores a pointer to the object that represents the
@@ -136,16 +166,14 @@ class Env {
// added to the same Env may run concurrently in different threads.
// I.e., the caller may not assume that background work items are
// serialized.
- virtual void Schedule(
- void (*function)(void* arg),
- void* arg) = 0;
+ virtual void Schedule(void (*function)(void* arg), void* arg) = 0;
// Start a new thread, invoking "function(arg)" within the new thread.
// When "function(arg)" returns, the thread will be destroyed.
virtual void StartThread(void (*function)(void* arg), void* arg) = 0;
// *path is set to a temporary directory that can be used for testing. It may
- // or many not have just been created. The directory may or may not differ
+ // or may not have just been created. The directory may or may not differ
// between runs of the same process, but subsequent calls will return the
// same directory.
virtual Status GetTestDirectory(std::string* path) = 0;
@@ -159,17 +187,16 @@ class Env {
// Sleep/delay the thread for the prescribed number of micro-seconds.
virtual void SleepForMicroseconds(int micros) = 0;
-
- private:
- // No copying allowed
- Env(const Env&);
- void operator=(const Env&);
};
// A file abstraction for reading sequentially through a file
-class SequentialFile {
+class LEVELDB_EXPORT SequentialFile {
public:
- SequentialFile() { }
+ SequentialFile() = default;
+
+ SequentialFile(const SequentialFile&) = delete;
+ SequentialFile& operator=(const SequentialFile&) = delete;
+
virtual ~SequentialFile();
// Read up to "n" bytes from the file. "scratch[0..n-1]" may be
@@ -193,17 +220,16 @@ class SequentialFile {
// Get a name for the file, only for error reporting
virtual std::string GetName() const = 0;
-
- private:
- // No copying allowed
- SequentialFile(const SequentialFile&);
- void operator=(const SequentialFile&);
};
// A file abstraction for randomly reading the contents of a file.
-class RandomAccessFile {
+class LEVELDB_EXPORT RandomAccessFile {
public:
- RandomAccessFile() { }
+ RandomAccessFile() = default;
+
+ RandomAccessFile(const RandomAccessFile&) = delete;
+ RandomAccessFile& operator=(const RandomAccessFile&) = delete;
+
virtual ~RandomAccessFile();
// Read up to "n" bytes from the file starting at "offset".
@@ -220,19 +246,18 @@ class RandomAccessFile {
// Get a name for the file, only for error reporting
virtual std::string GetName() const = 0;
-
- private:
- // No copying allowed
- RandomAccessFile(const RandomAccessFile&);
- void operator=(const RandomAccessFile&);
};
// A file abstraction for sequential writing. The implementation
// must provide buffering since callers may append small fragments
// at a time to the file.
-class WritableFile {
+class LEVELDB_EXPORT WritableFile {
public:
- WritableFile() { }
+ WritableFile() = default;
+
+ WritableFile(const WritableFile&) = delete;
+ WritableFile& operator=(const WritableFile&) = delete;
+
virtual ~WritableFile();
virtual Status Append(const Slice& data) = 0;
@@ -242,119 +267,130 @@ class WritableFile {
// Get a name for the file, only for error reporting
virtual std::string GetName() const = 0;
-
- private:
- // No copying allowed
- WritableFile(const WritableFile&);
- void operator=(const WritableFile&);
};
// An interface for writing log messages.
-class Logger {
+class LEVELDB_EXPORT Logger {
public:
- Logger() { }
+ Logger() = default;
+
+ Logger(const Logger&) = delete;
+ Logger& operator=(const Logger&) = delete;
+
virtual ~Logger();
// Write an entry to the log file with the specified format.
virtual void Logv(const char* format, va_list ap) = 0;
-
- private:
- // No copying allowed
- Logger(const Logger&);
- void operator=(const Logger&);
};
-
// Identifies a locked file.
-class FileLock {
+class LEVELDB_EXPORT FileLock {
public:
- FileLock() { }
+ FileLock() = default;
+
+ FileLock(const FileLock&) = delete;
+ FileLock& operator=(const FileLock&) = delete;
+
virtual ~FileLock();
- private:
- // No copying allowed
- FileLock(const FileLock&);
- void operator=(const FileLock&);
};
-// Log the specified data to *info_log if info_log is non-NULL.
-extern void Log(Logger* info_log, const char* format, ...)
-# if defined(__GNUC__) || defined(__clang__)
- __attribute__((__format__ (__printf__, 2, 3)))
-# endif
+// Log the specified data to *info_log if info_log is non-null.
+void Log(Logger* info_log, const char* format, ...)
+#if defined(__GNUC__) || defined(__clang__)
+ __attribute__((__format__(__printf__, 2, 3)))
+#endif
;
// A utility routine: write "data" to the named file.
-extern Status WriteStringToFile(Env* env, const Slice& data,
- const std::string& fname);
+LEVELDB_EXPORT Status WriteStringToFile(Env* env, const Slice& data,
+ const std::string& fname);
// A utility routine: read contents of named file into *data
-extern Status ReadFileToString(Env* env, const std::string& fname,
- std::string* data);
+LEVELDB_EXPORT Status ReadFileToString(Env* env, const std::string& fname,
+ std::string* data);
// An implementation of Env that forwards all calls to another Env.
// May be useful to clients who wish to override just part of the
// functionality of another Env.
-class EnvWrapper : public Env {
+class LEVELDB_EXPORT EnvWrapper : public Env {
public:
- // Initialize an EnvWrapper that delegates all calls to *t
- explicit EnvWrapper(Env* t) : target_(t) { }
+ // Initialize an EnvWrapper that delegates all calls to *t.
+ explicit EnvWrapper(Env* t) : target_(t) {}
virtual ~EnvWrapper();
- // Return the target to which this Env forwards all calls
+ // Return the target to which this Env forwards all calls.
Env* target() const { return target_; }
- // The following text is boilerplate that forwards all methods to target()
- Status NewSequentialFile(const std::string& f, SequentialFile** r) {
+ // The following text is boilerplate that forwards all methods to target().
+ Status NewSequentialFile(const std::string& f, SequentialFile** r) override {
return target_->NewSequentialFile(f, r);
}
- Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
+ Status NewRandomAccessFile(const std::string& f,
+ RandomAccessFile** r) override {
return target_->NewRandomAccessFile(f, r);
}
- Status NewWritableFile(const std::string& f, WritableFile** r) {
+ Status NewWritableFile(const std::string& f, WritableFile** r) override {
return target_->NewWritableFile(f, r);
}
- Status NewAppendableFile(const std::string& f, WritableFile** r) {
+ Status NewAppendableFile(const std::string& f, WritableFile** r) override {
return target_->NewAppendableFile(f, r);
}
- bool FileExists(const std::string& f) { return target_->FileExists(f); }
- Status GetChildren(const std::string& dir, std::vector<std::string>* r) {
+ bool FileExists(const std::string& f) override {
+ return target_->FileExists(f);
+ }
+ Status GetChildren(const std::string& dir,
+ std::vector<std::string>* r) override {
return target_->GetChildren(dir, r);
}
- Status DeleteFile(const std::string& f) { return target_->DeleteFile(f); }
- Status CreateDir(const std::string& d) { return target_->CreateDir(d); }
- Status DeleteDir(const std::string& d) { return target_->DeleteDir(d); }
- Status GetFileSize(const std::string& f, uint64_t* s) {
+ Status DeleteFile(const std::string& f) override {
+ return target_->DeleteFile(f);
+ }
+ Status CreateDir(const std::string& d) override {
+ return target_->CreateDir(d);
+ }
+ Status DeleteDir(const std::string& d) override {
+ return target_->DeleteDir(d);
+ }
+ Status GetFileSize(const std::string& f, uint64_t* s) override {
return target_->GetFileSize(f, s);
}
- Status RenameFile(const std::string& s, const std::string& t) {
+ Status RenameFile(const std::string& s, const std::string& t) override {
return target_->RenameFile(s, t);
}
- Status LockFile(const std::string& f, FileLock** l) {
+ Status LockFile(const std::string& f, FileLock** l) override {
return target_->LockFile(f, l);
}
- Status UnlockFile(FileLock* l) { return target_->UnlockFile(l); }
- void Schedule(void (*f)(void*), void* a) {
+ Status UnlockFile(FileLock* l) override { return target_->UnlockFile(l); }
+ void Schedule(void (*f)(void*), void* a) override {
return target_->Schedule(f, a);
}
- void StartThread(void (*f)(void*), void* a) {
+ void StartThread(void (*f)(void*), void* a) override {
return target_->StartThread(f, a);
}
- virtual Status GetTestDirectory(std::string* path) {
+ Status GetTestDirectory(std::string* path) override {
return target_->GetTestDirectory(path);
}
- virtual Status NewLogger(const std::string& fname, Logger** result) {
+ Status NewLogger(const std::string& fname, Logger** result) override {
return target_->NewLogger(fname, result);
}
- uint64_t NowMicros() {
- return target_->NowMicros();
- }
- void SleepForMicroseconds(int micros) {
+ uint64_t NowMicros() override { return target_->NowMicros(); }
+ void SleepForMicroseconds(int micros) override {
target_->SleepForMicroseconds(micros);
}
+
private:
Env* target_;
};
} // namespace leveldb
+// Redefine DeleteFile if necessary.
+#if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
+#if defined(UNICODE)
+#define DeleteFile DeleteFileW
+#else
+#define DeleteFile DeleteFileA
+#endif // defined(UNICODE)
+#endif // defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
+
#endif // STORAGE_LEVELDB_INCLUDE_ENV_H_
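
The EnvWrapper boilerplate above lets a client override a single Env method and forward everything else to a wrapped target. A minimal sketch under that assumption (CountingEnv and its counter are illustrative names, not part of this change):

    #include <atomic>
    #include <cstdint>
    #include <string>

    #include "leveldb/env.h"

    // Counts how many writable files are opened while delegating every other
    // Env call to the wrapped target.
    class CountingEnv : public leveldb::EnvWrapper {
     public:
      explicit CountingEnv(leveldb::Env* base) : leveldb::EnvWrapper(base) {}

      leveldb::Status NewWritableFile(const std::string& fname,
                                      leveldb::WritableFile** result) override {
        writable_files_opened_.fetch_add(1, std::memory_order_relaxed);
        return target()->NewWritableFile(fname, result);
      }

      uint64_t writable_files_opened() const {
        return writable_files_opened_.load(std::memory_order_relaxed);
      }

     private:
      std::atomic<uint64_t> writable_files_opened_{0};
    };

Wrapping leveldb::Env::Default() this way and assigning the wrapper to Options::env would route the database's file creation through the counter.
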
diff --git a/src/leveldb/include/leveldb/export.h b/src/leveldb/include/leveldb/export.h
new file mode 100644
index 0000000000..6ba9b183da
--- /dev/null
+++ b/src/leveldb/include/leveldb/export.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2017 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_INCLUDE_EXPORT_H_
+#define STORAGE_LEVELDB_INCLUDE_EXPORT_H_
+
+#if !defined(LEVELDB_EXPORT)
+
+#if defined(LEVELDB_SHARED_LIBRARY)
+#if defined(_WIN32)
+
+#if defined(LEVELDB_COMPILE_LIBRARY)
+#define LEVELDB_EXPORT __declspec(dllexport)
+#else
+#define LEVELDB_EXPORT __declspec(dllimport)
+#endif // defined(LEVELDB_COMPILE_LIBRARY)
+
+#else // defined(_WIN32)
+#if defined(LEVELDB_COMPILE_LIBRARY)
+#define LEVELDB_EXPORT __attribute__((visibility("default")))
+#else
+#define LEVELDB_EXPORT
+#endif
+#endif // defined(_WIN32)
+
+#else // defined(LEVELDB_SHARED_LIBRARY)
+#define LEVELDB_EXPORT
+#endif
+
+#endif // !defined(LEVELDB_EXPORT)
+
+#endif // STORAGE_LEVELDB_INCLUDE_EXPORT_H_
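
A small sanity sketch of how the new header is consumed (the macros mentioned are build-system toggles, not introduced by this diff): after inclusion LEVELDB_EXPORT is always defined, expanding to nothing for static builds and to a dllexport/dllimport or visibility attribute for shared-library builds.

    // LEVELDB_SHARED_LIBRARY and LEVELDB_COMPILE_LIBRARY select the expansion;
    // with neither defined (the static default) the macro is simply empty.
    #include "leveldb/export.h"

    #if !defined(LEVELDB_EXPORT)
    #error "leveldb/export.h must leave LEVELDB_EXPORT defined"
    #endif

    int main() { return 0; }
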
diff --git a/src/leveldb/include/leveldb/filter_policy.h b/src/leveldb/include/leveldb/filter_policy.h
index 1fba08001f..49c8eda776 100644
--- a/src/leveldb/include/leveldb/filter_policy.h
+++ b/src/leveldb/include/leveldb/filter_policy.h
@@ -18,11 +18,13 @@
#include <string>
+#include "leveldb/export.h"
+
namespace leveldb {
class Slice;
-class FilterPolicy {
+class LEVELDB_EXPORT FilterPolicy {
public:
virtual ~FilterPolicy();
@@ -38,8 +40,8 @@ class FilterPolicy {
//
// Warning: do not change the initial contents of *dst. Instead,
// append the newly constructed filter to *dst.
- virtual void CreateFilter(const Slice* keys, int n, std::string* dst)
- const = 0;
+ virtual void CreateFilter(const Slice* keys, int n,
+ std::string* dst) const = 0;
// "filter" contains the data appended by a preceding call to
// CreateFilter() on this class. This method must return true if
@@ -63,8 +65,8 @@ class FilterPolicy {
// ignores trailing spaces, it would be incorrect to use a
// FilterPolicy (like NewBloomFilterPolicy) that does not ignore
// trailing spaces in keys.
-extern const FilterPolicy* NewBloomFilterPolicy(int bits_per_key);
+LEVELDB_EXPORT const FilterPolicy* NewBloomFilterPolicy(int bits_per_key);
-}
+} // namespace leveldb
#endif // STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
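
A usage sketch for the exported bloom-filter factory (the database path and bits-per-key value are arbitrary choices, not taken from this change):

    #include <cassert>

    #include "leveldb/db.h"
    #include "leveldb/filter_policy.h"

    int main() {
      leveldb::Options options;
      options.create_if_missing = true;
      // ~10 bits per key gives roughly a 1% false-positive rate.
      options.filter_policy = leveldb::NewBloomFilterPolicy(10);

      leveldb::DB* db = nullptr;
      leveldb::Status s = leveldb::DB::Open(options, "/tmp/bloom_example_db", &db);
      assert(s.ok());

      delete db;                     // close the database first
      delete options.filter_policy;  // the policy must outlive the DB
      return 0;
    }
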
diff --git a/src/leveldb/include/leveldb/iterator.h b/src/leveldb/include/leveldb/iterator.h
index da631ed9d8..bb9a5df8f5 100644
--- a/src/leveldb/include/leveldb/iterator.h
+++ b/src/leveldb/include/leveldb/iterator.h
@@ -15,14 +15,19 @@
#ifndef STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
#define STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
+#include "leveldb/export.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"
namespace leveldb {
-class Iterator {
+class LEVELDB_EXPORT Iterator {
public:
Iterator();
+
+ Iterator(const Iterator&) = delete;
+ Iterator& operator=(const Iterator&) = delete;
+
virtual ~Iterator();
// An iterator is either positioned at a key/value pair, or
@@ -72,28 +77,35 @@ class Iterator {
//
// Note that unlike all of the preceding methods, this method is
// not abstract and therefore clients should not override it.
- typedef void (*CleanupFunction)(void* arg1, void* arg2);
+ using CleanupFunction = void (*)(void* arg1, void* arg2);
void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2);
private:
- struct Cleanup {
+ // Cleanup functions are stored in a single-linked list.
+ // The list's head node is inlined in the iterator.
+ struct CleanupNode {
+ // True if the node is not used. Only head nodes might be unused.
+ bool IsEmpty() const { return function == nullptr; }
+ // Invokes the cleanup function.
+ void Run() {
+ assert(function != nullptr);
+ (*function)(arg1, arg2);
+ }
+
+ // The head node is used if the function pointer is not null.
CleanupFunction function;
void* arg1;
void* arg2;
- Cleanup* next;
+ CleanupNode* next;
};
- Cleanup cleanup_;
-
- // No copying allowed
- Iterator(const Iterator&);
- void operator=(const Iterator&);
+ CleanupNode cleanup_head_;
};
// Return an empty iterator (yields nothing).
-extern Iterator* NewEmptyIterator();
+LEVELDB_EXPORT Iterator* NewEmptyIterator();
// Return an empty iterator with the specified status.
-extern Iterator* NewErrorIterator(const Status& status);
+LEVELDB_EXPORT Iterator* NewErrorIterator(const Status& status);
} // namespace leveldb
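
The renamed CleanupNode list backs Iterator::RegisterCleanup. A hedged sketch of how a caller could tie a heap allocation to an iterator's lifetime (the wrapper function and the empty source iterator are illustrative only):

    #include <string>

    #include "leveldb/iterator.h"

    namespace {

    // Cleanup callback: deletes the std::string passed as arg1; arg2 is unused.
    void DeleteOwnedString(void* arg1, void* /*arg2*/) {
      delete static_cast<std::string*>(arg1);
    }

    }  // namespace

    // Returns an iterator that owns *backing; the string is deleted when the
    // iterator is destroyed, on whichever code path that happens.
    leveldb::Iterator* WrapWithOwnedBuffer(std::string* backing) {
      leveldb::Iterator* it = leveldb::NewEmptyIterator();
      it->RegisterCleanup(&DeleteOwnedString, backing, nullptr);
      return it;
    }
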
diff --git a/src/leveldb/include/leveldb/options.h b/src/leveldb/include/leveldb/options.h
index 976e38122a..b7487726bc 100644
--- a/src/leveldb/include/leveldb/options.h
+++ b/src/leveldb/include/leveldb/options.h
@@ -7,6 +7,8 @@
#include <stddef.h>
+#include "leveldb/export.h"
+
namespace leveldb {
class Cache;
@@ -23,12 +25,15 @@ class Snapshot;
enum CompressionType {
// NOTE: do not change the values of existing entries, as these are
// part of the persistent format on disk.
- kNoCompression = 0x0,
+ kNoCompression = 0x0,
kSnappyCompression = 0x1
};
// Options to control the behavior of a database (passed to DB::Open)
-struct Options {
+struct LEVELDB_EXPORT Options {
+ // Create an Options object with default values for all fields.
+ Options();
+
// -------------------
// Parameters that affect behavior
@@ -41,20 +46,17 @@ struct Options {
const Comparator* comparator;
// If true, the database will be created if it is missing.
- // Default: false
- bool create_if_missing;
+ bool create_if_missing = false;
// If true, an error is raised if the database already exists.
- // Default: false
- bool error_if_exists;
+ bool error_if_exists = false;
// If true, the implementation will do aggressive checking of the
// data it is processing and will stop early if it detects any
// errors. This may have unforeseen ramifications: for example, a
// corruption of one DB entry may cause a large number of entries to
// become unreadable or for the entire DB to become unopenable.
- // Default: false
- bool paranoid_checks;
+ bool paranoid_checks = false;
// Use the specified object to interact with the environment,
// e.g. to read/write files, schedule background work, etc.
@@ -62,10 +64,9 @@ struct Options {
Env* env;
// Any internal progress/error information generated by the db will
- // be written to info_log if it is non-NULL, or to a file stored
- // in the same directory as the DB contents if info_log is NULL.
- // Default: NULL
- Logger* info_log;
+ // be written to info_log if it is non-null, or to a file stored
+ // in the same directory as the DB contents if info_log is null.
+ Logger* info_log = nullptr;
// -------------------
// Parameters that affect performance
@@ -78,39 +79,30 @@ struct Options {
// so you may wish to adjust this parameter to control memory usage.
// Also, a larger write buffer will result in a longer recovery time
// the next time the database is opened.
- //
- // Default: 4MB
- size_t write_buffer_size;
+ size_t write_buffer_size = 4 * 1024 * 1024;
// Number of open files that can be used by the DB. You may need to
// increase this if your database has a large working set (budget
// one open file per 2MB of working set).
- //
- // Default: 1000
- int max_open_files;
+ int max_open_files = 1000;
// Control over blocks (user data is stored in a set of blocks, and
// a block is the unit of reading from disk).
- // If non-NULL, use the specified cache for blocks.
- // If NULL, leveldb will automatically create and use an 8MB internal cache.
- // Default: NULL
- Cache* block_cache;
+ // If non-null, use the specified cache for blocks.
+ // If null, leveldb will automatically create and use an 8MB internal cache.
+ Cache* block_cache = nullptr;
// Approximate size of user data packed per block. Note that the
// block size specified here corresponds to uncompressed data. The
// actual size of the unit read from disk may be smaller if
// compression is enabled. This parameter can be changed dynamically.
- //
- // Default: 4K
- size_t block_size;
+ size_t block_size = 4 * 1024;
// Number of keys between restart points for delta encoding of keys.
// This parameter can be changed dynamically. Most clients should
// leave this parameter alone.
- //
- // Default: 16
- int block_restart_interval;
+ int block_restart_interval = 16;
// Leveldb will write up to this amount of bytes to a file before
// switching to a new one.
@@ -120,9 +112,7 @@ struct Options {
// compactions and hence longer latency/performance hiccups.
// Another reason to increase this parameter might be when you are
// initially populating a large database.
- //
- // Default: 2MB
- size_t max_file_size;
+ size_t max_file_size = 2 * 1024 * 1024;
// Compress blocks using the specified compression algorithm. This
// parameter can be changed dynamically.
@@ -138,53 +128,43 @@ struct Options {
// worth switching to kNoCompression. Even if the input data is
// incompressible, the kSnappyCompression implementation will
// efficiently detect that and will switch to uncompressed mode.
- CompressionType compression;
+ CompressionType compression = kSnappyCompression;
// EXPERIMENTAL: If true, append to existing MANIFEST and log files
// when a database is opened. This can significantly speed up open.
//
// Default: currently false, but may become true later.
- bool reuse_logs;
+ bool reuse_logs = false;
- // If non-NULL, use the specified filter policy to reduce disk reads.
+ // If non-null, use the specified filter policy to reduce disk reads.
// Many applications will benefit from passing the result of
// NewBloomFilterPolicy() here.
- //
- // Default: NULL
- const FilterPolicy* filter_policy;
-
- // Create an Options object with default values for all fields.
- Options();
+ const FilterPolicy* filter_policy = nullptr;
};
// Options that control read operations
-struct ReadOptions {
+struct LEVELDB_EXPORT ReadOptions {
+ ReadOptions() = default;
+
// If true, all data read from underlying storage will be
// verified against corresponding checksums.
- // Default: false
- bool verify_checksums;
+ bool verify_checksums = false;
// Should the data read for this iteration be cached in memory?
// Callers may wish to set this field to false for bulk scans.
- // Default: true
- bool fill_cache;
+ bool fill_cache = true;
- // If "snapshot" is non-NULL, read as of the supplied snapshot
+ // If "snapshot" is non-null, read as of the supplied snapshot
// (which must belong to the DB that is being read and which must
- // not have been released). If "snapshot" is NULL, use an implicit
+ // not have been released). If "snapshot" is null, use an implicit
// snapshot of the state at the beginning of this read operation.
- // Default: NULL
- const Snapshot* snapshot;
-
- ReadOptions()
- : verify_checksums(false),
- fill_cache(true),
- snapshot(NULL) {
- }
+ const Snapshot* snapshot = nullptr;
};
// Options that control write operations
-struct WriteOptions {
+struct LEVELDB_EXPORT WriteOptions {
+ WriteOptions() = default;
+
// If true, the write will be flushed from the operating system
// buffer cache (by calling WritableFile::Sync()) before the write
// is considered complete. If this flag is true, writes will be
@@ -199,13 +179,7 @@ struct WriteOptions {
// crash semantics as the "write()" system call. A DB write
// with sync==true has similar crash semantics to a "write()"
// system call followed by "fsync()".
- //
- // Default: false
- bool sync;
-
- WriteOptions()
- : sync(false) {
- }
+ bool sync = false;
};
} // namespace leveldb
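
Because the defaults now live on the members themselves, callers only override what they need. A short sketch (the helper names and chosen values are arbitrary):

    #include "leveldb/cache.h"
    #include "leveldb/options.h"

    leveldb::Options MakeBulkLoadOptions(leveldb::Cache* shared_cache) {
      leveldb::Options options;                       // member defaults apply
      options.create_if_missing = true;               // default: false
      options.write_buffer_size = 64 * 1024 * 1024;   // default: 4MB
      options.block_cache = shared_cache;             // default: nullptr
      options.compression = leveldb::kNoCompression;  // default: kSnappyCompression
      return options;
    }

    leveldb::ReadOptions MakeScanOptions() {
      leveldb::ReadOptions read_options;
      read_options.fill_cache = false;  // avoid evicting hot blocks during bulk scans
      return read_options;
    }
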
diff --git a/src/leveldb/include/leveldb/slice.h b/src/leveldb/include/leveldb/slice.h
index bc367986f7..2df417dc31 100644
--- a/src/leveldb/include/leveldb/slice.h
+++ b/src/leveldb/include/leveldb/slice.h
@@ -18,23 +18,30 @@
#include <assert.h>
#include <stddef.h>
#include <string.h>
+
#include <string>
+#include "leveldb/export.h"
+
namespace leveldb {
-class Slice {
+class LEVELDB_EXPORT Slice {
public:
// Create an empty slice.
- Slice() : data_(""), size_(0) { }
+ Slice() : data_(""), size_(0) {}
// Create a slice that refers to d[0,n-1].
- Slice(const char* d, size_t n) : data_(d), size_(n) { }
+ Slice(const char* d, size_t n) : data_(d), size_(n) {}
// Create a slice that refers to the contents of "s"
- Slice(const std::string& s) : data_(s.data()), size_(s.size()) { }
+ Slice(const std::string& s) : data_(s.data()), size_(s.size()) {}
// Create a slice that refers to s[0,strlen(s)-1]
- Slice(const char* s) : data_(s), size_(strlen(s)) { }
+ Slice(const char* s) : data_(s), size_(strlen(s)) {}
+
+ // Intentionally copyable.
+ Slice(const Slice&) = default;
+ Slice& operator=(const Slice&) = default;
// Return a pointer to the beginning of the referenced data
const char* data() const { return data_; }
@@ -53,7 +60,10 @@ class Slice {
}
// Change this slice to refer to an empty array
- void clear() { data_ = ""; size_ = 0; }
+ void clear() {
+ data_ = "";
+ size_ = 0;
+ }
// Drop the first "n" bytes from this slice.
void remove_prefix(size_t n) {
@@ -73,15 +83,12 @@ class Slice {
// Return true iff "x" is a prefix of "*this"
bool starts_with(const Slice& x) const {
- return ((size_ >= x.size_) &&
- (memcmp(data_, x.data_, x.size_) == 0));
+ return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0));
}
private:
const char* data_;
size_t size_;
-
- // Intentionally copyable
};
inline bool operator==(const Slice& x, const Slice& y) {
@@ -89,21 +96,20 @@ inline bool operator==(const Slice& x, const Slice& y) {
(memcmp(x.data(), y.data(), x.size()) == 0));
}
-inline bool operator!=(const Slice& x, const Slice& y) {
- return !(x == y);
-}
+inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); }
inline int Slice::compare(const Slice& b) const {
const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
int r = memcmp(data_, b.data_, min_len);
if (r == 0) {
- if (size_ < b.size_) r = -1;
- else if (size_ > b.size_) r = +1;
+ if (size_ < b.size_)
+ r = -1;
+ else if (size_ > b.size_)
+ r = +1;
}
return r;
}
} // namespace leveldb
-
#endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_
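
Slice remains a non-owning view, which the explicitly defaulted copy operations above now state in code rather than in a trailing comment. A small sketch:

    #include <cassert>
    #include <string>

    #include "leveldb/slice.h"

    int main() {
      std::string backing = "prefix:payload";
      leveldb::Slice s(backing);           // refers to backing, copies nothing
      leveldb::Slice view = s;             // copying a Slice copies only the view

      assert(view.starts_with("prefix:"));
      view.remove_prefix(7);               // drop "prefix:"
      assert(view == leveldb::Slice("payload"));
      assert(s.size() == backing.size());  // the original view is untouched
      return 0;
    }
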
diff --git a/src/leveldb/include/leveldb/status.h b/src/leveldb/include/leveldb/status.h
index d9575f9753..e3273144e4 100644
--- a/src/leveldb/include/leveldb/status.h
+++ b/src/leveldb/include/leveldb/status.h
@@ -13,20 +13,25 @@
#ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_
#define STORAGE_LEVELDB_INCLUDE_STATUS_H_
+#include <algorithm>
#include <string>
+
+#include "leveldb/export.h"
#include "leveldb/slice.h"
namespace leveldb {
-class Status {
+class LEVELDB_EXPORT Status {
public:
// Create a success status.
- Status() : state_(NULL) { }
+ Status() noexcept : state_(nullptr) {}
~Status() { delete[] state_; }
- // Copy the specified status.
- Status(const Status& s);
- void operator=(const Status& s);
+ Status(const Status& rhs);
+ Status& operator=(const Status& rhs);
+
+ Status(Status&& rhs) noexcept : state_(rhs.state_) { rhs.state_ = nullptr; }
+ Status& operator=(Status&& rhs) noexcept;
// Return a success status.
static Status OK() { return Status(); }
@@ -49,7 +54,7 @@ class Status {
}
// Returns true iff the status indicates success.
- bool ok() const { return (state_ == NULL); }
+ bool ok() const { return (state_ == nullptr); }
// Returns true iff the status indicates a NotFound error.
bool IsNotFound() const { return code() == kNotFound; }
@@ -71,13 +76,6 @@ class Status {
std::string ToString() const;
private:
- // OK status has a NULL state_. Otherwise, state_ is a new[] array
- // of the following form:
- // state_[0..3] == length of message
- // state_[4] == code
- // state_[5..] == message
- const char* state_;
-
enum Code {
kOk = 0,
kNotFound = 1,
@@ -88,23 +86,35 @@ class Status {
};
Code code() const {
- return (state_ == NULL) ? kOk : static_cast<Code>(state_[4]);
+ return (state_ == nullptr) ? kOk : static_cast<Code>(state_[4]);
}
Status(Code code, const Slice& msg, const Slice& msg2);
static const char* CopyState(const char* s);
+
+ // OK status has a null state_. Otherwise, state_ is a new[] array
+ // of the following form:
+ // state_[0..3] == length of message
+ // state_[4] == code
+ // state_[5..] == message
+ const char* state_;
};
-inline Status::Status(const Status& s) {
- state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_);
+inline Status::Status(const Status& rhs) {
+ state_ = (rhs.state_ == nullptr) ? nullptr : CopyState(rhs.state_);
}
-inline void Status::operator=(const Status& s) {
- // The following condition catches both aliasing (when this == &s),
- // and the common case where both s and *this are ok.
- if (state_ != s.state_) {
+inline Status& Status::operator=(const Status& rhs) {
+ // The following condition catches both aliasing (when this == &rhs),
+ // and the common case where both rhs and *this are ok.
+ if (state_ != rhs.state_) {
delete[] state_;
- state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_);
+ state_ = (rhs.state_ == nullptr) ? nullptr : CopyState(rhs.state_);
}
+ return *this;
+}
+inline Status& Status::operator=(Status&& rhs) noexcept {
+ std::swap(state_, rhs.state_);
+ return *this;
}
} // namespace leveldb
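
With the added move constructor and move assignment, returning or reassigning a Status swaps the heap-allocated state instead of copying it. A brief sketch (the validation helper is made up):

    #include <cassert>
    #include <utility>

    #include "leveldb/status.h"

    leveldb::Status Validate(bool ok_input) {
      if (!ok_input) {
        return leveldb::Status::InvalidArgument("bad input");  // moved, not copied
      }
      return leveldb::Status::OK();
    }

    int main() {
      leveldb::Status s = Validate(false);
      assert(!s.ok() && s.IsInvalidArgument());

      leveldb::Status other;
      other = std::move(s);  // move assignment swaps the state_ pointers
      assert(other.IsInvalidArgument());
      return 0;
    }
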
diff --git a/src/leveldb/include/leveldb/table.h b/src/leveldb/include/leveldb/table.h
index a9746c3f5e..25c6013116 100644
--- a/src/leveldb/include/leveldb/table.h
+++ b/src/leveldb/include/leveldb/table.h
@@ -6,6 +6,8 @@
#define STORAGE_LEVELDB_INCLUDE_TABLE_H_
#include <stdint.h>
+
+#include "leveldb/export.h"
#include "leveldb/iterator.h"
namespace leveldb {
@@ -21,7 +23,7 @@ class TableCache;
// A Table is a sorted map from strings to strings. Tables are
// immutable and persistent. A Table may be safely accessed from
// multiple threads without external synchronization.
-class Table {
+class LEVELDB_EXPORT Table {
public:
// Attempt to open the table that is stored in bytes [0..file_size)
// of "file", and read the metadata entries necessary to allow
@@ -30,15 +32,16 @@ class Table {
// If successful, returns ok and sets "*table" to the newly opened
// table. The client should delete "*table" when no longer needed.
// If there was an error while initializing the table, sets "*table"
- // to NULL and returns a non-ok status. Does not take ownership of
+ // to nullptr and returns a non-ok status. Does not take ownership of
// "*source", but the client must ensure that "source" remains live
// for the duration of the returned table's lifetime.
//
// *file must remain live while this Table is in use.
- static Status Open(const Options& options,
- RandomAccessFile* file,
- uint64_t file_size,
- Table** table);
+ static Status Open(const Options& options, RandomAccessFile* file,
+ uint64_t file_size, Table** table);
+
+ Table(const Table&) = delete;
+ Table& operator=(const Table&) = delete;
~Table();
@@ -56,28 +59,24 @@ class Table {
uint64_t ApproximateOffsetOf(const Slice& key) const;
private:
+ friend class TableCache;
struct Rep;
- Rep* rep_;
- explicit Table(Rep* rep) { rep_ = rep; }
static Iterator* BlockReader(void*, const ReadOptions&, const Slice&);
+ explicit Table(Rep* rep) : rep_(rep) {}
+
// Calls (*handle_result)(arg, ...) with the entry found after a call
// to Seek(key). May not make such a call if filter policy says
// that key is not present.
- friend class TableCache;
- Status InternalGet(
- const ReadOptions&, const Slice& key,
- void* arg,
- void (*handle_result)(void* arg, const Slice& k, const Slice& v));
-
+ Status InternalGet(const ReadOptions&, const Slice& key, void* arg,
+ void (*handle_result)(void* arg, const Slice& k,
+ const Slice& v));
void ReadMeta(const Footer& footer);
void ReadFilter(const Slice& filter_handle_value);
- // No copying allowed
- Table(const Table&);
- void operator=(const Table&);
+ Rep* const rep_;
};
} // namespace leveldb
diff --git a/src/leveldb/include/leveldb/table_builder.h b/src/leveldb/include/leveldb/table_builder.h
index 5fd1dc71f1..7d8896bb89 100644
--- a/src/leveldb/include/leveldb/table_builder.h
+++ b/src/leveldb/include/leveldb/table_builder.h
@@ -14,6 +14,8 @@
#define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_
#include <stdint.h>
+
+#include "leveldb/export.h"
#include "leveldb/options.h"
#include "leveldb/status.h"
@@ -23,13 +25,16 @@ class BlockBuilder;
class BlockHandle;
class WritableFile;
-class TableBuilder {
+class LEVELDB_EXPORT TableBuilder {
public:
// Create a builder that will store the contents of the table it is
// building in *file. Does not close the file. It is up to the
// caller to close the file after calling Finish().
TableBuilder(const Options& options, WritableFile* file);
+ TableBuilder(const TableBuilder&) = delete;
+ TableBuilder& operator=(const TableBuilder&) = delete;
+
// REQUIRES: Either Finish() or Abandon() has been called.
~TableBuilder();
@@ -81,10 +86,6 @@ class TableBuilder {
struct Rep;
Rep* rep_;
-
- // No copying allowed
- TableBuilder(const TableBuilder&);
- void operator=(const TableBuilder&);
};
} // namespace leveldb
diff --git a/src/leveldb/include/leveldb/write_batch.h b/src/leveldb/include/leveldb/write_batch.h
index ee9aab68e0..94d4115fed 100644
--- a/src/leveldb/include/leveldb/write_batch.h
+++ b/src/leveldb/include/leveldb/write_batch.h
@@ -22,15 +22,29 @@
#define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_
#include <string>
+
+#include "leveldb/export.h"
#include "leveldb/status.h"
namespace leveldb {
class Slice;
-class WriteBatch {
+class LEVELDB_EXPORT WriteBatch {
public:
+ class LEVELDB_EXPORT Handler {
+ public:
+ virtual ~Handler();
+ virtual void Put(const Slice& key, const Slice& value) = 0;
+ virtual void Delete(const Slice& key) = 0;
+ };
+
WriteBatch();
+
+ // Intentionally copyable.
+ WriteBatch(const WriteBatch&) = default;
+ WriteBatch& operator=(const WriteBatch&) = default;
+
~WriteBatch();
// Store the mapping "key->value" in the database.
@@ -42,21 +56,26 @@ class WriteBatch {
// Clear all updates buffered in this batch.
void Clear();
+ // The size of the database changes caused by this batch.
+ //
+ // This number is tied to implementation details, and may change across
+ // releases. It is intended for LevelDB usage metrics.
+ size_t ApproximateSize() const;
+
+ // Copies the operations in "source" to this batch.
+ //
+ // This runs in O(source size) time. However, the constant factor is better
+ // than calling Iterate() over the source batch with a Handler that replicates
+ // the operations into this batch.
+ void Append(const WriteBatch& source);
+
// Support for iterating over the contents of a batch.
- class Handler {
- public:
- virtual ~Handler();
- virtual void Put(const Slice& key, const Slice& value) = 0;
- virtual void Delete(const Slice& key) = 0;
- };
Status Iterate(Handler* handler) const;
private:
friend class WriteBatchInternal;
std::string rep_; // See comment in write_batch.cc for the format of rep_
-
- // Intentionally copyable
};
} // namespace leveldb
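
The new Append() and ApproximateSize() members can be exercised as below (keys and values are placeholders):

    #include <cassert>

    #include "leveldb/write_batch.h"

    int main() {
      leveldb::WriteBatch base;
      base.Put("k1", "v1");
      base.Delete("stale-key");

      leveldb::WriteBatch extra;
      extra.Put("k2", "v2");

      base.Append(extra);  // replays extra's operations into base, in order

      // Rough metric of how much data this batch adds to the database; the
      // exact value is an implementation detail.
      assert(base.ApproximateSize() > 0);
      return 0;
    }
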
diff --git a/src/leveldb/issues/issue178_test.cc b/src/leveldb/issues/issue178_test.cc
index 1b1cf8bb28..d50ffeb9d4 100644
--- a/src/leveldb/issues/issue178_test.cc
+++ b/src/leveldb/issues/issue178_test.cc
@@ -3,9 +3,9 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
// Test for issue 178: a manual compaction causes deleted data to reappear.
+#include <cstdlib>
#include <iostream>
#include <sstream>
-#include <cstdlib>
#include "leveldb/db.h"
#include "leveldb/write_batch.h"
@@ -21,11 +21,9 @@ std::string Key1(int i) {
return buf;
}
-std::string Key2(int i) {
- return Key1(i) + "_xxx";
-}
+std::string Key2(int i) { return Key1(i) + "_xxx"; }
-class Issue178 { };
+class Issue178 {};
TEST(Issue178, Test) {
// Get rid of any state from an old run.
@@ -87,6 +85,4 @@ TEST(Issue178, Test) {
} // anonymous namespace
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/issues/issue200_test.cc b/src/leveldb/issues/issue200_test.cc
index 1cec79f443..877b2afc47 100644
--- a/src/leveldb/issues/issue200_test.cc
+++ b/src/leveldb/issues/issue200_test.cc
@@ -11,14 +11,14 @@
namespace leveldb {
-class Issue200 { };
+class Issue200 {};
TEST(Issue200, Test) {
// Get rid of any state from an old run.
std::string dbpath = test::TmpDir() + "/leveldb_issue200_test";
DestroyDB(dbpath, Options());
- DB *db;
+ DB* db;
Options options;
options.create_if_missing = true;
ASSERT_OK(DB::Open(options, dbpath, &db));
@@ -31,7 +31,7 @@ TEST(Issue200, Test) {
ASSERT_OK(db->Put(write_options, "5", "f"));
ReadOptions read_options;
- Iterator *iter = db->NewIterator(read_options);
+ Iterator* iter = db->NewIterator(read_options);
// Add an element that should not be reflected in the iterator.
ASSERT_OK(db->Put(write_options, "25", "cd"));
@@ -54,6 +54,4 @@ TEST(Issue200, Test) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/issues/issue320_test.cc b/src/leveldb/issues/issue320_test.cc
new file mode 100644
index 0000000000..c5fcbfc6e7
--- /dev/null
+++ b/src/leveldb/issues/issue320_test.cc
@@ -0,0 +1,128 @@
+// Copyright (c) 2019 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <cstdint>
+#include <cstdlib>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "leveldb/db.h"
+#include "leveldb/write_batch.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+namespace {
+
+// Creates a random number in the range of [0, max).
+int GenerateRandomNumber(int max) { return std::rand() % max; }
+
+std::string CreateRandomString(int32_t index) {
+ static const size_t len = 1024;
+ char bytes[len];
+ size_t i = 0;
+ while (i < 8) {
+ bytes[i] = 'a' + ((index >> (4 * i)) & 0xf);
+ ++i;
+ }
+ while (i < sizeof(bytes)) {
+ bytes[i] = 'a' + GenerateRandomNumber(26);
+ ++i;
+ }
+ return std::string(bytes, sizeof(bytes));
+}
+
+} // namespace
+
+class Issue320 {};
+
+TEST(Issue320, Test) {
+ std::srand(0);
+
+ bool delete_before_put = false;
+ bool keep_snapshots = true;
+
+ std::vector<std::unique_ptr<std::pair<std::string, std::string>>> test_map(
+ 10000);
+ std::vector<Snapshot const*> snapshots(100, nullptr);
+
+ DB* db;
+ Options options;
+ options.create_if_missing = true;
+
+ std::string dbpath = test::TmpDir() + "/leveldb_issue320_test";
+ ASSERT_OK(DB::Open(options, dbpath, &db));
+
+ uint32_t target_size = 10000;
+ uint32_t num_items = 0;
+ uint32_t count = 0;
+ std::string key;
+ std::string value, old_value;
+
+ WriteOptions writeOptions;
+ ReadOptions readOptions;
+ while (count < 200000) {
+ if ((++count % 1000) == 0) {
+ std::cout << "count: " << count << std::endl;
+ }
+
+ int index = GenerateRandomNumber(test_map.size());
+ WriteBatch batch;
+
+ if (test_map[index] == nullptr) {
+ num_items++;
+ test_map[index].reset(new std::pair<std::string, std::string>(
+ CreateRandomString(index), CreateRandomString(index)));
+ batch.Put(test_map[index]->first, test_map[index]->second);
+ } else {
+ ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value));
+ if (old_value != test_map[index]->second) {
+ std::cout << "ERROR incorrect value returned by Get" << std::endl;
+ std::cout << " count=" << count << std::endl;
+ std::cout << " old value=" << old_value << std::endl;
+ std::cout << " test_map[index]->second=" << test_map[index]->second
+ << std::endl;
+ std::cout << " test_map[index]->first=" << test_map[index]->first
+ << std::endl;
+ std::cout << " index=" << index << std::endl;
+ ASSERT_EQ(old_value, test_map[index]->second);
+ }
+
+ if (num_items >= target_size && GenerateRandomNumber(100) > 30) {
+ batch.Delete(test_map[index]->first);
+ test_map[index] = nullptr;
+ --num_items;
+ } else {
+ test_map[index]->second = CreateRandomString(index);
+ if (delete_before_put) batch.Delete(test_map[index]->first);
+ batch.Put(test_map[index]->first, test_map[index]->second);
+ }
+ }
+
+ ASSERT_OK(db->Write(writeOptions, &batch));
+
+ if (keep_snapshots && GenerateRandomNumber(10) == 0) {
+ int i = GenerateRandomNumber(snapshots.size());
+ if (snapshots[i] != nullptr) {
+ db->ReleaseSnapshot(snapshots[i]);
+ }
+ snapshots[i] = db->GetSnapshot();
+ }
+ }
+
+ for (Snapshot const* snapshot : snapshots) {
+ if (snapshot) {
+ db->ReleaseSnapshot(snapshot);
+ }
+ }
+
+ delete db;
+ DestroyDB(dbpath, options);
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/port/README b/src/leveldb/port/README.md
index 422563e25c..8b171532e1 100644
--- a/src/leveldb/port/README
+++ b/src/leveldb/port/README.md
@@ -5,6 +5,6 @@ Code in the rest of the package includes "port.h" from this directory.
"port.h" in turn includes a platform specific "port_<platform>.h" file
that provides the platform specific implementation.
-See port_posix.h for an example of what must be provided in a platform
+See port_stdcxx.h for an example of what must be provided in a platform
specific header file.
diff --git a/src/leveldb/port/atomic_pointer.h b/src/leveldb/port/atomic_pointer.h
deleted file mode 100644
index d79a02230d..0000000000
--- a/src/leveldb/port/atomic_pointer.h
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// AtomicPointer provides storage for a lock-free pointer.
-// Platform-dependent implementation of AtomicPointer:
-// - If the platform provides a cheap barrier, we use it with raw pointers
-// - If <atomic> is present (on newer versions of gcc, it is), we use
-// a <atomic>-based AtomicPointer. However we prefer the memory
-// barrier based version, because at least on a gcc 4.4 32-bit build
-// on linux, we have encountered a buggy <atomic> implementation.
-// Also, some <atomic> implementations are much slower than a memory-barrier
-// based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for
-// a barrier based acquire-load).
-// This code is based on atomicops-internals-* in Google's perftools:
-// http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase
-
-#ifndef PORT_ATOMIC_POINTER_H_
-#define PORT_ATOMIC_POINTER_H_
-
-#include <stdint.h>
-#ifdef LEVELDB_ATOMIC_PRESENT
-#include <atomic>
-#endif
-#ifdef OS_WIN
-#include <windows.h>
-#endif
-#ifdef OS_MACOSX
-#include <libkern/OSAtomic.h>
-#endif
-
-#if defined(_M_X64) || defined(__x86_64__)
-#define ARCH_CPU_X86_FAMILY 1
-#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
-#define ARCH_CPU_X86_FAMILY 1
-#elif defined(__ARMEL__)
-#define ARCH_CPU_ARM_FAMILY 1
-#elif defined(__aarch64__)
-#define ARCH_CPU_ARM64_FAMILY 1
-#elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__)
-#define ARCH_CPU_PPC_FAMILY 1
-#elif defined(__mips__)
-#define ARCH_CPU_MIPS_FAMILY 1
-#endif
-
-namespace leveldb {
-namespace port {
-
-// AtomicPointer based on <cstdatomic> if available
-#if defined(LEVELDB_ATOMIC_PRESENT)
-class AtomicPointer {
- private:
- std::atomic<void*> rep_;
- public:
- AtomicPointer() { }
- explicit AtomicPointer(void* v) : rep_(v) { }
- inline void* Acquire_Load() const {
- return rep_.load(std::memory_order_acquire);
- }
- inline void Release_Store(void* v) {
- rep_.store(v, std::memory_order_release);
- }
- inline void* NoBarrier_Load() const {
- return rep_.load(std::memory_order_relaxed);
- }
- inline void NoBarrier_Store(void* v) {
- rep_.store(v, std::memory_order_relaxed);
- }
-};
-
-#else
-
-// Define MemoryBarrier() if available
-// Windows on x86
-#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
-// windows.h already provides a MemoryBarrier(void) macro
-// http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// Mac OS
-#elif defined(OS_MACOSX)
-inline void MemoryBarrier() {
- OSMemoryBarrier();
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// Gcc on x86
-#elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__)
-inline void MemoryBarrier() {
- // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
- // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
- __asm__ __volatile__("" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// Sun Studio
-#elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC)
-inline void MemoryBarrier() {
- // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
- // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
- asm volatile("" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// ARM Linux
-#elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__)
-typedef void (*LinuxKernelMemoryBarrierFunc)(void);
-// The Linux ARM kernel provides a highly optimized device-specific memory
-// barrier function at a fixed memory address that is mapped in every
-// user-level process.
-//
-// This beats using CPU-specific instructions which are, on single-core
-// devices, un-necessary and very costly (e.g. ARMv7-A "dmb" takes more
-// than 180ns on a Cortex-A8 like the one on a Nexus One). Benchmarking
-// shows that the extra function call cost is completely negligible on
-// multi-core devices.
-//
-inline void MemoryBarrier() {
- (*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)();
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// ARM64
-#elif defined(ARCH_CPU_ARM64_FAMILY)
-inline void MemoryBarrier() {
- asm volatile("dmb sy" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// PPC
-#elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__)
-inline void MemoryBarrier() {
- // TODO for some powerpc expert: is there a cheaper suitable variant?
- // Perhaps by having separate barriers for acquire and release ops.
- asm volatile("sync" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// MIPS
-#elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__)
-inline void MemoryBarrier() {
- __asm__ __volatile__("sync" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-#endif
-
-// AtomicPointer built using platform-specific MemoryBarrier()
-#if defined(LEVELDB_HAVE_MEMORY_BARRIER)
-class AtomicPointer {
- private:
- void* rep_;
- public:
- AtomicPointer() { }
- explicit AtomicPointer(void* p) : rep_(p) {}
- inline void* NoBarrier_Load() const { return rep_; }
- inline void NoBarrier_Store(void* v) { rep_ = v; }
- inline void* Acquire_Load() const {
- void* result = rep_;
- MemoryBarrier();
- return result;
- }
- inline void Release_Store(void* v) {
- MemoryBarrier();
- rep_ = v;
- }
-};
-
-// Atomic pointer based on sparc memory barriers
-#elif defined(__sparcv9) && defined(__GNUC__)
-class AtomicPointer {
- private:
- void* rep_;
- public:
- AtomicPointer() { }
- explicit AtomicPointer(void* v) : rep_(v) { }
- inline void* Acquire_Load() const {
- void* val;
- __asm__ __volatile__ (
- "ldx [%[rep_]], %[val] \n\t"
- "membar #LoadLoad|#LoadStore \n\t"
- : [val] "=r" (val)
- : [rep_] "r" (&rep_)
- : "memory");
- return val;
- }
- inline void Release_Store(void* v) {
- __asm__ __volatile__ (
- "membar #LoadStore|#StoreStore \n\t"
- "stx %[v], [%[rep_]] \n\t"
- :
- : [rep_] "r" (&rep_), [v] "r" (v)
- : "memory");
- }
- inline void* NoBarrier_Load() const { return rep_; }
- inline void NoBarrier_Store(void* v) { rep_ = v; }
-};
-
-// Atomic pointer based on ia64 acq/rel
-#elif defined(__ia64) && defined(__GNUC__)
-class AtomicPointer {
- private:
- void* rep_;
- public:
- AtomicPointer() { }
- explicit AtomicPointer(void* v) : rep_(v) { }
- inline void* Acquire_Load() const {
- void* val ;
- __asm__ __volatile__ (
- "ld8.acq %[val] = [%[rep_]] \n\t"
- : [val] "=r" (val)
- : [rep_] "r" (&rep_)
- : "memory"
- );
- return val;
- }
- inline void Release_Store(void* v) {
- __asm__ __volatile__ (
- "st8.rel [%[rep_]] = %[v] \n\t"
- :
- : [rep_] "r" (&rep_), [v] "r" (v)
- : "memory"
- );
- }
- inline void* NoBarrier_Load() const { return rep_; }
- inline void NoBarrier_Store(void* v) { rep_ = v; }
-};
-
-// We have neither MemoryBarrier(), nor <atomic>
-#else
-#error Please implement AtomicPointer for this platform.
-
-#endif
-#endif
-
-#undef LEVELDB_HAVE_MEMORY_BARRIER
-#undef ARCH_CPU_X86_FAMILY
-#undef ARCH_CPU_ARM_FAMILY
-#undef ARCH_CPU_ARM64_FAMILY
-#undef ARCH_CPU_PPC_FAMILY
-
-} // namespace port
-} // namespace leveldb
-
-#endif // PORT_ATOMIC_POINTER_H_
diff --git a/src/leveldb/port/port.h b/src/leveldb/port/port.h
index 4baafa8e22..4b247f74f9 100644
--- a/src/leveldb/port/port.h
+++ b/src/leveldb/port/port.h
@@ -10,12 +10,10 @@
// Include the appropriate platform specific file below. If you are
// porting to a new platform, see "port_example.h" for documentation
// of what the new port_<platform>.h file must provide.
-#if defined(LEVELDB_PLATFORM_POSIX)
-# include "port/port_posix.h"
+#if defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_WINDOWS)
+#include "port/port_stdcxx.h"
#elif defined(LEVELDB_PLATFORM_CHROMIUM)
-# include "port/port_chromium.h"
-#elif defined(LEVELDB_PLATFORM_WINDOWS)
-# include "port/port_win.h"
+#include "port/port_chromium.h"
#endif
#endif // STORAGE_LEVELDB_PORT_PORT_H_
diff --git a/src/leveldb/port/port_config.h.in b/src/leveldb/port/port_config.h.in
new file mode 100644
index 0000000000..21273153a3
--- /dev/null
+++ b/src/leveldb/port/port_config.h.in
@@ -0,0 +1,39 @@
+// Copyright 2017 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
+#define STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
+
+// Define to 1 if you have a definition for fdatasync() in <unistd.h>.
+#if !defined(HAVE_FDATASYNC)
+#cmakedefine01 HAVE_FDATASYNC
+#endif // !defined(HAVE_FDATASYNC)
+
+// Define to 1 if you have a definition for F_FULLFSYNC in <fcntl.h>.
+#if !defined(HAVE_FULLFSYNC)
+#cmakedefine01 HAVE_FULLFSYNC
+#endif // !defined(HAVE_FULLFSYNC)
+
+// Define to 1 if you have a definition for O_CLOEXEC in <fcntl.h>.
+#if !defined(HAVE_O_CLOEXEC)
+#cmakedefine01 HAVE_O_CLOEXEC
+#endif // !defined(HAVE_O_CLOEXEC)
+
+// Define to 1 if you have Google CRC32C.
+#if !defined(HAVE_CRC32C)
+#cmakedefine01 HAVE_CRC32C
+#endif // !defined(HAVE_CRC32C)
+
+// Define to 1 if you have Google Snappy.
+#if !defined(HAVE_SNAPPY)
+#cmakedefine01 HAVE_SNAPPY
+#endif // !defined(HAVE_SNAPPY)
+
+// Define to 1 if your processor stores words with the most significant byte
+// first (like Motorola and SPARC, unlike Intel and VAX).
+#if !defined(LEVELDB_IS_BIG_ENDIAN)
+#cmakedefine01 LEVELDB_IS_BIG_ENDIAN
+#endif // !defined(LEVELDB_IS_BIG_ENDIAN)
+
+#endif // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
\ No newline at end of file
diff --git a/src/leveldb/port/port_example.h b/src/leveldb/port/port_example.h
index 5b1d027de5..1a8fca24b3 100644
--- a/src/leveldb/port/port_example.h
+++ b/src/leveldb/port/port_example.h
@@ -10,6 +10,8 @@
#ifndef STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
#define STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
+#include "port/thread_annotations.h"
+
namespace leveldb {
namespace port {
@@ -23,23 +25,23 @@ static const bool kLittleEndian = true /* or some other expression */;
// ------------------ Threading -------------------
// A Mutex represents an exclusive lock.
-class Mutex {
+class LOCKABLE Mutex {
public:
Mutex();
~Mutex();
// Lock the mutex. Waits until other lockers have exited.
// Will deadlock if the mutex is already locked by this thread.
- void Lock();
+ void Lock() EXCLUSIVE_LOCK_FUNCTION();
// Unlock the mutex.
// REQUIRES: This mutex was locked by this thread.
- void Unlock();
+ void Unlock() UNLOCK_FUNCTION();
// Optionally crash if this thread does not hold this mutex.
// The implementation must be fast, especially if NDEBUG is
// defined. The implementation is allowed to skip all checks.
- void AssertHeld();
+ void AssertHeld() ASSERT_EXCLUSIVE_LOCK();
};
class CondVar {
@@ -60,57 +62,18 @@ class CondVar {
void SignallAll();
};
-// Thread-safe initialization.
-// Used as follows:
-// static port::OnceType init_control = LEVELDB_ONCE_INIT;
-// static void Initializer() { ... do something ...; }
-// ...
-// port::InitOnce(&init_control, &Initializer);
-typedef intptr_t OnceType;
-#define LEVELDB_ONCE_INIT 0
-extern void InitOnce(port::OnceType*, void (*initializer)());
-
-// A type that holds a pointer that can be read or written atomically
-// (i.e., without word-tearing.)
-class AtomicPointer {
- private:
- intptr_t rep_;
- public:
- // Initialize to arbitrary value
- AtomicPointer();
-
- // Initialize to hold v
- explicit AtomicPointer(void* v) : rep_(v) { }
-
- // Read and return the stored pointer with the guarantee that no
- // later memory access (read or write) by this thread can be
- // reordered ahead of this read.
- void* Acquire_Load() const;
-
- // Set v as the stored pointer with the guarantee that no earlier
- // memory access (read or write) by this thread can be reordered
- // after this store.
- void Release_Store(void* v);
-
- // Read the stored pointer with no ordering guarantees.
- void* NoBarrier_Load() const;
-
- // Set va as the stored pointer with no ordering guarantees.
- void NoBarrier_Store(void* v);
-};
-
// ------------------ Compression -------------------
// Store the snappy compression of "input[0,input_length-1]" in *output.
// Returns false if snappy is not supported by this port.
-extern bool Snappy_Compress(const char* input, size_t input_length,
- std::string* output);
+bool Snappy_Compress(const char* input, size_t input_length,
+ std::string* output);
// If input[0,input_length-1] looks like a valid snappy compressed
// buffer, store the size of the uncompressed data in *result and
// return true. Else return false.
-extern bool Snappy_GetUncompressedLength(const char* input, size_t length,
- size_t* result);
+bool Snappy_GetUncompressedLength(const char* input, size_t length,
+ size_t* result);
// Attempt to snappy uncompress input[0,input_length-1] into *output.
// Returns true if successful, false if the input is invalid lightweight
@@ -119,19 +82,15 @@ extern bool Snappy_GetUncompressedLength(const char* input, size_t length,
// REQUIRES: at least the first "n" bytes of output[] must be writable
// where "n" is the result of a successful call to
// Snappy_GetUncompressedLength.
-extern bool Snappy_Uncompress(const char* input_data, size_t input_length,
- char* output);
+bool Snappy_Uncompress(const char* input_data, size_t input_length,
+ char* output);
// ------------------ Miscellaneous -------------------
// If heap profiling is not supported, returns false.
// Else repeatedly calls (*func)(arg, data, n) and then returns true.
// The concatenation of all "data[0,n-1]" fragments is the heap profile.
-extern bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg);
-
-// Determine whether a working accelerated crc32 implementation exists
-// Returns true if AcceleratedCRC32C is safe to call
-bool HasAcceleratedCRC32C();
+bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg);
// Extend the CRC to include the first n bytes of buf.
//
diff --git a/src/leveldb/port/port_posix.cc b/src/leveldb/port/port_posix.cc
deleted file mode 100644
index ec39e92195..0000000000
--- a/src/leveldb/port/port_posix.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "port/port_posix.h"
-
-#include <cstdlib>
-#include <stdio.h>
-#include <string.h>
-
-#if (defined(__x86_64__) || defined(__i386__)) && defined(__GNUC__)
-#include <cpuid.h>
-#endif
-
-namespace leveldb {
-namespace port {
-
-static void PthreadCall(const char* label, int result) {
- if (result != 0) {
- fprintf(stderr, "pthread %s: %s\n", label, strerror(result));
- abort();
- }
-}
-
-Mutex::Mutex() { PthreadCall("init mutex", pthread_mutex_init(&mu_, NULL)); }
-
-Mutex::~Mutex() { PthreadCall("destroy mutex", pthread_mutex_destroy(&mu_)); }
-
-void Mutex::Lock() { PthreadCall("lock", pthread_mutex_lock(&mu_)); }
-
-void Mutex::Unlock() { PthreadCall("unlock", pthread_mutex_unlock(&mu_)); }
-
-CondVar::CondVar(Mutex* mu)
- : mu_(mu) {
- PthreadCall("init cv", pthread_cond_init(&cv_, NULL));
-}
-
-CondVar::~CondVar() { PthreadCall("destroy cv", pthread_cond_destroy(&cv_)); }
-
-void CondVar::Wait() {
- PthreadCall("wait", pthread_cond_wait(&cv_, &mu_->mu_));
-}
-
-void CondVar::Signal() {
- PthreadCall("signal", pthread_cond_signal(&cv_));
-}
-
-void CondVar::SignalAll() {
- PthreadCall("broadcast", pthread_cond_broadcast(&cv_));
-}
-
-void InitOnce(OnceType* once, void (*initializer)()) {
- PthreadCall("once", pthread_once(once, initializer));
-}
-
-bool HasAcceleratedCRC32C() {
-#if (defined(__x86_64__) || defined(__i386__)) && defined(__GNUC__)
- unsigned int eax, ebx, ecx, edx;
- __get_cpuid(1, &eax, &ebx, &ecx, &edx);
- return (ecx & (1 << 20)) != 0;
-#else
- return false;
-#endif
-}
-
-} // namespace port
-} // namespace leveldb
diff --git a/src/leveldb/port/port_posix.h b/src/leveldb/port/port_posix.h
deleted file mode 100644
index d85fa5d63f..0000000000
--- a/src/leveldb/port/port_posix.h
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// See port_example.h for documentation for the following types/functions.
-
-#ifndef STORAGE_LEVELDB_PORT_PORT_POSIX_H_
-#define STORAGE_LEVELDB_PORT_PORT_POSIX_H_
-
-#undef PLATFORM_IS_LITTLE_ENDIAN
-#if defined(OS_MACOSX)
- #include <machine/endian.h>
- #if defined(__DARWIN_LITTLE_ENDIAN) && defined(__DARWIN_BYTE_ORDER)
- #define PLATFORM_IS_LITTLE_ENDIAN \
- (__DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN)
- #endif
-#elif defined(OS_SOLARIS)
- #include <sys/isa_defs.h>
- #ifdef _LITTLE_ENDIAN
- #define PLATFORM_IS_LITTLE_ENDIAN true
- #else
- #define PLATFORM_IS_LITTLE_ENDIAN false
- #endif
-#elif defined(OS_FREEBSD) || defined(OS_OPENBSD) ||\
- defined(OS_NETBSD) || defined(OS_DRAGONFLYBSD)
- #include <sys/types.h>
- #include <sys/endian.h>
- #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
-#elif defined(OS_HPUX)
- #define PLATFORM_IS_LITTLE_ENDIAN false
-#elif defined(OS_ANDROID)
- // Due to a bug in the NDK x86 <sys/endian.h> definition,
- // _BYTE_ORDER must be used instead of __BYTE_ORDER on Android.
- // See http://code.google.com/p/android/issues/detail?id=39824
- #include <endian.h>
- #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
-#else
- #include <endian.h>
-#endif
-
-#include <pthread.h>
-#ifdef SNAPPY
-#include <snappy.h>
-#endif
-#include <stdint.h>
-#include <string>
-#include "port/atomic_pointer.h"
-
-#ifndef PLATFORM_IS_LITTLE_ENDIAN
-#define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN)
-#endif
-
-#if defined(OS_MACOSX) || defined(OS_SOLARIS) || defined(OS_FREEBSD) ||\
- defined(OS_NETBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) ||\
- defined(OS_ANDROID) || defined(OS_HPUX) || defined(CYGWIN)
-// Use fread/fwrite/fflush on platforms without _unlocked variants
-#define fread_unlocked fread
-#define fwrite_unlocked fwrite
-#define fflush_unlocked fflush
-#endif
-
-#if defined(OS_FREEBSD) ||\
- defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD)
-// Use fsync() on platforms without fdatasync()
-#define fdatasync fsync
-#endif
-
-#if defined(OS_MACOSX)
-#define fdatasync(fd) fcntl(fd, F_FULLFSYNC, 0)
-#endif
-
-#if defined(OS_ANDROID) && __ANDROID_API__ < 9
-// fdatasync() was only introduced in API level 9 on Android. Use fsync()
-// when targetting older platforms.
-#define fdatasync fsync
-#endif
-
-namespace leveldb {
-namespace port {
-
-static const bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN;
-#undef PLATFORM_IS_LITTLE_ENDIAN
-
-class CondVar;
-
-class Mutex {
- public:
- Mutex();
- ~Mutex();
-
- void Lock();
- void Unlock();
- void AssertHeld() { }
-
- private:
- friend class CondVar;
- pthread_mutex_t mu_;
-
- // No copying
- Mutex(const Mutex&);
- void operator=(const Mutex&);
-};
-
-class CondVar {
- public:
- explicit CondVar(Mutex* mu);
- ~CondVar();
- void Wait();
- void Signal();
- void SignalAll();
- private:
- pthread_cond_t cv_;
- Mutex* mu_;
-};
-
-typedef pthread_once_t OnceType;
-#define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT
-extern void InitOnce(OnceType* once, void (*initializer)());
-
-inline bool Snappy_Compress(const char* input, size_t length,
- ::std::string* output) {
-#ifdef SNAPPY
- output->resize(snappy::MaxCompressedLength(length));
- size_t outlen;
- snappy::RawCompress(input, length, &(*output)[0], &outlen);
- output->resize(outlen);
- return true;
-#endif
-
- return false;
-}
-
-inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
- size_t* result) {
-#ifdef SNAPPY
- return snappy::GetUncompressedLength(input, length, result);
-#else
- return false;
-#endif
-}
-
-inline bool Snappy_Uncompress(const char* input, size_t length,
- char* output) {
-#ifdef SNAPPY
- return snappy::RawUncompress(input, length, output);
-#else
- return false;
-#endif
-}
-
-inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
- return false;
-}
-
-bool HasAcceleratedCRC32C();
-uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size);
-
-} // namespace port
-} // namespace leveldb
-
-#endif // STORAGE_LEVELDB_PORT_PORT_POSIX_H_
diff --git a/src/leveldb/port/port_posix_sse.cc b/src/leveldb/port/port_posix_sse.cc
deleted file mode 100644
index 2d49c21dd8..0000000000
--- a/src/leveldb/port/port_posix_sse.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2016 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// A portable implementation of crc32c, optimized to handle
-// four bytes at a time.
-//
-// In a separate source file to allow this accelerated CRC32C function to be
-// compiled with the appropriate compiler flags to enable x86 SSE 4.2
-// instructions.
-
-#include <stdint.h>
-#include <string.h>
-#include "port/port.h"
-
-#if defined(LEVELDB_PLATFORM_POSIX_SSE)
-
-#if defined(_MSC_VER)
-#include <intrin.h>
-#elif defined(__GNUC__) && defined(__SSE4_2__)
-#include <nmmintrin.h>
-#endif
-
-#endif // defined(LEVELDB_PLATFORM_POSIX_SSE)
-
-namespace leveldb {
-namespace port {
-
-#if defined(LEVELDB_PLATFORM_POSIX_SSE)
-
-// Used to fetch a naturally-aligned 32-bit word in little endian byte-order
-static inline uint32_t LE_LOAD32(const uint8_t *p) {
- // SSE is x86 only, so ensured that |p| is always little-endian.
- uint32_t word;
- memcpy(&word, p, sizeof(word));
- return word;
-}
-
-#if defined(_M_X64) || defined(__x86_64__) // LE_LOAD64 is only used on x64.
-
-// Used to fetch a naturally-aligned 64-bit word in little endian byte-order
-static inline uint64_t LE_LOAD64(const uint8_t *p) {
- uint64_t dword;
- memcpy(&dword, p, sizeof(dword));
- return dword;
-}
-
-#endif // defined(_M_X64) || defined(__x86_64__)
-
-#endif // defined(LEVELDB_PLATFORM_POSIX_SSE)
-
-// For further improvements see Intel publication at:
-// http://download.intel.com/design/intarch/papers/323405.pdf
-uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
-#if !defined(LEVELDB_PLATFORM_POSIX_SSE)
- return 0;
-#else
-
- const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
- const uint8_t *e = p + size;
- uint32_t l = crc ^ 0xffffffffu;
-
-#define STEP1 do { \
- l = _mm_crc32_u8(l, *p++); \
-} while (0)
-#define STEP4 do { \
- l = _mm_crc32_u32(l, LE_LOAD32(p)); \
- p += 4; \
-} while (0)
-#define STEP8 do { \
- l = _mm_crc32_u64(l, LE_LOAD64(p)); \
- p += 8; \
-} while (0)
-
- if (size > 16) {
- // Process unaligned bytes
- for (unsigned int i = reinterpret_cast<uintptr_t>(p) % 8; i; --i) {
- STEP1;
- }
-
- // _mm_crc32_u64 is only available on x64.
-#if defined(_M_X64) || defined(__x86_64__)
- // Process 8 bytes at a time
- while ((e-p) >= 8) {
- STEP8;
- }
- // Process 4 bytes at a time
- if ((e-p) >= 4) {
- STEP4;
- }
-#else // !(defined(_M_X64) || defined(__x86_64__))
- // Process 4 bytes at a time
- while ((e-p) >= 4) {
- STEP4;
- }
-#endif // defined(_M_X64) || defined(__x86_64__)
- }
- // Process the last few bytes
- while (p != e) {
- STEP1;
- }
-#undef STEP8
-#undef STEP4
-#undef STEP1
- return l ^ 0xffffffffu;
-#endif // defined(LEVELDB_PLATFORM_POSIX_SSE)
-}
-
-} // namespace port
-} // namespace leveldb
diff --git a/src/leveldb/port/port_stdcxx.h b/src/leveldb/port/port_stdcxx.h
new file mode 100644
index 0000000000..e9cb0e53af
--- /dev/null
+++ b/src/leveldb/port/port_stdcxx.h
@@ -0,0 +1,153 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
+#define STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
+
+// port/port_config.h availability is automatically detected via __has_include
+// in newer compilers. If LEVELDB_HAS_PORT_CONFIG_H is defined, it overrides the
+// configuration detection.
+#if defined(LEVELDB_HAS_PORT_CONFIG_H)
+
+#if LEVELDB_HAS_PORT_CONFIG_H
+#include "port/port_config.h"
+#endif // LEVELDB_HAS_PORT_CONFIG_H
+
+#elif defined(__has_include)
+
+#if __has_include("port/port_config.h")
+#include "port/port_config.h"
+#endif // __has_include("port/port_config.h")
+
+#endif // defined(LEVELDB_HAS_PORT_CONFIG_H)
+
+#if HAVE_CRC32C
+#include <crc32c/crc32c.h>
+#endif // HAVE_CRC32C
+#if HAVE_SNAPPY
+#include <snappy.h>
+#endif // HAVE_SNAPPY
+
+#include <cassert>
+#include <condition_variable> // NOLINT
+#include <cstddef>
+#include <cstdint>
+#include <mutex> // NOLINT
+#include <string>
+
+#include "port/thread_annotations.h"
+
+namespace leveldb {
+namespace port {
+
+static const bool kLittleEndian = !LEVELDB_IS_BIG_ENDIAN;
+
+class CondVar;
+
+// Thinly wraps std::mutex.
+class LOCKABLE Mutex {
+ public:
+ Mutex() = default;
+ ~Mutex() = default;
+
+ Mutex(const Mutex&) = delete;
+ Mutex& operator=(const Mutex&) = delete;
+
+ void Lock() EXCLUSIVE_LOCK_FUNCTION() { mu_.lock(); }
+ void Unlock() UNLOCK_FUNCTION() { mu_.unlock(); }
+ void AssertHeld() ASSERT_EXCLUSIVE_LOCK() {}
+
+ private:
+ friend class CondVar;
+ std::mutex mu_;
+};
+
+// Thinly wraps std::condition_variable.
+class CondVar {
+ public:
+ explicit CondVar(Mutex* mu) : mu_(mu) { assert(mu != nullptr); }
+ ~CondVar() = default;
+
+ CondVar(const CondVar&) = delete;
+ CondVar& operator=(const CondVar&) = delete;
+
+ void Wait() {
+ std::unique_lock<std::mutex> lock(mu_->mu_, std::adopt_lock);
+ cv_.wait(lock);
+ lock.release();
+ }
+ void Signal() { cv_.notify_one(); }
+ void SignalAll() { cv_.notify_all(); }
+
+ private:
+ std::condition_variable cv_;
+ Mutex* const mu_;
+};
+
+inline bool Snappy_Compress(const char* input, size_t length,
+ std::string* output) {
+#if HAVE_SNAPPY
+ output->resize(snappy::MaxCompressedLength(length));
+ size_t outlen;
+ snappy::RawCompress(input, length, &(*output)[0], &outlen);
+ output->resize(outlen);
+ return true;
+#else
+ // Silence compiler warnings about unused arguments.
+ (void)input;
+ (void)length;
+ (void)output;
+#endif // HAVE_SNAPPY
+
+ return false;
+}
+
+inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
+ size_t* result) {
+#if HAVE_SNAPPY
+ return snappy::GetUncompressedLength(input, length, result);
+#else
+ // Silence compiler warnings about unused arguments.
+ (void)input;
+ (void)length;
+ (void)result;
+ return false;
+#endif // HAVE_SNAPPY
+}
+
+inline bool Snappy_Uncompress(const char* input, size_t length, char* output) {
+#if HAVE_SNAPPY
+ return snappy::RawUncompress(input, length, output);
+#else
+ // Silence compiler warnings about unused arguments.
+ (void)input;
+ (void)length;
+ (void)output;
+ return false;
+#endif // HAVE_SNAPPY
+}
+
+inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
+ // Silence compiler warnings about unused arguments.
+ (void)func;
+ (void)arg;
+ return false;
+}
+
+inline uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
+#if HAVE_CRC32C
+ return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t*>(buf), size);
+#else
+ // Silence compiler warnings about unused arguments.
+ (void)crc;
+ (void)buf;
+ (void)size;
+ return 0;
+#endif // HAVE_CRC32C
+}
+
+} // namespace port
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
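
A minimal usage sketch of the wrappers added above, assuming port_stdcxx.h is on the include path. The adopt_lock/release() pairing in CondVar::Wait hands the already-held Mutex to a std::unique_lock for the duration of the wait and then gives ownership back, so callers keep the familiar lock / wait-in-a-loop / unlock pattern:

#include "port/port_stdcxx.h"

// Hypothetical producer/consumer pair, for illustration only.
struct WorkQueue {
  leveldb::port::Mutex mu;
  leveldb::port::CondVar cv{&mu};
  int pending = 0;

  void Produce() {
    mu.Lock();
    ++pending;
    cv.Signal();  // wake one waiter
    mu.Unlock();
  }

  void Consume() {
    mu.Lock();
    while (pending == 0) {
      cv.Wait();  // releases mu while blocked, holds it again on return
    }
    --pending;
    mu.Unlock();
  }
};
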
diff --git a/src/leveldb/port/port_win.cc b/src/leveldb/port/port_win.cc
deleted file mode 100644
index 1be9e8d5b0..0000000000
--- a/src/leveldb/port/port_win.cc
+++ /dev/null
@@ -1,158 +0,0 @@
-// LevelDB Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// See port_example.h for documentation for the following types/functions.
-
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the University of California, Berkeley nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#include "port/port_win.h"
-
-#include <windows.h>
-#include <cassert>
-#include <intrin.h>
-
-namespace leveldb {
-namespace port {
-
-Mutex::Mutex() :
- cs_(NULL) {
- assert(!cs_);
- cs_ = static_cast<void *>(new CRITICAL_SECTION());
- ::InitializeCriticalSection(static_cast<CRITICAL_SECTION *>(cs_));
- assert(cs_);
-}
-
-Mutex::~Mutex() {
- assert(cs_);
- ::DeleteCriticalSection(static_cast<CRITICAL_SECTION *>(cs_));
- delete static_cast<CRITICAL_SECTION *>(cs_);
- cs_ = NULL;
- assert(!cs_);
-}
-
-void Mutex::Lock() {
- assert(cs_);
- ::EnterCriticalSection(static_cast<CRITICAL_SECTION *>(cs_));
-}
-
-void Mutex::Unlock() {
- assert(cs_);
- ::LeaveCriticalSection(static_cast<CRITICAL_SECTION *>(cs_));
-}
-
-void Mutex::AssertHeld() {
- assert(cs_);
- assert(1);
-}
-
-CondVar::CondVar(Mutex* mu) :
- waiting_(0),
- mu_(mu),
- sem1_(::CreateSemaphore(NULL, 0, 10000, NULL)),
- sem2_(::CreateSemaphore(NULL, 0, 10000, NULL)) {
- assert(mu_);
-}
-
-CondVar::~CondVar() {
- ::CloseHandle(sem1_);
- ::CloseHandle(sem2_);
-}
-
-void CondVar::Wait() {
- mu_->AssertHeld();
-
- wait_mtx_.Lock();
- ++waiting_;
- wait_mtx_.Unlock();
-
- mu_->Unlock();
-
- // initiate handshake
- ::WaitForSingleObject(sem1_, INFINITE);
- ::ReleaseSemaphore(sem2_, 1, NULL);
- mu_->Lock();
-}
-
-void CondVar::Signal() {
- wait_mtx_.Lock();
- if (waiting_ > 0) {
- --waiting_;
-
- // finalize handshake
- ::ReleaseSemaphore(sem1_, 1, NULL);
- ::WaitForSingleObject(sem2_, INFINITE);
- }
- wait_mtx_.Unlock();
-}
-
-void CondVar::SignalAll() {
- wait_mtx_.Lock();
- ::ReleaseSemaphore(sem1_, waiting_, NULL);
- while(waiting_ > 0) {
- --waiting_;
- ::WaitForSingleObject(sem2_, INFINITE);
- }
- wait_mtx_.Unlock();
-}
-
-AtomicPointer::AtomicPointer(void* v) {
- Release_Store(v);
-}
-
-void InitOnce(OnceType* once, void (*initializer)()) {
- once->InitOnce(initializer);
-}
-
-void* AtomicPointer::Acquire_Load() const {
- void * p = NULL;
- InterlockedExchangePointer(&p, rep_);
- return p;
-}
-
-void AtomicPointer::Release_Store(void* v) {
- InterlockedExchangePointer(&rep_, v);
-}
-
-void* AtomicPointer::NoBarrier_Load() const {
- return rep_;
-}
-
-void AtomicPointer::NoBarrier_Store(void* v) {
- rep_ = v;
-}
-
-bool HasAcceleratedCRC32C() {
-#if defined(__x86_64__) || defined(__i386__)
- int cpu_info[4];
- __cpuid(cpu_info, 1);
- return (cpu_info[2] & (1 << 20)) != 0;
-#else
- return false;
-#endif
-}
-
-}
-}
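
The deleted HasAcceleratedCRC32C probed CPUID leaf 1 and tested ECX bit 20 (SSE4.2, which implies the CRC32 instruction). A sketch of the same runtime check, using the GCC/Clang <cpuid.h> helper instead of MSVC's __cpuid intrinsic:

#if defined(__x86_64__) || defined(__i386__)
#include <cpuid.h>
#endif

// Illustration only: a standalone feature check, not how the new code decides.
bool HasSse42() {
#if defined(__x86_64__) || defined(__i386__)
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return false;
  return (ecx & (1u << 20)) != 0;  // ECX bit 20: SSE4.2
#else
  return false;
#endif
}
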
diff --git a/src/leveldb/port/port_win.h b/src/leveldb/port/port_win.h
deleted file mode 100644
index 989c15cd91..0000000000
--- a/src/leveldb/port/port_win.h
+++ /dev/null
@@ -1,184 +0,0 @@
-// LevelDB Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// See port_example.h for documentation for the following types/functions.
-
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the University of California, Berkeley nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#ifndef STORAGE_LEVELDB_PORT_PORT_WIN_H_
-#define STORAGE_LEVELDB_PORT_PORT_WIN_H_
-
-#ifdef _MSC_VER
-#if !(_MSC_VER >= 1900)
-#define snprintf _snprintf
-#endif
-#define close _close
-#define fread_unlocked _fread_nolock
-#ifdef _WIN64
-#define ssize_t int64_t
-#else
-#define ssize_t int32_t
-#endif
-#endif
-
-#include <string>
-#include <stdint.h>
-#ifdef SNAPPY
-#include <snappy.h>
-#endif
-
-namespace leveldb {
-namespace port {
-
-// Windows is little endian (for now :p)
-static const bool kLittleEndian = true;
-
-class CondVar;
-
-class Mutex {
- public:
- Mutex();
- ~Mutex();
-
- void Lock();
- void Unlock();
- void AssertHeld();
-
- private:
- friend class CondVar;
- // critical sections are more efficient than mutexes
- // but they are not recursive and can only be used to synchronize threads within the same process
- // we use opaque void * to avoid including windows.h in port_win.h
- void * cs_;
-
- // No copying
- Mutex(const Mutex&);
- void operator=(const Mutex&);
-};
-
-// the Win32 API offers a dependable condition variable mechanism, but only starting with
-// Windows 2008 and Vista
-// no matter what we will implement our own condition variable with a semaphore
-// implementation as described in a paper written by Andrew D. Birrell in 2003
-class CondVar {
- public:
- explicit CondVar(Mutex* mu);
- ~CondVar();
- void Wait();
- void Signal();
- void SignalAll();
- private:
- Mutex* mu_;
-
- Mutex wait_mtx_;
- long waiting_;
-
- void * sem1_;
- void * sem2_;
-
-
-};
-
-class OnceType {
-public:
-// OnceType() : init_(false) {}
- OnceType(const OnceType &once) : init_(once.init_) {}
- OnceType(bool f) : init_(f) {}
- void InitOnce(void (*initializer)()) {
- mutex_.Lock();
- if (!init_) {
- init_ = true;
- initializer();
- }
- mutex_.Unlock();
- }
-
-private:
- bool init_;
- Mutex mutex_;
-};
-
-#define LEVELDB_ONCE_INIT false
-extern void InitOnce(port::OnceType*, void (*initializer)());
-
-// Storage for a lock-free pointer
-class AtomicPointer {
- private:
- void * rep_;
- public:
- AtomicPointer() : rep_(NULL) { }
- explicit AtomicPointer(void* v);
- void* Acquire_Load() const;
-
- void Release_Store(void* v);
-
- void* NoBarrier_Load() const;
-
- void NoBarrier_Store(void* v);
-};
-
-inline bool Snappy_Compress(const char* input, size_t length,
- ::std::string* output) {
-#ifdef SNAPPY
- output->resize(snappy::MaxCompressedLength(length));
- size_t outlen;
- snappy::RawCompress(input, length, &(*output)[0], &outlen);
- output->resize(outlen);
- return true;
-#endif
-
- return false;
-}
-
-inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
- size_t* result) {
-#ifdef SNAPPY
- return snappy::GetUncompressedLength(input, length, result);
-#else
- return false;
-#endif
-}
-
-inline bool Snappy_Uncompress(const char* input, size_t length,
- char* output) {
-#ifdef SNAPPY
- return snappy::RawUncompress(input, length, output);
-#else
- return false;
-#endif
-}
-
-inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
- return false;
-}
-
-bool HasAcceleratedCRC32C();
-uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size);
-
-}
-}
-
-#endif // STORAGE_LEVELDB_PORT_PORT_WIN_H_
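
The Snappy_* wrappers deleted here reappear, unchanged in spirit, in port_stdcxx.h earlier in this diff. A round-trip sketch of how a caller uses them, assuming a tree configured with snappy (when it is not, Snappy_Compress returns false and the caller keeps the block uncompressed):

#include <cassert>
#include <cstddef>
#include <string>

#include "port/port_stdcxx.h"

void SnappyRoundTrip(const std::string& raw) {
  std::string compressed;
  if (!leveldb::port::Snappy_Compress(raw.data(), raw.size(), &compressed)) {
    return;  // snappy not compiled in: keep the raw block
  }
  size_t uncompressed_length = 0;
  if (!leveldb::port::Snappy_GetUncompressedLength(
          compressed.data(), compressed.size(), &uncompressed_length)) {
    return;  // corrupt or truncated compressed data
  }
  std::string restored(uncompressed_length, '\0');
  leveldb::port::Snappy_Uncompress(compressed.data(), compressed.size(),
                                   &restored[0]);
  assert(restored == raw);
}
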
diff --git a/src/leveldb/port/thread_annotations.h b/src/leveldb/port/thread_annotations.h
index 9470ef587c..1547df908f 100644
--- a/src/leveldb/port/thread_annotations.h
+++ b/src/leveldb/port/thread_annotations.h
@@ -5,56 +5,104 @@
#ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
#define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
-// Some environments provide custom macros to aid in static thread-safety
-// analysis. Provide empty definitions of such macros unless they are already
-// defined.
+// Use Clang's thread safety analysis annotations when available. In other
+// environments, the macros receive empty definitions.
+// Usage documentation: https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+
+#if !defined(THREAD_ANNOTATION_ATTRIBUTE__)
+
+#if defined(__clang__)
+
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
+#endif
+
+#endif // !defined(THREAD_ANNOTATION_ATTRIBUTE__)
+
+#ifndef GUARDED_BY
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+#endif
+
+#ifndef PT_GUARDED_BY
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+#endif
+
+#ifndef ACQUIRED_AFTER
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+#endif
+
+#ifndef ACQUIRED_BEFORE
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+#endif
#ifndef EXCLUSIVE_LOCKS_REQUIRED
-#define EXCLUSIVE_LOCKS_REQUIRED(...)
+#define EXCLUSIVE_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
#endif
#ifndef SHARED_LOCKS_REQUIRED
-#define SHARED_LOCKS_REQUIRED(...)
+#define SHARED_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
#endif
#ifndef LOCKS_EXCLUDED
-#define LOCKS_EXCLUDED(...)
+#define LOCKS_EXCLUDED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
#endif
#ifndef LOCK_RETURNED
-#define LOCK_RETURNED(x)
+#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
#endif
#ifndef LOCKABLE
-#define LOCKABLE
+#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
#endif
#ifndef SCOPED_LOCKABLE
-#define SCOPED_LOCKABLE
+#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
#endif
#ifndef EXCLUSIVE_LOCK_FUNCTION
-#define EXCLUSIVE_LOCK_FUNCTION(...)
+#define EXCLUSIVE_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
#endif
#ifndef SHARED_LOCK_FUNCTION
-#define SHARED_LOCK_FUNCTION(...)
+#define SHARED_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
#endif
#ifndef EXCLUSIVE_TRYLOCK_FUNCTION
-#define EXCLUSIVE_TRYLOCK_FUNCTION(...)
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
#endif
#ifndef SHARED_TRYLOCK_FUNCTION
-#define SHARED_TRYLOCK_FUNCTION(...)
+#define SHARED_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
#endif
#ifndef UNLOCK_FUNCTION
-#define UNLOCK_FUNCTION(...)
+#define UNLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
#endif
#ifndef NO_THREAD_SAFETY_ANALYSIS
-#define NO_THREAD_SAFETY_ANALYSIS
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+#endif
+
+#ifndef ASSERT_EXCLUSIVE_LOCK
+#define ASSERT_EXCLUSIVE_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+#endif
+
+#ifndef ASSERT_SHARED_LOCK
+#define ASSERT_SHARED_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
#endif
#endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
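
A minimal sketch of how these macros are consumed, assuming Clang with -Wthread-safety (under other compilers they expand to nothing): a data member is tied to its mutex with GUARDED_BY, and a helper that expects the lock to be held says so with EXCLUSIVE_LOCKS_REQUIRED, turning missed locking into a compile-time warning:

#include "port/port_stdcxx.h"
#include "port/thread_annotations.h"

class Counter {
 public:
  void Increment() {
    mu_.Lock();
    IncrementLocked();  // fine: mu_ is held here
    mu_.Unlock();
  }

 private:
  // Clang warns at any call site that does not hold mu_.
  void IncrementLocked() EXCLUSIVE_LOCKS_REQUIRED(mu_) { ++value_; }

  leveldb::port::Mutex mu_;
  int value_ GUARDED_BY(mu_) = 0;
};
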
diff --git a/src/leveldb/port/win/stdint.h b/src/leveldb/port/win/stdint.h
deleted file mode 100644
index 39edd0db13..0000000000
--- a/src/leveldb/port/win/stdint.h
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// MSVC didn't ship with this file until the 2010 version.
-
-#ifndef STORAGE_LEVELDB_PORT_WIN_STDINT_H_
-#define STORAGE_LEVELDB_PORT_WIN_STDINT_H_
-
-#if !defined(_MSC_VER)
-#error This file should only be included when compiling with MSVC.
-#endif
-
-// Define C99 equivalent types.
-typedef signed char int8_t;
-typedef signed short int16_t;
-typedef signed int int32_t;
-typedef signed long long int64_t;
-typedef unsigned char uint8_t;
-typedef unsigned short uint16_t;
-typedef unsigned int uint32_t;
-typedef unsigned long long uint64_t;
-
-#endif // STORAGE_LEVELDB_PORT_WIN_STDINT_H_
diff --git a/src/leveldb/table/block.cc b/src/leveldb/table/block.cc
index 43e402c9c0..2fe89eaa45 100644
--- a/src/leveldb/table/block.cc
+++ b/src/leveldb/table/block.cc
@@ -6,8 +6,10 @@
#include "table/block.h"
-#include <vector>
#include <algorithm>
+#include <cstdint>
+#include <vector>
+
#include "leveldb/comparator.h"
#include "table/format.h"
#include "util/coding.h"
@@ -27,7 +29,7 @@ Block::Block(const BlockContents& contents)
if (size_ < sizeof(uint32_t)) {
size_ = 0; // Error marker
} else {
- size_t max_restarts_allowed = (size_-sizeof(uint32_t)) / sizeof(uint32_t);
+ size_t max_restarts_allowed = (size_ - sizeof(uint32_t)) / sizeof(uint32_t);
if (NumRestarts() > max_restarts_allowed) {
// The size is too small for NumRestarts()
size_ = 0;
@@ -48,27 +50,26 @@ Block::~Block() {
// and the length of the value in "*shared", "*non_shared", and
// "*value_length", respectively. Will not dereference past "limit".
//
-// If any errors are detected, returns NULL. Otherwise, returns a
+// If any errors are detected, returns nullptr. Otherwise, returns a
// pointer to the key delta (just past the three decoded values).
static inline const char* DecodeEntry(const char* p, const char* limit,
- uint32_t* shared,
- uint32_t* non_shared,
+ uint32_t* shared, uint32_t* non_shared,
uint32_t* value_length) {
- if (limit - p < 3) return NULL;
- *shared = reinterpret_cast<const unsigned char*>(p)[0];
- *non_shared = reinterpret_cast<const unsigned char*>(p)[1];
- *value_length = reinterpret_cast<const unsigned char*>(p)[2];
+ if (limit - p < 3) return nullptr;
+ *shared = reinterpret_cast<const uint8_t*>(p)[0];
+ *non_shared = reinterpret_cast<const uint8_t*>(p)[1];
+ *value_length = reinterpret_cast<const uint8_t*>(p)[2];
if ((*shared | *non_shared | *value_length) < 128) {
// Fast path: all three values are encoded in one byte each
p += 3;
} else {
- if ((p = GetVarint32Ptr(p, limit, shared)) == NULL) return NULL;
- if ((p = GetVarint32Ptr(p, limit, non_shared)) == NULL) return NULL;
- if ((p = GetVarint32Ptr(p, limit, value_length)) == NULL) return NULL;
+ if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr;
+ if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr;
+ if ((p = GetVarint32Ptr(p, limit, value_length)) == nullptr) return nullptr;
}
if (static_cast<uint32_t>(limit - p) < (*non_shared + *value_length)) {
- return NULL;
+ return nullptr;
}
return p;
}
@@ -76,9 +77,9 @@ static inline const char* DecodeEntry(const char* p, const char* limit,
class Block::Iter : public Iterator {
private:
const Comparator* const comparator_;
- const char* const data_; // underlying block contents
- uint32_t const restarts_; // Offset of restart array (list of fixed32)
- uint32_t const num_restarts_; // Number of uint32_t entries in restart array
+ const char* const data_; // underlying block contents
+ uint32_t const restarts_; // Offset of restart array (list of fixed32)
+ uint32_t const num_restarts_; // Number of uint32_t entries in restart array
// current_ is offset in data_ of current entry. >= restarts_ if !Valid
uint32_t current_;
@@ -112,9 +113,7 @@ class Block::Iter : public Iterator {
}
public:
- Iter(const Comparator* comparator,
- const char* data,
- uint32_t restarts,
+ Iter(const Comparator* comparator, const char* data, uint32_t restarts,
uint32_t num_restarts)
: comparator_(comparator),
data_(data),
@@ -125,23 +124,23 @@ class Block::Iter : public Iterator {
assert(num_restarts_ > 0);
}
- virtual bool Valid() const { return current_ < restarts_; }
- virtual Status status() const { return status_; }
- virtual Slice key() const {
+ bool Valid() const override { return current_ < restarts_; }
+ Status status() const override { return status_; }
+ Slice key() const override {
assert(Valid());
return key_;
}
- virtual Slice value() const {
+ Slice value() const override {
assert(Valid());
return value_;
}
- virtual void Next() {
+ void Next() override {
assert(Valid());
ParseNextKey();
}
- virtual void Prev() {
+ void Prev() override {
assert(Valid());
// Scan backwards to a restart point before current_
@@ -162,7 +161,7 @@ class Block::Iter : public Iterator {
} while (ParseNextKey() && NextEntryOffset() < original);
}
- virtual void Seek(const Slice& target) {
+ void Seek(const Slice& target) override {
// Binary search in restart array to find the last restart point
// with a key < target
uint32_t left = 0;
@@ -171,10 +170,10 @@ class Block::Iter : public Iterator {
uint32_t mid = (left + right + 1) / 2;
uint32_t region_offset = GetRestartPoint(mid);
uint32_t shared, non_shared, value_length;
- const char* key_ptr = DecodeEntry(data_ + region_offset,
- data_ + restarts_,
- &shared, &non_shared, &value_length);
- if (key_ptr == NULL || (shared != 0)) {
+ const char* key_ptr =
+ DecodeEntry(data_ + region_offset, data_ + restarts_, &shared,
+ &non_shared, &value_length);
+ if (key_ptr == nullptr || (shared != 0)) {
CorruptionError();
return;
}
@@ -202,12 +201,12 @@ class Block::Iter : public Iterator {
}
}
- virtual void SeekToFirst() {
+ void SeekToFirst() override {
SeekToRestartPoint(0);
ParseNextKey();
}
- virtual void SeekToLast() {
+ void SeekToLast() override {
SeekToRestartPoint(num_restarts_ - 1);
while (ParseNextKey() && NextEntryOffset() < restarts_) {
// Keep skipping
@@ -237,7 +236,7 @@ class Block::Iter : public Iterator {
// Decode next entry
uint32_t shared, non_shared, value_length;
p = DecodeEntry(p, limit, &shared, &non_shared, &value_length);
- if (p == NULL || key_.size() < shared) {
+ if (p == nullptr || key_.size() < shared) {
CorruptionError();
return false;
} else {
@@ -253,7 +252,7 @@ class Block::Iter : public Iterator {
}
};
-Iterator* Block::NewIterator(const Comparator* cmp) {
+Iterator* Block::NewIterator(const Comparator* comparator) {
if (size_ < sizeof(uint32_t)) {
return NewErrorIterator(Status::Corruption("bad block contents"));
}
@@ -261,7 +260,7 @@ Iterator* Block::NewIterator(const Comparator* cmp) {
if (num_restarts == 0) {
return NewEmptyIterator();
} else {
- return new Iter(cmp, data_, restart_offset_, num_restarts);
+ return new Iter(comparator, data_, restart_offset_, num_restarts);
}
}
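
For reference, the write side of the entry format DecodeEntry parses above: three varint32s (shared key prefix length, non-shared key length, value length), then the key suffix, then the value; the fast path fires when all three lengths fit in one byte each. A simplified sketch using leveldb's PutVarint32 from util/coding.h:

#include <cstddef>
#include <cstdint>
#include <string>

#include "util/coding.h"

// Simplified illustration; BlockBuilder::Add does this for real (and also
// tracks restart points).
std::string EncodeBlockEntry(const std::string& prev_key, const std::string& key,
                             const std::string& value) {
  size_t shared = 0;  // length of the prefix shared with the previous key
  while (shared < prev_key.size() && shared < key.size() &&
         prev_key[shared] == key[shared]) {
    ++shared;
  }
  std::string entry;
  leveldb::PutVarint32(&entry, static_cast<uint32_t>(shared));
  leveldb::PutVarint32(&entry, static_cast<uint32_t>(key.size() - shared));
  leveldb::PutVarint32(&entry, static_cast<uint32_t>(value.size()));
  entry.append(key.data() + shared, key.size() - shared);  // key delta
  entry.append(value);
  return entry;
}
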
diff --git a/src/leveldb/table/block.h b/src/leveldb/table/block.h
index 2493eb9f9f..c8f1f7b436 100644
--- a/src/leveldb/table/block.h
+++ b/src/leveldb/table/block.h
@@ -7,6 +7,7 @@
#include <stddef.h>
#include <stdint.h>
+
#include "leveldb/iterator.h"
namespace leveldb {
@@ -19,24 +20,23 @@ class Block {
// Initialize the block with the specified contents.
explicit Block(const BlockContents& contents);
+ Block(const Block&) = delete;
+ Block& operator=(const Block&) = delete;
+
~Block();
size_t size() const { return size_; }
Iterator* NewIterator(const Comparator* comparator);
private:
+ class Iter;
+
uint32_t NumRestarts() const;
const char* data_;
size_t size_;
- uint32_t restart_offset_; // Offset in data_ of restart array
- bool owned_; // Block owns data_[]
-
- // No copying allowed
- Block(const Block&);
- void operator=(const Block&);
-
- class Iter;
+ uint32_t restart_offset_; // Offset in data_ of restart array
+ bool owned_; // Block owns data_[]
};
} // namespace leveldb
diff --git a/src/leveldb/table/block_builder.cc b/src/leveldb/table/block_builder.cc
index db660cd07c..919cff5c93 100644
--- a/src/leveldb/table/block_builder.cc
+++ b/src/leveldb/table/block_builder.cc
@@ -28,36 +28,35 @@
#include "table/block_builder.h"
-#include <algorithm>
#include <assert.h>
+
+#include <algorithm>
+
#include "leveldb/comparator.h"
-#include "leveldb/table_builder.h"
+#include "leveldb/options.h"
#include "util/coding.h"
namespace leveldb {
BlockBuilder::BlockBuilder(const Options* options)
- : options_(options),
- restarts_(),
- counter_(0),
- finished_(false) {
+ : options_(options), restarts_(), counter_(0), finished_(false) {
assert(options->block_restart_interval >= 1);
- restarts_.push_back(0); // First restart point is at offset 0
+ restarts_.push_back(0); // First restart point is at offset 0
}
void BlockBuilder::Reset() {
buffer_.clear();
restarts_.clear();
- restarts_.push_back(0); // First restart point is at offset 0
+ restarts_.push_back(0); // First restart point is at offset 0
counter_ = 0;
finished_ = false;
last_key_.clear();
}
size_t BlockBuilder::CurrentSizeEstimate() const {
- return (buffer_.size() + // Raw data buffer
- restarts_.size() * sizeof(uint32_t) + // Restart array
- sizeof(uint32_t)); // Restart array length
+ return (buffer_.size() + // Raw data buffer
+ restarts_.size() * sizeof(uint32_t) + // Restart array
+ sizeof(uint32_t)); // Restart array length
}
Slice BlockBuilder::Finish() {
@@ -74,7 +73,7 @@ void BlockBuilder::Add(const Slice& key, const Slice& value) {
Slice last_key_piece(last_key_);
assert(!finished_);
assert(counter_ <= options_->block_restart_interval);
- assert(buffer_.empty() // No values yet?
+ assert(buffer_.empty() // No values yet?
|| options_->comparator->Compare(key, last_key_piece) > 0);
size_t shared = 0;
if (counter_ < options_->block_restart_interval) {
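
A worked example of CurrentSizeEstimate() above, assuming the default behaviour of recording a restart point every block_restart_interval entries: with an interval of 16, a block holding 100 entries records restart offsets at entries 0, 16, ..., 96 (seven of them), so 4,000 bytes of encoded entries are estimated at 4,000 + 7 * 4 + 4 = 4,032 bytes once the restart array and its length word are counted.
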
diff --git a/src/leveldb/table/block_builder.h b/src/leveldb/table/block_builder.h
index 4fbcb33972..f91f5e6d47 100644
--- a/src/leveldb/table/block_builder.h
+++ b/src/leveldb/table/block_builder.h
@@ -5,9 +5,10 @@
#ifndef STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
#define STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
+#include <stdint.h>
+
#include <vector>
-#include <stdint.h>
#include "leveldb/slice.h"
namespace leveldb {
@@ -18,6 +19,9 @@ class BlockBuilder {
public:
explicit BlockBuilder(const Options* options);
+ BlockBuilder(const BlockBuilder&) = delete;
+ BlockBuilder& operator=(const BlockBuilder&) = delete;
+
// Reset the contents as if the BlockBuilder was just constructed.
void Reset();
@@ -35,21 +39,15 @@ class BlockBuilder {
size_t CurrentSizeEstimate() const;
// Return true iff no entries have been added since the last Reset()
- bool empty() const {
- return buffer_.empty();
- }
+ bool empty() const { return buffer_.empty(); }
private:
- const Options* options_;
- std::string buffer_; // Destination buffer
- std::vector<uint32_t> restarts_; // Restart points
- int counter_; // Number of entries emitted since restart
- bool finished_; // Has Finish() been called?
- std::string last_key_;
-
- // No copying allowed
- BlockBuilder(const BlockBuilder&);
- void operator=(const BlockBuilder&);
+ const Options* options_;
+ std::string buffer_; // Destination buffer
+ std::vector<uint32_t> restarts_; // Restart points
+ int counter_; // Number of entries emitted since restart
+ bool finished_; // Has Finish() been called?
+ std::string last_key_;
};
} // namespace leveldb
diff --git a/src/leveldb/table/filter_block.cc b/src/leveldb/table/filter_block.cc
index 1ed5134170..09ec0094bd 100644
--- a/src/leveldb/table/filter_block.cc
+++ b/src/leveldb/table/filter_block.cc
@@ -16,8 +16,7 @@ static const size_t kFilterBaseLg = 11;
static const size_t kFilterBase = 1 << kFilterBaseLg;
FilterBlockBuilder::FilterBlockBuilder(const FilterPolicy* policy)
- : policy_(policy) {
-}
+ : policy_(policy) {}
void FilterBlockBuilder::StartBlock(uint64_t block_offset) {
uint64_t filter_index = (block_offset / kFilterBase);
@@ -62,7 +61,7 @@ void FilterBlockBuilder::GenerateFilter() {
tmp_keys_.resize(num_keys);
for (size_t i = 0; i < num_keys; i++) {
const char* base = keys_.data() + start_[i];
- size_t length = start_[i+1] - start_[i];
+ size_t length = start_[i + 1] - start_[i];
tmp_keys_[i] = Slice(base, length);
}
@@ -77,14 +76,10 @@ void FilterBlockBuilder::GenerateFilter() {
FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
const Slice& contents)
- : policy_(policy),
- data_(NULL),
- offset_(NULL),
- num_(0),
- base_lg_(0) {
+ : policy_(policy), data_(nullptr), offset_(nullptr), num_(0), base_lg_(0) {
size_t n = contents.size();
if (n < 5) return; // 1 byte for base_lg_ and 4 for start of offset array
- base_lg_ = contents[n-1];
+ base_lg_ = contents[n - 1];
uint32_t last_word = DecodeFixed32(contents.data() + n - 5);
if (last_word > n - 5) return;
data_ = contents.data();
@@ -95,8 +90,8 @@ FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
uint64_t index = block_offset >> base_lg_;
if (index < num_) {
- uint32_t start = DecodeFixed32(offset_ + index*4);
- uint32_t limit = DecodeFixed32(offset_ + index*4 + 4);
+ uint32_t start = DecodeFixed32(offset_ + index * 4);
+ uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4);
if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) {
Slice filter = Slice(data_ + start, limit - start);
return policy_->KeyMayMatch(key, filter);
@@ -108,4 +103,4 @@ bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
return true; // Errors are treated as potential matches
}
-}
+} // namespace leveldb
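
The mapping KeyMayMatch uses above, in numbers: with kFilterBaseLg == 11 every filter covers 2 KiB worth of data-block offsets, so a lookup simply shifts the block offset right by base_lg_. A tiny sketch (the offsets mirror the ones exercised in filter_block_test.cc below):

#include <cstdint>
#include <cstdio>

int main() {
  const int kFilterBaseLg = 11;  // one filter per 2048 bytes of offsets
  const uint64_t offsets[] = {0, 2000, 3100, 4100, 9000};
  for (uint64_t block_offset : offsets) {
    // Prints filter indices 0, 0, 1, 2 and 4 respectively.
    std::printf("offset %llu -> filter %llu\n",
                static_cast<unsigned long long>(block_offset),
                static_cast<unsigned long long>(block_offset >> kFilterBaseLg));
  }
  return 0;
}
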
diff --git a/src/leveldb/table/filter_block.h b/src/leveldb/table/filter_block.h
index c67d010bd1..73b5399249 100644
--- a/src/leveldb/table/filter_block.h
+++ b/src/leveldb/table/filter_block.h
@@ -11,8 +11,10 @@
#include <stddef.h>
#include <stdint.h>
+
#include <string>
#include <vector>
+
#include "leveldb/slice.h"
#include "util/hash.h"
@@ -30,6 +32,9 @@ class FilterBlockBuilder {
public:
explicit FilterBlockBuilder(const FilterPolicy*);
+ FilterBlockBuilder(const FilterBlockBuilder&) = delete;
+ FilterBlockBuilder& operator=(const FilterBlockBuilder&) = delete;
+
void StartBlock(uint64_t block_offset);
void AddKey(const Slice& key);
Slice Finish();
@@ -38,20 +43,16 @@ class FilterBlockBuilder {
void GenerateFilter();
const FilterPolicy* policy_;
- std::string keys_; // Flattened key contents
- std::vector<size_t> start_; // Starting index in keys_ of each key
- std::string result_; // Filter data computed so far
- std::vector<Slice> tmp_keys_; // policy_->CreateFilter() argument
+ std::string keys_; // Flattened key contents
+ std::vector<size_t> start_; // Starting index in keys_ of each key
+ std::string result_; // Filter data computed so far
+ std::vector<Slice> tmp_keys_; // policy_->CreateFilter() argument
std::vector<uint32_t> filter_offsets_;
-
- // No copying allowed
- FilterBlockBuilder(const FilterBlockBuilder&);
- void operator=(const FilterBlockBuilder&);
};
class FilterBlockReader {
public:
- // REQUIRES: "contents" and *policy must stay live while *this is live.
+ // REQUIRES: "contents" and *policy must stay live while *this is live.
FilterBlockReader(const FilterPolicy* policy, const Slice& contents);
bool KeyMayMatch(uint64_t block_offset, const Slice& key);
@@ -63,6 +64,6 @@ class FilterBlockReader {
size_t base_lg_; // Encoding parameter (see kFilterBaseLg in .cc file)
};
-}
+} // namespace leveldb
#endif // STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_
diff --git a/src/leveldb/table/filter_block_test.cc b/src/leveldb/table/filter_block_test.cc
index 8c4a4741f2..8b33bbdd18 100644
--- a/src/leveldb/table/filter_block_test.cc
+++ b/src/leveldb/table/filter_block_test.cc
@@ -16,18 +16,16 @@ namespace leveldb {
// For testing: emit an array with one hash value per key
class TestHashFilter : public FilterPolicy {
public:
- virtual const char* Name() const {
- return "TestHashFilter";
- }
+ const char* Name() const override { return "TestHashFilter"; }
- virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
+ void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
for (int i = 0; i < n; i++) {
uint32_t h = Hash(keys[i].data(), keys[i].size(), 1);
PutFixed32(dst, h);
}
}
- virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const {
+ bool KeyMayMatch(const Slice& key, const Slice& filter) const override {
uint32_t h = Hash(key.data(), key.size(), 1);
for (size_t i = 0; i + 4 <= filter.size(); i += 4) {
if (h == DecodeFixed32(filter.data() + i)) {
@@ -69,8 +67,8 @@ TEST(FilterBlockTest, SingleChunk) {
ASSERT_TRUE(reader.KeyMayMatch(100, "box"));
ASSERT_TRUE(reader.KeyMayMatch(100, "hello"));
ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
- ASSERT_TRUE(! reader.KeyMayMatch(100, "missing"));
- ASSERT_TRUE(! reader.KeyMayMatch(100, "other"));
+ ASSERT_TRUE(!reader.KeyMayMatch(100, "missing"));
+ ASSERT_TRUE(!reader.KeyMayMatch(100, "other"));
}
TEST(FilterBlockTest, MultiChunk) {
@@ -99,30 +97,28 @@ TEST(FilterBlockTest, MultiChunk) {
// Check first filter
ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
ASSERT_TRUE(reader.KeyMayMatch(2000, "bar"));
- ASSERT_TRUE(! reader.KeyMayMatch(0, "box"));
- ASSERT_TRUE(! reader.KeyMayMatch(0, "hello"));
+ ASSERT_TRUE(!reader.KeyMayMatch(0, "box"));
+ ASSERT_TRUE(!reader.KeyMayMatch(0, "hello"));
// Check second filter
ASSERT_TRUE(reader.KeyMayMatch(3100, "box"));
- ASSERT_TRUE(! reader.KeyMayMatch(3100, "foo"));
- ASSERT_TRUE(! reader.KeyMayMatch(3100, "bar"));
- ASSERT_TRUE(! reader.KeyMayMatch(3100, "hello"));
+ ASSERT_TRUE(!reader.KeyMayMatch(3100, "foo"));
+ ASSERT_TRUE(!reader.KeyMayMatch(3100, "bar"));
+ ASSERT_TRUE(!reader.KeyMayMatch(3100, "hello"));
// Check third filter (empty)
- ASSERT_TRUE(! reader.KeyMayMatch(4100, "foo"));
- ASSERT_TRUE(! reader.KeyMayMatch(4100, "bar"));
- ASSERT_TRUE(! reader.KeyMayMatch(4100, "box"));
- ASSERT_TRUE(! reader.KeyMayMatch(4100, "hello"));
+ ASSERT_TRUE(!reader.KeyMayMatch(4100, "foo"));
+ ASSERT_TRUE(!reader.KeyMayMatch(4100, "bar"));
+ ASSERT_TRUE(!reader.KeyMayMatch(4100, "box"));
+ ASSERT_TRUE(!reader.KeyMayMatch(4100, "hello"));
// Check last filter
ASSERT_TRUE(reader.KeyMayMatch(9000, "box"));
ASSERT_TRUE(reader.KeyMayMatch(9000, "hello"));
- ASSERT_TRUE(! reader.KeyMayMatch(9000, "foo"));
- ASSERT_TRUE(! reader.KeyMayMatch(9000, "bar"));
+ ASSERT_TRUE(!reader.KeyMayMatch(9000, "foo"));
+ ASSERT_TRUE(!reader.KeyMayMatch(9000, "bar"));
}
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/table/format.cc b/src/leveldb/table/format.cc
index 285e1c0de3..a3d67de2e4 100644
--- a/src/leveldb/table/format.cc
+++ b/src/leveldb/table/format.cc
@@ -21,8 +21,7 @@ void BlockHandle::EncodeTo(std::string* dst) const {
}
Status BlockHandle::DecodeFrom(Slice* input) {
- if (GetVarint64(input, &offset_) &&
- GetVarint64(input, &size_)) {
+ if (GetVarint64(input, &offset_) && GetVarint64(input, &size_)) {
return Status::OK();
} else {
return Status::Corruption("bad block handle");
@@ -62,10 +61,8 @@ Status Footer::DecodeFrom(Slice* input) {
return result;
}
-Status ReadBlock(RandomAccessFile* file,
- const ReadOptions& options,
- const BlockHandle& handle,
- BlockContents* result) {
+Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
+ const BlockHandle& handle, BlockContents* result) {
result->data = Slice();
result->cachable = false;
result->heap_allocated = false;
@@ -86,7 +83,7 @@ Status ReadBlock(RandomAccessFile* file,
}
// Check the crc of the type and the block contents
- const char* data = contents.data(); // Pointer to where Read put the data
+ const char* data = contents.data(); // Pointer to where Read put the data
if (options.verify_checksums) {
const uint32_t crc = crc32c::Unmask(DecodeFixed32(data + n + 1));
const uint32_t actual = crc32c::Value(data, n + 1);
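
A read-side sketch of the 5-byte trailer being verified above: each physical block on disk is [contents][1-byte compression type][4-byte masked crc32c], and the stored CRC covers the contents plus the type byte. This relies only on leveldb's existing crc32c and coding helpers:

#include <cstddef>
#include <cstdint>

#include "util/coding.h"
#include "util/crc32c.h"

// n is the length of the block contents; data must have n + 5 readable bytes.
bool TrailerCrcMatches(const char* data, size_t n) {
  const uint32_t stored =
      leveldb::crc32c::Unmask(leveldb::DecodeFixed32(data + n + 1));
  const uint32_t actual = leveldb::crc32c::Value(data, n + 1);  // contents + type
  return stored == actual;
}
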
diff --git a/src/leveldb/table/format.h b/src/leveldb/table/format.h
index 6c0b80c017..e49dfdc047 100644
--- a/src/leveldb/table/format.h
+++ b/src/leveldb/table/format.h
@@ -5,8 +5,10 @@
#ifndef STORAGE_LEVELDB_TABLE_FORMAT_H_
#define STORAGE_LEVELDB_TABLE_FORMAT_H_
-#include <string>
#include <stdint.h>
+
+#include <string>
+
#include "leveldb/slice.h"
#include "leveldb/status.h"
#include "leveldb/table_builder.h"
@@ -21,6 +23,9 @@ struct ReadOptions;
// block or a meta block.
class BlockHandle {
public:
+ // Maximum encoding length of a BlockHandle
+ enum { kMaxEncodedLength = 10 + 10 };
+
BlockHandle();
// The offset of the block in the file.
@@ -34,9 +39,6 @@ class BlockHandle {
void EncodeTo(std::string* dst) const;
Status DecodeFrom(Slice* input);
- // Maximum encoding length of a BlockHandle
- enum { kMaxEncodedLength = 10 + 10 };
-
private:
uint64_t offset_;
uint64_t size_;
@@ -46,30 +48,24 @@ class BlockHandle {
// end of every table file.
class Footer {
public:
- Footer() { }
+ // Encoded length of a Footer. Note that the serialization of a
+ // Footer will always occupy exactly this many bytes. It consists
+ // of two block handles and a magic number.
+ enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };
+
+ Footer() = default;
// The block handle for the metaindex block of the table
const BlockHandle& metaindex_handle() const { return metaindex_handle_; }
void set_metaindex_handle(const BlockHandle& h) { metaindex_handle_ = h; }
// The block handle for the index block of the table
- const BlockHandle& index_handle() const {
- return index_handle_;
- }
- void set_index_handle(const BlockHandle& h) {
- index_handle_ = h;
- }
+ const BlockHandle& index_handle() const { return index_handle_; }
+ void set_index_handle(const BlockHandle& h) { index_handle_ = h; }
void EncodeTo(std::string* dst) const;
Status DecodeFrom(Slice* input);
- // Encoded length of a Footer. Note that the serialization of a
- // Footer will always occupy exactly this many bytes. It consists
- // of two block handles and a magic number.
- enum {
- kEncodedLength = 2*BlockHandle::kMaxEncodedLength + 8
- };
-
private:
BlockHandle metaindex_handle_;
BlockHandle index_handle_;
@@ -91,17 +87,13 @@ struct BlockContents {
// Read the block identified by "handle" from "file". On failure
// return non-OK. On success fill *result and return OK.
-extern Status ReadBlock(RandomAccessFile* file,
- const ReadOptions& options,
- const BlockHandle& handle,
- BlockContents* result);
+Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
+ const BlockHandle& handle, BlockContents* result);
// Implementation details follow. Clients should ignore,
inline BlockHandle::BlockHandle()
- : offset_(~static_cast<uint64_t>(0)),
- size_(~static_cast<uint64_t>(0)) {
-}
+ : offset_(~static_cast<uint64_t>(0)), size_(~static_cast<uint64_t>(0)) {}
} // namespace leveldb
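
The two constants moved to the top of their classes above, in numbers: a varint64 occupies at most 10 bytes, so a BlockHandle encodes to at most 20, and the fixed-size footer is two (padded) handles plus an 8-byte magic number. A quick sanity check, assuming the in-tree header is includable:

#include "table/format.h"

static_assert(leveldb::BlockHandle::kMaxEncodedLength == 20,
              "two max-length varint64s");
static_assert(leveldb::Footer::kEncodedLength == 2 * 20 + 8,
              "two padded block handles plus the magic number");
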
diff --git a/src/leveldb/table/iterator.cc b/src/leveldb/table/iterator.cc
index 3d1c87fdec..dfef083d4d 100644
--- a/src/leveldb/table/iterator.cc
+++ b/src/leveldb/table/iterator.cc
@@ -7,58 +7,67 @@
namespace leveldb {
Iterator::Iterator() {
- cleanup_.function = NULL;
- cleanup_.next = NULL;
+ cleanup_head_.function = nullptr;
+ cleanup_head_.next = nullptr;
}
Iterator::~Iterator() {
- if (cleanup_.function != NULL) {
- (*cleanup_.function)(cleanup_.arg1, cleanup_.arg2);
- for (Cleanup* c = cleanup_.next; c != NULL; ) {
- (*c->function)(c->arg1, c->arg2);
- Cleanup* next = c->next;
- delete c;
- c = next;
+ if (!cleanup_head_.IsEmpty()) {
+ cleanup_head_.Run();
+ for (CleanupNode* node = cleanup_head_.next; node != nullptr;) {
+ node->Run();
+ CleanupNode* next_node = node->next;
+ delete node;
+ node = next_node;
}
}
}
void Iterator::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
- assert(func != NULL);
- Cleanup* c;
- if (cleanup_.function == NULL) {
- c = &cleanup_;
+ assert(func != nullptr);
+ CleanupNode* node;
+ if (cleanup_head_.IsEmpty()) {
+ node = &cleanup_head_;
} else {
- c = new Cleanup;
- c->next = cleanup_.next;
- cleanup_.next = c;
+ node = new CleanupNode();
+ node->next = cleanup_head_.next;
+ cleanup_head_.next = node;
}
- c->function = func;
- c->arg1 = arg1;
- c->arg2 = arg2;
+ node->function = func;
+ node->arg1 = arg1;
+ node->arg2 = arg2;
}
namespace {
+
class EmptyIterator : public Iterator {
public:
- EmptyIterator(const Status& s) : status_(s) { }
- virtual bool Valid() const { return false; }
- virtual void Seek(const Slice& target) { }
- virtual void SeekToFirst() { }
- virtual void SeekToLast() { }
- virtual void Next() { assert(false); }
- virtual void Prev() { assert(false); }
- Slice key() const { assert(false); return Slice(); }
- Slice value() const { assert(false); return Slice(); }
- virtual Status status() const { return status_; }
+ EmptyIterator(const Status& s) : status_(s) {}
+ ~EmptyIterator() override = default;
+
+ bool Valid() const override { return false; }
+ void Seek(const Slice& target) override {}
+ void SeekToFirst() override {}
+ void SeekToLast() override {}
+ void Next() override { assert(false); }
+ void Prev() override { assert(false); }
+ Slice key() const override {
+ assert(false);
+ return Slice();
+ }
+ Slice value() const override {
+ assert(false);
+ return Slice();
+ }
+ Status status() const override { return status_; }
+
private:
Status status_;
};
-} // namespace
-Iterator* NewEmptyIterator() {
- return new EmptyIterator(Status::OK());
-}
+} // anonymous namespace
+
+Iterator* NewEmptyIterator() { return new EmptyIterator(Status::OK()); }
Iterator* NewErrorIterator(const Status& status) {
return new EmptyIterator(status);
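
A minimal usage sketch of the cleanup list reworked above: callers attach (function, arg1, arg2) triples to an iterator and all of them run when the iterator is destroyed. Table::BlockReader, later in this diff, relies on exactly this to delete or release the block backing the iterator:

#include <cstdio>

#include "leveldb/iterator.h"

namespace {

void NoteCleanup(void* arg1, void* /*arg2*/) {
  std::printf("cleaning up %s\n", static_cast<const char*>(arg1));
}

}  // anonymous namespace

// Both callbacks fire when *it is deleted.
void AttachCleanups(leveldb::Iterator* it) {
  static char block_a[] = "block A";
  static char block_b[] = "block B";
  it->RegisterCleanup(&NoteCleanup, block_a, nullptr);
  it->RegisterCleanup(&NoteCleanup, block_b, nullptr);
}
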
diff --git a/src/leveldb/table/iterator_wrapper.h b/src/leveldb/table/iterator_wrapper.h
index f410c3fabe..c230572529 100644
--- a/src/leveldb/table/iterator_wrapper.h
+++ b/src/leveldb/table/iterator_wrapper.h
@@ -16,10 +16,8 @@ namespace leveldb {
// cache locality.
class IteratorWrapper {
public:
- IteratorWrapper(): iter_(NULL), valid_(false) { }
- explicit IteratorWrapper(Iterator* iter): iter_(NULL) {
- Set(iter);
- }
+ IteratorWrapper() : iter_(nullptr), valid_(false) {}
+ explicit IteratorWrapper(Iterator* iter) : iter_(nullptr) { Set(iter); }
~IteratorWrapper() { delete iter_; }
Iterator* iter() const { return iter_; }
@@ -28,25 +26,53 @@ class IteratorWrapper {
void Set(Iterator* iter) {
delete iter_;
iter_ = iter;
- if (iter_ == NULL) {
+ if (iter_ == nullptr) {
valid_ = false;
} else {
Update();
}
}
-
// Iterator interface methods
- bool Valid() const { return valid_; }
- Slice key() const { assert(Valid()); return key_; }
- Slice value() const { assert(Valid()); return iter_->value(); }
- // Methods below require iter() != NULL
- Status status() const { assert(iter_); return iter_->status(); }
- void Next() { assert(iter_); iter_->Next(); Update(); }
- void Prev() { assert(iter_); iter_->Prev(); Update(); }
- void Seek(const Slice& k) { assert(iter_); iter_->Seek(k); Update(); }
- void SeekToFirst() { assert(iter_); iter_->SeekToFirst(); Update(); }
- void SeekToLast() { assert(iter_); iter_->SeekToLast(); Update(); }
+ bool Valid() const { return valid_; }
+ Slice key() const {
+ assert(Valid());
+ return key_;
+ }
+ Slice value() const {
+ assert(Valid());
+ return iter_->value();
+ }
+ // Methods below require iter() != nullptr
+ Status status() const {
+ assert(iter_);
+ return iter_->status();
+ }
+ void Next() {
+ assert(iter_);
+ iter_->Next();
+ Update();
+ }
+ void Prev() {
+ assert(iter_);
+ iter_->Prev();
+ Update();
+ }
+ void Seek(const Slice& k) {
+ assert(iter_);
+ iter_->Seek(k);
+ Update();
+ }
+ void SeekToFirst() {
+ assert(iter_);
+ iter_->SeekToFirst();
+ Update();
+ }
+ void SeekToLast() {
+ assert(iter_);
+ iter_->SeekToLast();
+ Update();
+ }
private:
void Update() {
diff --git a/src/leveldb/table/merger.cc b/src/leveldb/table/merger.cc
index 2dde4dc21f..76441b1cc2 100644
--- a/src/leveldb/table/merger.cc
+++ b/src/leveldb/table/merger.cc
@@ -17,22 +17,18 @@ class MergingIterator : public Iterator {
: comparator_(comparator),
children_(new IteratorWrapper[n]),
n_(n),
- current_(NULL),
+ current_(nullptr),
direction_(kForward) {
for (int i = 0; i < n; i++) {
children_[i].Set(children[i]);
}
}
- virtual ~MergingIterator() {
- delete[] children_;
- }
+ ~MergingIterator() override { delete[] children_; }
- virtual bool Valid() const {
- return (current_ != NULL);
- }
+ bool Valid() const override { return (current_ != nullptr); }
- virtual void SeekToFirst() {
+ void SeekToFirst() override {
for (int i = 0; i < n_; i++) {
children_[i].SeekToFirst();
}
@@ -40,7 +36,7 @@ class MergingIterator : public Iterator {
direction_ = kForward;
}
- virtual void SeekToLast() {
+ void SeekToLast() override {
for (int i = 0; i < n_; i++) {
children_[i].SeekToLast();
}
@@ -48,7 +44,7 @@ class MergingIterator : public Iterator {
direction_ = kReverse;
}
- virtual void Seek(const Slice& target) {
+ void Seek(const Slice& target) override {
for (int i = 0; i < n_; i++) {
children_[i].Seek(target);
}
@@ -56,7 +52,7 @@ class MergingIterator : public Iterator {
direction_ = kForward;
}
- virtual void Next() {
+ void Next() override {
assert(Valid());
// Ensure that all children are positioned after key().
@@ -82,7 +78,7 @@ class MergingIterator : public Iterator {
FindSmallest();
}
- virtual void Prev() {
+ void Prev() override {
assert(Valid());
// Ensure that all children are positioned before key().
@@ -111,17 +107,17 @@ class MergingIterator : public Iterator {
FindLargest();
}
- virtual Slice key() const {
+ Slice key() const override {
assert(Valid());
return current_->key();
}
- virtual Slice value() const {
+ Slice value() const override {
assert(Valid());
return current_->value();
}
- virtual Status status() const {
+ Status status() const override {
Status status;
for (int i = 0; i < n_; i++) {
status = children_[i].status();
@@ -133,6 +129,9 @@ class MergingIterator : public Iterator {
}
private:
+ // Which direction is the iterator moving?
+ enum Direction { kForward, kReverse };
+
void FindSmallest();
void FindLargest();
@@ -143,21 +142,15 @@ class MergingIterator : public Iterator {
IteratorWrapper* children_;
int n_;
IteratorWrapper* current_;
-
- // Which direction is the iterator moving?
- enum Direction {
- kForward,
- kReverse
- };
Direction direction_;
};
void MergingIterator::FindSmallest() {
- IteratorWrapper* smallest = NULL;
+ IteratorWrapper* smallest = nullptr;
for (int i = 0; i < n_; i++) {
IteratorWrapper* child = &children_[i];
if (child->Valid()) {
- if (smallest == NULL) {
+ if (smallest == nullptr) {
smallest = child;
} else if (comparator_->Compare(child->key(), smallest->key()) < 0) {
smallest = child;
@@ -168,11 +161,11 @@ void MergingIterator::FindSmallest() {
}
void MergingIterator::FindLargest() {
- IteratorWrapper* largest = NULL;
- for (int i = n_-1; i >= 0; i--) {
+ IteratorWrapper* largest = nullptr;
+ for (int i = n_ - 1; i >= 0; i--) {
IteratorWrapper* child = &children_[i];
if (child->Valid()) {
- if (largest == NULL) {
+ if (largest == nullptr) {
largest = child;
} else if (comparator_->Compare(child->key(), largest->key()) > 0) {
largest = child;
@@ -183,14 +176,15 @@ void MergingIterator::FindLargest() {
}
} // namespace
-Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n) {
+Iterator* NewMergingIterator(const Comparator* comparator, Iterator** children,
+ int n) {
assert(n >= 0);
if (n == 0) {
return NewEmptyIterator();
} else if (n == 1) {
- return list[0];
+ return children[0];
} else {
- return new MergingIterator(cmp, list, n);
+ return new MergingIterator(comparator, children, n);
}
}
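
The selection loop above in miniature, as a standalone sketch: among N sorted children, the merged iterator's current child is simply the one whose key compares smallest, with ties resolved in favour of the earlier child — which is why a key present in K children is yielded K times.

#include <cstddef>
#include <string>
#include <vector>

// Returns the index of the child whose current element is smallest, or
// children.size() if every child is exhausted (the current_ == nullptr case).
size_t FindSmallestChild(const std::vector<std::vector<std::string>>& children,
                         const std::vector<size_t>& positions) {
  size_t smallest = children.size();
  for (size_t i = 0; i < children.size(); ++i) {
    if (positions[i] >= children[i].size()) continue;  // this child is done
    if (smallest == children.size() ||
        children[i][positions[i]] < children[smallest][positions[smallest]]) {
      smallest = i;
    }
  }
  return smallest;
}
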
diff --git a/src/leveldb/table/merger.h b/src/leveldb/table/merger.h
index 91ddd80faa..41cedc5254 100644
--- a/src/leveldb/table/merger.h
+++ b/src/leveldb/table/merger.h
@@ -18,8 +18,8 @@ class Iterator;
// key is present in K child iterators, it will be yielded K times.
//
// REQUIRES: n >= 0
-extern Iterator* NewMergingIterator(
- const Comparator* comparator, Iterator** children, int n);
+Iterator* NewMergingIterator(const Comparator* comparator, Iterator** children,
+ int n);
} // namespace leveldb
diff --git a/src/leveldb/table/table.cc b/src/leveldb/table/table.cc
index decf8082cc..b07bc88c7e 100644
--- a/src/leveldb/table/table.cc
+++ b/src/leveldb/table/table.cc
@@ -20,7 +20,7 @@ namespace leveldb {
struct Table::Rep {
~Rep() {
delete filter;
- delete [] filter_data;
+ delete[] filter_data;
delete index_block;
}
@@ -35,11 +35,9 @@ struct Table::Rep {
Block* index_block;
};
-Status Table::Open(const Options& options,
- RandomAccessFile* file,
- uint64_t size,
- Table** table) {
- *table = NULL;
+Status Table::Open(const Options& options, RandomAccessFile* file,
+ uint64_t size, Table** table) {
+ *table = nullptr;
if (size < Footer::kEncodedLength) {
return Status::Corruption("file is too short to be an sstable");
}
@@ -55,41 +53,36 @@ Status Table::Open(const Options& options,
if (!s.ok()) return s;
// Read the index block
- BlockContents contents;
- Block* index_block = NULL;
+ BlockContents index_block_contents;
if (s.ok()) {
ReadOptions opt;
if (options.paranoid_checks) {
opt.verify_checksums = true;
}
- s = ReadBlock(file, opt, footer.index_handle(), &contents);
- if (s.ok()) {
- index_block = new Block(contents);
- }
+ s = ReadBlock(file, opt, footer.index_handle(), &index_block_contents);
}
if (s.ok()) {
// We've successfully read the footer and the index block: we're
// ready to serve requests.
+ Block* index_block = new Block(index_block_contents);
Rep* rep = new Table::Rep;
rep->options = options;
rep->file = file;
rep->metaindex_handle = footer.metaindex_handle();
rep->index_block = index_block;
rep->cache_id = (options.block_cache ? options.block_cache->NewId() : 0);
- rep->filter_data = NULL;
- rep->filter = NULL;
+ rep->filter_data = nullptr;
+ rep->filter = nullptr;
*table = new Table(rep);
(*table)->ReadMeta(footer);
- } else {
- delete index_block;
}
return s;
}
void Table::ReadMeta(const Footer& footer) {
- if (rep_->options.filter_policy == NULL) {
+ if (rep_->options.filter_policy == nullptr) {
return; // Do not need any metadata
}
@@ -135,14 +128,12 @@ void Table::ReadFilter(const Slice& filter_handle_value) {
return;
}
if (block.heap_allocated) {
- rep_->filter_data = block.data.data(); // Will need to delete later
+ rep_->filter_data = block.data.data(); // Will need to delete later
}
rep_->filter = new FilterBlockReader(rep_->options.filter_policy, block.data);
}
-Table::~Table() {
- delete rep_;
-}
+Table::~Table() { delete rep_; }
static void DeleteBlock(void* arg, void* ignored) {
delete reinterpret_cast<Block*>(arg);
@@ -161,13 +152,12 @@ static void ReleaseBlock(void* arg, void* h) {
// Convert an index iterator value (i.e., an encoded BlockHandle)
// into an iterator over the contents of the corresponding block.
-Iterator* Table::BlockReader(void* arg,
- const ReadOptions& options,
+Iterator* Table::BlockReader(void* arg, const ReadOptions& options,
const Slice& index_value) {
Table* table = reinterpret_cast<Table*>(arg);
Cache* block_cache = table->rep_->options.block_cache;
- Block* block = NULL;
- Cache::Handle* cache_handle = NULL;
+ Block* block = nullptr;
+ Cache::Handle* cache_handle = nullptr;
BlockHandle handle;
Slice input = index_value;
@@ -177,21 +167,21 @@ Iterator* Table::BlockReader(void* arg,
if (s.ok()) {
BlockContents contents;
- if (block_cache != NULL) {
+ if (block_cache != nullptr) {
char cache_key_buffer[16];
EncodeFixed64(cache_key_buffer, table->rep_->cache_id);
- EncodeFixed64(cache_key_buffer+8, handle.offset());
+ EncodeFixed64(cache_key_buffer + 8, handle.offset());
Slice key(cache_key_buffer, sizeof(cache_key_buffer));
cache_handle = block_cache->Lookup(key);
- if (cache_handle != NULL) {
+ if (cache_handle != nullptr) {
block = reinterpret_cast<Block*>(block_cache->Value(cache_handle));
} else {
s = ReadBlock(table->rep_->file, options, handle, &contents);
if (s.ok()) {
block = new Block(contents);
if (contents.cachable && options.fill_cache) {
- cache_handle = block_cache->Insert(
- key, block, block->size(), &DeleteCachedBlock);
+ cache_handle = block_cache->Insert(key, block, block->size(),
+ &DeleteCachedBlock);
}
}
}
@@ -204,10 +194,10 @@ Iterator* Table::BlockReader(void* arg,
}
Iterator* iter;
- if (block != NULL) {
+ if (block != nullptr) {
iter = block->NewIterator(table->rep_->options.comparator);
- if (cache_handle == NULL) {
- iter->RegisterCleanup(&DeleteBlock, block, NULL);
+ if (cache_handle == nullptr) {
+ iter->RegisterCleanup(&DeleteBlock, block, nullptr);
} else {
iter->RegisterCleanup(&ReleaseBlock, block_cache, cache_handle);
}
@@ -223,9 +213,9 @@ Iterator* Table::NewIterator(const ReadOptions& options) const {
&Table::BlockReader, const_cast<Table*>(this), options);
}
-Status Table::InternalGet(const ReadOptions& options, const Slice& k,
- void* arg,
- void (*saver)(void*, const Slice&, const Slice&)) {
+Status Table::InternalGet(const ReadOptions& options, const Slice& k, void* arg,
+ void (*handle_result)(void*, const Slice&,
+ const Slice&)) {
Status s;
Iterator* iiter = rep_->index_block->NewIterator(rep_->options.comparator);
iiter->Seek(k);
@@ -233,15 +223,14 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k,
Slice handle_value = iiter->value();
FilterBlockReader* filter = rep_->filter;
BlockHandle handle;
- if (filter != NULL &&
- handle.DecodeFrom(&handle_value).ok() &&
+ if (filter != nullptr && handle.DecodeFrom(&handle_value).ok() &&
!filter->KeyMayMatch(handle.offset(), k)) {
// Not found
} else {
Iterator* block_iter = BlockReader(this, options, iiter->value());
block_iter->Seek(k);
if (block_iter->Valid()) {
- (*saver)(arg, block_iter->key(), block_iter->value());
+ (*handle_result)(arg, block_iter->key(), block_iter->value());
}
s = block_iter->status();
delete block_iter;
@@ -254,7 +243,6 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k,
return s;
}
-
uint64_t Table::ApproximateOffsetOf(const Slice& key) const {
Iterator* index_iter =
rep_->index_block->NewIterator(rep_->options.comparator);
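
A sketch of the 16-byte block-cache key assembled in BlockReader above: the table's cache id followed by the block's file offset, both fixed64-encoded, which keeps blocks from different tables apart in the shared Cache. EncodeFixed64 is leveldb's helper from util/coding.h:

#include <cstdint>

#include "leveldb/slice.h"
#include "util/coding.h"

// buffer must outlive the returned Slice; BlockReader keeps it on the stack
// just long enough for the cache lookup or insert.
leveldb::Slice MakeBlockCacheKey(uint64_t cache_id, uint64_t block_offset,
                                 char* buffer /* at least 16 bytes */) {
  leveldb::EncodeFixed64(buffer, cache_id);
  leveldb::EncodeFixed64(buffer + 8, block_offset);
  return leveldb::Slice(buffer, 16);
}
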
diff --git a/src/leveldb/table/table_builder.cc b/src/leveldb/table/table_builder.cc
index 62002c84f2..278febf94f 100644
--- a/src/leveldb/table/table_builder.cc
+++ b/src/leveldb/table/table_builder.cc
@@ -5,6 +5,7 @@
#include "leveldb/table_builder.h"
#include <assert.h>
+
#include "leveldb/comparator.h"
#include "leveldb/env.h"
#include "leveldb/filter_policy.h"
@@ -18,6 +19,22 @@
namespace leveldb {
struct TableBuilder::Rep {
+ Rep(const Options& opt, WritableFile* f)
+ : options(opt),
+ index_block_options(opt),
+ file(f),
+ offset(0),
+ data_block(&options),
+ index_block(&index_block_options),
+ num_entries(0),
+ closed(false),
+ filter_block(opt.filter_policy == nullptr
+ ? nullptr
+ : new FilterBlockBuilder(opt.filter_policy)),
+ pending_index_entry(false) {
+ index_block_options.block_restart_interval = 1;
+ }
+
Options options;
Options index_block_options;
WritableFile* file;
@@ -27,7 +44,7 @@ struct TableBuilder::Rep {
BlockBuilder index_block;
std::string last_key;
int64_t num_entries;
- bool closed; // Either Finish() or Abandon() has been called.
+ bool closed; // Either Finish() or Abandon() has been called.
FilterBlockBuilder* filter_block;
// We do not emit the index entry for a block until we have seen the
@@ -43,26 +60,11 @@ struct TableBuilder::Rep {
BlockHandle pending_handle; // Handle to add to index block
std::string compressed_output;
-
- Rep(const Options& opt, WritableFile* f)
- : options(opt),
- index_block_options(opt),
- file(f),
- offset(0),
- data_block(&options),
- index_block(&index_block_options),
- num_entries(0),
- closed(false),
- filter_block(opt.filter_policy == NULL ? NULL
- : new FilterBlockBuilder(opt.filter_policy)),
- pending_index_entry(false) {
- index_block_options.block_restart_interval = 1;
- }
};
TableBuilder::TableBuilder(const Options& options, WritableFile* file)
: rep_(new Rep(options, file)) {
- if (rep_->filter_block != NULL) {
+ if (rep_->filter_block != nullptr) {
rep_->filter_block->StartBlock(0);
}
}
@@ -106,7 +108,7 @@ void TableBuilder::Add(const Slice& key, const Slice& value) {
r->pending_index_entry = false;
}
- if (r->filter_block != NULL) {
+ if (r->filter_block != nullptr) {
r->filter_block->AddKey(key);
}
@@ -131,7 +133,7 @@ void TableBuilder::Flush() {
r->pending_index_entry = true;
r->status = r->file->Flush();
}
- if (r->filter_block != NULL) {
+ if (r->filter_block != nullptr) {
r->filter_block->StartBlock(r->offset);
}
}
@@ -173,8 +175,7 @@ void TableBuilder::WriteBlock(BlockBuilder* block, BlockHandle* handle) {
}
void TableBuilder::WriteRawBlock(const Slice& block_contents,
- CompressionType type,
- BlockHandle* handle) {
+ CompressionType type, BlockHandle* handle) {
Rep* r = rep_;
handle->set_offset(r->offset);
handle->set_size(block_contents.size());
@@ -184,7 +185,7 @@ void TableBuilder::WriteRawBlock(const Slice& block_contents,
trailer[0] = type;
uint32_t crc = crc32c::Value(block_contents.data(), block_contents.size());
crc = crc32c::Extend(crc, trailer, 1); // Extend crc to cover block type
- EncodeFixed32(trailer+1, crc32c::Mask(crc));
+ EncodeFixed32(trailer + 1, crc32c::Mask(crc));
r->status = r->file->Append(Slice(trailer, kBlockTrailerSize));
if (r->status.ok()) {
r->offset += block_contents.size() + kBlockTrailerSize;
@@ -192,9 +193,7 @@ void TableBuilder::WriteRawBlock(const Slice& block_contents,
}
}
-Status TableBuilder::status() const {
- return rep_->status;
-}
+Status TableBuilder::status() const { return rep_->status; }
Status TableBuilder::Finish() {
Rep* r = rep_;
@@ -205,7 +204,7 @@ Status TableBuilder::Finish() {
BlockHandle filter_block_handle, metaindex_block_handle, index_block_handle;
// Write filter block
- if (ok() && r->filter_block != NULL) {
+ if (ok() && r->filter_block != nullptr) {
WriteRawBlock(r->filter_block->Finish(), kNoCompression,
&filter_block_handle);
}
@@ -213,7 +212,7 @@ Status TableBuilder::Finish() {
// Write metaindex block
if (ok()) {
BlockBuilder meta_index_block(&r->options);
- if (r->filter_block != NULL) {
+ if (r->filter_block != nullptr) {
// Add mapping from "filter.Name" to location of filter data
std::string key = "filter.";
key.append(r->options.filter_policy->Name());
@@ -259,12 +258,8 @@ void TableBuilder::Abandon() {
r->closed = true;
}
-uint64_t TableBuilder::NumEntries() const {
- return rep_->num_entries;
-}
+uint64_t TableBuilder::NumEntries() const { return rep_->num_entries; }
-uint64_t TableBuilder::FileSize() const {
- return rep_->offset;
-}
+uint64_t TableBuilder::FileSize() const { return rep_->offset; }
} // namespace leveldb
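// A standalone sketch of the 5-byte block trailer that WriteRawBlock above
// appends after every block: one compression-type byte followed by a 4-byte
// masked CRC32C covering the block contents plus that type byte. EncodeFixed32LE
// here assumes a little-endian host, and the masked CRC is taken as an input
// rather than recomputed, so this only illustrates the layout.
#include <cstddef>
#include <cstdint>
#include <cstring>

namespace trailer_sketch {

constexpr size_t kBlockTrailerSize = 5;  // 1 type byte + 4 CRC bytes

inline void EncodeFixed32LE(char* dst, uint32_t value) {
  std::memcpy(dst, &value, sizeof(value));  // little-endian host assumed
}

// masked_crc corresponds to crc32c::Mask(crc) in WriteRawBlock, where crc is
// crc32c::Value(block_contents) extended over the type byte.
void FillTrailer(char* trailer, char type, uint32_t masked_crc) {
  trailer[0] = type;  // kNoCompression or kSnappyCompression
  EncodeFixed32LE(trailer + 1, masked_crc);
}

}  // namespace trailer_sketch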
diff --git a/src/leveldb/table/table_test.cc b/src/leveldb/table/table_test.cc
index abf6e246ff..17aaea2f9e 100644
--- a/src/leveldb/table/table_test.cc
+++ b/src/leveldb/table/table_test.cc
@@ -6,6 +6,7 @@
#include <map>
#include <string>
+
#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
@@ -27,8 +28,8 @@ namespace leveldb {
static std::string Reverse(const Slice& key) {
std::string str(key.ToString());
std::string rev("");
- for (std::string::reverse_iterator rit = str.rbegin();
- rit != str.rend(); ++rit) {
+ for (std::string::reverse_iterator rit = str.rbegin(); rit != str.rend();
+ ++rit) {
rev.push_back(*rit);
}
return rev;
@@ -37,24 +38,23 @@ static std::string Reverse(const Slice& key) {
namespace {
class ReverseKeyComparator : public Comparator {
public:
- virtual const char* Name() const {
+ const char* Name() const override {
return "leveldb.ReverseBytewiseComparator";
}
- virtual int Compare(const Slice& a, const Slice& b) const {
+ int Compare(const Slice& a, const Slice& b) const override {
return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
}
- virtual void FindShortestSeparator(
- std::string* start,
- const Slice& limit) const {
+ void FindShortestSeparator(std::string* start,
+ const Slice& limit) const override {
std::string s = Reverse(*start);
std::string l = Reverse(limit);
BytewiseComparator()->FindShortestSeparator(&s, l);
*start = Reverse(s);
}
- virtual void FindShortSuccessor(std::string* key) const {
+ void FindShortSuccessor(std::string* key) const override {
std::string s = Reverse(*key);
BytewiseComparator()->FindShortSuccessor(&s);
*key = Reverse(s);
@@ -79,47 +79,46 @@ namespace {
struct STLLessThan {
const Comparator* cmp;
- STLLessThan() : cmp(BytewiseComparator()) { }
- STLLessThan(const Comparator* c) : cmp(c) { }
+ STLLessThan() : cmp(BytewiseComparator()) {}
+ STLLessThan(const Comparator* c) : cmp(c) {}
bool operator()(const std::string& a, const std::string& b) const {
return cmp->Compare(Slice(a), Slice(b)) < 0;
}
};
} // namespace
-class StringSink: public WritableFile {
+class StringSink : public WritableFile {
public:
- ~StringSink() { }
+ ~StringSink() override = default;
const std::string& contents() const { return contents_; }
- virtual Status Close() { return Status::OK(); }
- virtual Status Flush() { return Status::OK(); }
- virtual Status Sync() { return Status::OK(); }
+ Status Close() override { return Status::OK(); }
+ Status Flush() override { return Status::OK(); }
+ Status Sync() override { return Status::OK(); }
- virtual Status Append(const Slice& data) {
+ Status Append(const Slice& data) override {
contents_.append(data.data(), data.size());
return Status::OK();
}
+ std::string GetName() const override { return ""; }
private:
std::string contents_;
};
-
-class StringSource: public RandomAccessFile {
+class StringSource : public RandomAccessFile {
public:
StringSource(const Slice& contents)
- : contents_(contents.data(), contents.size()) {
- }
+ : contents_(contents.data(), contents.size()) {}
- virtual ~StringSource() { }
+ ~StringSource() override = default;
uint64_t Size() const { return contents_.size(); }
- virtual Status Read(uint64_t offset, size_t n, Slice* result,
- char* scratch) const {
- if (offset > contents_.size()) {
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
+ if (offset >= contents_.size()) {
return Status::InvalidArgument("invalid Read offset");
}
if (offset + n > contents_.size()) {
@@ -130,6 +129,7 @@ class StringSource: public RandomAccessFile {
return Status::OK();
}
+ std::string GetName() const { return ""; }
private:
std::string contents_;
};
@@ -140,8 +140,8 @@ typedef std::map<std::string, std::string, STLLessThan> KVMap;
// BlockBuilder/TableBuilder and Block/Table.
class Constructor {
public:
- explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) { }
- virtual ~Constructor() { }
+ explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) {}
+ virtual ~Constructor() = default;
void Add(const std::string& key, const Slice& value) {
data_[key] = value.ToString();
@@ -150,15 +150,12 @@ class Constructor {
// Finish constructing the data structure with all the keys that have
// been added so far. Returns the keys in sorted order in "*keys"
// and stores the key/value pairs in "*kvmap"
- void Finish(const Options& options,
- std::vector<std::string>* keys,
+ void Finish(const Options& options, std::vector<std::string>* keys,
KVMap* kvmap) {
*kvmap = data_;
keys->clear();
- for (KVMap::const_iterator it = data_.begin();
- it != data_.end();
- ++it) {
- keys->push_back(it->first);
+ for (const auto& kvp : data_) {
+ keys->push_back(kvp.first);
}
data_.clear();
Status s = FinishImpl(options, *kvmap);
@@ -170,32 +167,26 @@ class Constructor {
virtual Iterator* NewIterator() const = 0;
- virtual const KVMap& data() { return data_; }
+ const KVMap& data() const { return data_; }
- virtual DB* db() const { return NULL; } // Overridden in DBConstructor
+ virtual DB* db() const { return nullptr; } // Overridden in DBConstructor
private:
KVMap data_;
};
-class BlockConstructor: public Constructor {
+class BlockConstructor : public Constructor {
public:
explicit BlockConstructor(const Comparator* cmp)
- : Constructor(cmp),
- comparator_(cmp),
- block_(NULL) { }
- ~BlockConstructor() {
+ : Constructor(cmp), comparator_(cmp), block_(nullptr) {}
+ ~BlockConstructor() override { delete block_; }
+ Status FinishImpl(const Options& options, const KVMap& data) override {
delete block_;
- }
- virtual Status FinishImpl(const Options& options, const KVMap& data) {
- delete block_;
- block_ = NULL;
+ block_ = nullptr;
BlockBuilder builder(&options);
- for (KVMap::const_iterator it = data.begin();
- it != data.end();
- ++it) {
- builder.Add(it->first, it->second);
+ for (const auto& kvp : data) {
+ builder.Add(kvp.first, kvp.second);
}
// Open the block
data_ = builder.Finish().ToString();
@@ -206,36 +197,30 @@ class BlockConstructor: public Constructor {
block_ = new Block(contents);
return Status::OK();
}
- virtual Iterator* NewIterator() const {
+ Iterator* NewIterator() const override {
return block_->NewIterator(comparator_);
}
private:
- const Comparator* comparator_;
+ const Comparator* const comparator_;
std::string data_;
Block* block_;
BlockConstructor();
};
-class TableConstructor: public Constructor {
+class TableConstructor : public Constructor {
public:
TableConstructor(const Comparator* cmp)
- : Constructor(cmp),
- source_(NULL), table_(NULL) {
- }
- ~TableConstructor() {
- Reset();
- }
- virtual Status FinishImpl(const Options& options, const KVMap& data) {
+ : Constructor(cmp), source_(nullptr), table_(nullptr) {}
+ ~TableConstructor() override { Reset(); }
+ Status FinishImpl(const Options& options, const KVMap& data) override {
Reset();
StringSink sink;
TableBuilder builder(options, &sink);
- for (KVMap::const_iterator it = data.begin();
- it != data.end();
- ++it) {
- builder.Add(it->first, it->second);
+ for (const auto& kvp : data) {
+ builder.Add(kvp.first, kvp.second);
ASSERT_TRUE(builder.status().ok());
}
Status s = builder.Finish();
@@ -250,7 +235,7 @@ class TableConstructor: public Constructor {
return Table::Open(table_options, source_, sink.contents().size(), &table_);
}
- virtual Iterator* NewIterator() const {
+ Iterator* NewIterator() const override {
return table_->NewIterator(ReadOptions());
}
@@ -262,8 +247,8 @@ class TableConstructor: public Constructor {
void Reset() {
delete table_;
delete source_;
- table_ = NULL;
- source_ = NULL;
+ table_ = nullptr;
+ source_ = nullptr;
}
StringSource* source_;
@@ -273,23 +258,28 @@ class TableConstructor: public Constructor {
};
// A helper class that converts internal format keys into user keys
-class KeyConvertingIterator: public Iterator {
+class KeyConvertingIterator : public Iterator {
public:
- explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) { }
- virtual ~KeyConvertingIterator() { delete iter_; }
- virtual bool Valid() const { return iter_->Valid(); }
- virtual void Seek(const Slice& target) {
+ explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) {}
+
+ KeyConvertingIterator(const KeyConvertingIterator&) = delete;
+ KeyConvertingIterator& operator=(const KeyConvertingIterator&) = delete;
+
+ ~KeyConvertingIterator() override { delete iter_; }
+
+ bool Valid() const override { return iter_->Valid(); }
+ void Seek(const Slice& target) override {
ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
std::string encoded;
AppendInternalKey(&encoded, ikey);
iter_->Seek(encoded);
}
- virtual void SeekToFirst() { iter_->SeekToFirst(); }
- virtual void SeekToLast() { iter_->SeekToLast(); }
- virtual void Next() { iter_->Next(); }
- virtual void Prev() { iter_->Prev(); }
+ void SeekToFirst() override { iter_->SeekToFirst(); }
+ void SeekToLast() override { iter_->SeekToLast(); }
+ void Next() override { iter_->Next(); }
+ void Prev() override { iter_->Prev(); }
- virtual Slice key() const {
+ Slice key() const override {
assert(Valid());
ParsedInternalKey key;
if (!ParseInternalKey(iter_->key(), &key)) {
@@ -299,82 +289,68 @@ class KeyConvertingIterator: public Iterator {
return key.user_key;
}
- virtual Slice value() const { return iter_->value(); }
- virtual Status status() const {
+ Slice value() const override { return iter_->value(); }
+ Status status() const override {
return status_.ok() ? iter_->status() : status_;
}
private:
mutable Status status_;
Iterator* iter_;
-
- // No copying allowed
- KeyConvertingIterator(const KeyConvertingIterator&);
- void operator=(const KeyConvertingIterator&);
};
-class MemTableConstructor: public Constructor {
+class MemTableConstructor : public Constructor {
public:
explicit MemTableConstructor(const Comparator* cmp)
- : Constructor(cmp),
- internal_comparator_(cmp) {
+ : Constructor(cmp), internal_comparator_(cmp) {
memtable_ = new MemTable(internal_comparator_);
memtable_->Ref();
}
- ~MemTableConstructor() {
- memtable_->Unref();
- }
- virtual Status FinishImpl(const Options& options, const KVMap& data) {
+ ~MemTableConstructor() override { memtable_->Unref(); }
+ Status FinishImpl(const Options& options, const KVMap& data) override {
memtable_->Unref();
memtable_ = new MemTable(internal_comparator_);
memtable_->Ref();
int seq = 1;
- for (KVMap::const_iterator it = data.begin();
- it != data.end();
- ++it) {
- memtable_->Add(seq, kTypeValue, it->first, it->second);
+ for (const auto& kvp : data) {
+ memtable_->Add(seq, kTypeValue, kvp.first, kvp.second);
seq++;
}
return Status::OK();
}
- virtual Iterator* NewIterator() const {
+ Iterator* NewIterator() const override {
return new KeyConvertingIterator(memtable_->NewIterator());
}
private:
- InternalKeyComparator internal_comparator_;
+ const InternalKeyComparator internal_comparator_;
MemTable* memtable_;
};
-class DBConstructor: public Constructor {
+class DBConstructor : public Constructor {
public:
explicit DBConstructor(const Comparator* cmp)
- : Constructor(cmp),
- comparator_(cmp) {
- db_ = NULL;
+ : Constructor(cmp), comparator_(cmp) {
+ db_ = nullptr;
NewDB();
}
- ~DBConstructor() {
- delete db_;
- }
- virtual Status FinishImpl(const Options& options, const KVMap& data) {
+ ~DBConstructor() override { delete db_; }
+ Status FinishImpl(const Options& options, const KVMap& data) override {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
NewDB();
- for (KVMap::const_iterator it = data.begin();
- it != data.end();
- ++it) {
+ for (const auto& kvp : data) {
WriteBatch batch;
- batch.Put(it->first, it->second);
+ batch.Put(kvp.first, kvp.second);
ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok());
}
return Status::OK();
}
- virtual Iterator* NewIterator() const {
+ Iterator* NewIterator() const override {
return db_->NewIterator(ReadOptions());
}
- virtual DB* db() const { return db_; }
+ DB* db() const override { return db_; }
private:
void NewDB() {
@@ -392,16 +368,11 @@ class DBConstructor: public Constructor {
ASSERT_TRUE(status.ok()) << status.ToString();
}
- const Comparator* comparator_;
+ const Comparator* const comparator_;
DB* db_;
};
-enum TestType {
- TABLE_TEST,
- BLOCK_TEST,
- MEMTABLE_TEST,
- DB_TEST
-};
+enum TestType { TABLE_TEST, BLOCK_TEST, MEMTABLE_TEST, DB_TEST };
struct TestArgs {
TestType type;
@@ -410,37 +381,37 @@ struct TestArgs {
};
static const TestArgs kTestArgList[] = {
- { TABLE_TEST, false, 16 },
- { TABLE_TEST, false, 1 },
- { TABLE_TEST, false, 1024 },
- { TABLE_TEST, true, 16 },
- { TABLE_TEST, true, 1 },
- { TABLE_TEST, true, 1024 },
-
- { BLOCK_TEST, false, 16 },
- { BLOCK_TEST, false, 1 },
- { BLOCK_TEST, false, 1024 },
- { BLOCK_TEST, true, 16 },
- { BLOCK_TEST, true, 1 },
- { BLOCK_TEST, true, 1024 },
-
- // Restart interval does not matter for memtables
- { MEMTABLE_TEST, false, 16 },
- { MEMTABLE_TEST, true, 16 },
-
- // Do not bother with restart interval variations for DB
- { DB_TEST, false, 16 },
- { DB_TEST, true, 16 },
+ {TABLE_TEST, false, 16},
+ {TABLE_TEST, false, 1},
+ {TABLE_TEST, false, 1024},
+ {TABLE_TEST, true, 16},
+ {TABLE_TEST, true, 1},
+ {TABLE_TEST, true, 1024},
+
+ {BLOCK_TEST, false, 16},
+ {BLOCK_TEST, false, 1},
+ {BLOCK_TEST, false, 1024},
+ {BLOCK_TEST, true, 16},
+ {BLOCK_TEST, true, 1},
+ {BLOCK_TEST, true, 1024},
+
+ // Restart interval does not matter for memtables
+ {MEMTABLE_TEST, false, 16},
+ {MEMTABLE_TEST, true, 16},
+
+ // Do not bother with restart interval variations for DB
+ {DB_TEST, false, 16},
+ {DB_TEST, true, 16},
};
static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]);
class Harness {
public:
- Harness() : constructor_(NULL) { }
+ Harness() : constructor_(nullptr) {}
void Init(const TestArgs& args) {
delete constructor_;
- constructor_ = NULL;
+ constructor_ = nullptr;
options_ = Options();
options_.block_restart_interval = args.restart_interval;
@@ -466,9 +437,7 @@ class Harness {
}
}
- ~Harness() {
- delete constructor_;
- }
+ ~Harness() { delete constructor_; }
void Add(const std::string& key, const std::string& value) {
constructor_->Add(key, value);
@@ -490,8 +459,7 @@ class Harness {
ASSERT_TRUE(!iter->Valid());
iter->SeekToFirst();
for (KVMap::const_iterator model_iter = data.begin();
- model_iter != data.end();
- ++model_iter) {
+ model_iter != data.end(); ++model_iter) {
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
iter->Next();
}
@@ -505,8 +473,7 @@ class Harness {
ASSERT_TRUE(!iter->Valid());
iter->SeekToLast();
for (KVMap::const_reverse_iterator model_iter = data.rbegin();
- model_iter != data.rend();
- ++model_iter) {
+ model_iter != data.rend(); ++model_iter) {
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
iter->Prev();
}
@@ -514,8 +481,7 @@ class Harness {
delete iter;
}
- void TestRandomAccess(Random* rnd,
- const std::vector<std::string>& keys,
+ void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys,
const KVMap& data) {
static const bool kVerbose = false;
Iterator* iter = constructor_->NewIterator();
@@ -546,8 +512,8 @@ class Harness {
case 2: {
std::string key = PickRandomKey(rnd, keys);
model_iter = data.lower_bound(key);
- if (kVerbose) fprintf(stderr, "Seek '%s'\n",
- EscapeString(key).c_str());
+ if (kVerbose)
+ fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str());
iter->Seek(Slice(key));
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
break;
@@ -558,7 +524,7 @@ class Harness {
if (kVerbose) fprintf(stderr, "Prev\n");
iter->Prev();
if (model_iter == data.begin()) {
- model_iter = data.end(); // Wrap around to invalid value
+ model_iter = data.end(); // Wrap around to invalid value
} else {
--model_iter;
}
@@ -621,8 +587,8 @@ class Harness {
break;
case 1: {
// Attempt to return something smaller than an existing key
- if (result.size() > 0 && result[result.size()-1] > '\0') {
- result[result.size()-1]--;
+ if (!result.empty() && result[result.size() - 1] > '\0') {
+ result[result.size() - 1]--;
}
break;
}
@@ -636,7 +602,7 @@ class Harness {
}
}
- // Returns NULL if not running against a DB
+ // Returns nullptr if not running against a DB
DB* db() const { return constructor_->db(); }
private:
@@ -720,8 +686,8 @@ TEST(Harness, Randomized) {
for (int num_entries = 0; num_entries < 2000;
num_entries += (num_entries < 50 ? 1 : 200)) {
if ((num_entries % 10) == 0) {
- fprintf(stderr, "case %d of %d: num_entries = %d\n",
- (i + 1), int(kNumTestArgs), num_entries);
+ fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
+ int(kNumTestArgs), num_entries);
}
for (int e = 0; e < num_entries; e++) {
std::string v;
@@ -735,7 +701,7 @@ TEST(Harness, Randomized) {
TEST(Harness, RandomizedLongDB) {
Random rnd(test::RandomSeed());
- TestArgs args = { DB_TEST, false, 16 };
+ TestArgs args = {DB_TEST, false, 16};
Init(args);
int num_entries = 100000;
for (int e = 0; e < num_entries; e++) {
@@ -757,7 +723,7 @@ TEST(Harness, RandomizedLongDB) {
ASSERT_GT(files, 0);
}
-class MemTableTest { };
+class MemTableTest {};
TEST(MemTableTest, Simple) {
InternalKeyComparator cmp(BytewiseComparator());
@@ -774,8 +740,7 @@ TEST(MemTableTest, Simple) {
Iterator* iter = memtable->NewIterator();
iter->SeekToFirst();
while (iter->Valid()) {
- fprintf(stderr, "key: '%s' -> '%s'\n",
- iter->key().ToString().c_str(),
+ fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
iter->value().ToString().c_str());
iter->Next();
}
@@ -788,14 +753,13 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
bool result = (val >= low) && (val <= high);
if (!result) {
fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
- (unsigned long long)(val),
- (unsigned long long)(low),
+ (unsigned long long)(val), (unsigned long long)(low),
(unsigned long long)(high));
}
return result;
}
-class TableTest { };
+class TableTest {};
TEST(TableTest, ApproximateOffsetOfPlain) {
TableConstructor c(BytewiseComparator());
@@ -813,18 +777,17 @@ TEST(TableTest, ApproximateOffsetOfPlain) {
options.compression = kNoCompression;
c.Finish(options, &keys, &kvmap);
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
-
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
}
static bool SnappyCompressionSupported() {
@@ -855,7 +818,7 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {
// Expected upper and lower bounds of space used by compressible strings.
static const int kSlop = 1000; // Compressor effectiveness varies.
- const int expected = 2500; // 10000 * compression ratio (0.25)
+ const int expected = 2500; // 10000 * compression ratio (0.25)
const int min_z = expected - kSlop;
const int max_z = expected + kSlop;
@@ -871,6 +834,4 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
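// The test constructors above repeatedly replace explicit
// KVMap::const_iterator loops with range-based for. The two forms are
// equivalent for a std::map; a minimal standalone rendering of the pattern:
#include <map>
#include <string>
#include <vector>

void CopyKeys(const std::map<std::string, std::string>& data,
              std::vector<std::string>* keys) {
  // kvp is a const reference to std::pair<const std::string, std::string>.
  for (const auto& kvp : data) {
    keys->push_back(kvp.first);
  }
}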
diff --git a/src/leveldb/table/two_level_iterator.cc b/src/leveldb/table/two_level_iterator.cc
index 7822ebab9c..144790dd97 100644
--- a/src/leveldb/table/two_level_iterator.cc
+++ b/src/leveldb/table/two_level_iterator.cc
@@ -15,38 +15,33 @@ namespace {
typedef Iterator* (*BlockFunction)(void*, const ReadOptions&, const Slice&);
-class TwoLevelIterator: public Iterator {
+class TwoLevelIterator : public Iterator {
public:
- TwoLevelIterator(
- Iterator* index_iter,
- BlockFunction block_function,
- void* arg,
- const ReadOptions& options);
-
- virtual ~TwoLevelIterator();
-
- virtual void Seek(const Slice& target);
- virtual void SeekToFirst();
- virtual void SeekToLast();
- virtual void Next();
- virtual void Prev();
-
- virtual bool Valid() const {
- return data_iter_.Valid();
- }
- virtual Slice key() const {
+ TwoLevelIterator(Iterator* index_iter, BlockFunction block_function,
+ void* arg, const ReadOptions& options);
+
+ ~TwoLevelIterator() override;
+
+ void Seek(const Slice& target) override;
+ void SeekToFirst() override;
+ void SeekToLast() override;
+ void Next() override;
+ void Prev() override;
+
+ bool Valid() const override { return data_iter_.Valid(); }
+ Slice key() const override {
assert(Valid());
return data_iter_.key();
}
- virtual Slice value() const {
+ Slice value() const override {
assert(Valid());
return data_iter_.value();
}
- virtual Status status() const {
+ Status status() const override {
// It'd be nice if status() returned a const Status& instead of a Status
if (!index_iter_.status().ok()) {
return index_iter_.status();
- } else if (data_iter_.iter() != NULL && !data_iter_.status().ok()) {
+ } else if (data_iter_.iter() != nullptr && !data_iter_.status().ok()) {
return data_iter_.status();
} else {
return status_;
@@ -67,45 +62,41 @@ class TwoLevelIterator: public Iterator {
const ReadOptions options_;
Status status_;
IteratorWrapper index_iter_;
- IteratorWrapper data_iter_; // May be NULL
- // If data_iter_ is non-NULL, then "data_block_handle_" holds the
+ IteratorWrapper data_iter_; // May be nullptr
+ // If data_iter_ is non-null, then "data_block_handle_" holds the
// "index_value" passed to block_function_ to create the data_iter_.
std::string data_block_handle_;
};
-TwoLevelIterator::TwoLevelIterator(
- Iterator* index_iter,
- BlockFunction block_function,
- void* arg,
- const ReadOptions& options)
+TwoLevelIterator::TwoLevelIterator(Iterator* index_iter,
+ BlockFunction block_function, void* arg,
+ const ReadOptions& options)
: block_function_(block_function),
arg_(arg),
options_(options),
index_iter_(index_iter),
- data_iter_(NULL) {
-}
+ data_iter_(nullptr) {}
-TwoLevelIterator::~TwoLevelIterator() {
-}
+TwoLevelIterator::~TwoLevelIterator() = default;
void TwoLevelIterator::Seek(const Slice& target) {
index_iter_.Seek(target);
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.Seek(target);
+ if (data_iter_.iter() != nullptr) data_iter_.Seek(target);
SkipEmptyDataBlocksForward();
}
void TwoLevelIterator::SeekToFirst() {
index_iter_.SeekToFirst();
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.SeekToFirst();
+ if (data_iter_.iter() != nullptr) data_iter_.SeekToFirst();
SkipEmptyDataBlocksForward();
}
void TwoLevelIterator::SeekToLast() {
index_iter_.SeekToLast();
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.SeekToLast();
+ if (data_iter_.iter() != nullptr) data_iter_.SeekToLast();
SkipEmptyDataBlocksBackward();
}
@@ -121,44 +112,44 @@ void TwoLevelIterator::Prev() {
SkipEmptyDataBlocksBackward();
}
-
void TwoLevelIterator::SkipEmptyDataBlocksForward() {
- while (data_iter_.iter() == NULL || !data_iter_.Valid()) {
+ while (data_iter_.iter() == nullptr || !data_iter_.Valid()) {
// Move to next block
if (!index_iter_.Valid()) {
- SetDataIterator(NULL);
+ SetDataIterator(nullptr);
return;
}
index_iter_.Next();
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.SeekToFirst();
+ if (data_iter_.iter() != nullptr) data_iter_.SeekToFirst();
}
}
void TwoLevelIterator::SkipEmptyDataBlocksBackward() {
- while (data_iter_.iter() == NULL || !data_iter_.Valid()) {
+ while (data_iter_.iter() == nullptr || !data_iter_.Valid()) {
// Move to next block
if (!index_iter_.Valid()) {
- SetDataIterator(NULL);
+ SetDataIterator(nullptr);
return;
}
index_iter_.Prev();
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.SeekToLast();
+ if (data_iter_.iter() != nullptr) data_iter_.SeekToLast();
}
}
void TwoLevelIterator::SetDataIterator(Iterator* data_iter) {
- if (data_iter_.iter() != NULL) SaveError(data_iter_.status());
+ if (data_iter_.iter() != nullptr) SaveError(data_iter_.status());
data_iter_.Set(data_iter);
}
void TwoLevelIterator::InitDataBlock() {
if (!index_iter_.Valid()) {
- SetDataIterator(NULL);
+ SetDataIterator(nullptr);
} else {
Slice handle = index_iter_.value();
- if (data_iter_.iter() != NULL && handle.compare(data_block_handle_) == 0) {
+ if (data_iter_.iter() != nullptr &&
+ handle.compare(data_block_handle_) == 0) {
// data_iter_ is already constructed with this iterator, so
// no need to change anything
} else {
@@ -171,11 +162,9 @@ void TwoLevelIterator::InitDataBlock() {
} // namespace
-Iterator* NewTwoLevelIterator(
- Iterator* index_iter,
- BlockFunction block_function,
- void* arg,
- const ReadOptions& options) {
+Iterator* NewTwoLevelIterator(Iterator* index_iter,
+ BlockFunction block_function, void* arg,
+ const ReadOptions& options) {
return new TwoLevelIterator(index_iter, block_function, arg, options);
}
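// The shape of TwoLevelIterator, reduced to plain containers: an outer
// ("index") pass yields blocks, an inner ("data") pass walks one block, and
// empty blocks are skipped so the caller only ever sees valid entries, which
// is the job SkipEmptyDataBlocksForward()/Backward() do above. std::vector
// stands in for the index and data blocks; this illustrates the control flow,
// not the real Table plumbing.
#include <vector>

template <typename Visit>
void VisitAllEntries(const std::vector<std::vector<int>>& blocks,
                     Visit visit) {
  for (const auto& block : blocks) {  // "index iterator" over block handles
    if (block.empty()) continue;      // skip empty data blocks
    for (int entry : block) {         // "data iterator" within one block
      visit(entry);
    }
  }
}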
diff --git a/src/leveldb/table/two_level_iterator.h b/src/leveldb/table/two_level_iterator.h
index 629ca34525..81ffe809ac 100644
--- a/src/leveldb/table/two_level_iterator.h
+++ b/src/leveldb/table/two_level_iterator.h
@@ -20,14 +20,11 @@ struct ReadOptions;
//
// Uses a supplied function to convert an index_iter value into
// an iterator over the contents of the corresponding block.
-extern Iterator* NewTwoLevelIterator(
+Iterator* NewTwoLevelIterator(
Iterator* index_iter,
- Iterator* (*block_function)(
- void* arg,
- const ReadOptions& options,
- const Slice& index_value),
- void* arg,
- const ReadOptions& options);
+ Iterator* (*block_function)(void* arg, const ReadOptions& options,
+ const Slice& index_value),
+ void* arg, const ReadOptions& options);
} // namespace leveldb
diff --git a/src/leveldb/util/arena.cc b/src/leveldb/util/arena.cc
index 74078213ee..46e3b2eb8f 100644
--- a/src/leveldb/util/arena.cc
+++ b/src/leveldb/util/arena.cc
@@ -3,16 +3,13 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "util/arena.h"
-#include <assert.h>
namespace leveldb {
static const int kBlockSize = 4096;
-Arena::Arena() : memory_usage_(0) {
- alloc_ptr_ = NULL; // First allocation will allocate a block
- alloc_bytes_remaining_ = 0;
-}
+Arena::Arena()
+ : alloc_ptr_(nullptr), alloc_bytes_remaining_(0), memory_usage_(0) {}
Arena::~Arena() {
for (size_t i = 0; i < blocks_.size(); i++) {
@@ -40,8 +37,9 @@ char* Arena::AllocateFallback(size_t bytes) {
char* Arena::AllocateAligned(size_t bytes) {
const int align = (sizeof(void*) > 8) ? sizeof(void*) : 8;
- assert((align & (align-1)) == 0); // Pointer size should be a power of 2
- size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align-1);
+ static_assert((align & (align - 1)) == 0,
+ "Pointer size should be a power of 2");
+ size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align - 1);
size_t slop = (current_mod == 0 ? 0 : align - current_mod);
size_t needed = bytes + slop;
char* result;
@@ -53,15 +51,15 @@ char* Arena::AllocateAligned(size_t bytes) {
// AllocateFallback always returned aligned memory
result = AllocateFallback(bytes);
}
- assert((reinterpret_cast<uintptr_t>(result) & (align-1)) == 0);
+ assert((reinterpret_cast<uintptr_t>(result) & (align - 1)) == 0);
return result;
}
char* Arena::AllocateNewBlock(size_t block_bytes) {
char* result = new char[block_bytes];
blocks_.push_back(result);
- memory_usage_.NoBarrier_Store(
- reinterpret_cast<void*>(MemoryUsage() + block_bytes + sizeof(char*)));
+ memory_usage_.fetch_add(block_bytes + sizeof(char*),
+ std::memory_order_relaxed);
return result;
}
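// The alignment arithmetic in Arena::AllocateAligned relies on align being a
// power of two (now checked with static_assert above): ptr & (align - 1) is
// ptr % align, and the "slop" is how many bytes must be skipped to reach the
// next aligned address. The same computation in isolation:
#include <cassert>
#include <cstddef>
#include <cstdint>

size_t SlopToAlign(uintptr_t ptr, size_t align) {
  assert((align & (align - 1)) == 0);      // power of two, as in the arena
  size_t current_mod = ptr & (align - 1);  // equivalent to ptr % align
  return current_mod == 0 ? 0 : align - current_mod;
}

// Example: with align == 8, a pointer value of 0x1003 has current_mod == 3,
// so 5 bytes of slop are skipped and the aligned allocation starts at 0x1008.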
diff --git a/src/leveldb/util/arena.h b/src/leveldb/util/arena.h
index 48bab33741..68fc55d4dd 100644
--- a/src/leveldb/util/arena.h
+++ b/src/leveldb/util/arena.h
@@ -5,29 +5,33 @@
#ifndef STORAGE_LEVELDB_UTIL_ARENA_H_
#define STORAGE_LEVELDB_UTIL_ARENA_H_
+#include <atomic>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
#include <vector>
-#include <assert.h>
-#include <stddef.h>
-#include <stdint.h>
-#include "port/port.h"
namespace leveldb {
class Arena {
public:
Arena();
+
+ Arena(const Arena&) = delete;
+ Arena& operator=(const Arena&) = delete;
+
~Arena();
// Return a pointer to a newly allocated memory block of "bytes" bytes.
char* Allocate(size_t bytes);
- // Allocate memory with the normal alignment guarantees provided by malloc
+ // Allocate memory with the normal alignment guarantees provided by malloc.
char* AllocateAligned(size_t bytes);
// Returns an estimate of the total memory usage of data allocated
// by the arena.
size_t MemoryUsage() const {
- return reinterpret_cast<uintptr_t>(memory_usage_.NoBarrier_Load());
+ return memory_usage_.load(std::memory_order_relaxed);
}
private:
@@ -42,11 +46,10 @@ class Arena {
std::vector<char*> blocks_;
// Total memory usage of the arena.
- port::AtomicPointer memory_usage_;
-
- // No copying allowed
- Arena(const Arena&);
- void operator=(const Arena&);
+ //
+ // TODO(costan): This member is accessed via atomics, but the others are
+ // accessed without any locking. Is this OK?
+ std::atomic<size_t> memory_usage_;
};
inline char* Arena::Allocate(size_t bytes) {
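// memory_usage_ moves from port::AtomicPointer with NoBarrier_Store/Load to
// std::atomic<size_t> with relaxed operations. Relaxed ordering suffices
// because the counter is only a statistic: readers need a tear-free value,
// not ordering with respect to the allocations themselves. The pattern in
// isolation:
#include <atomic>
#include <cstddef>

class UsageCounter {
 public:
  void Add(size_t n) { usage_.fetch_add(n, std::memory_order_relaxed); }
  size_t Get() const { return usage_.load(std::memory_order_relaxed); }

 private:
  std::atomic<size_t> usage_{0};
};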
diff --git a/src/leveldb/util/arena_test.cc b/src/leveldb/util/arena_test.cc
index 58e870ec44..e917228f42 100644
--- a/src/leveldb/util/arena_test.cc
+++ b/src/leveldb/util/arena_test.cc
@@ -9,14 +9,12 @@
namespace leveldb {
-class ArenaTest { };
+class ArenaTest {};
-TEST(ArenaTest, Empty) {
- Arena arena;
-}
+TEST(ArenaTest, Empty) { Arena arena; }
TEST(ArenaTest, Simple) {
- std::vector<std::pair<size_t, char*> > allocated;
+ std::vector<std::pair<size_t, char*>> allocated;
Arena arena;
const int N = 100000;
size_t bytes = 0;
@@ -26,8 +24,9 @@ TEST(ArenaTest, Simple) {
if (i % (N / 10) == 0) {
s = i;
} else {
- s = rnd.OneIn(4000) ? rnd.Uniform(6000) :
- (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
+ s = rnd.OneIn(4000)
+ ? rnd.Uniform(6000)
+ : (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
}
if (s == 0) {
// Our arena disallows size 0 allocations.
@@ -47,7 +46,7 @@ TEST(ArenaTest, Simple) {
bytes += s;
allocated.push_back(std::make_pair(s, r));
ASSERT_GE(arena.MemoryUsage(), bytes);
- if (i > N/10) {
+ if (i > N / 10) {
ASSERT_LE(arena.MemoryUsage(), bytes * 1.10);
}
}
@@ -63,6 +62,4 @@ TEST(ArenaTest, Simple) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/bloom.cc b/src/leveldb/util/bloom.cc
index bf3e4ca6e9..87547a7e62 100644
--- a/src/leveldb/util/bloom.cc
+++ b/src/leveldb/util/bloom.cc
@@ -15,24 +15,17 @@ static uint32_t BloomHash(const Slice& key) {
}
class BloomFilterPolicy : public FilterPolicy {
- private:
- size_t bits_per_key_;
- size_t k_;
-
public:
- explicit BloomFilterPolicy(int bits_per_key)
- : bits_per_key_(bits_per_key) {
+ explicit BloomFilterPolicy(int bits_per_key) : bits_per_key_(bits_per_key) {
// We intentionally round down to reduce probing cost a little bit
k_ = static_cast<size_t>(bits_per_key * 0.69); // 0.69 =~ ln(2)
if (k_ < 1) k_ = 1;
if (k_ > 30) k_ = 30;
}
- virtual const char* Name() const {
- return "leveldb.BuiltinBloomFilter2";
- }
+ const char* Name() const override { return "leveldb.BuiltinBloomFilter2"; }
- virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
+ void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
// Compute bloom filter size (in both bits and bytes)
size_t bits = n * bits_per_key_;
@@ -54,13 +47,13 @@ class BloomFilterPolicy : public FilterPolicy {
const uint32_t delta = (h >> 17) | (h << 15); // Rotate right 17 bits
for (size_t j = 0; j < k_; j++) {
const uint32_t bitpos = h % bits;
- array[bitpos/8] |= (1 << (bitpos % 8));
+ array[bitpos / 8] |= (1 << (bitpos % 8));
h += delta;
}
}
}
- virtual bool KeyMayMatch(const Slice& key, const Slice& bloom_filter) const {
+ bool KeyMayMatch(const Slice& key, const Slice& bloom_filter) const override {
const size_t len = bloom_filter.size();
if (len < 2) return false;
@@ -69,7 +62,7 @@ class BloomFilterPolicy : public FilterPolicy {
// Use the encoded k so that we can read filters generated by
// bloom filters created using different parameters.
- const size_t k = array[len-1];
+ const size_t k = array[len - 1];
if (k > 30) {
// Reserved for potentially new encodings for short bloom filters.
// Consider it a match.
@@ -80,13 +73,17 @@ class BloomFilterPolicy : public FilterPolicy {
const uint32_t delta = (h >> 17) | (h << 15); // Rotate right 17 bits
for (size_t j = 0; j < k; j++) {
const uint32_t bitpos = h % bits;
- if ((array[bitpos/8] & (1 << (bitpos % 8))) == 0) return false;
+ if ((array[bitpos / 8] & (1 << (bitpos % 8))) == 0) return false;
h += delta;
}
return true;
}
+
+ private:
+ size_t bits_per_key_;
+ size_t k_;
};
-}
+} // namespace
const FilterPolicy* NewBloomFilterPolicy(int bits_per_key) {
return new BloomFilterPolicy(bits_per_key);
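// BloomFilterPolicy picks k roughly equal to bits_per_key * ln(2) probes per
// key (the 0.69 factor above, clamped to [1, 30]) and derives all k bit
// positions from one 32-bit hash by double hashing: the stride is the hash
// rotated right by 17 bits. The probe sequence in isolation, taking the key's
// hash h as an input rather than recomputing BloomHash:
#include <cstdint>
#include <vector>

std::vector<uint32_t> BloomProbePositions(uint32_t h, size_t k, size_t bits) {
  std::vector<uint32_t> positions;
  const uint32_t delta = (h >> 17) | (h << 15);  // rotate right 17 bits
  for (size_t j = 0; j < k; j++) {
    positions.push_back(h % bits);  // bit set in CreateFilter, tested in KeyMayMatch
    h += delta;
  }
  return positions;
}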
diff --git a/src/leveldb/util/bloom_test.cc b/src/leveldb/util/bloom_test.cc
index 1b87a2be3f..436daa9e99 100644
--- a/src/leveldb/util/bloom_test.cc
+++ b/src/leveldb/util/bloom_test.cc
@@ -19,26 +19,17 @@ static Slice Key(int i, char* buffer) {
}
class BloomTest {
- private:
- const FilterPolicy* policy_;
- std::string filter_;
- std::vector<std::string> keys_;
-
public:
- BloomTest() : policy_(NewBloomFilterPolicy(10)) { }
+ BloomTest() : policy_(NewBloomFilterPolicy(10)) {}
- ~BloomTest() {
- delete policy_;
- }
+ ~BloomTest() { delete policy_; }
void Reset() {
keys_.clear();
filter_.clear();
}
- void Add(const Slice& s) {
- keys_.push_back(s.ToString());
- }
+ void Add(const Slice& s) { keys_.push_back(s.ToString()); }
void Build() {
std::vector<Slice> key_slices;
@@ -52,16 +43,14 @@ class BloomTest {
if (kVerbose >= 2) DumpFilter();
}
- size_t FilterSize() const {
- return filter_.size();
- }
+ size_t FilterSize() const { return filter_.size(); }
void DumpFilter() {
fprintf(stderr, "F(");
- for (size_t i = 0; i+1 < filter_.size(); i++) {
+ for (size_t i = 0; i + 1 < filter_.size(); i++) {
const unsigned int c = static_cast<unsigned int>(filter_[i]);
for (int j = 0; j < 8; j++) {
- fprintf(stderr, "%c", (c & (1 <<j)) ? '1' : '.');
+ fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.');
}
}
fprintf(stderr, ")\n");
@@ -84,11 +73,16 @@ class BloomTest {
}
return result / 10000.0;
}
+
+ private:
+ const FilterPolicy* policy_;
+ std::string filter_;
+ std::vector<std::string> keys_;
};
TEST(BloomTest, EmptyFilter) {
- ASSERT_TRUE(! Matches("hello"));
- ASSERT_TRUE(! Matches("world"));
+ ASSERT_TRUE(!Matches("hello"));
+ ASSERT_TRUE(!Matches("world"));
}
TEST(BloomTest, Small) {
@@ -96,8 +90,8 @@ TEST(BloomTest, Small) {
Add("world");
ASSERT_TRUE(Matches("hello"));
ASSERT_TRUE(Matches("world"));
- ASSERT_TRUE(! Matches("x"));
- ASSERT_TRUE(! Matches("foo"));
+ ASSERT_TRUE(!Matches("x"));
+ ASSERT_TRUE(!Matches("foo"));
}
static int NextLength(int length) {
@@ -140,23 +134,23 @@ TEST(BloomTest, VaryingLengths) {
double rate = FalsePositiveRate();
if (kVerbose >= 1) {
fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
- rate*100.0, length, static_cast<int>(FilterSize()));
+ rate * 100.0, length, static_cast<int>(FilterSize()));
}
- ASSERT_LE(rate, 0.02); // Must not be over 2%
- if (rate > 0.0125) mediocre_filters++; // Allowed, but not too often
- else good_filters++;
+ ASSERT_LE(rate, 0.02); // Must not be over 2%
+ if (rate > 0.0125)
+ mediocre_filters++; // Allowed, but not too often
+ else
+ good_filters++;
}
if (kVerbose >= 1) {
- fprintf(stderr, "Filters: %d good, %d mediocre\n",
- good_filters, mediocre_filters);
+ fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters,
+ mediocre_filters);
}
- ASSERT_LE(mediocre_filters, good_filters/5);
+ ASSERT_LE(mediocre_filters, good_filters / 5);
}
// Different bits-per-byte
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
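// The 2% bound asserted in BloomTest.VaryingLengths sits comfortably above
// the textbook estimate. With 10 bits per key and k = 6 probes
// (static_cast<size_t>(10 * 0.69) == 6), the classical approximation for the
// false-positive rate is (1 - e^(-k / bits_per_key))^k, roughly 0.8% here.
#include <cmath>

double ExpectedFalsePositiveRate(double bits_per_key, int k) {
  return std::pow(1.0 - std::exp(-k / bits_per_key), k);
}
// ExpectedFalsePositiveRate(10.0, 6) is about 0.0084, i.e. under 1%.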
diff --git a/src/leveldb/util/cache.cc b/src/leveldb/util/cache.cc
index ce46886171..12de306cad 100644
--- a/src/leveldb/util/cache.cc
+++ b/src/leveldb/util/cache.cc
@@ -8,13 +8,13 @@
#include "leveldb/cache.h"
#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/hash.h"
#include "util/mutexlock.h"
namespace leveldb {
-Cache::~Cache() {
-}
+Cache::~Cache() {}
namespace {
@@ -45,21 +45,19 @@ struct LRUHandle {
LRUHandle* next_hash;
LRUHandle* next;
LRUHandle* prev;
- size_t charge; // TODO(opt): Only allow uint32_t?
+ size_t charge; // TODO(opt): Only allow uint32_t?
size_t key_length;
- bool in_cache; // Whether entry is in the cache.
- uint32_t refs; // References, including cache reference, if present.
- uint32_t hash; // Hash of key(); used for fast sharding and comparisons
- char key_data[1]; // Beginning of key
+ bool in_cache; // Whether entry is in the cache.
+ uint32_t refs; // References, including cache reference, if present.
+ uint32_t hash; // Hash of key(); used for fast sharding and comparisons
+ char key_data[1]; // Beginning of key
Slice key() const {
- // For cheaper lookups, we allow a temporary Handle object
- // to store a pointer to a key in "value".
- if (next == this) {
- return *(reinterpret_cast<Slice*>(value));
- } else {
- return Slice(key_data, key_length);
- }
+ // next_ is only equal to this if the LRU handle is the list head of an
+ // empty list. List heads never have meaningful keys.
+ assert(next != this);
+
+ return Slice(key_data, key_length);
}
};
@@ -70,7 +68,7 @@ struct LRUHandle {
// 4.4.3's builtin hashtable.
class HandleTable {
public:
- HandleTable() : length_(0), elems_(0), list_(NULL) { Resize(); }
+ HandleTable() : length_(0), elems_(0), list_(nullptr) { Resize(); }
~HandleTable() { delete[] list_; }
LRUHandle* Lookup(const Slice& key, uint32_t hash) {
@@ -80,9 +78,9 @@ class HandleTable {
LRUHandle* Insert(LRUHandle* h) {
LRUHandle** ptr = FindPointer(h->key(), h->hash);
LRUHandle* old = *ptr;
- h->next_hash = (old == NULL ? NULL : old->next_hash);
+ h->next_hash = (old == nullptr ? nullptr : old->next_hash);
*ptr = h;
- if (old == NULL) {
+ if (old == nullptr) {
++elems_;
if (elems_ > length_) {
// Since each cache entry is fairly large, we aim for a small
@@ -96,7 +94,7 @@ class HandleTable {
LRUHandle* Remove(const Slice& key, uint32_t hash) {
LRUHandle** ptr = FindPointer(key, hash);
LRUHandle* result = *ptr;
- if (result != NULL) {
+ if (result != nullptr) {
*ptr = result->next_hash;
--elems_;
}
@@ -115,8 +113,7 @@ class HandleTable {
// pointer to the trailing slot in the corresponding linked list.
LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
LRUHandle** ptr = &list_[hash & (length_ - 1)];
- while (*ptr != NULL &&
- ((*ptr)->hash != hash || key != (*ptr)->key())) {
+ while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
ptr = &(*ptr)->next_hash;
}
return ptr;
@@ -132,7 +129,7 @@ class HandleTable {
uint32_t count = 0;
for (uint32_t i = 0; i < length_; i++) {
LRUHandle* h = list_[i];
- while (h != NULL) {
+ while (h != nullptr) {
LRUHandle* next = h->next_hash;
uint32_t hash = h->hash;
LRUHandle** ptr = &new_list[hash & (new_length - 1)];
@@ -159,8 +156,8 @@ class LRUCache {
void SetCapacity(size_t capacity) { capacity_ = capacity; }
// Like Cache methods, but with an extra "hash" parameter.
- Cache::Handle* Insert(const Slice& key, uint32_t hash,
- void* value, size_t charge,
+ Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value,
+ size_t charge,
void (*deleter)(const Slice& key, void* value));
Cache::Handle* Lookup(const Slice& key, uint32_t hash);
void Release(Cache::Handle* handle);
@@ -173,32 +170,31 @@ class LRUCache {
private:
void LRU_Remove(LRUHandle* e);
- void LRU_Append(LRUHandle*list, LRUHandle* e);
+ void LRU_Append(LRUHandle* list, LRUHandle* e);
void Ref(LRUHandle* e);
void Unref(LRUHandle* e);
- bool FinishErase(LRUHandle* e);
+ bool FinishErase(LRUHandle* e) EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Initialized before use.
size_t capacity_;
// mutex_ protects the following state.
mutable port::Mutex mutex_;
- size_t usage_;
+ size_t usage_ GUARDED_BY(mutex_);
// Dummy head of LRU list.
// lru.prev is newest entry, lru.next is oldest entry.
// Entries have refs==1 and in_cache==true.
- LRUHandle lru_;
+ LRUHandle lru_ GUARDED_BY(mutex_);
// Dummy head of in-use list.
// Entries are in use by clients, and have refs >= 2 and in_cache==true.
- LRUHandle in_use_;
+ LRUHandle in_use_ GUARDED_BY(mutex_);
- HandleTable table_;
+ HandleTable table_ GUARDED_BY(mutex_);
};
-LRUCache::LRUCache()
- : usage_(0) {
+LRUCache::LRUCache() : capacity_(0), usage_(0) {
// Make empty circular linked lists.
lru_.next = &lru_;
lru_.prev = &lru_;
@@ -208,7 +204,7 @@ LRUCache::LRUCache()
LRUCache::~LRUCache() {
assert(in_use_.next == &in_use_); // Error if caller has an unreleased handle
- for (LRUHandle* e = lru_.next; e != &lru_; ) {
+ for (LRUHandle* e = lru_.next; e != &lru_;) {
LRUHandle* next = e->next;
assert(e->in_cache);
e->in_cache = false;
@@ -229,11 +225,12 @@ void LRUCache::Ref(LRUHandle* e) {
void LRUCache::Unref(LRUHandle* e) {
assert(e->refs > 0);
e->refs--;
- if (e->refs == 0) { // Deallocate.
+ if (e->refs == 0) { // Deallocate.
assert(!e->in_cache);
(*e->deleter)(e->key(), e->value);
free(e);
- } else if (e->in_cache && e->refs == 1) { // No longer in use; move to lru_ list.
+ } else if (e->in_cache && e->refs == 1) {
+ // No longer in use; move to lru_ list.
LRU_Remove(e);
LRU_Append(&lru_, e);
}
@@ -255,7 +252,7 @@ void LRUCache::LRU_Append(LRUHandle* list, LRUHandle* e) {
Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
MutexLock l(&mutex_);
LRUHandle* e = table_.Lookup(key, hash);
- if (e != NULL) {
+ if (e != nullptr) {
Ref(e);
}
return reinterpret_cast<Cache::Handle*>(e);
@@ -266,13 +263,14 @@ void LRUCache::Release(Cache::Handle* handle) {
Unref(reinterpret_cast<LRUHandle*>(handle));
}
-Cache::Handle* LRUCache::Insert(
- const Slice& key, uint32_t hash, void* value, size_t charge,
- void (*deleter)(const Slice& key, void* value)) {
+Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value,
+ size_t charge,
+ void (*deleter)(const Slice& key,
+ void* value)) {
MutexLock l(&mutex_);
- LRUHandle* e = reinterpret_cast<LRUHandle*>(
- malloc(sizeof(LRUHandle)-1 + key.size()));
+ LRUHandle* e =
+ reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
e->value = value;
e->deleter = deleter;
e->charge = charge;
@@ -288,8 +286,10 @@ Cache::Handle* LRUCache::Insert(
LRU_Append(&in_use_, e);
usage_ += charge;
FinishErase(table_.Insert(e));
- } // else don't cache. (Tests use capacity_==0 to turn off caching.)
-
+ } else { // don't cache. (capacity_==0 is supported and turns off caching.)
+ // next is read by key() in an assert, so it must be initialized
+ e->next = nullptr;
+ }
while (usage_ > capacity_ && lru_.next != &lru_) {
LRUHandle* old = lru_.next;
assert(old->refs == 1);
@@ -302,17 +302,17 @@ Cache::Handle* LRUCache::Insert(
return reinterpret_cast<Cache::Handle*>(e);
}
-// If e != NULL, finish removing *e from the cache; it has already been removed
-// from the hash table. Return whether e != NULL. Requires mutex_ held.
+// If e != nullptr, finish removing *e from the cache; it has already been
+// removed from the hash table. Return whether e != nullptr.
bool LRUCache::FinishErase(LRUHandle* e) {
- if (e != NULL) {
+ if (e != nullptr) {
assert(e->in_cache);
LRU_Remove(e);
e->in_cache = false;
usage_ -= e->charge;
Unref(e);
}
- return e != NULL;
+ return e != nullptr;
}
void LRUCache::Erase(const Slice& key, uint32_t hash) {
@@ -345,49 +345,46 @@ class ShardedLRUCache : public Cache {
return Hash(s.data(), s.size(), 0);
}
- static uint32_t Shard(uint32_t hash) {
- return hash >> (32 - kNumShardBits);
- }
+ static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); }
public:
- explicit ShardedLRUCache(size_t capacity)
- : last_id_(0) {
+ explicit ShardedLRUCache(size_t capacity) : last_id_(0) {
const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;
for (int s = 0; s < kNumShards; s++) {
shard_[s].SetCapacity(per_shard);
}
}
- virtual ~ShardedLRUCache() { }
- virtual Handle* Insert(const Slice& key, void* value, size_t charge,
- void (*deleter)(const Slice& key, void* value)) {
+ ~ShardedLRUCache() override {}
+ Handle* Insert(const Slice& key, void* value, size_t charge,
+ void (*deleter)(const Slice& key, void* value)) override {
const uint32_t hash = HashSlice(key);
return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter);
}
- virtual Handle* Lookup(const Slice& key) {
+ Handle* Lookup(const Slice& key) override {
const uint32_t hash = HashSlice(key);
return shard_[Shard(hash)].Lookup(key, hash);
}
- virtual void Release(Handle* handle) {
+ void Release(Handle* handle) override {
LRUHandle* h = reinterpret_cast<LRUHandle*>(handle);
shard_[Shard(h->hash)].Release(handle);
}
- virtual void Erase(const Slice& key) {
+ void Erase(const Slice& key) override {
const uint32_t hash = HashSlice(key);
shard_[Shard(hash)].Erase(key, hash);
}
- virtual void* Value(Handle* handle) {
+ void* Value(Handle* handle) override {
return reinterpret_cast<LRUHandle*>(handle)->value;
}
- virtual uint64_t NewId() {
+ uint64_t NewId() override {
MutexLock l(&id_mutex_);
return ++(last_id_);
}
- virtual void Prune() {
+ void Prune() override {
for (int s = 0; s < kNumShards; s++) {
shard_[s].Prune();
}
}
- virtual size_t TotalCharge() const {
+ size_t TotalCharge() const override {
size_t total = 0;
for (int s = 0; s < kNumShards; s++) {
total += shard_[s].TotalCharge();
@@ -398,8 +395,6 @@ class ShardedLRUCache : public Cache {
} // end anonymous namespace
-Cache* NewLRUCache(size_t capacity) {
- return new ShardedLRUCache(capacity);
-}
+Cache* NewLRUCache(size_t capacity) { return new ShardedLRUCache(capacity); }
} // namespace leveldb
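// ShardedLRUCache routes every key to one of 2^kNumShardBits single-mutex
// LRUCache shards using the top bits of the key's hash, and splits the
// requested capacity across shards with a ceiling division. The routing in
// isolation, assuming the kNumShardBits == 4 (16 shards) used in cache.cc:
#include <cstddef>
#include <cstdint>

constexpr int kNumShardBits = 4;
constexpr int kNumShards = 1 << kNumShardBits;

uint32_t ShardFor(uint32_t hash) { return hash >> (32 - kNumShardBits); }

size_t PerShardCapacity(size_t capacity) {
  return (capacity + (kNumShards - 1)) / kNumShards;  // round up
}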
diff --git a/src/leveldb/util/cache_test.cc b/src/leveldb/util/cache_test.cc
index 468f7a6425..974334b9f8 100644
--- a/src/leveldb/util/cache_test.cc
+++ b/src/leveldb/util/cache_test.cc
@@ -25,8 +25,6 @@ static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); }
class CacheTest {
public:
- static CacheTest* current_;
-
static void Deleter(const Slice& key, void* v) {
current_->deleted_keys_.push_back(DecodeKey(key));
current_->deleted_values_.push_back(DecodeValue(v));
@@ -37,18 +35,14 @@ class CacheTest {
std::vector<int> deleted_values_;
Cache* cache_;
- CacheTest() : cache_(NewLRUCache(kCacheSize)) {
- current_ = this;
- }
+ CacheTest() : cache_(NewLRUCache(kCacheSize)) { current_ = this; }
- ~CacheTest() {
- delete cache_;
- }
+ ~CacheTest() { delete cache_; }
int Lookup(int key) {
Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
- const int r = (handle == NULL) ? -1 : DecodeValue(cache_->Value(handle));
- if (handle != NULL) {
+ const int r = (handle == nullptr) ? -1 : DecodeValue(cache_->Value(handle));
+ if (handle != nullptr) {
cache_->Release(handle);
}
return r;
@@ -64,9 +58,9 @@ class CacheTest {
&CacheTest::Deleter);
}
- void Erase(int key) {
- cache_->Erase(EncodeKey(key));
- }
+ void Erase(int key) { cache_->Erase(EncodeKey(key)); }
+
+ static CacheTest* current_;
};
CacheTest* CacheTest::current_;
@@ -75,18 +69,18 @@ TEST(CacheTest, HitAndMiss) {
Insert(100, 101);
ASSERT_EQ(101, Lookup(100));
- ASSERT_EQ(-1, Lookup(200));
- ASSERT_EQ(-1, Lookup(300));
+ ASSERT_EQ(-1, Lookup(200));
+ ASSERT_EQ(-1, Lookup(300));
Insert(200, 201);
ASSERT_EQ(101, Lookup(100));
ASSERT_EQ(201, Lookup(200));
- ASSERT_EQ(-1, Lookup(300));
+ ASSERT_EQ(-1, Lookup(300));
Insert(100, 102);
ASSERT_EQ(102, Lookup(100));
ASSERT_EQ(201, Lookup(200));
- ASSERT_EQ(-1, Lookup(300));
+ ASSERT_EQ(-1, Lookup(300));
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
@@ -100,14 +94,14 @@ TEST(CacheTest, Erase) {
Insert(100, 101);
Insert(200, 201);
Erase(100);
- ASSERT_EQ(-1, Lookup(100));
+ ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
ASSERT_EQ(101, deleted_values_[0]);
Erase(100);
- ASSERT_EQ(-1, Lookup(100));
+ ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(1, deleted_keys_.size());
}
@@ -146,8 +140,8 @@ TEST(CacheTest, EvictionPolicy) {
// Frequently used entry must be kept around,
// as must things that are still in use.
for (int i = 0; i < kCacheSize + 100; i++) {
- Insert(1000+i, 2000+i);
- ASSERT_EQ(2000+i, Lookup(1000+i));
+ Insert(1000 + i, 2000 + i);
+ ASSERT_EQ(2000 + i, Lookup(1000 + i));
ASSERT_EQ(101, Lookup(100));
}
ASSERT_EQ(101, Lookup(100));
@@ -160,12 +154,12 @@ TEST(CacheTest, UseExceedsCacheSize) {
// Overfill the cache, keeping handles on all inserted entries.
std::vector<Cache::Handle*> h;
for (int i = 0; i < kCacheSize + 100; i++) {
- h.push_back(InsertAndReturnHandle(1000+i, 2000+i));
+ h.push_back(InsertAndReturnHandle(1000 + i, 2000 + i));
}
// Check that all the entries can be found in the cache.
for (int i = 0; i < h.size(); i++) {
- ASSERT_EQ(2000+i, Lookup(1000+i));
+ ASSERT_EQ(2000 + i, Lookup(1000 + i));
}
for (int i = 0; i < h.size(); i++) {
@@ -181,9 +175,9 @@ TEST(CacheTest, HeavyEntries) {
const int kHeavy = 10;
int added = 0;
int index = 0;
- while (added < 2*kCacheSize) {
+ while (added < 2 * kCacheSize) {
const int weight = (index & 1) ? kLight : kHeavy;
- Insert(index, 1000+index, weight);
+ Insert(index, 1000 + index, weight);
added += weight;
index++;
}
@@ -194,10 +188,10 @@ TEST(CacheTest, HeavyEntries) {
int r = Lookup(i);
if (r >= 0) {
cached_weight += weight;
- ASSERT_EQ(1000+i, r);
+ ASSERT_EQ(1000 + i, r);
}
}
- ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10);
+ ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}
TEST(CacheTest, NewId) {
@@ -219,8 +213,14 @@ TEST(CacheTest, Prune) {
ASSERT_EQ(-1, Lookup(2));
}
-} // namespace leveldb
+TEST(CacheTest, ZeroSizeCache) {
+ delete cache_;
+ cache_ = NewLRUCache(0);
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
+ Insert(1, 100);
+ ASSERT_EQ(-1, Lookup(1));
}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/coding.cc b/src/leveldb/util/coding.cc
index 21e3186d5d..df3fa10f0d 100644
--- a/src/leveldb/util/coding.cc
+++ b/src/leveldb/util/coding.cc
@@ -6,32 +6,6 @@
namespace leveldb {
-void EncodeFixed32(char* buf, uint32_t value) {
- if (port::kLittleEndian) {
- memcpy(buf, &value, sizeof(value));
- } else {
- buf[0] = value & 0xff;
- buf[1] = (value >> 8) & 0xff;
- buf[2] = (value >> 16) & 0xff;
- buf[3] = (value >> 24) & 0xff;
- }
-}
-
-void EncodeFixed64(char* buf, uint64_t value) {
- if (port::kLittleEndian) {
- memcpy(buf, &value, sizeof(value));
- } else {
- buf[0] = value & 0xff;
- buf[1] = (value >> 8) & 0xff;
- buf[2] = (value >> 16) & 0xff;
- buf[3] = (value >> 24) & 0xff;
- buf[4] = (value >> 32) & 0xff;
- buf[5] = (value >> 40) & 0xff;
- buf[6] = (value >> 48) & 0xff;
- buf[7] = (value >> 56) & 0xff;
- }
-}
-
void PutFixed32(std::string* dst, uint32_t value) {
char buf[sizeof(value)];
EncodeFixed32(buf, value);
@@ -46,28 +20,28 @@ void PutFixed64(std::string* dst, uint64_t value) {
char* EncodeVarint32(char* dst, uint32_t v) {
// Operate on characters as unsigneds
- unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(dst);
static const int B = 128;
- if (v < (1<<7)) {
+ if (v < (1 << 7)) {
*(ptr++) = v;
- } else if (v < (1<<14)) {
+ } else if (v < (1 << 14)) {
*(ptr++) = v | B;
- *(ptr++) = v>>7;
- } else if (v < (1<<21)) {
+ *(ptr++) = v >> 7;
+ } else if (v < (1 << 21)) {
*(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = v>>14;
- } else if (v < (1<<28)) {
+ *(ptr++) = (v >> 7) | B;
+ *(ptr++) = v >> 14;
+ } else if (v < (1 << 28)) {
*(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = (v>>14) | B;
- *(ptr++) = v>>21;
+ *(ptr++) = (v >> 7) | B;
+ *(ptr++) = (v >> 14) | B;
+ *(ptr++) = v >> 21;
} else {
*(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = (v>>14) | B;
- *(ptr++) = (v>>21) | B;
- *(ptr++) = v>>28;
+ *(ptr++) = (v >> 7) | B;
+ *(ptr++) = (v >> 14) | B;
+ *(ptr++) = (v >> 21) | B;
+ *(ptr++) = v >> 28;
}
return reinterpret_cast<char*>(ptr);
}
@@ -80,12 +54,12 @@ void PutVarint32(std::string* dst, uint32_t v) {
char* EncodeVarint64(char* dst, uint64_t v) {
static const int B = 128;
- unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(dst);
while (v >= B) {
- *(ptr++) = (v & (B-1)) | B;
+ *(ptr++) = v | B;
v >>= 7;
}
- *(ptr++) = static_cast<unsigned char>(v);
+ *(ptr++) = static_cast<uint8_t>(v);
return reinterpret_cast<char*>(ptr);
}
@@ -109,12 +83,11 @@ int VarintLength(uint64_t v) {
return len;
}
-const char* GetVarint32PtrFallback(const char* p,
- const char* limit,
+const char* GetVarint32PtrFallback(const char* p, const char* limit,
uint32_t* value) {
uint32_t result = 0;
for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
- uint32_t byte = *(reinterpret_cast<const unsigned char*>(p));
+ uint32_t byte = *(reinterpret_cast<const uint8_t*>(p));
p++;
if (byte & 128) {
// More bytes are present
@@ -125,14 +98,14 @@ const char* GetVarint32PtrFallback(const char* p,
return reinterpret_cast<const char*>(p);
}
}
- return NULL;
+ return nullptr;
}
bool GetVarint32(Slice* input, uint32_t* value) {
const char* p = input->data();
const char* limit = p + input->size();
const char* q = GetVarint32Ptr(p, limit, value);
- if (q == NULL) {
+ if (q == nullptr) {
return false;
} else {
*input = Slice(q, limit - q);
@@ -143,7 +116,7 @@ bool GetVarint32(Slice* input, uint32_t* value) {
const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) {
uint64_t result = 0;
for (uint32_t shift = 0; shift <= 63 && p < limit; shift += 7) {
- uint64_t byte = *(reinterpret_cast<const unsigned char*>(p));
+ uint64_t byte = *(reinterpret_cast<const uint8_t*>(p));
p++;
if (byte & 128) {
// More bytes are present
@@ -154,14 +127,14 @@ const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) {
return reinterpret_cast<const char*>(p);
}
}
- return NULL;
+ return nullptr;
}
bool GetVarint64(Slice* input, uint64_t* value) {
const char* p = input->data();
const char* limit = p + input->size();
const char* q = GetVarint64Ptr(p, limit, value);
- if (q == NULL) {
+ if (q == nullptr) {
return false;
} else {
*input = Slice(q, limit - q);
@@ -173,16 +146,15 @@ const char* GetLengthPrefixedSlice(const char* p, const char* limit,
Slice* result) {
uint32_t len;
p = GetVarint32Ptr(p, limit, &len);
- if (p == NULL) return NULL;
- if (p + len > limit) return NULL;
+ if (p == nullptr) return nullptr;
+ if (p + len > limit) return nullptr;
*result = Slice(p, len);
return p + len;
}
bool GetLengthPrefixedSlice(Slice* input, Slice* result) {
uint32_t len;
- if (GetVarint32(input, &len) &&
- input->size() >= len) {
+ if (GetVarint32(input, &len) && input->size() >= len) {
*result = Slice(input->data(), len);
input->remove_prefix(len);
return true;
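
Note: the EncodeVarint32/EncodeVarint64 changes above are purely stylistic; the encoding itself is the usual base-128 scheme, 7 payload bits per byte with the high bit marking continuation. A self-contained sketch of that scheme, independent of the leveldb helpers (the value 300 is chosen only for illustration):

#include <cassert>
#include <cstdint>
#include <string>

// Append v to *dst using base-128 varint encoding: 7 data bits per byte,
// high bit set on every byte except the last.
static void AppendVarint32(std::string* dst, uint32_t v) {
  while (v >= 0x80) {
    dst->push_back(static_cast<char>(v | 0x80));
    v >>= 7;
  }
  dst->push_back(static_cast<char>(v));
}

int main() {
  std::string s;
  AppendVarint32(&s, 300);  // 300 = 0b1'0010'1100
  assert(s.size() == 2);
  assert(static_cast<uint8_t>(s[0]) == 0xAC);  // low 7 bits + continuation bit
  assert(static_cast<uint8_t>(s[1]) == 0x02);  // remaining bits
  return 0;
}

Decoding reverses this by accumulating 7 bits per byte until a byte without the continuation bit is seen, which is what GetVarint32PtrFallback above does.
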
diff --git a/src/leveldb/util/coding.h b/src/leveldb/util/coding.h
index 3993c4a755..1983ae7173 100644
--- a/src/leveldb/util/coding.h
+++ b/src/leveldb/util/coding.h
@@ -10,87 +10,147 @@
#ifndef STORAGE_LEVELDB_UTIL_CODING_H_
#define STORAGE_LEVELDB_UTIL_CODING_H_
-#include <stdint.h>
-#include <string.h>
+#include <cstdint>
+#include <cstring>
#include <string>
+
#include "leveldb/slice.h"
#include "port/port.h"
namespace leveldb {
// Standard Put... routines append to a string
-extern void PutFixed32(std::string* dst, uint32_t value);
-extern void PutFixed64(std::string* dst, uint64_t value);
-extern void PutVarint32(std::string* dst, uint32_t value);
-extern void PutVarint64(std::string* dst, uint64_t value);
-extern void PutLengthPrefixedSlice(std::string* dst, const Slice& value);
+void PutFixed32(std::string* dst, uint32_t value);
+void PutFixed64(std::string* dst, uint64_t value);
+void PutVarint32(std::string* dst, uint32_t value);
+void PutVarint64(std::string* dst, uint64_t value);
+void PutLengthPrefixedSlice(std::string* dst, const Slice& value);
// Standard Get... routines parse a value from the beginning of a Slice
// and advance the slice past the parsed value.
-extern bool GetVarint32(Slice* input, uint32_t* value);
-extern bool GetVarint64(Slice* input, uint64_t* value);
-extern bool GetLengthPrefixedSlice(Slice* input, Slice* result);
+bool GetVarint32(Slice* input, uint32_t* value);
+bool GetVarint64(Slice* input, uint64_t* value);
+bool GetLengthPrefixedSlice(Slice* input, Slice* result);
// Pointer-based variants of GetVarint... These either store a value
// in *v and return a pointer just past the parsed value, or return
-// NULL on error. These routines only look at bytes in the range
+// nullptr on error. These routines only look at bytes in the range
// [p..limit-1]
-extern const char* GetVarint32Ptr(const char* p,const char* limit, uint32_t* v);
-extern const char* GetVarint64Ptr(const char* p,const char* limit, uint64_t* v);
+const char* GetVarint32Ptr(const char* p, const char* limit, uint32_t* v);
+const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* v);
// Returns the length of the varint32 or varint64 encoding of "v"
-extern int VarintLength(uint64_t v);
+int VarintLength(uint64_t v);
// Lower-level versions of Put... that write directly into a character buffer
+// and return a pointer just past the last byte written.
// REQUIRES: dst has enough space for the value being written
-extern void EncodeFixed32(char* dst, uint32_t value);
-extern void EncodeFixed64(char* dst, uint64_t value);
+char* EncodeVarint32(char* dst, uint32_t value);
+char* EncodeVarint64(char* dst, uint64_t value);
+
+// TODO(costan): Remove port::kLittleEndian and the fast paths based on
+// std::memcpy when clang learns to optimize the generic code, as
+// described in https://bugs.llvm.org/show_bug.cgi?id=41761
+//
+// The platform-independent code in DecodeFixed{32,64}() gets optimized to mov
+// on x86 and ldr on ARM64, by both clang and gcc. However, only gcc optimizes
+// the platform-independent code in EncodeFixed{32,64}() to mov / str.
// Lower-level versions of Put... that write directly into a character buffer
-// and return a pointer just past the last byte written.
// REQUIRES: dst has enough space for the value being written
-extern char* EncodeVarint32(char* dst, uint32_t value);
-extern char* EncodeVarint64(char* dst, uint64_t value);
+
+inline void EncodeFixed32(char* dst, uint32_t value) {
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+
+ if (port::kLittleEndian) {
+ // Fast path for little-endian CPUs. All major compilers optimize this to a
+ // single mov (x86_64) / str (ARM) instruction.
+ std::memcpy(buffer, &value, sizeof(uint32_t));
+ return;
+ }
+
+ // Platform-independent code.
+ // Currently, only gcc optimizes this to a single mov / str instruction.
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
+ buffer[2] = static_cast<uint8_t>(value >> 16);
+ buffer[3] = static_cast<uint8_t>(value >> 24);
+}
+
+inline void EncodeFixed64(char* dst, uint64_t value) {
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+
+ if (port::kLittleEndian) {
+ // Fast path for little-endian CPUs. All major compilers optimize this to a
+ // single mov (x86_64) / str (ARM) instruction.
+ std::memcpy(buffer, &value, sizeof(uint64_t));
+ return;
+ }
+
+ // Platform-independent code.
+ // Currently, only gcc optimizes this to a single mov / str instruction.
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
+ buffer[2] = static_cast<uint8_t>(value >> 16);
+ buffer[3] = static_cast<uint8_t>(value >> 24);
+ buffer[4] = static_cast<uint8_t>(value >> 32);
+ buffer[5] = static_cast<uint8_t>(value >> 40);
+ buffer[6] = static_cast<uint8_t>(value >> 48);
+ buffer[7] = static_cast<uint8_t>(value >> 56);
+}
// Lower-level versions of Get... that read directly from a character buffer
// without any bounds checking.
inline uint32_t DecodeFixed32(const char* ptr) {
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+
if (port::kLittleEndian) {
- // Load the raw bytes
+ // Fast path for little-endian CPUs. All major compilers optimize this to a
+ // single mov (x86_64) / ldr (ARM) instruction.
uint32_t result;
- memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load
+ std::memcpy(&result, buffer, sizeof(uint32_t));
return result;
- } else {
- return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0])))
- | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8)
- | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16)
- | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
}
+
+ // Platform-independent code.
+ // Clang and gcc optimize this to a single mov / ldr instruction.
+ return (static_cast<uint32_t>(buffer[0])) |
+ (static_cast<uint32_t>(buffer[1]) << 8) |
+ (static_cast<uint32_t>(buffer[2]) << 16) |
+ (static_cast<uint32_t>(buffer[3]) << 24);
}
inline uint64_t DecodeFixed64(const char* ptr) {
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+
if (port::kLittleEndian) {
- // Load the raw bytes
+ // Fast path for little-endian CPUs. All major compilers optimize this to a
+ // single mov (x86_64) / ldr (ARM) instruction.
uint64_t result;
- memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load
+ std::memcpy(&result, buffer, sizeof(uint64_t));
return result;
- } else {
- uint64_t lo = DecodeFixed32(ptr);
- uint64_t hi = DecodeFixed32(ptr + 4);
- return (hi << 32) | lo;
}
+
+ // Platform-independent code.
+ // Clang and gcc optimize this to a single mov / ldr instruction.
+ return (static_cast<uint64_t>(buffer[0])) |
+ (static_cast<uint64_t>(buffer[1]) << 8) |
+ (static_cast<uint64_t>(buffer[2]) << 16) |
+ (static_cast<uint64_t>(buffer[3]) << 24) |
+ (static_cast<uint64_t>(buffer[4]) << 32) |
+ (static_cast<uint64_t>(buffer[5]) << 40) |
+ (static_cast<uint64_t>(buffer[6]) << 48) |
+ (static_cast<uint64_t>(buffer[7]) << 56);
}
// Internal routine for use by fallback path of GetVarint32Ptr
-extern const char* GetVarint32PtrFallback(const char* p,
- const char* limit,
- uint32_t* value);
-inline const char* GetVarint32Ptr(const char* p,
- const char* limit,
+const char* GetVarint32PtrFallback(const char* p, const char* limit,
+ uint32_t* value);
+inline const char* GetVarint32Ptr(const char* p, const char* limit,
uint32_t* value) {
if (p < limit) {
- uint32_t result = *(reinterpret_cast<const unsigned char*>(p));
+ uint32_t result = *(reinterpret_cast<const uint8_t*>(p));
if ((result & 128) == 0) {
*value = result;
return p + 1;
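
Note: whichever path EncodeFixed32 takes, the byte layout is little-endian, so DecodeFixed32 can always reverse it. A small round-trip check against the declarations above (the test value is illustrative):

#include <cassert>
#include <cstdint>

#include "util/coding.h"

int main() {
  char buf[sizeof(uint32_t)];
  leveldb::EncodeFixed32(buf, 0x04030201u);

  // Both the memcpy fast path and the portable path write bytes
  // least-significant first, so the layout is fixed across platforms.
  assert(static_cast<uint8_t>(buf[0]) == 0x01);
  assert(static_cast<uint8_t>(buf[3]) == 0x04);
  assert(leveldb::DecodeFixed32(buf) == 0x04030201u);
  return 0;
}
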
diff --git a/src/leveldb/util/coding_test.cc b/src/leveldb/util/coding_test.cc
index 521541ea61..0d2a0c51f6 100644
--- a/src/leveldb/util/coding_test.cc
+++ b/src/leveldb/util/coding_test.cc
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "util/coding.h"
+#include <vector>
+#include "util/coding.h"
#include "util/testharness.h"
namespace leveldb {
-class Coding { };
+class Coding {};
TEST(Coding, Fixed32) {
std::string s;
@@ -38,15 +39,15 @@ TEST(Coding, Fixed64) {
uint64_t v = static_cast<uint64_t>(1) << power;
uint64_t actual;
actual = DecodeFixed64(p);
- ASSERT_EQ(v-1, actual);
+ ASSERT_EQ(v - 1, actual);
p += sizeof(uint64_t);
actual = DecodeFixed64(p);
- ASSERT_EQ(v+0, actual);
+ ASSERT_EQ(v + 0, actual);
p += sizeof(uint64_t);
actual = DecodeFixed64(p);
- ASSERT_EQ(v+1, actual);
+ ASSERT_EQ(v + 1, actual);
p += sizeof(uint64_t);
}
}
@@ -88,7 +89,7 @@ TEST(Coding, Varint32) {
uint32_t actual;
const char* start = p;
p = GetVarint32Ptr(p, limit, &actual);
- ASSERT_TRUE(p != NULL);
+ ASSERT_TRUE(p != nullptr);
ASSERT_EQ(expected, actual);
ASSERT_EQ(VarintLength(actual), p - start);
}
@@ -107,8 +108,8 @@ TEST(Coding, Varint64) {
// Test values near powers of two
const uint64_t power = 1ull << k;
values.push_back(power);
- values.push_back(power-1);
- values.push_back(power+1);
+ values.push_back(power - 1);
+ values.push_back(power + 1);
}
std::string s;
@@ -123,19 +124,18 @@ TEST(Coding, Varint64) {
uint64_t actual;
const char* start = p;
p = GetVarint64Ptr(p, limit, &actual);
- ASSERT_TRUE(p != NULL);
+ ASSERT_TRUE(p != nullptr);
ASSERT_EQ(values[i], actual);
ASSERT_EQ(VarintLength(actual), p - start);
}
ASSERT_EQ(p, limit);
-
}
TEST(Coding, Varint32Overflow) {
uint32_t result;
std::string input("\x81\x82\x83\x84\x85\x11");
- ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result)
- == NULL);
+ ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(),
+ &result) == nullptr);
}
TEST(Coding, Varint32Truncation) {
@@ -144,17 +144,18 @@ TEST(Coding, Varint32Truncation) {
PutVarint32(&s, large_value);
uint32_t result;
for (size_t len = 0; len < s.size() - 1; len++) {
- ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == NULL);
+ ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
}
- ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != NULL);
+ ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) !=
+ nullptr);
ASSERT_EQ(large_value, result);
}
TEST(Coding, Varint64Overflow) {
uint64_t result;
std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
- ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result)
- == NULL);
+ ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(),
+ &result) == nullptr);
}
TEST(Coding, Varint64Truncation) {
@@ -163,9 +164,10 @@ TEST(Coding, Varint64Truncation) {
PutVarint64(&s, large_value);
uint64_t result;
for (size_t len = 0; len < s.size() - 1; len++) {
- ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == NULL);
+ ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
}
- ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != NULL);
+ ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) !=
+ nullptr);
ASSERT_EQ(large_value, result);
}
@@ -191,6 +193,4 @@ TEST(Coding, Strings) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/comparator.cc b/src/leveldb/util/comparator.cc
index 4b7b5724ef..c5766e9462 100644
--- a/src/leveldb/util/comparator.cc
+++ b/src/leveldb/util/comparator.cc
@@ -2,33 +2,34 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include <algorithm>
-#include <stdint.h>
#include "leveldb/comparator.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <string>
+#include <type_traits>
+
#include "leveldb/slice.h"
-#include "port/port.h"
#include "util/logging.h"
+#include "util/no_destructor.h"
namespace leveldb {
-Comparator::~Comparator() { }
+Comparator::~Comparator() = default;
namespace {
class BytewiseComparatorImpl : public Comparator {
public:
- BytewiseComparatorImpl() { }
+ BytewiseComparatorImpl() = default;
- virtual const char* Name() const {
- return "leveldb.BytewiseComparator";
- }
+ const char* Name() const override { return "leveldb.BytewiseComparator"; }
- virtual int Compare(const Slice& a, const Slice& b) const {
+ int Compare(const Slice& a, const Slice& b) const override {
return a.compare(b);
}
- virtual void FindShortestSeparator(
- std::string* start,
- const Slice& limit) const {
+ void FindShortestSeparator(std::string* start,
+ const Slice& limit) const override {
// Find length of common prefix
size_t min_length = std::min(start->size(), limit.size());
size_t diff_index = 0;
@@ -50,14 +51,14 @@ class BytewiseComparatorImpl : public Comparator {
}
}
- virtual void FindShortSuccessor(std::string* key) const {
+ void FindShortSuccessor(std::string* key) const override {
// Find first character that can be incremented
size_t n = key->size();
for (size_t i = 0; i < n; i++) {
const uint8_t byte = (*key)[i];
if (byte != static_cast<uint8_t>(0xff)) {
(*key)[i] = byte + 1;
- key->resize(i+1);
+ key->resize(i + 1);
return;
}
}
@@ -66,16 +67,9 @@ class BytewiseComparatorImpl : public Comparator {
};
} // namespace
-static port::OnceType once = LEVELDB_ONCE_INIT;
-static const Comparator* bytewise;
-
-static void InitModule() {
- bytewise = new BytewiseComparatorImpl;
-}
-
const Comparator* BytewiseComparator() {
- port::InitOnce(&once, InitModule);
- return bytewise;
+ static NoDestructor<BytewiseComparatorImpl> singleton;
+ return singleton.get();
}
} // namespace leveldb
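
Note: BytewiseComparator() now relies on NoDestructor from util/no_destructor.h, which is not part of this hunk. The idea is to construct the singleton in function-local static storage and deliberately never run its destructor, so the comparator stays valid during static destruction. A rough sketch of the pattern, not the actual leveldb implementation (the class and function names below are illustrative):

#include <new>
#include <type_traits>
#include <utility>

// Minimal illustration: placement-new the instance into aligned storage and
// never destroy it, so the singleton outlives all static destructors.
template <typename InstanceType>
class NoDestructorSketch {
 public:
  template <typename... Args>
  explicit NoDestructorSketch(Args&&... args) {
    new (&storage_) InstanceType(std::forward<Args>(args)...);
  }
  ~NoDestructorSketch() = default;  // intentionally does not destroy the instance

  InstanceType* get() { return reinterpret_cast<InstanceType*>(&storage_); }

 private:
  typename std::aligned_storage<sizeof(InstanceType),
                                alignof(InstanceType)>::type storage_;
};

struct Greeter {
  const char* Name() const { return "greeter"; }
};

const Greeter* DefaultGreeter() {
  // Since C++11, initialization of a function-local static happens exactly
  // once and is thread-safe, which replaces the old port::InitOnce machinery.
  static NoDestructorSketch<Greeter> singleton;
  return singleton.get();
}
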
diff --git a/src/leveldb/util/crc32c.cc b/src/leveldb/util/crc32c.cc
index b3f40eeeed..c2e61f7dba 100644
--- a/src/leveldb/util/crc32c.cc
+++ b/src/leveldb/util/crc32c.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
-// A portable implementation of crc32c, optimized to handle
-// four bytes at a time.
+// A portable implementation of crc32c.
#include "util/crc32c.h"
+#include <stddef.h>
#include <stdint.h>
#include "port/port.h"
@@ -15,283 +15,256 @@
namespace leveldb {
namespace crc32c {
-static const uint32_t table0_[256] = {
- 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4,
- 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb,
- 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
- 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24,
- 0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b,
- 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
- 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54,
- 0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b,
- 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
- 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35,
- 0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5,
- 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
- 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45,
- 0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a,
- 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
- 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595,
- 0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48,
- 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
- 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687,
- 0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198,
- 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
- 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38,
- 0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8,
- 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
- 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096,
- 0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789,
- 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
- 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46,
- 0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9,
- 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
- 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36,
- 0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829,
- 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
- 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93,
- 0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043,
- 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
- 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3,
- 0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc,
- 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
- 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033,
- 0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652,
- 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
- 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d,
- 0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982,
- 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
- 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622,
- 0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2,
- 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
- 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530,
- 0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f,
- 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
- 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0,
- 0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f,
- 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
- 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90,
- 0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f,
- 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
- 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1,
- 0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321,
- 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
- 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81,
- 0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e,
- 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
- 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351
-};
-static const uint32_t table1_[256] = {
- 0x00000000, 0x13a29877, 0x274530ee, 0x34e7a899,
- 0x4e8a61dc, 0x5d28f9ab, 0x69cf5132, 0x7a6dc945,
- 0x9d14c3b8, 0x8eb65bcf, 0xba51f356, 0xa9f36b21,
- 0xd39ea264, 0xc03c3a13, 0xf4db928a, 0xe7790afd,
- 0x3fc5f181, 0x2c6769f6, 0x1880c16f, 0x0b225918,
- 0x714f905d, 0x62ed082a, 0x560aa0b3, 0x45a838c4,
- 0xa2d13239, 0xb173aa4e, 0x859402d7, 0x96369aa0,
- 0xec5b53e5, 0xfff9cb92, 0xcb1e630b, 0xd8bcfb7c,
- 0x7f8be302, 0x6c297b75, 0x58ced3ec, 0x4b6c4b9b,
- 0x310182de, 0x22a31aa9, 0x1644b230, 0x05e62a47,
- 0xe29f20ba, 0xf13db8cd, 0xc5da1054, 0xd6788823,
- 0xac154166, 0xbfb7d911, 0x8b507188, 0x98f2e9ff,
- 0x404e1283, 0x53ec8af4, 0x670b226d, 0x74a9ba1a,
- 0x0ec4735f, 0x1d66eb28, 0x298143b1, 0x3a23dbc6,
- 0xdd5ad13b, 0xcef8494c, 0xfa1fe1d5, 0xe9bd79a2,
- 0x93d0b0e7, 0x80722890, 0xb4958009, 0xa737187e,
- 0xff17c604, 0xecb55e73, 0xd852f6ea, 0xcbf06e9d,
- 0xb19da7d8, 0xa23f3faf, 0x96d89736, 0x857a0f41,
- 0x620305bc, 0x71a19dcb, 0x45463552, 0x56e4ad25,
- 0x2c896460, 0x3f2bfc17, 0x0bcc548e, 0x186eccf9,
- 0xc0d23785, 0xd370aff2, 0xe797076b, 0xf4359f1c,
- 0x8e585659, 0x9dface2e, 0xa91d66b7, 0xbabffec0,
- 0x5dc6f43d, 0x4e646c4a, 0x7a83c4d3, 0x69215ca4,
- 0x134c95e1, 0x00ee0d96, 0x3409a50f, 0x27ab3d78,
- 0x809c2506, 0x933ebd71, 0xa7d915e8, 0xb47b8d9f,
- 0xce1644da, 0xddb4dcad, 0xe9537434, 0xfaf1ec43,
- 0x1d88e6be, 0x0e2a7ec9, 0x3acdd650, 0x296f4e27,
- 0x53028762, 0x40a01f15, 0x7447b78c, 0x67e52ffb,
- 0xbf59d487, 0xacfb4cf0, 0x981ce469, 0x8bbe7c1e,
- 0xf1d3b55b, 0xe2712d2c, 0xd69685b5, 0xc5341dc2,
- 0x224d173f, 0x31ef8f48, 0x050827d1, 0x16aabfa6,
- 0x6cc776e3, 0x7f65ee94, 0x4b82460d, 0x5820de7a,
- 0xfbc3faf9, 0xe861628e, 0xdc86ca17, 0xcf245260,
- 0xb5499b25, 0xa6eb0352, 0x920cabcb, 0x81ae33bc,
- 0x66d73941, 0x7575a136, 0x419209af, 0x523091d8,
- 0x285d589d, 0x3bffc0ea, 0x0f186873, 0x1cbaf004,
- 0xc4060b78, 0xd7a4930f, 0xe3433b96, 0xf0e1a3e1,
- 0x8a8c6aa4, 0x992ef2d3, 0xadc95a4a, 0xbe6bc23d,
- 0x5912c8c0, 0x4ab050b7, 0x7e57f82e, 0x6df56059,
- 0x1798a91c, 0x043a316b, 0x30dd99f2, 0x237f0185,
- 0x844819fb, 0x97ea818c, 0xa30d2915, 0xb0afb162,
- 0xcac27827, 0xd960e050, 0xed8748c9, 0xfe25d0be,
- 0x195cda43, 0x0afe4234, 0x3e19eaad, 0x2dbb72da,
- 0x57d6bb9f, 0x447423e8, 0x70938b71, 0x63311306,
- 0xbb8de87a, 0xa82f700d, 0x9cc8d894, 0x8f6a40e3,
- 0xf50789a6, 0xe6a511d1, 0xd242b948, 0xc1e0213f,
- 0x26992bc2, 0x353bb3b5, 0x01dc1b2c, 0x127e835b,
- 0x68134a1e, 0x7bb1d269, 0x4f567af0, 0x5cf4e287,
- 0x04d43cfd, 0x1776a48a, 0x23910c13, 0x30339464,
- 0x4a5e5d21, 0x59fcc556, 0x6d1b6dcf, 0x7eb9f5b8,
- 0x99c0ff45, 0x8a626732, 0xbe85cfab, 0xad2757dc,
- 0xd74a9e99, 0xc4e806ee, 0xf00fae77, 0xe3ad3600,
- 0x3b11cd7c, 0x28b3550b, 0x1c54fd92, 0x0ff665e5,
- 0x759baca0, 0x663934d7, 0x52de9c4e, 0x417c0439,
- 0xa6050ec4, 0xb5a796b3, 0x81403e2a, 0x92e2a65d,
- 0xe88f6f18, 0xfb2df76f, 0xcfca5ff6, 0xdc68c781,
- 0x7b5fdfff, 0x68fd4788, 0x5c1aef11, 0x4fb87766,
- 0x35d5be23, 0x26772654, 0x12908ecd, 0x013216ba,
- 0xe64b1c47, 0xf5e98430, 0xc10e2ca9, 0xd2acb4de,
- 0xa8c17d9b, 0xbb63e5ec, 0x8f844d75, 0x9c26d502,
- 0x449a2e7e, 0x5738b609, 0x63df1e90, 0x707d86e7,
- 0x0a104fa2, 0x19b2d7d5, 0x2d557f4c, 0x3ef7e73b,
- 0xd98eedc6, 0xca2c75b1, 0xfecbdd28, 0xed69455f,
- 0x97048c1a, 0x84a6146d, 0xb041bcf4, 0xa3e32483
-};
-static const uint32_t table2_[256] = {
- 0x00000000, 0xa541927e, 0x4f6f520d, 0xea2ec073,
- 0x9edea41a, 0x3b9f3664, 0xd1b1f617, 0x74f06469,
- 0x38513ec5, 0x9d10acbb, 0x773e6cc8, 0xd27ffeb6,
- 0xa68f9adf, 0x03ce08a1, 0xe9e0c8d2, 0x4ca15aac,
- 0x70a27d8a, 0xd5e3eff4, 0x3fcd2f87, 0x9a8cbdf9,
- 0xee7cd990, 0x4b3d4bee, 0xa1138b9d, 0x045219e3,
- 0x48f3434f, 0xedb2d131, 0x079c1142, 0xa2dd833c,
- 0xd62de755, 0x736c752b, 0x9942b558, 0x3c032726,
- 0xe144fb14, 0x4405696a, 0xae2ba919, 0x0b6a3b67,
- 0x7f9a5f0e, 0xdadbcd70, 0x30f50d03, 0x95b49f7d,
- 0xd915c5d1, 0x7c5457af, 0x967a97dc, 0x333b05a2,
- 0x47cb61cb, 0xe28af3b5, 0x08a433c6, 0xade5a1b8,
- 0x91e6869e, 0x34a714e0, 0xde89d493, 0x7bc846ed,
- 0x0f382284, 0xaa79b0fa, 0x40577089, 0xe516e2f7,
- 0xa9b7b85b, 0x0cf62a25, 0xe6d8ea56, 0x43997828,
- 0x37691c41, 0x92288e3f, 0x78064e4c, 0xdd47dc32,
- 0xc76580d9, 0x622412a7, 0x880ad2d4, 0x2d4b40aa,
- 0x59bb24c3, 0xfcfab6bd, 0x16d476ce, 0xb395e4b0,
- 0xff34be1c, 0x5a752c62, 0xb05bec11, 0x151a7e6f,
- 0x61ea1a06, 0xc4ab8878, 0x2e85480b, 0x8bc4da75,
- 0xb7c7fd53, 0x12866f2d, 0xf8a8af5e, 0x5de93d20,
- 0x29195949, 0x8c58cb37, 0x66760b44, 0xc337993a,
- 0x8f96c396, 0x2ad751e8, 0xc0f9919b, 0x65b803e5,
- 0x1148678c, 0xb409f5f2, 0x5e273581, 0xfb66a7ff,
- 0x26217bcd, 0x8360e9b3, 0x694e29c0, 0xcc0fbbbe,
- 0xb8ffdfd7, 0x1dbe4da9, 0xf7908dda, 0x52d11fa4,
- 0x1e704508, 0xbb31d776, 0x511f1705, 0xf45e857b,
- 0x80aee112, 0x25ef736c, 0xcfc1b31f, 0x6a802161,
- 0x56830647, 0xf3c29439, 0x19ec544a, 0xbcadc634,
- 0xc85da25d, 0x6d1c3023, 0x8732f050, 0x2273622e,
- 0x6ed23882, 0xcb93aafc, 0x21bd6a8f, 0x84fcf8f1,
- 0xf00c9c98, 0x554d0ee6, 0xbf63ce95, 0x1a225ceb,
- 0x8b277743, 0x2e66e53d, 0xc448254e, 0x6109b730,
- 0x15f9d359, 0xb0b84127, 0x5a968154, 0xffd7132a,
- 0xb3764986, 0x1637dbf8, 0xfc191b8b, 0x595889f5,
- 0x2da8ed9c, 0x88e97fe2, 0x62c7bf91, 0xc7862def,
- 0xfb850ac9, 0x5ec498b7, 0xb4ea58c4, 0x11abcaba,
- 0x655baed3, 0xc01a3cad, 0x2a34fcde, 0x8f756ea0,
- 0xc3d4340c, 0x6695a672, 0x8cbb6601, 0x29faf47f,
- 0x5d0a9016, 0xf84b0268, 0x1265c21b, 0xb7245065,
- 0x6a638c57, 0xcf221e29, 0x250cde5a, 0x804d4c24,
- 0xf4bd284d, 0x51fcba33, 0xbbd27a40, 0x1e93e83e,
- 0x5232b292, 0xf77320ec, 0x1d5de09f, 0xb81c72e1,
- 0xccec1688, 0x69ad84f6, 0x83834485, 0x26c2d6fb,
- 0x1ac1f1dd, 0xbf8063a3, 0x55aea3d0, 0xf0ef31ae,
- 0x841f55c7, 0x215ec7b9, 0xcb7007ca, 0x6e3195b4,
- 0x2290cf18, 0x87d15d66, 0x6dff9d15, 0xc8be0f6b,
- 0xbc4e6b02, 0x190ff97c, 0xf321390f, 0x5660ab71,
- 0x4c42f79a, 0xe90365e4, 0x032da597, 0xa66c37e9,
- 0xd29c5380, 0x77ddc1fe, 0x9df3018d, 0x38b293f3,
- 0x7413c95f, 0xd1525b21, 0x3b7c9b52, 0x9e3d092c,
- 0xeacd6d45, 0x4f8cff3b, 0xa5a23f48, 0x00e3ad36,
- 0x3ce08a10, 0x99a1186e, 0x738fd81d, 0xd6ce4a63,
- 0xa23e2e0a, 0x077fbc74, 0xed517c07, 0x4810ee79,
- 0x04b1b4d5, 0xa1f026ab, 0x4bdee6d8, 0xee9f74a6,
- 0x9a6f10cf, 0x3f2e82b1, 0xd50042c2, 0x7041d0bc,
- 0xad060c8e, 0x08479ef0, 0xe2695e83, 0x4728ccfd,
- 0x33d8a894, 0x96993aea, 0x7cb7fa99, 0xd9f668e7,
- 0x9557324b, 0x3016a035, 0xda386046, 0x7f79f238,
- 0x0b899651, 0xaec8042f, 0x44e6c45c, 0xe1a75622,
- 0xdda47104, 0x78e5e37a, 0x92cb2309, 0x378ab177,
- 0x437ad51e, 0xe63b4760, 0x0c158713, 0xa954156d,
- 0xe5f54fc1, 0x40b4ddbf, 0xaa9a1dcc, 0x0fdb8fb2,
- 0x7b2bebdb, 0xde6a79a5, 0x3444b9d6, 0x91052ba8
-};
-static const uint32_t table3_[256] = {
- 0x00000000, 0xdd45aab8, 0xbf672381, 0x62228939,
- 0x7b2231f3, 0xa6679b4b, 0xc4451272, 0x1900b8ca,
- 0xf64463e6, 0x2b01c95e, 0x49234067, 0x9466eadf,
- 0x8d665215, 0x5023f8ad, 0x32017194, 0xef44db2c,
- 0xe964b13d, 0x34211b85, 0x560392bc, 0x8b463804,
- 0x924680ce, 0x4f032a76, 0x2d21a34f, 0xf06409f7,
- 0x1f20d2db, 0xc2657863, 0xa047f15a, 0x7d025be2,
- 0x6402e328, 0xb9474990, 0xdb65c0a9, 0x06206a11,
- 0xd725148b, 0x0a60be33, 0x6842370a, 0xb5079db2,
- 0xac072578, 0x71428fc0, 0x136006f9, 0xce25ac41,
- 0x2161776d, 0xfc24ddd5, 0x9e0654ec, 0x4343fe54,
- 0x5a43469e, 0x8706ec26, 0xe524651f, 0x3861cfa7,
- 0x3e41a5b6, 0xe3040f0e, 0x81268637, 0x5c632c8f,
- 0x45639445, 0x98263efd, 0xfa04b7c4, 0x27411d7c,
- 0xc805c650, 0x15406ce8, 0x7762e5d1, 0xaa274f69,
- 0xb327f7a3, 0x6e625d1b, 0x0c40d422, 0xd1057e9a,
- 0xaba65fe7, 0x76e3f55f, 0x14c17c66, 0xc984d6de,
- 0xd0846e14, 0x0dc1c4ac, 0x6fe34d95, 0xb2a6e72d,
- 0x5de23c01, 0x80a796b9, 0xe2851f80, 0x3fc0b538,
- 0x26c00df2, 0xfb85a74a, 0x99a72e73, 0x44e284cb,
- 0x42c2eeda, 0x9f874462, 0xfda5cd5b, 0x20e067e3,
- 0x39e0df29, 0xe4a57591, 0x8687fca8, 0x5bc25610,
- 0xb4868d3c, 0x69c32784, 0x0be1aebd, 0xd6a40405,
- 0xcfa4bccf, 0x12e11677, 0x70c39f4e, 0xad8635f6,
- 0x7c834b6c, 0xa1c6e1d4, 0xc3e468ed, 0x1ea1c255,
- 0x07a17a9f, 0xdae4d027, 0xb8c6591e, 0x6583f3a6,
- 0x8ac7288a, 0x57828232, 0x35a00b0b, 0xe8e5a1b3,
- 0xf1e51979, 0x2ca0b3c1, 0x4e823af8, 0x93c79040,
- 0x95e7fa51, 0x48a250e9, 0x2a80d9d0, 0xf7c57368,
- 0xeec5cba2, 0x3380611a, 0x51a2e823, 0x8ce7429b,
- 0x63a399b7, 0xbee6330f, 0xdcc4ba36, 0x0181108e,
- 0x1881a844, 0xc5c402fc, 0xa7e68bc5, 0x7aa3217d,
- 0x52a0c93f, 0x8fe56387, 0xedc7eabe, 0x30824006,
- 0x2982f8cc, 0xf4c75274, 0x96e5db4d, 0x4ba071f5,
- 0xa4e4aad9, 0x79a10061, 0x1b838958, 0xc6c623e0,
- 0xdfc69b2a, 0x02833192, 0x60a1b8ab, 0xbde41213,
- 0xbbc47802, 0x6681d2ba, 0x04a35b83, 0xd9e6f13b,
- 0xc0e649f1, 0x1da3e349, 0x7f816a70, 0xa2c4c0c8,
- 0x4d801be4, 0x90c5b15c, 0xf2e73865, 0x2fa292dd,
- 0x36a22a17, 0xebe780af, 0x89c50996, 0x5480a32e,
- 0x8585ddb4, 0x58c0770c, 0x3ae2fe35, 0xe7a7548d,
- 0xfea7ec47, 0x23e246ff, 0x41c0cfc6, 0x9c85657e,
- 0x73c1be52, 0xae8414ea, 0xcca69dd3, 0x11e3376b,
- 0x08e38fa1, 0xd5a62519, 0xb784ac20, 0x6ac10698,
- 0x6ce16c89, 0xb1a4c631, 0xd3864f08, 0x0ec3e5b0,
- 0x17c35d7a, 0xca86f7c2, 0xa8a47efb, 0x75e1d443,
- 0x9aa50f6f, 0x47e0a5d7, 0x25c22cee, 0xf8878656,
- 0xe1873e9c, 0x3cc29424, 0x5ee01d1d, 0x83a5b7a5,
- 0xf90696d8, 0x24433c60, 0x4661b559, 0x9b241fe1,
- 0x8224a72b, 0x5f610d93, 0x3d4384aa, 0xe0062e12,
- 0x0f42f53e, 0xd2075f86, 0xb025d6bf, 0x6d607c07,
- 0x7460c4cd, 0xa9256e75, 0xcb07e74c, 0x16424df4,
- 0x106227e5, 0xcd278d5d, 0xaf050464, 0x7240aedc,
- 0x6b401616, 0xb605bcae, 0xd4273597, 0x09629f2f,
- 0xe6264403, 0x3b63eebb, 0x59416782, 0x8404cd3a,
- 0x9d0475f0, 0x4041df48, 0x22635671, 0xff26fcc9,
- 0x2e238253, 0xf36628eb, 0x9144a1d2, 0x4c010b6a,
- 0x5501b3a0, 0x88441918, 0xea669021, 0x37233a99,
- 0xd867e1b5, 0x05224b0d, 0x6700c234, 0xba45688c,
- 0xa345d046, 0x7e007afe, 0x1c22f3c7, 0xc167597f,
- 0xc747336e, 0x1a0299d6, 0x782010ef, 0xa565ba57,
- 0xbc65029d, 0x6120a825, 0x0302211c, 0xde478ba4,
- 0x31035088, 0xec46fa30, 0x8e647309, 0x5321d9b1,
- 0x4a21617b, 0x9764cbc3, 0xf54642fa, 0x2803e842
-};
+namespace {
-// Used to fetch a naturally-aligned 32-bit word in little endian byte-order
-static inline uint32_t LE_LOAD32(const uint8_t *p) {
- return DecodeFixed32(reinterpret_cast<const char*>(p));
+const uint32_t kByteExtensionTable[256] = {
+ 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c,
+ 0x26a1e7e8, 0xd4ca64eb, 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
+ 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, 0x105ec76f, 0xe235446c,
+ 0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
+ 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc,
+ 0xbc267848, 0x4e4dfb4b, 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
+ 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, 0xaa64d611, 0x580f5512,
+ 0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
+ 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad,
+ 0x1642ae59, 0xe4292d5a, 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
+ 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, 0x417b1dbc, 0xb3109ebf,
+ 0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
+ 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f,
+ 0xed03a29b, 0x1f682198, 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
+ 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, 0xdbfc821c, 0x2997011f,
+ 0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
+ 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e,
+ 0x4767748a, 0xb50cf789, 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
+ 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, 0x7198540d, 0x83f3d70e,
+ 0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
+ 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de,
+ 0xdde0eb2a, 0x2f8b6829, 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
+ 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, 0x082f63b7, 0xfa44e0b4,
+ 0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
+ 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b,
+ 0xb4091bff, 0x466298fc, 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
+ 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, 0xa24bb5a6, 0x502036a5,
+ 0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
+ 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975,
+ 0x0e330a81, 0xfc588982, 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
+ 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, 0x38cc2a06, 0xcaa7a905,
+ 0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
+ 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8,
+ 0xe52cc12c, 0x1747422f, 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
+ 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, 0xd3d3e1ab, 0x21b862a8,
+ 0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
+ 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78,
+ 0x7fab5e8c, 0x8dc0dd8f, 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
+ 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, 0x69e9f0d5, 0x9b8273d6,
+ 0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
+ 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69,
+ 0xd5cf889d, 0x27a40b9e, 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
+ 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351};
+
+const uint32_t kStrideExtensionTable0[256] = {
+ 0x00000000, 0x30d23865, 0x61a470ca, 0x517648af, 0xc348e194, 0xf39ad9f1,
+ 0xa2ec915e, 0x923ea93b, 0x837db5d9, 0xb3af8dbc, 0xe2d9c513, 0xd20bfd76,
+ 0x4035544d, 0x70e76c28, 0x21912487, 0x11431ce2, 0x03171d43, 0x33c52526,
+ 0x62b36d89, 0x526155ec, 0xc05ffcd7, 0xf08dc4b2, 0xa1fb8c1d, 0x9129b478,
+ 0x806aa89a, 0xb0b890ff, 0xe1ced850, 0xd11ce035, 0x4322490e, 0x73f0716b,
+ 0x228639c4, 0x125401a1, 0x062e3a86, 0x36fc02e3, 0x678a4a4c, 0x57587229,
+ 0xc566db12, 0xf5b4e377, 0xa4c2abd8, 0x941093bd, 0x85538f5f, 0xb581b73a,
+ 0xe4f7ff95, 0xd425c7f0, 0x461b6ecb, 0x76c956ae, 0x27bf1e01, 0x176d2664,
+ 0x053927c5, 0x35eb1fa0, 0x649d570f, 0x544f6f6a, 0xc671c651, 0xf6a3fe34,
+ 0xa7d5b69b, 0x97078efe, 0x8644921c, 0xb696aa79, 0xe7e0e2d6, 0xd732dab3,
+ 0x450c7388, 0x75de4bed, 0x24a80342, 0x147a3b27, 0x0c5c750c, 0x3c8e4d69,
+ 0x6df805c6, 0x5d2a3da3, 0xcf149498, 0xffc6acfd, 0xaeb0e452, 0x9e62dc37,
+ 0x8f21c0d5, 0xbff3f8b0, 0xee85b01f, 0xde57887a, 0x4c692141, 0x7cbb1924,
+ 0x2dcd518b, 0x1d1f69ee, 0x0f4b684f, 0x3f99502a, 0x6eef1885, 0x5e3d20e0,
+ 0xcc0389db, 0xfcd1b1be, 0xada7f911, 0x9d75c174, 0x8c36dd96, 0xbce4e5f3,
+ 0xed92ad5c, 0xdd409539, 0x4f7e3c02, 0x7fac0467, 0x2eda4cc8, 0x1e0874ad,
+ 0x0a724f8a, 0x3aa077ef, 0x6bd63f40, 0x5b040725, 0xc93aae1e, 0xf9e8967b,
+ 0xa89eded4, 0x984ce6b1, 0x890ffa53, 0xb9ddc236, 0xe8ab8a99, 0xd879b2fc,
+ 0x4a471bc7, 0x7a9523a2, 0x2be36b0d, 0x1b315368, 0x096552c9, 0x39b76aac,
+ 0x68c12203, 0x58131a66, 0xca2db35d, 0xfaff8b38, 0xab89c397, 0x9b5bfbf2,
+ 0x8a18e710, 0xbacadf75, 0xebbc97da, 0xdb6eafbf, 0x49500684, 0x79823ee1,
+ 0x28f4764e, 0x18264e2b, 0x18b8ea18, 0x286ad27d, 0x791c9ad2, 0x49cea2b7,
+ 0xdbf00b8c, 0xeb2233e9, 0xba547b46, 0x8a864323, 0x9bc55fc1, 0xab1767a4,
+ 0xfa612f0b, 0xcab3176e, 0x588dbe55, 0x685f8630, 0x3929ce9f, 0x09fbf6fa,
+ 0x1baff75b, 0x2b7dcf3e, 0x7a0b8791, 0x4ad9bff4, 0xd8e716cf, 0xe8352eaa,
+ 0xb9436605, 0x89915e60, 0x98d24282, 0xa8007ae7, 0xf9763248, 0xc9a40a2d,
+ 0x5b9aa316, 0x6b489b73, 0x3a3ed3dc, 0x0aecebb9, 0x1e96d09e, 0x2e44e8fb,
+ 0x7f32a054, 0x4fe09831, 0xddde310a, 0xed0c096f, 0xbc7a41c0, 0x8ca879a5,
+ 0x9deb6547, 0xad395d22, 0xfc4f158d, 0xcc9d2de8, 0x5ea384d3, 0x6e71bcb6,
+ 0x3f07f419, 0x0fd5cc7c, 0x1d81cddd, 0x2d53f5b8, 0x7c25bd17, 0x4cf78572,
+ 0xdec92c49, 0xee1b142c, 0xbf6d5c83, 0x8fbf64e6, 0x9efc7804, 0xae2e4061,
+ 0xff5808ce, 0xcf8a30ab, 0x5db49990, 0x6d66a1f5, 0x3c10e95a, 0x0cc2d13f,
+ 0x14e49f14, 0x2436a771, 0x7540efde, 0x4592d7bb, 0xd7ac7e80, 0xe77e46e5,
+ 0xb6080e4a, 0x86da362f, 0x97992acd, 0xa74b12a8, 0xf63d5a07, 0xc6ef6262,
+ 0x54d1cb59, 0x6403f33c, 0x3575bb93, 0x05a783f6, 0x17f38257, 0x2721ba32,
+ 0x7657f29d, 0x4685caf8, 0xd4bb63c3, 0xe4695ba6, 0xb51f1309, 0x85cd2b6c,
+ 0x948e378e, 0xa45c0feb, 0xf52a4744, 0xc5f87f21, 0x57c6d61a, 0x6714ee7f,
+ 0x3662a6d0, 0x06b09eb5, 0x12caa592, 0x22189df7, 0x736ed558, 0x43bced3d,
+ 0xd1824406, 0xe1507c63, 0xb02634cc, 0x80f40ca9, 0x91b7104b, 0xa165282e,
+ 0xf0136081, 0xc0c158e4, 0x52fff1df, 0x622dc9ba, 0x335b8115, 0x0389b970,
+ 0x11ddb8d1, 0x210f80b4, 0x7079c81b, 0x40abf07e, 0xd2955945, 0xe2476120,
+ 0xb331298f, 0x83e311ea, 0x92a00d08, 0xa272356d, 0xf3047dc2, 0xc3d645a7,
+ 0x51e8ec9c, 0x613ad4f9, 0x304c9c56, 0x009ea433};
+
+const uint32_t kStrideExtensionTable1[256] = {
+ 0x00000000, 0x54075546, 0xa80eaa8c, 0xfc09ffca, 0x55f123e9, 0x01f676af,
+ 0xfdff8965, 0xa9f8dc23, 0xabe247d2, 0xffe51294, 0x03eced5e, 0x57ebb818,
+ 0xfe13643b, 0xaa14317d, 0x561dceb7, 0x021a9bf1, 0x5228f955, 0x062fac13,
+ 0xfa2653d9, 0xae21069f, 0x07d9dabc, 0x53de8ffa, 0xafd77030, 0xfbd02576,
+ 0xf9cabe87, 0xadcdebc1, 0x51c4140b, 0x05c3414d, 0xac3b9d6e, 0xf83cc828,
+ 0x043537e2, 0x503262a4, 0xa451f2aa, 0xf056a7ec, 0x0c5f5826, 0x58580d60,
+ 0xf1a0d143, 0xa5a78405, 0x59ae7bcf, 0x0da92e89, 0x0fb3b578, 0x5bb4e03e,
+ 0xa7bd1ff4, 0xf3ba4ab2, 0x5a429691, 0x0e45c3d7, 0xf24c3c1d, 0xa64b695b,
+ 0xf6790bff, 0xa27e5eb9, 0x5e77a173, 0x0a70f435, 0xa3882816, 0xf78f7d50,
+ 0x0b86829a, 0x5f81d7dc, 0x5d9b4c2d, 0x099c196b, 0xf595e6a1, 0xa192b3e7,
+ 0x086a6fc4, 0x5c6d3a82, 0xa064c548, 0xf463900e, 0x4d4f93a5, 0x1948c6e3,
+ 0xe5413929, 0xb1466c6f, 0x18beb04c, 0x4cb9e50a, 0xb0b01ac0, 0xe4b74f86,
+ 0xe6add477, 0xb2aa8131, 0x4ea37efb, 0x1aa42bbd, 0xb35cf79e, 0xe75ba2d8,
+ 0x1b525d12, 0x4f550854, 0x1f676af0, 0x4b603fb6, 0xb769c07c, 0xe36e953a,
+ 0x4a964919, 0x1e911c5f, 0xe298e395, 0xb69fb6d3, 0xb4852d22, 0xe0827864,
+ 0x1c8b87ae, 0x488cd2e8, 0xe1740ecb, 0xb5735b8d, 0x497aa447, 0x1d7df101,
+ 0xe91e610f, 0xbd193449, 0x4110cb83, 0x15179ec5, 0xbcef42e6, 0xe8e817a0,
+ 0x14e1e86a, 0x40e6bd2c, 0x42fc26dd, 0x16fb739b, 0xeaf28c51, 0xbef5d917,
+ 0x170d0534, 0x430a5072, 0xbf03afb8, 0xeb04fafe, 0xbb36985a, 0xef31cd1c,
+ 0x133832d6, 0x473f6790, 0xeec7bbb3, 0xbac0eef5, 0x46c9113f, 0x12ce4479,
+ 0x10d4df88, 0x44d38ace, 0xb8da7504, 0xecdd2042, 0x4525fc61, 0x1122a927,
+ 0xed2b56ed, 0xb92c03ab, 0x9a9f274a, 0xce98720c, 0x32918dc6, 0x6696d880,
+ 0xcf6e04a3, 0x9b6951e5, 0x6760ae2f, 0x3367fb69, 0x317d6098, 0x657a35de,
+ 0x9973ca14, 0xcd749f52, 0x648c4371, 0x308b1637, 0xcc82e9fd, 0x9885bcbb,
+ 0xc8b7de1f, 0x9cb08b59, 0x60b97493, 0x34be21d5, 0x9d46fdf6, 0xc941a8b0,
+ 0x3548577a, 0x614f023c, 0x635599cd, 0x3752cc8b, 0xcb5b3341, 0x9f5c6607,
+ 0x36a4ba24, 0x62a3ef62, 0x9eaa10a8, 0xcaad45ee, 0x3eced5e0, 0x6ac980a6,
+ 0x96c07f6c, 0xc2c72a2a, 0x6b3ff609, 0x3f38a34f, 0xc3315c85, 0x973609c3,
+ 0x952c9232, 0xc12bc774, 0x3d2238be, 0x69256df8, 0xc0ddb1db, 0x94dae49d,
+ 0x68d31b57, 0x3cd44e11, 0x6ce62cb5, 0x38e179f3, 0xc4e88639, 0x90efd37f,
+ 0x39170f5c, 0x6d105a1a, 0x9119a5d0, 0xc51ef096, 0xc7046b67, 0x93033e21,
+ 0x6f0ac1eb, 0x3b0d94ad, 0x92f5488e, 0xc6f21dc8, 0x3afbe202, 0x6efcb744,
+ 0xd7d0b4ef, 0x83d7e1a9, 0x7fde1e63, 0x2bd94b25, 0x82219706, 0xd626c240,
+ 0x2a2f3d8a, 0x7e2868cc, 0x7c32f33d, 0x2835a67b, 0xd43c59b1, 0x803b0cf7,
+ 0x29c3d0d4, 0x7dc48592, 0x81cd7a58, 0xd5ca2f1e, 0x85f84dba, 0xd1ff18fc,
+ 0x2df6e736, 0x79f1b270, 0xd0096e53, 0x840e3b15, 0x7807c4df, 0x2c009199,
+ 0x2e1a0a68, 0x7a1d5f2e, 0x8614a0e4, 0xd213f5a2, 0x7beb2981, 0x2fec7cc7,
+ 0xd3e5830d, 0x87e2d64b, 0x73814645, 0x27861303, 0xdb8fecc9, 0x8f88b98f,
+ 0x267065ac, 0x727730ea, 0x8e7ecf20, 0xda799a66, 0xd8630197, 0x8c6454d1,
+ 0x706dab1b, 0x246afe5d, 0x8d92227e, 0xd9957738, 0x259c88f2, 0x719bddb4,
+ 0x21a9bf10, 0x75aeea56, 0x89a7159c, 0xdda040da, 0x74589cf9, 0x205fc9bf,
+ 0xdc563675, 0x88516333, 0x8a4bf8c2, 0xde4cad84, 0x2245524e, 0x76420708,
+ 0xdfbadb2b, 0x8bbd8e6d, 0x77b471a7, 0x23b324e1};
+
+const uint32_t kStrideExtensionTable2[256] = {
+ 0x00000000, 0x678efd01, 0xcf1dfa02, 0xa8930703, 0x9bd782f5, 0xfc597ff4,
+ 0x54ca78f7, 0x334485f6, 0x3243731b, 0x55cd8e1a, 0xfd5e8919, 0x9ad07418,
+ 0xa994f1ee, 0xce1a0cef, 0x66890bec, 0x0107f6ed, 0x6486e636, 0x03081b37,
+ 0xab9b1c34, 0xcc15e135, 0xff5164c3, 0x98df99c2, 0x304c9ec1, 0x57c263c0,
+ 0x56c5952d, 0x314b682c, 0x99d86f2f, 0xfe56922e, 0xcd1217d8, 0xaa9cead9,
+ 0x020fedda, 0x658110db, 0xc90dcc6c, 0xae83316d, 0x0610366e, 0x619ecb6f,
+ 0x52da4e99, 0x3554b398, 0x9dc7b49b, 0xfa49499a, 0xfb4ebf77, 0x9cc04276,
+ 0x34534575, 0x53ddb874, 0x60993d82, 0x0717c083, 0xaf84c780, 0xc80a3a81,
+ 0xad8b2a5a, 0xca05d75b, 0x6296d058, 0x05182d59, 0x365ca8af, 0x51d255ae,
+ 0xf94152ad, 0x9ecfafac, 0x9fc85941, 0xf846a440, 0x50d5a343, 0x375b5e42,
+ 0x041fdbb4, 0x639126b5, 0xcb0221b6, 0xac8cdcb7, 0x97f7ee29, 0xf0791328,
+ 0x58ea142b, 0x3f64e92a, 0x0c206cdc, 0x6bae91dd, 0xc33d96de, 0xa4b36bdf,
+ 0xa5b49d32, 0xc23a6033, 0x6aa96730, 0x0d279a31, 0x3e631fc7, 0x59ede2c6,
+ 0xf17ee5c5, 0x96f018c4, 0xf371081f, 0x94fff51e, 0x3c6cf21d, 0x5be20f1c,
+ 0x68a68aea, 0x0f2877eb, 0xa7bb70e8, 0xc0358de9, 0xc1327b04, 0xa6bc8605,
+ 0x0e2f8106, 0x69a17c07, 0x5ae5f9f1, 0x3d6b04f0, 0x95f803f3, 0xf276fef2,
+ 0x5efa2245, 0x3974df44, 0x91e7d847, 0xf6692546, 0xc52da0b0, 0xa2a35db1,
+ 0x0a305ab2, 0x6dbea7b3, 0x6cb9515e, 0x0b37ac5f, 0xa3a4ab5c, 0xc42a565d,
+ 0xf76ed3ab, 0x90e02eaa, 0x387329a9, 0x5ffdd4a8, 0x3a7cc473, 0x5df23972,
+ 0xf5613e71, 0x92efc370, 0xa1ab4686, 0xc625bb87, 0x6eb6bc84, 0x09384185,
+ 0x083fb768, 0x6fb14a69, 0xc7224d6a, 0xa0acb06b, 0x93e8359d, 0xf466c89c,
+ 0x5cf5cf9f, 0x3b7b329e, 0x2a03aaa3, 0x4d8d57a2, 0xe51e50a1, 0x8290ada0,
+ 0xb1d42856, 0xd65ad557, 0x7ec9d254, 0x19472f55, 0x1840d9b8, 0x7fce24b9,
+ 0xd75d23ba, 0xb0d3debb, 0x83975b4d, 0xe419a64c, 0x4c8aa14f, 0x2b045c4e,
+ 0x4e854c95, 0x290bb194, 0x8198b697, 0xe6164b96, 0xd552ce60, 0xb2dc3361,
+ 0x1a4f3462, 0x7dc1c963, 0x7cc63f8e, 0x1b48c28f, 0xb3dbc58c, 0xd455388d,
+ 0xe711bd7b, 0x809f407a, 0x280c4779, 0x4f82ba78, 0xe30e66cf, 0x84809bce,
+ 0x2c139ccd, 0x4b9d61cc, 0x78d9e43a, 0x1f57193b, 0xb7c41e38, 0xd04ae339,
+ 0xd14d15d4, 0xb6c3e8d5, 0x1e50efd6, 0x79de12d7, 0x4a9a9721, 0x2d146a20,
+ 0x85876d23, 0xe2099022, 0x878880f9, 0xe0067df8, 0x48957afb, 0x2f1b87fa,
+ 0x1c5f020c, 0x7bd1ff0d, 0xd342f80e, 0xb4cc050f, 0xb5cbf3e2, 0xd2450ee3,
+ 0x7ad609e0, 0x1d58f4e1, 0x2e1c7117, 0x49928c16, 0xe1018b15, 0x868f7614,
+ 0xbdf4448a, 0xda7ab98b, 0x72e9be88, 0x15674389, 0x2623c67f, 0x41ad3b7e,
+ 0xe93e3c7d, 0x8eb0c17c, 0x8fb73791, 0xe839ca90, 0x40aacd93, 0x27243092,
+ 0x1460b564, 0x73ee4865, 0xdb7d4f66, 0xbcf3b267, 0xd972a2bc, 0xbefc5fbd,
+ 0x166f58be, 0x71e1a5bf, 0x42a52049, 0x252bdd48, 0x8db8da4b, 0xea36274a,
+ 0xeb31d1a7, 0x8cbf2ca6, 0x242c2ba5, 0x43a2d6a4, 0x70e65352, 0x1768ae53,
+ 0xbffba950, 0xd8755451, 0x74f988e6, 0x137775e7, 0xbbe472e4, 0xdc6a8fe5,
+ 0xef2e0a13, 0x88a0f712, 0x2033f011, 0x47bd0d10, 0x46bafbfd, 0x213406fc,
+ 0x89a701ff, 0xee29fcfe, 0xdd6d7908, 0xbae38409, 0x1270830a, 0x75fe7e0b,
+ 0x107f6ed0, 0x77f193d1, 0xdf6294d2, 0xb8ec69d3, 0x8ba8ec25, 0xec261124,
+ 0x44b51627, 0x233beb26, 0x223c1dcb, 0x45b2e0ca, 0xed21e7c9, 0x8aaf1ac8,
+ 0xb9eb9f3e, 0xde65623f, 0x76f6653c, 0x1178983d};
+
+const uint32_t kStrideExtensionTable3[256] = {
+ 0x00000000, 0xf20c0dfe, 0xe1f46d0d, 0x13f860f3, 0xc604aceb, 0x3408a115,
+ 0x27f0c1e6, 0xd5fccc18, 0x89e52f27, 0x7be922d9, 0x6811422a, 0x9a1d4fd4,
+ 0x4fe183cc, 0xbded8e32, 0xae15eec1, 0x5c19e33f, 0x162628bf, 0xe42a2541,
+ 0xf7d245b2, 0x05de484c, 0xd0228454, 0x222e89aa, 0x31d6e959, 0xc3dae4a7,
+ 0x9fc30798, 0x6dcf0a66, 0x7e376a95, 0x8c3b676b, 0x59c7ab73, 0xabcba68d,
+ 0xb833c67e, 0x4a3fcb80, 0x2c4c517e, 0xde405c80, 0xcdb83c73, 0x3fb4318d,
+ 0xea48fd95, 0x1844f06b, 0x0bbc9098, 0xf9b09d66, 0xa5a97e59, 0x57a573a7,
+ 0x445d1354, 0xb6511eaa, 0x63add2b2, 0x91a1df4c, 0x8259bfbf, 0x7055b241,
+ 0x3a6a79c1, 0xc866743f, 0xdb9e14cc, 0x29921932, 0xfc6ed52a, 0x0e62d8d4,
+ 0x1d9ab827, 0xef96b5d9, 0xb38f56e6, 0x41835b18, 0x527b3beb, 0xa0773615,
+ 0x758bfa0d, 0x8787f7f3, 0x947f9700, 0x66739afe, 0x5898a2fc, 0xaa94af02,
+ 0xb96ccff1, 0x4b60c20f, 0x9e9c0e17, 0x6c9003e9, 0x7f68631a, 0x8d646ee4,
+ 0xd17d8ddb, 0x23718025, 0x3089e0d6, 0xc285ed28, 0x17792130, 0xe5752cce,
+ 0xf68d4c3d, 0x048141c3, 0x4ebe8a43, 0xbcb287bd, 0xaf4ae74e, 0x5d46eab0,
+ 0x88ba26a8, 0x7ab62b56, 0x694e4ba5, 0x9b42465b, 0xc75ba564, 0x3557a89a,
+ 0x26afc869, 0xd4a3c597, 0x015f098f, 0xf3530471, 0xe0ab6482, 0x12a7697c,
+ 0x74d4f382, 0x86d8fe7c, 0x95209e8f, 0x672c9371, 0xb2d05f69, 0x40dc5297,
+ 0x53243264, 0xa1283f9a, 0xfd31dca5, 0x0f3dd15b, 0x1cc5b1a8, 0xeec9bc56,
+ 0x3b35704e, 0xc9397db0, 0xdac11d43, 0x28cd10bd, 0x62f2db3d, 0x90fed6c3,
+ 0x8306b630, 0x710abbce, 0xa4f677d6, 0x56fa7a28, 0x45021adb, 0xb70e1725,
+ 0xeb17f41a, 0x191bf9e4, 0x0ae39917, 0xf8ef94e9, 0x2d1358f1, 0xdf1f550f,
+ 0xcce735fc, 0x3eeb3802, 0xb13145f8, 0x433d4806, 0x50c528f5, 0xa2c9250b,
+ 0x7735e913, 0x8539e4ed, 0x96c1841e, 0x64cd89e0, 0x38d46adf, 0xcad86721,
+ 0xd92007d2, 0x2b2c0a2c, 0xfed0c634, 0x0cdccbca, 0x1f24ab39, 0xed28a6c7,
+ 0xa7176d47, 0x551b60b9, 0x46e3004a, 0xb4ef0db4, 0x6113c1ac, 0x931fcc52,
+ 0x80e7aca1, 0x72eba15f, 0x2ef24260, 0xdcfe4f9e, 0xcf062f6d, 0x3d0a2293,
+ 0xe8f6ee8b, 0x1afae375, 0x09028386, 0xfb0e8e78, 0x9d7d1486, 0x6f711978,
+ 0x7c89798b, 0x8e857475, 0x5b79b86d, 0xa975b593, 0xba8dd560, 0x4881d89e,
+ 0x14983ba1, 0xe694365f, 0xf56c56ac, 0x07605b52, 0xd29c974a, 0x20909ab4,
+ 0x3368fa47, 0xc164f7b9, 0x8b5b3c39, 0x795731c7, 0x6aaf5134, 0x98a35cca,
+ 0x4d5f90d2, 0xbf539d2c, 0xacabfddf, 0x5ea7f021, 0x02be131e, 0xf0b21ee0,
+ 0xe34a7e13, 0x114673ed, 0xc4babff5, 0x36b6b20b, 0x254ed2f8, 0xd742df06,
+ 0xe9a9e704, 0x1ba5eafa, 0x085d8a09, 0xfa5187f7, 0x2fad4bef, 0xdda14611,
+ 0xce5926e2, 0x3c552b1c, 0x604cc823, 0x9240c5dd, 0x81b8a52e, 0x73b4a8d0,
+ 0xa64864c8, 0x54446936, 0x47bc09c5, 0xb5b0043b, 0xff8fcfbb, 0x0d83c245,
+ 0x1e7ba2b6, 0xec77af48, 0x398b6350, 0xcb876eae, 0xd87f0e5d, 0x2a7303a3,
+ 0x766ae09c, 0x8466ed62, 0x979e8d91, 0x6592806f, 0xb06e4c77, 0x42624189,
+ 0x519a217a, 0xa3962c84, 0xc5e5b67a, 0x37e9bb84, 0x2411db77, 0xd61dd689,
+ 0x03e11a91, 0xf1ed176f, 0xe215779c, 0x10197a62, 0x4c00995d, 0xbe0c94a3,
+ 0xadf4f450, 0x5ff8f9ae, 0x8a0435b6, 0x78083848, 0x6bf058bb, 0x99fc5545,
+ 0xd3c39ec5, 0x21cf933b, 0x3237f3c8, 0xc03bfe36, 0x15c7322e, 0xe7cb3fd0,
+ 0xf4335f23, 0x063f52dd, 0x5a26b1e2, 0xa82abc1c, 0xbbd2dcef, 0x49ded111,
+ 0x9c221d09, 0x6e2e10f7, 0x7dd67004, 0x8fda7dfa};
+
+// CRCs are pre- and post- conditioned by xoring with all ones.
+static constexpr const uint32_t kCRC32Xor = static_cast<uint32_t>(0xffffffffU);
+
+// Reads a little-endian 32-bit integer from a 32-bit-aligned buffer.
+inline uint32_t ReadUint32LE(const uint8_t* buffer) {
+ return DecodeFixed32(reinterpret_cast<const char*>(buffer));
}
+// Returns the smallest address >= the given address that is aligned to N bytes.
+//
+// N must be a power of two.
+template <int N>
+constexpr inline const uint8_t* RoundUp(const uint8_t* pointer) {
+ return reinterpret_cast<uint8_t*>(
+ (reinterpret_cast<uintptr_t>(pointer) + (N - 1)) &
+ ~static_cast<uintptr_t>(N - 1));
+}
+
+} // namespace
+
// Determine if the CPU running this program can accelerate the CRC32C
// calculation.
static bool CanAccelerateCRC32C() {
- if (!port::HasAcceleratedCRC32C())
- return false;
-
- // Double-check that the accelerated implementation functions correctly.
 // port::AcceleratedCRC32C returns zero when unable to accelerate.
static const char kTestCRCBuffer[] = "TestCRCBuffer";
static const char kBufSize = sizeof(kTestCRCBuffer) - 1;
@@ -300,54 +273,107 @@ static bool CanAccelerateCRC32C() {
return port::AcceleratedCRC32C(0, kTestCRCBuffer, kBufSize) == kTestCRCValue;
}
-uint32_t Extend(uint32_t crc, const char* buf, size_t size) {
+uint32_t Extend(uint32_t crc, const char* data, size_t n) {
static bool accelerate = CanAccelerateCRC32C();
if (accelerate) {
- return port::AcceleratedCRC32C(crc, buf, size);
+ return port::AcceleratedCRC32C(crc, data, n);
}
- const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
- const uint8_t *e = p + size;
- uint32_t l = crc ^ 0xffffffffu;
+ const uint8_t* p = reinterpret_cast<const uint8_t*>(data);
+ const uint8_t* e = p + n;
+ uint32_t l = crc ^ kCRC32Xor;
+
+// Process one byte at a time.
+#define STEP1 \
+ do { \
+ int c = (l & 0xff) ^ *p++; \
+ l = kByteExtensionTable[c] ^ (l >> 8); \
+ } while (0)
-#define STEP1 do { \
- int c = (l & 0xff) ^ *p++; \
- l = table0_[c] ^ (l >> 8); \
-} while (0)
-#define STEP4 do { \
- uint32_t c = l ^ LE_LOAD32(p); \
- p += 4; \
- l = table3_[c & 0xff] ^ \
- table2_[(c >> 8) & 0xff] ^ \
- table1_[(c >> 16) & 0xff] ^ \
- table0_[c >> 24]; \
-} while (0)
+// Process one of the 4 strides of 4-byte data.
+#define STEP4(s) \
+ do { \
+ crc##s = ReadUint32LE(p + s * 4) ^ kStrideExtensionTable3[crc##s & 0xff] ^ \
+ kStrideExtensionTable2[(crc##s >> 8) & 0xff] ^ \
+ kStrideExtensionTable1[(crc##s >> 16) & 0xff] ^ \
+ kStrideExtensionTable0[crc##s >> 24]; \
+ } while (0)
- // Point x at first 4-byte aligned byte in string. This might be
- // just past the end of the string.
- const uintptr_t pval = reinterpret_cast<uintptr_t>(p);
- const uint8_t* x = reinterpret_cast<const uint8_t*>(((pval + 3) >> 2) << 2);
+// Process a 16-byte swath of 4 strides, each of which has 4 bytes of data.
+#define STEP16 \
+ do { \
+ STEP4(0); \
+ STEP4(1); \
+ STEP4(2); \
+ STEP4(3); \
+ p += 16; \
+ } while (0)
+
+// Process 4 bytes that were already loaded into a word.
+#define STEP4W(w) \
+ do { \
+ w ^= l; \
+ for (size_t i = 0; i < 4; ++i) { \
+ w = (w >> 8) ^ kByteExtensionTable[w & 0xff]; \
+ } \
+ l = w; \
+ } while (0)
+
+ // Point x at first 4-byte aligned byte in the buffer. This might be past the
+ // end of the buffer.
+ const uint8_t* x = RoundUp<4>(p);
if (x <= e) {
- // Process bytes until finished or p is 4-byte aligned
+ // Process bytes until p is 4-byte aligned.
while (p != x) {
STEP1;
}
}
- // Process bytes 16 at a time
- while ((e-p) >= 16) {
- STEP4; STEP4; STEP4; STEP4;
- }
- // Process bytes 4 at a time
- while ((e-p) >= 4) {
- STEP4;
+
+ if ((e - p) >= 16) {
+ // Load a 16-byte swath into the stride partial results.
+ uint32_t crc0 = ReadUint32LE(p + 0 * 4) ^ l;
+ uint32_t crc1 = ReadUint32LE(p + 1 * 4);
+ uint32_t crc2 = ReadUint32LE(p + 2 * 4);
+ uint32_t crc3 = ReadUint32LE(p + 3 * 4);
+ p += 16;
+
+ // It is possible to get better speeds (at least on x86) by interleaving
+ // prefetching 256 bytes ahead with processing 64 bytes at a time. See the
+ // portable implementation in https://github.com/google/crc32c/.
+
+ // Process one 16-byte swath at a time.
+ while ((e - p) >= 16) {
+ STEP16;
+ }
+
+ // Advance one word at a time as far as possible.
+ while ((e - p) >= 4) {
+ STEP4(0);
+ uint32_t tmp = crc0;
+ crc0 = crc1;
+ crc1 = crc2;
+ crc2 = crc3;
+ crc3 = tmp;
+ p += 4;
+ }
+
+ // Combine the 4 partial stride results.
+ l = 0;
+ STEP4W(crc0);
+ STEP4W(crc1);
+ STEP4W(crc2);
+ STEP4W(crc3);
}
- // Process the last few bytes
+
+ // Process the last few bytes.
while (p != e) {
STEP1;
}
+#undef STEP4W
+#undef STEP16
#undef STEP4
#undef STEP1
- return l ^ 0xffffffffu;
+ return l ^ kCRC32Xor;
}
} // namespace crc32c
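
Note: the STEP1 macro above is the standard table-driven, byte-at-a-time CRC update, and STEP4/STEP16 extend it to four interleaved 4-byte strides. Written out as a plain function, the byte-at-a-time core looks roughly like this (any CRC-32 byte-extension table with the same conventions works; the function name is illustrative):

#include <cstddef>
#include <cstdint>

// One table lookup per input byte: fold the low 8 bits of the running CRC
// with the next byte, then shift the CRC down by 8 bits. The state is pre-
// and post-conditioned by XOR with all ones, as crc32c::Extend() does.
uint32_t Crc32ByteAtATime(const uint32_t table[256], uint32_t crc,
                          const uint8_t* data, size_t n) {
  uint32_t l = crc ^ 0xffffffffu;
  for (size_t i = 0; i < n; ++i) {
    l = table[(l ^ data[i]) & 0xff] ^ (l >> 8);
  }
  return l ^ 0xffffffffu;
}
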
diff --git a/src/leveldb/util/crc32c.h b/src/leveldb/util/crc32c.h
index 1d7e5c075d..98fabb0d2f 100644
--- a/src/leveldb/util/crc32c.h
+++ b/src/leveldb/util/crc32c.h
@@ -14,12 +14,10 @@ namespace crc32c {
// Return the crc32c of concat(A, data[0,n-1]) where init_crc is the
// crc32c of some string A. Extend() is often used to maintain the
// crc32c of a stream of data.
-extern uint32_t Extend(uint32_t init_crc, const char* data, size_t n);
+uint32_t Extend(uint32_t init_crc, const char* data, size_t n);
// Return the crc32c of data[0,n-1]
-inline uint32_t Value(const char* data, size_t n) {
- return Extend(0, data, n);
-}
+inline uint32_t Value(const char* data, size_t n) { return Extend(0, data, n); }
static const uint32_t kMaskDelta = 0xa282ead8ul;
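
Note: kMaskDelta is the constant used by the Mask()/Unmask() helpers declared further down in crc32c.h, outside this hunk; leveldb masks CRCs before storing them so that computing a CRC over data that itself contains CRCs stays well-behaved. A sketch of the rotate-and-add idea, with illustrative helper names (treat the exact formula as an assumption, since the helper bodies are not shown here):

#include <cassert>
#include <cstdint>

static const uint32_t kMaskDelta = 0xa282ead8ul;

// Rotate right by 15 bits, then add a constant; unmasking inverts both steps.
uint32_t MaskSketch(uint32_t crc) {
  return ((crc >> 15) | (crc << 17)) + kMaskDelta;
}

uint32_t UnmaskSketch(uint32_t masked) {
  uint32_t rot = masked - kMaskDelta;
  return ((rot >> 17) | (rot << 15));
}

int main() {
  uint32_t crc = 0x113fdb5c;
  assert(UnmaskSketch(MaskSketch(crc)) == crc);
  return 0;
}
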
diff --git a/src/leveldb/util/crc32c_test.cc b/src/leveldb/util/crc32c_test.cc
index 4b957ee120..18a8494824 100644
--- a/src/leveldb/util/crc32c_test.cc
+++ b/src/leveldb/util/crc32c_test.cc
@@ -8,7 +8,7 @@
namespace leveldb {
namespace crc32c {
-class CRC { };
+class CRC {};
TEST(CRC, StandardResults) {
// From rfc3720 section B.4.
@@ -30,30 +30,19 @@ TEST(CRC, StandardResults) {
}
ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf)));
- unsigned char data[48] = {
- 0x01, 0xc0, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x14, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x04, 0x00,
- 0x00, 0x00, 0x00, 0x14,
- 0x00, 0x00, 0x00, 0x18,
- 0x28, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x02, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
+ uint8_t data[48] = {
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
ASSERT_EQ(0xd9963a56, Value(reinterpret_cast<char*>(data), sizeof(data)));
}
-TEST(CRC, Values) {
- ASSERT_NE(Value("a", 1), Value("foo", 3));
-}
+TEST(CRC, Values) { ASSERT_NE(Value("a", 1), Value("foo", 3)); }
TEST(CRC, Extend) {
- ASSERT_EQ(Value("hello world", 11),
- Extend(Value("hello ", 6), "world", 5));
+ ASSERT_EQ(Value("hello world", 11), Extend(Value("hello ", 6), "world", 5));
}
TEST(CRC, Mask) {
@@ -67,6 +56,4 @@ TEST(CRC, Mask) {
} // namespace crc32c
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/env.cc b/src/leveldb/util/env.cc
index c58a0821ef..d2f0aef326 100644
--- a/src/leveldb/util/env.cc
+++ b/src/leveldb/util/env.cc
@@ -6,30 +6,24 @@
namespace leveldb {
-Env::~Env() {
-}
+Env::~Env() = default;
Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) {
return Status::NotSupported("NewAppendableFile", fname);
}
-SequentialFile::~SequentialFile() {
-}
+SequentialFile::~SequentialFile() = default;
-RandomAccessFile::~RandomAccessFile() {
-}
+RandomAccessFile::~RandomAccessFile() = default;
-WritableFile::~WritableFile() {
-}
+WritableFile::~WritableFile() = default;
-Logger::~Logger() {
-}
+Logger::~Logger() = default;
-FileLock::~FileLock() {
-}
+FileLock::~FileLock() = default;
void Log(Logger* info_log, const char* format, ...) {
- if (info_log != NULL) {
+ if (info_log != nullptr) {
va_list ap;
va_start(ap, format);
info_log->Logv(format, ap);
@@ -38,8 +32,7 @@ void Log(Logger* info_log, const char* format, ...) {
}
static Status DoWriteStringToFile(Env* env, const Slice& data,
- const std::string& fname,
- bool should_sync) {
+ const std::string& fname, bool should_sync) {
WritableFile* file;
Status s = env->NewWritableFile(fname, &file);
if (!s.ok()) {
@@ -94,7 +87,6 @@ Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
return s;
}
-EnvWrapper::~EnvWrapper() {
-}
+EnvWrapper::~EnvWrapper() {}
} // namespace leveldb
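
Note: because Log() checks info_log for nullptr, callers can log unconditionally and simply pass a null Logger when logging is disabled. A small usage sketch against the declarations in leveldb/env.h (the function name and message are illustrative):

#include "leveldb/env.h"

void ReportProgress(leveldb::Logger* info_log, int done, int total) {
  // Safe even when logging is disabled: Log() returns immediately
  // if info_log is nullptr, as shown in the hunk above.
  leveldb::Log(info_log, "compaction progress: %d/%d", done, total);
}

int main() {
  ReportProgress(nullptr, 3, 10);  // no-op, no crash
  return 0;
}
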
diff --git a/src/leveldb/util/env_posix.cc b/src/leveldb/util/env_posix.cc
index f77918313e..9f5863a0f3 100644
--- a/src/leveldb/util/env_posix.cc
+++ b/src/leveldb/util/env_posix.cc
@@ -1,706 +1,906 @@
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#if !defined(LEVELDB_PLATFORM_WINDOWS)
#include <dirent.h>
-#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
-#include <time.h>
#include <unistd.h>
-#include <deque>
+
+#include <atomic>
+#include <cerrno>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
#include <limits>
+#include <queue>
#include <set>
+#include <string>
+#include <thread>
+#include <type_traits>
+#include <utility>
+
#include "leveldb/env.h"
#include "leveldb/slice.h"
+#include "leveldb/status.h"
#include "port/port.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
-#include "util/posix_logger.h"
+#include "port/thread_annotations.h"
#include "util/env_posix_test_helper.h"
+#include "util/posix_logger.h"
namespace leveldb {
namespace {
-static int open_read_only_file_limit = -1;
-static int mmap_limit = -1;
+// Set by EnvPosixTestHelper::SetReadOnlyFDLimit() and MaxOpenFiles().
+int g_open_read_only_file_limit = -1;
+
+// Up to 4096 mmap regions for 64-bit binaries; none for 32-bit.
+constexpr const int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 4096 : 0;
+
+// Can be set using EnvPosixTestHelper::SetReadOnlyMMapLimit().
+int g_mmap_limit = kDefaultMmapLimit;
-static Status IOError(const std::string& context, int err_number) {
- return Status::IOError(context, strerror(err_number));
+// Common flags defined for all posix open operations
+#if defined(HAVE_O_CLOEXEC)
+constexpr const int kOpenBaseFlags = O_CLOEXEC;
+#else
+constexpr const int kOpenBaseFlags = 0;
+#endif // defined(HAVE_O_CLOEXEC)
+
+constexpr const size_t kWritableFileBufferSize = 65536;
+
+Status PosixError(const std::string& context, int error_number) {
+ if (error_number == ENOENT) {
+ return Status::NotFound(context, std::strerror(error_number));
+ } else {
+ return Status::IOError(context, std::strerror(error_number));
+ }
}
// Helper class to limit resource usage to avoid exhaustion.
// Currently used to limit read-only file descriptors and mmap file usage
-// so that we do not end up running out of file descriptors, virtual memory,
-// or running into kernel performance problems for very large databases.
+// so that we do not run out of file descriptors or virtual memory, or run into
+// kernel performance problems for very large databases.
class Limiter {
public:
- // Limit maximum number of resources to |n|.
- Limiter(intptr_t n) {
- SetAllowed(n);
- }
+ // Limit maximum number of resources to |max_acquires|.
+ Limiter(int max_acquires) : acquires_allowed_(max_acquires) {}
+
+ Limiter(const Limiter&) = delete;
+ Limiter operator=(const Limiter&) = delete;
// If another resource is available, acquire it and return true.
// Else return false.
bool Acquire() {
- if (GetAllowed() <= 0) {
- return false;
- }
- MutexLock l(&mu_);
- intptr_t x = GetAllowed();
- if (x <= 0) {
- return false;
- } else {
- SetAllowed(x - 1);
- return true;
- }
+ int old_acquires_allowed =
+ acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
+
+ if (old_acquires_allowed > 0) return true;
+
+ acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
+ return false;
}
// Release a resource acquired by a previous call to Acquire() that returned
// true.
- void Release() {
- MutexLock l(&mu_);
- SetAllowed(GetAllowed() + 1);
- }
+ void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); }
private:
- port::Mutex mu_;
- port::AtomicPointer allowed_;
-
- intptr_t GetAllowed() const {
- return reinterpret_cast<intptr_t>(allowed_.Acquire_Load());
- }
-
- // REQUIRES: mu_ must be held
- void SetAllowed(intptr_t v) {
- allowed_.Release_Store(reinterpret_cast<void*>(v));
- }
-
- Limiter(const Limiter&);
- void operator=(const Limiter&);
+ // The number of available resources.
+ //
+ // This is a counter and is not tied to the invariants of any other class, so
+ // it can be operated on safely using std::memory_order_relaxed.
+ std::atomic<int> acquires_allowed_;
};
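
The lock-free Acquire()/Release() pair above relies on an optimistic decrement that is undone when it overshoots. A minimal standalone sketch of the same pattern, independent of leveldb and using only <atomic> (the class name CountingGate is invented for illustration, not part of the patch):

#include <atomic>

// Optimistically claim a slot with fetch_sub(); if the counter was already at
// zero or below, put the slot back with fetch_add() and report failure.
class CountingGate {
 public:
  explicit CountingGate(int max_slots) : slots_(max_slots) {}

  bool TryAcquire() {
    if (slots_.fetch_sub(1, std::memory_order_relaxed) > 0) return true;
    slots_.fetch_add(1, std::memory_order_relaxed);  // roll back the overshoot
    return false;
  }

  void Release() { slots_.fetch_add(1, std::memory_order_relaxed); }

 private:
  std::atomic<int> slots_;
};

Relaxed ordering is sufficient here because the counter protects no other data, exactly as the comment on acquires_allowed_ notes.
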
-class PosixSequentialFile: public SequentialFile {
- private:
- std::string filename_;
- FILE* file_;
-
+// Implements sequential read access in a file using read().
+//
+// Instances of this class are thread-friendly but not thread-safe, as required
+// by the SequentialFile API.
+class PosixSequentialFile final : public SequentialFile {
public:
- PosixSequentialFile(const std::string& fname, FILE* f)
- : filename_(fname), file_(f) { }
- virtual ~PosixSequentialFile() { fclose(file_); }
-
- virtual Status Read(size_t n, Slice* result, char* scratch) {
- Status s;
- size_t r = fread_unlocked(scratch, 1, n, file_);
- *result = Slice(scratch, r);
- if (r < n) {
- if (feof(file_)) {
- // We leave status as ok if we hit the end of the file
- } else {
- // A partial read with an error: return a non-ok status
- s = IOError(filename_, errno);
+ PosixSequentialFile(std::string filename, int fd)
+ : fd_(fd), filename_(filename) {}
+ ~PosixSequentialFile() override { close(fd_); }
+
+ Status Read(size_t n, Slice* result, char* scratch) override {
+ Status status;
+ while (true) {
+ ::ssize_t read_size = ::read(fd_, scratch, n);
+ if (read_size < 0) { // Read error.
+ if (errno == EINTR) {
+ continue; // Retry
+ }
+ status = PosixError(filename_, errno);
+ break;
}
+ *result = Slice(scratch, read_size);
+ break;
}
- return s;
+ return status;
}
- virtual Status Skip(uint64_t n) {
- if (fseek(file_, n, SEEK_CUR)) {
- return IOError(filename_, errno);
+ Status Skip(uint64_t n) override {
+ if (::lseek(fd_, n, SEEK_CUR) == static_cast<off_t>(-1)) {
+ return PosixError(filename_, errno);
}
return Status::OK();
}
- virtual std::string GetName() const { return filename_; }
-};
+ virtual std::string GetName() const override { return filename_; }
-// pread() based random-access
-class PosixRandomAccessFile: public RandomAccessFile {
private:
- std::string filename_;
- bool temporary_fd_; // If true, fd_ is -1 and we open on every read.
- int fd_;
- Limiter* limiter_;
+ const int fd_;
+ const std::string filename_;
+};
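
The Read() implementation above retries only when read() is interrupted by a signal. A hypothetical free-standing helper showing the same EINTR-retry idiom (ReadRetryingOnEintr is not a name from the patch):

#include <cerrno>
#include <unistd.h>

// Retry ::read() while it fails with EINTR; any other failure is returned to
// the caller as -1 with errno set, matching the behaviour relied on above.
ssize_t ReadRetryingOnEintr(int fd, void* buf, size_t count) {
  ssize_t rc;
  do {
    rc = ::read(fd, buf, count);
  } while (rc < 0 && errno == EINTR);
  return rc;
}
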
+// Implements random read access in a file using pread().
+//
+// Instances of this class are thread-safe, as required by the RandomAccessFile
+// API. Instances are immutable and Read() only calls thread-safe library
+// functions.
+class PosixRandomAccessFile final : public RandomAccessFile {
public:
- PosixRandomAccessFile(const std::string& fname, int fd, Limiter* limiter)
- : filename_(fname), fd_(fd), limiter_(limiter) {
- temporary_fd_ = !limiter->Acquire();
- if (temporary_fd_) {
- // Open file on every access.
- close(fd_);
- fd_ = -1;
+  // The new instance takes ownership of |fd|. |fd_limiter| must outlive this
+  // instance and is used to decide whether the fd can stay open permanently.
+ PosixRandomAccessFile(std::string filename, int fd, Limiter* fd_limiter)
+ : has_permanent_fd_(fd_limiter->Acquire()),
+ fd_(has_permanent_fd_ ? fd : -1),
+ fd_limiter_(fd_limiter),
+ filename_(std::move(filename)) {
+ if (!has_permanent_fd_) {
+ assert(fd_ == -1);
+ ::close(fd); // The file will be opened on every read.
}
}
- virtual ~PosixRandomAccessFile() {
- if (!temporary_fd_) {
- close(fd_);
- limiter_->Release();
+ ~PosixRandomAccessFile() override {
+ if (has_permanent_fd_) {
+ assert(fd_ != -1);
+ ::close(fd_);
+ fd_limiter_->Release();
}
}
- virtual Status Read(uint64_t offset, size_t n, Slice* result,
- char* scratch) const {
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
int fd = fd_;
- if (temporary_fd_) {
- fd = open(filename_.c_str(), O_RDONLY);
+ if (!has_permanent_fd_) {
+ fd = ::open(filename_.c_str(), O_RDONLY | kOpenBaseFlags);
if (fd < 0) {
- return IOError(filename_, errno);
+ return PosixError(filename_, errno);
}
}
- Status s;
- ssize_t r = pread(fd, scratch, n, static_cast<off_t>(offset));
- *result = Slice(scratch, (r < 0) ? 0 : r);
- if (r < 0) {
- // An error: return a non-ok status
- s = IOError(filename_, errno);
+ assert(fd != -1);
+
+ Status status;
+ ssize_t read_size = ::pread(fd, scratch, n, static_cast<off_t>(offset));
+ *result = Slice(scratch, (read_size < 0) ? 0 : read_size);
+ if (read_size < 0) {
+ // An error: return a non-ok status.
+ status = PosixError(filename_, errno);
}
- if (temporary_fd_) {
+ if (!has_permanent_fd_) {
// Close the temporary file descriptor opened earlier.
- close(fd);
+ assert(fd != fd_);
+ ::close(fd);
}
- return s;
+ return status;
}
- virtual std::string GetName() const { return filename_; }
-};
+ virtual std::string GetName() const override { return filename_; }
-// mmap() based random-access
-class PosixMmapReadableFile: public RandomAccessFile {
private:
- std::string filename_;
- void* mmapped_region_;
- size_t length_;
- Limiter* limiter_;
+ const bool has_permanent_fd_; // If false, the file is opened on every read.
+ const int fd_; // -1 if has_permanent_fd_ is false.
+ Limiter* const fd_limiter_;
+ const std::string filename_;
+};
+// Implements random read access in a file using mmap().
+//
+// Instances of this class are thread-safe, as required by the RandomAccessFile
+// API. Instances are immutable and Read() only calls thread-safe library
+// functions.
+class PosixMmapReadableFile final : public RandomAccessFile {
public:
- // base[0,length-1] contains the mmapped contents of the file.
- PosixMmapReadableFile(const std::string& fname, void* base, size_t length,
- Limiter* limiter)
- : filename_(fname), mmapped_region_(base), length_(length),
- limiter_(limiter) {
- }
-
- virtual ~PosixMmapReadableFile() {
- munmap(mmapped_region_, length_);
- limiter_->Release();
- }
-
- virtual Status Read(uint64_t offset, size_t n, Slice* result,
- char* scratch) const {
- Status s;
+  // mmap_base[0, length-1] points to the memory-mapped contents of the file.
+  // It must be the result of a successful call to mmap(). This instance takes
+  // ownership of the region.
+ //
+ // |mmap_limiter| must outlive this instance. The caller must have already
+  // acquired the right to use one mmap region, which will be released when this
+ // instance is destroyed.
+ PosixMmapReadableFile(std::string filename, char* mmap_base, size_t length,
+ Limiter* mmap_limiter)
+ : mmap_base_(mmap_base),
+ length_(length),
+ mmap_limiter_(mmap_limiter),
+ filename_(std::move(filename)) {}
+
+ ~PosixMmapReadableFile() override {
+ ::munmap(static_cast<void*>(mmap_base_), length_);
+ mmap_limiter_->Release();
+ }
+
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
if (offset + n > length_) {
*result = Slice();
- s = IOError(filename_, EINVAL);
- } else {
- *result = Slice(reinterpret_cast<char*>(mmapped_region_) + offset, n);
+ return PosixError(filename_, EINVAL);
}
- return s;
+
+ *result = Slice(mmap_base_ + offset, n);
+ return Status::OK();
}
- virtual std::string GetName() const { return filename_; }
-};
+ virtual std::string GetName() const override { return filename_; }
-class PosixWritableFile : public WritableFile {
private:
- std::string filename_;
- FILE* file_;
+ char* const mmap_base_;
+ const size_t length_;
+ Limiter* const mmap_limiter_;
+ const std::string filename_;
+};
+class PosixWritableFile final : public WritableFile {
public:
- PosixWritableFile(const std::string& fname, FILE* f)
- : filename_(fname), file_(f) { }
-
- ~PosixWritableFile() {
- if (file_ != NULL) {
+ PosixWritableFile(std::string filename, int fd)
+ : pos_(0),
+ fd_(fd),
+ is_manifest_(IsManifest(filename)),
+ filename_(std::move(filename)),
+ dirname_(Dirname(filename_)) {}
+
+ ~PosixWritableFile() override {
+ if (fd_ >= 0) {
// Ignoring any potential errors
- fclose(file_);
+ Close();
}
}
- virtual Status Append(const Slice& data) {
- size_t r = fwrite_unlocked(data.data(), 1, data.size(), file_);
- if (r != data.size()) {
- return IOError(filename_, errno);
+ Status Append(const Slice& data) override {
+ size_t write_size = data.size();
+ const char* write_data = data.data();
+
+ // Fit as much as possible into buffer.
+ size_t copy_size = std::min(write_size, kWritableFileBufferSize - pos_);
+ std::memcpy(buf_ + pos_, write_data, copy_size);
+ write_data += copy_size;
+ write_size -= copy_size;
+ pos_ += copy_size;
+ if (write_size == 0) {
+ return Status::OK();
}
- return Status::OK();
+
+ // Can't fit in buffer, so need to do at least one write.
+ Status status = FlushBuffer();
+ if (!status.ok()) {
+ return status;
+ }
+
+ // Small writes go to buffer, large writes are written directly.
+ if (write_size < kWritableFileBufferSize) {
+ std::memcpy(buf_, write_data, write_size);
+ pos_ = write_size;
+ return Status::OK();
+ }
+ return WriteUnbuffered(write_data, write_size);
}
- virtual Status Close() {
- Status result;
- if (fclose(file_) != 0) {
- result = IOError(filename_, errno);
+ Status Close() override {
+ Status status = FlushBuffer();
+ const int close_result = ::close(fd_);
+ if (close_result < 0 && status.ok()) {
+ status = PosixError(filename_, errno);
}
- file_ = NULL;
- return result;
+ fd_ = -1;
+ return status;
}
- virtual Status Flush() {
- if (fflush_unlocked(file_) != 0) {
- return IOError(filename_, errno);
+ Status Flush() override { return FlushBuffer(); }
+
+ Status Sync() override {
+ // Ensure new files referred to by the manifest are in the filesystem.
+ //
+ // This needs to happen before the manifest file is flushed to disk, to
+ // avoid crashing in a state where the manifest refers to files that are not
+ // yet on disk.
+ Status status = SyncDirIfManifest();
+ if (!status.ok()) {
+ return status;
+ }
+
+ status = FlushBuffer();
+ if (!status.ok()) {
+ return status;
+ }
+
+ return SyncFd(fd_, filename_, false);
+ }
+
+ private:
+ Status FlushBuffer() {
+ Status status = WriteUnbuffered(buf_, pos_);
+ pos_ = 0;
+ return status;
+ }
+
+ Status WriteUnbuffered(const char* data, size_t size) {
+ while (size > 0) {
+ ssize_t write_result = ::write(fd_, data, size);
+ if (write_result < 0) {
+ if (errno == EINTR) {
+ continue; // Retry
+ }
+ return PosixError(filename_, errno);
+ }
+ data += write_result;
+ size -= write_result;
}
return Status::OK();
}
Status SyncDirIfManifest() {
- const char* f = filename_.c_str();
- const char* sep = strrchr(f, '/');
- Slice basename;
- std::string dir;
- if (sep == NULL) {
- dir = ".";
- basename = f;
+ Status status;
+ if (!is_manifest_) {
+ return status;
+ }
+
+ int fd = ::open(dirname_.c_str(), O_RDONLY | kOpenBaseFlags);
+ if (fd < 0) {
+ status = PosixError(dirname_, errno);
} else {
- dir = std::string(f, sep - f);
- basename = sep + 1;
+ status = SyncFd(fd, dirname_, true);
+ ::close(fd);
+ }
+ return status;
+ }
+
+ // Ensures that all the caches associated with the given file descriptor's
+ // data are flushed all the way to durable media, and can withstand power
+ // failures.
+ //
+ // The path argument is only used to populate the description string in the
+ // returned Status if an error occurs.
+ static Status SyncFd(int fd, const std::string& fd_path, bool syncing_dir) {
+#if HAVE_FULLFSYNC
+ // On macOS and iOS, fsync() doesn't guarantee durability past power
+ // failures. fcntl(F_FULLFSYNC) is required for that purpose. Some
+ // filesystems don't support fcntl(F_FULLFSYNC), and require a fallback to
+ // fsync().
+ if (::fcntl(fd, F_FULLFSYNC) == 0) {
+ return Status::OK();
}
- Status s;
- if (basename.starts_with("MANIFEST")) {
- int fd = open(dir.c_str(), O_RDONLY);
- if (fd < 0) {
- s = IOError(dir, errno);
- } else {
- if (fsync(fd) < 0 && errno != EINVAL) {
- s = IOError(dir, errno);
- }
- close(fd);
- }
+#endif // HAVE_FULLFSYNC
+
+#if HAVE_FDATASYNC
+ bool sync_success = ::fdatasync(fd) == 0;
+#else
+ bool sync_success = ::fsync(fd) == 0;
+#endif // HAVE_FDATASYNC
+
+ if (sync_success) {
+ return Status::OK();
}
- return s;
+ // Do not crash if filesystem can't fsync directories
+ // (see https://github.com/bitcoin/bitcoin/pull/10000)
+ if (syncing_dir && errno == EINVAL) {
+ return Status::OK();
+ }
+ return PosixError(fd_path, errno);
}
- virtual Status Sync() {
- // Ensure new files referred to by the manifest are in the filesystem.
- Status s = SyncDirIfManifest();
- if (!s.ok()) {
- return s;
+ // Returns the directory name in a path pointing to a file.
+ //
+ // Returns "." if the path does not contain any directory separator.
+ static std::string Dirname(const std::string& filename) {
+ std::string::size_type separator_pos = filename.rfind('/');
+ if (separator_pos == std::string::npos) {
+ return std::string(".");
}
- if (fflush_unlocked(file_) != 0 ||
- fdatasync(fileno(file_)) != 0) {
- s = Status::IOError(filename_, strerror(errno));
+ // The filename component should not contain a path separator. If it does,
+ // the splitting was done incorrectly.
+ assert(filename.find('/', separator_pos + 1) == std::string::npos);
+
+ return filename.substr(0, separator_pos);
+ }
+
+ // Extracts the file name from a path pointing to a file.
+ //
+ // The returned Slice points to |filename|'s data buffer, so it is only valid
+ // while |filename| is alive and unchanged.
+ static Slice Basename(const std::string& filename) {
+ std::string::size_type separator_pos = filename.rfind('/');
+ if (separator_pos == std::string::npos) {
+ return Slice(filename);
}
- return s;
+ // The filename component should not contain a path separator. If it does,
+ // the splitting was done incorrectly.
+ assert(filename.find('/', separator_pos + 1) == std::string::npos);
+
+ return Slice(filename.data() + separator_pos + 1,
+ filename.length() - separator_pos - 1);
}
- virtual std::string GetName() const { return filename_; }
+ // True if the given file is a manifest file.
+ static bool IsManifest(const std::string& filename) {
+ return Basename(filename).starts_with("MANIFEST");
+ }
+
+ virtual std::string GetName() const override { return filename_; }
+
+ // buf_[0, pos_ - 1] contains data to be written to fd_.
+ char buf_[kWritableFileBufferSize];
+ size_t pos_;
+ int fd_;
+
+ const bool is_manifest_; // True if the file's name starts with MANIFEST.
+ const std::string filename_;
+ const std::string dirname_; // The directory of filename_.
};
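
To make the write path above easier to follow: Append() copies small writes into a fixed 64 KiB buffer and only falls back to the unbuffered write() path when the data does not fit. A self-contained sketch of that policy under assumptions of this illustration only (BufferedSink and its std::string sink stand in for the file descriptor and the ::write() loop; they are not part of the patch):

#include <algorithm>
#include <cstddef>
#include <cstring>
#include <string>

class BufferedSink {
 public:
  static constexpr size_t kBufSize = 65536;  // mirrors kWritableFileBufferSize

  void Append(const char* data, size_t size) {
    // Copy as much as fits into the in-memory buffer.
    size_t copy = std::min(size, kBufSize - pos_);
    std::memcpy(buf_ + pos_, data, copy);
    data += copy;
    size -= copy;
    pos_ += copy;
    if (size == 0) return;          // everything fit; no flush needed

    FlushBuffer();                  // buffer is full, push it out
    if (size < kBufSize) {          // small remainder: buffer it again
      std::memcpy(buf_, data, size);
      pos_ = size;
    } else {                        // large write: bypass the buffer entirely
      sink_.append(data, size);
    }
  }

  void FlushBuffer() {
    sink_.append(buf_, pos_);
    pos_ = 0;
  }

  const std::string& contents() const { return sink_; }

 private:
  char buf_[kBufSize];
  size_t pos_ = 0;
  std::string sink_;  // stands in for WriteUnbuffered() in the patch
};

The real class differs mainly in that FlushBuffer() forwards to WriteUnbuffered(), which loops over ::write() and retries on EINTR.
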
-static int LockOrUnlock(int fd, bool lock) {
+int LockOrUnlock(int fd, bool lock) {
errno = 0;
- struct flock f;
- memset(&f, 0, sizeof(f));
- f.l_type = (lock ? F_WRLCK : F_UNLCK);
- f.l_whence = SEEK_SET;
- f.l_start = 0;
- f.l_len = 0; // Lock/unlock entire file
- return fcntl(fd, F_SETLK, &f);
+ struct ::flock file_lock_info;
+ std::memset(&file_lock_info, 0, sizeof(file_lock_info));
+ file_lock_info.l_type = (lock ? F_WRLCK : F_UNLCK);
+ file_lock_info.l_whence = SEEK_SET;
+ file_lock_info.l_start = 0;
+ file_lock_info.l_len = 0; // Lock/unlock entire file.
+ return ::fcntl(fd, F_SETLK, &file_lock_info);
}
+// Instances are thread-safe because they are immutable.
class PosixFileLock : public FileLock {
public:
- int fd_;
- std::string name_;
+ PosixFileLock(int fd, std::string filename)
+ : fd_(fd), filename_(std::move(filename)) {}
+
+ int fd() const { return fd_; }
+ const std::string& filename() const { return filename_; }
+
+ private:
+ const int fd_;
+ const std::string filename_;
};
-// Set of locked files. We keep a separate set instead of just
-// relying on fcntrl(F_SETLK) since fcntl(F_SETLK) does not provide
-// any protection against multiple uses from the same process.
+// Tracks the files locked by PosixEnv::LockFile().
+//
+// We maintain a separate set instead of relying on fcntl(F_SETLK) because
+// fcntl(F_SETLK) does not provide any protection against multiple uses from the
+// same process.
+//
+// Instances are thread-safe because all member data is guarded by a mutex.
class PosixLockTable {
- private:
- port::Mutex mu_;
- std::set<std::string> locked_files_;
public:
- bool Insert(const std::string& fname) {
- MutexLock l(&mu_);
- return locked_files_.insert(fname).second;
- }
- void Remove(const std::string& fname) {
- MutexLock l(&mu_);
+ bool Insert(const std::string& fname) LOCKS_EXCLUDED(mu_) {
+ mu_.Lock();
+ bool succeeded = locked_files_.insert(fname).second;
+ mu_.Unlock();
+ return succeeded;
+ }
+ void Remove(const std::string& fname) LOCKS_EXCLUDED(mu_) {
+ mu_.Lock();
locked_files_.erase(fname);
+ mu_.Unlock();
}
+
+ private:
+ port::Mutex mu_;
+ std::set<std::string> locked_files_ GUARDED_BY(mu_);
};
class PosixEnv : public Env {
public:
PosixEnv();
- virtual ~PosixEnv() {
- char msg[] = "Destroying Env::Default()\n";
- fwrite(msg, 1, sizeof(msg), stderr);
- abort();
- }
-
- virtual Status NewSequentialFile(const std::string& fname,
- SequentialFile** result) {
- FILE* f = fopen(fname.c_str(), "r");
- if (f == NULL) {
- *result = NULL;
- return IOError(fname, errno);
- } else {
- *result = new PosixSequentialFile(fname, f);
- return Status::OK();
+ ~PosixEnv() override {
+ static const char msg[] =
+ "PosixEnv singleton destroyed. Unsupported behavior!\n";
+ std::fwrite(msg, 1, sizeof(msg), stderr);
+ std::abort();
+ }
+
+ Status NewSequentialFile(const std::string& filename,
+ SequentialFile** result) override {
+ int fd = ::open(filename.c_str(), O_RDONLY | kOpenBaseFlags);
+ if (fd < 0) {
+ *result = nullptr;
+ return PosixError(filename, errno);
}
+
+ *result = new PosixSequentialFile(filename, fd);
+ return Status::OK();
}
- virtual Status NewRandomAccessFile(const std::string& fname,
- RandomAccessFile** result) {
- *result = NULL;
- Status s;
- int fd = open(fname.c_str(), O_RDONLY);
+ Status NewRandomAccessFile(const std::string& filename,
+ RandomAccessFile** result) override {
+ *result = nullptr;
+ int fd = ::open(filename.c_str(), O_RDONLY | kOpenBaseFlags);
if (fd < 0) {
- s = IOError(fname, errno);
- } else if (mmap_limit_.Acquire()) {
- uint64_t size;
- s = GetFileSize(fname, &size);
- if (s.ok()) {
- void* base = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
- if (base != MAP_FAILED) {
- *result = new PosixMmapReadableFile(fname, base, size, &mmap_limit_);
- } else {
- s = IOError(fname, errno);
- }
- }
- close(fd);
- if (!s.ok()) {
- mmap_limit_.Release();
+ return PosixError(filename, errno);
+ }
+
+ if (!mmap_limiter_.Acquire()) {
+ *result = new PosixRandomAccessFile(filename, fd, &fd_limiter_);
+ return Status::OK();
+ }
+
+ uint64_t file_size;
+ Status status = GetFileSize(filename, &file_size);
+ if (status.ok()) {
+ void* mmap_base =
+ ::mmap(/*addr=*/nullptr, file_size, PROT_READ, MAP_SHARED, fd, 0);
+ if (mmap_base != MAP_FAILED) {
+ *result = new PosixMmapReadableFile(filename,
+ reinterpret_cast<char*>(mmap_base),
+ file_size, &mmap_limiter_);
+ } else {
+ status = PosixError(filename, errno);
}
- } else {
- *result = new PosixRandomAccessFile(fname, fd, &fd_limit_);
}
- return s;
+ ::close(fd);
+ if (!status.ok()) {
+ mmap_limiter_.Release();
+ }
+ return status;
}
- virtual Status NewWritableFile(const std::string& fname,
- WritableFile** result) {
- Status s;
- FILE* f = fopen(fname.c_str(), "w");
- if (f == NULL) {
- *result = NULL;
- s = IOError(fname, errno);
- } else {
- *result = new PosixWritableFile(fname, f);
+ Status NewWritableFile(const std::string& filename,
+ WritableFile** result) override {
+ int fd = ::open(filename.c_str(),
+ O_TRUNC | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
+ if (fd < 0) {
+ *result = nullptr;
+ return PosixError(filename, errno);
}
- return s;
+
+ *result = new PosixWritableFile(filename, fd);
+ return Status::OK();
}
- virtual Status NewAppendableFile(const std::string& fname,
- WritableFile** result) {
- Status s;
- FILE* f = fopen(fname.c_str(), "a");
- if (f == NULL) {
- *result = NULL;
- s = IOError(fname, errno);
- } else {
- *result = new PosixWritableFile(fname, f);
+ Status NewAppendableFile(const std::string& filename,
+ WritableFile** result) override {
+ int fd = ::open(filename.c_str(),
+ O_APPEND | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
+ if (fd < 0) {
+ *result = nullptr;
+ return PosixError(filename, errno);
}
- return s;
+
+ *result = new PosixWritableFile(filename, fd);
+ return Status::OK();
}
- virtual bool FileExists(const std::string& fname) {
- return access(fname.c_str(), F_OK) == 0;
+ bool FileExists(const std::string& filename) override {
+ return ::access(filename.c_str(), F_OK) == 0;
}
- virtual Status GetChildren(const std::string& dir,
- std::vector<std::string>* result) {
+ Status GetChildren(const std::string& directory_path,
+ std::vector<std::string>* result) override {
result->clear();
- DIR* d = opendir(dir.c_str());
- if (d == NULL) {
- return IOError(dir, errno);
+ ::DIR* dir = ::opendir(directory_path.c_str());
+ if (dir == nullptr) {
+ return PosixError(directory_path, errno);
}
- struct dirent* entry;
- while ((entry = readdir(d)) != NULL) {
- result->push_back(entry->d_name);
+ struct ::dirent* entry;
+ while ((entry = ::readdir(dir)) != nullptr) {
+ result->emplace_back(entry->d_name);
}
- closedir(d);
+ ::closedir(dir);
return Status::OK();
}
- virtual Status DeleteFile(const std::string& fname) {
- Status result;
- if (unlink(fname.c_str()) != 0) {
- result = IOError(fname, errno);
+ Status DeleteFile(const std::string& filename) override {
+ if (::unlink(filename.c_str()) != 0) {
+ return PosixError(filename, errno);
}
- return result;
+ return Status::OK();
}
- virtual Status CreateDir(const std::string& name) {
- Status result;
- if (mkdir(name.c_str(), 0755) != 0) {
- result = IOError(name, errno);
+ Status CreateDir(const std::string& dirname) override {
+ if (::mkdir(dirname.c_str(), 0755) != 0) {
+ return PosixError(dirname, errno);
}
- return result;
+ return Status::OK();
}
- virtual Status DeleteDir(const std::string& name) {
- Status result;
- if (rmdir(name.c_str()) != 0) {
- result = IOError(name, errno);
+ Status DeleteDir(const std::string& dirname) override {
+ if (::rmdir(dirname.c_str()) != 0) {
+ return PosixError(dirname, errno);
}
- return result;
+ return Status::OK();
}
- virtual Status GetFileSize(const std::string& fname, uint64_t* size) {
- Status s;
- struct stat sbuf;
- if (stat(fname.c_str(), &sbuf) != 0) {
+ Status GetFileSize(const std::string& filename, uint64_t* size) override {
+ struct ::stat file_stat;
+ if (::stat(filename.c_str(), &file_stat) != 0) {
*size = 0;
- s = IOError(fname, errno);
- } else {
- *size = sbuf.st_size;
+ return PosixError(filename, errno);
}
- return s;
+ *size = file_stat.st_size;
+ return Status::OK();
}
- virtual Status RenameFile(const std::string& src, const std::string& target) {
- Status result;
- if (rename(src.c_str(), target.c_str()) != 0) {
- result = IOError(src, errno);
+ Status RenameFile(const std::string& from, const std::string& to) override {
+ if (std::rename(from.c_str(), to.c_str()) != 0) {
+ return PosixError(from, errno);
}
- return result;
+ return Status::OK();
}
- virtual Status LockFile(const std::string& fname, FileLock** lock) {
- *lock = NULL;
- Status result;
- int fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644);
+ Status LockFile(const std::string& filename, FileLock** lock) override {
+ *lock = nullptr;
+
+ int fd = ::open(filename.c_str(), O_RDWR | O_CREAT | kOpenBaseFlags, 0644);
if (fd < 0) {
- result = IOError(fname, errno);
- } else if (!locks_.Insert(fname)) {
- close(fd);
- result = Status::IOError("lock " + fname, "already held by process");
- } else if (LockOrUnlock(fd, true) == -1) {
- result = IOError("lock " + fname, errno);
- close(fd);
- locks_.Remove(fname);
- } else {
- PosixFileLock* my_lock = new PosixFileLock;
- my_lock->fd_ = fd;
- my_lock->name_ = fname;
- *lock = my_lock;
+ return PosixError(filename, errno);
}
- return result;
+
+ if (!locks_.Insert(filename)) {
+ ::close(fd);
+ return Status::IOError("lock " + filename, "already held by process");
+ }
+
+ if (LockOrUnlock(fd, true) == -1) {
+ int lock_errno = errno;
+ ::close(fd);
+ locks_.Remove(filename);
+ return PosixError("lock " + filename, lock_errno);
+ }
+
+ *lock = new PosixFileLock(fd, filename);
+ return Status::OK();
}
- virtual Status UnlockFile(FileLock* lock) {
- PosixFileLock* my_lock = reinterpret_cast<PosixFileLock*>(lock);
- Status result;
- if (LockOrUnlock(my_lock->fd_, false) == -1) {
- result = IOError("unlock", errno);
+ Status UnlockFile(FileLock* lock) override {
+ PosixFileLock* posix_file_lock = static_cast<PosixFileLock*>(lock);
+ if (LockOrUnlock(posix_file_lock->fd(), false) == -1) {
+ return PosixError("unlock " + posix_file_lock->filename(), errno);
}
- locks_.Remove(my_lock->name_);
- close(my_lock->fd_);
- delete my_lock;
- return result;
+ locks_.Remove(posix_file_lock->filename());
+ ::close(posix_file_lock->fd());
+ delete posix_file_lock;
+ return Status::OK();
}
- virtual void Schedule(void (*function)(void*), void* arg);
+ void Schedule(void (*background_work_function)(void* background_work_arg),
+ void* background_work_arg) override;
- virtual void StartThread(void (*function)(void* arg), void* arg);
+ void StartThread(void (*thread_main)(void* thread_main_arg),
+ void* thread_main_arg) override {
+ std::thread new_thread(thread_main, thread_main_arg);
+ new_thread.detach();
+ }
- virtual Status GetTestDirectory(std::string* result) {
- const char* env = getenv("TEST_TMPDIR");
+ Status GetTestDirectory(std::string* result) override {
+ const char* env = std::getenv("TEST_TMPDIR");
if (env && env[0] != '\0') {
*result = env;
} else {
char buf[100];
- snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d", int(geteuid()));
+ std::snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d",
+ static_cast<int>(::geteuid()));
*result = buf;
}
- // Directory may already exist
+
+ // The CreateDir status is ignored because the directory may already exist.
CreateDir(*result);
+
return Status::OK();
}
- static uint64_t gettid() {
- pthread_t tid = pthread_self();
- uint64_t thread_id = 0;
- memcpy(&thread_id, &tid, std::min(sizeof(thread_id), sizeof(tid)));
- return thread_id;
- }
+ Status NewLogger(const std::string& filename, Logger** result) override {
+ int fd = ::open(filename.c_str(),
+ O_APPEND | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
+ if (fd < 0) {
+ *result = nullptr;
+ return PosixError(filename, errno);
+ }
- virtual Status NewLogger(const std::string& fname, Logger** result) {
- FILE* f = fopen(fname.c_str(), "w");
- if (f == NULL) {
- *result = NULL;
- return IOError(fname, errno);
+ std::FILE* fp = ::fdopen(fd, "w");
+ if (fp == nullptr) {
+ ::close(fd);
+ *result = nullptr;
+ return PosixError(filename, errno);
} else {
- *result = new PosixLogger(f, &PosixEnv::gettid);
+ *result = new PosixLogger(fp);
return Status::OK();
}
}
- virtual uint64_t NowMicros() {
- struct timeval tv;
- gettimeofday(&tv, NULL);
- return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+ uint64_t NowMicros() override {
+ static constexpr uint64_t kUsecondsPerSecond = 1000000;
+ struct ::timeval tv;
+ ::gettimeofday(&tv, nullptr);
+ return static_cast<uint64_t>(tv.tv_sec) * kUsecondsPerSecond + tv.tv_usec;
}
- virtual void SleepForMicroseconds(int micros) {
- usleep(micros);
+ void SleepForMicroseconds(int micros) override {
+ std::this_thread::sleep_for(std::chrono::microseconds(micros));
}
private:
- void PthreadCall(const char* label, int result) {
- if (result != 0) {
- fprintf(stderr, "pthread %s: %s\n", label, strerror(result));
- abort();
- }
- }
+ void BackgroundThreadMain();
- // BGThread() is the body of the background thread
- void BGThread();
- static void* BGThreadWrapper(void* arg) {
- reinterpret_cast<PosixEnv*>(arg)->BGThread();
- return NULL;
+ static void BackgroundThreadEntryPoint(PosixEnv* env) {
+ env->BackgroundThreadMain();
}
- pthread_mutex_t mu_;
- pthread_cond_t bgsignal_;
- pthread_t bgthread_;
- bool started_bgthread_;
+ // Stores the work item data in a Schedule() call.
+ //
+ // Instances are constructed on the thread calling Schedule() and used on the
+ // background thread.
+ //
+  // This structure is thread-safe because it is immutable.
+ struct BackgroundWorkItem {
+ explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
+ : function(function), arg(arg) {}
+
+ void (*const function)(void*);
+ void* const arg;
+ };
- // Entry per Schedule() call
- struct BGItem { void* arg; void (*function)(void*); };
- typedef std::deque<BGItem> BGQueue;
- BGQueue queue_;
+ port::Mutex background_work_mutex_;
+ port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_);
+ bool started_background_thread_ GUARDED_BY(background_work_mutex_);
- PosixLockTable locks_;
- Limiter mmap_limit_;
- Limiter fd_limit_;
+ std::queue<BackgroundWorkItem> background_work_queue_
+ GUARDED_BY(background_work_mutex_);
+
+ PosixLockTable locks_; // Thread-safe.
+ Limiter mmap_limiter_; // Thread-safe.
+ Limiter fd_limiter_; // Thread-safe.
};
// Return the maximum number of concurrent mmaps.
-static int MaxMmaps() {
- if (mmap_limit >= 0) {
- return mmap_limit;
- }
- // Up to 4096 mmaps for 64-bit binaries; none for smaller pointer sizes.
- mmap_limit = sizeof(void*) >= 8 ? 4096 : 0;
- return mmap_limit;
-}
+int MaxMmaps() { return g_mmap_limit; }
// Return the maximum number of read-only files to keep open.
-static intptr_t MaxOpenFiles() {
- if (open_read_only_file_limit >= 0) {
- return open_read_only_file_limit;
+int MaxOpenFiles() {
+ if (g_open_read_only_file_limit >= 0) {
+ return g_open_read_only_file_limit;
}
- struct rlimit rlim;
- if (getrlimit(RLIMIT_NOFILE, &rlim)) {
+ struct ::rlimit rlim;
+ if (::getrlimit(RLIMIT_NOFILE, &rlim)) {
// getrlimit failed, fallback to hard-coded default.
- open_read_only_file_limit = 50;
+ g_open_read_only_file_limit = 50;
} else if (rlim.rlim_cur == RLIM_INFINITY) {
- open_read_only_file_limit = std::numeric_limits<int>::max();
+ g_open_read_only_file_limit = std::numeric_limits<int>::max();
} else {
// Allow use of 20% of available file descriptors for read-only files.
- open_read_only_file_limit = rlim.rlim_cur / 5;
+ g_open_read_only_file_limit = rlim.rlim_cur / 5;
}
- return open_read_only_file_limit;
+ return g_open_read_only_file_limit;
}
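
For a sense of the numbers involved: with a common soft limit of 1024 open files (ulimit -n 1024), the 20% rule above reserves 1024 / 5 = 204 descriptors for read-only files, while an unlimited rlimit falls back to std::numeric_limits<int>::max() and a failed getrlimit() call to a conservative 50.
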
+} // namespace
+
PosixEnv::PosixEnv()
- : started_bgthread_(false),
- mmap_limit_(MaxMmaps()),
- fd_limit_(MaxOpenFiles()) {
- PthreadCall("mutex_init", pthread_mutex_init(&mu_, NULL));
- PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, NULL));
-}
+ : background_work_cv_(&background_work_mutex_),
+ started_background_thread_(false),
+ mmap_limiter_(MaxMmaps()),
+ fd_limiter_(MaxOpenFiles()) {}
-void PosixEnv::Schedule(void (*function)(void*), void* arg) {
- PthreadCall("lock", pthread_mutex_lock(&mu_));
+void PosixEnv::Schedule(
+ void (*background_work_function)(void* background_work_arg),
+ void* background_work_arg) {
+ background_work_mutex_.Lock();
- // Start background thread if necessary
- if (!started_bgthread_) {
- started_bgthread_ = true;
- PthreadCall(
- "create thread",
- pthread_create(&bgthread_, NULL, &PosixEnv::BGThreadWrapper, this));
+ // Start the background thread, if we haven't done so already.
+ if (!started_background_thread_) {
+ started_background_thread_ = true;
+ std::thread background_thread(PosixEnv::BackgroundThreadEntryPoint, this);
+ background_thread.detach();
}
- // If the queue is currently empty, the background thread may currently be
- // waiting.
- if (queue_.empty()) {
- PthreadCall("signal", pthread_cond_signal(&bgsignal_));
+ // If the queue is empty, the background thread may be waiting for work.
+ if (background_work_queue_.empty()) {
+ background_work_cv_.Signal();
}
- // Add to priority queue
- queue_.push_back(BGItem());
- queue_.back().function = function;
- queue_.back().arg = arg;
-
- PthreadCall("unlock", pthread_mutex_unlock(&mu_));
+ background_work_queue_.emplace(background_work_function, background_work_arg);
+ background_work_mutex_.Unlock();
}
-void PosixEnv::BGThread() {
+void PosixEnv::BackgroundThreadMain() {
while (true) {
- // Wait until there is an item that is ready to run
- PthreadCall("lock", pthread_mutex_lock(&mu_));
- while (queue_.empty()) {
- PthreadCall("wait", pthread_cond_wait(&bgsignal_, &mu_));
+ background_work_mutex_.Lock();
+
+ // Wait until there is work to be done.
+ while (background_work_queue_.empty()) {
+ background_work_cv_.Wait();
}
- void (*function)(void*) = queue_.front().function;
- void* arg = queue_.front().arg;
- queue_.pop_front();
+ assert(!background_work_queue_.empty());
+ auto background_work_function = background_work_queue_.front().function;
+ void* background_work_arg = background_work_queue_.front().arg;
+ background_work_queue_.pop();
- PthreadCall("unlock", pthread_mutex_unlock(&mu_));
- (*function)(arg);
+ background_work_mutex_.Unlock();
+ background_work_function(background_work_arg);
}
}
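
The Schedule()/BackgroundThreadMain() pair above is a single-consumer work queue: producers push under the mutex and signal the condition variable, and the worker pops one item at a time and runs it outside the lock. A minimal standard-library sketch of the same shape (WorkQueue is an invented name, not part of the patch; like PosixEnv, it is assumed to live for the whole process because the worker thread is detached):

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>

class WorkQueue {
 public:
  WorkQueue() {
    // The worker is detached and runs for the process lifetime, as in PosixEnv.
    std::thread([this] { Loop(); }).detach();
  }

  void Schedule(std::function<void()> work) {
    {
      std::lock_guard<std::mutex> lock(mu_);
      queue_.push(std::move(work));
    }
    cv_.notify_one();  // wake the worker if it was sleeping on an empty queue
  }

 private:
  void Loop() {
    while (true) {
      std::unique_lock<std::mutex> lock(mu_);
      cv_.wait(lock, [this] { return !queue_.empty(); });
      std::function<void()> work = std::move(queue_.front());
      queue_.pop();
      lock.unlock();  // run the work item without holding the mutex
      work();
    }
  }

  std::mutex mu_;
  std::condition_variable cv_;
  std::queue<std::function<void()>> queue_;
};

The patch additionally skips the Signal() when the queue was non-empty, a small optimization that avoids waking a worker that cannot be waiting.
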
namespace {
-struct StartThreadState {
- void (*user_function)(void*);
- void* arg;
+
+// Wraps an Env instance whose destructor is never run.
+//
+// Intended usage:
+// using PlatformSingletonEnv = SingletonEnv<PlatformEnv>;
+// void ConfigurePosixEnv(int param) {
+// PlatformSingletonEnv::AssertEnvNotInitialized();
+// // set global configuration flags.
+// }
+// Env* Env::Default() {
+// static PlatformSingletonEnv default_env;
+// return default_env.env();
+// }
+template <typename EnvType>
+class SingletonEnv {
+ public:
+ SingletonEnv() {
+#if !defined(NDEBUG)
+ env_initialized_.store(true, std::memory_order::memory_order_relaxed);
+#endif // !defined(NDEBUG)
+ static_assert(sizeof(env_storage_) >= sizeof(EnvType),
+ "env_storage_ will not fit the Env");
+ static_assert(alignof(decltype(env_storage_)) >= alignof(EnvType),
+ "env_storage_ does not meet the Env's alignment needs");
+ new (&env_storage_) EnvType();
+ }
+ ~SingletonEnv() = default;
+
+ SingletonEnv(const SingletonEnv&) = delete;
+ SingletonEnv& operator=(const SingletonEnv&) = delete;
+
+ Env* env() { return reinterpret_cast<Env*>(&env_storage_); }
+
+ static void AssertEnvNotInitialized() {
+#if !defined(NDEBUG)
+ assert(!env_initialized_.load(std::memory_order::memory_order_relaxed));
+#endif // !defined(NDEBUG)
+ }
+
+ private:
+ typename std::aligned_storage<sizeof(EnvType), alignof(EnvType)>::type
+ env_storage_;
+#if !defined(NDEBUG)
+ static std::atomic<bool> env_initialized_;
+#endif // !defined(NDEBUG)
};
-}
-static void* StartThreadWrapper(void* arg) {
- StartThreadState* state = reinterpret_cast<StartThreadState*>(arg);
- state->user_function(state->arg);
- delete state;
- return NULL;
-}
-void PosixEnv::StartThread(void (*function)(void* arg), void* arg) {
- pthread_t t;
- StartThreadState* state = new StartThreadState;
- state->user_function = function;
- state->arg = arg;
- PthreadCall("start thread",
- pthread_create(&t, NULL, &StartThreadWrapper, state));
-}
+#if !defined(NDEBUG)
+template <typename EnvType>
+std::atomic<bool> SingletonEnv<EnvType>::env_initialized_;
+#endif // !defined(NDEBUG)
-} // namespace
+using PosixDefaultEnv = SingletonEnv<PosixEnv>;
-static pthread_once_t once = PTHREAD_ONCE_INIT;
-static Env* default_env;
-static void InitDefaultEnv() { default_env = new PosixEnv; }
+} // namespace
void EnvPosixTestHelper::SetReadOnlyFDLimit(int limit) {
- assert(default_env == NULL);
- open_read_only_file_limit = limit;
+ PosixDefaultEnv::AssertEnvNotInitialized();
+ g_open_read_only_file_limit = limit;
}
void EnvPosixTestHelper::SetReadOnlyMMapLimit(int limit) {
- assert(default_env == NULL);
- mmap_limit = limit;
+ PosixDefaultEnv::AssertEnvNotInitialized();
+ g_mmap_limit = limit;
}
Env* Env::Default() {
- pthread_once(&once, InitDefaultEnv);
- return default_env;
+ static PosixDefaultEnv env_container;
+ return env_container.env();
}
} // namespace leveldb
-
-#endif
diff --git a/src/leveldb/util/env_posix_test.cc b/src/leveldb/util/env_posix_test.cc
index 295f8ae440..9675d739ad 100644
--- a/src/leveldb/util/env_posix_test.cc
+++ b/src/leveldb/util/env_posix_test.cc
@@ -2,27 +2,182 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "leveldb/env.h"
+#include <sys/resource.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+#include <unordered_set>
+#include <vector>
+#include "leveldb/env.h"
#include "port/port.h"
-#include "util/testharness.h"
#include "util/env_posix_test_helper.h"
+#include "util/testharness.h"
+
+#if HAVE_O_CLOEXEC
+
+namespace {
+
+// Exit codes for the helper process spawned by TestCloseOnExec* tests.
+// Useful for debugging test failures.
+constexpr int kTextCloseOnExecHelperExecFailedCode = 61;
+constexpr int kTextCloseOnExecHelperDup2FailedCode = 62;
+constexpr int kTextCloseOnExecHelperFoundOpenFdCode = 63;
+
+// Global set by main() and read in TestCloseOnExec.
+//
+// The argv[0] value is stored in a std::vector instead of a std::string because
+// std::string does not return a mutable pointer to its buffer until C++17.
+//
+// The vector stores the string pointed to by argv[0], plus the trailing null.
+std::vector<char>* GetArgvZero() {
+ static std::vector<char> program_name;
+ return &program_name;
+}
+
+// Command-line switch used to run this test as the CloseOnExecSwitch helper.
+static const char kTestCloseOnExecSwitch[] = "--test-close-on-exec-helper";
+
+// Executed in a separate process by TestCloseOnExec* tests.
+//
+// main() delegates to this function when the test executable is launched with
+// a special command-line switch. TestCloseOnExec* tests fork()+exec() the test
+// executable and pass the special command-line switch.
+//
+// When main() delegates to this function, the process probes whether a given
+// file descriptor is open, and communicates the result via its exit code.
+int TestCloseOnExecHelperMain(char* pid_arg) {
+ int fd = std::atoi(pid_arg);
+ // When given the same file descriptor twice, dup2() returns -1 if the
+ // file descriptor is closed, or the given file descriptor if it is open.
+ if (::dup2(fd, fd) == fd) {
+ std::fprintf(stderr, "Unexpected open fd %d\n", fd);
+ return kTextCloseOnExecHelperFoundOpenFdCode;
+ }
+ // Double-check that dup2() is saying the file descriptor is closed.
+ if (errno != EBADF) {
+ std::fprintf(stderr, "Unexpected errno after calling dup2 on fd %d: %s\n",
+ fd, std::strerror(errno));
+ return kTextCloseOnExecHelperDup2FailedCode;
+ }
+ return 0;
+}
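
The dup2(fd, fd) probe used above (and again in GetOpenFileDescriptors() below) deserves a note: duplicating a descriptor onto itself is a no-op that succeeds exactly when the descriptor is open. A tiny illustration (IsFdOpen is a hypothetical helper, not part of the patch):

#include <cerrno>
#include <unistd.h>

// Returns true if |fd| refers to an open file description. dup2() onto the
// same descriptor returns fd when it is open, and -1 with errno == EBADF when
// it is closed, without creating or closing anything.
bool IsFdOpen(int fd) {
  return ::dup2(fd, fd) == fd;
}
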
+
+// File descriptors are small non-negative integers.
+//
+// Returns void so the implementation can use ASSERT_EQ.
+void GetMaxFileDescriptor(int* result_fd) {
+ // Get the maximum file descriptor number.
+ ::rlimit fd_rlimit;
+ ASSERT_EQ(0, ::getrlimit(RLIMIT_NOFILE, &fd_rlimit));
+ *result_fd = fd_rlimit.rlim_cur;
+}
+
+// Iterates through all possible FDs and returns the currently open ones.
+//
+// Returns void so the implementation can use ASSERT_EQ.
+void GetOpenFileDescriptors(std::unordered_set<int>* open_fds) {
+ int max_fd = 0;
+ GetMaxFileDescriptor(&max_fd);
+
+ for (int fd = 0; fd < max_fd; ++fd) {
+ if (::dup2(fd, fd) != fd) {
+ // When given the same file descriptor twice, dup2() returns -1 if the
+ // file descriptor is closed, or the given file descriptor if it is open.
+ //
+ // Double-check that dup2() is saying the fd is closed.
+ ASSERT_EQ(EBADF, errno)
+ << "dup2() should set errno to EBADF on closed file descriptors";
+ continue;
+ }
+ open_fds->insert(fd);
+ }
+}
+
+// Finds an FD open since a previous call to GetOpenFileDescriptors().
+//
+// |baseline_open_fds| is the result of a previous GetOpenFileDescriptors()
+// call. Assumes that exactly one FD was opened since that call.
+//
+// Returns void so the implementation can use ASSERT_EQ.
+void GetNewlyOpenedFileDescriptor(
+ const std::unordered_set<int>& baseline_open_fds, int* result_fd) {
+ std::unordered_set<int> open_fds;
+ GetOpenFileDescriptors(&open_fds);
+ for (int fd : baseline_open_fds) {
+ ASSERT_EQ(1, open_fds.count(fd))
+ << "Previously opened file descriptor was closed during test setup";
+ open_fds.erase(fd);
+ }
+ ASSERT_EQ(1, open_fds.size())
+ << "Expected exactly one newly opened file descriptor during test setup";
+ *result_fd = *open_fds.begin();
+}
+
+// Check that a fork()+exec()-ed child process does not have an extra open FD.
+void CheckCloseOnExecDoesNotLeakFDs(
+ const std::unordered_set<int>& baseline_open_fds) {
+ // Prepare the argument list for the child process.
+ // execv() wants mutable buffers.
+ char switch_buffer[sizeof(kTestCloseOnExecSwitch)];
+ std::memcpy(switch_buffer, kTestCloseOnExecSwitch,
+ sizeof(kTestCloseOnExecSwitch));
+
+ int probed_fd;
+ GetNewlyOpenedFileDescriptor(baseline_open_fds, &probed_fd);
+ std::string fd_string = std::to_string(probed_fd);
+ std::vector<char> fd_buffer(fd_string.begin(), fd_string.end());
+ fd_buffer.emplace_back('\0');
+
+ // The helper process is launched with the command below.
+ // env_posix_tests --test-close-on-exec-helper 3
+ char* child_argv[] = {GetArgvZero()->data(), switch_buffer, fd_buffer.data(),
+ nullptr};
+
+ constexpr int kForkInChildProcessReturnValue = 0;
+ int child_pid = fork();
+ if (child_pid == kForkInChildProcessReturnValue) {
+ ::execv(child_argv[0], child_argv);
+ std::fprintf(stderr, "Error spawning child process: %s\n", strerror(errno));
+ std::exit(kTextCloseOnExecHelperExecFailedCode);
+ }
+
+ int child_status = 0;
+ ASSERT_EQ(child_pid, ::waitpid(child_pid, &child_status, 0));
+ ASSERT_TRUE(WIFEXITED(child_status))
+ << "The helper process did not exit with an exit code";
+ ASSERT_EQ(0, WEXITSTATUS(child_status))
+ << "The helper process encountered an error";
+}
+
+} // namespace
+
+#endif // HAVE_O_CLOEXEC
namespace leveldb {
-static const int kDelayMicros = 100000;
static const int kReadOnlyFileLimit = 4;
static const int kMMapLimit = 4;
class EnvPosixTest {
public:
- Env* env_;
- EnvPosixTest() : env_(Env::Default()) { }
-
static void SetFileLimits(int read_only_file_limit, int mmap_limit) {
EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit);
EnvPosixTestHelper::SetReadOnlyMMapLimit(mmap_limit);
}
+
+ EnvPosixTest() : env_(Env::Default()) {}
+
+ Env* env_;
};
TEST(EnvPosixTest, TestOpenOnRead) {
@@ -31,8 +186,8 @@ TEST(EnvPosixTest, TestOpenOnRead) {
ASSERT_OK(env_->GetTestDirectory(&test_dir));
std::string test_file = test_dir + "/open_on_read.txt";
- FILE* f = fopen(test_file.c_str(), "w");
- ASSERT_TRUE(f != NULL);
+ FILE* f = fopen(test_file.c_str(), "we");
+ ASSERT_TRUE(f != nullptr);
const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
fputs(kFileData, f);
fclose(f);
@@ -56,9 +211,138 @@ TEST(EnvPosixTest, TestOpenOnRead) {
ASSERT_OK(env_->DeleteFile(test_file));
}
+#if HAVE_O_CLOEXEC
+
+TEST(EnvPosixTest, TestCloseOnExecSequentialFile) {
+ std::unordered_set<int> open_fds;
+ GetOpenFileDescriptors(&open_fds);
+
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string file_path = test_dir + "/close_on_exec_sequential.txt";
+ ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+ leveldb::SequentialFile* file = nullptr;
+ ASSERT_OK(env_->NewSequentialFile(file_path, &file));
+ CheckCloseOnExecDoesNotLeakFDs(open_fds);
+ delete file;
+
+ ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+TEST(EnvPosixTest, TestCloseOnExecRandomAccessFile) {
+ std::unordered_set<int> open_fds;
+ GetOpenFileDescriptors(&open_fds);
+
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string file_path = test_dir + "/close_on_exec_random_access.txt";
+ ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+ // Exhaust the RandomAccessFile mmap limit. This way, the test
+ // RandomAccessFile instance below is backed by a file descriptor, not by an
+ // mmap region.
+ leveldb::RandomAccessFile* mmapped_files[kReadOnlyFileLimit] = {nullptr};
+ for (int i = 0; i < kReadOnlyFileLimit; i++) {
+ ASSERT_OK(env_->NewRandomAccessFile(file_path, &mmapped_files[i]));
+ }
+
+ leveldb::RandomAccessFile* file = nullptr;
+ ASSERT_OK(env_->NewRandomAccessFile(file_path, &file));
+ CheckCloseOnExecDoesNotLeakFDs(open_fds);
+ delete file;
+
+ for (int i = 0; i < kReadOnlyFileLimit; i++) {
+ delete mmapped_files[i];
+ }
+ ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+TEST(EnvPosixTest, TestCloseOnExecWritableFile) {
+ std::unordered_set<int> open_fds;
+ GetOpenFileDescriptors(&open_fds);
+
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string file_path = test_dir + "/close_on_exec_writable.txt";
+ ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+ leveldb::WritableFile* file = nullptr;
+ ASSERT_OK(env_->NewWritableFile(file_path, &file));
+ CheckCloseOnExecDoesNotLeakFDs(open_fds);
+ delete file;
+
+ ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+TEST(EnvPosixTest, TestCloseOnExecAppendableFile) {
+ std::unordered_set<int> open_fds;
+ GetOpenFileDescriptors(&open_fds);
+
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string file_path = test_dir + "/close_on_exec_appendable.txt";
+ ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+ leveldb::WritableFile* file = nullptr;
+ ASSERT_OK(env_->NewAppendableFile(file_path, &file));
+ CheckCloseOnExecDoesNotLeakFDs(open_fds);
+ delete file;
+
+ ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+TEST(EnvPosixTest, TestCloseOnExecLockFile) {
+ std::unordered_set<int> open_fds;
+ GetOpenFileDescriptors(&open_fds);
+
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string file_path = test_dir + "/close_on_exec_lock.txt";
+ ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+ leveldb::FileLock* lock = nullptr;
+ ASSERT_OK(env_->LockFile(file_path, &lock));
+ CheckCloseOnExecDoesNotLeakFDs(open_fds);
+ ASSERT_OK(env_->UnlockFile(lock));
+
+ ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+TEST(EnvPosixTest, TestCloseOnExecLogger) {
+ std::unordered_set<int> open_fds;
+ GetOpenFileDescriptors(&open_fds);
+
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string file_path = test_dir + "/close_on_exec_logger.txt";
+ ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path));
+
+ leveldb::Logger* file = nullptr;
+ ASSERT_OK(env_->NewLogger(file_path, &file));
+ CheckCloseOnExecDoesNotLeakFDs(open_fds);
+ delete file;
+
+ ASSERT_OK(env_->DeleteFile(file_path));
+}
+
+#endif // HAVE_O_CLOEXEC
+
} // namespace leveldb
int main(int argc, char** argv) {
+#if HAVE_O_CLOEXEC
+ // Check if we're invoked as a helper program, or as the test suite.
+ for (int i = 1; i < argc; ++i) {
+ if (!std::strcmp(argv[i], kTestCloseOnExecSwitch)) {
+ return TestCloseOnExecHelperMain(argv[i + 1]);
+ }
+ }
+
+ // Save argv[0] early, because googletest may modify argv.
+ GetArgvZero()->assign(argv[0], argv[0] + std::strlen(argv[0]) + 1);
+#endif // HAVE_O_CLOEXEC
+
// All tests currently run with the same read-only file limits.
leveldb::EnvPosixTest::SetFileLimits(leveldb::kReadOnlyFileLimit,
leveldb::kMMapLimit);
diff --git a/src/leveldb/util/env_test.cc b/src/leveldb/util/env_test.cc
index 839ae56a1a..7db03fc11c 100644
--- a/src/leveldb/util/env_test.cc
+++ b/src/leveldb/util/env_test.cc
@@ -4,72 +4,144 @@
#include "leveldb/env.h"
+#include <algorithm>
+
#include "port/port.h"
+#include "port/thread_annotations.h"
+#include "util/mutexlock.h"
#include "util/testharness.h"
+#include "util/testutil.h"
namespace leveldb {
static const int kDelayMicros = 100000;
-static const int kReadOnlyFileLimit = 4;
-static const int kMMapLimit = 4;
class EnvTest {
- private:
- port::Mutex mu_;
- std::string events_;
-
public:
+ EnvTest() : env_(Env::Default()) {}
+
Env* env_;
- EnvTest() : env_(Env::Default()) { }
};
-static void SetBool(void* ptr) {
- reinterpret_cast<port::AtomicPointer*>(ptr)->NoBarrier_Store(ptr);
+TEST(EnvTest, ReadWrite) {
+ Random rnd(test::RandomSeed());
+
+ // Get file to use for testing.
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string test_file_name = test_dir + "/open_on_read.txt";
+ WritableFile* writable_file;
+ ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file));
+
+ // Fill a file with data generated via a sequence of randomly sized writes.
+ static const size_t kDataSize = 10 * 1048576;
+ std::string data;
+ while (data.size() < kDataSize) {
+ int len = rnd.Skewed(18); // Up to 2^18 - 1, but typically much smaller
+ std::string r;
+ test::RandomString(&rnd, len, &r);
+ ASSERT_OK(writable_file->Append(r));
+ data += r;
+ if (rnd.OneIn(10)) {
+ ASSERT_OK(writable_file->Flush());
+ }
+ }
+ ASSERT_OK(writable_file->Sync());
+ ASSERT_OK(writable_file->Close());
+ delete writable_file;
+
+ // Read all data using a sequence of randomly sized reads.
+ SequentialFile* sequential_file;
+ ASSERT_OK(env_->NewSequentialFile(test_file_name, &sequential_file));
+ std::string read_result;
+ std::string scratch;
+ while (read_result.size() < data.size()) {
+ int len = std::min<int>(rnd.Skewed(18), data.size() - read_result.size());
+ scratch.resize(std::max(len, 1)); // at least 1 so &scratch[0] is legal
+ Slice read;
+ ASSERT_OK(sequential_file->Read(len, &read, &scratch[0]));
+ if (len > 0) {
+ ASSERT_GT(read.size(), 0);
+ }
+ ASSERT_LE(read.size(), len);
+ read_result.append(read.data(), read.size());
+ }
+ ASSERT_EQ(read_result, data);
+ delete sequential_file;
}
TEST(EnvTest, RunImmediately) {
- port::AtomicPointer called (NULL);
- env_->Schedule(&SetBool, &called);
- env_->SleepForMicroseconds(kDelayMicros);
- ASSERT_TRUE(called.NoBarrier_Load() != NULL);
+ struct RunState {
+ port::Mutex mu;
+ port::CondVar cvar{&mu};
+ bool called = false;
+
+ static void Run(void* arg) {
+ RunState* state = reinterpret_cast<RunState*>(arg);
+ MutexLock l(&state->mu);
+ ASSERT_EQ(state->called, false);
+ state->called = true;
+ state->cvar.Signal();
+ }
+ };
+
+ RunState state;
+ env_->Schedule(&RunState::Run, &state);
+
+ MutexLock l(&state.mu);
+ while (!state.called) {
+ state.cvar.Wait();
+ }
}
TEST(EnvTest, RunMany) {
- port::AtomicPointer last_id (NULL);
+ struct RunState {
+ port::Mutex mu;
+ port::CondVar cvar{&mu};
+ int last_id = 0;
+ };
+
+ struct Callback {
+ RunState* state_; // Pointer to shared state.
+ const int id_; // Order# for the execution of this callback.
- struct CB {
- port::AtomicPointer* last_id_ptr; // Pointer to shared slot
- uintptr_t id; // Order# for the execution of this callback
+ Callback(RunState* s, int id) : state_(s), id_(id) {}
- CB(port::AtomicPointer* p, int i) : last_id_ptr(p), id(i) { }
+ static void Run(void* arg) {
+ Callback* callback = reinterpret_cast<Callback*>(arg);
+ RunState* state = callback->state_;
- static void Run(void* v) {
- CB* cb = reinterpret_cast<CB*>(v);
- void* cur = cb->last_id_ptr->NoBarrier_Load();
- ASSERT_EQ(cb->id-1, reinterpret_cast<uintptr_t>(cur));
- cb->last_id_ptr->Release_Store(reinterpret_cast<void*>(cb->id));
+ MutexLock l(&state->mu);
+ ASSERT_EQ(state->last_id, callback->id_ - 1);
+ state->last_id = callback->id_;
+ state->cvar.Signal();
}
};
- // Schedule in different order than start time
- CB cb1(&last_id, 1);
- CB cb2(&last_id, 2);
- CB cb3(&last_id, 3);
- CB cb4(&last_id, 4);
- env_->Schedule(&CB::Run, &cb1);
- env_->Schedule(&CB::Run, &cb2);
- env_->Schedule(&CB::Run, &cb3);
- env_->Schedule(&CB::Run, &cb4);
-
- env_->SleepForMicroseconds(kDelayMicros);
- void* cur = last_id.Acquire_Load();
- ASSERT_EQ(4, reinterpret_cast<uintptr_t>(cur));
+ RunState state;
+ Callback callback1(&state, 1);
+ Callback callback2(&state, 2);
+ Callback callback3(&state, 3);
+ Callback callback4(&state, 4);
+ env_->Schedule(&Callback::Run, &callback1);
+ env_->Schedule(&Callback::Run, &callback2);
+ env_->Schedule(&Callback::Run, &callback3);
+ env_->Schedule(&Callback::Run, &callback4);
+
+ MutexLock l(&state.mu);
+ while (state.last_id != 4) {
+ state.cvar.Wait();
+ }
}
struct State {
port::Mutex mu;
- int val;
- int num_running;
+ port::CondVar cvar{&mu};
+
+ int val GUARDED_BY(mu);
+ int num_running GUARDED_BY(mu);
+
+ State(int val, int num_running) : val(val), num_running(num_running) {}
};
static void ThreadBody(void* arg) {
@@ -77,30 +149,89 @@ static void ThreadBody(void* arg) {
s->mu.Lock();
s->val += 1;
s->num_running -= 1;
+ s->cvar.Signal();
s->mu.Unlock();
}
TEST(EnvTest, StartThread) {
- State state;
- state.val = 0;
- state.num_running = 3;
+ State state(0, 3);
for (int i = 0; i < 3; i++) {
env_->StartThread(&ThreadBody, &state);
}
- while (true) {
- state.mu.Lock();
- int num = state.num_running;
- state.mu.Unlock();
- if (num == 0) {
- break;
- }
- env_->SleepForMicroseconds(kDelayMicros);
+
+ MutexLock l(&state.mu);
+ while (state.num_running != 0) {
+ state.cvar.Wait();
}
ASSERT_EQ(state.val, 3);
}
-} // namespace leveldb
+TEST(EnvTest, TestOpenNonExistentFile) {
+  // Pick a file name in the test directory that is never actually created.
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+
+ std::string non_existent_file = test_dir + "/non_existent_file";
+ ASSERT_TRUE(!env_->FileExists(non_existent_file));
+
+ RandomAccessFile* random_access_file;
+ Status status =
+ env_->NewRandomAccessFile(non_existent_file, &random_access_file);
+ ASSERT_TRUE(status.IsNotFound());
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
+ SequentialFile* sequential_file;
+ status = env_->NewSequentialFile(non_existent_file, &sequential_file);
+ ASSERT_TRUE(status.IsNotFound());
}
+
+TEST(EnvTest, ReopenWritableFile) {
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string test_file_name = test_dir + "/reopen_writable_file.txt";
+ env_->DeleteFile(test_file_name);
+
+ WritableFile* writable_file;
+ ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file));
+ std::string data("hello world!");
+ ASSERT_OK(writable_file->Append(data));
+ ASSERT_OK(writable_file->Close());
+ delete writable_file;
+
+ ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file));
+ data = "42";
+ ASSERT_OK(writable_file->Append(data));
+ ASSERT_OK(writable_file->Close());
+ delete writable_file;
+
+ ASSERT_OK(ReadFileToString(env_, test_file_name, &data));
+ ASSERT_EQ(std::string("42"), data);
+ env_->DeleteFile(test_file_name);
+}
+
+TEST(EnvTest, ReopenAppendableFile) {
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string test_file_name = test_dir + "/reopen_appendable_file.txt";
+ env_->DeleteFile(test_file_name);
+
+ WritableFile* appendable_file;
+ ASSERT_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
+ std::string data("hello world!");
+ ASSERT_OK(appendable_file->Append(data));
+ ASSERT_OK(appendable_file->Close());
+ delete appendable_file;
+
+ ASSERT_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
+ data = "42";
+ ASSERT_OK(appendable_file->Append(data));
+ ASSERT_OK(appendable_file->Close());
+ delete appendable_file;
+
+ ASSERT_OK(ReadFileToString(env_, test_file_name, &data));
+ ASSERT_EQ(std::string("hello world!42"), data);
+ env_->DeleteFile(test_file_name);
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/env_win.cc b/src/leveldb/util/env_win.cc
deleted file mode 100644
index 830332abe9..0000000000
--- a/src/leveldb/util/env_win.cc
+++ /dev/null
@@ -1,902 +0,0 @@
-// This file contains source that originates from:
-// http://code.google.com/p/leveldbwin/source/browse/trunk/win32_impl_src/env_win32.h
-// http://code.google.com/p/leveldbwin/source/browse/trunk/win32_impl_src/port_win32.cc
-// Those files don't have any explicit license headers but the
-// project (http://code.google.com/p/leveldbwin/) lists the 'New BSD License'
-// as the license.
-#if defined(LEVELDB_PLATFORM_WINDOWS)
-#include <map>
-
-
-#include "leveldb/env.h"
-
-#include "port/port.h"
-#include "leveldb/slice.h"
-#include "util/logging.h"
-
-#include <shlwapi.h>
-#include <process.h>
-#include <cstring>
-#include <stdio.h>
-#include <errno.h>
-#include <io.h>
-#include <algorithm>
-
-#ifdef max
-#undef max
-#endif
-
-#ifndef va_copy
-#define va_copy(d,s) ((d) = (s))
-#endif
-
-#if defined DeleteFile
-#undef DeleteFile
-#endif
-
-//Declarations
-namespace leveldb
-{
-
-namespace Win32
-{
-
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&); \
- void operator=(const TypeName&)
-
-std::string GetCurrentDir();
-std::wstring GetCurrentDirW();
-
-static const std::string CurrentDir = GetCurrentDir();
-static const std::wstring CurrentDirW = GetCurrentDirW();
-
-std::string& ModifyPath(std::string& path);
-std::wstring& ModifyPath(std::wstring& path);
-
-std::string GetLastErrSz();
-std::wstring GetLastErrSzW();
-
-size_t GetPageSize();
-
-typedef void (*ScheduleProc)(void*) ;
-
-struct WorkItemWrapper
-{
- WorkItemWrapper(ScheduleProc proc_,void* content_);
- ScheduleProc proc;
- void* pContent;
-};
-
-DWORD WINAPI WorkItemWrapperProc(LPVOID pContent);
-
-class Win32SequentialFile : public SequentialFile
-{
-public:
- friend class Win32Env;
- virtual ~Win32SequentialFile();
- virtual Status Read(size_t n, Slice* result, char* scratch);
- virtual Status Skip(uint64_t n);
- BOOL isEnable();
- virtual std::string GetName() const { return _filename; }
-private:
- BOOL _Init();
- void _CleanUp();
- Win32SequentialFile(const std::string& fname);
- std::string _filename;
- ::HANDLE _hFile;
- DISALLOW_COPY_AND_ASSIGN(Win32SequentialFile);
-};
-
-class Win32RandomAccessFile : public RandomAccessFile
-{
-public:
- friend class Win32Env;
- virtual ~Win32RandomAccessFile();
- virtual Status Read(uint64_t offset, size_t n, Slice* result,char* scratch) const;
- BOOL isEnable();
- virtual std::string GetName() const { return _filename; }
-private:
- BOOL _Init(LPCWSTR path);
- void _CleanUp();
- Win32RandomAccessFile(const std::string& fname);
- HANDLE _hFile;
- const std::string _filename;
- DISALLOW_COPY_AND_ASSIGN(Win32RandomAccessFile);
-};
-
-class Win32WritableFile : public WritableFile
-{
-public:
- Win32WritableFile(const std::string& fname, bool append);
- ~Win32WritableFile();
-
- virtual Status Append(const Slice& data);
- virtual Status Close();
- virtual Status Flush();
- virtual Status Sync();
- BOOL isEnable();
- virtual std::string GetName() const { return filename_; }
-private:
- std::string filename_;
- ::HANDLE _hFile;
-};
-
-class Win32FileLock : public FileLock
-{
-public:
- friend class Win32Env;
- virtual ~Win32FileLock();
- BOOL isEnable();
-private:
- BOOL _Init(LPCWSTR path);
- void _CleanUp();
- Win32FileLock(const std::string& fname);
- HANDLE _hFile;
- std::string _filename;
- DISALLOW_COPY_AND_ASSIGN(Win32FileLock);
-};
-
-class Win32Logger : public Logger
-{
-public:
- friend class Win32Env;
- virtual ~Win32Logger();
- virtual void Logv(const char* format, va_list ap);
-private:
- explicit Win32Logger(WritableFile* pFile);
- WritableFile* _pFileProxy;
- DISALLOW_COPY_AND_ASSIGN(Win32Logger);
-};
-
-class Win32Env : public Env
-{
-public:
- Win32Env();
- virtual ~Win32Env();
- virtual Status NewSequentialFile(const std::string& fname,
- SequentialFile** result);
-
- virtual Status NewRandomAccessFile(const std::string& fname,
- RandomAccessFile** result);
- virtual Status NewWritableFile(const std::string& fname,
- WritableFile** result);
- virtual Status NewAppendableFile(const std::string& fname,
- WritableFile** result);
-
- virtual bool FileExists(const std::string& fname);
-
- virtual Status GetChildren(const std::string& dir,
- std::vector<std::string>* result);
-
- virtual Status DeleteFile(const std::string& fname);
-
- virtual Status CreateDir(const std::string& dirname);
-
- virtual Status DeleteDir(const std::string& dirname);
-
- virtual Status GetFileSize(const std::string& fname, uint64_t* file_size);
-
- virtual Status RenameFile(const std::string& src,
- const std::string& target);
-
- virtual Status LockFile(const std::string& fname, FileLock** lock);
-
- virtual Status UnlockFile(FileLock* lock);
-
- virtual void Schedule(
- void (*function)(void* arg),
- void* arg);
-
- virtual void StartThread(void (*function)(void* arg), void* arg);
-
- virtual Status GetTestDirectory(std::string* path);
-
- //virtual void Logv(WritableFile* log, const char* format, va_list ap);
-
- virtual Status NewLogger(const std::string& fname, Logger** result);
-
- virtual uint64_t NowMicros();
-
- virtual void SleepForMicroseconds(int micros);
-};
-
-void ToWidePath(const std::string& value, std::wstring& target) {
- wchar_t buffer[MAX_PATH];
- MultiByteToWideChar(CP_UTF8, 0, value.c_str(), -1, buffer, MAX_PATH);
- target = buffer;
-}
-
-void ToNarrowPath(const std::wstring& value, std::string& target) {
- char buffer[MAX_PATH];
- WideCharToMultiByte(CP_UTF8, 0, value.c_str(), -1, buffer, MAX_PATH, NULL, NULL);
- target = buffer;
-}
-
-std::wstring GetCurrentDirW()
-{
- WCHAR path[MAX_PATH];
- ::GetModuleFileNameW(::GetModuleHandleW(NULL),path,MAX_PATH);
- *wcsrchr(path,L'\\') = 0;
- return std::wstring(path);
-}
-
-std::string GetCurrentDir()
-{
- std::string path;
- ToNarrowPath(GetCurrentDirW(), path);
- return path;
-}
-
-std::string& ModifyPath(std::string& path)
-{
- if(path[0] == '/' || path[0] == '\\'){
- path = CurrentDir + path;
- }
- std::replace(path.begin(),path.end(),'/','\\');
-
- return path;
-}
-
-std::wstring& ModifyPath(std::wstring& path)
-{
- if(path[0] == L'/' || path[0] == L'\\'){
- path = CurrentDirW + path;
- }
- std::replace(path.begin(),path.end(),L'/',L'\\');
- return path;
-}
-
-std::string GetLastErrSz()
-{
- LPWSTR lpMsgBuf;
- FormatMessageW(
- FORMAT_MESSAGE_ALLOCATE_BUFFER |
- FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL,
- GetLastError(),
- 0, // Default language
- (LPWSTR) &lpMsgBuf,
- 0,
- NULL
- );
- std::string Err;
- ToNarrowPath(lpMsgBuf, Err);
- LocalFree( lpMsgBuf );
- return Err;
-}
-
-std::wstring GetLastErrSzW()
-{
- LPVOID lpMsgBuf;
- FormatMessageW(
- FORMAT_MESSAGE_ALLOCATE_BUFFER |
- FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL,
- GetLastError(),
- 0, // Default language
- (LPWSTR) &lpMsgBuf,
- 0,
- NULL
- );
- std::wstring Err = (LPCWSTR)lpMsgBuf;
- LocalFree(lpMsgBuf);
- return Err;
-}
-
-WorkItemWrapper::WorkItemWrapper( ScheduleProc proc_,void* content_ ) :
- proc(proc_),pContent(content_)
-{
-
-}
-
-DWORD WINAPI WorkItemWrapperProc(LPVOID pContent)
-{
- WorkItemWrapper* item = static_cast<WorkItemWrapper*>(pContent);
- ScheduleProc TempProc = item->proc;
- void* arg = item->pContent;
- delete item;
- TempProc(arg);
- return 0;
-}
-
-size_t GetPageSize()
-{
- SYSTEM_INFO si;
- GetSystemInfo(&si);
- return std::max(si.dwPageSize,si.dwAllocationGranularity);
-}
-
-const size_t g_PageSize = GetPageSize();
-
-
-Win32SequentialFile::Win32SequentialFile( const std::string& fname ) :
- _filename(fname),_hFile(NULL)
-{
- _Init();
-}
-
-Win32SequentialFile::~Win32SequentialFile()
-{
- _CleanUp();
-}
-
-Status Win32SequentialFile::Read( size_t n, Slice* result, char* scratch )
-{
- Status sRet;
- DWORD hasRead = 0;
- if(_hFile && ReadFile(_hFile,scratch,n,&hasRead,NULL) ){
- *result = Slice(scratch,hasRead);
- } else {
- sRet = Status::IOError(_filename, Win32::GetLastErrSz() );
- }
- return sRet;
-}
-
-Status Win32SequentialFile::Skip( uint64_t n )
-{
- Status sRet;
- LARGE_INTEGER Move,NowPointer;
- Move.QuadPart = n;
- if(!SetFilePointerEx(_hFile,Move,&NowPointer,FILE_CURRENT)){
- sRet = Status::IOError(_filename,Win32::GetLastErrSz());
- }
- return sRet;
-}
-
-BOOL Win32SequentialFile::isEnable()
-{
- return _hFile ? TRUE : FALSE;
-}
-
-BOOL Win32SequentialFile::_Init()
-{
- std::wstring path;
- ToWidePath(_filename, path);
- _hFile = CreateFileW(path.c_str(),
- GENERIC_READ,
- FILE_SHARE_READ | FILE_SHARE_WRITE,
- NULL,
- OPEN_EXISTING,
- FILE_ATTRIBUTE_NORMAL | FILE_FLAG_SEQUENTIAL_SCAN,
- NULL);
- if (_hFile == INVALID_HANDLE_VALUE)
- _hFile = NULL;
- return _hFile ? TRUE : FALSE;
-}
-
-void Win32SequentialFile::_CleanUp()
-{
- if(_hFile){
- CloseHandle(_hFile);
- _hFile = NULL;
- }
-}
-
-Win32RandomAccessFile::Win32RandomAccessFile( const std::string& fname ) :
- _filename(fname),_hFile(NULL)
-{
- std::wstring path;
- ToWidePath(fname, path);
- _Init( path.c_str() );
-}
-
-Win32RandomAccessFile::~Win32RandomAccessFile()
-{
- _CleanUp();
-}
-
-Status Win32RandomAccessFile::Read(uint64_t offset,size_t n,Slice* result,char* scratch) const
-{
- Status sRet;
- OVERLAPPED ol = {0};
- ZeroMemory(&ol,sizeof(ol));
- ol.Offset = (DWORD)offset;
- ol.OffsetHigh = (DWORD)(offset >> 32);
- DWORD hasRead = 0;
- if(!ReadFile(_hFile,scratch,n,&hasRead,&ol))
- sRet = Status::IOError(_filename,Win32::GetLastErrSz());
- else
- *result = Slice(scratch,hasRead);
- return sRet;
-}
-
-BOOL Win32RandomAccessFile::_Init( LPCWSTR path )
-{
- BOOL bRet = FALSE;
- if(!_hFile)
- _hFile = ::CreateFileW(path,GENERIC_READ,FILE_SHARE_READ|FILE_SHARE_WRITE,NULL,OPEN_EXISTING,
- FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS,NULL);
- if(!_hFile || _hFile == INVALID_HANDLE_VALUE )
- _hFile = NULL;
- else
- bRet = TRUE;
- return bRet;
-}
-
-BOOL Win32RandomAccessFile::isEnable()
-{
- return _hFile ? TRUE : FALSE;
-}
-
-void Win32RandomAccessFile::_CleanUp()
-{
- if(_hFile){
- ::CloseHandle(_hFile);
- _hFile = NULL;
- }
-}
-
-Win32WritableFile::Win32WritableFile(const std::string& fname, bool append)
- : filename_(fname)
-{
- std::wstring path;
- ToWidePath(fname, path);
- // NewAppendableFile: append to an existing file, or create a new one
- // if none exists - this is OPEN_ALWAYS behavior, with
- // FILE_APPEND_DATA to avoid having to manually position the file
- // pointer at the end of the file.
- // NewWritableFile: create a new file, delete if it exists - this is
- // CREATE_ALWAYS behavior. This file is used for writing only so
- // use GENERIC_WRITE.
- _hFile = CreateFileW(path.c_str(),
- append ? FILE_APPEND_DATA : GENERIC_WRITE,
- FILE_SHARE_READ|FILE_SHARE_DELETE|FILE_SHARE_WRITE,
- NULL,
- append ? OPEN_ALWAYS : CREATE_ALWAYS,
- FILE_ATTRIBUTE_NORMAL,
- NULL);
- // CreateFileW returns INVALID_HANDLE_VALUE in case of error, always check isEnable() before use
-}
-
-Win32WritableFile::~Win32WritableFile()
-{
- if (_hFile != INVALID_HANDLE_VALUE)
- Close();
-}
-
-Status Win32WritableFile::Append(const Slice& data)
-{
- DWORD r = 0;
- if (!WriteFile(_hFile, data.data(), data.size(), &r, NULL) || r != data.size()) {
- return Status::IOError("Win32WritableFile.Append::WriteFile: "+filename_, Win32::GetLastErrSz());
- }
- return Status::OK();
-}
-
-Status Win32WritableFile::Close()
-{
- if (!CloseHandle(_hFile)) {
- return Status::IOError("Win32WritableFile.Close::CloseHandle: "+filename_, Win32::GetLastErrSz());
- }
- _hFile = INVALID_HANDLE_VALUE;
- return Status::OK();
-}
-
-Status Win32WritableFile::Flush()
-{
- // Nothing to do here, there are no application-side buffers
- return Status::OK();
-}
-
-Status Win32WritableFile::Sync()
-{
- if (!FlushFileBuffers(_hFile)) {
- return Status::IOError("Win32WritableFile.Sync::FlushFileBuffers "+filename_, Win32::GetLastErrSz());
- }
- return Status::OK();
-}
-
-BOOL Win32WritableFile::isEnable()
-{
- return _hFile != INVALID_HANDLE_VALUE;
-}
-
-Win32FileLock::Win32FileLock( const std::string& fname ) :
- _hFile(NULL),_filename(fname)
-{
- std::wstring path;
- ToWidePath(fname, path);
- _Init(path.c_str());
-}
-
-Win32FileLock::~Win32FileLock()
-{
- _CleanUp();
-}
-
-BOOL Win32FileLock::_Init( LPCWSTR path )
-{
- BOOL bRet = FALSE;
- if(!_hFile)
- _hFile = ::CreateFileW(path,0,0,NULL,CREATE_ALWAYS,FILE_ATTRIBUTE_NORMAL,NULL);
- if(!_hFile || _hFile == INVALID_HANDLE_VALUE ){
- _hFile = NULL;
- }
- else
- bRet = TRUE;
- return bRet;
-}
-
-void Win32FileLock::_CleanUp()
-{
- ::CloseHandle(_hFile);
- _hFile = NULL;
-}
-
-BOOL Win32FileLock::isEnable()
-{
- return _hFile ? TRUE : FALSE;
-}
-
-Win32Logger::Win32Logger(WritableFile* pFile) : _pFileProxy(pFile)
-{
- assert(_pFileProxy);
-}
-
-Win32Logger::~Win32Logger()
-{
- if(_pFileProxy)
- delete _pFileProxy;
-}
-
-void Win32Logger::Logv( const char* format, va_list ap )
-{
- uint64_t thread_id = ::GetCurrentThreadId();
-
- // We try twice: the first time with a fixed-size stack allocated buffer,
- // and the second time with a much larger dynamically allocated buffer.
- char buffer[500];
- for (int iter = 0; iter < 2; iter++) {
- char* base;
- int bufsize;
- if (iter == 0) {
- bufsize = sizeof(buffer);
- base = buffer;
- } else {
- bufsize = 30000;
- base = new char[bufsize];
- }
- char* p = base;
- char* limit = base + bufsize;
-
- SYSTEMTIME st;
- GetLocalTime(&st);
- p += snprintf(p, limit - p,
- "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
- int(st.wYear),
- int(st.wMonth),
- int(st.wDay),
- int(st.wHour),
- int(st.wMinute),
- int(st.wMinute),
- int(st.wMilliseconds),
- static_cast<long long unsigned int>(thread_id));
-
- // Print the message
- if (p < limit) {
- va_list backup_ap;
- va_copy(backup_ap, ap);
- p += vsnprintf(p, limit - p, format, backup_ap);
- va_end(backup_ap);
- }
-
- // Truncate to available space if necessary
- if (p >= limit) {
- if (iter == 0) {
- continue; // Try again with larger buffer
- } else {
- p = limit - 1;
- }
- }
-
- // Add newline if necessary
- if (p == base || p[-1] != '\n') {
- *p++ = '\n';
- }
-
- assert(p <= limit);
- DWORD hasWritten = 0;
- if(_pFileProxy){
- _pFileProxy->Append(Slice(base, p - base));
- _pFileProxy->Flush();
- }
- if (base != buffer) {
- delete[] base;
- }
- break;
- }
-}
-
-bool Win32Env::FileExists(const std::string& fname)
-{
- std::string path = fname;
- std::wstring wpath;
- ToWidePath(ModifyPath(path), wpath);
- return ::PathFileExistsW(wpath.c_str()) ? true : false;
-}
-
-Status Win32Env::GetChildren(const std::string& dir, std::vector<std::string>* result)
-{
- Status sRet;
- ::WIN32_FIND_DATAW wfd;
- std::string path = dir;
- ModifyPath(path);
- path += "\\*.*";
- std::wstring wpath;
- ToWidePath(path, wpath);
-
- ::HANDLE hFind = ::FindFirstFileW(wpath.c_str() ,&wfd);
- if(hFind && hFind != INVALID_HANDLE_VALUE){
- BOOL hasNext = TRUE;
- std::string child;
- while(hasNext){
- ToNarrowPath(wfd.cFileName, child);
- if(child != ".." && child != ".") {
- result->push_back(child);
- }
- hasNext = ::FindNextFileW(hFind,&wfd);
- }
- ::FindClose(hFind);
- }
- else
- sRet = Status::IOError(dir,"Could not get children.");
- return sRet;
-}
-
-void Win32Env::SleepForMicroseconds( int micros )
-{
- ::Sleep((micros + 999) /1000);
-}
-
-
-Status Win32Env::DeleteFile( const std::string& fname )
-{
- Status sRet;
- std::string path = fname;
- std::wstring wpath;
- ToWidePath(ModifyPath(path), wpath);
-
- if(!::DeleteFileW(wpath.c_str())) {
- sRet = Status::IOError(path, "Could not delete file.");
- }
- return sRet;
-}
-
-Status Win32Env::GetFileSize( const std::string& fname, uint64_t* file_size )
-{
- Status sRet;
- std::string path = fname;
- std::wstring wpath;
- ToWidePath(ModifyPath(path), wpath);
-
- HANDLE file = ::CreateFileW(wpath.c_str(),
- GENERIC_READ,FILE_SHARE_READ|FILE_SHARE_WRITE,NULL,OPEN_EXISTING,FILE_ATTRIBUTE_NORMAL,NULL);
- LARGE_INTEGER li;
- if(::GetFileSizeEx(file,&li)){
- *file_size = (uint64_t)li.QuadPart;
- }else
- sRet = Status::IOError(path,"Could not get the file size.");
- CloseHandle(file);
- return sRet;
-}
-
-Status Win32Env::RenameFile( const std::string& src, const std::string& target )
-{
- Status sRet;
- std::string src_path = src;
- std::wstring wsrc_path;
- ToWidePath(ModifyPath(src_path), wsrc_path);
- std::string target_path = target;
- std::wstring wtarget_path;
- ToWidePath(ModifyPath(target_path), wtarget_path);
-
- if(!MoveFileW(wsrc_path.c_str(), wtarget_path.c_str() ) ){
- DWORD err = GetLastError();
- if(err == 0x000000b7){
- if(!::DeleteFileW(wtarget_path.c_str() ) )
- sRet = Status::IOError(src, "Could not rename file.");
- else if(!::MoveFileW(wsrc_path.c_str(),
- wtarget_path.c_str() ) )
- sRet = Status::IOError(src, "Could not rename file.");
- }
- }
- return sRet;
-}
-
-Status Win32Env::LockFile( const std::string& fname, FileLock** lock )
-{
- Status sRet;
- std::string path = fname;
- ModifyPath(path);
- Win32FileLock* _lock = new Win32FileLock(path);
- if(!_lock->isEnable()){
- delete _lock;
- *lock = NULL;
- sRet = Status::IOError(path, "Could not lock file.");
- }
- else
- *lock = _lock;
- return sRet;
-}
-
-Status Win32Env::UnlockFile( FileLock* lock )
-{
- Status sRet;
- delete lock;
- return sRet;
-}
-
-void Win32Env::Schedule( void (*function)(void* arg), void* arg )
-{
- QueueUserWorkItem(Win32::WorkItemWrapperProc,
- new Win32::WorkItemWrapper(function,arg),
- WT_EXECUTEDEFAULT);
-}
-
-void Win32Env::StartThread( void (*function)(void* arg), void* arg )
-{
- ::_beginthread(function,0,arg);
-}
-
-Status Win32Env::GetTestDirectory( std::string* path )
-{
- Status sRet;
- WCHAR TempPath[MAX_PATH];
- ::GetTempPathW(MAX_PATH,TempPath);
- ToNarrowPath(TempPath, *path);
- path->append("leveldb\\test\\");
- ModifyPath(*path);
- return sRet;
-}
-
-uint64_t Win32Env::NowMicros()
-{
-#ifndef USE_VISTA_API
-#define GetTickCount64 GetTickCount
-#endif
- return (uint64_t)(GetTickCount64()*1000);
-}
-
-static Status CreateDirInner( const std::string& dirname )
-{
- Status sRet;
- std::wstring dirnameW;
- ToWidePath(dirname, dirnameW);
- DWORD attr = ::GetFileAttributesW(dirnameW.c_str());
- if (attr == INVALID_FILE_ATTRIBUTES) { // doesn't exist:
- std::size_t slash = dirname.find_last_of("\\");
- if (slash != std::string::npos){
- sRet = CreateDirInner(dirname.substr(0, slash));
- if (!sRet.ok()) return sRet;
- }
- BOOL result = ::CreateDirectoryW(dirnameW.c_str(), NULL);
- if (result == FALSE) {
- sRet = Status::IOError(dirname, "Could not create directory.");
- return sRet;
- }
- }
- return sRet;
-}
-
-Status Win32Env::CreateDir( const std::string& dirname )
-{
- std::string path = dirname;
- if(path[path.length() - 1] != '\\'){
- path += '\\';
- }
- ModifyPath(path);
-
- return CreateDirInner(path);
-}
-
-Status Win32Env::DeleteDir( const std::string& dirname )
-{
- Status sRet;
- std::wstring path;
- ToWidePath(dirname, path);
- ModifyPath(path);
- if(!::RemoveDirectoryW( path.c_str() ) ){
- sRet = Status::IOError(dirname, "Could not delete directory.");
- }
- return sRet;
-}
-
-Status Win32Env::NewSequentialFile( const std::string& fname, SequentialFile** result )
-{
- Status sRet;
- std::string path = fname;
- ModifyPath(path);
- Win32SequentialFile* pFile = new Win32SequentialFile(path);
- if(pFile->isEnable()){
- *result = pFile;
- }else {
- delete pFile;
- sRet = Status::IOError(path, Win32::GetLastErrSz());
- }
- return sRet;
-}
-
-Status Win32Env::NewRandomAccessFile( const std::string& fname, RandomAccessFile** result )
-{
- Status sRet;
- std::string path = fname;
- Win32RandomAccessFile* pFile = new Win32RandomAccessFile(ModifyPath(path));
- if(!pFile->isEnable()){
- delete pFile;
- *result = NULL;
- sRet = Status::IOError(path, Win32::GetLastErrSz());
- }else
- *result = pFile;
- return sRet;
-}
-
-Status Win32Env::NewLogger( const std::string& fname, Logger** result )
-{
- Status sRet;
- std::string path = fname;
- // Logs are opened with write semantics, not with append semantics
- // (see PosixEnv::NewLogger)
- Win32WritableFile* pMapFile = new Win32WritableFile(ModifyPath(path), false);
- if(!pMapFile->isEnable()){
- delete pMapFile;
- *result = NULL;
- sRet = Status::IOError(path,"could not create a logger.");
- }else
- *result = new Win32Logger(pMapFile);
- return sRet;
-}
-
-Status Win32Env::NewWritableFile( const std::string& fname, WritableFile** result )
-{
- Status sRet;
- std::string path = fname;
- Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path), false);
- if(!pFile->isEnable()){
- *result = NULL;
- sRet = Status::IOError(fname,Win32::GetLastErrSz());
- }else
- *result = pFile;
- return sRet;
-}
-
-Status Win32Env::NewAppendableFile( const std::string& fname, WritableFile** result )
-{
- Status sRet;
- std::string path = fname;
- Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path), true);
- if(!pFile->isEnable()){
- *result = NULL;
- sRet = Status::IOError(fname,Win32::GetLastErrSz());
- }else
- *result = pFile;
- return sRet;
-}
-
-Win32Env::Win32Env()
-{
-
-}
-
-Win32Env::~Win32Env()
-{
-
-}
-
-
-} // Win32 namespace
-
-static port::OnceType once = LEVELDB_ONCE_INIT;
-static Env* default_env;
-static void InitDefaultEnv() { default_env = new Win32::Win32Env(); }
-
-Env* Env::Default() {
- port::InitOnce(&once, InitDefaultEnv);
- return default_env;
-}
-
-} // namespace leveldb
-
-#endif // defined(LEVELDB_PLATFORM_WINDOWS)
diff --git a/src/leveldb/util/env_windows.cc b/src/leveldb/util/env_windows.cc
new file mode 100644
index 0000000000..1834206562
--- /dev/null
+++ b/src/leveldb/util/env_windows.cc
@@ -0,0 +1,849 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+// Prevent Windows headers from defining min/max macros and instead
+// use STL.
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif // ifndef NOMINMAX
+#include <windows.h>
+
+#include <algorithm>
+#include <atomic>
+#include <chrono>
+#include <condition_variable>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <memory>
+#include <mutex>
+#include <queue>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "leveldb/env.h"
+#include "leveldb/slice.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
+#include "util/env_windows_test_helper.h"
+#include "util/logging.h"
+#include "util/mutexlock.h"
+#include "util/windows_logger.h"
+
+#if defined(DeleteFile)
+#undef DeleteFile
+#endif // defined(DeleteFile)
+
+namespace leveldb {
+
+namespace {
+
+constexpr const size_t kWritableFileBufferSize = 65536;
+
+// Up to 1000 mmaps for 64-bit binaries; none for 32-bit.
+constexpr int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 1000 : 0;
+
+// Can be set by EnvWindowsTestHelper::SetReadOnlyMMapLimit().
+int g_mmap_limit = kDefaultMmapLimit;
+
+std::string GetWindowsErrorMessage(DWORD error_code) {
+ std::string message;
+ char* error_text = nullptr;
+ // Use MBCS version of FormatMessage to match return value.
+ size_t error_text_size = ::FormatMessageA(
+ FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ nullptr, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ reinterpret_cast<char*>(&error_text), 0, nullptr);
+ if (!error_text) {
+ return message;
+ }
+ message.assign(error_text, error_text_size);
+ ::LocalFree(error_text);
+ return message;
+}
+
+Status WindowsError(const std::string& context, DWORD error_code) {
+ if (error_code == ERROR_FILE_NOT_FOUND || error_code == ERROR_PATH_NOT_FOUND)
+ return Status::NotFound(context, GetWindowsErrorMessage(error_code));
+ return Status::IOError(context, GetWindowsErrorMessage(error_code));
+}
+
+class ScopedHandle {
+ public:
+ ScopedHandle(HANDLE handle) : handle_(handle) {}
+ ScopedHandle(const ScopedHandle&) = delete;
+ ScopedHandle(ScopedHandle&& other) noexcept : handle_(other.Release()) {}
+ ~ScopedHandle() { Close(); }
+
+ ScopedHandle& operator=(const ScopedHandle&) = delete;
+
+ ScopedHandle& operator=(ScopedHandle&& rhs) noexcept {
+ if (this != &rhs) handle_ = rhs.Release();
+ return *this;
+ }
+
+ bool Close() {
+ if (!is_valid()) {
+ return true;
+ }
+ HANDLE h = handle_;
+ handle_ = INVALID_HANDLE_VALUE;
+ return ::CloseHandle(h);
+ }
+
+ bool is_valid() const {
+ return handle_ != INVALID_HANDLE_VALUE && handle_ != nullptr;
+ }
+
+ HANDLE get() const { return handle_; }
+
+ HANDLE Release() {
+ HANDLE h = handle_;
+ handle_ = INVALID_HANDLE_VALUE;
+ return h;
+ }
+
+ private:
+ HANDLE handle_;
+};
+
+// Helper class to limit resource usage to avoid exhaustion.
+// Currently used to limit read-only file descriptors and mmap file usage
+// so that we do not run out of file descriptors or virtual memory, or run into
+// kernel performance problems for very large databases.
+class Limiter {
+ public:
+ // Limit maximum number of resources to |max_acquires|.
+ Limiter(int max_acquires) : acquires_allowed_(max_acquires) {}
+
+ Limiter(const Limiter&) = delete;
+ Limiter operator=(const Limiter&) = delete;
+
+ // If another resource is available, acquire it and return true.
+ // Else return false.
+ bool Acquire() {
+ int old_acquires_allowed =
+ acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
+
+ if (old_acquires_allowed > 0) return true;
+
+ acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
+ return false;
+ }
+
+ // Release a resource acquired by a previous call to Acquire() that returned
+ // true.
+ void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); }
+
+ private:
+ // The number of available resources.
+ //
+ // This is a counter and is not tied to the invariants of any other class, so
+ // it can be operated on safely using std::memory_order_relaxed.
+ std::atomic<int> acquires_allowed_;
+};
+
+class WindowsSequentialFile : public SequentialFile {
+ public:
+ WindowsSequentialFile(std::string filename, ScopedHandle handle)
+ : handle_(std::move(handle)), filename_(std::move(filename)) {}
+ ~WindowsSequentialFile() override {}
+
+ Status Read(size_t n, Slice* result, char* scratch) override {
+ DWORD bytes_read;
+ // DWORD is 32-bit, but size_t could technically be larger. However leveldb
+ // files are limited to leveldb::Options::max_file_size which is clamped to
+ // 1<<30 or 1 GiB.
+ assert(n <= std::numeric_limits<DWORD>::max());
+ if (!::ReadFile(handle_.get(), scratch, static_cast<DWORD>(n), &bytes_read,
+ nullptr)) {
+ return WindowsError(filename_, ::GetLastError());
+ }
+
+ *result = Slice(scratch, bytes_read);
+ return Status::OK();
+ }
+
+ Status Skip(uint64_t n) override {
+ LARGE_INTEGER distance;
+ distance.QuadPart = n;
+ if (!::SetFilePointerEx(handle_.get(), distance, nullptr, FILE_CURRENT)) {
+ return WindowsError(filename_, ::GetLastError());
+ }
+ return Status::OK();
+ }
+
+ std::string GetName() const override { return filename_; }
+
+ private:
+ const ScopedHandle handle_;
+ const std::string filename_;
+};
+
+class WindowsRandomAccessFile : public RandomAccessFile {
+ public:
+ WindowsRandomAccessFile(std::string filename, ScopedHandle handle)
+ : handle_(std::move(handle)), filename_(std::move(filename)) {}
+
+ ~WindowsRandomAccessFile() override = default;
+
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
+ DWORD bytes_read = 0;
+ OVERLAPPED overlapped = {0};
+
+ overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);
+ overlapped.Offset = static_cast<DWORD>(offset);
+ if (!::ReadFile(handle_.get(), scratch, static_cast<DWORD>(n), &bytes_read,
+ &overlapped)) {
+ DWORD error_code = ::GetLastError();
+ if (error_code != ERROR_HANDLE_EOF) {
+ *result = Slice(scratch, 0);
+ return Status::IOError(filename_, GetWindowsErrorMessage(error_code));
+ }
+ }
+
+ *result = Slice(scratch, bytes_read);
+ return Status::OK();
+ }
+
+ std::string GetName() const override { return filename_; }
+
+ private:
+ const ScopedHandle handle_;
+ const std::string filename_;
+};
+
+class WindowsMmapReadableFile : public RandomAccessFile {
+ public:
+ // base[0,length-1] contains the mmapped contents of the file.
+ WindowsMmapReadableFile(std::string filename, char* mmap_base, size_t length,
+ Limiter* mmap_limiter)
+ : mmap_base_(mmap_base),
+ length_(length),
+ mmap_limiter_(mmap_limiter),
+ filename_(std::move(filename)) {}
+
+ ~WindowsMmapReadableFile() override {
+ ::UnmapViewOfFile(mmap_base_);
+ mmap_limiter_->Release();
+ }
+
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
+ if (offset + n > length_) {
+ *result = Slice();
+ return WindowsError(filename_, ERROR_INVALID_PARAMETER);
+ }
+
+ *result = Slice(mmap_base_ + offset, n);
+ return Status::OK();
+ }
+
+ std::string GetName() const override { return filename_; }
+
+ private:
+ char* const mmap_base_;
+ const size_t length_;
+ Limiter* const mmap_limiter_;
+ const std::string filename_;
+};
+
+class WindowsWritableFile : public WritableFile {
+ public:
+ WindowsWritableFile(std::string filename, ScopedHandle handle)
+ : pos_(0), handle_(std::move(handle)), filename_(std::move(filename)) {}
+
+ ~WindowsWritableFile() override = default;
+
+ Status Append(const Slice& data) override {
+ size_t write_size = data.size();
+ const char* write_data = data.data();
+
+ // Fit as much as possible into buffer.
+ size_t copy_size = std::min(write_size, kWritableFileBufferSize - pos_);
+ std::memcpy(buf_ + pos_, write_data, copy_size);
+ write_data += copy_size;
+ write_size -= copy_size;
+ pos_ += copy_size;
+ if (write_size == 0) {
+ return Status::OK();
+ }
+
+ // Can't fit in buffer, so need to do at least one write.
+ Status status = FlushBuffer();
+ if (!status.ok()) {
+ return status;
+ }
+
+ // Small writes go to buffer, large writes are written directly.
+ if (write_size < kWritableFileBufferSize) {
+ std::memcpy(buf_, write_data, write_size);
+ pos_ = write_size;
+ return Status::OK();
+ }
+ return WriteUnbuffered(write_data, write_size);
+ }
+
+ Status Close() override {
+ Status status = FlushBuffer();
+ if (!handle_.Close() && status.ok()) {
+ status = WindowsError(filename_, ::GetLastError());
+ }
+ return status;
+ }
+
+ Status Flush() override { return FlushBuffer(); }
+
+ Status Sync() override {
+ // On Windows there is no need to sync the parent directory. Its metadata will be updated
+ // via the creation of the new file, without an explicit sync.
+
+ Status status = FlushBuffer();
+ if (!status.ok()) {
+ return status;
+ }
+
+ if (!::FlushFileBuffers(handle_.get())) {
+ return Status::IOError(filename_,
+ GetWindowsErrorMessage(::GetLastError()));
+ }
+ return Status::OK();
+ }
+
+ std::string GetName() const override { return filename_; }
+
+ private:
+ Status FlushBuffer() {
+ Status status = WriteUnbuffered(buf_, pos_);
+ pos_ = 0;
+ return status;
+ }
+
+ Status WriteUnbuffered(const char* data, size_t size) {
+ DWORD bytes_written;
+ if (!::WriteFile(handle_.get(), data, static_cast<DWORD>(size),
+ &bytes_written, nullptr)) {
+ return Status::IOError(filename_,
+ GetWindowsErrorMessage(::GetLastError()));
+ }
+ return Status::OK();
+ }
+
+ // buf_[0, pos_-1] contains data to be written to handle_.
+ char buf_[kWritableFileBufferSize];
+ size_t pos_;
+
+ ScopedHandle handle_;
+ const std::string filename_;
+};
+
+// Lock or unlock the entire file as specified by |lock|. Returns true
+// when successful, false upon failure. Caller should call ::GetLastError()
+// to determine cause of failure
+bool LockOrUnlock(HANDLE handle, bool lock) {
+ if (lock) {
+ return ::LockFile(handle,
+ /*dwFileOffsetLow=*/0, /*dwFileOffsetHigh=*/0,
+ /*nNumberOfBytesToLockLow=*/MAXDWORD,
+ /*nNumberOfBytesToLockHigh=*/MAXDWORD);
+ } else {
+ return ::UnlockFile(handle,
+ /*dwFileOffsetLow=*/0, /*dwFileOffsetHigh=*/0,
+ /*nNumberOfBytesToLockLow=*/MAXDWORD,
+ /*nNumberOfBytesToLockHigh=*/MAXDWORD);
+ }
+}
+
+class WindowsFileLock : public FileLock {
+ public:
+ WindowsFileLock(ScopedHandle handle, std::string filename)
+ : handle_(std::move(handle)), filename_(std::move(filename)) {}
+
+ const ScopedHandle& handle() const { return handle_; }
+ const std::string& filename() const { return filename_; }
+
+ private:
+ const ScopedHandle handle_;
+ const std::string filename_;
+};
+
+class WindowsEnv : public Env {
+ public:
+ WindowsEnv();
+ ~WindowsEnv() override {
+ static const char msg[] =
+ "WindowsEnv singleton destroyed. Unsupported behavior!\n";
+ std::fwrite(msg, 1, sizeof(msg), stderr);
+ std::abort();
+ }
+
+ Status NewSequentialFile(const std::string& filename,
+ SequentialFile** result) override {
+ *result = nullptr;
+ DWORD desired_access = GENERIC_READ;
+ DWORD share_mode = FILE_SHARE_READ;
+ auto wFilename = toUtf16(filename);
+ ScopedHandle handle = ::CreateFileW(
+ wFilename.c_str(), desired_access, share_mode,
+ /*lpSecurityAttributes=*/nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
+ /*hTemplateFile=*/nullptr);
+ if (!handle.is_valid()) {
+ return WindowsError(filename, ::GetLastError());
+ }
+
+ *result = new WindowsSequentialFile(filename, std::move(handle));
+ return Status::OK();
+ }
+
+ Status NewRandomAccessFile(const std::string& filename,
+ RandomAccessFile** result) override {
+ *result = nullptr;
+ DWORD desired_access = GENERIC_READ;
+ DWORD share_mode = FILE_SHARE_READ;
+ auto wFilename = toUtf16(filename);
+ ScopedHandle handle =
+ ::CreateFileW(wFilename.c_str(), desired_access, share_mode,
+ /*lpSecurityAttributes=*/nullptr, OPEN_EXISTING,
+ FILE_ATTRIBUTE_READONLY,
+ /*hTemplateFile=*/nullptr);
+ if (!handle.is_valid()) {
+ return WindowsError(filename, ::GetLastError());
+ }
+ if (!mmap_limiter_.Acquire()) {
+ *result = new WindowsRandomAccessFile(filename, std::move(handle));
+ return Status::OK();
+ }
+
+ LARGE_INTEGER file_size;
+ Status status;
+ if (!::GetFileSizeEx(handle.get(), &file_size)) {
+ mmap_limiter_.Release();
+ return WindowsError(filename, ::GetLastError());
+ }
+
+ ScopedHandle mapping =
+ ::CreateFileMappingW(handle.get(),
+ /*security attributes=*/nullptr, PAGE_READONLY,
+ /*dwMaximumSizeHigh=*/0,
+ /*dwMaximumSizeLow=*/0,
+ /*lpName=*/nullptr);
+ if (mapping.is_valid()) {
+ void* mmap_base = ::MapViewOfFile(mapping.get(), FILE_MAP_READ,
+ /*dwFileOffsetHigh=*/0,
+ /*dwFileOffsetLow=*/0,
+ /*dwNumberOfBytesToMap=*/0);
+ if (mmap_base) {
+ *result = new WindowsMmapReadableFile(
+ filename, reinterpret_cast<char*>(mmap_base),
+ static_cast<size_t>(file_size.QuadPart), &mmap_limiter_);
+ return Status::OK();
+ }
+ }
+ mmap_limiter_.Release();
+ return WindowsError(filename, ::GetLastError());
+ }
+
+ Status NewWritableFile(const std::string& filename,
+ WritableFile** result) override {
+ DWORD desired_access = GENERIC_WRITE;
+ DWORD share_mode = 0; // Exclusive access.
+ auto wFilename = toUtf16(filename);
+ ScopedHandle handle = ::CreateFileW(
+ wFilename.c_str(), desired_access, share_mode,
+ /*lpSecurityAttributes=*/nullptr, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL,
+ /*hTemplateFile=*/nullptr);
+ if (!handle.is_valid()) {
+ *result = nullptr;
+ return WindowsError(filename, ::GetLastError());
+ }
+
+ *result = new WindowsWritableFile(filename, std::move(handle));
+ return Status::OK();
+ }
+
+ Status NewAppendableFile(const std::string& filename,
+ WritableFile** result) override {
+ DWORD desired_access = FILE_APPEND_DATA;
+ DWORD share_mode = 0; // Exclusive access.
+ auto wFilename = toUtf16(filename);
+ ScopedHandle handle = ::CreateFileW(
+ wFilename.c_str(), desired_access, share_mode,
+ /*lpSecurityAttributes=*/nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL,
+ /*hTemplateFile=*/nullptr);
+ if (!handle.is_valid()) {
+ *result = nullptr;
+ return WindowsError(filename, ::GetLastError());
+ }
+
+ *result = new WindowsWritableFile(filename, std::move(handle));
+ return Status::OK();
+ }
+
+ bool FileExists(const std::string& filename) override {
+ auto wFilename = toUtf16(filename);
+ return GetFileAttributesW(wFilename.c_str()) != INVALID_FILE_ATTRIBUTES;
+ }
+
+ Status GetChildren(const std::string& directory_path,
+ std::vector<std::string>* result) override {
+ const std::string find_pattern = directory_path + "\\*";
+ WIN32_FIND_DATAW find_data;
+ auto wFind_pattern = toUtf16(find_pattern);
+ HANDLE dir_handle = ::FindFirstFileW(wFind_pattern.c_str(), &find_data);
+ if (dir_handle == INVALID_HANDLE_VALUE) {
+ DWORD last_error = ::GetLastError();
+ if (last_error == ERROR_FILE_NOT_FOUND) {
+ return Status::OK();
+ }
+ return WindowsError(directory_path, last_error);
+ }
+ do {
+ char base_name[_MAX_FNAME];
+ char ext[_MAX_EXT];
+
+ auto find_data_filename = toUtf8(find_data.cFileName);
+ if (!_splitpath_s(find_data_filename.c_str(), nullptr, 0, nullptr, 0,
+ base_name, ARRAYSIZE(base_name), ext, ARRAYSIZE(ext))) {
+ result->emplace_back(std::string(base_name) + ext);
+ }
+ } while (::FindNextFileW(dir_handle, &find_data));
+ DWORD last_error = ::GetLastError();
+ ::FindClose(dir_handle);
+ if (last_error != ERROR_NO_MORE_FILES) {
+ return WindowsError(directory_path, last_error);
+ }
+ return Status::OK();
+ }
+
+ Status DeleteFile(const std::string& filename) override {
+ auto wFilename = toUtf16(filename);
+ if (!::DeleteFileW(wFilename.c_str())) {
+ return WindowsError(filename, ::GetLastError());
+ }
+ return Status::OK();
+ }
+
+ Status CreateDir(const std::string& dirname) override {
+ auto wDirname = toUtf16(dirname);
+ if (!::CreateDirectoryW(wDirname.c_str(), nullptr)) {
+ return WindowsError(dirname, ::GetLastError());
+ }
+ return Status::OK();
+ }
+
+ Status DeleteDir(const std::string& dirname) override {
+ auto wDirname = toUtf16(dirname);
+ if (!::RemoveDirectoryW(wDirname.c_str())) {
+ return WindowsError(dirname, ::GetLastError());
+ }
+ return Status::OK();
+ }
+
+ Status GetFileSize(const std::string& filename, uint64_t* size) override {
+ WIN32_FILE_ATTRIBUTE_DATA file_attributes;
+ auto wFilename = toUtf16(filename);
+ if (!::GetFileAttributesExW(wFilename.c_str(), GetFileExInfoStandard,
+ &file_attributes)) {
+ return WindowsError(filename, ::GetLastError());
+ }
+ ULARGE_INTEGER file_size;
+ file_size.HighPart = file_attributes.nFileSizeHigh;
+ file_size.LowPart = file_attributes.nFileSizeLow;
+ *size = file_size.QuadPart;
+ return Status::OK();
+ }
+
+ Status RenameFile(const std::string& from, const std::string& to) override {
+ // Try a simple move first. It will only succeed when |to| doesn't already
+ // exist.
+ auto wFrom = toUtf16(from);
+ auto wTo = toUtf16(to);
+ if (::MoveFileW(wFrom.c_str(), wTo.c_str())) {
+ return Status::OK();
+ }
+ DWORD move_error = ::GetLastError();
+
+ // Try the full-blown replace if the move fails, as ReplaceFile will only
+ // succeed when |to| does exist. When writing to a network share, we may not
+ // be able to change the ACLs. Ignore ACL errors then
+ // (REPLACEFILE_IGNORE_MERGE_ERRORS).
+ if (::ReplaceFileW(wTo.c_str(), wFrom.c_str(), /*lpBackupFileName=*/nullptr,
+ REPLACEFILE_IGNORE_MERGE_ERRORS,
+ /*lpExclude=*/nullptr, /*lpReserved=*/nullptr)) {
+ return Status::OK();
+ }
+ DWORD replace_error = ::GetLastError();
+ // In the case of ERROR_FILE_NOT_FOUND from ReplaceFile, it is likely that
+ // |to| does not exist. In this case, the more relevant error comes from the
+ // call to MoveFile.
+ if (replace_error == ERROR_FILE_NOT_FOUND ||
+ replace_error == ERROR_PATH_NOT_FOUND) {
+ return WindowsError(from, move_error);
+ } else {
+ return WindowsError(from, replace_error);
+ }
+ }
+
+ Status LockFile(const std::string& filename, FileLock** lock) override {
+ *lock = nullptr;
+ Status result;
+ auto wFilename = toUtf16(filename);
+ ScopedHandle handle = ::CreateFileW(
+ wFilename.c_str(), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ,
+ /*lpSecurityAttributes=*/nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL,
+ nullptr);
+ if (!handle.is_valid()) {
+ result = WindowsError(filename, ::GetLastError());
+ } else if (!LockOrUnlock(handle.get(), true)) {
+ result = WindowsError("lock " + filename, ::GetLastError());
+ } else {
+ *lock = new WindowsFileLock(std::move(handle), filename);
+ }
+ return result;
+ }
+
+ Status UnlockFile(FileLock* lock) override {
+ WindowsFileLock* windows_file_lock =
+ reinterpret_cast<WindowsFileLock*>(lock);
+ if (!LockOrUnlock(windows_file_lock->handle().get(), false)) {
+ return WindowsError("unlock " + windows_file_lock->filename(),
+ ::GetLastError());
+ }
+ delete windows_file_lock;
+ return Status::OK();
+ }
+
+ void Schedule(void (*background_work_function)(void* background_work_arg),
+ void* background_work_arg) override;
+
+ void StartThread(void (*thread_main)(void* thread_main_arg),
+ void* thread_main_arg) override {
+ std::thread new_thread(thread_main, thread_main_arg);
+ new_thread.detach();
+ }
+
+ Status GetTestDirectory(std::string* result) override {
+ const char* env = getenv("TEST_TMPDIR");
+ if (env && env[0] != '\0') {
+ *result = env;
+ return Status::OK();
+ }
+
+ wchar_t wtmp_path[MAX_PATH];
+ if (!GetTempPathW(ARRAYSIZE(wtmp_path), wtmp_path)) {
+ return WindowsError("GetTempPath", ::GetLastError());
+ }
+ std::string tmp_path = toUtf8(std::wstring(wtmp_path));
+ std::stringstream ss;
+ ss << tmp_path << "leveldbtest-" << std::this_thread::get_id();
+ *result = ss.str();
+
+ // Directory may already exist
+ CreateDir(*result);
+ return Status::OK();
+ }
+
+ Status NewLogger(const std::string& filename, Logger** result) override {
+ auto wFilename = toUtf16(filename);
+ std::FILE* fp = _wfopen(wFilename.c_str(), L"w");
+ if (fp == nullptr) {
+ *result = nullptr;
+ return WindowsError(filename, ::GetLastError());
+ } else {
+ *result = new WindowsLogger(fp);
+ return Status::OK();
+ }
+ }
+
+ uint64_t NowMicros() override {
+ // GetSystemTimeAsFileTime typically has a resolution of 10-20 msec.
+ // TODO(cmumford): Switch to GetSystemTimePreciseAsFileTime which is
+ // available in Windows 8 and later.
+ FILETIME ft;
+ ::GetSystemTimeAsFileTime(&ft);
+ // Each tick represents a 100-nanosecond interval since January 1, 1601
+ // (UTC).
+ uint64_t num_ticks =
+ (static_cast<uint64_t>(ft.dwHighDateTime) << 32) + ft.dwLowDateTime;
+ return num_ticks / 10;
+ }
+
+ void SleepForMicroseconds(int micros) override {
+ std::this_thread::sleep_for(std::chrono::microseconds(micros));
+ }
+
+ private:
+ void BackgroundThreadMain();
+
+ static void BackgroundThreadEntryPoint(WindowsEnv* env) {
+ env->BackgroundThreadMain();
+ }
+
+ // Stores the work item data in a Schedule() call.
+ //
+ // Instances are constructed on the thread calling Schedule() and used on the
+ // background thread.
+ //
+ // This structure is thread-safe because it is immutable.
+ struct BackgroundWorkItem {
+ explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
+ : function(function), arg(arg) {}
+
+ void (*const function)(void*);
+ void* const arg;
+ };
+
+ port::Mutex background_work_mutex_;
+ port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_);
+ bool started_background_thread_ GUARDED_BY(background_work_mutex_);
+
+ std::queue<BackgroundWorkItem> background_work_queue_
+ GUARDED_BY(background_work_mutex_);
+
+ Limiter mmap_limiter_; // Thread-safe.
+
+ // Converts a Windows UTF-16 wide-character string to a UTF-8 string.
+ // See http://utf8everywhere.org/#windows
+ std::string toUtf8(const std::wstring& wstr) {
+ if (wstr.empty()) return std::string();
+ int size_needed = WideCharToMultiByte(
+ CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL);
+ std::string strTo(size_needed, 0);
+ WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), &strTo[0],
+ size_needed, NULL, NULL);
+ return strTo;
+ }
+
+ // Converts a UTF-8 string to a Windows UTF-16 wide-character
+ // string.
+ // See http://utf8everywhere.org/#windows
+ std::wstring toUtf16(const std::string& str) {
+ if (str.empty()) return std::wstring();
+ int size_needed =
+ MultiByteToWideChar(CP_UTF8, 0, &str[0], (int)str.size(), NULL, 0);
+ std::wstring strTo(size_needed, 0);
+ MultiByteToWideChar(CP_UTF8, 0, &str[0], (int)str.size(), &strTo[0],
+ size_needed);
+ return strTo;
+ }
+};
+
+// Return the maximum number of concurrent mmaps.
+int MaxMmaps() { return g_mmap_limit; }
+
+WindowsEnv::WindowsEnv()
+ : background_work_cv_(&background_work_mutex_),
+ started_background_thread_(false),
+ mmap_limiter_(MaxMmaps()) {}
+
+void WindowsEnv::Schedule(
+ void (*background_work_function)(void* background_work_arg),
+ void* background_work_arg) {
+ background_work_mutex_.Lock();
+
+ // Start the background thread, if we haven't done so already.
+ if (!started_background_thread_) {
+ started_background_thread_ = true;
+ std::thread background_thread(WindowsEnv::BackgroundThreadEntryPoint, this);
+ background_thread.detach();
+ }
+
+ // If the queue is empty, the background thread may be waiting for work.
+ if (background_work_queue_.empty()) {
+ background_work_cv_.Signal();
+ }
+
+ background_work_queue_.emplace(background_work_function, background_work_arg);
+ background_work_mutex_.Unlock();
+}
+
+void WindowsEnv::BackgroundThreadMain() {
+ while (true) {
+ background_work_mutex_.Lock();
+
+ // Wait until there is work to be done.
+ while (background_work_queue_.empty()) {
+ background_work_cv_.Wait();
+ }
+
+ assert(!background_work_queue_.empty());
+ auto background_work_function = background_work_queue_.front().function;
+ void* background_work_arg = background_work_queue_.front().arg;
+ background_work_queue_.pop();
+
+ background_work_mutex_.Unlock();
+ background_work_function(background_work_arg);
+ }
+}
+
+// Wraps an Env instance whose destructor is never created.
+//
+// Intended usage:
+// using PlatformSingletonEnv = SingletonEnv<PlatformEnv>;
+// void ConfigurePosixEnv(int param) {
+// PlatformSingletonEnv::AssertEnvNotInitialized();
+// // set global configuration flags.
+// }
+// Env* Env::Default() {
+// static PlatformSingletonEnv default_env;
+// return default_env.env();
+// }
+template <typename EnvType>
+class SingletonEnv {
+ public:
+ SingletonEnv() {
+#if !defined(NDEBUG)
+ env_initialized_.store(true, std::memory_order::memory_order_relaxed);
+#endif // !defined(NDEBUG)
+ static_assert(sizeof(env_storage_) >= sizeof(EnvType),
+ "env_storage_ will not fit the Env");
+ static_assert(alignof(decltype(env_storage_)) >= alignof(EnvType),
+ "env_storage_ does not meet the Env's alignment needs");
+ new (&env_storage_) EnvType();
+ }
+ ~SingletonEnv() = default;
+
+ SingletonEnv(const SingletonEnv&) = delete;
+ SingletonEnv& operator=(const SingletonEnv&) = delete;
+
+ Env* env() { return reinterpret_cast<Env*>(&env_storage_); }
+
+ static void AssertEnvNotInitialized() {
+#if !defined(NDEBUG)
+ assert(!env_initialized_.load(std::memory_order::memory_order_relaxed));
+#endif // !defined(NDEBUG)
+ }
+
+ private:
+ typename std::aligned_storage<sizeof(EnvType), alignof(EnvType)>::type
+ env_storage_;
+#if !defined(NDEBUG)
+ static std::atomic<bool> env_initialized_;
+#endif // !defined(NDEBUG)
+};
+
+#if !defined(NDEBUG)
+template <typename EnvType>
+std::atomic<bool> SingletonEnv<EnvType>::env_initialized_;
+#endif // !defined(NDEBUG)
+
+using WindowsDefaultEnv = SingletonEnv<WindowsEnv>;
+
+} // namespace
+
+void EnvWindowsTestHelper::SetReadOnlyMMapLimit(int limit) {
+ WindowsDefaultEnv::AssertEnvNotInitialized();
+ g_mmap_limit = limit;
+}
+
+Env* Env::Default() {
+ static WindowsDefaultEnv env_container;
+ return env_container.env();
+}
+
+} // namespace leveldb
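The new env_windows.cc only swaps the platform backend; callers keep going through the portable leveldb::Env interface. As a minimal usage sketch (not part of the patch), assuming the stock helpers WriteStringToFile/ReadFileToString declared in include/leveldb/env.h and a made-up file name:

    #include <cassert>
    #include <string>
    #include "leveldb/env.h"

    int main() {
      leveldb::Env* env = leveldb::Env::Default();   // backed by SingletonEnv<WindowsEnv> on Windows
      std::string path;
      assert(env->GetTestDirectory(&path).ok());
      path += "/env_usage_sketch.txt";               // hypothetical file name
      assert(leveldb::WriteStringToFile(env, "hello", path).ok());
      std::string data;
      assert(leveldb::ReadFileToString(env, path, &data).ok());
      assert(data == "hello");
      env->DeleteFile(path);
      return 0;
    }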
diff --git a/src/leveldb/util/env_windows_test.cc b/src/leveldb/util/env_windows_test.cc
new file mode 100644
index 0000000000..3c22133891
--- /dev/null
+++ b/src/leveldb/util/env_windows_test.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "leveldb/env.h"
+
+#include "port/port.h"
+#include "util/env_windows_test_helper.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+static const int kMMapLimit = 4;
+
+class EnvWindowsTest {
+ public:
+ static void SetFileLimits(int mmap_limit) {
+ EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit);
+ }
+
+ EnvWindowsTest() : env_(Env::Default()) {}
+
+ Env* env_;
+};
+
+TEST(EnvWindowsTest, TestOpenOnRead) {
+ // Write some test data to a single file that will be opened |n| times.
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string test_file = test_dir + "/open_on_read.txt";
+
+ FILE* f = fopen(test_file.c_str(), "w");
+ ASSERT_TRUE(f != nullptr);
+ const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
+ fputs(kFileData, f);
+ fclose(f);
+
+ // Open the test file more times than the mmap limit allows to force
+ // leveldb::WindowsEnv to switch from mapping the file into memory
+ // to basic file reading.
+ const int kNumFiles = kMMapLimit + 5;
+ leveldb::RandomAccessFile* files[kNumFiles] = {0};
+ for (int i = 0; i < kNumFiles; i++) {
+ ASSERT_OK(env_->NewRandomAccessFile(test_file, &files[i]));
+ }
+ char scratch;
+ Slice read_result;
+ for (int i = 0; i < kNumFiles; i++) {
+ ASSERT_OK(files[i]->Read(i, 1, &read_result, &scratch));
+ ASSERT_EQ(kFileData[i], read_result[0]);
+ }
+ for (int i = 0; i < kNumFiles; i++) {
+ delete files[i];
+ }
+ ASSERT_OK(env_->DeleteFile(test_file));
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ // All tests currently run with the same read-only file limits.
+ leveldb::EnvWindowsTest::SetFileLimits(leveldb::kMMapLimit);
+ return leveldb::test::RunAllTests();
+}
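TestOpenOnRead above exercises the mmap cap that WindowsEnv enforces through its Limiter. A stripped-down, standalone sketch of that pattern (simplified from the class added in env_windows.cc; not part of the patch itself):

    #include <atomic>
    #include <cassert>

    class Limiter {
     public:
      explicit Limiter(int max_acquires) : acquires_allowed_(max_acquires) {}
      // Optimistically take a slot; give it back if the count went non-positive.
      bool Acquire() {
        if (acquires_allowed_.fetch_sub(1, std::memory_order_relaxed) > 0) return true;
        acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
        return false;
      }
      void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); }
     private:
      std::atomic<int> acquires_allowed_;
    };

    int main() {
      Limiter mmap_limiter(1);
      assert(mmap_limiter.Acquire());   // first file gets an mmap slot
      assert(!mmap_limiter.Acquire());  // second one falls back to ReadFile-based access
      mmap_limiter.Release();
      assert(mmap_limiter.Acquire());
      return 0;
    }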
diff --git a/src/leveldb/util/env_windows_test_helper.h b/src/leveldb/util/env_windows_test_helper.h
new file mode 100644
index 0000000000..e6f6020561
--- /dev/null
+++ b/src/leveldb/util/env_windows_test_helper.h
@@ -0,0 +1,25 @@
+// Copyright 2018 (c) The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_
+#define STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_
+
+namespace leveldb {
+
+class EnvWindowsTest;
+
+// A helper for the Windows Env to facilitate testing.
+class EnvWindowsTestHelper {
+ private:
+ friend class CorruptionTest;
+ friend class EnvWindowsTest;
+
+ // Set the maximum number of read-only files that will be mapped via mmap.
+ // Must be called before creating an Env.
+ static void SetReadOnlyMMapLimit(int limit);
+};
+
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_
diff --git a/src/leveldb/util/filter_policy.cc b/src/leveldb/util/filter_policy.cc
index 7b045c8c91..90fd754d64 100644
--- a/src/leveldb/util/filter_policy.cc
+++ b/src/leveldb/util/filter_policy.cc
@@ -6,6 +6,6 @@
namespace leveldb {
-FilterPolicy::~FilterPolicy() { }
+FilterPolicy::~FilterPolicy() {}
} // namespace leveldb
diff --git a/src/leveldb/util/hash.cc b/src/leveldb/util/hash.cc
index ed439ce7a2..dd47c110ee 100644
--- a/src/leveldb/util/hash.cc
+++ b/src/leveldb/util/hash.cc
@@ -2,15 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "util/hash.h"
+
#include <string.h>
+
#include "util/coding.h"
-#include "util/hash.h"
// The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through
// between switch labels. The real definition should be provided externally.
// This one is a fallback version for unsupported compilers.
#ifndef FALLTHROUGH_INTENDED
-#define FALLTHROUGH_INTENDED do { } while (0)
+#define FALLTHROUGH_INTENDED \
+ do { \
+ } while (0)
#endif
namespace leveldb {
@@ -34,13 +38,13 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) {
// Pick up remaining bytes
switch (limit - data) {
case 3:
- h += static_cast<unsigned char>(data[2]) << 16;
+ h += static_cast<uint8_t>(data[2]) << 16;
FALLTHROUGH_INTENDED;
case 2:
- h += static_cast<unsigned char>(data[1]) << 8;
+ h += static_cast<uint8_t>(data[1]) << 8;
FALLTHROUGH_INTENDED;
case 1:
- h += static_cast<unsigned char>(data[0]);
+ h += static_cast<uint8_t>(data[0]);
h *= m;
h ^= (h >> r);
break;
@@ -48,5 +52,4 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) {
return h;
}
-
} // namespace leveldb
diff --git a/src/leveldb/util/hash.h b/src/leveldb/util/hash.h
index 8889d56be8..74bdb6e7b2 100644
--- a/src/leveldb/util/hash.h
+++ b/src/leveldb/util/hash.h
@@ -12,8 +12,8 @@
namespace leveldb {
-extern uint32_t Hash(const char* data, size_t n, uint32_t seed);
+uint32_t Hash(const char* data, size_t n, uint32_t seed);
-}
+} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_HASH_H_
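The declaration above is leveldb's internal hash used by the bloom filter and table code. A tiny call sketch (it only builds inside the leveldb tree, since util/hash.h is not an installed header):

    #include <cstdint>
    #include <cstdio>
    #include "util/hash.h"

    int main() {
      // Bytes with the high bit set must hash as unsigned values; the
      // SignedUnsignedIssue test below pins that behavior down.
      const char data[] = {'\xe2', '\x99', '\xa5'};
      uint32_t h = leveldb::Hash(data, sizeof(data), 0xbc9f1d34);
      std::printf("hash = %08x\n", static_cast<unsigned>(h));
      return 0;
    }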
diff --git a/src/leveldb/util/hash_test.cc b/src/leveldb/util/hash_test.cc
index eaa1c92c23..21f8171da6 100644
--- a/src/leveldb/util/hash_test.cc
+++ b/src/leveldb/util/hash_test.cc
@@ -7,26 +7,18 @@
namespace leveldb {
-class HASH { };
+class HASH {};
TEST(HASH, SignedUnsignedIssue) {
- const unsigned char data1[1] = {0x62};
- const unsigned char data2[2] = {0xc3, 0x97};
- const unsigned char data3[3] = {0xe2, 0x99, 0xa5};
- const unsigned char data4[4] = {0xe1, 0x80, 0xb9, 0x32};
- const unsigned char data5[48] = {
- 0x01, 0xc0, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x14, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x04, 0x00,
- 0x00, 0x00, 0x00, 0x14,
- 0x00, 0x00, 0x00, 0x18,
- 0x28, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x02, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
+ const uint8_t data1[1] = {0x62};
+ const uint8_t data2[2] = {0xc3, 0x97};
+ const uint8_t data3[3] = {0xe2, 0x99, 0xa5};
+ const uint8_t data4[4] = {0xe1, 0x80, 0xb9, 0x32};
+ const uint8_t data5[48] = {
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
ASSERT_EQ(Hash(0, 0, 0xbc9f1d34), 0xbc9f1d34);
@@ -49,6 +41,4 @@ TEST(HASH, SignedUnsignedIssue) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/histogram.cc b/src/leveldb/util/histogram.cc
index bb95f583ea..65092c88f2 100644
--- a/src/leveldb/util/histogram.cc
+++ b/src/leveldb/util/histogram.cc
@@ -2,36 +2,174 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "util/histogram.h"
+
#include <math.h>
#include <stdio.h>
+
#include "port/port.h"
-#include "util/histogram.h"
namespace leveldb {
const double Histogram::kBucketLimit[kNumBuckets] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 25, 30, 35, 40, 45,
- 50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400, 450,
- 500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000,
- 3500, 4000, 4500, 5000, 6000, 7000, 8000, 9000, 10000, 12000, 14000,
- 16000, 18000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000,
- 70000, 80000, 90000, 100000, 120000, 140000, 160000, 180000, 200000,
- 250000, 300000, 350000, 400000, 450000, 500000, 600000, 700000, 800000,
- 900000, 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2500000,
- 3000000, 3500000, 4000000, 4500000, 5000000, 6000000, 7000000, 8000000,
- 9000000, 10000000, 12000000, 14000000, 16000000, 18000000, 20000000,
- 25000000, 30000000, 35000000, 40000000, 45000000, 50000000, 60000000,
- 70000000, 80000000, 90000000, 100000000, 120000000, 140000000, 160000000,
- 180000000, 200000000, 250000000, 300000000, 350000000, 400000000,
- 450000000, 500000000, 600000000, 700000000, 800000000, 900000000,
- 1000000000, 1200000000, 1400000000, 1600000000, 1800000000, 2000000000,
- 2500000000.0, 3000000000.0, 3500000000.0, 4000000000.0, 4500000000.0,
- 5000000000.0, 6000000000.0, 7000000000.0, 8000000000.0, 9000000000.0,
- 1e200,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 12,
+ 14,
+ 16,
+ 18,
+ 20,
+ 25,
+ 30,
+ 35,
+ 40,
+ 45,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 120,
+ 140,
+ 160,
+ 180,
+ 200,
+ 250,
+ 300,
+ 350,
+ 400,
+ 450,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1200,
+ 1400,
+ 1600,
+ 1800,
+ 2000,
+ 2500,
+ 3000,
+ 3500,
+ 4000,
+ 4500,
+ 5000,
+ 6000,
+ 7000,
+ 8000,
+ 9000,
+ 10000,
+ 12000,
+ 14000,
+ 16000,
+ 18000,
+ 20000,
+ 25000,
+ 30000,
+ 35000,
+ 40000,
+ 45000,
+ 50000,
+ 60000,
+ 70000,
+ 80000,
+ 90000,
+ 100000,
+ 120000,
+ 140000,
+ 160000,
+ 180000,
+ 200000,
+ 250000,
+ 300000,
+ 350000,
+ 400000,
+ 450000,
+ 500000,
+ 600000,
+ 700000,
+ 800000,
+ 900000,
+ 1000000,
+ 1200000,
+ 1400000,
+ 1600000,
+ 1800000,
+ 2000000,
+ 2500000,
+ 3000000,
+ 3500000,
+ 4000000,
+ 4500000,
+ 5000000,
+ 6000000,
+ 7000000,
+ 8000000,
+ 9000000,
+ 10000000,
+ 12000000,
+ 14000000,
+ 16000000,
+ 18000000,
+ 20000000,
+ 25000000,
+ 30000000,
+ 35000000,
+ 40000000,
+ 45000000,
+ 50000000,
+ 60000000,
+ 70000000,
+ 80000000,
+ 90000000,
+ 100000000,
+ 120000000,
+ 140000000,
+ 160000000,
+ 180000000,
+ 200000000,
+ 250000000,
+ 300000000,
+ 350000000,
+ 400000000,
+ 450000000,
+ 500000000,
+ 600000000,
+ 700000000,
+ 800000000,
+ 900000000,
+ 1000000000,
+ 1200000000,
+ 1400000000,
+ 1600000000,
+ 1800000000,
+ 2000000000,
+ 2500000000.0,
+ 3000000000.0,
+ 3500000000.0,
+ 4000000000.0,
+ 4500000000.0,
+ 5000000000.0,
+ 6000000000.0,
+ 7000000000.0,
+ 8000000000.0,
+ 9000000000.0,
+ 1e200,
};
void Histogram::Clear() {
- min_ = kBucketLimit[kNumBuckets-1];
+ min_ = kBucketLimit[kNumBuckets - 1];
max_ = 0;
num_ = 0;
sum_ = 0;
@@ -66,9 +204,7 @@ void Histogram::Merge(const Histogram& other) {
}
}
-double Histogram::Median() const {
- return Percentile(50.0);
-}
+double Histogram::Median() const { return Percentile(50.0); }
double Histogram::Percentile(double p) const {
double threshold = num_ * (p / 100.0);
@@ -77,7 +213,7 @@ double Histogram::Percentile(double p) const {
sum += buckets_[b];
if (sum >= threshold) {
// Scale linearly within this bucket
- double left_point = (b == 0) ? 0 : kBucketLimit[b-1];
+ double left_point = (b == 0) ? 0 : kBucketLimit[b - 1];
double right_point = kBucketLimit[b];
double left_sum = sum - buckets_[b];
double right_sum = sum;
@@ -105,12 +241,10 @@ double Histogram::StandardDeviation() const {
std::string Histogram::ToString() const {
std::string r;
char buf[200];
- snprintf(buf, sizeof(buf),
- "Count: %.0f Average: %.4f StdDev: %.2f\n",
- num_, Average(), StandardDeviation());
+ snprintf(buf, sizeof(buf), "Count: %.0f Average: %.4f StdDev: %.2f\n", num_,
+ Average(), StandardDeviation());
r.append(buf);
- snprintf(buf, sizeof(buf),
- "Min: %.4f Median: %.4f Max: %.4f\n",
+ snprintf(buf, sizeof(buf), "Min: %.4f Median: %.4f Max: %.4f\n",
(num_ == 0.0 ? 0.0 : min_), Median(), max_);
r.append(buf);
r.append("------------------------------------------------------\n");
@@ -119,17 +253,16 @@ std::string Histogram::ToString() const {
for (int b = 0; b < kNumBuckets; b++) {
if (buckets_[b] <= 0.0) continue;
sum += buckets_[b];
- snprintf(buf, sizeof(buf),
- "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
- ((b == 0) ? 0.0 : kBucketLimit[b-1]), // left
- kBucketLimit[b], // right
- buckets_[b], // count
- mult * buckets_[b], // percentage
- mult * sum); // cumulative percentage
+ snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
+ ((b == 0) ? 0.0 : kBucketLimit[b - 1]), // left
+ kBucketLimit[b], // right
+ buckets_[b], // count
+ mult * buckets_[b], // percentage
+ mult * sum); // cumulative percentage
r.append(buf);
// Add hash marks based on percentage; 20 marks for 100%.
- int marks = static_cast<int>(20*(buckets_[b] / num_) + 0.5);
+ int marks = static_cast<int>(20 * (buckets_[b] / num_) + 0.5);
r.append(marks, '#');
r.push_back('\n');
}
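Note: the reflowed Percentile() above scales linearly inside the bucket whose cumulative count crosses the requested threshold. A minimal standalone sketch of that interpolation step, with hypothetical names and detached from the Histogram class:

    // Sketch only: linear interpolation of a percentile inside one bucket.
    // threshold is num_ * (p / 100.0); left_sum/right_sum bracket it.
    double InterpolateWithinBucket(double threshold, double left_point,
                                   double right_point, double left_sum,
                                   double right_sum) {
      const double pos = (threshold - left_sum) / (right_sum - left_sum);
      return left_point + (right_point - left_point) * pos;
    }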
diff --git a/src/leveldb/util/histogram.h b/src/leveldb/util/histogram.h
index 1ef9f3c8ab..4da60fba45 100644
--- a/src/leveldb/util/histogram.h
+++ b/src/leveldb/util/histogram.h
@@ -11,8 +11,8 @@ namespace leveldb {
class Histogram {
public:
- Histogram() { }
- ~Histogram() { }
+ Histogram() {}
+ ~Histogram() {}
void Clear();
void Add(double value);
@@ -21,20 +21,22 @@ class Histogram {
std::string ToString() const;
private:
+ enum { kNumBuckets = 154 };
+
+ double Median() const;
+ double Percentile(double p) const;
+ double Average() const;
+ double StandardDeviation() const;
+
+ static const double kBucketLimit[kNumBuckets];
+
double min_;
double max_;
double num_;
double sum_;
double sum_squares_;
- enum { kNumBuckets = 154 };
- static const double kBucketLimit[kNumBuckets];
double buckets_[kNumBuckets];
-
- double Median() const;
- double Percentile(double p) const;
- double Average() const;
- double StandardDeviation() const;
};
} // namespace leveldb
diff --git a/src/leveldb/util/logging.cc b/src/leveldb/util/logging.cc
index db6160c8f1..75e9d037d3 100644
--- a/src/leveldb/util/logging.cc
+++ b/src/leveldb/util/logging.cc
@@ -8,6 +8,9 @@
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
+
+#include <limits>
+
#include "leveldb/env.h"
#include "leveldb/slice.h"
@@ -15,7 +18,7 @@ namespace leveldb {
void AppendNumberTo(std::string* str, uint64_t num) {
char buf[30];
- snprintf(buf, sizeof(buf), "%llu", (unsigned long long) num);
+ snprintf(buf, sizeof(buf), "%llu", (unsigned long long)num);
str->append(buf);
}
@@ -46,27 +49,36 @@ std::string EscapeString(const Slice& value) {
}
bool ConsumeDecimalNumber(Slice* in, uint64_t* val) {
- uint64_t v = 0;
- int digits = 0;
- while (!in->empty()) {
- unsigned char c = (*in)[0];
- if (c >= '0' && c <= '9') {
- ++digits;
- const int delta = (c - '0');
- static const uint64_t kMaxUint64 = ~static_cast<uint64_t>(0);
- if (v > kMaxUint64/10 ||
- (v == kMaxUint64/10 && delta > kMaxUint64%10)) {
- // Overflow
- return false;
- }
- v = (v * 10) + delta;
- in->remove_prefix(1);
- } else {
- break;
+ // Constants that will be optimized away.
+ constexpr const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
+ constexpr const char kLastDigitOfMaxUint64 =
+ '0' + static_cast<char>(kMaxUint64 % 10);
+
+ uint64_t value = 0;
+
+ // reinterpret_cast-ing from char* to uint8_t* to avoid signedness.
+ const uint8_t* start = reinterpret_cast<const uint8_t*>(in->data());
+
+ const uint8_t* end = start + in->size();
+ const uint8_t* current = start;
+ for (; current != end; ++current) {
+ const uint8_t ch = *current;
+ if (ch < '0' || ch > '9') break;
+
+ // Overflow check.
+ // kMaxUint64 / 10 is also constant and will be optimized away.
+ if (value > kMaxUint64 / 10 ||
+ (value == kMaxUint64 / 10 && ch > kLastDigitOfMaxUint64)) {
+ return false;
}
+
+ value = (value * 10) + (ch - '0');
}
- *val = v;
- return (digits > 0);
+
+ *val = value;
+ const size_t digits_consumed = current - start;
+ in->remove_prefix(digits_consumed);
+ return digits_consumed != 0;
}
} // namespace leveldb
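Note: the rewritten ConsumeDecimalNumber() above guards against uint64_t overflow by checking the accumulated value against kMaxUint64 / 10 (and the incoming digit against the last digit of kMaxUint64) before multiplying. A hedged, self-contained sketch of the same guard using std::string instead of Slice and a hypothetical function name:

    #include <cstdint>
    #include <limits>
    #include <string>

    // Sketch: parse a decimal prefix into uint64_t, refusing to overflow.
    // Unlike ConsumeDecimalNumber(), this does not advance the input.
    bool ParseDecimalPrefix(const std::string& in, uint64_t* out) {
      constexpr uint64_t kMax = std::numeric_limits<uint64_t>::max();
      uint64_t value = 0;
      size_t i = 0;
      for (; i < in.size(); ++i) {
        const char ch = in[i];
        if (ch < '0' || ch > '9') break;
        const uint64_t digit = static_cast<uint64_t>(ch - '0');
        // Reject if value * 10 + digit would exceed kMax.
        if (value > kMax / 10 || (value == kMax / 10 && digit > kMax % 10)) {
          return false;
        }
        value = value * 10 + digit;
      }
      *out = value;
      return i != 0;  // At least one digit was consumed.
    }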
diff --git a/src/leveldb/util/logging.h b/src/leveldb/util/logging.h
index 1b450d2480..8ff2da86b4 100644
--- a/src/leveldb/util/logging.h
+++ b/src/leveldb/util/logging.h
@@ -8,9 +8,11 @@
#ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_
#define STORAGE_LEVELDB_UTIL_LOGGING_H_
-#include <stdio.h>
#include <stdint.h>
+#include <stdio.h>
+
#include <string>
+
#include "port/port.h"
namespace leveldb {
@@ -19,24 +21,24 @@ class Slice;
class WritableFile;
// Append a human-readable printout of "num" to *str
-extern void AppendNumberTo(std::string* str, uint64_t num);
+void AppendNumberTo(std::string* str, uint64_t num);
// Append a human-readable printout of "value" to *str.
// Escapes any non-printable characters found in "value".
-extern void AppendEscapedStringTo(std::string* str, const Slice& value);
+void AppendEscapedStringTo(std::string* str, const Slice& value);
// Return a human-readable printout of "num"
-extern std::string NumberToString(uint64_t num);
+std::string NumberToString(uint64_t num);
// Return a human-readable version of "value".
// Escapes any non-printable characters found in "value".
-extern std::string EscapeString(const Slice& value);
+std::string EscapeString(const Slice& value);
// Parse a human-readable number from "*in" into *value. On success,
// advances "*in" past the consumed number and sets "*val" to the
// numeric value. Otherwise, returns false and leaves *in in an
// unspecified state.
-extern bool ConsumeDecimalNumber(Slice* in, uint64_t* val);
+bool ConsumeDecimalNumber(Slice* in, uint64_t* val);
} // namespace leveldb
diff --git a/src/leveldb/util/logging_test.cc b/src/leveldb/util/logging_test.cc
new file mode 100644
index 0000000000..389cbeb14f
--- /dev/null
+++ b/src/leveldb/util/logging_test.cc
@@ -0,0 +1,143 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <limits>
+#include <string>
+
+#include "leveldb/slice.h"
+#include "util/logging.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+class Logging {};
+
+TEST(Logging, NumberToString) {
+ ASSERT_EQ("0", NumberToString(0));
+ ASSERT_EQ("1", NumberToString(1));
+ ASSERT_EQ("9", NumberToString(9));
+
+ ASSERT_EQ("10", NumberToString(10));
+ ASSERT_EQ("11", NumberToString(11));
+ ASSERT_EQ("19", NumberToString(19));
+ ASSERT_EQ("99", NumberToString(99));
+
+ ASSERT_EQ("100", NumberToString(100));
+ ASSERT_EQ("109", NumberToString(109));
+ ASSERT_EQ("190", NumberToString(190));
+ ASSERT_EQ("123", NumberToString(123));
+ ASSERT_EQ("12345678", NumberToString(12345678));
+
+ static_assert(std::numeric_limits<uint64_t>::max() == 18446744073709551615U,
+ "Test consistency check");
+ ASSERT_EQ("18446744073709551000", NumberToString(18446744073709551000U));
+ ASSERT_EQ("18446744073709551600", NumberToString(18446744073709551600U));
+ ASSERT_EQ("18446744073709551610", NumberToString(18446744073709551610U));
+ ASSERT_EQ("18446744073709551614", NumberToString(18446744073709551614U));
+ ASSERT_EQ("18446744073709551615", NumberToString(18446744073709551615U));
+}
+
+void ConsumeDecimalNumberRoundtripTest(uint64_t number,
+ const std::string& padding = "") {
+ std::string decimal_number = NumberToString(number);
+ std::string input_string = decimal_number + padding;
+ Slice input(input_string);
+ Slice output = input;
+ uint64_t result;
+ ASSERT_TRUE(ConsumeDecimalNumber(&output, &result));
+ ASSERT_EQ(number, result);
+ ASSERT_EQ(decimal_number.size(), output.data() - input.data());
+ ASSERT_EQ(padding.size(), output.size());
+}
+
+TEST(Logging, ConsumeDecimalNumberRoundtrip) {
+ ConsumeDecimalNumberRoundtripTest(0);
+ ConsumeDecimalNumberRoundtripTest(1);
+ ConsumeDecimalNumberRoundtripTest(9);
+
+ ConsumeDecimalNumberRoundtripTest(10);
+ ConsumeDecimalNumberRoundtripTest(11);
+ ConsumeDecimalNumberRoundtripTest(19);
+ ConsumeDecimalNumberRoundtripTest(99);
+
+ ConsumeDecimalNumberRoundtripTest(100);
+ ConsumeDecimalNumberRoundtripTest(109);
+ ConsumeDecimalNumberRoundtripTest(190);
+ ConsumeDecimalNumberRoundtripTest(123);
+ ASSERT_EQ("12345678", NumberToString(12345678));
+
+ for (uint64_t i = 0; i < 100; ++i) {
+ uint64_t large_number = std::numeric_limits<uint64_t>::max() - i;
+ ConsumeDecimalNumberRoundtripTest(large_number);
+ }
+}
+
+TEST(Logging, ConsumeDecimalNumberRoundtripWithPadding) {
+ ConsumeDecimalNumberRoundtripTest(0, " ");
+ ConsumeDecimalNumberRoundtripTest(1, "abc");
+ ConsumeDecimalNumberRoundtripTest(9, "x");
+
+ ConsumeDecimalNumberRoundtripTest(10, "_");
+ ConsumeDecimalNumberRoundtripTest(11, std::string("\0\0\0", 3));
+ ConsumeDecimalNumberRoundtripTest(19, "abc");
+ ConsumeDecimalNumberRoundtripTest(99, "padding");
+
+ ConsumeDecimalNumberRoundtripTest(100, " ");
+
+ for (uint64_t i = 0; i < 100; ++i) {
+ uint64_t large_number = std::numeric_limits<uint64_t>::max() - i;
+ ConsumeDecimalNumberRoundtripTest(large_number, "pad");
+ }
+}
+
+void ConsumeDecimalNumberOverflowTest(const std::string& input_string) {
+ Slice input(input_string);
+ Slice output = input;
+ uint64_t result;
+ ASSERT_EQ(false, ConsumeDecimalNumber(&output, &result));
+}
+
+TEST(Logging, ConsumeDecimalNumberOverflow) {
+ static_assert(std::numeric_limits<uint64_t>::max() == 18446744073709551615U,
+ "Test consistency check");
+ ConsumeDecimalNumberOverflowTest("18446744073709551616");
+ ConsumeDecimalNumberOverflowTest("18446744073709551617");
+ ConsumeDecimalNumberOverflowTest("18446744073709551618");
+ ConsumeDecimalNumberOverflowTest("18446744073709551619");
+ ConsumeDecimalNumberOverflowTest("18446744073709551620");
+ ConsumeDecimalNumberOverflowTest("18446744073709551621");
+ ConsumeDecimalNumberOverflowTest("18446744073709551622");
+ ConsumeDecimalNumberOverflowTest("18446744073709551623");
+ ConsumeDecimalNumberOverflowTest("18446744073709551624");
+ ConsumeDecimalNumberOverflowTest("18446744073709551625");
+ ConsumeDecimalNumberOverflowTest("18446744073709551626");
+
+ ConsumeDecimalNumberOverflowTest("18446744073709551700");
+
+ ConsumeDecimalNumberOverflowTest("99999999999999999999");
+}
+
+void ConsumeDecimalNumberNoDigitsTest(const std::string& input_string) {
+ Slice input(input_string);
+ Slice output = input;
+ uint64_t result;
+ ASSERT_EQ(false, ConsumeDecimalNumber(&output, &result));
+ ASSERT_EQ(input.data(), output.data());
+ ASSERT_EQ(input.size(), output.size());
+}
+
+TEST(Logging, ConsumeDecimalNumberNoDigits) {
+ ConsumeDecimalNumberNoDigitsTest("");
+ ConsumeDecimalNumberNoDigitsTest(" ");
+ ConsumeDecimalNumberNoDigitsTest("a");
+ ConsumeDecimalNumberNoDigitsTest(" 123");
+ ConsumeDecimalNumberNoDigitsTest("a123");
+ ConsumeDecimalNumberNoDigitsTest(std::string("\000123", 4));
+ ConsumeDecimalNumberNoDigitsTest(std::string("\177123", 4));
+ ConsumeDecimalNumberNoDigitsTest(std::string("\377123", 4));
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/mutexlock.h b/src/leveldb/util/mutexlock.h
index 1ff5a9efa1..0cb2e250fb 100644
--- a/src/leveldb/util/mutexlock.h
+++ b/src/leveldb/util/mutexlock.h
@@ -22,20 +22,18 @@ namespace leveldb {
class SCOPED_LOCKABLE MutexLock {
public:
- explicit MutexLock(port::Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu)
- : mu_(mu) {
+ explicit MutexLock(port::Mutex* mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
this->mu_->Lock();
}
~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); }
+ MutexLock(const MutexLock&) = delete;
+ MutexLock& operator=(const MutexLock&) = delete;
+
private:
- port::Mutex *const mu_;
- // No copying allowed
- MutexLock(const MutexLock&);
- void operator=(const MutexLock&);
+ port::Mutex* const mu_;
};
} // namespace leveldb
-
#endif // STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
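Note: the MutexLock cleanup above replaces the old private-and-undefined copy members with explicitly deleted ones; the scope-guard behaviour is unchanged. A hedged illustration of the same idiom against a plain std::mutex (illustrative names, not the leveldb port layer):

    #include <mutex>

    // Sketch: RAII lock guard with copying disabled via = delete.
    class ScopedLock {
     public:
      explicit ScopedLock(std::mutex* mu) : mu_(mu) { mu_->lock(); }
      ~ScopedLock() { mu_->unlock(); }

      ScopedLock(const ScopedLock&) = delete;
      ScopedLock& operator=(const ScopedLock&) = delete;

     private:
      std::mutex* const mu_;
    };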
diff --git a/src/leveldb/util/no_destructor.h b/src/leveldb/util/no_destructor.h
new file mode 100644
index 0000000000..a0d3b8703d
--- /dev/null
+++ b/src/leveldb/util/no_destructor.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
+#define STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
+
+#include <type_traits>
+#include <utility>
+
+namespace leveldb {
+
+// Wraps an instance whose destructor is never called.
+//
+// This is intended for use with function-level static variables.
+template <typename InstanceType>
+class NoDestructor {
+ public:
+ template <typename... ConstructorArgTypes>
+ explicit NoDestructor(ConstructorArgTypes&&... constructor_args) {
+ static_assert(sizeof(instance_storage_) >= sizeof(InstanceType),
+ "instance_storage_ is not large enough to hold the instance");
+ static_assert(
+ alignof(decltype(instance_storage_)) >= alignof(InstanceType),
+ "instance_storage_ does not meet the instance's alignment requirement");
+ new (&instance_storage_)
+ InstanceType(std::forward<ConstructorArgTypes>(constructor_args)...);
+ }
+
+ ~NoDestructor() = default;
+
+ NoDestructor(const NoDestructor&) = delete;
+ NoDestructor& operator=(const NoDestructor&) = delete;
+
+ InstanceType* get() {
+ return reinterpret_cast<InstanceType*>(&instance_storage_);
+ }
+
+ private:
+ typename std::aligned_storage<sizeof(InstanceType),
+ alignof(InstanceType)>::type instance_storage_;
+};
+
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
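Note: NoDestructor is aimed at function-level statics whose destructor should never run, sidestepping destruction-order problems at process exit. A hedged usage sketch (the accessor function is hypothetical; the header is the one added above):

    #include <string>

    #include "util/no_destructor.h"

    // Sketch: a process-lifetime singleton constructed on first use and
    // intentionally never destroyed.
    const std::string& DefaultName() {
      static leveldb::NoDestructor<std::string> name("leveldb");
      return *name.get();
    }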
diff --git a/src/leveldb/util/no_destructor_test.cc b/src/leveldb/util/no_destructor_test.cc
new file mode 100644
index 0000000000..b41caca694
--- /dev/null
+++ b/src/leveldb/util/no_destructor_test.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <cstdint>
+#include <cstdlib>
+#include <utility>
+
+#include "util/no_destructor.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+namespace {
+
+struct DoNotDestruct {
+ public:
+ DoNotDestruct(uint32_t a, uint64_t b) : a(a), b(b) {}
+ ~DoNotDestruct() { std::abort(); }
+
+ // Used to check constructor argument forwarding.
+ uint32_t a;
+ uint64_t b;
+};
+
+constexpr const uint32_t kGoldenA = 0xdeadbeef;
+constexpr const uint64_t kGoldenB = 0xaabbccddeeffaabb;
+
+} // namespace
+
+class NoDestructorTest {};
+
+TEST(NoDestructorTest, StackInstance) {
+ NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
+ ASSERT_EQ(kGoldenA, instance.get()->a);
+ ASSERT_EQ(kGoldenB, instance.get()->b);
+}
+
+TEST(NoDestructorTest, StaticInstance) {
+ static NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
+ ASSERT_EQ(kGoldenA, instance.get()->a);
+ ASSERT_EQ(kGoldenB, instance.get()->b);
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/options.cc b/src/leveldb/util/options.cc
index b5e6227613..62de5bf0d2 100644
--- a/src/leveldb/util/options.cc
+++ b/src/leveldb/util/options.cc
@@ -9,22 +9,6 @@
namespace leveldb {
-Options::Options()
- : comparator(BytewiseComparator()),
- create_if_missing(false),
- error_if_exists(false),
- paranoid_checks(false),
- env(Env::Default()),
- info_log(NULL),
- write_buffer_size(4<<20),
- max_open_files(1000),
- block_cache(NULL),
- block_size(4096),
- block_restart_interval(16),
- max_file_size(2<<20),
- compression(kSnappyCompression),
- reuse_logs(false),
- filter_policy(NULL) {
-}
+Options::Options() : comparator(BytewiseComparator()), env(Env::Default()) {}
} // namespace leveldb
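Note: the shrunken Options constructor presumably relies on the defaults now being written as in-class member initializers in the corresponding header (not part of this excerpt), so only comparator and env still need constructor arguments. A brief hedged sketch of that pattern with illustrative fields:

    #include <cstddef>

    // Sketch: members with natural defaults use in-class initializers; only
    // fields that need a runtime value are set in the constructor.
    struct ExampleOptions {
      bool create_if_missing = false;
      size_t write_buffer_size = 4 << 20;
      int max_open_files = 1000;
      const char* name;  // No sensible compile-time default.

      ExampleOptions() : name("default") {}
    };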
diff --git a/src/leveldb/util/posix_logger.h b/src/leveldb/util/posix_logger.h
index c063c2b7cb..28e15d10b4 100644
--- a/src/leveldb/util/posix_logger.h
+++ b/src/leveldb/util/posix_logger.h
@@ -3,94 +3,126 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// Logger implementation that can be shared by all environments
-// where enough Posix functionality is available.
+// where enough posix functionality is available.
#ifndef STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
#define STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
-#include <algorithm>
-#include <stdio.h>
#include <sys/time.h>
-#include <time.h>
+
+#include <cassert>
+#include <cstdarg>
+#include <cstdio>
+#include <ctime>
+#include <sstream>
+#include <thread>
+
#include "leveldb/env.h"
namespace leveldb {
-class PosixLogger : public Logger {
- private:
- FILE* file_;
- uint64_t (*gettid_)(); // Return the thread id for the current thread
+class PosixLogger final : public Logger {
public:
- PosixLogger(FILE* f, uint64_t (*gettid)()) : file_(f), gettid_(gettid) { }
- virtual ~PosixLogger() {
- fclose(file_);
- }
- virtual void Logv(const char* format, va_list ap) {
- const uint64_t thread_id = (*gettid_)();
-
- // We try twice: the first time with a fixed-size stack allocated buffer,
- // and the second time with a much larger dynamically allocated buffer.
- char buffer[500];
- for (int iter = 0; iter < 2; iter++) {
- char* base;
- int bufsize;
- if (iter == 0) {
- bufsize = sizeof(buffer);
- base = buffer;
- } else {
- bufsize = 30000;
- base = new char[bufsize];
- }
- char* p = base;
- char* limit = base + bufsize;
-
- struct timeval now_tv;
- gettimeofday(&now_tv, NULL);
- const time_t seconds = now_tv.tv_sec;
- struct tm t;
- localtime_r(&seconds, &t);
- p += snprintf(p, limit - p,
- "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
- t.tm_year + 1900,
- t.tm_mon + 1,
- t.tm_mday,
- t.tm_hour,
- t.tm_min,
- t.tm_sec,
- static_cast<int>(now_tv.tv_usec),
- static_cast<long long unsigned int>(thread_id));
-
- // Print the message
- if (p < limit) {
- va_list backup_ap;
- va_copy(backup_ap, ap);
- p += vsnprintf(p, limit - p, format, backup_ap);
- va_end(backup_ap);
- }
+ // Creates a logger that writes to the given file.
+ //
+ // The PosixLogger instance takes ownership of the file handle.
+ explicit PosixLogger(std::FILE* fp) : fp_(fp) { assert(fp != nullptr); }
+
+ ~PosixLogger() override { std::fclose(fp_); }
+
+ void Logv(const char* format, va_list arguments) override {
+ // Record the time as close to the Logv() call as possible.
+ struct ::timeval now_timeval;
+ ::gettimeofday(&now_timeval, nullptr);
+ const std::time_t now_seconds = now_timeval.tv_sec;
+ struct std::tm now_components;
+ ::localtime_r(&now_seconds, &now_components);
+
+ // Record the thread ID.
+ constexpr const int kMaxThreadIdSize = 32;
+ std::ostringstream thread_stream;
+ thread_stream << std::this_thread::get_id();
+ std::string thread_id = thread_stream.str();
+ if (thread_id.size() > kMaxThreadIdSize) {
+ thread_id.resize(kMaxThreadIdSize);
+ }
- // Truncate to available space if necessary
- if (p >= limit) {
- if (iter == 0) {
- continue; // Try again with larger buffer
- } else {
- p = limit - 1;
+ // We first attempt to print into a stack-allocated buffer. If this attempt
+ // fails, we make a second attempt with a dynamically allocated buffer.
+ constexpr const int kStackBufferSize = 512;
+ char stack_buffer[kStackBufferSize];
+ static_assert(sizeof(stack_buffer) == static_cast<size_t>(kStackBufferSize),
+ "sizeof(char) is expected to be 1 in C++");
+
+ int dynamic_buffer_size = 0; // Computed in the first iteration.
+ for (int iteration = 0; iteration < 2; ++iteration) {
+ const int buffer_size =
+ (iteration == 0) ? kStackBufferSize : dynamic_buffer_size;
+ char* const buffer =
+ (iteration == 0) ? stack_buffer : new char[dynamic_buffer_size];
+
+ // Print the header into the buffer.
+ int buffer_offset = snprintf(
+ buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
+ now_components.tm_year + 1900, now_components.tm_mon + 1,
+ now_components.tm_mday, now_components.tm_hour, now_components.tm_min,
+ now_components.tm_sec, static_cast<int>(now_timeval.tv_usec),
+ thread_id.c_str());
+
+ // The header can be at most 28 characters (10 date + 15 time +
+ // 3 delimiters) plus the thread ID, which should fit comfortably into the
+ // static buffer.
+ assert(buffer_offset <= 28 + kMaxThreadIdSize);
+ static_assert(28 + kMaxThreadIdSize < kStackBufferSize,
+ "stack-allocated buffer may not fit the message header");
+ assert(buffer_offset < buffer_size);
+
+ // Print the message into the buffer.
+ std::va_list arguments_copy;
+ va_copy(arguments_copy, arguments);
+ buffer_offset +=
+ std::vsnprintf(buffer + buffer_offset, buffer_size - buffer_offset,
+ format, arguments_copy);
+ va_end(arguments_copy);
+
+ // The code below may append a newline at the end of the buffer, which
+ // requires an extra character.
+ if (buffer_offset >= buffer_size - 1) {
+ // The message did not fit into the buffer.
+ if (iteration == 0) {
+ // Re-run the loop and use a dynamically-allocated buffer. The buffer
+ // will be large enough for the log message, an extra newline and a
+ // null terminator.
+ dynamic_buffer_size = buffer_offset + 2;
+ continue;
}
+
+ // The dynamically-allocated buffer was incorrectly sized. This should
+ // not happen, assuming a correct implementation of (v)snprintf. Fail
+ // in tests, recover by truncating the log message in production.
+ assert(false);
+ buffer_offset = buffer_size - 1;
}
- // Add newline if necessary
- if (p == base || p[-1] != '\n') {
- *p++ = '\n';
+ // Add a newline if necessary.
+ if (buffer[buffer_offset - 1] != '\n') {
+ buffer[buffer_offset] = '\n';
+ ++buffer_offset;
}
- assert(p <= limit);
- fwrite(base, 1, p - base, file_);
- fflush(file_);
- if (base != buffer) {
- delete[] base;
+ assert(buffer_offset <= buffer_size);
+ std::fwrite(buffer, 1, buffer_offset, fp_);
+ std::fflush(fp_);
+
+ if (iteration != 0) {
+ delete[] buffer;
}
break;
}
}
+
+ private:
+ std::FILE* const fp_;
};
} // namespace leveldb
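Note: the new Logv() formats into a 512-byte stack buffer first and only falls back to a heap buffer sized from the first attempt's return value. A standalone hedged sketch of the two-pass vsnprintf pattern (not the leveldb Logger API):

    #include <cstdarg>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Sketch: printf-style formatting with a stack fast path and an exactly
    // sized heap fallback.
    std::string FormatTwoPass(const char* fmt, ...) {
      std::va_list args;
      va_start(args, fmt);

      char stack_buf[512];
      std::va_list args_copy;
      va_copy(args_copy, args);
      const int needed =
          std::vsnprintf(stack_buf, sizeof(stack_buf), fmt, args_copy);
      va_end(args_copy);

      std::string result;
      if (needed >= 0 && needed < static_cast<int>(sizeof(stack_buf))) {
        result.assign(stack_buf, static_cast<size_t>(needed));
      } else if (needed >= 0) {
        // vsnprintf reported the required length, so the second pass can be
        // sized exactly (plus the terminating null).
        std::vector<char> heap_buf(static_cast<size_t>(needed) + 1);
        std::vsnprintf(heap_buf.data(), heap_buf.size(), fmt, args);
        result.assign(heap_buf.data(), static_cast<size_t>(needed));
      }
      va_end(args);
      return result;
    }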
diff --git a/src/leveldb/util/random.h b/src/leveldb/util/random.h
index ddd51b1c7b..76f7daf52a 100644
--- a/src/leveldb/util/random.h
+++ b/src/leveldb/util/random.h
@@ -15,6 +15,7 @@ namespace leveldb {
class Random {
private:
uint32_t seed_;
+
public:
explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) {
// Avoid bad seeds.
@@ -23,8 +24,8 @@ class Random {
}
}
uint32_t Next() {
- static const uint32_t M = 2147483647L; // 2^31-1
- static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0
+ static const uint32_t M = 2147483647L; // 2^31-1
+ static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0
// We are computing
// seed_ = (seed_ * A) % M, where M = 2^31-1
//
@@ -54,9 +55,7 @@ class Random {
// Skewed: pick "base" uniformly from range [0,max_log] and then
// return "base" random bits. The effect is to pick a number in the
// range [0,2^max_log-1] with exponential bias towards smaller numbers.
- uint32_t Skewed(int max_log) {
- return Uniform(1 << Uniform(max_log + 1));
- }
+ uint32_t Skewed(int max_log) { return Uniform(1 << Uniform(max_log + 1)); }
};
} // namespace leveldb
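Note: Random::Next() in the surrounding context is a Lehmer (Park–Miller) generator, seed = (seed * 16807) mod (2^31 - 1), computed through a 64-bit product. A hedged standalone sketch of one step, using the common fold-and-subtract reduction rather than a 64-bit modulo:

    #include <cstdint>

    // Sketch: one step of seed = (seed * A) % M with A = 16807, M = 2^31 - 1.
    uint32_t LehmerNext(uint32_t seed) {
      const uint32_t M = 2147483647u;  // 2^31 - 1
      const uint64_t A = 16807;
      const uint64_t product = seed * A;
      // Since 2^31 == 1 (mod M), fold the high bits back onto the low bits.
      uint32_t next = static_cast<uint32_t>((product >> 31) + (product & M));
      if (next > M) next -= M;
      return next;
    }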
diff --git a/src/leveldb/util/status.cc b/src/leveldb/util/status.cc
index a44f35b314..15ce747d80 100644
--- a/src/leveldb/util/status.cc
+++ b/src/leveldb/util/status.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "leveldb/status.h"
+
#include <stdio.h>
+
#include "port/port.h"
-#include "leveldb/status.h"
namespace leveldb {
@@ -18,8 +20,8 @@ const char* Status::CopyState(const char* state) {
Status::Status(Code code, const Slice& msg, const Slice& msg2) {
assert(code != kOk);
- const uint32_t len1 = msg.size();
- const uint32_t len2 = msg2.size();
+ const uint32_t len1 = static_cast<uint32_t>(msg.size());
+ const uint32_t len2 = static_cast<uint32_t>(msg2.size());
const uint32_t size = len1 + (len2 ? (2 + len2) : 0);
char* result = new char[size + 5];
memcpy(result, &size, sizeof(size));
@@ -34,7 +36,7 @@ Status::Status(Code code, const Slice& msg, const Slice& msg2) {
}
std::string Status::ToString() const {
- if (state_ == NULL) {
+ if (state_ == nullptr) {
return "OK";
} else {
char tmp[30];
@@ -59,8 +61,8 @@ std::string Status::ToString() const {
type = "IO error: ";
break;
default:
- snprintf(tmp, sizeof(tmp), "Unknown code(%d): ",
- static_cast<int>(code()));
+ snprintf(tmp, sizeof(tmp),
+ "Unknown code(%d): ", static_cast<int>(code()));
type = tmp;
break;
}
diff --git a/src/leveldb/util/status_test.cc b/src/leveldb/util/status_test.cc
new file mode 100644
index 0000000000..2842319fbd
--- /dev/null
+++ b/src/leveldb/util/status_test.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <utility>
+
+#include "leveldb/slice.h"
+#include "leveldb/status.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+TEST(Status, MoveConstructor) {
+ {
+ Status ok = Status::OK();
+ Status ok2 = std::move(ok);
+
+ ASSERT_TRUE(ok2.ok());
+ }
+
+ {
+ Status status = Status::NotFound("custom NotFound status message");
+ Status status2 = std::move(status);
+
+ ASSERT_TRUE(status2.IsNotFound());
+ ASSERT_EQ("NotFound: custom NotFound status message", status2.ToString());
+ }
+
+ {
+ Status self_moved = Status::IOError("custom IOError status message");
+
+ // Needed to bypass compiler warning about explicit move-assignment.
+ Status& self_moved_reference = self_moved;
+ self_moved_reference = std::move(self_moved);
+ }
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/util/testharness.cc b/src/leveldb/util/testharness.cc
index 402fab34d7..318ecfa3b7 100644
--- a/src/leveldb/util/testharness.cc
+++ b/src/leveldb/util/testharness.cc
@@ -4,11 +4,15 @@
#include "util/testharness.h"
-#include <string>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <string>
+#include <vector>
+
+#include "leveldb/env.h"
+
namespace leveldb {
namespace test {
@@ -19,10 +23,10 @@ struct Test {
void (*func)();
};
std::vector<Test>* tests;
-}
+} // namespace
bool RegisterTest(const char* base, const char* name, void (*func)()) {
- if (tests == NULL) {
+ if (tests == nullptr) {
tests = new std::vector<Test>;
}
Test t;
@@ -37,14 +41,14 @@ int RunAllTests() {
const char* matcher = getenv("LEVELDB_TESTS");
int num = 0;
- if (tests != NULL) {
+ if (tests != nullptr) {
for (size_t i = 0; i < tests->size(); i++) {
const Test& t = (*tests)[i];
- if (matcher != NULL) {
+ if (matcher != nullptr) {
std::string name = t.base;
name.push_back('.');
name.append(t.name);
- if (strstr(name.c_str(), matcher) == NULL) {
+ if (strstr(name.c_str(), matcher) == nullptr) {
continue;
}
}
@@ -66,7 +70,7 @@ std::string TmpDir() {
int RandomSeed() {
const char* env = getenv("TEST_RANDOM_SEED");
- int result = (env != NULL ? atoi(env) : 301);
+ int result = (env != nullptr ? atoi(env) : 301);
if (result <= 0) {
result = 301;
}
diff --git a/src/leveldb/util/testharness.h b/src/leveldb/util/testharness.h
index da4fe68bb4..72cd1629eb 100644
--- a/src/leveldb/util/testharness.h
+++ b/src/leveldb/util/testharness.h
@@ -7,10 +7,10 @@
#include <stdio.h>
#include <stdlib.h>
+
#include <sstream>
-#include "leveldb/env.h"
-#include "leveldb/slice.h"
-#include "util/random.h"
+
+#include "leveldb/status.h"
namespace leveldb {
namespace test {
@@ -27,15 +27,15 @@ namespace test {
//
// Returns 0 if all tests pass.
// Dies or returns a non-zero value if some test fails.
-extern int RunAllTests();
+int RunAllTests();
// Return the directory to use for temporary storage.
-extern std::string TmpDir();
+std::string TmpDir();
// Return a randomization seed for this run. Typically returns the
// same number on repeated invocations of this binary, but automated
// runs may be able to vary the seed.
-extern int RandomSeed();
+int RandomSeed();
// An instance of Tester is allocated to hold temporary state during
// the execution of an assertion.
@@ -47,9 +47,7 @@ class Tester {
std::stringstream ss_;
public:
- Tester(const char* f, int l)
- : ok_(true), fname_(f), line_(l) {
- }
+ Tester(const char* f, int l) : ok_(true), fname_(f), line_(l) {}
~Tester() {
if (!ok_) {
@@ -74,14 +72,14 @@ class Tester {
return *this;
}
-#define BINARY_OP(name,op) \
- template <class X, class Y> \
- Tester& name(const X& x, const Y& y) { \
- if (! (x op y)) { \
- ss_ << " failed: " << x << (" " #op " ") << y; \
- ok_ = false; \
- } \
- return *this; \
+#define BINARY_OP(name, op) \
+ template <class X, class Y> \
+ Tester& name(const X& x, const Y& y) { \
+ if (!(x op y)) { \
+ ss_ << " failed: " << x << (" " #op " ") << y; \
+ ok_ = false; \
+ } \
+ return *this; \
}
BINARY_OP(IsEq, ==)
@@ -104,33 +102,38 @@ class Tester {
#define ASSERT_TRUE(c) ::leveldb::test::Tester(__FILE__, __LINE__).Is((c), #c)
#define ASSERT_OK(s) ::leveldb::test::Tester(__FILE__, __LINE__).IsOk((s))
-#define ASSERT_EQ(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a),(b))
-#define ASSERT_NE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a),(b))
-#define ASSERT_GE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a),(b))
-#define ASSERT_GT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a),(b))
-#define ASSERT_LE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a),(b))
-#define ASSERT_LT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a),(b))
-
-#define TCONCAT(a,b) TCONCAT1(a,b)
-#define TCONCAT1(a,b) a##b
-
-#define TEST(base,name) \
-class TCONCAT(_Test_,name) : public base { \
- public: \
- void _Run(); \
- static void _RunIt() { \
- TCONCAT(_Test_,name) t; \
- t._Run(); \
- } \
-}; \
-bool TCONCAT(_Test_ignored_,name) = \
- ::leveldb::test::RegisterTest(#base, #name, &TCONCAT(_Test_,name)::_RunIt); \
-void TCONCAT(_Test_,name)::_Run()
+#define ASSERT_EQ(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a), (b))
+#define ASSERT_NE(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a), (b))
+#define ASSERT_GE(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a), (b))
+#define ASSERT_GT(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a), (b))
+#define ASSERT_LE(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a), (b))
+#define ASSERT_LT(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a), (b))
+
+#define TCONCAT(a, b) TCONCAT1(a, b)
+#define TCONCAT1(a, b) a##b
+
+#define TEST(base, name) \
+ class TCONCAT(_Test_, name) : public base { \
+ public: \
+ void _Run(); \
+ static void _RunIt() { \
+ TCONCAT(_Test_, name) t; \
+ t._Run(); \
+ } \
+ }; \
+ bool TCONCAT(_Test_ignored_, name) = ::leveldb::test::RegisterTest( \
+ #base, #name, &TCONCAT(_Test_, name)::_RunIt); \
+ void TCONCAT(_Test_, name)::_Run()
// Register the specified test. Typically not used directly, but
// invoked via the macro expansion of TEST.
-extern bool RegisterTest(const char* base, const char* name, void (*func)());
-
+bool RegisterTest(const char* base, const char* name, void (*func)());
} // namespace test
} // namespace leveldb
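Note: the reformatted TEST macro registers each test by initializing a namespace-scope bool with the result of RegisterTest() before main() runs. A hedged sketch of that self-registration trick in isolation (illustrative registry, not the leveldb harness):

    #include <cstdio>
    #include <vector>

    // Sketch: static-initializer based test registration.
    std::vector<void (*)()>& Registry() {
      static std::vector<void (*)()> tests;  // Constructed on first use.
      return tests;
    }

    bool Register(void (*fn)()) {
      Registry().push_back(fn);
      return true;
    }

    void MyTest() { std::printf("MyTest ran\n"); }

    // Dynamic initialization runs this before main() executes.
    bool my_test_registered = Register(&MyTest);

    int main() {
      for (auto test : Registry()) test();
      return 0;
    }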
diff --git a/src/leveldb/util/testutil.cc b/src/leveldb/util/testutil.cc
index bee56bf75f..6b151b9e64 100644
--- a/src/leveldb/util/testutil.cc
+++ b/src/leveldb/util/testutil.cc
@@ -12,7 +12,7 @@ namespace test {
Slice RandomString(Random* rnd, int len, std::string* dst) {
dst->resize(len);
for (int i = 0; i < len; i++) {
- (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95)); // ' ' .. '~'
+ (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95)); // ' ' .. '~'
}
return Slice(*dst);
}
@@ -20,9 +20,8 @@ Slice RandomString(Random* rnd, int len, std::string* dst) {
std::string RandomKey(Random* rnd, int len) {
// Make sure to generate a wide variety of characters so we
// test the boundary conditions for short-key optimizations.
- static const char kTestChars[] = {
- '\0', '\1', 'a', 'b', 'c', 'd', 'e', '\xfd', '\xfe', '\xff'
- };
+ static const char kTestChars[] = {'\0', '\1', 'a', 'b', 'c',
+ 'd', 'e', '\xfd', '\xfe', '\xff'};
std::string result;
for (int i = 0; i < len; i++) {
result += kTestChars[rnd->Uniform(sizeof(kTestChars))];
@@ -30,9 +29,8 @@ std::string RandomKey(Random* rnd, int len) {
return result;
}
-
-extern Slice CompressibleString(Random* rnd, double compressed_fraction,
- size_t len, std::string* dst) {
+Slice CompressibleString(Random* rnd, double compressed_fraction, size_t len,
+ std::string* dst) {
int raw = static_cast<int>(len * compressed_fraction);
if (raw < 1) raw = 1;
std::string raw_data;
diff --git a/src/leveldb/util/testutil.h b/src/leveldb/util/testutil.h
index d7e4583702..bb4051ba07 100644
--- a/src/leveldb/util/testutil.h
+++ b/src/leveldb/util/testutil.h
@@ -5,6 +5,7 @@
#ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_
#define STORAGE_LEVELDB_UTIL_TESTUTIL_H_
+#include "helpers/memenv/memenv.h"
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "util/random.h"
@@ -14,17 +15,17 @@ namespace test {
// Store in *dst a random string of length "len" and return a Slice that
// references the generated data.
-extern Slice RandomString(Random* rnd, int len, std::string* dst);
+Slice RandomString(Random* rnd, int len, std::string* dst);
// Return a random key with the specified length that may contain interesting
// characters (e.g. \x00, \xff, etc.).
-extern std::string RandomKey(Random* rnd, int len);
+std::string RandomKey(Random* rnd, int len);
// Store in *dst a string of length "len" that will compress to
// "N*compressed_fraction" bytes and return a Slice that references
// the generated data.
-extern Slice CompressibleString(Random* rnd, double compressed_fraction,
- size_t len, std::string* dst);
+Slice CompressibleString(Random* rnd, double compressed_fraction, size_t len,
+ std::string* dst);
// A wrapper that allows injection of errors.
class ErrorEnv : public EnvWrapper {
@@ -32,25 +33,27 @@ class ErrorEnv : public EnvWrapper {
bool writable_file_error_;
int num_writable_file_errors_;
- ErrorEnv() : EnvWrapper(Env::Default()),
- writable_file_error_(false),
- num_writable_file_errors_(0) { }
+ ErrorEnv()
+ : EnvWrapper(NewMemEnv(Env::Default())),
+ writable_file_error_(false),
+ num_writable_file_errors_(0) {}
+ ~ErrorEnv() override { delete target(); }
- virtual Status NewWritableFile(const std::string& fname,
- WritableFile** result) {
+ Status NewWritableFile(const std::string& fname,
+ WritableFile** result) override {
if (writable_file_error_) {
++num_writable_file_errors_;
- *result = NULL;
+ *result = nullptr;
return Status::IOError(fname, "fake error");
}
return target()->NewWritableFile(fname, result);
}
- virtual Status NewAppendableFile(const std::string& fname,
- WritableFile** result) {
+ Status NewAppendableFile(const std::string& fname,
+ WritableFile** result) override {
if (writable_file_error_) {
++num_writable_file_errors_;
- *result = NULL;
+ *result = nullptr;
return Status::IOError(fname, "fake error");
}
return target()->NewAppendableFile(fname, result);
diff --git a/src/leveldb/util/windows_logger.h b/src/leveldb/util/windows_logger.h
new file mode 100644
index 0000000000..92960638d1
--- /dev/null
+++ b/src/leveldb/util/windows_logger.h
@@ -0,0 +1,124 @@
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// Logger implementation for the Windows platform.
+
+#ifndef STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_
+#define STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_
+
+#include <cassert>
+#include <cstdarg>
+#include <cstdio>
+#include <ctime>
+#include <sstream>
+#include <thread>
+
+#include "leveldb/env.h"
+
+namespace leveldb {
+
+class WindowsLogger final : public Logger {
+ public:
+ // Creates a logger that writes to the given file.
+ //
+ // The PosixLogger instance takes ownership of the file handle.
+ // (The comment above appears to be carried over from posix_logger.h; it is
+ // the WindowsLogger instance that takes ownership of the file handle here.)
+ explicit WindowsLogger(std::FILE* fp) : fp_(fp) { assert(fp != nullptr); }
+
+ ~WindowsLogger() override { std::fclose(fp_); }
+
+ void Logv(const char* format, va_list arguments) override {
+ // Record the time as close to the Logv() call as possible.
+ SYSTEMTIME now_components;
+ ::GetLocalTime(&now_components);
+
+ // Record the thread ID.
+ constexpr const int kMaxThreadIdSize = 32;
+ std::ostringstream thread_stream;
+ thread_stream << std::this_thread::get_id();
+ std::string thread_id = thread_stream.str();
+ if (thread_id.size() > kMaxThreadIdSize) {
+ thread_id.resize(kMaxThreadIdSize);
+ }
+
+ // We first attempt to print into a stack-allocated buffer. If this attempt
+ // fails, we make a second attempt with a dynamically allocated buffer.
+ constexpr const int kStackBufferSize = 512;
+ char stack_buffer[kStackBufferSize];
+ static_assert(sizeof(stack_buffer) == static_cast<size_t>(kStackBufferSize),
+ "sizeof(char) is expected to be 1 in C++");
+
+ int dynamic_buffer_size = 0; // Computed in the first iteration.
+ for (int iteration = 0; iteration < 2; ++iteration) {
+ const int buffer_size =
+ (iteration == 0) ? kStackBufferSize : dynamic_buffer_size;
+ char* const buffer =
+ (iteration == 0) ? stack_buffer : new char[dynamic_buffer_size];
+
+ // Print the header into the buffer.
+ int buffer_offset = snprintf(
+ buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
+ now_components.wYear, now_components.wMonth, now_components.wDay,
+ now_components.wHour, now_components.wMinute, now_components.wSecond,
+ static_cast<int>(now_components.wMilliseconds * 1000),
+ thread_id.c_str());
+
+ // The header can be at most 28 characters (10 date + 15 time +
+ // 3 delimiters) plus the thread ID, which should fit comfortably into the
+ // static buffer.
+ assert(buffer_offset <= 28 + kMaxThreadIdSize);
+ static_assert(28 + kMaxThreadIdSize < kStackBufferSize,
+ "stack-allocated buffer may not fit the message header");
+ assert(buffer_offset < buffer_size);
+
+ // Print the message into the buffer.
+ std::va_list arguments_copy;
+ va_copy(arguments_copy, arguments);
+ buffer_offset +=
+ std::vsnprintf(buffer + buffer_offset, buffer_size - buffer_offset,
+ format, arguments_copy);
+ va_end(arguments_copy);
+
+ // The code below may append a newline at the end of the buffer, which
+ // requires an extra character.
+ if (buffer_offset >= buffer_size - 1) {
+ // The message did not fit into the buffer.
+ if (iteration == 0) {
+ // Re-run the loop and use a dynamically-allocated buffer. The buffer
+ // will be large enough for the log message, an extra newline and a
+ // null terminator.
+ dynamic_buffer_size = buffer_offset + 2;
+ continue;
+ }
+
+ // The dynamically-allocated buffer was incorrectly sized. This should
+ // not happen, assuming a correct implementation of (v)snprintf. Fail
+ // in tests, recover by truncating the log message in production.
+ assert(false);
+ buffer_offset = buffer_size - 1;
+ }
+
+ // Add a newline if necessary.
+ if (buffer[buffer_offset - 1] != '\n') {
+ buffer[buffer_offset] = '\n';
+ ++buffer_offset;
+ }
+
+ assert(buffer_offset <= buffer_size);
+ std::fwrite(buffer, 1, buffer_offset, fp_);
+ std::fflush(fp_);
+
+ if (iteration != 0) {
+ delete[] buffer;
+ }
+ break;
+ }
+ }
+
+ private:
+ std::FILE* const fp_;
+};
+
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_
diff --git a/src/logging/timer.h b/src/logging/timer.h
index 45bfc4aa65..2b27c71080 100644
--- a/src/logging/timer.h
+++ b/src/logging/timer.h
@@ -85,7 +85,7 @@ private:
const std::string m_title{};
//! Forwarded on to LogPrint if specified - has the effect of only
- //! outputing the timing log when a particular debug= category is specified.
+ //! outputting the timing log when a particular debug= category is specified.
const BCLog::LogFlags m_log_category{};
};
diff --git a/src/memusage.h b/src/memusage.h
index 3ae9face15..24eb450465 100644
--- a/src/memusage.h
+++ b/src/memusage.h
@@ -6,9 +6,11 @@
#define BITCOIN_MEMUSAGE_H
#include <indirectmap.h>
+#include <prevector.h>
#include <stdlib.h>
+#include <cassert>
#include <map>
#include <memory>
#include <set>
diff --git a/src/miner.cpp b/src/miner.cpp
index 6f4e10b6ed..61d27d17c1 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -20,7 +20,6 @@
#include <timedata.h>
#include <util/moneystr.h>
#include <util/system.h>
-#include <util/validation.h>
#include <algorithm>
#include <utility>
@@ -167,7 +166,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
BlockValidationState state;
if (!TestBlockValidity(state, chainparams, *pblock, pindexPrev, false, false)) {
- throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, FormatStateMessage(state)));
+ throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, state.ToString()));
}
int64_t nTime2 = GetTimeMicros();
diff --git a/src/net.cpp b/src/net.cpp
index 18fe95e675..8352c40b98 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -45,8 +45,8 @@ static_assert(MINIUPNPC_API_VERSION >= 10, "miniUPnPc API version >= 10 assumed"
#include <math.h>
-// Dump addresses to peers.dat every 15 minutes (900s)
-static constexpr int DUMP_PEERS_INTERVAL = 15 * 60;
+// How often to dump addresses to peers.dat
+static constexpr std::chrono::minutes DUMP_PEERS_INTERVAL{15};
/** Number of DNS seeds to query when the number of connections is low. */
static constexpr int DNSSEEDS_TO_QUERY_AT_ONCE = 3;
@@ -555,9 +555,9 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap)
}
// Raw ping time is in microseconds, but show it to user as whole seconds (Bitcoin users should be well used to small numbers with many decimal places by now :)
- stats.dPingTime = (((double)nPingUsecTime) / 1e6);
- stats.dMinPing = (((double)nMinPingUsecTime) / 1e6);
- stats.dPingWait = (((double)nPingUsecWait) / 1e6);
+ stats.m_ping_usec = nPingUsecTime;
+ stats.m_min_ping_usec = nMinPingUsecTime;
+ stats.m_ping_wait_usec = nPingUsecWait;
// Leave string empty if addrLocal invalid (not filled in yet)
CService addrLocalUnlocked = GetAddrLocal();
@@ -718,6 +718,19 @@ CNetMessage V1TransportDeserializer::GetMessage(const CMessageHeader::MessageSta
return msg;
}
+void V1TransportSerializer::prepareForTransport(CSerializedNetMsg& msg, std::vector<unsigned char>& header) {
+ // create dbl-sha256 checksum
+ uint256 hash = Hash(msg.data.begin(), msg.data.end());
+
+ // create header
+ CMessageHeader hdr(Params().MessageStart(), msg.command.c_str(), msg.data.size());
+ memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE);
+
+ // serialize header
+ header.reserve(CMessageHeader::HEADER_SIZE);
+ CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, header, 0, hdr};
+}
+
size_t CConnman::SocketSendData(CNode *pnode) const EXCLUSIVE_LOCKS_REQUIRED(pnode->cs_vSend)
{
auto it = pnode->vSendMsg.begin();
@@ -2330,7 +2343,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
threadMessageHandler = std::thread(&TraceThread<std::function<void()> >, "msghand", std::function<void()>(std::bind(&CConnman::ThreadMessageHandler, this)));
// Dump network addresses
- scheduler.scheduleEvery(std::bind(&CConnman::DumpAddresses, this), DUMP_PEERS_INTERVAL * 1000);
+ scheduler.scheduleEvery([this] { DumpAddresses(); }, DUMP_PEERS_INTERVAL);
return true;
}
@@ -2705,6 +2718,7 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn
}
m_deserializer = MakeUnique<V1TransportDeserializer>(V1TransportDeserializer(Params().MessageStart(), SER_NETWORK, INIT_PROTO_VERSION));
+ m_serializer = MakeUnique<V1TransportSerializer>(V1TransportSerializer());
}
CNode::~CNode()
@@ -2720,16 +2734,12 @@ bool CConnman::NodeFullyConnected(const CNode* pnode)
void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg)
{
size_t nMessageSize = msg.data.size();
- size_t nTotalSize = nMessageSize + CMessageHeader::HEADER_SIZE;
LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.command), nMessageSize, pnode->GetId());
+ // make sure we use the appropriate network transport format
std::vector<unsigned char> serializedHeader;
- serializedHeader.reserve(CMessageHeader::HEADER_SIZE);
- uint256 hash = Hash(msg.data.data(), msg.data.data() + nMessageSize);
- CMessageHeader hdr(Params().MessageStart(), msg.command.c_str(), nMessageSize);
- memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE);
-
- CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, serializedHeader, 0, hdr};
+ pnode->m_serializer->prepareForTransport(msg, serializedHeader);
+ size_t nTotalSize = nMessageSize + serializedHeader.size();
size_t nBytesSent = 0;
{
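Note: in the net.cpp hunks above, DUMP_PEERS_INTERVAL becomes a std::chrono::minutes constant and scheduleEvery() now takes a lambda plus a typed duration, so the hand-written "* 1000" unit conversion disappears. A hedged sketch of why typed durations help (hypothetical scheduler function, not CScheduler):

    #include <chrono>
    #include <cstdio>
    #include <functional>

    // Sketch: a scheduleEvery-style signature taking a typed duration, so a
    // caller can pass minutes and the conversion to ms is done by the type
    // system instead of a manual factor.
    void ScheduleEvery(const std::function<void()>& f,
                       std::chrono::milliseconds every) {
      std::printf("scheduling every %lld ms\n",
                  static_cast<long long>(every.count()));
      f();  // A real scheduler would queue this; run once here.
    }

    int main() {
      constexpr std::chrono::minutes kDumpPeersInterval{15};
      ScheduleEvery([] { std::printf("DumpAddresses()\n"); },
                    kDumpPeersInterval);
      return 0;
    }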
diff --git a/src/net.h b/src/net.h
index 819947658a..975d7f15d7 100644
--- a/src/net.h
+++ b/src/net.h
@@ -596,9 +596,9 @@ public:
mapMsgCmdSize mapRecvBytesPerMsgCmd;
NetPermissionFlags m_permissionFlags;
bool m_legacyWhitelisted;
- double dPingTime;
- double dPingWait;
- double dMinPing;
+ int64_t m_ping_usec;
+ int64_t m_ping_wait_usec;
+ int64_t m_min_ping_usec;
CAmount minFeeFilter;
// Our address, as reported by the peer
std::string addrLocal;
@@ -703,12 +703,27 @@ public:
CNetMessage GetMessage(const CMessageHeader::MessageStartChars& message_start, int64_t time) override;
};
+/** The TransportSerializer prepares messages for the network transport
+ */
+class TransportSerializer {
+public:
+ // prepare message for transport (header construction, error-correction computation, payload encryption, etc.)
+ virtual void prepareForTransport(CSerializedNetMsg& msg, std::vector<unsigned char>& header) = 0;
+ virtual ~TransportSerializer() {}
+};
+
+class V1TransportSerializer : public TransportSerializer {
+public:
+ void prepareForTransport(CSerializedNetMsg& msg, std::vector<unsigned char>& header) override;
+};
+
/** Information about a peer */
class CNode
{
friend class CConnman;
public:
std::unique_ptr<TransportDeserializer> m_deserializer;
+ std::unique_ptr<TransportSerializer> m_serializer;
// socket
std::atomic<ServiceFlags> nServices{NODE_NONE};
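Note: the TransportSerializer declarations above pair with the net.cpp hunk earlier: V1TransportSerializer::prepareForTransport() builds the legacy v1 wire header from the network message start, the 12-byte command, the payload size, and the first four bytes of the payload's double-SHA256. A hedged sketch of that header layout with stand-in types (not the Bitcoin Core CMessageHeader/CVectorWriter machinery):

    #include <array>
    #include <cstdint>
    #include <cstring>
    #include <functional>
    #include <vector>

    // Sketch of the v1 header fields filled in by prepareForTransport().
    struct V1Header {
      uint8_t magic[4];       // network message start bytes
      char command[12];       // null-padded command name
      uint32_t payload_size;  // serialized little-endian on the wire
      uint8_t checksum[4];    // first 4 bytes of SHA256(SHA256(payload))
    };

    // The double-SHA256 is injected so the sketch stays self-contained.
    using Sha256dFn =
        std::function<std::array<uint8_t, 32>(const std::vector<uint8_t>&)>;

    V1Header MakeV1Header(const uint8_t magic[4], const char* command,
                          const std::vector<uint8_t>& payload,
                          const Sha256dFn& sha256d) {
      V1Header hdr{};
      std::memcpy(hdr.magic, magic, sizeof(hdr.magic));
      // Pads with zeros; assumes the command is shorter than 12 characters.
      std::strncpy(hdr.command, command, sizeof(hdr.command));
      hdr.payload_size = static_cast<uint32_t>(payload.size());
      const std::array<uint8_t, 32> hash = sha256d(payload);
      std::memcpy(hdr.checksum, hash.data(), sizeof(hdr.checksum));
      return hdr;
    }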
diff --git a/src/net_permissions.h b/src/net_permissions.h
index a06d2f544d..ad74848347 100644
--- a/src/net_permissions.h
+++ b/src/net_permissions.h
@@ -15,7 +15,7 @@ enum NetPermissionFlags
PF_BLOOMFILTER = (1U << 1),
// Relay and accept transactions from this peer, even if -blocksonly is true
PF_RELAY = (1U << 3),
- // Always relay transactions from this peer, even if already in mempool or rejected from policy
+ // Always relay transactions from this peer, even if already in mempool
// Keep parameter interaction: forcerelay implies relay
PF_FORCERELAY = (1U << 2) | PF_RELAY,
// Can't be banned for misbehavior
@@ -59,4 +59,4 @@ public:
CSubNet m_subnet;
};
-#endif // BITCOIN_NET_PERMISSIONS_H
\ No newline at end of file
+#endif // BITCOIN_NET_PERMISSIONS_H
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index cf4aee0647..ab430cbe19 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -26,7 +26,6 @@
#include <txmempool.h>
#include <util/system.h>
#include <util/strencodings.h>
-#include <util/validation.h>
#include <memory>
#include <typeinfo>
@@ -466,7 +465,7 @@ static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs
// returns false, still setting pit, if the block was already in flight from the same peer
// pit will only be valid as long as the same cs_main lock is being held
-static bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
+static bool MarkBlockAsInFlight(CTxMemPool& mempool, NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
CNodeState *state = State(nodeid);
assert(state != nullptr);
@@ -987,15 +986,6 @@ void Misbehaving(NodeId pnode, int howmuch, const std::string& message) EXCLUSIV
}
/**
- * Returns true if the given validation state result may result in a peer
- * banning/disconnecting us. We use this to determine which unaccepted
- * transactions from a whitelisted peer that we can safely relay.
- */
-static bool TxRelayMayResultInDisconnect(const TxValidationState& state) {
- return state.GetResult() == TxValidationResult::TX_CONSENSUS;
-}
-
-/**
* Potentially ban a node based on the contents of a BlockValidationState object
*
* @param[in] via_compact_block this bool is passed in because net_processing should
@@ -1064,10 +1054,9 @@ static bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& s
* Potentially ban a node based on the contents of a TxValidationState object
*
* @return Returns true if the peer was punished (probably disconnected)
- *
- * Changes here may need to be reflected in TxRelayMayResultInDisconnect().
*/
-static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = "") {
+static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = "")
+{
switch (state.GetResult()) {
case TxValidationResult::TX_RESULT_UNSET:
break;
@@ -1095,11 +1084,6 @@ static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state,
}
-
-
-
-
-
//////////////////////////////////////////////////////////////////////////////
//
// blockchain -> download logic notification
@@ -1118,8 +1102,11 @@ static bool BlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Para
(GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) < STALE_RELAY_AGE_LIMIT);
}
-PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CScheduler& scheduler)
- : connman(connmanIn), m_banman(banman), m_stale_tip_check_time(0)
+PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CScheduler& scheduler, CTxMemPool& pool)
+ : connman(connmanIn),
+ m_banman(banman),
+ m_mempool(pool),
+ m_stale_tip_check_time(0)
{
// Initialize global variables that cannot be constructed at startup.
recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
@@ -1140,14 +1127,14 @@ PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CS
// combine them in one function and schedule at the quicker (peer-eviction)
// timer.
static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
- scheduler.scheduleEvery(std::bind(&PeerLogicValidation::CheckForStaleTipAndEvictPeers, this, consensusParams), EXTRA_PEER_CHECK_INTERVAL * 1000);
+ scheduler.scheduleEvery([this, consensusParams] { this->CheckForStaleTipAndEvictPeers(consensusParams); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
}
/**
* Evict orphan txn pool entries (EraseOrphanTx) based on a newly connected
* block. Also save the time of the last tip update.
*/
-void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted)
+void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
{
{
LOCK(g_cs_orphans);
@@ -1330,7 +1317,7 @@ void PeerLogicValidation::BlockChecked(const CBlock& block, const BlockValidatio
//
-bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
switch (inv.type)
{
@@ -1447,7 +1434,7 @@ void static ProcessGetBlockData(CNode* pfrom, const CChainParams& chainparams, c
if (need_activate_chain) {
BlockValidationState state;
if (!ActivateBestChain(state, Params(), a_recent_block)) {
- LogPrint(BCLog::NET, "failed to activate chain (%s)\n", FormatStateMessage(state));
+ LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
}
}
@@ -1569,7 +1556,7 @@ void static ProcessGetBlockData(CNode* pfrom, const CChainParams& chainparams, c
}
}
-void static ProcessGetData(CNode* pfrom, const CChainParams& chainparams, CConnman* connman, const std::atomic<bool>& interruptMsgProc) LOCKS_EXCLUDED(cs_main)
+void static ProcessGetData(CNode* pfrom, const CChainParams& chainparams, CConnman* connman, const CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) LOCKS_EXCLUDED(cs_main)
{
AssertLockNotHeld(cs_main);
@@ -1682,7 +1669,7 @@ inline void static SendBlockTransactions(const CBlock& block, const BlockTransac
connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
}
-bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool via_compact_block)
+bool static ProcessHeadersMessage(CNode* pfrom, CConnman* connman, CTxMemPool& mempool, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool via_compact_block)
{
const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
size_t nCount = headers.size();
@@ -1810,7 +1797,7 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve
}
uint32_t nFetchFlags = GetFetchFlags(pfrom);
vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
- MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex);
+ MarkBlockAsInFlight(mempool, pfrom->GetId(), pindex->GetBlockHash(), pindex);
LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
pindex->GetBlockHash().ToString(), pfrom->GetId());
}
@@ -1864,7 +1851,7 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve
return true;
}
-void static ProcessOrphanTx(CConnman* connman, std::set<uint256>& orphan_work_set, std::list<CTransactionRef>& removed_txn) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans)
+void static ProcessOrphanTx(CConnman* connman, CTxMemPool& mempool, std::set<uint256>& orphan_work_set, std::list<CTransactionRef>& removed_txn) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans)
{
AssertLockHeld(cs_main);
AssertLockHeld(g_cs_orphans);
@@ -1924,7 +1911,7 @@ void static ProcessOrphanTx(CConnman* connman, std::set<uint256>& orphan_work_se
}
}
-bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman* connman, BanMan* banman, const std::atomic<bool>& interruptMsgProc)
+bool ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CTxMemPool& mempool, CConnman* connman, BanMan* banman, const std::atomic<bool>& interruptMsgProc)
{
LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->GetId());
if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0)
@@ -2276,7 +2263,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (interruptMsgProc)
return true;
- bool fAlreadyHave = AlreadyHave(inv);
+ bool fAlreadyHave = AlreadyHave(inv, mempool);
LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->GetId());
if (inv.type == MSG_TX) {
@@ -2327,7 +2314,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
}
pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
- ProcessGetData(pfrom, chainparams, connman, interruptMsgProc);
+ ProcessGetData(pfrom, chainparams, connman, mempool, interruptMsgProc);
return true;
}
@@ -2357,7 +2344,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
}
BlockValidationState state;
if (!ActivateBestChain(state, Params(), a_recent_block)) {
- LogPrint(BCLog::NET, "failed to activate chain (%s)\n", FormatStateMessage(state));
+ LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
}
}
@@ -2544,7 +2531,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
std::list<CTransactionRef> lRemovedTxn;
- if (!AlreadyHave(inv) &&
+ if (!AlreadyHave(inv, mempool) &&
AcceptToMemoryPool(mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */, 0 /* nAbsurdFee */)) {
mempool.check(&::ChainstateActive().CoinsTip());
RelayTransaction(tx.GetHash(), *connman);
@@ -2565,7 +2552,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
mempool.size(), mempool.DynamicMemoryUsage() / 1000);
// Recursively process any orphan transactions that depended on this one
- ProcessOrphanTx(connman, pfrom->orphan_work_set, lRemovedTxn);
+ ProcessOrphanTx(connman, mempool, pfrom->orphan_work_set, lRemovedTxn);
}
else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS)
{
@@ -2583,7 +2570,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
for (const CTxIn& txin : tx.vin) {
CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash);
pfrom->AddInventoryKnown(_inv);
- if (!AlreadyHave(_inv)) RequestTx(State(pfrom->GetId()), _inv.hash, current_time);
+ if (!AlreadyHave(_inv, mempool)) RequestTx(State(pfrom->GetId()), _inv.hash, current_time);
}
AddOrphanTx(ptx, pfrom->GetId());
@@ -2615,14 +2602,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (pfrom->HasPermission(PF_FORCERELAY)) {
// Always relay transactions received from whitelisted peers, even
- // if they were already in the mempool or rejected from it due
- // to policy, allowing the node to function as a gateway for
+ // if they were already in the mempool,
+ // allowing the node to function as a gateway for
// nodes hidden behind it.
- //
- // Never relay transactions that might result in being
- // disconnected (or banned).
- if (state.IsInvalid() && TxRelayMayResultInDisconnect(state)) {
- LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->GetId(), FormatStateMessage(state));
+ if (!mempool.exists(tx.GetHash())) {
+ LogPrintf("Not relaying non-mempool transaction %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->GetId());
} else {
LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->GetId());
RelayTransaction(tx.GetHash(), *connman);
@@ -2654,7 +2638,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
{
LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
pfrom->GetId(),
- FormatStateMessage(state));
+ state.ToString());
MaybePunishNodeForTx(pfrom->GetId(), state);
}
return true;
@@ -2761,7 +2745,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
(fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) {
std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
- if (!MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
+ if (!MarkBlockAsInFlight(mempool, pfrom->GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
if (!(*queuedBlockIt)->partialBlock)
(*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
else {
@@ -2834,7 +2818,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
} // cs_main
if (fProcessBLOCKTXN)
- return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman, banman, interruptMsgProc);
+ return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, mempool, connman, banman, interruptMsgProc);
if (fRevertToHeaderProcessing) {
// Headers received from HB compact block peers are permitted to be
@@ -2842,7 +2826,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
// the peer if the header turns out to be for an invalid block.
// Note that if a peer tries to build on an invalid chain, that
// will be detected and the peer will be banned.
- return ProcessHeadersMessage(pfrom, connman, {cmpctblock.header}, chainparams, /*via_compact_block=*/true);
+ return ProcessHeadersMessage(pfrom, connman, mempool, {cmpctblock.header}, chainparams, /*via_compact_block=*/true);
}
if (fBlockReconstructed) {
@@ -2986,7 +2970,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
}
- return ProcessHeadersMessage(pfrom, connman, headers, chainparams, /*via_compact_block=*/false);
+ return ProcessHeadersMessage(pfrom, connman, mempool, headers, chainparams, /*via_compact_block=*/false);
}
if (strCommand == NetMsgType::BLOCK)
@@ -3304,12 +3288,12 @@ bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& inter
bool fMoreWork = false;
if (!pfrom->vRecvGetData.empty())
- ProcessGetData(pfrom, chainparams, connman, interruptMsgProc);
+ ProcessGetData(pfrom, chainparams, connman, m_mempool, interruptMsgProc);
if (!pfrom->orphan_work_set.empty()) {
std::list<CTransactionRef> removed_txn;
LOCK2(cs_main, g_cs_orphans);
- ProcessOrphanTx(connman, pfrom->orphan_work_set, removed_txn);
+ ProcessOrphanTx(connman, m_mempool, pfrom->orphan_work_set, removed_txn);
for (const CTransactionRef& removedTx : removed_txn) {
AddToCompactExtraTransactions(removedTx);
}
@@ -3372,7 +3356,7 @@ bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& inter
bool fRet = false;
try
{
- fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.m_time, chainparams, connman, m_banman, interruptMsgProc);
+ fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.m_time, chainparams, m_mempool, connman, m_banman, interruptMsgProc);
if (interruptMsgProc)
return false;
if (!pfrom->vRecvGetData.empty())
@@ -3838,7 +3822,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
// Respond to BIP35 mempool requests
if (fSendTrickle && pto->m_tx_relay->fSendMempool) {
- auto vtxinfo = mempool.infoAll();
+ auto vtxinfo = m_mempool.infoAll();
pto->m_tx_relay->fSendMempool = false;
CFeeRate filterrate;
{
@@ -3884,7 +3868,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
}
// Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
// A heap is used so that not all items need sorting if only a few are being sent.
- CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
+ CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
// No reason to drain out at many times the network's capacity,
// especially since we have many peers and some will draw much shorter delays.
@@ -3903,7 +3887,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
continue;
}
// Not in the mempool anymore? don't bother sending it.
- auto txinfo = mempool.info(hash);
+ auto txinfo = m_mempool.info(hash);
if (!txinfo.tx) {
continue;
}
@@ -4015,7 +3999,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
for (const CBlockIndex *pindex : vToDownload) {
uint32_t nFetchFlags = GetFetchFlags(pto);
vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
- MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex);
+ MarkBlockAsInFlight(m_mempool, pto->GetId(), pindex->GetBlockHash(), pindex);
LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
pindex->nHeight, pto->GetId());
}
@@ -4058,7 +4042,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
// processing at a later time, see below)
tx_process_time.erase(tx_process_time.begin());
CInv inv(MSG_TX | GetFetchFlags(pto), txid);
- if (!AlreadyHave(inv)) {
+ if (!AlreadyHave(inv, m_mempool)) {
// If this transaction was last requested more than 1 minute ago,
// then request.
const auto last_request_time = GetTxRequestTime(inv.hash);
@@ -4096,7 +4080,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
// We don't want white listed peers to filter txs to us if we have -whitelistforcerelay
if (pto->m_tx_relay != nullptr && pto->nVersion >= FEEFILTER_VERSION && gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
!pto->HasPermission(PF_FORCERELAY)) {
- CAmount currentFilter = mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
+ CAmount currentFilter = m_mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
int64_t timeNow = GetTimeMicros();
if (timeNow > pto->m_tx_relay->nextSendTimeFeeFilter) {
static CFeeRate default_feerate(DEFAULT_MIN_RELAY_TX_FEE);
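The net_processing.cpp hunks above replace uses of the global mempool with an explicitly passed CTxMemPool reference (AlreadyHave, ProcessGetData, ProcessHeadersMessage, ProcessOrphanTx, ProcessMessage, MarkBlockAsInFlight, and PeerLogicValidation's own m_mempool). A minimal sketch of that pattern, using only the CTxMemPool::exists and CInv APIs already visible in the diff; the helper name is hypothetical and the real AlreadyHave() also consults orphan and recently-confirmed/rejected state:

    // Sketch only: the callee consults the pool it was handed instead of a global.
    static bool HaveTxInMempool(const CInv& inv, const CTxMemPool& pool)
    {
        // Keeps just the mempool lookup to show the injected dependency.
        return pool.exists(inv.hash);
    }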
diff --git a/src/net_processing.h b/src/net_processing.h
index 6f26abc209..65e3963c41 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -6,10 +6,12 @@
#ifndef BITCOIN_NET_PROCESSING_H
#define BITCOIN_NET_PROCESSING_H
-#include <net.h>
-#include <validationinterface.h>
#include <consensus/params.h>
+#include <net.h>
#include <sync.h>
+#include <validationinterface.h>
+
+class CTxMemPool;
extern RecursiveMutex cs_main;
@@ -23,16 +25,17 @@ class PeerLogicValidation final : public CValidationInterface, public NetEventsI
private:
CConnman* const connman;
BanMan* const m_banman;
+ CTxMemPool& m_mempool;
bool CheckIfBanned(CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
public:
- PeerLogicValidation(CConnman* connman, BanMan* banman, CScheduler& scheduler);
+ PeerLogicValidation(CConnman* connman, BanMan* banman, CScheduler& scheduler, CTxMemPool& pool);
/**
* Overridden from CValidationInterface.
*/
- void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected, const std::vector<CTransactionRef>& vtxConflicted) override;
+ void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override;
void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override;
/**
* Overridden from CValidationInterface.
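For reference, this is roughly how the new constructor parameter binds to the m_mempool reference member; a hedged sketch, not the actual net_processing.cpp definition, which also initializes other members and registers scheduler tasks:

    PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman,
                                             CScheduler& scheduler, CTxMemPool& pool)
        : connman(connmanIn), m_banman(banman), m_mempool(pool)
    {
        // The scheduler argument is used in the real constructor, e.g. to
        // schedule periodic peer eviction and stale-tip checks; omitted here.
    }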
diff --git a/src/netaddress.cpp b/src/netaddress.cpp
index 1cac57a817..228caf74a9 100644
--- a/src/netaddress.cpp
+++ b/src/netaddress.cpp
@@ -210,6 +210,11 @@ bool CNetAddr::IsRFC7343() const
return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x00 && (GetByte(12) & 0xF0) == 0x20);
}
+bool CNetAddr::IsHeNet() const
+{
+ return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x04 && GetByte(12) == 0x70);
+}
+
/**
* @returns Whether or not this is a dummy address that maps an onion address
* into IPv6.
@@ -516,7 +521,7 @@ std::vector<unsigned char> CNetAddr::GetGroup(const std::vector<bool> &asmap) co
} else if (IsTor()) {
nStartByte = 6;
nBits = 4;
- } else if (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x04 && GetByte(12) == 0x70) {
+ } else if (IsHeNet()) {
// for he.net, use /36 groups
nBits = 36;
} else {
diff --git a/src/netaddress.h b/src/netaddress.h
index b300b709f3..b7381c1eb4 100644
--- a/src/netaddress.h
+++ b/src/netaddress.h
@@ -45,7 +45,6 @@ class CNetAddr
*/
void SetRaw(Network network, const uint8_t *data);
- public:
bool SetInternal(const std::string& name);
bool SetSpecial(const std::string &strName); // for Tor addresses
@@ -66,6 +65,7 @@ class CNetAddr
bool IsRFC4862() const; // IPv6 autoconfig (FE80::/64)
bool IsRFC6052() const; // IPv6 well-known prefix for IPv4-embedded address (64:FF9B::/96)
bool IsRFC6145() const; // IPv6 IPv4-translated address (::FFFF:0:0:0/96) (actually defined in RFC2765)
+ bool IsHeNet() const; // IPv6 Hurricane Electric - https://he.net (2001:0470::/36)
bool IsTor() const;
bool IsLocal() const;
bool IsRoutable() const;
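IsHeNet() simply names the existing 2001:470::/36 (Hurricane Electric tunnel broker) prefix check so GetGroup() reads clearly. A small illustrative caller, assuming only the CNetAddr API shown above; the function itself is hypothetical:

    #include <netaddress.h>

    // he.net tunnel space is grouped as /36; other routable IPv6 keeps the
    // default /32 grouping GetGroup() uses when no asmap is loaded.
    int Ipv6GroupBits(const CNetAddr& addr)
    {
        return addr.IsHeNet() ? 36 : 32;
    }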
diff --git a/src/node/coinstats.cpp b/src/node/coinstats.cpp
index a818f06d51..641b2a5d9c 100644
--- a/src/node/coinstats.cpp
+++ b/src/node/coinstats.cpp
@@ -23,7 +23,7 @@ static void ApplyStats(CCoinsStats &stats, CHashWriter& ss, const uint256& hash,
for (const auto& output : outputs) {
ss << VARINT(output.first + 1);
ss << output.second.out.scriptPubKey;
- ss << VARINT(output.second.out.nValue, VarIntMode::NONNEGATIVE_SIGNED);
+ ss << VARINT_MODE(output.second.out.nValue, VarIntMode::NONNEGATIVE_SIGNED);
stats.nTransactionOutputs++;
stats.nTotalAmount += output.second.out.nValue;
stats.nBogoSize += 32 /* txid */ + 4 /* vout index */ + 4 /* height + coinbase */ + 8 /* amount */ +
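The VARINT_MODE change reflects the serializer split in which signed integers must state their mode explicitly; nValue is a signed 64-bit CAmount that is known to be non-negative here. A sketch under that assumption, using only the VARINT/VARINT_MODE macros seen above (the stream type and function name are illustrative):

    template <typename Stream>
    static void WriteOutputValue(Stream& ss, uint32_t vout_plus_one, int64_t nValue)
    {
        ss << VARINT(vout_plus_one);                                // unsigned: default mode
        ss << VARINT_MODE(nValue, VarIntMode::NONNEGATIVE_SIGNED);  // signed CAmount, never negative
    }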
diff --git a/src/node/context.cpp b/src/node/context.cpp
index 26a01420c8..5b19a41bd4 100644
--- a/src/node/context.cpp
+++ b/src/node/context.cpp
@@ -8,6 +8,7 @@
#include <interfaces/chain.h>
#include <net.h>
#include <net_processing.h>
+#include <scheduler.h>
NodeContext::NodeContext() {}
NodeContext::~NodeContext() {}
diff --git a/src/node/context.h b/src/node/context.h
index dab5b5d048..1c592b456b 100644
--- a/src/node/context.h
+++ b/src/node/context.h
@@ -10,6 +10,7 @@
class BanMan;
class CConnman;
+class CScheduler;
class CTxMemPool;
class PeerLogicValidation;
namespace interfaces {
@@ -34,6 +35,7 @@ struct NodeContext {
std::unique_ptr<BanMan> banman;
std::unique_ptr<interfaces::Chain> chain;
std::vector<std::unique_ptr<interfaces::ChainClient>> chain_clients;
+ std::unique_ptr<CScheduler> scheduler;
//! Declare default constructor and destructor that are not inline, so code
//! instantiating the NodeContext struct doesn't need to #include class
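NodeContext now owns the CScheduler, tying its lifetime to the other node objects rather than a bare global. A hedged sketch of the intended wiring (the real code lives in init.cpp and also starts the service thread):

    #include <node/context.h>
    #include <scheduler.h>
    #include <util/memory.h>

    void InitScheduler(NodeContext& node)
    {
        node.scheduler = MakeUnique<CScheduler>();
        // A dedicated thread would then run node.scheduler->serviceQueue()
        // until shutdown, when the unique_ptr is reset with the rest of node.
    }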
diff --git a/src/node/psbt.cpp b/src/node/psbt.cpp
index 8678b33cf3..5b16035f7d 100644
--- a/src/node/psbt.cpp
+++ b/src/node/psbt.cpp
@@ -18,9 +18,7 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx)
PSBTAnalysis result;
bool calc_fee = true;
- bool all_final = true;
- bool only_missing_sigs = true;
- bool only_missing_final = false;
+
CAmount in_amt = 0;
result.inputs.resize(psbtx.tx->vin.size());
@@ -29,6 +27,9 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx)
PSBTInput& input = psbtx.inputs[i];
PSBTInputAnalysis& input_analysis = result.inputs[i];
+ // We set next role here and ratchet backwards as required
+ input_analysis.next = PSBTRole::EXTRACTOR;
+
// Check for a UTXO
CTxOut utxo;
if (psbtx.GetInputUTXO(utxo, i)) {
@@ -57,7 +58,6 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx)
// Check if it is final
if (!utxo.IsNull() && !PSBTInputSigned(input)) {
input_analysis.is_final = false;
- all_final = false;
// Figure out what is missing
SignatureData outdata;
@@ -74,11 +74,9 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx)
if (outdata.missing_pubkeys.empty() && outdata.missing_redeem_script.IsNull() && outdata.missing_witness_script.IsNull() && !outdata.missing_sigs.empty()) {
input_analysis.next = PSBTRole::SIGNER;
} else {
- only_missing_sigs = false;
input_analysis.next = PSBTRole::UPDATER;
}
} else {
- only_missing_final = true;
input_analysis.next = PSBTRole::FINALIZER;
}
} else if (!utxo.IsNull()){
@@ -86,10 +84,14 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx)
}
}
- if (all_final) {
- only_missing_sigs = false;
- result.next = PSBTRole::EXTRACTOR;
+ // Calculate next role for PSBT by grabbing "minimum" PSBTInput next role
+ result.next = PSBTRole::EXTRACTOR;
+ for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) {
+ PSBTInputAnalysis& input_analysis = result.inputs[i];
+ result.next = std::min(result.next, input_analysis.next);
}
+ assert(result.next > PSBTRole::CREATOR);
+
if (calc_fee) {
// Get the output amount
CAmount out_amt = std::accumulate(psbtx.tx->vout.begin(), psbtx.tx->vout.end(), CAmount(0),
@@ -139,17 +141,6 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx)
result.estimated_feerate = feerate;
}
- if (only_missing_sigs) {
- result.next = PSBTRole::SIGNER;
- } else if (only_missing_final) {
- result.next = PSBTRole::FINALIZER;
- } else if (all_final) {
- result.next = PSBTRole::EXTRACTOR;
- } else {
- result.next = PSBTRole::UPDATER;
- }
- } else {
- result.next = PSBTRole::UPDATER;
}
return result;
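The per-input next-role bookkeeping replaces the old set of booleans: each input's analysis starts at EXTRACTOR and is ratcheted backwards as gaps are found, and the PSBT-wide role is simply the minimum across inputs. This relies on PSBTRole being declared in workflow order (CREATOR < UPDATER < SIGNER < FINALIZER < EXTRACTOR), which is an assumption of this illustrative helper:

    #include <psbt.h>
    #include <algorithm>
    #include <vector>

    // Not part of the patch; shows why std::min picks the overall next role.
    PSBTRole OverallNextRole(const std::vector<PSBTRole>& input_roles)
    {
        PSBTRole next = PSBTRole::EXTRACTOR;      // best case: every input is final
        for (const PSBTRole role : input_roles) {
            next = std::min(next, role);          // least-complete input wins
        }
        return next;
    }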
diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp
index 1bb9b88d00..201406ce3b 100644
--- a/src/node/transaction.cpp
+++ b/src/node/transaction.cpp
@@ -7,7 +7,6 @@
#include <net.h>
#include <net_processing.h>
#include <node/context.h>
-#include <util/validation.h>
#include <validation.h>
#include <validationinterface.h>
#include <node/transaction.h>
@@ -41,7 +40,7 @@ TransactionError BroadcastTransaction(NodeContext& node, const CTransactionRef t
TxValidationState state;
if (!AcceptToMemoryPool(*node.mempool, state, std::move(tx),
nullptr /* plTxnReplaced */, false /* bypass_limits */, max_tx_fee)) {
- err_string = FormatStateMessage(state);
+ err_string = state.ToString();
if (state.IsInvalid()) {
if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
return TransactionError::MISSING_INPUTS;
diff --git a/src/outputtype.cpp b/src/outputtype.cpp
index 567eecb5c9..71b5cba01c 100644
--- a/src/outputtype.cpp
+++ b/src/outputtype.cpp
@@ -82,30 +82,22 @@ CTxDestination AddAndGetDestinationForScript(FillableSigningProvider& keystore,
{
// Add script to keystore
keystore.AddCScript(script);
- ScriptHash sh(script);
// Note that scripts over 520 bytes are not yet supported.
switch (type) {
case OutputType::LEGACY:
- keystore.AddCScript(GetScriptForDestination(sh));
- return sh;
+ return ScriptHash(script);
case OutputType::P2SH_SEGWIT:
case OutputType::BECH32: {
CTxDestination witdest = WitnessV0ScriptHash(script);
CScript witprog = GetScriptForDestination(witdest);
// Check if the resulting program is solvable (i.e. doesn't use an uncompressed key)
- if (!IsSolvable(keystore, witprog)) {
- // Since the wsh is invalid, add and return the sh instead.
- keystore.AddCScript(GetScriptForDestination(sh));
- return sh;
- }
+ if (!IsSolvable(keystore, witprog)) return ScriptHash(script);
// Add the redeemscript, so that P2WSH and P2SH-P2WSH outputs are recognized as ours.
keystore.AddCScript(witprog);
if (type == OutputType::BECH32) {
return witdest;
} else {
- ScriptHash sh_w = ScriptHash(witprog);
- keystore.AddCScript(GetScriptForDestination(sh_w));
- return sh_w;
+ return ScriptHash(witprog);
}
}
default: assert(false);
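After this simplification, AddAndGetDestinationForScript only adds the scripts needed to solve the destination it actually returns (the script itself, plus the witness program for segwit types), instead of also stuffing legacy P2SH wrappers into the keystore. An illustrative caller, assuming the GetScriptForMultisig/FillableSigningProvider APIs; the wrapper function itself is hypothetical:

    #include <outputtype.h>
    #include <script/signingprovider.h>
    #include <script/standard.h>

    CTxDestination TwoOfThreeDestination(FillableSigningProvider& keystore,
                                         const std::vector<CPubKey>& pubkeys,
                                         OutputType type)
    {
        const CScript redeem_script = GetScriptForMultisig(2, pubkeys);
        // For BECH32 this returns the P2WSH destination and teaches the keystore
        // the witness program; for LEGACY it now just returns ScriptHash(script).
        return AddAndGetDestinationForScript(keystore, redeem_script, type);
    }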
diff --git a/src/prevector.h b/src/prevector.h
index f4ece738a8..6d690e7f96 100644
--- a/src/prevector.h
+++ b/src/prevector.h
@@ -15,7 +15,6 @@
#include <type_traits>
#include <utility>
-#pragma pack(push, 1)
/** Implements a drop-in replacement for std::vector<T> which stores up to N
* elements directly (without heap allocation). The types Size and Diff are
* used to store element counts, and can be any unsigned + signed type.
@@ -147,14 +146,20 @@ public:
};
private:
- size_type _size = 0;
+#pragma pack(push, 1)
union direct_or_indirect {
char direct[sizeof(T) * N];
struct {
- size_type capacity;
char* indirect;
+ size_type capacity;
};
- } _union = {};
+ };
+#pragma pack(pop)
+ alignas(char*) direct_or_indirect _union = {};
+ size_type _size = 0;
+
+ static_assert(alignof(char*) % alignof(size_type) == 0 && sizeof(char*) % alignof(size_type) == 0, "size_type cannot have more restrictive alignment requirement than pointer");
+ static_assert(alignof(char*) % alignof(T) == 0, "value_type T cannot have more restrictive alignment requirement than pointer");
T* direct_ptr(difference_type pos) { return reinterpret_cast<T*>(_union.direct) + pos; }
const T* direct_ptr(difference_type pos) const { return reinterpret_cast<const T*>(_union.direct) + pos; }
@@ -419,15 +424,20 @@ public:
return first;
}
- void push_back(const T& value) {
+ template<typename... Args>
+ void emplace_back(Args&&... args) {
size_type new_size = size() + 1;
if (capacity() < new_size) {
change_capacity(new_size + (new_size >> 1));
}
- new(item_ptr(size())) T(value);
+ new(item_ptr(size())) T(std::forward<Args>(args)...);
_size++;
}
+ void push_back(const T& value) {
+ emplace_back(value);
+ }
+
void pop_back() {
erase(end() - 1, end());
}
@@ -523,6 +533,5 @@ public:
return item_ptr(0);
}
};
-#pragma pack(pop)
#endif // BITCOIN_PREVECTOR_H
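Two things change in prevector: packing is now limited to the inner union (with explicit alignment static_asserts) rather than the whole class, and emplace_back() is added with push_back() delegating to it. A short usage sketch of the new member (names are illustrative):

    #include <prevector.h>

    void PrevectorDemo()
    {
        prevector<16, int> v;                               // first 16 elements live inline
        for (int i = 0; i < 20; ++i) v.emplace_back(i);     // constructs in place; spills to heap past N
        v.push_back(42);                                    // still works, now forwards to emplace_back
    }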
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index 5fab267610..2918676c22 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -1258,7 +1258,7 @@ void BitcoinGUI::updateWalletStatus()
}
WalletModel * const walletModel = walletView->getWalletModel();
setEncryptionStatus(walletModel->getEncryptionStatus());
- setHDStatus(walletModel->privateKeysDisabled(), walletModel->wallet().hdEnabled());
+ setHDStatus(walletModel->wallet().privateKeysDisabled(), walletModel->wallet().hdEnabled());
}
#endif // ENABLE_WALLET
diff --git a/src/qt/bitcoinstrings.cpp b/src/qt/bitcoinstrings.cpp
index 3d40ee7823..64900a4343 100644
--- a/src/qt/bitcoinstrings.cpp
+++ b/src/qt/bitcoinstrings.cpp
@@ -51,9 +51,6 @@ QT_TRANSLATE_NOOP("bitcoin-core", ""
"Prune: last wallet synchronisation goes beyond pruned data. You need to -"
"reindex (download the whole blockchain again in case of pruned node)"),
QT_TRANSLATE_NOOP("bitcoin-core", ""
-"Rescans are not possible in pruned mode. You will need to use -reindex which "
-"will download the whole blockchain again."),
-QT_TRANSLATE_NOOP("bitcoin-core", ""
"The block database contains a block which appears to be from the future. "
"This may be due to your computer's date and time being set incorrectly. Only "
"rebuild the block database if you are sure that your computer's date and "
@@ -69,10 +66,6 @@ QT_TRANSLATE_NOOP("bitcoin-core", ""
QT_TRANSLATE_NOOP("bitcoin-core", ""
"This is the transaction fee you may pay when fee estimates are not available."),
QT_TRANSLATE_NOOP("bitcoin-core", ""
-"This product includes software developed by the OpenSSL Project for use in "
-"the OpenSSL Toolkit %s and cryptographic software written by Eric Young and "
-"UPnP software written by Thomas Bernard."),
-QT_TRANSLATE_NOOP("bitcoin-core", ""
"Total length of network version string (%i) exceeds maximum length (%i). "
"Reduce the number or size of uacomments."),
QT_TRANSLATE_NOOP("bitcoin-core", ""
@@ -107,6 +100,8 @@ QT_TRANSLATE_NOOP("bitcoin-core", "Change index out of range"),
QT_TRANSLATE_NOOP("bitcoin-core", "Config setting for %s only applied on %s network when in [%s] section."),
QT_TRANSLATE_NOOP("bitcoin-core", "Copyright (C) %i-%i"),
QT_TRANSLATE_NOOP("bitcoin-core", "Corrupted block database detected"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Could not find asmap file %s"),
+QT_TRANSLATE_NOOP("bitcoin-core", "Could not parse asmap file %s"),
QT_TRANSLATE_NOOP("bitcoin-core", "Do you want to rebuild the block database now?"),
QT_TRANSLATE_NOOP("bitcoin-core", "Done loading"),
QT_TRANSLATE_NOOP("bitcoin-core", "Error initializing block database"),
diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp
index e8146982f9..a1ec3eaab1 100644
--- a/src/qt/clientmodel.cpp
+++ b/src/qt/clientmodel.cpp
@@ -242,8 +242,9 @@ static void BlockTipChanged(ClientModel *clientmodel, bool initialSync, int heig
clientmodel->cachedBestHeaderHeight = height;
clientmodel->cachedBestHeaderTime = blockTime;
}
- // if we are in-sync or if we notify a header update, update the UI regardless of last update time
- if (fHeader || !initialSync || now - nLastUpdateNotification > MODEL_UPDATE_DELAY) {
+
+ // During initial sync, block notifications and header notifications from reindexing are both throttled.
+ if (!initialSync || (fHeader && !clientmodel->node().getReindex()) || now - nLastUpdateNotification > MODEL_UPDATE_DELAY) {
//pass an async signal to the UI thread
bool invoked = QMetaObject::invokeMethod(clientmodel, "numBlocksChanged", Qt::QueuedConnection,
Q_ARG(int, height),
diff --git a/src/qt/forms/debugwindow.ui b/src/qt/forms/debugwindow.ui
index ebb6bbd4f5..8b70800838 100644
--- a/src/qt/forms/debugwindow.ui
+++ b/src/qt/forms/debugwindow.ui
@@ -216,17 +216,17 @@
</widget>
</item>
<item row="7" column="0">
- <widget class="QLabel" name="labelNetwork">
- <property name="font">
- <font>
- <weight>75</weight>
- <bold>true</bold>
- </font>
- </property>
- <property name="text">
- <string>Network</string>
- </property>
- </widget>
+ <widget class="QLabel" name="labelNetwork">
+ <property name="font">
+ <font>
+ <weight>75</weight>
+ <bold>true</bold>
+ </font>
+ </property>
+ <property name="text">
+ <string>Network</string>
+ </property>
+ </widget>
</item>
<item row="8" column="0">
<widget class="QLabel" name="label_8">
@@ -503,12 +503,12 @@
<height>24</height>
</size>
</property>
- <property name="text">
- <string/>
- </property>
<property name="toolTip">
<string>Decrease font size</string>
</property>
+ <property name="text">
+ <string/>
+ </property>
<property name="icon">
<iconset resource="../bitcoin.qrc">
<normaloff>:/icons/fontsmaller</normaloff>:/icons/fontsmaller</iconset>
@@ -652,12 +652,12 @@
</item>
<item>
<widget class="QLineEdit" name="lineEdit">
- <property name="placeholderText">
- <string/>
- </property>
<property name="enabled">
<bool>false</bool>
</property>
+ <property name="placeholderText">
+ <string/>
+ </property>
</widget>
</item>
</layout>
@@ -1503,6 +1503,32 @@
</widget>
</item>
<item row="18" column="0">
+ <widget class="QLabel" name="peerMappedASLabel">
+ <property name="toolTip">
+ <string>The mapped Autonomous System used for diversifying peer selection.</string>
+ </property>
+ <property name="text">
+ <string>Mapped AS</string>
+ </property>
+ </widget>
+ </item>
+ <item row="18" column="1">
+ <widget class="QLabel" name="peerMappedAS">
+ <property name="cursor">
+ <cursorShape>IBeamCursor</cursorShape>
+ </property>
+ <property name="text">
+ <string>N/A</string>
+ </property>
+ <property name="textFormat">
+ <enum>Qt::PlainText</enum>
+ </property>
+ <property name="textInteractionFlags">
+ <set>Qt::LinksAccessibleByMouse|Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse</set>
+ </property>
+ </widget>
+ </item>
+ <item row="19" column="0">
<spacer name="verticalSpacer_3">
<property name="orientation">
<enum>Qt::Vertical</enum>
diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp
index 911322092c..98dde1656a 100644
--- a/src/qt/guiutil.cpp
+++ b/src/qt/guiutil.cpp
@@ -773,9 +773,9 @@ QString formatServicesStr(quint64 mask)
return QObject::tr("None");
}
-QString formatPingTime(double dPingTime)
+QString formatPingTime(int64_t ping_usec)
{
- return (dPingTime == std::numeric_limits<int64_t>::max()/1e6 || dPingTime == 0) ? QObject::tr("N/A") : QString(QObject::tr("%1 ms")).arg(QString::number((int)(dPingTime * 1000), 10));
+ return (ping_usec == std::numeric_limits<int64_t>::max() || ping_usec == 0) ? QObject::tr("N/A") : QString(QObject::tr("%1 ms")).arg(QString::number((int)(ping_usec / 1000), 10));
}
QString formatTimeOffset(int64_t nTimeOffset)
diff --git a/src/qt/guiutil.h b/src/qt/guiutil.h
index 05e73cc5f0..e571262443 100644
--- a/src/qt/guiutil.h
+++ b/src/qt/guiutil.h
@@ -202,8 +202,8 @@ namespace GUIUtil
/* Format CNodeStats.nServices bitmask into a user-readable string */
QString formatServicesStr(quint64 mask);
- /* Format a CNodeCombinedStats.dPingTime into a user-readable string or display N/A, if 0*/
- QString formatPingTime(double dPingTime);
+ /* Format a CNodeStats.m_ping_usec into a user-readable string, or display N/A if it is 0 */
+ QString formatPingTime(int64_t ping_usec);
/* Format a CNodeCombinedStats.nTimeOffset into a user-readable string. */
QString formatTimeOffset(int64_t nTimeOffset);
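formatPingTime() now takes the raw microsecond count (CNodeStats.m_ping_usec) instead of a pre-scaled double, so the sentinel comparison is exact and the only division happens at display time. An illustrative call site, assuming the Qt/GUIUtil setup of the surrounding files; the helper is hypothetical:

    #include <qt/guiutil.h>
    #include <net.h>

    // 0 and the int64_t max "no sample yet" sentinel both render as "N/A";
    // anything else becomes whole milliseconds, e.g. 153000 -> "153 ms".
    QString PingLabel(const CNodeStats& stats)
    {
        return GUIUtil::formatPingTime(stats.m_ping_usec);
    }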
diff --git a/src/qt/locale/bitcoin_en.ts b/src/qt/locale/bitcoin_en.ts
index d34fd9eb45..2302226360 100644
--- a/src/qt/locale/bitcoin_en.ts
+++ b/src/qt/locale/bitcoin_en.ts
@@ -312,7 +312,7 @@
<context>
<name>BanTableModel</name>
<message>
- <location filename="../bantablemodel.cpp" line="+88"/>
+ <location filename="../bantablemodel.cpp" line="+86"/>
<source>IP/Netmask</source>
<translation type="unfinished"></translation>
</message>
@@ -330,12 +330,12 @@
<translation>Sign &amp;message...</translation>
</message>
<message>
- <location line="+623"/>
+ <location line="+630"/>
<source>Synchronizing with network...</source>
<translation>Synchronizing with network...</translation>
</message>
<message>
- <location line="-701"/>
+ <location line="-708"/>
<source>&amp;Overview</source>
<translation>&amp;Overview</translation>
</message>
@@ -420,17 +420,17 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+1"/>
+ <location line="+2"/>
<source>Create a new wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+190"/>
+ <location line="+191"/>
<source>Wallet:</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+334"/>
+ <location line="+339"/>
<source>Click to disable network activity.</source>
<translation type="unfinished"></translation>
</message>
@@ -460,7 +460,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-1021"/>
+ <location line="-1028"/>
<source>Send coins to a Bitcoin address</source>
<translation>Send coins to a Bitcoin address</translation>
</message>
@@ -475,17 +475,7 @@
<translation>Change the passphrase used for wallet encryption</translation>
</message>
<message>
- <location line="+6"/>
- <source>&amp;Debug window</source>
- <translation>&amp;Debug window</translation>
- </message>
- <message>
- <location line="+1"/>
- <source>Open debugging and diagnostic console</source>
- <translation>Open debugging and diagnostic console</translation>
- </message>
- <message>
- <location line="-4"/>
+ <location line="+3"/>
<source>&amp;Verify message...</source>
<translation>&amp;Verify message...</translation>
</message>
@@ -525,7 +515,7 @@
<translation>Verify messages to ensure they were signed with specified Bitcoin addresses</translation>
</message>
<message>
- <location line="+110"/>
+ <location line="+111"/>
<source>&amp;File</source>
<translation>&amp;File</translation>
</message>
@@ -535,7 +525,7 @@
<translation>&amp;Settings</translation>
</message>
<message>
- <location line="+58"/>
+ <location line="+59"/>
<source>&amp;Help</source>
<translation>&amp;Help</translation>
</message>
@@ -545,7 +535,7 @@
<translation>Tabs toolbar</translation>
</message>
<message>
- <location line="-256"/>
+ <location line="-258"/>
<source>Request payments (generates QR codes and bitcoin: URIs)</source>
<translation type="unfinished"></translation>
</message>
@@ -560,17 +550,12 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+3"/>
- <source>Open a bitcoin: URI or payment request</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+13"/>
+ <location line="+17"/>
<source>&amp;Command-line options</source>
<translation type="unfinished"></translation>
</message>
<message numerus="yes">
- <location line="+522"/>
+ <location line="+528"/>
<source>%n active connection(s) to Bitcoin network</source>
<translation>
<numerusform>%n active connection to Bitcoin network</numerusform>
@@ -631,7 +616,17 @@
<translation>Up to date</translation>
</message>
<message>
- <location line="-642"/>
+ <location line="-655"/>
+ <source>Node window</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Open node debugging and diagnostic console</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+5"/>
<source>&amp;Sending addresses</source>
<translation type="unfinished"></translation>
</message>
@@ -641,7 +636,12 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+6"/>
+ <location line="+4"/>
+ <source>Open a bitcoin: URI</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+2"/>
<source>Open Wallet</source>
<translation type="unfinished"></translation>
</message>
@@ -661,7 +661,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+7"/>
+ <location line="+8"/>
<source>Show the %1 help message to get a list with possible Bitcoin command-line options</source>
<translation type="unfinished"></translation>
</message>
@@ -696,7 +696,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+232"/>
+ <location line="+238"/>
<source>%1 client</source>
<translation type="unfinished"></translation>
</message>
@@ -792,7 +792,7 @@
<translation>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</translation>
</message>
<message>
- <location filename="../bitcoin.cpp" line="+386"/>
+ <location filename="../bitcoin.cpp" line="+384"/>
<source>A fatal error occurred. Bitcoin can no longer continue safely and will quit.</source>
<translation type="unfinished"></translation>
</message>
@@ -885,7 +885,7 @@
<translation type="unfinished">Confirmed</translation>
</message>
<message>
- <location filename="../coincontroldialog.cpp" line="+54"/>
+ <location filename="../coincontroldialog.cpp" line="+53"/>
<source>Copy address</source>
<translation type="unfinished"></translation>
</message>
@@ -990,7 +990,7 @@
<context>
<name>CreateWalletActivity</name>
<message>
- <location filename="../walletcontroller.cpp" line="+201"/>
+ <location filename="../walletcontroller.cpp" line="+209"/>
<source>Creating Wallet &lt;b&gt;%1&lt;/b&gt;...</source>
<translation type="unfinished"></translation>
</message>
@@ -1124,7 +1124,7 @@
<context>
<name>FreespaceChecker</name>
<message>
- <location filename="../intro.cpp" line="+73"/>
+ <location filename="../intro.cpp" line="+71"/>
<source>A new data directory will be created.</source>
<translation>A new data directory will be created.</translation>
</message>
@@ -1152,18 +1152,12 @@
<context>
<name>HelpMessageDialog</name>
<message>
- <location filename="../utilitydialog.cpp" line="+39"/>
+ <location filename="../utilitydialog.cpp" line="+35"/>
<source>version</source>
<translation type="unfinished">version</translation>
</message>
<message>
- <location line="+5"/>
- <location line="+2"/>
- <source>(%1-bit)</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+5"/>
+ <location line="+4"/>
<source>About %1</source>
<translation type="unfinished"></translation>
</message>
@@ -1221,27 +1215,27 @@
<translation>Use a custom data directory:</translation>
</message>
<message>
- <location filename="../intro.cpp" line="+22"/>
+ <location filename="../intro.cpp" line="+32"/>
<source>Bitcoin</source>
<translation type="unfinished">Bitcoin</translation>
</message>
<message>
- <location line="+9"/>
+ <location line="+8"/>
<source>Discard blocks after verification, except most recent %1 GB (prune)</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+2"/>
+ <location line="+212"/>
<source>At least %1 GB of data will be stored in this directory, and it will grow over time.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+5"/>
+ <location line="+3"/>
<source>Approximately %1 GB of data will be stored in this directory.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+8"/>
+ <location line="+4"/>
<source>%1 will download and store a copy of the Bitcoin block chain.</source>
<translation type="unfinished"></translation>
</message>
@@ -1251,7 +1245,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+78"/>
+ <location line="-137"/>
<source>Error: Specified data directory &quot;%1&quot; cannot be created.</source>
<translation type="unfinished"></translation>
</message>
@@ -1261,7 +1255,7 @@
<translation>Error</translation>
</message>
<message numerus="yes">
- <location line="+9"/>
+ <location line="+21"/>
<source>%n GB of free space available</source>
<translation>
<numerusform>%n GB of free space available</numerusform>
@@ -1269,7 +1263,7 @@
</translation>
</message>
<message numerus="yes">
- <location line="+3"/>
+ <location line="+2"/>
<source>(of %n GB needed)</source>
<translation>
<numerusform>(of %n GB needed)</numerusform>
@@ -1277,7 +1271,7 @@
</translation>
</message>
<message numerus="yes">
- <location line="+4"/>
+ <location line="+3"/>
<source>(%n GB needed for full chain)</source>
<translation type="unfinished">
<numerusform></numerusform>
@@ -1310,7 +1304,7 @@
<message>
<location line="+7"/>
<location line="+26"/>
- <location filename="../modaloverlay.cpp" line="+141"/>
+ <location filename="../modaloverlay.cpp" line="+145"/>
<source>Unknown...</source>
<translation type="unfinished"></translation>
</message>
@@ -1346,36 +1340,31 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location filename="../modaloverlay.cpp" line="+6"/>
- <source>Unknown. Syncing Headers (%1, %2%)...</source>
- <translation type="unfinished"></translation>
- </message>
-</context>
-<context>
- <name>OpenURIDialog</name>
- <message>
- <location filename="../forms/openuridialog.ui" line="+14"/>
- <source>Open URI</source>
+ <location line="+3"/>
+ <source>Esc</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+6"/>
- <source>Open payment request from URI or file</source>
+ <location filename="../modaloverlay.cpp" line="-111"/>
+ <source>%1 is currently syncing. It will download headers and blocks from peers and validate them until reaching the tip of the block chain.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+9"/>
- <source>URI:</source>
+ <location line="+117"/>
+ <source>Unknown. Syncing Headers (%1, %2%)...</source>
<translation type="unfinished"></translation>
</message>
+</context>
+<context>
+ <name>OpenURIDialog</name>
<message>
- <location line="+10"/>
- <source>Select payment request file</source>
+ <location filename="../forms/openuridialog.ui" line="+14"/>
+ <source>Open bitcoin URI</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location filename="../openuridialog.cpp" line="+45"/>
- <source>Select payment request file to open</source>
+ <location line="+8"/>
+ <source>URI:</source>
<translation type="unfinished"></translation>
</message>
</context>
@@ -1474,7 +1463,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+76"/>
+ <location line="+79"/>
<source>Open the %1 configuration file from the working directory.</source>
<translation type="unfinished"></translation>
</message>
@@ -1494,7 +1483,7 @@
<translation>&amp;Reset Options</translation>
</message>
<message>
- <location line="-529"/>
+ <location line="-532"/>
<source>&amp;Network</source>
<translation>&amp;Network</translation>
</message>
@@ -1682,7 +1671,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+41"/>
+ <location line="+44"/>
<source>Options set in this dialog are overridden by the command line or in the configuration file:</source>
<translation type="unfinished"></translation>
</message>
@@ -1702,7 +1691,7 @@
<translation>default</translation>
</message>
<message>
- <location line="+67"/>
+ <location line="+65"/>
<source>none</source>
<translation type="unfinished"></translation>
</message>
@@ -1850,65 +1839,48 @@
<context>
<name>PaymentServer</name>
<message>
- <location filename="../paymentserver.cpp" line="+226"/>
- <location line="+350"/>
- <location line="+42"/>
- <location line="+108"/>
- <location line="+14"/>
- <location line="+18"/>
+ <location filename="../paymentserver.cpp" line="+174"/>
<source>Payment request error</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-531"/>
+ <location line="+1"/>
<source>Cannot start bitcoin: click-to-pay handler</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+62"/>
- <location line="+9"/>
- <location line="+16"/>
- <location line="+16"/>
- <location line="+7"/>
+ <location line="+50"/>
+ <location line="+13"/>
+ <location line="+6"/>
<location line="+7"/>
<source>URI handling</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-55"/>
+ <location line="-26"/>
<source>&apos;bitcoin://&apos; is not a valid URI. Use &apos;bitcoin:&apos; instead.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+10"/>
- <source>You are using a BIP70 URL which will be unsupported in the future.</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+16"/>
- <source>Payment request fetch URL is invalid: %1</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+16"/>
- <location line="+38"/>
- <source>Cannot process payment request because BIP70 support was not compiled in.</source>
+ <location line="+14"/>
+ <location line="+23"/>
+ <source>Cannot process payment request because BIP70 is not supported.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-37"/>
- <location line="+38"/>
+ <location line="-22"/>
+ <location line="+23"/>
<source>Due to widespread security flaws in BIP70 it&apos;s strongly recommended that any merchant instructions to switch wallets be ignored.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-37"/>
- <location line="+38"/>
+ <location line="-22"/>
+ <location line="+23"/>
<source>If you are receiving this error you should request the merchant provide a BIP21 compatible URI.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-34"/>
+ <location line="-20"/>
<source>Invalid payment address %1</source>
<translation type="unfinished"></translation>
</message>
@@ -1918,97 +1890,15 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+14"/>
<location line="+9"/>
<source>Payment request file handling</source>
<translation type="unfinished"></translation>
</message>
- <message>
- <location line="-8"/>
- <source>Payment request file cannot be read! This can be caused by an invalid payment request file.</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+201"/>
- <location line="+9"/>
- <location line="+31"/>
- <location line="+10"/>
- <location line="+17"/>
- <location line="+83"/>
- <source>Payment request rejected</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="-150"/>
- <source>Payment request network doesn&apos;t match client network.</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+9"/>
- <source>Payment request expired.</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+6"/>
- <source>Payment request is not initialized.</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+26"/>
- <source>Unverified payment requests to custom payment scripts are unsupported.</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+9"/>
- <location line="+17"/>
- <source>Invalid payment request.</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="-10"/>
- <source>Requested payment amount of %1 is too small (considered dust).</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+63"/>
- <source>Refund from %1</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+31"/>
- <source>Payment request %1 is too large (%2 bytes, allowed %3 bytes).</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+9"/>
- <source>Error communicating with %1: %2</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+20"/>
- <source>Payment request cannot be parsed!</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+13"/>
- <source>Bad response from server %1</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+22"/>
- <source>Network request error</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+6"/>
- <source>Payment acknowledged</source>
- <translation type="unfinished"></translation>
- </message>
</context>
<context>
<name>PeerTableModel</name>
<message>
- <location filename="../peertablemodel.cpp" line="+110"/>
+ <location filename="../peertablemodel.cpp" line="+107"/>
<source>User Agent</source>
<translation type="unfinished"></translation>
</message>
@@ -2046,12 +1936,12 @@
<translation type="unfinished">Amount</translation>
</message>
<message>
- <location filename="../guiutil.cpp" line="+108"/>
+ <location filename="../guiutil.cpp" line="+111"/>
<source>Enter a Bitcoin address (e.g. %1)</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+699"/>
+ <location line="+618"/>
<source>%1 d</source>
<translation type="unfinished"></translation>
</message>
@@ -2067,7 +1957,7 @@
</message>
<message>
<location line="+2"/>
- <location line="+47"/>
+ <location line="+48"/>
<source>%1 s</source>
<translation type="unfinished"></translation>
</message>
@@ -2161,7 +2051,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location filename="../bitcoin.cpp" line="+118"/>
+ <location filename="../bitcoin.cpp" line="+114"/>
<source>Error: Specified data directory &quot;%1&quot; does not exist.</source>
<translation type="unfinished"></translation>
</message>
@@ -2176,7 +2066,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+64"/>
+ <location line="+65"/>
<source>%1 didn&apos;t yet exit safely...</source>
<translation type="unfinished"></translation>
</message>
@@ -2209,7 +2099,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+38"/>
+ <location line="+39"/>
<source>QR code support not available.</source>
<translation type="unfinished"></translation>
</message>
@@ -2271,12 +2161,7 @@
<translation>&amp;Information</translation>
</message>
<message>
- <location line="-29"/>
- <source>Debug window</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+44"/>
+ <location line="+15"/>
<source>General</source>
<translation type="unfinished"></translation>
</message>
@@ -2390,7 +2275,7 @@
<message>
<location line="+65"/>
<location filename="../rpcconsole.cpp" line="+497"/>
- <location line="+759"/>
+ <location line="+755"/>
<source>Select a peer to view detailed information.</source>
<translation type="unfinished"></translation>
</message>
@@ -2431,7 +2316,12 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-737"/>
+ <location line="-1146"/>
+ <source>Node window</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+409"/>
<source>Open the %1 debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"></translation>
</message>
@@ -2521,7 +2411,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location filename="../rpcconsole.cpp" line="-411"/>
+ <location filename="../rpcconsole.cpp" line="-407"/>
<source>In:</source>
<translation type="unfinished"></translation>
</message>
@@ -2609,7 +2499,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+70"/>
+ <location line="+66"/>
<source>Executing command without any wallet</source>
<translation type="unfinished"></translation>
</message>
@@ -2680,13 +2570,11 @@
</message>
<message>
<location line="-3"/>
- <location line="+46"/>
<source>An optional message to attach to the payment request, which will be displayed when the request is opened. Note: The message will not be sent with the payment over the Bitcoin network.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-30"/>
- <location line="+14"/>
+ <location line="+30"/>
<source>An optional label to associate with the new receiving address.</source>
<translation type="unfinished"></translation>
</message>
@@ -2702,7 +2590,17 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-76"/>
+ <location line="-121"/>
+ <source>An optional label to associate with the new receiving address (used by you to identify an invoice). It is also attached to the payment request.</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+30"/>
+ <source>An optional message that is attached to the payment request and may be displayed to the sender.</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+15"/>
<source>&amp;Create new receiving address</source>
<translation type="unfinished"></translation>
</message>
@@ -2752,7 +2650,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location filename="../receivecoinsdialog.cpp" line="+46"/>
+ <location filename="../receivecoinsdialog.cpp" line="+45"/>
<source>Copy URI</source>
<translation type="unfinished"></translation>
</message>
@@ -2795,7 +2693,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location filename="../receiverequestdialog.cpp" line="+63"/>
+ <location filename="../receiverequestdialog.cpp" line="+64"/>
<source>Request payment to %1</source>
<translation type="unfinished"></translation>
</message>
@@ -2877,7 +2775,7 @@
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
- <location filename="../sendcoinsdialog.cpp" line="+601"/>
+ <location filename="../sendcoinsdialog.cpp" line="+622"/>
<source>Send Coins</source>
<translation>Send Coins</translation>
</message>
@@ -2962,12 +2860,7 @@
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+26"/>
- <source>collapse fee-settings</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+51"/>
+ <location line="+77"/>
<source>Specify a custom fee per kB (1,000 bytes) of the transaction&apos;s virtual size.
Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satoshis per kB&quot; for a transaction size of 500 bytes (half of 1 kB) would ultimately yield a fee of only 50 satoshis.</source>
@@ -3019,7 +2912,12 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+543"/>
+ <location line="+457"/>
+ <source>Hide transaction fee settings</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+86"/>
<source>When there is less transaction volume than space in the blocks, miners as well as relaying nodes may enforce a minimum fee. Paying only this minimum fee is just fine, but be aware that this can result in a never confirming transaction once there is more demand for bitcoin transactions than the network can process.</source>
<translation type="unfinished"></translation>
</message>
@@ -3064,7 +2962,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation>S&amp;end</translation>
</message>
<message>
- <location filename="../sendcoinsdialog.cpp" line="-513"/>
+ <location filename="../sendcoinsdialog.cpp" line="-533"/>
<source>Copy quantity</source>
<translation type="unfinished"></translation>
</message>
@@ -3104,29 +3002,47 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+118"/>
+ <location line="+22"/>
+ <source>Cr&amp;eate Unsigned</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Creates a Partially Signed Bitcoin Transaction (PSBT) for use with e.g. an offline %1 wallet, or a PSBT-compatible hardware wallet.</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+100"/>
<source> from wallet &apos;%1&apos;</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+14"/>
<location line="+11"/>
<source>%1 to &apos;%2&apos;</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-6"/>
- <location line="+10"/>
+ <location line="+5"/>
<source>%1 to %2</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+7"/>
+ <location line="+8"/>
+ <source>Do you want to draft this transaction?</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+2"/>
<source>Are you sure you want to send?</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+42"/>
+ <location line="+5"/>
+ <source>Please, review your transaction proposal. This will produce a Partially Signed Bitcoin Transaction (PSBT) which you can copy and then sign with e.g. an offline %1 wallet, or a PSBT-compatible hardware wallet.</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+43"/>
<source>or</source>
<translation type="unfinished"></translation>
</message>
@@ -3136,12 +3052,12 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-21"/>
+ <location line="-22"/>
<source>Please, review your transaction.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+7"/>
+ <location line="+8"/>
<source>Transaction fee</source>
<translation type="unfinished"></translation>
</message>
@@ -3161,12 +3077,37 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+6"/>
+ <location line="+5"/>
<source>Confirm send coins</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+190"/>
+ <location line="+0"/>
+ <source>Confirm transaction proposal</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Copy PSBT to clipboard</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+0"/>
+ <source>Send</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+23"/>
+ <source>PSBT copied</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+166"/>
+ <source>Watch-only balance:</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+24"/>
<source>The recipient address is not valid. Please recheck.</source>
<translation type="unfinished"></translation>
</message>
@@ -3197,11 +3138,6 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
</message>
<message>
<location line="+4"/>
- <source>The transaction was rejected with the following reason: %1</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+4"/>
<source>A fee higher than %1 is considered an absurdly high fee.</source>
<translation type="unfinished"></translation>
</message>
@@ -3211,7 +3147,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message numerus="yes">
- <location line="+120"/>
+ <location line="+125"/>
<source>Estimated to begin confirmation within %n block(s).</source>
<translation>
<numerusform>Estimated to begin confirmation within %n block.</numerusform>
@@ -3248,13 +3184,13 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+155"/>
- <location line="+546"/>
+ <location line="+550"/>
<location line="+533"/>
<source>A&amp;mount:</source>
<translation>A&amp;mount:</translation>
</message>
<message>
- <location line="-1192"/>
+ <location line="-1199"/>
<source>Pay &amp;To:</source>
<translation>Pay &amp;To:</translation>
</message>
@@ -3269,12 +3205,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-46"/>
- <source>This is a normal payment.</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+39"/>
+ <location line="-7"/>
<source>The Bitcoin address to send the payment to</source>
<translation type="unfinished"></translation>
</message>
@@ -3295,13 +3226,18 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
</message>
<message>
<location line="+7"/>
- <location line="+555"/>
+ <location line="+562"/>
<location line="+533"/>
<source>Remove this entry</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-1028"/>
+ <location line="-1035"/>
+ <source>The amount to send in the selected unit</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+7"/>
<source>The fee will be deducted from the amount being sent. The recipient will receive less bitcoins than you enter in the amount field. If multiple recipients are selected, the fee is split equally.</source>
<translation type="unfinished"></translation>
</message>
@@ -3331,12 +3267,13 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-1016"/>
+ <location line="-1023"/>
+ <location line="+3"/>
<source>Enter a label for this address to add it to the list of used addresses</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+54"/>
+ <location line="+58"/>
<source>A message that was attached to the bitcoin: URI which will be stored with the transaction for your reference. Note: This message will not be sent over the Bitcoin network.</source>
<translation type="unfinished"></translation>
</message>
@@ -3352,20 +3289,6 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<source>Memo:</source>
<translation type="unfinished"></translation>
</message>
- <message>
- <location filename="../sendcoinsentry.cpp" line="+39"/>
- <source>Enter a label for this address to add it to your address book</source>
- <translation type="unfinished"></translation>
- </message>
-</context>
-<context>
- <name>SendConfirmationDialog</name>
- <message>
- <location filename="../sendcoinsdialog.cpp" line="+88"/>
- <location line="+5"/>
- <source>Yes</source>
- <translation type="unfinished"></translation>
- </message>
</context>
<context>
<name>ShutdownWindow</name>
@@ -3404,18 +3327,18 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
</message>
<message>
<location line="+7"/>
- <location line="+210"/>
+ <location line="+216"/>
<source>Choose previously used address</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-200"/>
- <location line="+210"/>
+ <location line="-206"/>
+ <location line="+216"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
- <location line="-200"/>
+ <location line="-206"/>
<source>Paste address from clipboard</source>
<translation>Paste address from clipboard</translation>
</message>
@@ -3426,6 +3349,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
</message>
<message>
<location line="+12"/>
+ <location line="+3"/>
<source>Enter the message you want to sign here</source>
<translation>Enter the message you want to sign here</translation>
</message>
@@ -3435,7 +3359,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation>Signature</translation>
</message>
<message>
- <location line="+27"/>
+ <location line="+30"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Copy the current signature to the system clipboard</translation>
</message>
@@ -3456,12 +3380,12 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
</message>
<message>
<location line="+3"/>
- <location line="+143"/>
+ <location line="+157"/>
<source>Clear &amp;All</source>
<translation>Clear &amp;All</translation>
</message>
<message>
- <location line="-84"/>
+ <location line="-98"/>
<source>&amp;Verify Message</source>
<translation>&amp;Verify Message</translation>
</message>
@@ -3476,7 +3400,19 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+37"/>
+ <location line="+29"/>
+ <location line="+3"/>
+ <source>The signed message to verify</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+7"/>
+ <location line="+3"/>
+ <source>The signature given when the message was signed</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+9"/>
<source>Verify the message to ensure it was signed with the specified Bitcoin address</source>
<translation>Verify the message to ensure it was signed with the specified Bitcoin address</translation>
</message>
@@ -3491,63 +3427,68 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation>Reset all verify message fields</translation>
</message>
<message>
- <location filename="../signverifymessagedialog.cpp" line="+39"/>
+ <location line="-210"/>
<source>Click &quot;Sign Message&quot; to generate signature</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+81"/>
- <location line="+78"/>
+ <location filename="../signverifymessagedialog.cpp" line="+117"/>
+ <location line="+99"/>
<source>The entered address is invalid.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-78"/>
+ <location line="-99"/>
+ <location line="+7"/>
+ <location line="+93"/>
<location line="+7"/>
- <location line="+71"/>
- <location line="+6"/>
<source>Please check the address and try again.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-77"/>
- <location line="+77"/>
+ <location line="-100"/>
+ <location line="+99"/>
<source>The entered address does not refer to a key.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-69"/>
+ <location line="-91"/>
<source>Wallet unlock was cancelled.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+8"/>
+ <location line="+11"/>
+ <source>No error</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+3"/>
<source>Private key for the entered address is not available.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+12"/>
+ <location line="+3"/>
<source>Message signing failed.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+5"/>
+ <location line="+12"/>
<source>Message signed.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+55"/>
+ <location line="+69"/>
<source>The signature could not be decoded.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+0"/>
- <location line="+13"/>
+ <location line="+1"/>
+ <location line="+7"/>
<source>Please check the signature and try again.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+0"/>
+ <location line="-1"/>
<source>The signature did not match the message digest.</source>
<translation type="unfinished"></translation>
</message>
@@ -3557,7 +3498,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+5"/>
+ <location line="-32"/>
<source>Message verified.</source>
<translation type="unfinished"></translation>
</message>
@@ -3621,7 +3562,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+22"/>
+ <location line="+50"/>
<source>Status</source>
<translation type="unfinished"></translation>
</message>
@@ -3756,7 +3697,12 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+19"/>
+ <location line="+16"/>
+ <source> (Certificate was not verified)</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+3"/>
<source>Merchant</source>
<translation type="unfinished"></translation>
</message>
@@ -3907,7 +3853,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+15"/>
+ <location line="+16"/>
<source>(n/a)</source>
<translation type="unfinished"></translation>
</message>
@@ -4157,7 +4103,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<context>
<name>WalletController</name>
<message>
- <location filename="../walletcontroller.cpp" line="-205"/>
+ <location filename="../walletcontroller.cpp" line="-211"/>
<source>Close wallet</source>
<translation type="unfinished"></translation>
</message>
@@ -4175,7 +4121,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<context>
<name>WalletFrame</name>
<message>
- <location filename="../walletframe.cpp" line="+29"/>
+ <location filename="../walletframe.cpp" line="+28"/>
<source>No wallet has been loaded.</source>
<translation type="unfinished"></translation>
</message>
@@ -4183,28 +4129,34 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<context>
<name>WalletModel</name>
<message>
- <location filename="../walletmodel.cpp" line="+219"/>
+ <location filename="../walletmodel.cpp" line="+198"/>
<source>Send Coins</source>
<translation type="unfinished">Send Coins</translation>
</message>
<message>
- <location line="+309"/>
- <location line="+39"/>
+ <location line="+288"/>
+ <location line="+45"/>
+ <location line="+13"/>
<location line="+5"/>
<source>Fee bump error</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-44"/>
+ <location line="-63"/>
<source>Increasing transaction fee failed</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+6"/>
+ <location line="+8"/>
<source>Do you want to increase the fee?</source>
<translation type="unfinished"></translation>
</message>
<message>
+ <location line="+0"/>
+ <source>Do you want to draft a transaction with fee increase?</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
<location line="+4"/>
<source>Current fee:</source>
<translation type="unfinished"></translation>
@@ -4225,7 +4177,17 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+17"/>
+ <location line="+21"/>
+ <source>Can&apos;t draft transaction.</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+7"/>
+ <source>PSBT copied</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+6"/>
<source>Can&apos;t sign transaction.</source>
<translation type="unfinished"></translation>
</message>
@@ -4243,7 +4205,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<context>
<name>WalletView</name>
<message>
- <location filename="../walletview.cpp" line="+47"/>
+ <location filename="../walletview.cpp" line="+46"/>
<source>&amp;Export</source>
<translation type="unfinished">&amp;Export</translation>
</message>
@@ -4253,7 +4215,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished">Export the data in the current tab to a file</translation>
</message>
<message>
- <location line="+206"/>
+ <location line="+182"/>
<source>Backup Wallet</source>
<translation type="unfinished"></translation>
</message>
@@ -4306,12 +4268,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+3"/>
- <source>Rescans are not possible in pruned mode. You will need to use -reindex which will download the whole blockchain again.</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+70"/>
+ <location line="+68"/>
<source>Error: A fatal internal error occurred, see debug.log for details</source>
<translation type="unfinished"></translation>
</message>
@@ -4326,7 +4283,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-167"/>
+ <location line="-162"/>
<source>The %s developers</source>
<translation type="unfinished"></translation>
</message>
@@ -4361,7 +4318,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+11"/>
+ <location line="+8"/>
<source>The block database contains a block which appears to be from the future. This may be due to your computer&apos;s date and time being set incorrectly. Only rebuild the block database if you are sure that your computer&apos;s date and time are correct</source>
<translation type="unfinished"></translation>
</message>
@@ -4376,7 +4333,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+12"/>
+ <location line="+8"/>
<source>Unable to replay blocks. You will need to rebuild the database using -reindex-chainstate.</source>
<translation type="unfinished"></translation>
</message>
@@ -4437,6 +4394,16 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
</message>
<message>
<location line="+1"/>
+ <source>Could not find asmap file %s</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
+ <source>Could not parse asmap file %s</source>
+ <translation type="unfinished"></translation>
+ </message>
+ <message>
+ <location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation>Do you want to rebuild the block database now?</translation>
</message>
@@ -4631,7 +4598,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-156"/>
+ <location line="-151"/>
<source>Error: Listening for incoming connections failed (listen returned error %s)</source>
<translation type="unfinished"></translation>
</message>
@@ -4641,17 +4608,17 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+22"/>
+ <location line="+19"/>
<source>The transaction amount is too small to send after the fee has been deducted</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+35"/>
+ <location line="+31"/>
<source>You need to rebuild the database using -reindex to go back to unpruned mode. This will redownload the entire blockchain</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+25"/>
+ <location line="+27"/>
<source>Error reading from database, shutting down.</source>
<translation type="unfinished"></translation>
</message>
@@ -4787,22 +4754,17 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="-179"/>
+ <location line="-174"/>
<source>-maxtxfee is set very high! Fees this large could be paid on a single transaction.</source>
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+56"/>
+ <location line="+53"/>
<source>This is the transaction fee you may pay when fee estimates are not available.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
- <source>This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit %s and cryptographic software written by Eric Young and UPnP software written by Thomas Bernard.</source>
- <translation type="unfinished"></translation>
- </message>
- <message>
- <location line="+4"/>
<source>Total length of network version string (%i) exceeds maximum length (%i). Reduce the number or size of uacomments.</source>
<translation type="unfinished"></translation>
</message>
@@ -4817,7 +4779,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+18"/>
+ <location line="+20"/>
<source>Error loading wallet %s. Duplicate -wallet filename specified.</source>
<translation type="unfinished"></translation>
</message>
@@ -4867,7 +4829,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation>Insufficient funds</translation>
</message>
<message>
- <location line="-107"/>
+ <location line="-102"/>
<source>Cannot upgrade a non HD split wallet without upgrading to support pre split keypool. Please use -upgradewallet=169900 or -upgradewallet with no version specified.</source>
<translation type="unfinished"></translation>
</message>
@@ -4877,7 +4839,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+48"/>
+ <location line="+41"/>
<source>Warning: Private keys detected in wallet {%s} with disabled private keys</source>
<translation type="unfinished"></translation>
</message>
@@ -4887,7 +4849,7 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation type="unfinished"></translation>
</message>
<message>
- <location line="+37"/>
+ <location line="+39"/>
<source>Loading block index...</source>
<translation>Loading block index...</translation>
</message>
@@ -4897,12 +4859,12 @@ Note: Since the fee is calculated on a per-byte basis, a fee of &quot;100 satos
<translation>Loading wallet...</translation>
</message>
<message>
- <location line="-40"/>
+ <location line="-42"/>
<source>Cannot downgrade wallet</source>
<translation>Cannot downgrade wallet</translation>
</message>
<message>
- <location line="+49"/>
+ <location line="+51"/>
<source>Rescanning...</source>
<translation>Rescanning...</translation>
</message>
diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp
index 977076c4c2..58a7591c95 100644
--- a/src/qt/optionsmodel.cpp
+++ b/src/qt/optionsmodel.cpp
@@ -17,6 +17,7 @@
#include <net.h>
#include <netbase.h>
#include <txdb.h> // for -dbcache defaults
+#include <util/string.h>
#include <QDebug>
#include <QSettings>
@@ -241,7 +242,7 @@ void OptionsModel::SetPruneEnabled(bool prune, bool force)
QSettings settings;
settings.setValue("bPrune", prune);
const int64_t prune_target_mib = PruneGBtoMiB(settings.value("nPruneSize").toInt());
- std::string prune_val = prune ? std::to_string(prune_target_mib) : "0";
+ std::string prune_val = prune ? ToString(prune_target_mib) : "0";
if (force) {
m_node.forceSetArg("-prune", prune_val);
return;
diff --git a/src/qt/overviewpage.cpp b/src/qt/overviewpage.cpp
index 342c7cce31..c376921b72 100644
--- a/src/qt/overviewpage.cpp
+++ b/src/qt/overviewpage.cpp
@@ -161,7 +161,7 @@ void OverviewPage::setBalance(const interfaces::WalletBalances& balances)
{
int unit = walletModel->getOptionsModel()->getDisplayUnit();
m_balances = balances;
- if (walletModel->privateKeysDisabled()) {
+ if (walletModel->wallet().privateKeysDisabled()) {
ui->labelBalance->setText(BitcoinUnits::formatWithUnit(unit, balances.watch_only_balance, false, BitcoinUnits::separatorAlways));
ui->labelUnconfirmed->setText(BitcoinUnits::formatWithUnit(unit, balances.unconfirmed_watch_only_balance, false, BitcoinUnits::separatorAlways));
ui->labelImmature->setText(BitcoinUnits::formatWithUnit(unit, balances.immature_watch_only_balance, false, BitcoinUnits::separatorAlways));
@@ -184,7 +184,7 @@ void OverviewPage::setBalance(const interfaces::WalletBalances& balances)
// for symmetry reasons also show immature label when the watch-only one is shown
ui->labelImmature->setVisible(showImmature || showWatchOnlyImmature);
ui->labelImmatureText->setVisible(showImmature || showWatchOnlyImmature);
- ui->labelWatchImmature->setVisible(!walletModel->privateKeysDisabled() && showWatchOnlyImmature); // show watch-only immature balance
+ ui->labelWatchImmature->setVisible(!walletModel->wallet().privateKeysDisabled() && showWatchOnlyImmature); // show watch-only immature balance
}
// show/hide watch-only labels
@@ -236,9 +236,9 @@ void OverviewPage::setWalletModel(WalletModel *model)
connect(model->getOptionsModel(), &OptionsModel::displayUnitChanged, this, &OverviewPage::updateDisplayUnit);
- updateWatchOnlyLabels(wallet.haveWatchOnly() && !model->privateKeysDisabled());
+ updateWatchOnlyLabels(wallet.haveWatchOnly() && !model->wallet().privateKeysDisabled());
connect(model, &WalletModel::notifyWatchonlyChanged, [this](bool showWatchOnly) {
- updateWatchOnlyLabels(showWatchOnly && !walletModel->privateKeysDisabled());
+ updateWatchOnlyLabels(showWatchOnly && !walletModel->wallet().privateKeysDisabled());
});
}
diff --git a/src/qt/peertablemodel.cpp b/src/qt/peertablemodel.cpp
index a497f58b16..a1fc791536 100644
--- a/src/qt/peertablemodel.cpp
+++ b/src/qt/peertablemodel.cpp
@@ -32,7 +32,7 @@ bool NodeLessThan::operator()(const CNodeCombinedStats &left, const CNodeCombine
case PeerTableModel::Subversion:
return pLeft->cleanSubVer.compare(pRight->cleanSubVer) < 0;
case PeerTableModel::Ping:
- return pLeft->dMinPing < pRight->dMinPing;
+ return pLeft->m_min_ping_usec < pRight->m_min_ping_usec;
case PeerTableModel::Sent:
return pLeft->nSendBytes < pRight->nSendBytes;
case PeerTableModel::Received:
@@ -161,7 +161,7 @@ QVariant PeerTableModel::data(const QModelIndex &index, int role) const
case Subversion:
return QString::fromStdString(rec->nodeStats.cleanSubVer);
case Ping:
- return GUIUtil::formatPingTime(rec->nodeStats.dMinPing);
+ return GUIUtil::formatPingTime(rec->nodeStats.m_min_ping_usec);
case Sent:
return GUIUtil::formatBytes(rec->nodeStats.nSendBytes);
case Received:
diff --git a/src/qt/receivecoinsdialog.cpp b/src/qt/receivecoinsdialog.cpp
index 16597e4758..180550c5ae 100644
--- a/src/qt/receivecoinsdialog.cpp
+++ b/src/qt/receivecoinsdialog.cpp
@@ -99,11 +99,11 @@ void ReceiveCoinsDialog::setModel(WalletModel *_model)
}
// Set the button to be enabled or disabled based on whether the wallet can give out new addresses.
- ui->receiveButton->setEnabled(model->canGetAddresses());
+ ui->receiveButton->setEnabled(model->wallet().canGetAddresses());
// Enable/disable the receive button if the wallet is now able/unable to give out new addresses.
connect(model, &WalletModel::canGetAddressesChanged, [this] {
- ui->receiveButton->setEnabled(model->canGetAddresses());
+ ui->receiveButton->setEnabled(model->wallet().canGetAddresses());
});
}
}
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index e1f783b0e5..0ffdc892c5 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -1109,15 +1109,16 @@ void RPCConsole::updateNodeDetail(const CNodeCombinedStats *stats)
ui->peerBytesSent->setText(GUIUtil::formatBytes(stats->nodeStats.nSendBytes));
ui->peerBytesRecv->setText(GUIUtil::formatBytes(stats->nodeStats.nRecvBytes));
ui->peerConnTime->setText(GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nTimeConnected));
- ui->peerPingTime->setText(GUIUtil::formatPingTime(stats->nodeStats.dPingTime));
- ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStats.dPingWait));
- ui->peerMinPing->setText(GUIUtil::formatPingTime(stats->nodeStats.dMinPing));
+ ui->peerPingTime->setText(GUIUtil::formatPingTime(stats->nodeStats.m_ping_usec));
+ ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStats.m_ping_wait_usec));
+ ui->peerMinPing->setText(GUIUtil::formatPingTime(stats->nodeStats.m_min_ping_usec));
ui->timeoffset->setText(GUIUtil::formatTimeOffset(stats->nodeStats.nTimeOffset));
- ui->peerVersion->setText(QString("%1").arg(QString::number(stats->nodeStats.nVersion)));
+ ui->peerVersion->setText(QString::number(stats->nodeStats.nVersion));
ui->peerSubversion->setText(QString::fromStdString(stats->nodeStats.cleanSubVer));
ui->peerDirection->setText(stats->nodeStats.fInbound ? tr("Inbound") : tr("Outbound"));
- ui->peerHeight->setText(QString("%1").arg(QString::number(stats->nodeStats.nStartingHeight)));
+ ui->peerHeight->setText(QString::number(stats->nodeStats.nStartingHeight));
ui->peerWhitelisted->setText(stats->nodeStats.m_legacyWhitelisted ? tr("Yes") : tr("No"));
+ ui->peerMappedAS->setText(stats->nodeStats.m_mapped_as != 0 ? QString::number(stats->nodeStats.m_mapped_as) : tr("N/A"));
// This check fails for example if the lock was busy and
// nodeStateStats couldn't be fetched.
@@ -1191,7 +1192,7 @@ void RPCConsole::disconnectSelectedNode()
// Get currently selected peer address
NodeId id = nodes.at(i).data().toLongLong();
// Find the node, disconnect it and clear the selected node
- if(m_node.disconnect(id))
+ if(m_node.disconnectById(id))
clearSelectedNode();
}
}
@@ -1216,7 +1217,7 @@ void RPCConsole::banSelectedNode(int bantime)
const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(detailNodeRow);
if (stats) {
m_node.ban(stats->nodeStats.addr, BanReasonManuallyAdded, bantime);
- m_node.disconnect(stats->nodeStats.addr);
+ m_node.disconnectByAddress(stats->nodeStats.addr);
}
}
clearSelectedNode();
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index cc01aafb23..a8c82aaf6c 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -26,7 +26,6 @@
#include <ui_interface.h>
#include <wallet/coincontrol.h>
#include <wallet/fees.h>
-#include <wallet/psbtwallet.h>
#include <wallet/wallet.h>
#include <QFontMetrics>
@@ -188,7 +187,7 @@ void SendCoinsDialog::setModel(WalletModel *_model)
// set default rbf checkbox state
ui->optInRBF->setCheckState(Qt::Checked);
- if (model->privateKeysDisabled()) {
+ if (model->wallet().privateKeysDisabled()) {
ui->sendButton->setText(tr("Cr&eate Unsigned"));
ui->sendButton->setToolTip(tr("Creates a Partially Signed Bitcoin Transaction (PSBT) for use with e.g. an offline %1 wallet, or a PSBT-compatible hardware wallet.").arg(PACKAGE_NAME));
}
@@ -313,14 +312,14 @@ void SendCoinsDialog::on_sendButton_clicked()
}
QString questionString;
- if (model->privateKeysDisabled()) {
+ if (model->wallet().privateKeysDisabled()) {
questionString.append(tr("Do you want to draft this transaction?"));
} else {
questionString.append(tr("Are you sure you want to send?"));
}
questionString.append("<br /><span style='font-size:10pt;'>");
- if (model->privateKeysDisabled()) {
+ if (model->wallet().privateKeysDisabled()) {
questionString.append(tr("Please, review your transaction proposal. This will produce a Partially Signed Bitcoin Transaction (PSBT) which you can copy and then sign with e.g. an offline %1 wallet, or a PSBT-compatible hardware wallet.").arg(PACKAGE_NAME));
} else {
questionString.append(tr("Please, review your transaction."));
@@ -375,8 +374,8 @@ void SendCoinsDialog::on_sendButton_clicked()
} else {
questionString = questionString.arg("<br /><br />" + formatted.at(0));
}
- const QString confirmation = model->privateKeysDisabled() ? tr("Confirm transaction proposal") : tr("Confirm send coins");
- const QString confirmButtonText = model->privateKeysDisabled() ? tr("Copy PSBT to clipboard") : tr("Send");
+ const QString confirmation = model->wallet().privateKeysDisabled() ? tr("Confirm transaction proposal") : tr("Confirm send coins");
+ const QString confirmButtonText = model->wallet().privateKeysDisabled() ? tr("Copy PSBT to clipboard") : tr("Send");
SendConfirmationDialog confirmationDialog(confirmation, questionString, informative_text, detailed_text, SEND_CONFIRM_DELAY, confirmButtonText, this);
confirmationDialog.exec();
QMessageBox::StandardButton retval = static_cast<QMessageBox::StandardButton>(confirmationDialog.result());
@@ -388,11 +387,11 @@ void SendCoinsDialog::on_sendButton_clicked()
}
bool send_failure = false;
- if (model->privateKeysDisabled()) {
+ if (model->wallet().privateKeysDisabled()) {
CMutableTransaction mtx = CMutableTransaction{*(currentTransaction.getWtx())};
PartiallySignedTransaction psbtx(mtx);
bool complete = false;
- const TransactionError err = model->wallet().fillPSBT(psbtx, complete, SIGHASH_ALL, false /* sign */, true /* bip32derivs */);
+ const TransactionError err = model->wallet().fillPSBT(SIGHASH_ALL, false /* sign */, true /* bip32derivs */, psbtx, complete);
assert(!complete);
assert(err == TransactionError::OK);
// Serialize the PSBT
@@ -563,7 +562,7 @@ void SendCoinsDialog::setBalance(const interfaces::WalletBalances& balances)
if(model && model->getOptionsModel())
{
CAmount balance = balances.balance;
- if (model->privateKeysDisabled()) {
+ if (model->wallet().privateKeysDisabled()) {
balance = balances.watch_only_balance;
ui->labelBalanceName->setText(tr("Watch-only balance:"));
}
@@ -653,7 +652,7 @@ void SendCoinsDialog::useAvailableBalance(SendCoinsEntry* entry)
}
// Include watch-only for wallets without private key
- coin_control.fAllowWatchOnly = model->privateKeysDisabled();
+ coin_control.fAllowWatchOnly = model->wallet().privateKeysDisabled();
// Calculate available amount to send.
CAmount amount = model->wallet().getAvailableBalance(coin_control);
@@ -708,7 +707,7 @@ void SendCoinsDialog::updateCoinControlState(CCoinControl& ctrl)
ctrl.m_confirm_target = getConfTargetForIndex(ui->confTargetSelector->currentIndex());
ctrl.m_signal_bip125_rbf = ui->optInRBF->isChecked();
// Include watch-only for wallets without private key
- ctrl.fAllowWatchOnly = model->privateKeysDisabled();
+ ctrl.fAllowWatchOnly = model->wallet().privateKeysDisabled();
}
void SendCoinsDialog::updateSmartFeeLabel()
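
Editor's note (not part of the patch): the sendcoinsdialog.cpp hunks above switch the watch-only checks to model->wallet().privateKeysDisabled() and call interfaces::Wallet::fillPSBT() with its reordered parameter list. The following is a minimal illustrative sketch of that PSBT-draft step in isolation; the function name GetUnsignedPSBT and the exact header paths are assumptions for the example, while the argument order and the two assertions mirror the diff.

// Illustrative sketch only: build an unsigned PSBT the way the dialog now does.
// Header names and the wrapper function are assumptions; fillPSBT() arguments
// follow the patch above.
#include <interfaces/wallet.h>
#include <primitives/transaction.h>
#include <psbt.h>
#include <script/interpreter.h> // for SIGHASH_ALL
#include <util/error.h>         // for TransactionError

#include <cassert>

PartiallySignedTransaction GetUnsignedPSBT(interfaces::Wallet& wallet, const CTransactionRef& tx)
{
    CMutableTransaction mtx(*tx);
    PartiallySignedTransaction psbtx(mtx);
    bool complete = false;
    // Reordered interface: sighash type, sign, bip32derivs, then the in/out
    // PSBT and the completeness flag.
    const TransactionError err = wallet.fillPSBT(SIGHASH_ALL, /*sign=*/false, /*bip32derivs=*/true, psbtx, complete);
    assert(err == TransactionError::OK);
    assert(!complete); // private keys are disabled, so nothing could be signed
    return psbtx;
}
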
diff --git a/src/qt/signverifymessagedialog.cpp b/src/qt/signverifymessagedialog.cpp
index 5f2836cc75..4552753bf6 100644
--- a/src/qt/signverifymessagedialog.cpp
+++ b/src/qt/signverifymessagedialog.cpp
@@ -11,7 +11,7 @@
#include <qt/walletmodel.h>
#include <key_io.h>
-#include <util/validation.h> // For strMessageMagic
+#include <util/message.h> // For MessageSign(), MessageVerify()
#include <wallet/wallet.h>
#include <vector>
@@ -133,30 +133,34 @@ void SignVerifyMessageDialog::on_signMessageButton_SM_clicked()
return;
}
- CKey key;
- if (!model->wallet().getPrivKey(GetScriptForDestination(destination), CKeyID(*pkhash), key))
- {
- ui->statusLabel_SM->setStyleSheet("QLabel { color: red; }");
- ui->statusLabel_SM->setText(tr("Private key for the entered address is not available."));
- return;
+ const std::string& message = ui->messageIn_SM->document()->toPlainText().toStdString();
+ std::string signature;
+ SigningResult res = model->wallet().signMessage(message, *pkhash, signature);
+
+ QString error;
+ switch (res) {
+ case SigningResult::OK:
+ error = tr("No error");
+ break;
+ case SigningResult::PRIVATE_KEY_NOT_AVAILABLE:
+ error = tr("Private key for the entered address is not available.");
+ break;
+ case SigningResult::SIGNING_FAILED:
+ error = tr("Message signing failed.");
+ break;
+ // no default case, so the compiler can warn about missing cases
}
- CHashWriter ss(SER_GETHASH, 0);
- ss << strMessageMagic;
- ss << ui->messageIn_SM->document()->toPlainText().toStdString();
-
- std::vector<unsigned char> vchSig;
- if (!key.SignCompact(ss.GetHash(), vchSig))
- {
+ if (res != SigningResult::OK) {
ui->statusLabel_SM->setStyleSheet("QLabel { color: red; }");
- ui->statusLabel_SM->setText(QString("<nobr>") + tr("Message signing failed.") + QString("</nobr>"));
+ ui->statusLabel_SM->setText(QString("<nobr>") + error + QString("</nobr>"));
return;
}
ui->statusLabel_SM->setStyleSheet("QLabel { color: green; }");
ui->statusLabel_SM->setText(QString("<nobr>") + tr("Message signed.") + QString("</nobr>"));
- ui->signatureOut_SM->setText(QString::fromStdString(EncodeBase64(vchSig.data(), vchSig.size())));
+ ui->signatureOut_SM->setText(QString::fromStdString(signature));
}
void SignVerifyMessageDialog::on_copySignatureButton_SM_clicked()
@@ -189,51 +193,57 @@ void SignVerifyMessageDialog::on_addressBookButton_VM_clicked()
void SignVerifyMessageDialog::on_verifyMessageButton_VM_clicked()
{
- CTxDestination destination = DecodeDestination(ui->addressIn_VM->text().toStdString());
- if (!IsValidDestination(destination)) {
+ const std::string& address = ui->addressIn_VM->text().toStdString();
+ const std::string& signature = ui->signatureIn_VM->text().toStdString();
+ const std::string& message = ui->messageIn_VM->document()->toPlainText().toStdString();
+
+ const auto result = MessageVerify(address, signature, message);
+
+ if (result == MessageVerificationResult::OK) {
+ ui->statusLabel_VM->setStyleSheet("QLabel { color: green; }");
+ } else {
ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
- ui->statusLabel_VM->setText(tr("The entered address is invalid.") + QString(" ") + tr("Please check the address and try again."));
- return;
}
- if (!boost::get<PKHash>(&destination)) {
+
+ switch (result) {
+ case MessageVerificationResult::OK:
+ ui->statusLabel_VM->setText(
+ QString("<nobr>") + tr("Message verified.") + QString("</nobr>")
+ );
+ return;
+ case MessageVerificationResult::ERR_INVALID_ADDRESS:
+ ui->statusLabel_VM->setText(
+ tr("The entered address is invalid.") + QString(" ") +
+ tr("Please check the address and try again.")
+ );
+ return;
+ case MessageVerificationResult::ERR_ADDRESS_NO_KEY:
ui->addressIn_VM->setValid(false);
- ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
- ui->statusLabel_VM->setText(tr("The entered address does not refer to a key.") + QString(" ") + tr("Please check the address and try again."));
+ ui->statusLabel_VM->setText(
+ tr("The entered address does not refer to a key.") + QString(" ") +
+ tr("Please check the address and try again.")
+ );
return;
- }
-
- bool fInvalid = false;
- std::vector<unsigned char> vchSig = DecodeBase64(ui->signatureIn_VM->text().toStdString().c_str(), &fInvalid);
-
- if (fInvalid)
- {
+ case MessageVerificationResult::ERR_MALFORMED_SIGNATURE:
ui->signatureIn_VM->setValid(false);
- ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
- ui->statusLabel_VM->setText(tr("The signature could not be decoded.") + QString(" ") + tr("Please check the signature and try again."));
+ ui->statusLabel_VM->setText(
+ tr("The signature could not be decoded.") + QString(" ") +
+ tr("Please check the signature and try again.")
+ );
return;
- }
-
- CHashWriter ss(SER_GETHASH, 0);
- ss << strMessageMagic;
- ss << ui->messageIn_VM->document()->toPlainText().toStdString();
-
- CPubKey pubkey;
- if (!pubkey.RecoverCompact(ss.GetHash(), vchSig))
- {
+ case MessageVerificationResult::ERR_PUBKEY_NOT_RECOVERED:
ui->signatureIn_VM->setValid(false);
- ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
- ui->statusLabel_VM->setText(tr("The signature did not match the message digest.") + QString(" ") + tr("Please check the signature and try again."));
+ ui->statusLabel_VM->setText(
+ tr("The signature did not match the message digest.") + QString(" ") +
+ tr("Please check the signature and try again.")
+ );
return;
- }
-
- if (!(CTxDestination(PKHash(pubkey)) == destination)) {
- ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
- ui->statusLabel_VM->setText(QString("<nobr>") + tr("Message verification failed.") + QString("</nobr>"));
+ case MessageVerificationResult::ERR_NOT_SIGNED:
+ ui->statusLabel_VM->setText(
+ QString("<nobr>") + tr("Message verification failed.") + QString("</nobr>")
+ );
return;
}
-
- ui->statusLabel_VM->setStyleSheet("QLabel { color: green; }");
- ui->statusLabel_VM->setText(QString("<nobr>") + tr("Message verified.") + QString("</nobr>"));
}
void SignVerifyMessageDialog::on_clearButton_VM_clicked()
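
Editor's note (not part of the patch): the signverifymessagedialog.cpp hunks above replace the hand-rolled CHashWriter/RecoverCompact verification with the MessageVerify() helper from util/message.h and a switch over MessageVerificationResult. A minimal sketch of calling that helper outside the GUI follows, assuming it is compiled inside the Bitcoin Core tree at this revision; the enum values are exactly the ones the dialog handles above, and the wrapper function name is an assumption for the example.

// Illustrative sketch only: map MessageVerify() results to plain text.
#include <util/message.h>

#include <string>

std::string VerifyToText(const std::string& address, const std::string& signature, const std::string& message)
{
    switch (MessageVerify(address, signature, message)) {
    case MessageVerificationResult::OK:
        return "Message verified.";
    case MessageVerificationResult::ERR_INVALID_ADDRESS:
        return "The entered address is invalid.";
    case MessageVerificationResult::ERR_ADDRESS_NO_KEY:
        return "The entered address does not refer to a key.";
    case MessageVerificationResult::ERR_MALFORMED_SIGNATURE:
        return "The signature could not be decoded.";
    case MessageVerificationResult::ERR_PUBKEY_NOT_RECOVERED:
        return "The signature did not match the message digest.";
    case MessageVerificationResult::ERR_NOT_SIGNED:
        return "Message verification failed.";
    } // no default case, so the compiler can warn about missing cases
    return "";
}
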
diff --git a/src/qt/splashscreen.cpp b/src/qt/splashscreen.cpp
index e19833019d..e4ffa6cd9a 100644
--- a/src/qt/splashscreen.cpp
+++ b/src/qt/splashscreen.cpp
@@ -137,7 +137,7 @@ SplashScreen::~SplashScreen()
bool SplashScreen::eventFilter(QObject * obj, QEvent * ev) {
if (ev->type() == QEvent::KeyPress) {
QKeyEvent *keyEvent = static_cast<QKeyEvent *>(ev);
- if(keyEvent->text()[0] == 'q') {
+ if (keyEvent->key() == Qt::Key_Q) {
m_node.startShutdown();
}
}
diff --git a/src/qt/walletcontroller.cpp b/src/qt/walletcontroller.cpp
index 7413a1f09e..233c0ab6be 100644
--- a/src/qt/walletcontroller.cpp
+++ b/src/qt/walletcontroller.cpp
@@ -218,8 +218,8 @@ void CreateWalletActivity::createWallet()
}
QTimer::singleShot(500, worker(), [this, name, flags] {
- std::unique_ptr<interfaces::Wallet> wallet;
- WalletCreationStatus status = node().createWallet(m_passphrase, flags, name, m_error_message, m_warning_message, wallet);
+ WalletCreationStatus status;
+ std::unique_ptr<interfaces::Wallet> wallet = node().createWallet(m_passphrase, flags, name, m_error_message, m_warning_message, status);
if (status == WalletCreationStatus::SUCCESS) m_wallet_model = m_wallet_controller->getOrCreateWallet(std::move(wallet));
diff --git a/src/qt/walletframe.cpp b/src/qt/walletframe.cpp
index 656fffd067..dac3326cc4 100644
--- a/src/qt/walletframe.cpp
+++ b/src/qt/walletframe.cpp
@@ -37,6 +37,10 @@ WalletFrame::~WalletFrame()
void WalletFrame::setClientModel(ClientModel *_clientModel)
{
this->clientModel = _clientModel;
+
+ for (auto i = mapWalletViews.constBegin(); i != mapWalletViews.constEnd(); ++i) {
+ i.value()->setClientModel(_clientModel);
+ }
}
bool WalletFrame::addWallet(WalletModel *walletModel)
diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp
index 6c3a06f3a2..41876f7119 100644
--- a/src/qt/walletmodel.cpp
+++ b/src/qt/walletmodel.cpp
@@ -23,7 +23,7 @@
#include <ui_interface.h>
#include <util/system.h> // for GetBoolArg
#include <wallet/coincontrol.h>
-#include <wallet/wallet.h>
+#include <wallet/wallet.h> // for CRecipient
#include <stdint.h>
@@ -82,12 +82,12 @@ void WalletModel::pollBalanceChanged()
return;
}
- if(fForceCheckBalanceChanged || m_node.getNumBlocks() != cachedNumBlocks)
+ if(fForceCheckBalanceChanged || numBlocks != cachedNumBlocks)
{
fForceCheckBalanceChanged = false;
// Balance and number of transactions might have changed
- cachedNumBlocks = m_node.getNumBlocks();
+ cachedNumBlocks = numBlocks;
checkBalanceChanged(new_balances);
if(transactionTableModel)
@@ -184,7 +184,7 @@ WalletModel::SendCoinsReturn WalletModel::prepareTransaction(WalletModelTransact
std::string strFailReason;
auto& newTx = transaction.getWtx();
- newTx = m_wallet->createTransaction(vecSend, coinControl, !privateKeysDisabled() /* sign */, nChangePosRet, nFeeRequired, strFailReason);
+ newTx = m_wallet->createTransaction(vecSend, coinControl, !wallet().privateKeysDisabled() /* sign */, nChangePosRet, nFeeRequired, strFailReason);
transaction.setTransactionFee(nFeeRequired);
if (fSubtractFeeFromAmount && newTx)
transaction.reassignAmounts(nChangePosRet);
@@ -482,13 +482,13 @@ bool WalletModel::bumpFee(uint256 hash, uint256& new_hash)
CAmount old_fee;
CAmount new_fee;
CMutableTransaction mtx;
- if (!m_wallet->createBumpTransaction(hash, coin_control, 0 /* totalFee */, errors, old_fee, new_fee, mtx)) {
+ if (!m_wallet->createBumpTransaction(hash, coin_control, errors, old_fee, new_fee, mtx)) {
QMessageBox::critical(nullptr, tr("Fee bump error"), tr("Increasing transaction fee failed") + "<br />(" +
(errors.size() ? QString::fromStdString(errors[0]) : "") +")");
return false;
}
- const bool create_psbt = privateKeysDisabled();
+ const bool create_psbt = m_wallet->privateKeysDisabled();
// allow a user based fee verification
QString questionString = create_psbt ? tr("Do you want to draft a transaction with fee increase?") : tr("Do you want to increase the fee?");
@@ -526,7 +526,7 @@ bool WalletModel::bumpFee(uint256 hash, uint256& new_hash)
if (create_psbt) {
PartiallySignedTransaction psbtx(mtx);
bool complete = false;
- const TransactionError err = wallet().fillPSBT(psbtx, complete, SIGHASH_ALL, false /* sign */, true /* bip32derivs */);
+ const TransactionError err = wallet().fillPSBT(SIGHASH_ALL, false /* sign */, true /* bip32derivs */, psbtx, complete);
if (err != TransactionError::OK || complete) {
QMessageBox::critical(nullptr, tr("Fee bump error"), tr("Can't draft transaction."));
return false;
@@ -558,16 +558,6 @@ bool WalletModel::isWalletEnabled()
return !gArgs.GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET);
}
-bool WalletModel::privateKeysDisabled() const
-{
- return m_wallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
-}
-
-bool WalletModel::canGetAddresses() const
-{
- return m_wallet->canGetAddresses();
-}
-
QString WalletModel::getWalletName() const
{
return QString::fromStdString(m_wallet->getWalletName());
diff --git a/src/qt/walletmodel.h b/src/qt/walletmodel.h
index 8087356f5e..7936014af9 100644
--- a/src/qt/walletmodel.h
+++ b/src/qt/walletmodel.h
@@ -140,8 +140,6 @@ public:
bool bumpFee(uint256 hash, uint256& new_hash);
static bool isWalletEnabled();
- bool privateKeysDisabled() const;
- bool canGetAddresses() const;
interfaces::Node& node() const { return m_node; }
interfaces::Wallet& wallet() const { return *m_wallet; }
diff --git a/src/qt/walletview.h b/src/qt/walletview.h
index 86e46348be..78d870f59f 100644
--- a/src/qt/walletview.h
+++ b/src/qt/walletview.h
@@ -66,7 +66,7 @@ private:
TransactionView *transactionView;
- QProgressDialog *progressDialog;
+ QProgressDialog* progressDialog{nullptr};
const PlatformStyle *platformStyle;
public Q_SLOTS:
diff --git a/src/random.cpp b/src/random.cpp
index f0082cf3e0..2a27e6ba0d 100644
--- a/src/random.cpp
+++ b/src/random.cpp
@@ -15,7 +15,7 @@
#endif
#include <logging.h> // for LogPrintf()
#include <sync.h> // for Mutex
-#include <util/time.h> // for GetTime()
+#include <util/time.h> // for GetTimeMicros()
#include <stdlib.h>
#include <thread>
@@ -315,19 +315,16 @@ void GetOSRand(unsigned char *ent32)
RandFailure();
}
#elif defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX)
- // We need a fallback for OSX < 10.12
- if (&getentropy != nullptr) {
- if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) {
- RandFailure();
- }
- } else {
- GetDevURandom(ent32);
+ /* getentropy() is available on macOS 10.12 and later.
+ */
+ if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) {
+ RandFailure();
}
#elif defined(HAVE_SYSCTL_ARND)
- /* FreeBSD and similar. It is possible for the call to return less
+ /* FreeBSD, NetBSD and similar. It is possible for the call to return less
* bytes than requested, so need to read in a loop.
*/
- static const int name[2] = {CTL_KERN, KERN_ARND};
+ static int name[2] = {CTL_KERN, KERN_ARND};
int have = 0;
do {
size_t len = NUM_OS_RANDOM_BYTES - have;
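
Editor's note (not part of the patch): the random.cpp hunk above keeps the comment that sysctl(KERN_ARND) on FreeBSD/NetBSD may return fewer bytes than requested, hence the read loop. A self-contained sketch of that loop follows; the function name, buffer handling and error strategy are assumptions for the example, while the name array and looping pattern follow the patch.

// Illustrative sketch only: read `want` random bytes via sysctl(KERN_ARND),
// looping because a single call may return fewer bytes than requested.
#include <sys/types.h>
#include <sys/sysctl.h>

#include <cstddef>
#include <stdexcept>

void GetArndBytes(unsigned char* buf, std::size_t want)
{
    int name[2] = {CTL_KERN, KERN_ARND};
    std::size_t have = 0;
    do {
        std::size_t len = want - have;
        if (sysctl(name, 2, buf + have, &len, nullptr, 0) != 0) {
            throw std::runtime_error("sysctl(KERN_ARND) failed");
        }
        have += len;
    } while (have < want);
}
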
diff --git a/src/randomenv.cpp b/src/randomenv.cpp
index 6992c720ff..8b3d478529 100644
--- a/src/randomenv.cpp
+++ b/src/randomenv.cpp
@@ -38,11 +38,6 @@
#include <sys/utsname.h>
#include <unistd.h>
#endif
-#ifdef __MACH__
-#include <mach/clock.h>
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-#endif
#if HAVE_DECL_GETIFADDRS
#include <ifaddrs.h>
#endif
@@ -237,8 +232,6 @@ void RandAddDynamicEnv(CSHA512& hasher)
GetSystemTimeAsFileTime(&ftime);
hasher << ftime;
#else
-# ifndef __MACH__
- // On non-MacOS systems, use various clock_gettime() calls.
struct timespec ts = {};
# ifdef CLOCK_MONOTONIC
clock_gettime(CLOCK_MONOTONIC, &ts);
@@ -252,18 +245,6 @@ void RandAddDynamicEnv(CSHA512& hasher)
clock_gettime(CLOCK_BOOTTIME, &ts);
hasher << ts;
# endif
-# else
- // On MacOS use mach_absolute_time (number of CPU ticks since boot) as a replacement for CLOCK_MONOTONIC,
- // and clock_get_time for CALENDAR_CLOCK as a replacement for CLOCK_REALTIME.
- hasher << mach_absolute_time();
- // From https://gist.github.com/jbenet/1087739
- clock_serv_t cclock;
- mach_timespec_t mts = {};
- if (host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock) == KERN_SUCCESS && clock_get_time(cclock, &mts) == KERN_SUCCESS) {
- hasher << mts;
- mach_port_deallocate(mach_task_self(), cclock);
- }
-# endif
// gettimeofday is available on all UNIX systems, but only has microsecond precision.
struct timeval tv = {};
gettimeofday(&tv, nullptr);
diff --git a/src/reverselock.h b/src/reverselock.h
deleted file mode 100644
index 9d9cc9fd77..0000000000
--- a/src/reverselock.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2015-2016 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef BITCOIN_REVERSELOCK_H
-#define BITCOIN_REVERSELOCK_H
-
-/**
- * An RAII-style reverse lock. Unlocks on construction and locks on destruction.
- */
-template<typename Lock>
-class reverse_lock
-{
-public:
-
- explicit reverse_lock(Lock& _lock) : lock(_lock) {
- _lock.unlock();
- _lock.swap(templock);
- }
-
- ~reverse_lock() {
- templock.lock();
- templock.swap(lock);
- }
-
-private:
- reverse_lock(reverse_lock const&);
- reverse_lock& operator=(reverse_lock const&);
-
- Lock& lock;
- Lock templock;
-};
-
-#endif // BITCOIN_REVERSELOCK_H
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index eb5148eebd..c132f265d2 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -31,7 +31,6 @@
#include <undo.h>
#include <util/strencodings.h>
#include <util/system.h>
-#include <util/validation.h>
#include <validation.h>
#include <validationinterface.h>
#include <warnings.h>
@@ -178,8 +177,7 @@ static UniValue getblockcount(const JSONRPCRequest& request)
"The genesis block has height 0.\n",
{},
RPCResult{
- "n (numeric) The current block count\n"
- },
+ RPCResult::Type::NUM, "", "The current block count"},
RPCExamples{
HelpExampleCli("getblockcount", "")
+ HelpExampleRpc("getblockcount", "")
@@ -196,8 +194,7 @@ static UniValue getbestblockhash(const JSONRPCRequest& request)
"\nReturns the hash of the best (tip) block in the most-work fully-validated chain.\n",
{},
RPCResult{
- "\"hex\" (string) the block hash, hex-encoded\n"
- },
+ RPCResult::Type::STR_HEX, "", "the block hash, hex-encoded"},
RPCExamples{
HelpExampleCli("getbestblockhash", "")
+ HelpExampleRpc("getbestblockhash", "")
@@ -227,11 +224,11 @@ static UniValue waitfornewblock(const JSONRPCRequest& request)
{"timeout", RPCArg::Type::NUM, /* default */ "0", "Time in milliseconds to wait for a response. 0 indicates no timeout."},
},
RPCResult{
- "{ (json object)\n"
- " \"hash\" : { (string) The blockhash\n"
- " \"height\" : { (int) Block height\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hash", "The blockhash"},
+ {RPCResult::Type::NUM, "height", "Block height"},
+ }},
RPCExamples{
HelpExampleCli("waitfornewblock", "1000")
+ HelpExampleRpc("waitfornewblock", "1000")
@@ -267,13 +264,13 @@ static UniValue waitforblock(const JSONRPCRequest& request)
{"timeout", RPCArg::Type::NUM, /* default */ "0", "Time in milliseconds to wait for a response. 0 indicates no timeout."},
},
RPCResult{
- "{ (json object)\n"
- " \"hash\" : { (string) The blockhash\n"
- " \"height\" : { (int) Block height\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hash", "The blockhash"},
+ {RPCResult::Type::NUM, "height", "Block height"},
+ }},
RPCExamples{
- HelpExampleCli("waitforblock", "\"0000000000079f8ef3d2c688c244eb7a4570b24c9ed7b4a8c619eb02596f8862\", 1000")
+ HelpExampleCli("waitforblock", "\"0000000000079f8ef3d2c688c244eb7a4570b24c9ed7b4a8c619eb02596f8862\" 1000")
+ HelpExampleRpc("waitforblock", "\"0000000000079f8ef3d2c688c244eb7a4570b24c9ed7b4a8c619eb02596f8862\", 1000")
},
}.Check(request);
@@ -311,14 +308,14 @@ static UniValue waitforblockheight(const JSONRPCRequest& request)
{"timeout", RPCArg::Type::NUM, /* default */ "0", "Time in milliseconds to wait for a response. 0 indicates no timeout."},
},
RPCResult{
- "{ (json object)\n"
- " \"hash\" : { (string) The blockhash\n"
- " \"height\" : { (int) Block height\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hash", "The blockhash"},
+ {RPCResult::Type::NUM, "height", "Block height"},
+ }},
RPCExamples{
- HelpExampleCli("waitforblockheight", "\"100\", 1000")
- + HelpExampleRpc("waitforblockheight", "\"100\", 1000")
+ HelpExampleCli("waitforblockheight", "100 1000")
+ + HelpExampleRpc("waitforblockheight", "100, 1000")
},
}.Check(request);
int timeout = 0;
@@ -348,7 +345,7 @@ static UniValue syncwithvalidationinterfacequeue(const JSONRPCRequest& request)
RPCHelpMan{"syncwithvalidationinterfacequeue",
"\nWaits for the validation interface queue to catch up on everything that was there when we entered this function.\n",
{},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("syncwithvalidationinterfacequeue","")
+ HelpExampleRpc("syncwithvalidationinterfacequeue","")
@@ -365,8 +362,7 @@ static UniValue getdifficulty(const JSONRPCRequest& request)
"\nReturns the proof-of-work difficulty as a multiple of the minimum difficulty.\n",
{},
RPCResult{
- "n.nnn (numeric) the proof-of-work difficulty as a multiple of the minimum difficulty.\n"
- },
+ RPCResult::Type::NUM, "", "the proof-of-work difficulty as a multiple of the minimum difficulty."},
RPCExamples{
HelpExampleCli("getdifficulty", "")
+ HelpExampleRpc("getdifficulty", "")
@@ -377,37 +373,35 @@ static UniValue getdifficulty(const JSONRPCRequest& request)
return GetDifficulty(::ChainActive().Tip());
}
-static std::string EntryDescriptionString()
-{
- return " \"vsize\" : n, (numeric) virtual transaction size as defined in BIP 141. This is different from actual serialized size for witness transactions as witness data is discounted.\n"
- " \"size\" : n, (numeric) (DEPRECATED) same as vsize. Only returned if bitcoind is started with -deprecatedrpc=size\n"
- " size will be completely removed in v0.20.\n"
- " \"weight\" : n, (numeric) transaction weight as defined in BIP 141.\n"
- " \"fee\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + " (DEPRECATED)\n"
- " \"modifiedfee\" : n, (numeric) transaction fee with fee deltas used for mining priority (DEPRECATED)\n"
- " \"time\" : n, (numeric) local time transaction entered pool in seconds since 1 Jan 1970 GMT\n"
- " \"height\" : n, (numeric) block height when transaction entered pool\n"
- " \"descendantcount\" : n, (numeric) number of in-mempool descendant transactions (including this one)\n"
- " \"descendantsize\" : n, (numeric) virtual transaction size of in-mempool descendants (including this one)\n"
- " \"descendantfees\" : n, (numeric) modified fees (see above) of in-mempool descendants (including this one) (DEPRECATED)\n"
- " \"ancestorcount\" : n, (numeric) number of in-mempool ancestor transactions (including this one)\n"
- " \"ancestorsize\" : n, (numeric) virtual transaction size of in-mempool ancestors (including this one)\n"
- " \"ancestorfees\" : n, (numeric) modified fees (see above) of in-mempool ancestors (including this one) (DEPRECATED)\n"
- " \"wtxid\" : hash, (string) hash of serialized transaction, including witness data\n"
- " \"fees\" : {\n"
- " \"base\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + "\n"
- " \"modified\" : n, (numeric) transaction fee with fee deltas used for mining priority in " + CURRENCY_UNIT + "\n"
- " \"ancestor\" : n, (numeric) modified fees (see above) of in-mempool ancestors (including this one) in " + CURRENCY_UNIT + "\n"
- " \"descendant\" : n, (numeric) modified fees (see above) of in-mempool descendants (including this one) in " + CURRENCY_UNIT + "\n"
- " }\n"
- " \"depends\" : [ (array) unconfirmed transactions used as inputs for this transaction\n"
- " \"transactionid\", (string) parent transaction id\n"
- " ... ]\n"
- " \"spentby\" : [ (array) unconfirmed transactions spending outputs from this transaction\n"
- " \"transactionid\", (string) child transaction id\n"
- " ... ]\n"
- " \"bip125-replaceable\" : true|false, (boolean) Whether this transaction could be replaced due to BIP125 (replace-by-fee)\n";
-}
+static std::vector<RPCResult> MempoolEntryDescription() { return {
+ RPCResult{RPCResult::Type::NUM, "vsize", "virtual transaction size as defined in BIP 141. This is different from actual serialized size for witness transactions as witness data is discounted."},
+ RPCResult{RPCResult::Type::NUM, "size", "(DEPRECATED) same as vsize. Only returned if bitcoind is started with -deprecatedrpc=size\n"
+ "size will be completely removed in v0.20."},
+ RPCResult{RPCResult::Type::NUM, "weight", "transaction weight as defined in BIP 141."},
+ RPCResult{RPCResult::Type::STR_AMOUNT, "fee", "transaction fee in " + CURRENCY_UNIT + " (DEPRECATED)"},
+ RPCResult{RPCResult::Type::STR_AMOUNT, "modifiedfee", "transaction fee with fee deltas used for mining priority (DEPRECATED)"},
+ RPCResult{RPCResult::Type::NUM_TIME, "time", "local time transaction entered pool in seconds since 1 Jan 1970 GMT"},
+ RPCResult{RPCResult::Type::NUM, "height", "block height when transaction entered pool"},
+ RPCResult{RPCResult::Type::NUM, "descendantcount", "number of in-mempool descendant transactions (including this one)"},
+ RPCResult{RPCResult::Type::NUM, "descendantsize", "virtual transaction size of in-mempool descendants (including this one)"},
+ RPCResult{RPCResult::Type::STR_AMOUNT, "descendantfees", "modified fees (see above) of in-mempool descendants (including this one) (DEPRECATED)"},
+ RPCResult{RPCResult::Type::NUM, "ancestorcount", "number of in-mempool ancestor transactions (including this one)"},
+ RPCResult{RPCResult::Type::NUM, "ancestorsize", "virtual transaction size of in-mempool ancestors (including this one)"},
+ RPCResult{RPCResult::Type::STR_AMOUNT, "ancestorfees", "modified fees (see above) of in-mempool ancestors (including this one) (DEPRECATED)"},
+ RPCResult{RPCResult::Type::STR_HEX, "wtxid", "hash of serialized transaction, including witness data"},
+ RPCResult{RPCResult::Type::OBJ, "fees", "",
+ {
+ RPCResult{RPCResult::Type::STR_AMOUNT, "base", "transaction fee in " + CURRENCY_UNIT},
+ RPCResult{RPCResult::Type::STR_AMOUNT, "modified", "transaction fee with fee deltas used for mining priority in " + CURRENCY_UNIT},
+ RPCResult{RPCResult::Type::STR_AMOUNT, "ancestor", "modified fees (see above) of in-mempool ancestors (including this one) in " + CURRENCY_UNIT},
+ RPCResult{RPCResult::Type::STR_AMOUNT, "descendant", "modified fees (see above) of in-mempool descendants (including this one) in " + CURRENCY_UNIT},
+ }},
+ RPCResult{RPCResult::Type::ARR, "depends", "unconfirmed transactions used as inputs for this transaction",
+ {RPCResult{RPCResult::Type::STR_HEX, "transactionid", "parent transaction id"}}},
+ RPCResult{RPCResult::Type::ARR, "spentby", "unconfirmed transactions spending outputs from this transaction",
+ {RPCResult{RPCResult::Type::STR_HEX, "transactionid", "child transaction id"}}},
+ RPCResult{RPCResult::Type::BOOL, "bip125-replaceable", "Whether this transaction could be replaced due to BIP125 (replace-by-fee)"},
+};}
static void entryToJSON(const CTxMemPool& pool, UniValue& info, const CTxMemPoolEntry& e) EXCLUSIVE_LOCKS_REQUIRED(pool.cs)
{
@@ -506,17 +500,17 @@ static UniValue getrawmempool(const JSONRPCRequest& request)
{
{"verbose", RPCArg::Type::BOOL, /* default */ "false", "True for a json object, false for array of transaction ids"},
},
- RPCResult{"for verbose = false",
- "[ (json array of string)\n"
- " \"transactionid\" (string) The transaction id\n"
- " ,...\n"
- "]\n"
- "\nResult: (for verbose = true):\n"
- "{ (json object)\n"
- " \"transactionid\" : { (json object)\n"
- + EntryDescriptionString()
- + " }, ...\n"
- "}\n"
+ {
+ RPCResult{"for verbose = false",
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "", "The transaction id"},
+ }},
+ RPCResult{"for verbose = true",
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()},
+ }},
},
RPCExamples{
HelpExampleCli("getrawmempool", "true")
@@ -541,18 +535,10 @@ static UniValue getmempoolancestors(const JSONRPCRequest& request)
},
{
RPCResult{"for verbose = false",
- "[ (json array of strings)\n"
- " \"transactionid\" (string) The transaction id of an in-mempool ancestor transaction\n"
- " ,...\n"
- "]\n"
- },
+ RPCResult::Type::ARR, "", "",
+ {{RPCResult::Type::STR_HEX, "", "The transaction id of an in-mempool ancestor transaction"}}},
RPCResult{"for verbose = true",
- "{ (json object)\n"
- " \"transactionid\" : { (json object)\n"
- + EntryDescriptionString()
- + " }, ...\n"
- "}\n"
- },
+ RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()},
},
RPCExamples{
HelpExampleCli("getmempoolancestors", "\"mytxid\"")
@@ -609,18 +595,13 @@ static UniValue getmempooldescendants(const JSONRPCRequest& request)
},
{
RPCResult{"for verbose = false",
- "[ (json array of strings)\n"
- " \"transactionid\" (string) The transaction id of an in-mempool descendant transaction\n"
- " ,...\n"
- "]\n"
- },
+ RPCResult::Type::ARR, "", "",
+ {{RPCResult::Type::STR_HEX, "", "The transaction id of an in-mempool descendant transaction"}}},
RPCResult{"for verbose = true",
- "{ (json object)\n"
- " \"transactionid\" : { (json object)\n"
- + EntryDescriptionString()
- + " }, ...\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()},
+ }},
},
RPCExamples{
HelpExampleCli("getmempooldescendants", "\"mytxid\"")
@@ -675,10 +656,7 @@ static UniValue getmempoolentry(const JSONRPCRequest& request)
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id (must be in mempool)"},
},
RPCResult{
- "{ (json object)\n"
- + EntryDescriptionString()
- + "}\n"
- },
+ RPCResult::Type::OBJ_DYN, "", "", MempoolEntryDescription()},
RPCExamples{
HelpExampleCli("getmempoolentry", "\"mytxid\"")
+ HelpExampleRpc("getmempoolentry", "\"mytxid\"")
@@ -709,8 +687,7 @@ static UniValue getblockhash(const JSONRPCRequest& request)
{"height", RPCArg::Type::NUM, RPCArg::Optional::NO, "The height index"},
},
RPCResult{
- "\"hash\" (string) The block hash\n"
- },
+ RPCResult::Type::STR_HEX, "", "The block hash"},
RPCExamples{
HelpExampleCli("getblockhash", "1000")
+ HelpExampleRpc("getblockhash", "1000")
@@ -738,27 +715,26 @@ static UniValue getblockheader(const JSONRPCRequest& request)
},
{
RPCResult{"for verbose = true",
- "{\n"
- " \"hash\" : \"hash\", (string) the block hash (same as provided)\n"
- " \"confirmations\" : n, (numeric) The number of confirmations, or -1 if the block is not on the main chain\n"
- " \"height\" : n, (numeric) The block height or index\n"
- " \"version\" : n, (numeric) The block version\n"
- " \"versionHex\" : \"00000000\", (string) The block version formatted in hexadecimal\n"
- " \"merkleroot\" : \"xxxx\", (string) The merkle root\n"
- " \"time\" : ttt, (numeric) The block time expressed in " + UNIX_EPOCH_TIME + "\n"
- " \"mediantime\" : ttt, (numeric) The median block time expressed in " + UNIX_EPOCH_TIME + "\n"
- " \"nonce\" : n, (numeric) The nonce\n"
- " \"bits\" : \"1d00ffff\", (string) The bits\n"
- " \"difficulty\" : x.xxx, (numeric) The difficulty\n"
- " \"chainwork\" : \"0000...1f3\" (string) Expected number of hashes required to produce the current chain (in hex)\n"
- " \"nTx\" : n, (numeric) The number of transactions in the block.\n"
- " \"previousblockhash\" : \"hash\", (string) The hash of the previous block\n"
- " \"nextblockhash\" : \"hash\", (string) The hash of the next block\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hash", "the block hash (same as provided)"},
+ {RPCResult::Type::NUM, "confirmations", "The number of confirmations, or -1 if the block is not on the main chain"},
+ {RPCResult::Type::NUM, "height", "The block height or index"},
+ {RPCResult::Type::NUM, "version", "The block version"},
+ {RPCResult::Type::STR_HEX, "versionHex", "The block version formatted in hexadecimal"},
+ {RPCResult::Type::STR_HEX, "merkleroot", "The merkle root"},
+ {RPCResult::Type::NUM_TIME, "time", "The block time expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::NUM_TIME, "mediantime", "The median block time expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::NUM, "nonce", "The nonce"},
+ {RPCResult::Type::STR_HEX, "bits", "The bits"},
+ {RPCResult::Type::NUM, "difficulty", "The difficulty"},
+ {RPCResult::Type::STR_HEX, "chainwork", "Expected number of hashes required to produce the current chain"},
+ {RPCResult::Type::NUM, "nTx", "The number of transactions in the block"},
+ {RPCResult::Type::STR_HEX, "previousblockhash", "The hash of the previous block"},
+ {RPCResult::Type::STR_HEX, "nextblockhash", "The hash of the next block"},
+ }},
RPCResult{"for verbose=false",
- "\"data\" (string) A string that is serialized, hex-encoded data for block 'hash'.\n"
- },
+ RPCResult::Type::STR_HEX, "", "A string that is serialized, hex-encoded data for block 'hash'"},
},
RPCExamples{
HelpExampleCli("getblockheader", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\"")
@@ -840,44 +816,45 @@ static UniValue getblock(const JSONRPCRequest& request)
},
{
RPCResult{"for verbosity = 0",
- "\"data\" (string) A string that is serialized, hex-encoded data for block 'hash'.\n"
- },
+ RPCResult::Type::STR_HEX, "", "A string that is serialized, hex-encoded data for block 'hash'"},
RPCResult{"for verbosity = 1",
- "{\n"
- " \"hash\" : \"hash\", (string) the block hash (same as provided)\n"
- " \"confirmations\" : n, (numeric) The number of confirmations, or -1 if the block is not on the main chain\n"
- " \"size\" : n, (numeric) The block size\n"
- " \"strippedsize\" : n, (numeric) The block size excluding witness data\n"
- " \"weight\" : n (numeric) The block weight as defined in BIP 141\n"
- " \"height\" : n, (numeric) The block height or index\n"
- " \"version\" : n, (numeric) The block version\n"
- " \"versionHex\" : \"00000000\", (string) The block version formatted in hexadecimal\n"
- " \"merkleroot\" : \"xxxx\", (string) The merkle root\n"
- " \"tx\" : [ (array of string) The transaction ids\n"
- " \"transactionid\" (string) The transaction id\n"
- " ,...\n"
- " ],\n"
- " \"time\" : ttt, (numeric) The block time expressed in " + UNIX_EPOCH_TIME + "\n"
- " \"mediantime\" : ttt, (numeric) The median block time expressed in " + UNIX_EPOCH_TIME + "\n"
- " \"nonce\" : n, (numeric) The nonce\n"
- " \"bits\" : \"1d00ffff\", (string) The bits\n"
- " \"difficulty\" : x.xxx, (numeric) The difficulty\n"
- " \"chainwork\" : \"xxxx\", (string) Expected number of hashes required to produce the chain up to this block (in hex)\n"
- " \"nTx\" : n, (numeric) The number of transactions in the block.\n"
- " \"previousblockhash\" : \"hash\", (string) The hash of the previous block\n"
- " \"nextblockhash\" : \"hash\" (string) The hash of the next block\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hash", "the block hash (same as provided)"},
+ {RPCResult::Type::NUM, "confirmations", "The number of confirmations, or -1 if the block is not on the main chain"},
+ {RPCResult::Type::NUM, "size", "The block size"},
+ {RPCResult::Type::NUM, "strippedsize", "The block size excluding witness data"},
+ {RPCResult::Type::NUM, "weight", "The block weight as defined in BIP 141"},
+ {RPCResult::Type::NUM, "height", "The block height or index"},
+ {RPCResult::Type::NUM, "version", "The block version"},
+ {RPCResult::Type::STR_HEX, "versionHex", "The block version formatted in hexadecimal"},
+ {RPCResult::Type::STR_HEX, "merkleroot", "The merkle root"},
+ {RPCResult::Type::ARR, "tx", "The transaction ids",
+ {{RPCResult::Type::STR_HEX, "", "The transaction id"}}},
+ {RPCResult::Type::NUM_TIME, "time", "The block time expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::NUM_TIME, "mediantime", "The median block time expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::NUM, "nonce", "The nonce"},
+ {RPCResult::Type::STR_HEX, "bits", "The bits"},
+ {RPCResult::Type::NUM, "difficulty", "The difficulty"},
+ {RPCResult::Type::STR_HEX, "chainwork", "Expected number of hashes required to produce the chain up to this block (in hex)"},
+ {RPCResult::Type::NUM, "nTx", "The number of transactions in the block"},
+ {RPCResult::Type::STR_HEX, "previousblockhash", "The hash of the previous block"},
+ {RPCResult::Type::STR_HEX, "nextblockhash", "The hash of the next block"},
+ }},
RPCResult{"for verbosity = 2",
- "{\n"
- " ..., Same output as verbosity = 1.\n"
- " \"tx\" : [ (array of Objects) The transactions in the format of the getrawtransaction RPC. Different from verbosity = 1 \"tx\" result.\n"
- " ,...\n"
- " ],\n"
- " ,... Same output as verbosity = 1.\n"
- "}\n"
- },
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ELISION, "", "Same output as verbosity = 1"},
+ {RPCResult::Type::ARR, "tx", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ELISION, "", "The transactions in the format of the getrawtransaction RPC. Different from verbosity = 1 \"tx\" result"},
+ }},
+ }},
+ {RPCResult::Type::ELISION, "", "Same output as verbosity = 1"},
+ }},
+ },
RPCExamples{
HelpExampleCli("getblock", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\"")
+ HelpExampleRpc("getblock", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\"")
@@ -928,8 +905,7 @@ static UniValue pruneblockchain(const JSONRPCRequest& request)
" to prune blocks whose block time is at least 2 hours older than the provided timestamp."},
},
RPCResult{
- "n (numeric) Height of the last block pruned.\n"
- },
+ RPCResult::Type::NUM, "", "Height of the last block pruned"},
RPCExamples{
HelpExampleCli("pruneblockchain", "1000")
+ HelpExampleRpc("pruneblockchain", "1000")
@@ -983,17 +959,17 @@ static UniValue gettxoutsetinfo(const JSONRPCRequest& request)
"Note this call may take some time.\n",
{},
RPCResult{
- "{\n"
- " \"height\":n, (numeric) The current block height (index)\n"
- " \"bestblock\": \"hex\", (string) The hash of the block at the tip of the chain\n"
- " \"transactions\": n, (numeric) The number of transactions with unspent outputs\n"
- " \"txouts\": n, (numeric) The number of unspent transaction outputs\n"
- " \"bogosize\": n, (numeric) A meaningless metric for UTXO set size\n"
- " \"hash_serialized_2\": \"hash\", (string) The serialized hash\n"
- " \"disk_size\": n, (numeric) The estimated size of the chainstate on disk\n"
- " \"total_amount\": x.xxx (numeric) The total amount\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "height", "The current block height (index)"},
+ {RPCResult::Type::STR_HEX, "bestblock", "The hash of the block at the tip of the chain"},
+ {RPCResult::Type::NUM, "transactions", "The number of transactions with unspent outputs"},
+ {RPCResult::Type::NUM, "txouts", "The number of unspent transaction outputs"},
+ {RPCResult::Type::NUM, "bogosize", "A meaningless metric for UTXO set size"},
+ {RPCResult::Type::STR_HEX, "hash_serialized_2", "The serialized hash"},
+ {RPCResult::Type::NUM, "disk_size", "The estimated size of the chainstate on disk"},
+ {RPCResult::Type::STR_AMOUNT, "total_amount", "The total amount"},
+ }},
RPCExamples{
HelpExampleCli("gettxoutsetinfo", "")
+ HelpExampleRpc("gettxoutsetinfo", "")
@@ -1031,23 +1007,22 @@ UniValue gettxout(const JSONRPCRequest& request)
{"include_mempool", RPCArg::Type::BOOL, /* default */ "true", "Whether to include the mempool. Note that an unspent output that is spent in the mempool won't appear."},
},
RPCResult{
- "{\n"
- " \"bestblock\": \"hash\", (string) The hash of the block at the tip of the chain\n"
- " \"confirmations\" : n, (numeric) The number of confirmations\n"
- " \"value\" : x.xxx, (numeric) The transaction value in " + CURRENCY_UNIT + "\n"
- " \"scriptPubKey\" : { (json object)\n"
- " \"asm\" : \"code\", (string) \n"
- " \"hex\" : \"hex\", (string) \n"
- " \"reqSigs\" : n, (numeric) Number of required signatures\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg pubkeyhash\n"
- " \"addresses\" : [ (array of string) array of bitcoin addresses\n"
- " \"address\" (string) bitcoin address\n"
- " ,...\n"
- " ]\n"
- " },\n"
- " \"coinbase\" : true|false (boolean) Coinbase or not\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "bestblock", "The hash of the block at the tip of the chain"},
+ {RPCResult::Type::NUM, "confirmations", "The number of confirmations"},
+ {RPCResult::Type::STR_AMOUNT, "value", "The transaction value in " + CURRENCY_UNIT},
+ {RPCResult::Type::OBJ, "scriptPubKey", "",
+ {
+ {RPCResult::Type::STR_HEX, "asm", ""},
+ {RPCResult::Type::STR_HEX, "hex", ""},
+ {RPCResult::Type::NUM, "reqSigs", "Number of required signatures"},
+ {RPCResult::Type::STR_HEX, "type", "The type, eg pubkeyhash"},
+ {RPCResult::Type::ARR, "addresses", "array of bitcoin addresses",
+ {{RPCResult::Type::STR, "address", "bitcoin address"}}},
+ }},
+ {RPCResult::Type::BOOL, "coinbase", "Coinbase or not"},
+ }},
RPCExamples{
"\nGet unspent transactions\n"
+ HelpExampleCli("listunspent", "") +
@@ -1112,8 +1087,7 @@ static UniValue verifychain(const JSONRPCRequest& request)
{"nblocks", RPCArg::Type::NUM, /* default */ strprintf("%d, 0=all", nCheckDepth), "The number of blocks to check."},
},
RPCResult{
- "true|false (boolean) Verified or not\n"
- },
+ RPCResult::Type::BOOL, "", "Verified or not"},
RPCExamples{
HelpExampleCli("verifychain", "")
+ HelpExampleRpc("verifychain", "")
@@ -1203,45 +1177,49 @@ UniValue getblockchaininfo(const JSONRPCRequest& request)
"Returns an object containing various state info regarding blockchain processing.\n",
{},
RPCResult{
- "{\n"
- " \"chain\": \"xxxx\", (string) current network name (main, test, regtest)\n"
- " \"blocks\": xxxxxx, (numeric) the height of the most-work fully-validated chain. The genesis block has height 0\n"
- " \"headers\": xxxxxx, (numeric) the current number of headers we have validated\n"
- " \"bestblockhash\": \"...\", (string) the hash of the currently best block\n"
- " \"difficulty\": xxxxxx, (numeric) the current difficulty\n"
- " \"mediantime\": xxxxxx, (numeric) median time for the current best block\n"
- " \"verificationprogress\": xxxx, (numeric) estimate of verification progress [0..1]\n"
- " \"initialblockdownload\": xxxx, (bool) (debug information) estimate of whether this node is in Initial Block Download mode.\n"
- " \"chainwork\": \"xxxx\" (string) total amount of work in active chain, in hexadecimal\n"
- " \"size_on_disk\": xxxxxx, (numeric) the estimated size of the block and undo files on disk\n"
- " \"pruned\": xx, (boolean) if the blocks are subject to pruning\n"
- " \"pruneheight\": xxxxxx, (numeric) lowest-height complete block stored (only present if pruning is enabled)\n"
- " \"automatic_pruning\": xx, (boolean) whether automatic pruning is enabled (only present if pruning is enabled)\n"
- " \"prune_target_size\": xxxxxx, (numeric) the target size used by pruning (only present if automatic pruning is enabled)\n"
- " \"softforks\": { (object) status of softforks\n"
- " \"xxxx\" : { (string) name of the softfork\n"
- " \"type\": \"xxxx\", (string) one of \"buried\", \"bip9\"\n"
- " \"bip9\": { (object) status of bip9 softforks (only for \"bip9\" type)\n"
- " \"status\": \"xxxx\", (string) one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\"\n"
- " \"bit\": xx, (numeric) the bit (0-28) in the block version field used to signal this softfork (only for \"started\" status)\n"
- " \"start_time\": xx, (numeric) the minimum median time past of a block at which the bit gains its meaning\n"
- " \"timeout\": xx, (numeric) the median time past of a block at which the deployment is considered failed if not yet locked in\n"
- " \"since\": xx, (numeric) height of the first block to which the status applies\n"
- " \"statistics\": { (object) numeric statistics about BIP9 signalling for a softfork\n"
- " \"period\": xx, (numeric) the length in blocks of the BIP9 signalling period \n"
- " \"threshold\": xx, (numeric) the number of blocks with the version bit set required to activate the feature \n"
- " \"elapsed\": xx, (numeric) the number of blocks elapsed since the beginning of the current period \n"
- " \"count\": xx, (numeric) the number of blocks with the version bit set in the current period \n"
- " \"possible\": xx (boolean) returns false if there are not enough blocks left in this period to pass activation threshold \n"
- " }\n"
- " },\n"
- " \"height\": \"xxxxxx\", (numeric) height of the first block which the rules are or will be enforced (only for \"buried\" type, or \"bip9\" type with \"active\" status)\n"
- " \"active\": xx, (boolean) true if the rules are enforced for the mempool and the next block\n"
- " }\n"
- " }\n"
- " \"warnings\" : \"...\", (string) any network and blockchain warnings.\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "chain", "current network name (main, test, regtest)"},
+ {RPCResult::Type::NUM, "blocks", "the height of the most-work fully-validated chain. The genesis block has height 0"},
+ {RPCResult::Type::NUM, "headers", "the current number of headers we have validated"},
+ {RPCResult::Type::STR, "bestblockhash", "the hash of the currently best block"},
+ {RPCResult::Type::NUM, "difficulty", "the current difficulty"},
+ {RPCResult::Type::NUM, "mediantime", "median time for the current best block"},
+ {RPCResult::Type::NUM, "verificationprogress", "estimate of verification progress [0..1]"},
+ {RPCResult::Type::BOOL, "initialblockdownload", "(debug information) estimate of whether this node is in Initial Block Download mode"},
+ {RPCResult::Type::STR_HEX, "chainwork", "total amount of work in active chain, in hexadecimal"},
+ {RPCResult::Type::NUM, "size_on_disk", "the estimated size of the block and undo files on disk"},
+ {RPCResult::Type::BOOL, "pruned", "if the blocks are subject to pruning"},
+ {RPCResult::Type::NUM, "pruneheight", "lowest-height complete block stored (only present if pruning is enabled)"},
+ {RPCResult::Type::BOOL, "automatic_pruning", "whether automatic pruning is enabled (only present if pruning is enabled)"},
+ {RPCResult::Type::NUM, "prune_target_size", "the target size used by pruning (only present if automatic pruning is enabled)"},
+ {RPCResult::Type::OBJ_DYN, "softforks", "status of softforks",
+ {
+ {RPCResult::Type::OBJ, "xxxx", "name of the softfork",
+ {
+ {RPCResult::Type::STR, "type", "one of \"buried\", \"bip9\""},
+ {RPCResult::Type::OBJ, "bip9", "status of bip9 softforks (only for \"bip9\" type)",
+ {
+ {RPCResult::Type::STR, "status", "one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\""},
+ {RPCResult::Type::NUM, "bit", "the bit (0-28) in the block version field used to signal this softfork (only for \"started\" status)"},
+ {RPCResult::Type::NUM_TIME, "start_time", "the minimum median time past of a block at which the bit gains its meaning"},
+ {RPCResult::Type::NUM_TIME, "timeout", "the median time past of a block at which the deployment is considered failed if not yet locked in"},
+ {RPCResult::Type::NUM, "since", "height of the first block to which the status applies"},
+ {RPCResult::Type::OBJ, "statistics", "numeric statistics about BIP9 signalling for a softfork (only for \"started\" status)",
+ {
+ {RPCResult::Type::NUM, "period", "the length in blocks of the BIP9 signalling period"},
+ {RPCResult::Type::NUM, "threshold", "the number of blocks with the version bit set required to activate the feature"},
+ {RPCResult::Type::NUM, "elapsed", "the number of blocks elapsed since the beginning of the current period"},
+ {RPCResult::Type::NUM, "count", "the number of blocks with the version bit set in the current period"},
+ {RPCResult::Type::BOOL, "possible", "returns false if there are not enough blocks left in this period to pass activation threshold"},
+ }},
+ }},
+ {RPCResult::Type::NUM, "height", "height of the first block which the rules are or will be enforced (only for \"buried\" type, or \"bip9\" type with \"active\" status)"},
+ {RPCResult::Type::BOOL, "active", "true if the rules are enforced for the mempool and the next block"},
+ }},
+ }},
+ {RPCResult::Type::STR, "warnings", "any network and blockchain warnings"},
+ }},
RPCExamples{
HelpExampleCli("getblockchaininfo", "")
+ HelpExampleRpc("getblockchaininfo", "")
@@ -1316,27 +1294,20 @@ static UniValue getchaintips(const JSONRPCRequest& request)
" including the main chain as well as orphaned branches.\n",
{},
RPCResult{
- "[\n"
- " {\n"
- " \"height\": xxxx, (numeric) height of the chain tip\n"
- " \"hash\": \"xxxx\", (string) block hash of the tip\n"
- " \"branchlen\": 0 (numeric) zero for main chain\n"
- " \"status\": \"active\" (string) \"active\" for the main chain\n"
- " },\n"
- " {\n"
- " \"height\": xxxx,\n"
- " \"hash\": \"xxxx\",\n"
- " \"branchlen\": 1 (numeric) length of branch connecting the tip to the main chain\n"
- " \"status\": \"xxxx\" (string) status of the chain (active, valid-fork, valid-headers, headers-only, invalid)\n"
- " }\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {{RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "height", "height of the chain tip"},
+ {RPCResult::Type::STR_HEX, "hash", "block hash of the tip"},
+ {RPCResult::Type::NUM, "branchlen", "zero for main chain, otherwise length of branch connecting the tip to the main chain"},
+ {RPCResult::Type::STR, "status", "status of the chain, \"active\" for the main chain\n"
"Possible values for status:\n"
"1. \"invalid\" This branch contains at least one invalid block\n"
"2. \"headers-only\" Not all blocks for this branch are available, but the headers are valid\n"
"3. \"valid-headers\" All blocks are available for this branch, but they were never fully validated\n"
"4. \"valid-fork\" This branch is not part of the active chain, but is fully validated\n"
- "5. \"active\" This is the tip of the active main chain, which is certainly valid\n"
- },
+ "5. \"active\" This is the tip of the active main chain, which is certainly valid"},
+ }}}},
RPCExamples{
HelpExampleCli("getchaintips", "")
+ HelpExampleRpc("getchaintips", "")
@@ -1436,16 +1407,16 @@ static UniValue getmempoolinfo(const JSONRPCRequest& request)
"\nReturns details on the active state of the TX memory pool.\n",
{},
RPCResult{
- "{\n"
- " \"loaded\": true|false (boolean) True if the mempool is fully loaded\n"
- " \"size\": xxxxx, (numeric) Current tx count\n"
- " \"bytes\": xxxxx, (numeric) Sum of all virtual transaction sizes as defined in BIP 141. Differs from actual serialized size because witness data is discounted\n"
- " \"usage\": xxxxx, (numeric) Total memory usage for the mempool\n"
- " \"maxmempool\": xxxxx, (numeric) Maximum memory usage for the mempool\n"
- " \"mempoolminfee\": xxxxx (numeric) Minimum fee rate in " + CURRENCY_UNIT + "/kB for tx to be accepted. Is the maximum of minrelaytxfee and minimum mempool fee\n"
- " \"minrelaytxfee\": xxxxx (numeric) Current minimum relay fee for transactions\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "loaded", "True if the mempool is fully loaded"},
+ {RPCResult::Type::NUM, "size", "Current tx count"},
+ {RPCResult::Type::NUM, "bytes", "Sum of all virtual transaction sizes as defined in BIP 141. Differs from actual serialized size because witness data is discounted"},
+ {RPCResult::Type::NUM, "usage", "Total memory usage for the mempool"},
+ {RPCResult::Type::NUM, "maxmempool", "Maximum memory usage for the mempool"},
+ {RPCResult::Type::STR_AMOUNT, "mempoolminfee", "Minimum fee rate in " + CURRENCY_UNIT + "/kB for tx to be accepted. Is the maximum of minrelaytxfee and minimum mempool fee"},
+ {RPCResult::Type::STR_AMOUNT, "minrelaytxfee", "Current minimum relay fee for transactions"},
+ }},
RPCExamples{
HelpExampleCli("getmempoolinfo", "")
+ HelpExampleRpc("getmempoolinfo", "")
@@ -1464,7 +1435,7 @@ static UniValue preciousblock(const JSONRPCRequest& request)
{
{"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "the hash of the block to mark as precious"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("preciousblock", "\"blockhash\"")
+ HelpExampleRpc("preciousblock", "\"blockhash\"")
@@ -1486,7 +1457,7 @@ static UniValue preciousblock(const JSONRPCRequest& request)
PreciousBlock(state, Params(), pblockindex);
if (!state.IsValid()) {
- throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state));
+ throw JSONRPCError(RPC_DATABASE_ERROR, state.ToString());
}
return NullUniValue;
@@ -1499,7 +1470,7 @@ static UniValue invalidateblock(const JSONRPCRequest& request)
{
{"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "the hash of the block to mark as invalid"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("invalidateblock", "\"blockhash\"")
+ HelpExampleRpc("invalidateblock", "\"blockhash\"")
@@ -1524,7 +1495,7 @@ static UniValue invalidateblock(const JSONRPCRequest& request)
}
if (!state.IsValid()) {
- throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state));
+ throw JSONRPCError(RPC_DATABASE_ERROR, state.ToString());
}
return NullUniValue;
@@ -1538,7 +1509,7 @@ static UniValue reconsiderblock(const JSONRPCRequest& request)
{
{"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "the hash of the block to reconsider"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("reconsiderblock", "\"blockhash\"")
+ HelpExampleRpc("reconsiderblock", "\"blockhash\"")
@@ -1561,7 +1532,7 @@ static UniValue reconsiderblock(const JSONRPCRequest& request)
ActivateBestChain(state, Params());
if (!state.IsValid()) {
- throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state));
+ throw JSONRPCError(RPC_DATABASE_ERROR, state.ToString());
}
return NullUniValue;
@@ -1576,17 +1547,17 @@ static UniValue getchaintxstats(const JSONRPCRequest& request)
{"blockhash", RPCArg::Type::STR_HEX, /* default */ "chain tip", "The hash of the block that ends the window."},
},
RPCResult{
- "{\n"
- " \"time\": xxxxx, (numeric) The timestamp for the final block in the window, expressed in " + UNIX_EPOCH_TIME + ".\n"
- " \"txcount\": xxxxx, (numeric) The total number of transactions in the chain up to that point.\n"
- " \"window_final_block_hash\": \"...\", (string) The hash of the final block in the window.\n"
- " \"window_final_block_height\": xxxxx, (numeric) The height of the final block in the window.\n"
- " \"window_block_count\": xxxxx, (numeric) Size of the window in number of blocks.\n"
- " \"window_tx_count\": xxxxx, (numeric) The number of transactions in the window. Only returned if \"window_block_count\" is > 0.\n"
- " \"window_interval\": xxxxx, (numeric) The elapsed time in the window in seconds. Only returned if \"window_block_count\" is > 0.\n"
- " \"txrate\": x.xx, (numeric) The average rate of transactions per second in the window. Only returned if \"window_interval\" is > 0.\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM_TIME, "time", "The timestamp for the final block in the window, expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::NUM, "txcount", "The total number of transactions in the chain up to that point"},
+ {RPCResult::Type::STR_HEX, "window_final_block_hash", "The hash of the final block in the window"},
+ {RPCResult::Type::NUM, "window_final_block_height", "The height of the final block in the window."},
+ {RPCResult::Type::NUM, "window_block_count", "Size of the window in number of blocks"},
+ {RPCResult::Type::NUM, "window_tx_count", "The number of transactions in the window. Only returned if \"window_block_count\" is > 0"},
+ {RPCResult::Type::NUM, "window_interval", "The elapsed time in the window in seconds. Only returned if \"window_block_count\" is > 0"},
+ {RPCResult::Type::NUM, "txrate", "The average rate of transactions per second in the window. Only returned if \"window_interval\" is > 0"},
+ }},
RPCExamples{
HelpExampleCli("getchaintxstats", "")
+ HelpExampleRpc("getchaintxstats", "2016")
@@ -1715,44 +1686,45 @@ static UniValue getblockstats(const JSONRPCRequest& request)
"stats"},
},
RPCResult{
- "{ (json object)\n"
- " \"avgfee\": xxxxx, (numeric) Average fee in the block\n"
- " \"avgfeerate\": xxxxx, (numeric) Average feerate (in satoshis per virtual byte)\n"
- " \"avgtxsize\": xxxxx, (numeric) Average transaction size\n"
- " \"blockhash\": xxxxx, (string) The block hash (to check for potential reorgs)\n"
- " \"feerate_percentiles\": [ (array of numeric) Feerates at the 10th, 25th, 50th, 75th, and 90th percentile weight unit (in satoshis per virtual byte)\n"
- " \"10th_percentile_feerate\", (numeric) The 10th percentile feerate\n"
- " \"25th_percentile_feerate\", (numeric) The 25th percentile feerate\n"
- " \"50th_percentile_feerate\", (numeric) The 50th percentile feerate\n"
- " \"75th_percentile_feerate\", (numeric) The 75th percentile feerate\n"
- " \"90th_percentile_feerate\", (numeric) The 90th percentile feerate\n"
- " ],\n"
- " \"height\": xxxxx, (numeric) The height of the block\n"
- " \"ins\": xxxxx, (numeric) The number of inputs (excluding coinbase)\n"
- " \"maxfee\": xxxxx, (numeric) Maximum fee in the block\n"
- " \"maxfeerate\": xxxxx, (numeric) Maximum feerate (in satoshis per virtual byte)\n"
- " \"maxtxsize\": xxxxx, (numeric) Maximum transaction size\n"
- " \"medianfee\": xxxxx, (numeric) Truncated median fee in the block\n"
- " \"mediantime\": xxxxx, (numeric) The block median time past\n"
- " \"mediantxsize\": xxxxx, (numeric) Truncated median transaction size\n"
- " \"minfee\": xxxxx, (numeric) Minimum fee in the block\n"
- " \"minfeerate\": xxxxx, (numeric) Minimum feerate (in satoshis per virtual byte)\n"
- " \"mintxsize\": xxxxx, (numeric) Minimum transaction size\n"
- " \"outs\": xxxxx, (numeric) The number of outputs\n"
- " \"subsidy\": xxxxx, (numeric) The block subsidy\n"
- " \"swtotal_size\": xxxxx, (numeric) Total size of all segwit transactions\n"
- " \"swtotal_weight\": xxxxx, (numeric) Total weight of all segwit transactions divided by segwit scale factor (4)\n"
- " \"swtxs\": xxxxx, (numeric) The number of segwit transactions\n"
- " \"time\": xxxxx, (numeric) The block time\n"
- " \"total_out\": xxxxx, (numeric) Total amount in all outputs (excluding coinbase and thus reward [ie subsidy + totalfee])\n"
- " \"total_size\": xxxxx, (numeric) Total size of all non-coinbase transactions\n"
- " \"total_weight\": xxxxx, (numeric) Total weight of all non-coinbase transactions divided by segwit scale factor (4)\n"
- " \"totalfee\": xxxxx, (numeric) The fee total\n"
- " \"txs\": xxxxx, (numeric) The number of transactions (excluding coinbase)\n"
- " \"utxo_increase\": xxxxx, (numeric) The increase/decrease in the number of unspent outputs\n"
- " \"utxo_size_inc\": xxxxx, (numeric) The increase/decrease in size for the utxo index (not discounting op_return and similar)\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "avgfee", "Average fee in the block"},
+ {RPCResult::Type::NUM, "avgfeerate", "Average feerate (in satoshis per virtual byte)"},
+ {RPCResult::Type::NUM, "avgtxsize", "Average transaction size"},
+ {RPCResult::Type::STR_HEX, "blockhash", "The block hash (to check for potential reorgs)"},
+ {RPCResult::Type::ARR_FIXED, "feerate_percentiles", "Feerates at the 10th, 25th, 50th, 75th, and 90th percentile weight unit (in satoshis per virtual byte)",
+ {
+ {RPCResult::Type::NUM, "10th_percentile_feerate", "The 10th percentile feerate"},
+ {RPCResult::Type::NUM, "25th_percentile_feerate", "The 25th percentile feerate"},
+ {RPCResult::Type::NUM, "50th_percentile_feerate", "The 50th percentile feerate"},
+ {RPCResult::Type::NUM, "75th_percentile_feerate", "The 75th percentile feerate"},
+ {RPCResult::Type::NUM, "90th_percentile_feerate", "The 90th percentile feerate"},
+ }},
+ {RPCResult::Type::NUM, "height", "The height of the block"},
+ {RPCResult::Type::NUM, "ins", "The number of inputs (excluding coinbase)"},
+ {RPCResult::Type::NUM, "maxfee", "Maximum fee in the block"},
+ {RPCResult::Type::NUM, "maxfeerate", "Maximum feerate (in satoshis per virtual byte)"},
+ {RPCResult::Type::NUM, "maxtxsize", "Maximum transaction size"},
+ {RPCResult::Type::NUM, "medianfee", "Truncated median fee in the block"},
+ {RPCResult::Type::NUM, "mediantime", "The block median time past"},
+ {RPCResult::Type::NUM, "mediantxsize", "Truncated median transaction size"},
+ {RPCResult::Type::NUM, "minfee", "Minimum fee in the block"},
+ {RPCResult::Type::NUM, "minfeerate", "Minimum feerate (in satoshis per virtual byte)"},
+ {RPCResult::Type::NUM, "mintxsize", "Minimum transaction size"},
+ {RPCResult::Type::NUM, "outs", "The number of outputs"},
+ {RPCResult::Type::NUM, "subsidy", "The block subsidy"},
+ {RPCResult::Type::NUM, "swtotal_size", "Total size of all segwit transactions"},
+ {RPCResult::Type::NUM, "swtotal_weight", "Total weight of all segwit transactions divided by segwit scale factor (4)"},
+ {RPCResult::Type::NUM, "swtxs", "The number of segwit transactions"},
+ {RPCResult::Type::NUM, "time", "The block time"},
+ {RPCResult::Type::NUM, "total_out", "Total amount in all outputs (excluding coinbase and thus reward [ie subsidy + totalfee])"},
+ {RPCResult::Type::NUM, "total_size", "Total size of all non-coinbase transactions"},
+ {RPCResult::Type::NUM, "total_weight", "Total weight of all non-coinbase transactions divided by segwit scale factor (4)"},
+ {RPCResult::Type::NUM, "totalfee", "The fee total"},
+ {RPCResult::Type::NUM, "txs", "The number of transactions (excluding coinbase)"},
+ {RPCResult::Type::NUM, "utxo_increase", "The increase/decrease in the number of unspent outputs"},
+ {RPCResult::Type::NUM, "utxo_size_inc", "The increase/decrease in size for the utxo index (not discounting op_return and similar)"},
+ }},
RPCExamples{
HelpExampleCli("getblockstats", "1000 '[\"minfeerate\",\"avgfeerate\"]'")
+ HelpExampleRpc("getblockstats", "1000 '[\"minfeerate\",\"avgfeerate\"]'")
@@ -1961,7 +1933,7 @@ static UniValue savemempool(const JSONRPCRequest& request)
RPCHelpMan{"savemempool",
"\nDumps the mempool to disk. It will fail until the previous dump is fully loaded.\n",
{},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("savemempool", "")
+ HelpExampleRpc("savemempool", "")
@@ -2075,24 +2047,26 @@ UniValue scantxoutset(const JSONRPCRequest& request)
"[scanobjects,...]"},
},
RPCResult{
- "{\n"
- " \"success\": true|false, (boolean) Whether the scan was completed\n"
- " \"txouts\": n, (numeric) The number of unspent transaction outputs scanned\n"
- " \"height\": n, (numeric) The current block height (index)\n"
- " \"bestblock\": \"hex\", (string) The hash of the block at the tip of the chain\n"
- " \"unspents\": [\n"
- " {\n"
- " \"txid\": \"hash\", (string) The transaction id\n"
- " \"vout\": n, (numeric) The vout value\n"
- " \"scriptPubKey\": \"script\", (string) The script key\n"
- " \"desc\": \"descriptor\", (string) A specialized descriptor for the matched scriptPubKey\n"
- " \"amount\": x.xxx, (numeric) The total amount in " + CURRENCY_UNIT + " of the unspent output\n"
- " \"height\": n, (numeric) Height of the unspent transaction output\n"
- " }\n"
- " ,...],\n"
- " \"total_amount\": x.xxx, (numeric) The total amount of all found unspent outputs in " + CURRENCY_UNIT + "\n"
- "]\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "success", "Whether the scan was completed"},
+ {RPCResult::Type::NUM, "txouts", "The number of unspent transaction outputs scanned"},
+ {RPCResult::Type::NUM, "height", "The current block height (index)"},
+ {RPCResult::Type::STR_HEX, "bestblock", "The hash of the block at the tip of the chain"},
+ {RPCResult::Type::ARR, "unspents", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id"},
+ {RPCResult::Type::NUM, "vout", "The vout value"},
+ {RPCResult::Type::STR_HEX, "scriptPubKey", "The script key"},
+ {RPCResult::Type::STR, "desc", "A specialized descriptor for the matched scriptPubKey"},
+ {RPCResult::Type::STR_AMOUNT, "amount", "The total amount in " + CURRENCY_UNIT + " of the unspent output"},
+ {RPCResult::Type::NUM, "height", "Height of the unspent transaction output"},
+ }},
+ }},
+ {RPCResult::Type::STR_AMOUNT, "total_amount", "The total amount of all found unspent outputs in " + CURRENCY_UNIT},
+ }},
RPCExamples{""},
}.Check(request);
@@ -2198,13 +2172,14 @@ static UniValue getblockfilter(const JSONRPCRequest& request)
{"filtertype", RPCArg::Type::STR, /*default*/ "basic", "The type name of the filter"},
},
RPCResult{
- "{\n"
- " \"filter\" : (string) the hex-encoded filter data\n"
- " \"header\" : (string) the hex-encoded filter header\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "filter", "the hex-encoded filter data"},
+ {RPCResult::Type::STR_HEX, "header", "the hex-encoded filter header"},
+ }},
RPCExamples{
- HelpExampleCli("getblockfilter", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\" \"basic\"")
+ HelpExampleCli("getblockfilter", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\" \"basic\"") +
+ HelpExampleRpc("getblockfilter", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\", \"basic\"")
}
}.Check(request);
@@ -2283,12 +2258,13 @@ UniValue dumptxoutset(const JSONRPCRequest& request)
"path to the output file. If relative, will be prefixed by datadir."},
},
RPCResult{
- "{\n"
- " \"coins_written\": n, (numeric) the number of coins written in the snapshot\n"
- " \"base_hash\": \"...\", (string) the hash of the base of the snapshot\n"
- " \"base_height\": n, (string) the height of the base of the snapshot\n"
- " \"path\": \"...\" (string) the absolute path that the snapshot was written to\n"
- "]\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "coins_written", "the number of coins written in the snapshot"},
+ {RPCResult::Type::STR_HEX, "base_hash", "the hash of the base of the snapshot"},
+ {RPCResult::Type::NUM, "base_height", "the height of the base of the snapshot"},
+ {RPCResult::Type::STR, "path", "the absolute path that the snapshot was written to"},
+ }
},
RPCExamples{
HelpExampleCli("dumptxoutset", "utxo.dat")
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index 2eaa3427eb..c1762483e9 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -27,6 +27,7 @@ public:
static const CRPCConvertParam vRPCConvertParams[] =
{
{ "setmocktime", 0, "timestamp" },
+ { "mockscheduler", 0, "delta_time" },
{ "utxoupdatepsbt", 1, "descriptors" },
{ "generatetoaddress", 0, "nblocks" },
{ "generatetoaddress", 2, "maxtries" },
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 69885546c8..1bbb5c4bee 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -28,7 +28,6 @@
#include <util/fees.h>
#include <util/strencodings.h>
#include <util/system.h>
-#include <util/validation.h>
#include <validation.h>
#include <validationinterface.h>
#include <versionbitsinfo.h>
@@ -90,8 +89,7 @@ static UniValue getnetworkhashps(const JSONRPCRequest& request)
{"height", RPCArg::Type::NUM, /* default */ "-1", "To estimate at the time of the given height."},
},
RPCResult{
- "x (numeric) Hashes per second estimated\n"
- },
+ RPCResult::Type::NUM, "", "Hashes per second estimated"},
RPCExamples{
HelpExampleCli("getnetworkhashps", "")
+ HelpExampleRpc("getnetworkhashps", "")
@@ -154,7 +152,11 @@ static UniValue generatetodescriptor(const JSONRPCRequest& request)
{"maxtries", RPCArg::Type::NUM, /* default */ "1000000", "How many iterations to try."},
},
RPCResult{
- "[ blockhashes ] (array) hashes of blocks generated\n"},
+ RPCResult::Type::ARR, "", "hashes of blocks generated",
+ {
+ {RPCResult::Type::STR_HEX, "", "blockhash"},
+ }
+ },
RPCExamples{
"\nGenerate 11 blocks to mydesc\n" + HelpExampleCli("generatetodescriptor", "11 \"mydesc\"")},
}
@@ -196,8 +198,10 @@ static UniValue generatetoaddress(const JSONRPCRequest& request)
{"maxtries", RPCArg::Type::NUM, /* default */ "1000000", "How many iterations to try."},
},
RPCResult{
- "[ blockhashes ] (array) hashes of blocks generated\n"
- },
+ RPCResult::Type::ARR, "", "hashes of blocks generated",
+ {
+ {RPCResult::Type::STR_HEX, "", "blockhash"},
+ }},
RPCExamples{
"\nGenerate 11 blocks to myaddress\n"
+ HelpExampleCli("generatetoaddress", "11 \"myaddress\"")
@@ -230,17 +234,17 @@ static UniValue getmininginfo(const JSONRPCRequest& request)
"\nReturns a json object containing mining-related information.",
{},
RPCResult{
- "{\n"
- " \"blocks\": nnn, (numeric) The current block\n"
- " \"currentblockweight\": nnn, (numeric, optional) The block weight of the last assembled block (only present if a block was ever assembled)\n"
- " \"currentblocktx\": nnn, (numeric, optional) The number of block transactions of the last assembled block (only present if a block was ever assembled)\n"
- " \"difficulty\": xxx.xxxxx (numeric) The current difficulty\n"
- " \"networkhashps\": nnn, (numeric) The network hashes per second\n"
- " \"pooledtx\": n (numeric) The size of the mempool\n"
- " \"chain\": \"xxxx\", (string) current network name (main, test, regtest)\n"
- " \"warnings\": \"...\" (string) any network and blockchain warnings\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "blocks", "The current block"},
+ {RPCResult::Type::NUM, "currentblockweight", /* optional */ true, "The block weight of the last assembled block (only present if a block was ever assembled)"},
+ {RPCResult::Type::NUM, "currentblocktx", /* optional */ true, "The number of block transactions of the last assembled block (only present if a block was ever assembled)"},
+ {RPCResult::Type::NUM, "difficulty", "The current difficulty"},
+ {RPCResult::Type::NUM, "networkhashps", "The network hashes per second"},
+ {RPCResult::Type::NUM, "pooledtx", "The size of the mempool"},
+ {RPCResult::Type::STR, "chain", "current network name (main, test, regtest)"},
+ {RPCResult::Type::STR, "warnings", "any network and blockchain warnings"},
+ }},
RPCExamples{
HelpExampleCli("getmininginfo", "")
+ HelpExampleRpc("getmininginfo", "")
@@ -278,8 +282,7 @@ static UniValue prioritisetransaction(const JSONRPCRequest& request)
" considers the transaction as it would have paid a higher (or lower) fee."},
},
RPCResult{
- "true (boolean) Returns true\n"
- },
+ RPCResult::Type::BOOL, "", "Returns true"},
RPCExamples{
HelpExampleCli("prioritisetransaction", "\"txid\" 0.0 10000")
+ HelpExampleRpc("prioritisetransaction", "\"txid\", 0.0, 10000")
@@ -307,7 +310,7 @@ static UniValue BIP22ValidationResult(const BlockValidationState& state)
return NullUniValue;
if (state.IsError())
- throw JSONRPCError(RPC_VERIFY_ERROR, FormatStateMessage(state));
+ throw JSONRPCError(RPC_VERIFY_ERROR, state.ToString());
if (state.IsInvalid())
{
std::string strRejectReason = state.GetRejectReason();
@@ -339,7 +342,7 @@ static UniValue getblocktemplate(const JSONRPCRequest& request)
" https://github.com/bitcoin/bips/blob/master/bip-0009.mediawiki#getblocktemplate_changes\n"
" https://github.com/bitcoin/bips/blob/master/bip-0145.mediawiki\n",
{
- {"template_request", RPCArg::Type::OBJ, "{}", "A json object in the following spec",
+ {"template_request", RPCArg::Type::OBJ, "{}", "Format of the template",
{
{"mode", RPCArg::Type::STR, /* treat as named arg */ RPCArg::Optional::OMITTED_NAMED_ARG, "This must be set to \"template\", \"proposal\" (see BIP 23), or omitted"},
{"capabilities", RPCArg::Type::ARR, /* treat as named arg */ RPCArg::Optional::OMITTED_NAMED_ARG, "A list of strings",
@@ -356,48 +359,58 @@ static UniValue getblocktemplate(const JSONRPCRequest& request)
"\"template_request\""},
},
RPCResult{
- "{\n"
- " \"version\" : n, (numeric) The preferred block version\n"
- " \"rules\" : [ \"rulename\", ... ], (array of strings) specific block rules that are to be enforced\n"
- " \"vbavailable\" : { (json object) set of pending, supported versionbit (BIP 9) softfork deployments\n"
- " \"rulename\" : bitnumber (numeric) identifies the bit number as indicating acceptance and readiness for the named softfork rule\n"
- " ,...\n"
- " },\n"
- " \"vbrequired\" : n, (numeric) bit mask of versionbits the server requires set in submissions\n"
- " \"previousblockhash\" : \"xxxx\", (string) The hash of current highest block\n"
- " \"transactions\" : [ (array) contents of non-coinbase transactions that should be included in the next block\n"
- " {\n"
- " \"data\" : \"xxxx\", (string) transaction data encoded in hexadecimal (byte-for-byte)\n"
- " \"txid\" : \"xxxx\", (string) transaction id encoded in little-endian hexadecimal\n"
- " \"hash\" : \"xxxx\", (string) hash encoded in little-endian hexadecimal (including witness data)\n"
- " \"depends\" : [ (array) array of numbers \n"
- " n (numeric) transactions before this one (by 1-based index in 'transactions' list) that must be present in the final block if this one is\n"
- " ,...\n"
- " ],\n"
- " \"fee\": n, (numeric) difference in value between transaction inputs and outputs (in satoshis); for coinbase transactions, this is a negative Number of the total collected block fees (ie, not including the block subsidy); if key is not present, fee is unknown and clients MUST NOT assume there isn't one\n"
- " \"sigops\" : n, (numeric) total SigOps cost, as counted for purposes of block limits; if key is not present, sigop cost is unknown and clients MUST NOT assume it is zero\n"
- " \"weight\" : n, (numeric) total transaction weight, as counted for purposes of block limits\n"
- " }\n"
- " ,...\n"
- " ],\n"
- " \"coinbaseaux\" : { ... }, (json object) data that should be included in the coinbase's scriptSig content\n"
- " \"coinbasevalue\" : n, (numeric) maximum allowable input to coinbase transaction, including the generation award and transaction fees (in satoshis)\n"
- " \"coinbasetxn\" : { ... }, (json object) information for coinbase transaction\n"
- " \"target\" : \"xxxx\", (string) The hash target\n"
- " \"mintime\" : xxx, (numeric) The minimum timestamp appropriate for the next block time, expressed in " + UNIX_EPOCH_TIME + "\n"
- " \"mutable\" : [ (array of string) list of ways the block template may be changed \n"
- " \"value\" (string) A way the block template may be changed, e.g. 'time', 'transactions', 'prevblock'\n"
- " ,...\n"
- " ],\n"
- " \"noncerange\" : \"00000000ffffffff\",(string) A range of valid nonces\n"
- " \"sigoplimit\" : n, (numeric) limit of sigops in blocks\n"
- " \"sizelimit\" : n, (numeric) limit of block size\n"
- " \"weightlimit\" : n, (numeric) limit of block weight\n"
- " \"curtime\" : ttt, (numeric) current timestamp in " + UNIX_EPOCH_TIME + "\n"
- " \"bits\" : \"xxxxxxxx\", (string) compressed target of next block\n"
- " \"height\" : n (numeric) The height of the next block\n"
- "}\n"
- },
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "version", "The preferred block version"},
+ {RPCResult::Type::ARR, "rules", "specific block rules that are to be enforced",
+ {
+ {RPCResult::Type::STR, "", "rulename"},
+ }},
+ {RPCResult::Type::OBJ_DYN, "vbavailable", "set of pending, supported versionbit (BIP 9) softfork deployments",
+ {
+ {RPCResult::Type::NUM, "rulename", "identifies the bit number as indicating acceptance and readiness for the named softfork rule"},
+ }},
+ {RPCResult::Type::NUM, "vbrequired", "bit mask of versionbits the server requires set in submissions"},
+ {RPCResult::Type::STR, "previousblockhash", "The hash of current highest block"},
+ {RPCResult::Type::ARR, "", "contents of non-coinbase transactions that should be included in the next block",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "data", "transaction data encoded in hexadecimal (byte-for-byte)"},
+ {RPCResult::Type::STR_HEX, "txid", "transaction id encoded in little-endian hexadecimal"},
+ {RPCResult::Type::STR_HEX, "hash", "hash encoded in little-endian hexadecimal (including witness data)"},
+ {RPCResult::Type::ARR, "depends", "array of numbers",
+ {
+ {RPCResult::Type::NUM, "", "transactions before this one (by 1-based index in 'transactions' list) that must be present in the final block if this one is"},
+ }},
+ {RPCResult::Type::NUM, "fee", "difference in value between transaction inputs and outputs (in satoshis); for coinbase transactions, this is a negative Number of the total collected block fees (ie, not including the block subsidy); if key is not present, fee is unknown and clients MUST NOT assume there isn't one"},
+ {RPCResult::Type::NUM, "sigops", "total SigOps cost, as counted for purposes of block limits; if key is not present, sigop cost is unknown and clients MUST NOT assume it is zero"},
+ {RPCResult::Type::NUM, "weight", "total transaction weight, as counted for purposes of block limits"},
+ }},
+ }},
+ {RPCResult::Type::OBJ, "coinbaseaux", "data that should be included in the coinbase's scriptSig content",
+ {
+ {RPCResult::Type::ELISION, "", ""},
+ }},
+ {RPCResult::Type::NUM, "coinbasevalue", "maximum allowable input to coinbase transaction, including the generation award and transaction fees (in satoshis)"},
+ {RPCResult::Type::OBJ, "coinbasetxn", "information for coinbase transaction",
+ {
+ {RPCResult::Type::ELISION, "", ""},
+ }},
+ {RPCResult::Type::STR, "target", "The hash target"},
+ {RPCResult::Type::NUM_TIME, "mintime", "The minimum timestamp appropriate for the next block time, expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::ARR, "mutable", "list of ways the block template may be changed",
+ {
+ {RPCResult::Type::STR, "value", "A way the block template may be changed, e.g. 'time', 'transactions', 'prevblock'"},
+ }},
+ {RPCResult::Type::STR_HEX, "noncerange", "A range of valid nonces"},
+ {RPCResult::Type::NUM, "sigoplimit", "limit of sigops in blocks"},
+ {RPCResult::Type::NUM, "sizelimit", "limit of block size"},
+ {RPCResult::Type::NUM, "weightlimit", "limit of block weight"},
+ {RPCResult::Type::NUM_TIME, "curtime", "current timestamp in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::STR, "bits", "compressed target of next block"},
+ {RPCResult::Type::NUM, "height", "The height of the next block"},
+ }},
RPCExamples{
HelpExampleCli("getblocktemplate", "'{\"rules\": [\"segwit\"]}'")
+ HelpExampleRpc("getblocktemplate", "{\"rules\": [\"segwit\"]}")
@@ -738,7 +751,7 @@ static UniValue submitblock(const JSONRPCRequest& request)
{"hexdata", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "the hex-encoded block data to submit"},
{"dummy", RPCArg::Type::STR, /* default */ "ignored", "dummy value, for compatibility with BIP22. This value is ignored."},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", "Returns JSON Null when valid, a string according to BIP22 otherwise"},
RPCExamples{
HelpExampleCli("submitblock", "\"mydata\"")
+ HelpExampleRpc("submitblock", "\"mydata\"")
@@ -800,8 +813,7 @@ static UniValue submitheader(const JSONRPCRequest& request)
{"hexdata", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "the hex-encoded block header data"},
},
RPCResult{
- "None"
- },
+ RPCResult::Type::NONE, "", "None"},
RPCExamples{
HelpExampleCli("submitheader", "\"aabbcc\"") +
HelpExampleRpc("submitheader", "\"aabbcc\"")
@@ -823,7 +835,7 @@ static UniValue submitheader(const JSONRPCRequest& request)
ProcessNewBlockHeaders({h}, state, Params());
if (state.IsValid()) return NullUniValue;
if (state.IsError()) {
- throw JSONRPCError(RPC_VERIFY_ERROR, FormatStateMessage(state));
+ throw JSONRPCError(RPC_VERIFY_ERROR, state.ToString());
}
throw JSONRPCError(RPC_VERIFY_ERROR, state.GetRejectReason());
}
@@ -848,17 +860,19 @@ static UniValue estimatesmartfee(const JSONRPCRequest& request)
" \"CONSERVATIVE\""},
},
RPCResult{
- "{\n"
- " \"feerate\" : x.x, (numeric, optional) estimate fee rate in " + CURRENCY_UNIT + "/kB\n"
- " \"errors\": [ str... ] (json array of strings, optional) Errors encountered during processing\n"
- " \"blocks\" : n (numeric) block number where estimate was found\n"
- "}\n"
- "\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "feerate", /* optional */ true, "estimate fee rate in " + CURRENCY_UNIT + "/kB (only present if no errors were encountered)"},
+ {RPCResult::Type::ARR, "errors", "Errors encountered during processing",
+ {
+ {RPCResult::Type::STR, "", "error"},
+ }},
+ {RPCResult::Type::NUM, "blocks", "block number where estimate was found\n"
"The request target will be clamped between 2 and the highest target\n"
"fee estimation is able to return based on how long it has been running.\n"
"An error is returned if not enough transactions and blocks\n"
- "have been observed to make an estimate for any number of blocks.\n"
- },
+ "have been observed to make an estimate for any number of blocks."},
+ }},
RPCExamples{
HelpExampleCli("estimatesmartfee", "6")
},
@@ -908,36 +922,40 @@ static UniValue estimaterawfee(const JSONRPCRequest& request)
" lower buckets."},
},
RPCResult{
- "{ (json object) Results are returned for any horizon which tracks blocks up to the confirmation target\n"
- " \"short\" : { (json object, optional) estimate for short time horizon\n"
- " \"feerate\" : x.x, (numeric, optional) estimate fee rate in " + CURRENCY_UNIT + "/kB\n"
- " \"decay\" : x.x, (numeric) exponential decay (per block) for historical moving average of confirmation data\n"
- " \"scale\" : x, (numeric) The resolution of confirmation targets at this time horizon\n"
- " \"pass\" : { (json object, optional) information about the lowest range of feerates to succeed in meeting the threshold\n"
- " \"startrange\" : x.x, (numeric) start of feerate range\n"
- " \"endrange\" : x.x, (numeric) end of feerate range\n"
- " \"withintarget\" : x.x, (numeric) number of txs over history horizon in the feerate range that were confirmed within target\n"
- " \"totalconfirmed\" : x.x, (numeric) number of txs over history horizon in the feerate range that were confirmed at any point\n"
- " \"inmempool\" : x.x, (numeric) current number of txs in mempool in the feerate range unconfirmed for at least target blocks\n"
- " \"leftmempool\" : x.x, (numeric) number of txs over history horizon in the feerate range that left mempool unconfirmed after target\n"
- " },\n"
- " \"fail\" : { (json object, optional) information about the highest range of feerates to fail to meet the threshold\n"
- " ...\n"
- " },\n"
- " \"errors\": [ (json array, optional) Errors encountered during processing\n"
- " \"str\", (string)\n"
- " ...\n"
- " ],\n"
- " },\n"
- " \"medium\" : { (json object, optional) estimate for medium time horizon\n"
- " ...\n"
- " },\n"
- " \"long\" : { (json object, optional) estimate for long time horizon\n"
- " ...\n"
- " },\n"
- "}\n"
- "\n"
- },
+ RPCResult::Type::OBJ, "", "Results are returned for any horizon which tracks blocks up to the confirmation target",
+ {
+ {RPCResult::Type::OBJ, "short", /* optional */ true, "estimate for short time horizon",
+ {
+ {RPCResult::Type::NUM, "feerate", /* optional */ true, "estimate fee rate in " + CURRENCY_UNIT + "/kB"},
+ {RPCResult::Type::NUM, "decay", "exponential decay (per block) for historical moving average of confirmation data"},
+ {RPCResult::Type::NUM, "scale", "The resolution of confirmation targets at this time horizon"},
+ {RPCResult::Type::OBJ, "pass", /* optional */ true, "information about the lowest range of feerates to succeed in meeting the threshold",
+ {
+ {RPCResult::Type::NUM, "startrange", "start of feerate range"},
+ {RPCResult::Type::NUM, "endrange", "end of feerate range"},
+ {RPCResult::Type::NUM, "withintarget", "number of txs over history horizon in the feerate range that were confirmed within target"},
+ {RPCResult::Type::NUM, "totalconfirmed", "number of txs over history horizon in the feerate range that were confirmed at any point"},
+ {RPCResult::Type::NUM, "inmempool", "current number of txs in mempool in the feerate range unconfirmed for at least target blocks"},
+ {RPCResult::Type::NUM, "leftmempool", "number of txs over history horizon in the feerate range that left mempool unconfirmed after target"},
+ }},
+ {RPCResult::Type::OBJ, "fail", /* optional */ true, "information about the highest range of feerates to fail to meet the threshold",
+ {
+ {RPCResult::Type::ELISION, "", ""},
+ }},
+ {RPCResult::Type::ARR, "errors", /* optional */ true, "Errors encountered during processing",
+ {
+ {RPCResult::Type::STR, "error", ""},
+ }},
+ }},
+ {RPCResult::Type::OBJ, "medium", /* optional */ true, "estimate for medium time horizon",
+ {
+ {RPCResult::Type::ELISION, "", ""},
+ }},
+ {RPCResult::Type::OBJ, "long", /* optional */ true, "estimate for long time horizon",
+ {
+ {RPCResult::Type::ELISION, "", ""},
+ }},
+ }},
RPCExamples{
HelpExampleCli("estimaterawfee", "6 0.9")
},
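
The hunk above replaces estimaterawfee's hand-written help string with a typed RPCResult tree. As a rough illustration of the pattern only (not part of this diff), a hypothetical result with one plain scalar, one optional scalar, and one nested object could be declared as below; the constructor shape (type, key name, optional optional-flag, description, inner results) is inferred from the call sites in this patch, not from a header shown here.

    // Hypothetical example of the typed form used throughout this patch.
    // The key names here are made up for illustration only.
    const RPCResult example{
        RPCResult::Type::OBJ, "", "",
        {
            {RPCResult::Type::NUM, "count", "number of matching entries"},
            {RPCResult::Type::STR, "warning", /* optional */ true, "only present when something went wrong"},
            {RPCResult::Type::OBJ, "details", "per-entry information",
            {
                {RPCResult::Type::NUM_TIME, "time", "a UNIX epoch timestamp"},
                {RPCResult::Type::ELISION, "", ""},
            }},
        }};
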
diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp
index 56bd33b0ec..c87c1a5418 100644
--- a/src/rpc/misc.cpp
+++ b/src/rpc/misc.cpp
@@ -5,15 +5,17 @@
#include <httpserver.h>
#include <key_io.h>
+#include <node/context.h>
#include <outputtype.h>
#include <rpc/blockchain.h>
#include <rpc/server.h>
#include <rpc/util.h>
+#include <scheduler.h>
#include <script/descriptor.h>
#include <util/check.h>
+#include <util/message.h> // For MessageSign(), MessageVerify()
#include <util/strencodings.h>
#include <util/system.h>
-#include <util/validation.h>
#include <stdint.h>
#include <tuple>
@@ -31,19 +33,20 @@ static UniValue validateaddress(const JSONRPCRequest& request)
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The bitcoin address to validate"},
},
RPCResult{
- "{\n"
- " \"isvalid\" : true|false, (boolean) If the address is valid or not. If not, this is the only property returned.\n"
- " \"address\" : \"address\", (string) The bitcoin address validated\n"
- " \"scriptPubKey\" : \"hex\", (string) The hex-encoded scriptPubKey generated by the address\n"
- " \"isscript\" : true|false, (boolean) If the key is a script\n"
- " \"iswitness\" : true|false, (boolean) If the address is a witness address\n"
- " \"witness_version\" : version (numeric, optional) The version number of the witness program\n"
- " \"witness_program\" : \"hex\" (string, optional) The hex value of the witness program\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "isvalid", "If the address is valid or not. If not, this is the only property returned."},
+ {RPCResult::Type::STR, "address", "The bitcoin address validated"},
+ {RPCResult::Type::STR_HEX, "scriptPubKey", "The hex-encoded scriptPubKey generated by the address"},
+ {RPCResult::Type::BOOL, "isscript", "If the key is a script"},
+ {RPCResult::Type::BOOL, "iswitness", "If the address is a witness address"},
+ {RPCResult::Type::NUM, "witness_version", /* optional */ true, "The version number of the witness program"},
+ {RPCResult::Type::STR_HEX, "witness_program", /* optional */ true, "The hex value of the witness program"},
+ }
},
RPCExamples{
- HelpExampleCli("validateaddress", "\"1PSSGeFHDnKNxiEyFrD1wcEaHr9hrQDDWc\"")
- + HelpExampleRpc("validateaddress", "\"1PSSGeFHDnKNxiEyFrD1wcEaHr9hrQDDWc\"")
+ HelpExampleCli("validateaddress", "\"" + EXAMPLE_ADDRESS[0] + "\"") +
+ HelpExampleRpc("validateaddress", "\"" + EXAMPLE_ADDRESS[0] + "\"")
},
}.Check(request);
@@ -73,17 +76,19 @@ static UniValue createmultisig(const JSONRPCRequest& request)
"It returns a json object with the address and redeemScript.\n",
{
{"nrequired", RPCArg::Type::NUM, RPCArg::Optional::NO, "The number of required signatures out of the n keys."},
- {"keys", RPCArg::Type::ARR, RPCArg::Optional::NO, "A json array of hex-encoded public keys.",
+ {"keys", RPCArg::Type::ARR, RPCArg::Optional::NO, "The hex-encoded public keys.",
{
{"key", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "The hex-encoded public key"},
}},
{"address_type", RPCArg::Type::STR, /* default */ "legacy", "The address type to use. Options are \"legacy\", \"p2sh-segwit\", and \"bech32\"."},
},
RPCResult{
- "{\n"
- " \"address\":\"multisigaddress\", (string) The value of the new multisig address.\n"
- " \"redeemScript\":\"script\" (string) The string value of the hex-encoded redemption script.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "address", "The value of the new multisig address."},
+ {RPCResult::Type::STR_HEX, "redeemScript", "The string value of the hex-encoded redemption script."},
+ {RPCResult::Type::STR, "descriptor", "The descriptor for this multisig"},
+ }
},
RPCExamples{
"\nCreate a multisig address from 2 public keys\n"
@@ -119,9 +124,13 @@ static UniValue createmultisig(const JSONRPCRequest& request)
CScript inner;
const CTxDestination dest = AddAndGetMultisigDestination(required, pubkeys, output_type, keystore, inner);
+ // Make the descriptor
+ std::unique_ptr<Descriptor> descriptor = InferDescriptor(GetScriptForDestination(dest), keystore);
+
UniValue result(UniValue::VOBJ);
result.pushKV("address", EncodeDestination(dest));
result.pushKV("redeemScript", HexStr(inner.begin(), inner.end()));
+ result.pushKV("descriptor", descriptor->ToString());
return result;
}
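
For reference, the new "descriptor" field above comes from inferring an output descriptor over the freshly created multisig destination. A minimal sketch of the same two calls follows; the helper name is hypothetical, and SigningProvider as the provider type is an assumption (this hunk only shows the keystore variable, not its type).

    // Sketch only: derive the descriptor string for any destination,
    // mirroring the InferDescriptor/ToString calls added above.
    std::string DescriptorForDestination(const CTxDestination& dest, const SigningProvider& keystore)
    {
        std::unique_ptr<Descriptor> desc = InferDescriptor(GetScriptForDestination(dest), keystore);
        return desc->ToString();
    }
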
@@ -134,13 +143,14 @@ UniValue getdescriptorinfo(const JSONRPCRequest& request)
{"descriptor", RPCArg::Type::STR, RPCArg::Optional::NO, "The descriptor."},
},
RPCResult{
- "{\n"
- " \"descriptor\" : \"desc\", (string) The descriptor in canonical form, without private keys\n"
- " \"checksum\" : \"chksum\", (string) The checksum for the input descriptor\n"
- " \"isrange\" : true|false, (boolean) Whether the descriptor is ranged\n"
- " \"issolvable\" : true|false, (boolean) Whether the descriptor is solvable\n"
- " \"hasprivatekeys\" : true|false, (boolean) Whether the input descriptor contained at least one private key\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "descriptor", "The descriptor in canonical form, without private keys"},
+ {RPCResult::Type::STR, "checksum", "The checksum for the input descriptor"},
+ {RPCResult::Type::BOOL, "isrange", "Whether the descriptor is ranged"},
+ {RPCResult::Type::BOOL, "issolvable", "Whether the descriptor is solvable"},
+ {RPCResult::Type::BOOL, "hasprivatekeys", "Whether the input descriptor contained at least one private key"},
+ }
},
RPCExamples{
"Analyse a descriptor\n" +
@@ -182,7 +192,10 @@ UniValue deriveaddresses(const JSONRPCRequest& request)
{"range", RPCArg::Type::RANGE, RPCArg::Optional::OMITTED_NAMED_ARG, "If a ranged descriptor is used, this specifies the end or the range (in [begin,end] notation) to derive."},
},
RPCResult{
- "[ address ] (array) the derived addresses\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::STR, "address", "the derived addresses"},
+ }
},
RPCExamples{
"First three native segwit receive addresses\n" +
@@ -251,7 +264,7 @@ static UniValue verifymessage(const JSONRPCRequest& request)
{"message", RPCArg::Type::STR, RPCArg::Optional::NO, "The message that was signed."},
},
RPCResult{
- "true|false (boolean) If the signature is verified or not.\n"
+ RPCResult::Type::BOOL, "", "If the signature is verified or not."
},
RPCExamples{
"\nUnlock the wallet for 30 seconds\n"
@@ -271,31 +284,21 @@ static UniValue verifymessage(const JSONRPCRequest& request)
std::string strSign = request.params[1].get_str();
std::string strMessage = request.params[2].get_str();
- CTxDestination destination = DecodeDestination(strAddress);
- if (!IsValidDestination(destination)) {
+ switch (MessageVerify(strAddress, strSign, strMessage)) {
+ case MessageVerificationResult::ERR_INVALID_ADDRESS:
throw JSONRPCError(RPC_TYPE_ERROR, "Invalid address");
- }
-
- const PKHash *pkhash = boost::get<PKHash>(&destination);
- if (!pkhash) {
+ case MessageVerificationResult::ERR_ADDRESS_NO_KEY:
throw JSONRPCError(RPC_TYPE_ERROR, "Address does not refer to key");
- }
-
- bool fInvalid = false;
- std::vector<unsigned char> vchSig = DecodeBase64(strSign.c_str(), &fInvalid);
-
- if (fInvalid)
+ case MessageVerificationResult::ERR_MALFORMED_SIGNATURE:
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Malformed base64 encoding");
-
- CHashWriter ss(SER_GETHASH, 0);
- ss << strMessageMagic;
- ss << strMessage;
-
- CPubKey pubkey;
- if (!pubkey.RecoverCompact(ss.GetHash(), vchSig))
+ case MessageVerificationResult::ERR_PUBKEY_NOT_RECOVERED:
+ case MessageVerificationResult::ERR_NOT_SIGNED:
return false;
+ case MessageVerificationResult::OK:
+ return true;
+ }
- return (pubkey.GetID() == *pkhash);
+ return false;
}
static UniValue signmessagewithprivkey(const JSONRPCRequest& request)
@@ -307,7 +310,7 @@ static UniValue signmessagewithprivkey(const JSONRPCRequest& request)
{"message", RPCArg::Type::STR, RPCArg::Optional::NO, "The message to create a signature of."},
},
RPCResult{
- "\"signature\" (string) The signature of the message encoded in base 64\n"
+ RPCResult::Type::STR, "signature", "The signature of the message encoded in base 64"
},
RPCExamples{
"\nCreate the signature\n"
@@ -327,15 +330,13 @@ static UniValue signmessagewithprivkey(const JSONRPCRequest& request)
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid private key");
}
- CHashWriter ss(SER_GETHASH, 0);
- ss << strMessageMagic;
- ss << strMessage;
+ std::string signature;
- std::vector<unsigned char> vchSig;
- if (!key.SignCompact(ss.GetHash(), vchSig))
+ if (!MessageSign(key, strMessage, signature)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Sign failed");
+ }
- return EncodeBase64(vchSig.data(), vchSig.size());
+ return signature;
}
static UniValue setmocktime(const JSONRPCRequest& request)
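
The rewritten verifymessage and signmessagewithprivkey above delegate to MessageVerify() and MessageSign() from util/message.h (included earlier in this diff) and only map the results to RPC errors. A hedged round-trip sketch, with the argument order taken from the call sites above and the helper name invented for illustration:

    #include <key.h>
    #include <key_io.h>
    #include <util/message.h>

    // Sketch: sign with a CKey, then check the signature against the matching
    // P2PKH address, using only calls that appear in this patch (plus
    // EncodeDestination/PKHash, which appear in other hunks of this file).
    bool SignAndVerifyRoundTrip(const CKey& key, const std::string& message)
    {
        std::string signature;
        if (!MessageSign(key, message, signature)) return false;
        const std::string address = EncodeDestination(PKHash(key.GetPubKey()));
        return MessageVerify(address, signature, message) == MessageVerificationResult::OK;
    }
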
@@ -346,12 +347,13 @@ static UniValue setmocktime(const JSONRPCRequest& request)
{"timestamp", RPCArg::Type::NUM, RPCArg::Optional::NO, UNIX_EPOCH_TIME + "\n"
" Pass 0 to go back to using the system time."},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{""},
}.Check(request);
- if (!Params().MineBlocksOnDemand())
- throw std::runtime_error("setmocktime for regression testing (-regtest mode) only");
+ if (!Params().IsMockableChain()) {
+ throw std::runtime_error("setmocktime is for regression testing (-regtest mode) only");
+ }
// For now, don't change mocktime if we're in the middle of validation, as
// this could have an effect on mempool time-based eviction, as well as
@@ -366,6 +368,36 @@ static UniValue setmocktime(const JSONRPCRequest& request)
return NullUniValue;
}
+static UniValue mockscheduler(const JSONRPCRequest& request)
+{
+ RPCHelpMan{"mockscheduler",
+ "\nBump the scheduler into the future (-regtest only)\n",
+ {
+ {"delta_time", RPCArg::Type::NUM, RPCArg::Optional::NO, "Number of seconds to forward the scheduler into the future." },
+ },
+ RPCResult{RPCResult::Type::NONE, "", ""},
+ RPCExamples{""},
+ }.Check(request);
+
+ if (!Params().IsMockableChain()) {
+ throw std::runtime_error("mockscheduler is for regression testing (-regtest mode) only");
+ }
+
+ // check params are valid values
+ RPCTypeCheck(request.params, {UniValue::VNUM});
+ int64_t delta_seconds = request.params[0].get_int64();
+ if ((delta_seconds <= 0) || (delta_seconds > 3600)) {
+ throw std::runtime_error("delta_time must be between 1 and 3600 seconds (1 hr)");
+ }
+
+ // protect against null pointer dereference
+ CHECK_NONFATAL(g_rpc_node);
+ CHECK_NONFATAL(g_rpc_node->scheduler);
+ g_rpc_node->scheduler->MockForward(std::chrono::seconds(delta_seconds));
+
+ return NullUniValue;
+}
+
static UniValue RPCLockedMemoryInfo()
{
LockedPool::Stats stats = LockedPoolManager::Instance().stats();
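
The new mockscheduler RPC above validates the delta and then nudges the node's scheduler forward with MockForward(). A sketch of the same guard-and-forward pattern as a free function; the NodeContext name is inferred from the node/context.h include and the g_rpc_node usage above, and the concrete scheduler type is not shown in this hunk.

    #include <node/context.h>
    #include <util/check.h>

    #include <chrono>
    #include <cstdint>
    #include <stdexcept>

    // Sketch: fast-forward the scheduler by a bounded number of seconds,
    // mirroring the checks in mockscheduler() above.
    void FastForwardScheduler(NodeContext& node, int64_t delta_seconds)
    {
        if (delta_seconds <= 0 || delta_seconds > 3600) {
            throw std::runtime_error("delta_time must be between 1 and 3600 seconds (1 hr)");
        }
        CHECK_NONFATAL(node.scheduler);
        node.scheduler->MockForward(std::chrono::seconds{delta_seconds});
    }
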
@@ -412,19 +444,21 @@ static UniValue getmemoryinfo(const JSONRPCRequest& request)
},
{
RPCResult{"mode \"stats\"",
- "{\n"
- " \"locked\": { (json object) Information about locked memory manager\n"
- " \"used\": xxxxx, (numeric) Number of bytes used\n"
- " \"free\": xxxxx, (numeric) Number of bytes available in current arenas\n"
- " \"total\": xxxxxxx, (numeric) Total number of bytes managed\n"
- " \"locked\": xxxxxx, (numeric) Amount of bytes that succeeded locking. If this number is smaller than total, locking pages failed at some point and key data could be swapped to disk.\n"
- " \"chunks_used\": xxxxx, (numeric) Number allocated chunks\n"
- " \"chunks_free\": xxxxx, (numeric) Number unused chunks\n"
- " }\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ, "locked", "Information about locked memory manager",
+ {
+ {RPCResult::Type::NUM, "used", "Number of bytes used"},
+ {RPCResult::Type::NUM, "free", "Number of bytes available in current arenas"},
+ {RPCResult::Type::NUM, "total", "Total number of bytes managed"},
+ {RPCResult::Type::NUM, "locked", "Amount of bytes that succeeded locking. If this number is smaller than total, locking pages failed at some point and key data could be swapped to disk."},
+ {RPCResult::Type::NUM, "chunks_used", "Number allocated chunks"},
+ {RPCResult::Type::NUM, "chunks_free", "Number unused chunks"},
+ }},
+ }
},
RPCResult{"mode \"mallocinfo\"",
- "\"<malloc version=\"1\">...\"\n"
+ RPCResult::Type::STR, "", "\"<malloc version=\"1\">...\""
},
},
RPCExamples{
@@ -481,20 +515,20 @@ UniValue logging(const JSONRPCRequest& request)
" - \"none\", \"0\" : even if other logging categories are specified, ignore all of them.\n"
,
{
- {"include", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "A json array of categories to add debug logging",
+ {"include", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "The categories to add to debug logging",
{
{"include_category", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "the valid logging category"},
}},
- {"exclude", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "A json array of categories to remove debug logging",
+ {"exclude", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "The categories to remove from debug logging",
{
{"exclude_category", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "the valid logging category"},
}},
},
RPCResult{
- "{ (json object where keys are the logging categories, and values indicates its status\n"
- " \"category\": true|false, (bool) if being debug logged or not. false:inactive, true:active\n"
- " ...\n"
- "}\n"
+ RPCResult::Type::OBJ_DYN, "", "keys are the logging categories, and values indicates its status",
+ {
+ {RPCResult::Type::BOOL, "category", "if being debug logged or not. false:inactive, true:active"},
+ }
},
RPCExamples{
HelpExampleCli("logging", "\"[\\\"all\\\"]\" \"[\\\"http\\\"]\"")
@@ -545,7 +579,7 @@ static UniValue echo(const JSONRPCRequest& request)
"\nThe difference between echo and echojson is that echojson has argument conversion enabled in the client-side table in "
"bitcoin-cli and the GUI. There is no server-side difference.",
{},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", "Returns whatever was passed in"},
RPCExamples{""},
}.ToString()
);
@@ -570,6 +604,7 @@ static const CRPCCommand commands[] =
/* Not shown in help */
{ "hidden", "setmocktime", &setmocktime, {"timestamp"}},
+ { "hidden", "mockscheduler", &mockscheduler, {"delta_time"}},
{ "hidden", "echo", &echo, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}},
{ "hidden", "echojson", &echo, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}},
};
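
The new "mockscheduler" row only becomes callable once this table is appended to the server's dispatch table; that registration loop is outside this hunk, but it typically has the shape sketched below (the function name and loop form are assumptions, not taken from this diff).

    // Sketch of the registration that usually follows such a table:
    // CRPCTable::appendCommand wires each command name to its handler.
    void RegisterMiscRPCCommands(CRPCTable& t)
    {
        for (const CRPCCommand& c : commands) {
            t.appendCommand(c.name, &c);
        }
    }
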
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 42aec08b45..caa62ca958 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -20,6 +20,7 @@
#include <sync.h>
#include <timedata.h>
#include <util/strencodings.h>
+#include <util/string.h>
#include <util/system.h>
#include <validation.h>
#include <version.h>
@@ -33,7 +34,7 @@ static UniValue getconnectioncount(const JSONRPCRequest& request)
"\nReturns the number of connections to other nodes.\n",
{},
RPCResult{
- "n (numeric) The connection count\n"
+ RPCResult::Type::NUM, "", "The connection count"
},
RPCExamples{
HelpExampleCli("getconnectioncount", "")
@@ -54,7 +55,7 @@ static UniValue ping(const JSONRPCRequest& request)
"Results provided in getpeerinfo, pingtime and pingwait fields are decimal seconds.\n"
"Ping command is handled in queue with all other commands, so it measures processing backlog, not just network ping.\n",
{},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("ping", "")
+ HelpExampleRpc("ping", "")
@@ -77,57 +78,60 @@ static UniValue getpeerinfo(const JSONRPCRequest& request)
"\nReturns data about each connected network node as a json array of objects.\n",
{},
RPCResult{
- "[\n"
- " {\n"
- " \"id\": n, (numeric) Peer index\n"
- " \"addr\":\"host:port\", (string) The IP address and port of the peer\n"
- " \"addrbind\":\"ip:port\", (string) Bind address of the connection to the peer\n"
- " \"addrlocal\":\"ip:port\", (string) Local address as reported by the peer\n"
- " \"mapped_as\":\"mapped_as\", (string) The AS in the BGP route to the peer used for diversifying peer selection\n"
- " \"services\":\"xxxxxxxxxxxxxxxx\", (string) The services offered\n"
- " \"servicesnames\":[ (array) the services offered, in human-readable form\n"
- " \"SERVICE_NAME\", (string) the service name if it is recognised\n"
- " ...\n"
- " ],\n"
- " \"relaytxes\":true|false, (boolean) Whether peer has asked us to relay transactions to it\n"
- " \"lastsend\": ttt, (numeric) The " + UNIX_EPOCH_TIME + " of the last send\n"
- " \"lastrecv\": ttt, (numeric) The " + UNIX_EPOCH_TIME + " of the last receive\n"
- " \"bytessent\": n, (numeric) The total bytes sent\n"
- " \"bytesrecv\": n, (numeric) The total bytes received\n"
- " \"conntime\": ttt, (numeric) The " + UNIX_EPOCH_TIME + " of the connection\n"
- " \"timeoffset\": ttt, (numeric) The time offset in seconds\n"
- " \"pingtime\": n, (numeric) ping time (if available)\n"
- " \"minping\": n, (numeric) minimum observed ping time (if any at all)\n"
- " \"pingwait\": n, (numeric) ping wait (if non-zero)\n"
- " \"version\": v, (numeric) The peer version, such as 70001\n"
- " \"subver\": \"/Satoshi:0.8.5/\", (string) The string version\n"
- " \"inbound\": true|false, (boolean) Inbound (true) or Outbound (false)\n"
- " \"addnode\": true|false, (boolean) Whether connection was due to addnode/-connect or if it was an automatic/inbound connection\n"
- " \"startingheight\": n, (numeric) The starting height (block) of the peer\n"
- " \"banscore\": n, (numeric) The ban score\n"
- " \"synced_headers\": n, (numeric) The last header we have in common with this peer\n"
- " \"synced_blocks\": n, (numeric) The last block we have in common with this peer\n"
- " \"inflight\": [\n"
- " n, (numeric) The heights of blocks we're currently asking from this peer\n"
- " ...\n"
- " ],\n"
- " \"whitelisted\": true|false, (boolean) Whether the peer is whitelisted\n"
- " \"minfeefilter\": n, (numeric) The minimum fee rate for transactions this peer accepts\n"
- " \"bytessent_per_msg\": {\n"
- " \"msg\": n, (numeric) The total bytes sent aggregated by message type\n"
- " When a message type is not listed in this json object, the bytes sent are 0.\n"
- " Only known message types can appear as keys in the object.\n"
- " ...\n"
- " },\n"
- " \"bytesrecv_per_msg\": {\n"
- " \"msg\": n, (numeric) The total bytes received aggregated by message type\n"
- " When a message type is not listed in this json object, the bytes received are 0.\n"
- " Only known message types can appear as keys in the object and all bytes received of unknown message types are listed under '"+NET_MESSAGE_COMMAND_OTHER+"'.\n"
- " ...\n"
- " }\n"
- " }\n"
- " ,...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {
+ {RPCResult::Type::NUM, "id", "Peer index"},
+ {RPCResult::Type::STR, "addr", "(host:port) The IP address and port of the peer"},
+ {RPCResult::Type::STR, "addrbind", "(ip:port) Bind address of the connection to the peer"},
+ {RPCResult::Type::STR, "addrlocal", "(ip:port) Local address as reported by the peer"},
+ {RPCResult::Type::NUM, "mapped_as", "The AS in the BGP route to the peer used for diversifying\n"
+ "peer selection (only available if the asmap config flag is set)"},
+ {RPCResult::Type::STR_HEX, "services", "The services offered"},
+ {RPCResult::Type::ARR, "servicesnames", "the services offered, in human-readable form",
+ {
+ {RPCResult::Type::STR, "SERVICE_NAME", "the service name if it is recognised"}
+ }},
+ {RPCResult::Type::BOOL, "relaytxes", "Whether peer has asked us to relay transactions to it"},
+ {RPCResult::Type::NUM_TIME, "lastsend", "The " + UNIX_EPOCH_TIME + " of the last send"},
+ {RPCResult::Type::NUM_TIME, "lastrecv", "The " + UNIX_EPOCH_TIME + " of the last receive"},
+ {RPCResult::Type::NUM, "bytessent", "The total bytes sent"},
+ {RPCResult::Type::NUM, "bytesrecv", "The total bytes received"},
+ {RPCResult::Type::NUM_TIME, "conntime", "The " + UNIX_EPOCH_TIME + " of the connection"},
+ {RPCResult::Type::NUM, "timeoffset", "The time offset in seconds"},
+ {RPCResult::Type::NUM, "pingtime", "ping time (if available)"},
+ {RPCResult::Type::NUM, "minping", "minimum observed ping time (if any at all)"},
+ {RPCResult::Type::NUM, "pingwait", "ping wait (if non-zero)"},
+ {RPCResult::Type::NUM, "version", "The peer version, such as 70001"},
+ {RPCResult::Type::STR, "subver", "The string version"},
+ {RPCResult::Type::BOOL, "inbound", "Inbound (true) or Outbound (false)"},
+ {RPCResult::Type::BOOL, "addnode", "Whether connection was due to addnode/-connect or if it was an automatic/inbound connection"},
+ {RPCResult::Type::NUM, "startingheight", "The starting height (block) of the peer"},
+ {RPCResult::Type::NUM, "banscore", "The ban score"},
+ {RPCResult::Type::NUM, "synced_headers", "The last header we have in common with this peer"},
+ {RPCResult::Type::NUM, "synced_blocks", "The last block we have in common with this peer"},
+ {RPCResult::Type::ARR, "inflight", "",
+ {
+ {RPCResult::Type::NUM, "n", "The heights of blocks we're currently asking from this peer"},
+ }},
+ {RPCResult::Type::BOOL, "whitelisted", "Whether the peer is whitelisted"},
+ {RPCResult::Type::NUM, "minfeefilter", "The minimum fee rate for transactions this peer accepts"},
+ {RPCResult::Type::OBJ_DYN, "bytessent_per_msg", "",
+ {
+ {RPCResult::Type::NUM, "msg", "The total bytes sent aggregated by message type\n"
+ "When a message type is not listed in this json object, the bytes sent are 0.\n"
+ "Only known message types can appear as keys in the object."}
+ }},
+ {RPCResult::Type::OBJ, "bytesrecv_per_msg", "",
+ {
+ {RPCResult::Type::NUM, "msg", "The total bytes received aggregated by message type\n"
+ "When a message type is not listed in this json object, the bytes received are 0.\n"
+ "Only known message types can appear as keys in the object and all bytes received of unknown message types are listed under '"+NET_MESSAGE_COMMAND_OTHER+"'."}
+ }},
+ }},
+ }},
},
RPCExamples{
HelpExampleCli("getpeerinfo", "")
@@ -165,12 +169,15 @@ static UniValue getpeerinfo(const JSONRPCRequest& request)
obj.pushKV("bytesrecv", stats.nRecvBytes);
obj.pushKV("conntime", stats.nTimeConnected);
obj.pushKV("timeoffset", stats.nTimeOffset);
- if (stats.dPingTime > 0.0)
- obj.pushKV("pingtime", stats.dPingTime);
- if (stats.dMinPing < static_cast<double>(std::numeric_limits<int64_t>::max())/1e6)
- obj.pushKV("minping", stats.dMinPing);
- if (stats.dPingWait > 0.0)
- obj.pushKV("pingwait", stats.dPingWait);
+ if (stats.m_ping_usec > 0) {
+ obj.pushKV("pingtime", ((double)stats.m_ping_usec) / 1e6);
+ }
+ if (stats.m_min_ping_usec < std::numeric_limits<int64_t>::max()) {
+ obj.pushKV("minping", ((double)stats.m_min_ping_usec) / 1e6);
+ }
+ if (stats.m_ping_wait_usec > 0) {
+ obj.pushKV("pingwait", ((double)stats.m_ping_wait_usec) / 1e6);
+ }
obj.pushKV("version", stats.nVersion);
// Use the sanitized form of subver here, to avoid tricksy remote peers from
// corrupting or modifying the JSON output by putting special characters in
@@ -234,7 +241,7 @@ static UniValue addnode(const JSONRPCRequest& request)
{"node", RPCArg::Type::STR, RPCArg::Optional::NO, "The node (see getpeerinfo for nodes)"},
{"command", RPCArg::Type::STR, RPCArg::Optional::NO, "'add' to add a node to the list, 'remove' to remove a node from the list, 'onetry' to try a connection to the node once"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("addnode", "\"192.168.0.6:8333\" \"onetry\"")
+ HelpExampleRpc("addnode", "\"192.168.0.6:8333\", \"onetry\"")
@@ -277,7 +284,7 @@ static UniValue disconnectnode(const JSONRPCRequest& request)
{"address", RPCArg::Type::STR, /* default */ "fallback to nodeid", "The IP address/port of the node"},
{"nodeid", RPCArg::Type::NUM, /* default */ "fallback to address", "The node ID (see getpeerinfo for node IDs)"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("disconnectnode", "\"192.168.0.6:8333\"")
+ HelpExampleCli("disconnectnode", "\"\" 1")
@@ -320,19 +327,22 @@ static UniValue getaddednodeinfo(const JSONRPCRequest& request)
{"node", RPCArg::Type::STR, /* default */ "all nodes", "If provided, return information about this specific node, otherwise all nodes are returned."},
},
RPCResult{
- "[\n"
- " {\n"
- " \"addednode\" : \"192.168.0.201\", (string) The node IP address or name (as provided to addnode)\n"
- " \"connected\" : true|false, (boolean) If connected\n"
- " \"addresses\" : [ (list of objects) Only when connected = true\n"
- " {\n"
- " \"address\" : \"192.168.0.201:8333\", (string) The bitcoin server IP and port we're connected to\n"
- " \"connected\" : \"outbound\" (string) connection, inbound or outbound\n"
- " }\n"
- " ]\n"
- " }\n"
- " ,...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "addednode", "The node IP address or name (as provided to addnode)"},
+ {RPCResult::Type::BOOL, "connected", "If connected"},
+ {RPCResult::Type::ARR, "addresses", "Only when connected = true",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "address", "The bitcoin server IP and port we're connected to"},
+ {RPCResult::Type::STR, "connected", "connection, inbound or outbound"},
+ }},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("getaddednodeinfo", "\"192.168.0.201\"")
@@ -386,20 +396,21 @@ static UniValue getnettotals(const JSONRPCRequest& request)
"and current time.\n",
{},
RPCResult{
- "{\n"
- " \"totalbytesrecv\": n, (numeric) Total bytes received\n"
- " \"totalbytessent\": n, (numeric) Total bytes sent\n"
- " \"timemillis\": t, (numeric) Current UNIX time in milliseconds\n"
- " \"uploadtarget\":\n"
- " {\n"
- " \"timeframe\": n, (numeric) Length of the measuring timeframe in seconds\n"
- " \"target\": n, (numeric) Target in bytes\n"
- " \"target_reached\": true|false, (boolean) True if target is reached\n"
- " \"serve_historical_blocks\": true|false, (boolean) True if serving historical blocks\n"
- " \"bytes_left_in_cycle\": t, (numeric) Bytes left in current time cycle\n"
- " \"time_left_in_cycle\": t (numeric) Seconds left in current time cycle\n"
- " }\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "totalbytesrecv", "Total bytes received"},
+ {RPCResult::Type::NUM, "totalbytessent", "Total bytes sent"},
+ {RPCResult::Type::NUM_TIME, "timemillis", "Current UNIX time in milliseconds"},
+ {RPCResult::Type::OBJ, "uploadtarget", "",
+ {
+ {RPCResult::Type::NUM, "timeframe", "Length of the measuring timeframe in seconds"},
+ {RPCResult::Type::NUM, "target", "Target in bytes"},
+ {RPCResult::Type::BOOL, "target_reached", "True if target is reached"},
+ {RPCResult::Type::BOOL, "serve_historical_blocks", "True if serving historical blocks"},
+ {RPCResult::Type::NUM, "bytes_left_in_cycle", "Bytes left in current time cycle"},
+ {RPCResult::Type::NUM, "time_left_in_cycle", "Seconds left in current time cycle"},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("getnettotals", "")
@@ -452,41 +463,44 @@ static UniValue getnetworkinfo(const JSONRPCRequest& request)
"Returns an object containing various state info regarding P2P networking.\n",
{},
RPCResult{
- "{ (json object)\n"
- " \"version\": xxxxx, (numeric) the server version\n"
- " \"subversion\" : \"str\", (string) the server subversion string\n"
- " \"protocolversion\": xxxxx, (numeric) the protocol version\n"
- " \"localservices\" : \"hex\", (string) the services we offer to the network\n"
- " \"localservicesnames\": [ (array) the services we offer to the network, in human-readable form\n"
- " \"SERVICE_NAME\", (string) the service name\n"
- " ...\n"
- " ],\n"
- " \"localrelay\": true|false, (bool) true if transaction relay is requested from peers\n"
- " \"timeoffset\": xxxxx, (numeric) the time offset\n"
- " \"connections\": xxxxx, (numeric) the number of connections\n"
- " \"networkactive\": true|false, (bool) whether p2p networking is enabled\n"
- " \"networks\": [ (array) information per network\n"
- " { (json object)\n"
- " \"name\": \"str\", (string) network (ipv4, ipv6 or onion)\n"
- " \"limited\": true|false, (boolean) is the network limited using -onlynet?\n"
- " \"reachable\": true|false, (boolean) is the network reachable?\n"
- " \"proxy\" : \"str\" (string) (\"host:port\") the proxy that is used for this network, or empty if none\n"
- " \"proxy_randomize_credentials\" : true|false, (bool) Whether randomized credentials are used\n"
- " },\n"
- " ...\n"
- " ],\n"
- " \"relayfee\": x.xxxxxxxx, (numeric) minimum relay fee for transactions in " + CURRENCY_UNIT + "/kB\n"
- " \"incrementalfee\": x.xxxxxxxx, (numeric) minimum fee increment for mempool limiting or BIP 125 replacement in " + CURRENCY_UNIT + "/kB\n"
- " \"localaddresses\": [ (array) list of local addresses\n"
- " { (json object)\n"
- " \"address\" : \"xxxx\", (string) network address\n"
- " \"port\": xxx, (numeric) network port\n"
- " \"score\": xxx (numeric) relative score\n"
- " },\n"
- " ...\n"
- " ],\n"
- " \"warnings\" : \"str\", (string) any network and blockchain warnings\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "version", "the server version"},
+ {RPCResult::Type::STR, "subversion", "the server subversion string"},
+ {RPCResult::Type::NUM, "protocolversion", "the protocol version"},
+ {RPCResult::Type::STR_HEX, "localservices", "the services we offer to the network"},
+ {RPCResult::Type::ARR, "localservicesnames", "the services we offer to the network, in human-readable form",
+ {
+ {RPCResult::Type::STR, "SERVICE_NAME", "the service name"},
+ }},
+ {RPCResult::Type::BOOL, "localrelay", "true if transaction relay is requested from peers"},
+ {RPCResult::Type::NUM, "timeoffset", "the time offset"},
+ {RPCResult::Type::NUM, "connections", "the number of connections"},
+ {RPCResult::Type::BOOL, "networkactive", "whether p2p networking is enabled"},
+ {RPCResult::Type::ARR, "networks", "information per network",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "name", "network (ipv4, ipv6 or onion)"},
+ {RPCResult::Type::BOOL, "limited", "is the network limited using -onlynet?"},
+ {RPCResult::Type::BOOL, "reachable", "is the network reachable?"},
+ {RPCResult::Type::STR, "proxy", "(\"host:port\") the proxy that is used for this network, or empty if none"},
+ {RPCResult::Type::BOOL, "proxy_randomize_credentials", "Whether randomized credentials are used"},
+ }},
+ }},
+ {RPCResult::Type::NUM, "relayfee", "minimum relay fee for transactions in " + CURRENCY_UNIT + "/kB"},
+ {RPCResult::Type::NUM, "incrementalfee", "minimum fee increment for mempool limiting or BIP 125 replacement in " + CURRENCY_UNIT + "/kB"},
+ {RPCResult::Type::ARR, "localaddresses", "list of local addresses",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "address", "network address"},
+ {RPCResult::Type::NUM, "port", "network port"},
+ {RPCResult::Type::NUM, "score", "relative score"},
+ }},
+ }},
+ {RPCResult::Type::STR, "warnings", "any network and blockchain warnings"},
+ }
},
RPCExamples{
HelpExampleCli("getnetworkinfo", "")
@@ -540,7 +554,7 @@ static UniValue setban(const JSONRPCRequest& request)
{"bantime", RPCArg::Type::NUM, /* default */ "0", "time in seconds how long (or until when if [absolute] is set) the IP is banned (0 or empty means using the default time of 24h which can also be overwritten by the -bantime startup argument)"},
{"absolute", RPCArg::Type::BOOL, /* default */ "false", "If set, the bantime must be an absolute timestamp expressed in " + UNIX_EPOCH_TIME},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("setban", "\"192.168.0.6\" \"add\" 86400")
+ HelpExampleCli("setban", "\"192.168.0.0/24\" \"add\"")
@@ -615,7 +629,16 @@ static UniValue listbanned(const JSONRPCRequest& request)
RPCHelpMan{"listbanned",
"\nList all banned IPs/Subnets.\n",
{},
- RPCResults{},
+ RPCResult{RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "address", ""},
+ {RPCResult::Type::NUM_TIME, "banned_until", ""},
+ {RPCResult::Type::NUM_TIME, "ban_created", ""},
+ {RPCResult::Type::STR, "ban_reason", ""},
+ }},
+ }},
RPCExamples{
HelpExampleCli("listbanned", "")
+ HelpExampleRpc("listbanned", "")
@@ -650,7 +673,7 @@ static UniValue clearbanned(const JSONRPCRequest& request)
RPCHelpMan{"clearbanned",
"\nClear all banned IPs.\n",
{},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("clearbanned", "")
+ HelpExampleRpc("clearbanned", "")
@@ -672,7 +695,7 @@ static UniValue setnetworkactive(const JSONRPCRequest& request)
{
{"state", RPCArg::Type::BOOL, RPCArg::Optional::NO, "true to enable networking, false to disable"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::BOOL, "", "The value that was passed in"},
RPCExamples{""},
}.Check(request);
@@ -690,18 +713,19 @@ static UniValue getnodeaddresses(const JSONRPCRequest& request)
RPCHelpMan{"getnodeaddresses",
"\nReturn known addresses which can potentially be used to find new nodes in the network\n",
{
- {"count", RPCArg::Type::NUM, /* default */ "1", "How many addresses to return. Limited to the smaller of " + std::to_string(ADDRMAN_GETADDR_MAX) + " or " + std::to_string(ADDRMAN_GETADDR_MAX_PCT) + "% of all known addresses."},
+ {"count", RPCArg::Type::NUM, /* default */ "1", "How many addresses to return. Limited to the smaller of " + ToString(ADDRMAN_GETADDR_MAX) + " or " + ToString(ADDRMAN_GETADDR_MAX_PCT) + "% of all known addresses."},
},
RPCResult{
- "[\n"
- " {\n"
- " \"time\": ttt, (numeric) The " + UNIX_EPOCH_TIME + " of when the node was last seen\n"
- " \"services\": n, (numeric) The services offered\n"
- " \"address\": \"host\", (string) The address of the node\n"
- " \"port\": n (numeric) The port of the node\n"
- " }\n"
- " ,....\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM_TIME, "time", "The " + UNIX_EPOCH_TIME + " of when the node was last seen"},
+ {RPCResult::Type::NUM, "services", "The services offered"},
+ {RPCResult::Type::STR, "address", "The address of the node"},
+ {RPCResult::Type::NUM, "port", "The port of the node"},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("getnodeaddresses", "8")
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 972809a65b..ae3f15cec2 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -30,6 +30,7 @@
#include <uint256.h>
#include <util/moneystr.h>
#include <util/strencodings.h>
+#include <util/string.h>
#include <validation.h>
#include <validationinterface.h>
@@ -94,54 +95,62 @@ static UniValue getrawtransaction(const JSONRPCRequest& request)
},
{
RPCResult{"if verbose is not set or set to false",
- "\"data\" (string) The serialized, hex-encoded data for 'txid'\n"
+ RPCResult::Type::STR, "data", "The serialized, hex-encoded data for 'txid'"
},
RPCResult{"if verbose is set to true",
- "{\n"
- " \"in_active_chain\": b, (bool) Whether specified block is in the active chain or not (only present with explicit \"blockhash\" argument)\n"
- " \"hex\" : \"data\", (string) The serialized, hex-encoded data for 'txid'\n"
- " \"txid\" : \"id\", (string) The transaction id (same as provided)\n"
- " \"hash\" : \"id\", (string) The transaction hash (differs from txid for witness transactions)\n"
- " \"size\" : n, (numeric) The serialized transaction size\n"
- " \"vsize\" : n, (numeric) The virtual transaction size (differs from size for witness transactions)\n"
- " \"weight\" : n, (numeric) The transaction's weight (between vsize*4-3 and vsize*4)\n"
- " \"version\" : n, (numeric) The version\n"
- " \"locktime\" : ttt, (numeric) The lock time\n"
- " \"vin\" : [ (array of json objects)\n"
- " {\n"
- " \"txid\": \"id\", (string) The transaction id\n"
- " \"vout\": n, (numeric) \n"
- " \"scriptSig\": { (json object) The script\n"
- " \"asm\": \"asm\", (string) asm\n"
- " \"hex\": \"hex\" (string) hex\n"
- " },\n"
- " \"sequence\": n (numeric) The script sequence number\n"
- " \"txinwitness\": [\"hex\", ...] (array of string) hex-encoded witness data (if any)\n"
- " }\n"
- " ,...\n"
- " ],\n"
- " \"vout\" : [ (array of json objects)\n"
- " {\n"
- " \"value\" : x.xxx, (numeric) The value in " + CURRENCY_UNIT + "\n"
- " \"n\" : n, (numeric) index\n"
- " \"scriptPubKey\" : { (json object)\n"
- " \"asm\" : \"asm\", (string) the asm\n"
- " \"hex\" : \"hex\", (string) the hex\n"
- " \"reqSigs\" : n, (numeric) The required sigs\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
- " \"addresses\" : [ (json array of string)\n"
- " \"address\" (string) bitcoin address\n"
- " ,...\n"
- " ]\n"
- " }\n"
- " }\n"
- " ,...\n"
- " ],\n"
- " \"blockhash\" : \"hash\", (string) the block hash\n"
- " \"confirmations\" : n, (numeric) The confirmations\n"
- " \"blocktime\" : ttt (numeric) The block time expressed in " + UNIX_EPOCH_TIME + "\n"
- " \"time\" : ttt, (numeric) Same as \"blocktime\"\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "in_active_chain", "Whether specified block is in the active chain or not (only present with explicit \"blockhash\" argument)"},
+ {RPCResult::Type::STR_HEX, "hex", "The serialized, hex-encoded data for 'txid'"},
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id (same as provided)"},
+ {RPCResult::Type::STR_HEX, "hash", "The transaction hash (differs from txid for witness transactions)"},
+ {RPCResult::Type::NUM, "size", "The serialized transaction size"},
+ {RPCResult::Type::NUM, "vsize", "The virtual transaction size (differs from size for witness transactions)"},
+ {RPCResult::Type::NUM, "weight", "The transaction's weight (between vsize*4-3 and vsize*4)"},
+ {RPCResult::Type::NUM, "version", "The version"},
+ {RPCResult::Type::NUM_TIME, "locktime", "The lock time"},
+ {RPCResult::Type::ARR, "vin", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id"},
+ {RPCResult::Type::STR, "vout", ""},
+ {RPCResult::Type::OBJ, "scriptSig", "The script",
+ {
+ {RPCResult::Type::STR, "asm", "asm"},
+ {RPCResult::Type::STR_HEX, "hex", "hex"},
+ }},
+ {RPCResult::Type::NUM, "sequence", "The script sequence number"},
+ {RPCResult::Type::ARR, "txinwitness", "",
+ {
+ {RPCResult::Type::STR_HEX, "hex", "hex-encoded witness data (if any)"},
+ }},
+ }},
+ }},
+ {RPCResult::Type::ARR, "vout", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "value", "The value in " + CURRENCY_UNIT},
+ {RPCResult::Type::NUM, "n", "index"},
+ {RPCResult::Type::OBJ, "scriptPubKey", "",
+ {
+ {RPCResult::Type::STR, "asm", "the asm"},
+ {RPCResult::Type::STR, "hex", "the hex"},
+ {RPCResult::Type::NUM, "reqSigs", "The required sigs"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ {RPCResult::Type::ARR, "addresses", "",
+ {
+ {RPCResult::Type::STR, "address", "bitcoin address"},
+ }},
+ }},
+ }},
+ }},
+ {RPCResult::Type::STR_HEX, "blockhash", "the block hash"},
+ {RPCResult::Type::NUM, "confirmations", "The confirmations"},
+ {RPCResult::Type::NUM_TIME, "blocktime", "The block time expressed in " + UNIX_EPOCH_TIME},
+ {RPCResult::Type::NUM, "time", "Same as \"blocktime\""},
+ }
},
},
RPCExamples{
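
getrawtransaction above (like getmemoryinfo earlier in misc.cpp) keeps two alternative results, each tagged with a condition string such as "if verbose is set to true". As an illustration of that pattern only, for a hypothetical RPC, and assuming RPCResults accepts a brace list of alternatives as the call sites above do:

    // Hypothetical illustration of condition-tagged results, following the
    // RPCResult{"<condition>", type, key, description, inner} usages above.
    const RPCResults alternatives{
        RPCResult{"if verbose is not set or set to false",
            RPCResult::Type::STR_HEX, "data", "the serialized form"},
        RPCResult{"if verbose is set to true",
            RPCResult::Type::OBJ, "", "",
            {
                {RPCResult::Type::NUM, "size", "size in bytes"},
            }},
    };
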
@@ -222,7 +231,7 @@ static UniValue gettxoutproof(const JSONRPCRequest& request)
"you need to maintain a transaction index, using the -txindex command line option or\n"
"specify the block in which the transaction is included manually (by blockhash).\n",
{
- {"txids", RPCArg::Type::ARR, RPCArg::Optional::NO, "A json array of txids to filter",
+ {"txids", RPCArg::Type::ARR, RPCArg::Optional::NO, "The txids to filter",
{
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "A transaction hash"},
},
@@ -230,7 +239,7 @@ static UniValue gettxoutproof(const JSONRPCRequest& request)
{"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED_NAMED_ARG, "If specified, looks for txid in the block with this hash"},
},
RPCResult{
- "\"data\" (string) A string that is a serialized, hex-encoded data for the proof.\n"
+ RPCResult::Type::STR, "data", "A string that is a serialized, hex-encoded data for the proof."
},
RPCExamples{""},
}.Check(request);
@@ -315,7 +324,10 @@ static UniValue verifytxoutproof(const JSONRPCRequest& request)
{"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The hex-encoded proof generated by gettxoutproof"},
},
RPCResult{
- "[\"txid\"] (array, strings) The txid(s) which the proof commits to, or empty array if the proof can not be validated.\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The txid(s) which the proof commits to, or empty array if the proof can not be validated."},
+ }
},
RPCExamples{""},
}.Check(request);
@@ -357,7 +369,7 @@ static UniValue createrawtransaction(const JSONRPCRequest& request)
"Note that the transaction's inputs are not signed, and\n"
"it is not stored in the wallet or transmitted to the network.\n",
{
- {"inputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "A json array of json objects",
+ {"inputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "The inputs",
{
{"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
{
@@ -368,7 +380,7 @@ static UniValue createrawtransaction(const JSONRPCRequest& request)
},
},
},
- {"outputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "a json array with outputs (key-value pairs), where none of the keys are duplicated.\n"
+ {"outputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "The outputs (key-value pairs), where none of the keys are duplicated.\n"
"That is, each address can only appear once and there can only be one 'data' object.\n"
"For compatibility reasons, a dictionary, which holds the key-value pairs directly, is also\n"
" accepted as second parameter.",
@@ -390,7 +402,7 @@ static UniValue createrawtransaction(const JSONRPCRequest& request)
" Allows this transaction to be replaced by a transaction with higher fees. If provided, it is an error if explicit sequence numbers are incompatible."},
},
RPCResult{
- "\"transaction\" (string) hex string of the transaction\n"
+ RPCResult::Type::STR_HEX, "transaction", "hex string of the transaction"
},
RPCExamples{
HelpExampleCli("createrawtransaction", "\"[{\\\"txid\\\":\\\"myid\\\",\\\"vout\\\":0}]\" \"[{\\\"address\\\":0.01}]\"")
@@ -432,45 +444,53 @@ static UniValue decoderawtransaction(const JSONRPCRequest& request)
},
},
RPCResult{
- "{\n"
- " \"txid\" : \"id\", (string) The transaction id\n"
- " \"hash\" : \"id\", (string) The transaction hash (differs from txid for witness transactions)\n"
- " \"size\" : n, (numeric) The transaction size\n"
- " \"vsize\" : n, (numeric) The virtual transaction size (differs from size for witness transactions)\n"
- " \"weight\" : n, (numeric) The transaction's weight (between vsize*4 - 3 and vsize*4)\n"
- " \"version\" : n, (numeric) The version\n"
- " \"locktime\" : ttt, (numeric) The lock time\n"
- " \"vin\" : [ (array of json objects)\n"
- " {\n"
- " \"txid\": \"id\", (string) The transaction id\n"
- " \"vout\": n, (numeric) The output number\n"
- " \"scriptSig\": { (json object) The script\n"
- " \"asm\": \"asm\", (string) asm\n"
- " \"hex\": \"hex\" (string) hex\n"
- " },\n"
- " \"txinwitness\": [\"hex\", ...] (array of string) hex-encoded witness data (if any)\n"
- " \"sequence\": n (numeric) The script sequence number\n"
- " }\n"
- " ,...\n"
- " ],\n"
- " \"vout\" : [ (array of json objects)\n"
- " {\n"
- " \"value\" : x.xxx, (numeric) The value in " + CURRENCY_UNIT + "\n"
- " \"n\" : n, (numeric) index\n"
- " \"scriptPubKey\" : { (json object)\n"
- " \"asm\" : \"asm\", (string) the asm\n"
- " \"hex\" : \"hex\", (string) the hex\n"
- " \"reqSigs\" : n, (numeric) The required sigs\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
- " \"addresses\" : [ (json array of string)\n"
- " \"12tvKAXCxZjSmdNbao16dKXC8tRWfcF5oc\" (string) bitcoin address\n"
- " ,...\n"
- " ]\n"
- " }\n"
- " }\n"
- " ,...\n"
- " ],\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id"},
+ {RPCResult::Type::STR_HEX, "hash", "The transaction hash (differs from txid for witness transactions)"},
+ {RPCResult::Type::NUM, "size", "The transaction size"},
+ {RPCResult::Type::NUM, "vsize", "The virtual transaction size (differs from size for witness transactions)"},
+ {RPCResult::Type::NUM, "weight", "The transaction's weight (between vsize*4 - 3 and vsize*4)"},
+ {RPCResult::Type::NUM, "version", "The version"},
+ {RPCResult::Type::NUM_TIME, "locktime", "The lock time"},
+ {RPCResult::Type::ARR, "vin", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id"},
+ {RPCResult::Type::NUM, "vout", "The output number"},
+ {RPCResult::Type::OBJ, "scriptSig", "The script",
+ {
+ {RPCResult::Type::STR, "asm", "asm"},
+ {RPCResult::Type::STR_HEX, "hex", "hex"},
+ }},
+ {RPCResult::Type::ARR, "txinwitness", "",
+ {
+ {RPCResult::Type::STR_HEX, "hex", "hex-encoded witness data (if any)"},
+ }},
+ {RPCResult::Type::NUM, "sequence", "The script sequence number"},
+ }},
+ }},
+ {RPCResult::Type::ARR, "vout", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "value", "The value in " + CURRENCY_UNIT},
+ {RPCResult::Type::NUM, "n", "index"},
+ {RPCResult::Type::OBJ, "scriptPubKey", "",
+ {
+ {RPCResult::Type::STR, "asm", "the asm"},
+ {RPCResult::Type::STR_HEX, "hex", "the hex"},
+ {RPCResult::Type::NUM, "reqSigs", "The required sigs"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ {RPCResult::Type::ARR, "addresses", "",
+ {
+ {RPCResult::Type::STR, "address", "bitcoin address"},
+ }},
+ }},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("decoderawtransaction", "\"hexstring\"")
@@ -513,26 +533,29 @@ static UniValue decodescript(const JSONRPCRequest& request)
{"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "the hex-encoded script"},
},
RPCResult{
- "{\n"
- " \"asm\":\"asm\", (string) Script public key\n"
- " \"type\":\"type\", (string) The output type (e.g. "+GetAllOutputTypes()+")\n"
- " \"reqSigs\": n, (numeric) The required signatures\n"
- " \"addresses\": [ (json array of string)\n"
- " \"address\" (string) bitcoin address\n"
- " ,...\n"
- " ],\n"
- " \"p2sh\":\"str\" (string) address of P2SH script wrapping this redeem script (not returned if the script is already a P2SH).\n"
- " \"segwit\": { (json object) Result of a witness script public key wrapping this redeem script (not returned if the script is a P2SH or witness).\n"
- " \"asm\":\"str\", (string) String representation of the script public key\n"
- " \"hex\":\"hexstr\", (string) Hex string of the script public key\n"
- " \"type\":\"str\", (string) The type of the script public key (e.g. witness_v0_keyhash or witness_v0_scripthash)\n"
- " \"reqSigs\": n, (numeric) The required signatures (always 1)\n"
- " \"addresses\": [ (json array of string) (always length 1)\n"
- " \"address\" (string) segwit address\n"
- " ,...\n"
- " ],\n"
- " \"p2sh-segwit\":\"str\" (string) address of the P2SH script wrapping this witness redeem script.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "asm", "Script public key"},
+ {RPCResult::Type::STR, "type", "The output type (e.g. "+GetAllOutputTypes()+")"},
+ {RPCResult::Type::NUM, "reqSigs", "The required signatures"},
+ {RPCResult::Type::ARR, "addresses", "",
+ {
+ {RPCResult::Type::STR, "address", "bitcoin address"},
+ }},
+ {RPCResult::Type::STR, "p2sh", "address of P2SH script wrapping this redeem script (not returned if the script is already a P2SH)"},
+ {RPCResult::Type::OBJ, "segwit", "Result of a witness script public key wrapping this redeem script (not returned if the script is a P2SH or witness)",
+ {
+ {RPCResult::Type::STR, "asm", "String representation of the script public key"},
+ {RPCResult::Type::STR_HEX, "hex", "Hex string of the script public key"},
+ {RPCResult::Type::STR, "type", "The type of the script public key (e.g. witness_v0_keyhash or witness_v0_scripthash)"},
+ {RPCResult::Type::NUM, "reqSigs", "The required signatures (always 1)"},
+ {RPCResult::Type::ARR, "addresses", "(always length 1)",
+ {
+ {RPCResult::Type::STR, "address", "segwit address"},
+ }},
+ {RPCResult::Type::STR, "p2sh-segwit", "address of the P2SH script wrapping this witness redeem script"},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("decodescript", "\"hexstring\"")
@@ -600,14 +623,14 @@ static UniValue combinerawtransaction(const JSONRPCRequest& request)
"The combined transaction may be another partially signed transaction or a \n"
"fully signed transaction.",
{
- {"txs", RPCArg::Type::ARR, RPCArg::Optional::NO, "A json array of hex strings of partially signed transactions",
+ {"txs", RPCArg::Type::ARR, RPCArg::Optional::NO, "The hex strings of partially signed transactions",
{
{"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "A transaction hash"},
},
},
},
RPCResult{
- "\"hex\" (string) The hex-encoded raw transaction with signature(s)\n"
+ RPCResult::Type::STR, "", "The hex-encoded raw transaction with signature(s)"
},
RPCExamples{
HelpExampleCli("combinerawtransaction", R"('["myhex1", "myhex2", "myhex3"]')")
@@ -686,12 +709,12 @@ static UniValue signrawtransactionwithkey(const JSONRPCRequest& request)
"this transaction depends on but may not yet be in the block chain.\n",
{
{"hexstring", RPCArg::Type::STR, RPCArg::Optional::NO, "The transaction hex string"},
- {"privkeys", RPCArg::Type::ARR, RPCArg::Optional::NO, "A json array of base58-encoded private keys for signing",
+ {"privkeys", RPCArg::Type::ARR, RPCArg::Optional::NO, "The base58-encoded private keys for signing",
{
{"privatekey", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "private key in base58-encoding"},
},
},
- {"prevtxs", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "A json array of previous dependent transaction outputs",
+ {"prevtxs", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "The previous dependent transaction outputs",
{
{"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
{
@@ -715,20 +738,22 @@ static UniValue signrawtransactionwithkey(const JSONRPCRequest& request)
},
},
RPCResult{
- "{\n"
- " \"hex\" : \"value\", (string) The hex-encoded raw transaction with signature(s)\n"
- " \"complete\" : true|false, (boolean) If the transaction has a complete set of signatures\n"
- " \"errors\" : [ (json array of objects) Script verification errors (if there are any)\n"
- " {\n"
- " \"txid\" : \"hash\", (string) The hash of the referenced, previous transaction\n"
- " \"vout\" : n, (numeric) The index of the output to spent and used as input\n"
- " \"scriptSig\" : \"hex\", (string) The hex-encoded signature script\n"
- " \"sequence\" : n, (numeric) Script sequence number\n"
- " \"error\" : \"text\" (string) Verification or signing error related to the input\n"
- " }\n"
- " ,...\n"
- " ]\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hex", "The hex-encoded raw transaction with signature(s)"},
+ {RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"},
+ {RPCResult::Type::ARR, "errors", "Script verification errors (if there are any)",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The hash of the referenced, previous transaction"},
+ {RPCResult::Type::NUM, "vout", "The index of the output to spent and used as input"},
+ {RPCResult::Type::STR_HEX, "scriptSig", "The hex-encoded signature script"},
+ {RPCResult::Type::NUM, "sequence", "Script sequence number"},
+ {RPCResult::Type::STR, "error", "Verification or signing error related to the input"},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("signrawtransactionwithkey", "\"myhex\" \"[\\\"key1\\\",\\\"key2\\\"]\"")
@@ -784,7 +809,7 @@ static UniValue sendrawtransaction(const JSONRPCRequest& request)
"/kB.\nSet to 0 to accept any fee rate.\n"},
},
RPCResult{
- "\"hex\" (string) The transaction hash in hex\n"
+ RPCResult::Type::STR_HEX, "", "The transaction hash in hex"
},
RPCExamples{
"\nCreate a transaction\n"
@@ -846,14 +871,16 @@ static UniValue testmempoolaccept(const JSONRPCRequest& request)
{"maxfeerate", RPCArg::Type::AMOUNT, /* default */ FormatMoney(DEFAULT_MAX_RAW_TX_FEE_RATE.GetFeePerK()), "Reject transactions whose fee rate is higher than the specified value, expressed in " + CURRENCY_UNIT + "/kB\n"},
},
RPCResult{
- "[ (array) The result of the mempool acceptance test for each raw transaction in the input array.\n"
- " Length is exactly one for now.\n"
- " {\n"
- " \"txid\" (string) The transaction hash in hex\n"
- " \"allowed\" (boolean) If the mempool allows this tx to be inserted\n"
- " \"reject-reason\" (string) Rejection string (only present when 'allowed' is false)\n"
- " }\n"
- "]\n"
+ RPCResult::Type::ARR, "", "The result of the mempool acceptance test for each raw transaction in the input array.\n"
+ "Length is exactly one for now.",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction hash in hex"},
+ {RPCResult::Type::BOOL, "allowed", "If the mempool allows this tx to be inserted"},
+ {RPCResult::Type::STR, "reject-reason", "Rejection string (only present when 'allowed' is false)"},
+ }},
+ }
},
RPCExamples{
"\nCreate a transaction\n"
@@ -934,7 +961,7 @@ static std::string WriteHDKeypath(std::vector<uint32_t>& keypath)
num &= ~0x80000000;
}
- keypath_str += std::to_string(num);
+ keypath_str += ToString(num);
if (hardened) {
keypath_str += "'";
}
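
WriteHDKeypath above clears the hardened bit, prints the child index (now via ToString()), and re-appends the apostrophe. A small worked example of that arithmetic; the helper name is hypothetical, and the hardened flag, which is computed before the lines shown, is derived here from the top bit for self-containment.

    #include <cstdint>
    #include <string>

    // Sketch: format a single BIP32 child index the way WriteHDKeypath() does,
    // e.g. 0x8000002C (hardened 44) -> "44'" and 0x00000000 -> "0".
    std::string FormatChildIndex(uint32_t num)
    {
        const bool hardened = (num & 0x80000000) != 0;
        num &= ~0x80000000;
        std::string s = std::to_string(num); // the patch uses ToString() here
        if (hardened) s += "'";
        return s;
    }
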
@@ -950,92 +977,108 @@ UniValue decodepsbt(const JSONRPCRequest& request)
{"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "The PSBT base64 string"},
},
RPCResult{
- "{\n"
- " \"tx\" : { (json object) The decoded network-serialized unsigned transaction.\n"
- " ... The layout is the same as the output of decoderawtransaction.\n"
- " },\n"
- " \"unknown\" : { (json object) The unknown global fields\n"
- " \"key\" : \"value\" (key-value pair) An unknown key-value pair\n"
- " ...\n"
- " },\n"
- " \"inputs\" : [ (array of json objects)\n"
- " {\n"
- " \"non_witness_utxo\" : { (json object, optional) Decoded network transaction for non-witness UTXOs\n"
- " ...\n"
- " },\n"
- " \"witness_utxo\" : { (json object, optional) Transaction output for witness UTXOs\n"
- " \"amount\" : x.xxx, (numeric) The value in " + CURRENCY_UNIT + "\n"
- " \"scriptPubKey\" : { (json object)\n"
- " \"asm\" : \"asm\", (string) The asm\n"
- " \"hex\" : \"hex\", (string) The hex\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
- " \"address\" : \"address\" (string) Bitcoin address if there is one\n"
- " }\n"
- " },\n"
- " \"partial_signatures\" : { (json object, optional)\n"
- " \"pubkey\" : \"signature\", (string) The public key and signature that corresponds to it.\n"
- " ,...\n"
- " }\n"
- " \"sighash\" : \"type\", (string, optional) The sighash type to be used\n"
- " \"redeem_script\" : { (json object, optional)\n"
- " \"asm\" : \"asm\", (string) The asm\n"
- " \"hex\" : \"hex\", (string) The hex\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
- " }\n"
- " \"witness_script\" : { (json object, optional)\n"
- " \"asm\" : \"asm\", (string) The asm\n"
- " \"hex\" : \"hex\", (string) The hex\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
- " }\n"
- " \"bip32_derivs\" : { (json object, optional)\n"
- " \"pubkey\" : { (json object, optional) The public key with the derivation path as the value.\n"
- " \"master_fingerprint\" : \"fingerprint\" (string) The fingerprint of the master key\n"
- " \"path\" : \"path\", (string) The path\n"
- " }\n"
- " ,...\n"
- " }\n"
- " \"final_scriptsig\" : { (json object, optional)\n"
- " \"asm\" : \"asm\", (string) The asm\n"
- " \"hex\" : \"hex\", (string) The hex\n"
- " }\n"
- " \"final_scriptwitness\": [\"hex\", ...] (array of string) hex-encoded witness data (if any)\n"
- " \"unknown\" : { (json object) The unknown global fields\n"
- " \"key\" : \"value\" (key-value pair) An unknown key-value pair\n"
- " ...\n"
- " },\n"
- " }\n"
- " ,...\n"
- " ]\n"
- " \"outputs\" : [ (array of json objects)\n"
- " {\n"
- " \"redeem_script\" : { (json object, optional)\n"
- " \"asm\" : \"asm\", (string) The asm\n"
- " \"hex\" : \"hex\", (string) The hex\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
- " }\n"
- " \"witness_script\" : { (json object, optional)\n"
- " \"asm\" : \"asm\", (string) The asm\n"
- " \"hex\" : \"hex\", (string) The hex\n"
- " \"type\" : \"pubkeyhash\", (string) The type, eg 'pubkeyhash'\n"
- " }\n"
- " \"bip32_derivs\" : [ (array of json objects, optional)\n"
- " {\n"
- " \"pubkey\" : \"pubkey\", (string) The public key this path corresponds to\n"
- " \"master_fingerprint\" : \"fingerprint\" (string) The fingerprint of the master key\n"
- " \"path\" : \"path\", (string) The path\n"
- " }\n"
- " }\n"
- " ,...\n"
- " ],\n"
- " \"unknown\" : { (json object) The unknown global fields\n"
- " \"key\" : \"value\" (key-value pair) An unknown key-value pair\n"
- " ...\n"
- " },\n"
- " }\n"
- " ,...\n"
- " ]\n"
- " \"fee\" : fee (numeric, optional) The transaction fee paid if all UTXOs slots in the PSBT have been filled.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ, "tx", "The decoded network-serialized unsigned transaction.",
+ {
+ {RPCResult::Type::ELISION, "", "The layout is the same as the output of decoderawtransaction."},
+ }},
+ {RPCResult::Type::OBJ_DYN, "unknown", "The unknown global fields",
+ {
+ {RPCResult::Type::STR_HEX, "key", "(key-value pair) An unknown key-value pair"},
+ }},
+ {RPCResult::Type::ARR, "inputs", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ, "non_witness_utxo", /* optional */ true, "Decoded network transaction for non-witness UTXOs",
+ {
+ {RPCResult::Type::ELISION, "",""},
+ }},
+ {RPCResult::Type::OBJ, "witness_utxo", /* optional */ true, "Transaction output for witness UTXOs",
+ {
+ {RPCResult::Type::NUM, "amount", "The value in " + CURRENCY_UNIT},
+ {RPCResult::Type::OBJ, "scriptPubKey", "",
+ {
+ {RPCResult::Type::STR, "asm", "The asm"},
+ {RPCResult::Type::STR_HEX, "hex", "The hex"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ {RPCResult::Type::STR, "address"," Bitcoin address if there is one"},
+ }},
+ }},
+ {RPCResult::Type::OBJ_DYN, "partial_signatures", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR, "pubkey", "The public key and signature that corresponds to it."},
+ }},
+ {RPCResult::Type::STR, "sighash", /* optional */ true, "The sighash type to be used"},
+ {RPCResult::Type::OBJ, "redeem_script", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR, "asm", "The asm"},
+ {RPCResult::Type::STR_HEX, "hex", "The hex"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ }},
+ {RPCResult::Type::OBJ, "witness_script", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR, "asm", "The asm"},
+ {RPCResult::Type::STR_HEX, "hex", "The hex"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ }},
+ {RPCResult::Type::ARR, "bip32_derivs", /* optional */ true, "",
+ {
+ {RPCResult::Type::OBJ, "pubkey", /* optional */ true, "The public key with the derivation path as the value.",
+ {
+ {RPCResult::Type::STR, "master_fingerprint", "The fingerprint of the master key"},
+ {RPCResult::Type::STR, "path", "The path"},
+ }},
+ }},
+ {RPCResult::Type::OBJ, "final_scriptsig", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR, "asm", "The asm"},
+ {RPCResult::Type::STR, "hex", "The hex"},
+ }},
+ {RPCResult::Type::ARR, "final_scriptwitness", "",
+ {
+ {RPCResult::Type::STR_HEX, "", "hex-encoded witness data (if any)"},
+ }},
+ {RPCResult::Type::OBJ_DYN, "unknown", "The unknown global fields",
+ {
+ {RPCResult::Type::STR_HEX, "key", "(key-value pair) An unknown key-value pair"},
+ }},
+ }},
+ }},
+ {RPCResult::Type::ARR, "outputs", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ, "redeem_script", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR, "asm", "The asm"},
+ {RPCResult::Type::STR_HEX, "hex", "The hex"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ }},
+ {RPCResult::Type::OBJ, "witness_script", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR, "asm", "The asm"},
+ {RPCResult::Type::STR_HEX, "hex", "The hex"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ }},
+ {RPCResult::Type::ARR, "bip32_derivs", /* optional */ true, "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "pubkey", "The public key this path corresponds to"},
+ {RPCResult::Type::STR, "master_fingerprint", "The fingerprint of the master key"},
+ {RPCResult::Type::STR, "path", "The path"},
+ }},
+ }},
+ {RPCResult::Type::OBJ_DYN, "unknown", "The unknown global fields",
+ {
+ {RPCResult::Type::STR_HEX, "key", "(key-value pair) An unknown key-value pair"},
+ }},
+ }},
+ }},
+ {RPCResult::Type::STR_AMOUNT, "fee", /* optional */ true, "The transaction fee paid if all UTXOs slots in the PSBT have been filled."},
+ }
},
RPCExamples{
HelpExampleCli("decodepsbt", "\"psbt\"")
@@ -1237,14 +1280,14 @@ UniValue combinepsbt(const JSONRPCRequest& request)
"\nCombine multiple partially signed Bitcoin transactions into one transaction.\n"
"Implements the Combiner role.\n",
{
- {"txs", RPCArg::Type::ARR, RPCArg::Optional::NO, "A json array of base64 strings of partially signed transactions",
+ {"txs", RPCArg::Type::ARR, RPCArg::Optional::NO, "The base64 strings of partially signed transactions",
{
{"psbt", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "A base64 string of a PSBT"},
},
},
},
RPCResult{
- " \"psbt\" (string) The base64-encoded partially signed transaction\n"
+ RPCResult::Type::STR, "", "The base64-encoded partially signed transaction"
},
RPCExamples{
HelpExampleCli("combinepsbt", R"('["mybase64_1", "mybase64_2", "mybase64_3"]')")
@@ -1292,11 +1335,12 @@ UniValue finalizepsbt(const JSONRPCRequest& request)
" extract and return the complete transaction in normal network serialization instead of the PSBT."},
},
RPCResult{
- "{ (json object)\n"
- " \"psbt\" : \"str\", (string) The base64-encoded partially signed transaction if not extracted\n"
- " \"hex\" : \"hex\", (string) The hex-encoded network transaction if extracted\n"
- " \"complete\" : true|false, (boolean) If the transaction has a complete set of signatures\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "psbt", "The base64-encoded partially signed transaction if not extracted"},
+ {RPCResult::Type::STR_HEX, "hex", "The hex-encoded network transaction if extracted"},
+ {RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"},
+ }
},
RPCExamples{
HelpExampleCli("finalizepsbt", "\"psbt\"")
@@ -1341,7 +1385,7 @@ UniValue createpsbt(const JSONRPCRequest& request)
"\nCreates a transaction in the Partially Signed Transaction format.\n"
"Implements the Creator role.\n",
{
- {"inputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "A json array of json objects",
+ {"inputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "The json objects",
{
{"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
{
@@ -1352,7 +1396,7 @@ UniValue createpsbt(const JSONRPCRequest& request)
},
},
},
- {"outputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "a json array with outputs (key-value pairs), where none of the keys are duplicated.\n"
+ {"outputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "The outputs (key-value pairs), where none of the keys are duplicated.\n"
"That is, each address can only appear once and there can only be one 'data' object.\n"
"For compatibility reasons, a dictionary, which holds the key-value pairs directly, is also\n"
" accepted as second parameter.",
@@ -1374,7 +1418,7 @@ UniValue createpsbt(const JSONRPCRequest& request)
" Allows this transaction to be replaced by a transaction with higher fees. If provided, it is an error if explicit sequence numbers are incompatible."},
},
RPCResult{
- " \"psbt\" (string) The resulting raw transaction (base64-encoded string)\n"
+ RPCResult::Type::STR, "", "The resulting raw transaction (base64-encoded string)"
},
RPCExamples{
HelpExampleCli("createpsbt", "\"[{\\\"txid\\\":\\\"myid\\\",\\\"vout\\\":0}]\" \"[{\\\"data\\\":\\\"00010203\\\"}]\"")
@@ -1431,7 +1475,7 @@ UniValue converttopsbt(const JSONRPCRequest& request)
},
},
RPCResult{
- " \"psbt\" (string) The resulting raw transaction (base64-encoded string)\n"
+ RPCResult::Type::STR, "", "The resulting raw transaction (base64-encoded string)"
},
RPCExamples{
"\nCreate a transaction\n"
@@ -1495,7 +1539,7 @@ UniValue utxoupdatepsbt(const JSONRPCRequest& request)
}},
},
RPCResult {
- " \"psbt\" (string) The base64-encoded partially signed transaction with inputs updated\n"
+ RPCResult::Type::STR, "", "The base64-encoded partially signed transaction with inputs updated"
},
RPCExamples {
HelpExampleCli("utxoupdatepsbt", "\"psbt\"")
@@ -1574,13 +1618,13 @@ UniValue joinpsbts(const JSONRPCRequest& request)
"\nJoins multiple distinct PSBTs with different inputs and outputs into one PSBT with inputs and outputs from all of the PSBTs\n"
"No input in any of the PSBTs can be in more than one of the PSBTs.\n",
{
- {"txs", RPCArg::Type::ARR, RPCArg::Optional::NO, "A json array of base64 strings of partially signed transactions",
+ {"txs", RPCArg::Type::ARR, RPCArg::Optional::NO, "The base64 strings of partially signed transactions",
{
{"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "A base64 string of a PSBT"}
}}
},
RPCResult {
- " \"psbt\" (string) The base64-encoded partially signed transaction\n"
+ RPCResult::Type::STR, "", "The base64-encoded partially signed transaction"
},
RPCExamples {
HelpExampleCli("joinpsbts", "\"psbt\"")
@@ -1669,31 +1713,36 @@ UniValue analyzepsbt(const JSONRPCRequest& request)
{"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "A base64 string of a PSBT"}
},
RPCResult {
- "{\n"
- " \"inputs\" : [ (array of json objects)\n"
- " {\n"
- " \"has_utxo\" : true|false (boolean) Whether a UTXO is provided\n"
- " \"is_final\" : true|false (boolean) Whether the input is finalized\n"
- " \"missing\" : { (json object, optional) Things that are missing that are required to complete this input\n"
- " \"pubkeys\" : [ (array, optional)\n"
- " \"keyid\" (string) Public key ID, hash160 of the public key, of a public key whose BIP 32 derivation path is missing\n"
- " ]\n"
- " \"signatures\" : [ (array, optional)\n"
- " \"keyid\" (string) Public key ID, hash160 of the public key, of a public key whose signature is missing\n"
- " ]\n"
- " \"redeemscript\" : \"hash\" (string, optional) Hash160 of the redeemScript that is missing\n"
- " \"witnessscript\" : \"hash\" (string, optional) SHA256 of the witnessScript that is missing\n"
- " }\n"
- " \"next\" : \"role\" (string, optional) Role of the next person that this input needs to go to\n"
- " }\n"
- " ,...\n"
- " ]\n"
- " \"estimated_vsize\" : vsize (numeric, optional) Estimated vsize of the final signed transaction\n"
- " \"estimated_feerate\" : feerate (numeric, optional) Estimated feerate of the final signed transaction in " + CURRENCY_UNIT + "/kB. Shown only if all UTXO slots in the PSBT have been filled.\n"
- " \"fee\" : fee (numeric, optional) The transaction fee paid. Shown only if all UTXO slots in the PSBT have been filled.\n"
- " \"next\" : \"role\" (string) Role of the next person that this psbt needs to go to\n"
- " \"error\" : \"error\" (string) Error message if there is one\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ARR, "inputs", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "has_utxo", "Whether a UTXO is provided"},
+ {RPCResult::Type::BOOL, "is_final", "Whether the input is finalized"},
+ {RPCResult::Type::OBJ, "missing", /* optional */ true, "Things that are missing that are required to complete this input",
+ {
+ {RPCResult::Type::ARR, "pubkeys", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR_HEX, "keyid", "Public key ID, hash160 of the public key, of a public key whose BIP 32 derivation path is missing"},
+ }},
+ {RPCResult::Type::ARR, "signatures", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR_HEX, "keyid", "Public key ID, hash160 of the public key, of a public key whose signature is missing"},
+ }},
+ {RPCResult::Type::STR_HEX, "redeemscript", /* optional */ true, "Hash160 of the redeemScript that is missing"},
+ {RPCResult::Type::STR_HEX, "witnessscript", /* optional */ true, "SHA256 of the witnessScript that is missing"},
+ }},
+ {RPCResult::Type::STR, "next", /* optional */ true, "Role of the next person that this input needs to go to"},
+ }},
+ }},
+ {RPCResult::Type::NUM, "estimated_vsize", /* optional */ true, "Estimated vsize of the final signed transaction"},
+ {RPCResult::Type::STR_AMOUNT, "estimated_feerate", /* optional */ true, "Estimated feerate of the final signed transaction in " + CURRENCY_UNIT + "/kB. Shown only if all UTXO slots in the PSBT have been filled"},
+ {RPCResult::Type::STR_AMOUNT, "fee", /* optional */ true, "The transaction fee paid. Shown only if all UTXO slots in the PSBT have been filled"},
+ {RPCResult::Type::STR, "next", "Role of the next person that this psbt needs to go to"},
+ {RPCResult::Type::STR, "error", "Error message if there is one"},
+ }
},
RPCExamples {
HelpExampleCli("analyzepsbt", "\"psbt\"")
diff --git a/src/rpc/rawtransaction_util.cpp b/src/rpc/rawtransaction_util.cpp
index 40334883c5..54baec6c6f 100644
--- a/src/rpc/rawtransaction_util.cpp
+++ b/src/rpc/rawtransaction_util.cpp
@@ -272,55 +272,27 @@ void SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore,
{
int nHashType = ParseSighashString(hashType);
- bool fHashSingle = ((nHashType & ~SIGHASH_ANYONECANPAY) == SIGHASH_SINGLE);
-
// Script verification errors
- UniValue vErrors(UniValue::VARR);
-
- // Use CTransaction for the constant parts of the
- // transaction to avoid rehashing.
- const CTransaction txConst(mtx);
- // Sign what we can:
- for (unsigned int i = 0; i < mtx.vin.size(); i++) {
- CTxIn& txin = mtx.vin[i];
- auto coin = coins.find(txin.prevout);
- if (coin == coins.end() || coin->second.IsSpent()) {
- TxInErrorToJSON(txin, vErrors, "Input not found or already spent");
- continue;
- }
- const CScript& prevPubKey = coin->second.out.scriptPubKey;
- const CAmount& amount = coin->second.out.nValue;
-
- SignatureData sigdata = DataFromTransaction(mtx, i, coin->second.out);
- // Only sign SIGHASH_SINGLE if there's a corresponding output:
- if (!fHashSingle || (i < mtx.vout.size())) {
- ProduceSignature(*keystore, MutableTransactionSignatureCreator(&mtx, i, amount, nHashType), prevPubKey, sigdata);
- }
-
- UpdateInput(txin, sigdata);
+ std::map<int, std::string> input_errors;
- // amount must be specified for valid segwit signature
- if (amount == MAX_MONEY && !txin.scriptWitness.IsNull()) {
- throw JSONRPCError(RPC_TYPE_ERROR, strprintf("Missing amount for %s", coin->second.out.ToString()));
- }
+ bool complete = SignTransaction(mtx, keystore, coins, nHashType, input_errors);
+ SignTransactionResultToJSON(mtx, complete, coins, input_errors, result);
+}
- ScriptError serror = SCRIPT_ERR_OK;
- if (!VerifyScript(txin.scriptSig, prevPubKey, &txin.scriptWitness, STANDARD_SCRIPT_VERIFY_FLAGS, TransactionSignatureChecker(&txConst, i, amount), &serror)) {
- if (serror == SCRIPT_ERR_INVALID_STACK_OPERATION) {
- // Unable to sign input and verification failed (possible attempt to partially sign).
- TxInErrorToJSON(txin, vErrors, "Unable to sign input, invalid stack size (possibly missing key)");
- } else if (serror == SCRIPT_ERR_SIG_NULLFAIL) {
- // Verification failed (possibly due to insufficient signatures).
- TxInErrorToJSON(txin, vErrors, "CHECK(MULTI)SIG failing with non-zero signature (possibly need more signatures)");
- } else {
- TxInErrorToJSON(txin, vErrors, ScriptErrorString(serror));
- }
+void SignTransactionResultToJSON(CMutableTransaction& mtx, bool complete, const std::map<COutPoint, Coin>& coins, std::map<int, std::string>& input_errors, UniValue& result)
+{
+ // Make errors UniValue
+ UniValue vErrors(UniValue::VARR);
+ for (const auto& err_pair : input_errors) {
+ if (err_pair.second == "Missing amount") {
+ // This particular error needs to be an exception for some reason
+ throw JSONRPCError(RPC_TYPE_ERROR, strprintf("Missing amount for %s", coins.at(mtx.vin.at(err_pair.first).prevout).out.ToString()));
}
+ TxInErrorToJSON(mtx.vin.at(err_pair.first), vErrors, err_pair.second);
}
- bool fComplete = vErrors.empty();
result.pushKV("hex", EncodeHexTx(CTransaction(mtx)));
- result.pushKV("complete", fComplete);
+ result.pushKV("complete", complete);
if (!vErrors.empty()) {
if (result.exists("errors")) {
vErrors.push_backV(result["errors"].getValues());
diff --git a/src/rpc/rawtransaction_util.h b/src/rpc/rawtransaction_util.h
index 4750fd64ed..436db5dc60 100644
--- a/src/rpc/rawtransaction_util.h
+++ b/src/rpc/rawtransaction_util.h
@@ -6,6 +6,7 @@
#define BITCOIN_RPC_RAWTRANSACTION_UTIL_H
#include <map>
+#include <string>
class FillableSigningProvider;
class UniValue;
@@ -24,6 +25,7 @@ class SigningProvider;
* @param result JSON object where signed transaction results accumulate
*/
void SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore, const std::map<COutPoint, Coin>& coins, const UniValue& hashType, UniValue& result);
+void SignTransactionResultToJSON(CMutableTransaction& mtx, bool complete, const std::map<COutPoint, Coin>& coins, std::map<int, std::string>& input_errors, UniValue& result);
/**
* Parse a prevtxs UniValue array and get the map of coins from it
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index df8e687d82..e2618c16da 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -137,7 +137,7 @@ UniValue help(const JSONRPCRequest& jsonRequest)
{"command", RPCArg::Type::STR, /* default */ "all commands", "The command to get help on"},
},
RPCResult{
- "\"text\" (string) The help text\n"
+ RPCResult::Type::STR, "", "The help text"
},
RPCExamples{""},
}.ToString()
@@ -153,6 +153,7 @@ UniValue help(const JSONRPCRequest& jsonRequest)
UniValue stop(const JSONRPCRequest& jsonRequest)
{
+ static const std::string RESULT{PACKAGE_NAME " stopping"};
// Accept the deprecated and ignored 'detach' boolean argument
// Also accept the hidden 'wait' integer argument (milliseconds)
// For instance, 'stop 1000' makes the call wait 1 second before returning
@@ -162,16 +163,16 @@ UniValue stop(const JSONRPCRequest& jsonRequest)
RPCHelpMan{"stop",
"\nRequest a graceful shutdown of " PACKAGE_NAME ".",
{},
- RPCResults{},
+ RPCResult{RPCResult::Type::STR, "", "A string with the content '" + RESULT + "'"},
RPCExamples{""},
}.ToString());
// Event loop will exit after current HTTP requests have been handled, so
// this reply will get back to the client.
StartShutdown();
if (jsonRequest.params[0].isNum()) {
- MilliSleep(jsonRequest.params[0].get_int());
+ UninterruptibleSleep(std::chrono::milliseconds{jsonRequest.params[0].get_int()});
}
- return PACKAGE_NAME " stopping";
+ return RESULT;
}
static UniValue uptime(const JSONRPCRequest& jsonRequest)
@@ -180,7 +181,7 @@ static UniValue uptime(const JSONRPCRequest& jsonRequest)
"\nReturns the total uptime of the server.\n",
{},
RPCResult{
- "ttt (numeric) The number of seconds that the server has been running\n"
+ RPCResult::Type::NUM, "", "The number of seconds that the server has been running"
},
RPCExamples{
HelpExampleCli("uptime", "")
@@ -197,16 +198,18 @@ static UniValue getrpcinfo(const JSONRPCRequest& request)
"\nReturns details of the RPC server.\n",
{},
RPCResult{
- "{\n"
- " \"active_commands\" (array) All active commands\n"
- " [\n"
- " { (object) Information about an active command\n"
- " \"method\" (string) The name of the RPC command \n"
- " \"duration\" (numeric) The running time in microseconds\n"
- " },...\n"
- " ],\n"
- " \"logpath\": \"xxx\" (string) The complete file path to the debug log\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ARR, "active_commands", "All active commands",
+ {
+ {RPCResult::Type::OBJ, "", "Information about an active command",
+ {
+ {RPCResult::Type::STR, "method", "The name of the RPC command"},
+ {RPCResult::Type::NUM, "duration", "The running time in microseconds"},
+ }},
+ }},
+ {RPCResult::Type::STR, "logpath", "The complete file path to the debug log"},
+ }
},
RPCExamples{
HelpExampleCli("getrpcinfo", "")
diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp
index 78586c22f9..4ba84d2515 100644
--- a/src/rpc/util.cpp
+++ b/src/rpc/util.cpp
@@ -14,6 +14,7 @@
#include <tuple>
const std::string UNIX_EPOCH_TIME = "UNIX epoch time";
+const std::string EXAMPLE_ADDRESS[2] = {"bc1q09vm5lfy0j5reeulh4x5752q25uqqvz34hufdl", "bc1q02ad21edsxd23d32dfgqqsz4vv4nmtfzuklhy3"};
void RPCTypeCheck(const UniValue& params,
const std::list<UniValueType>& typesExpected,
@@ -115,8 +116,8 @@ std::string HelpExampleCli(const std::string& methodname, const std::string& arg
std::string HelpExampleRpc(const std::string& methodname, const std::string& args)
{
- return "> curl --user myusername --data-binary '{\"jsonrpc\": \"1.0\", \"id\":\"curltest\", "
- "\"method\": \"" + methodname + "\", \"params\": [" + args + "] }' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n";
+ return "> curl --user myusername --data-binary '{\"jsonrpc\": \"1.0\", \"id\": \"curltest\", "
+ "\"method\": \"" + methodname + "\", \"params\": [" + args + "]}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n";
}
// Converts a hex string to a public key if possible
@@ -311,20 +312,9 @@ struct Sections {
}
/**
- * Serializing RPCArgs depends on the outer type. Only arrays and
- * dictionaries can be nested in json. The top-level outer type is "named
- * arguments", a mix between a dictionary and arrays.
- */
- enum class OuterType {
- ARR,
- OBJ,
- NAMED_ARG, // Only set on first recursion
- };
-
- /**
* Recursive helper to translate an RPCArg into sections
*/
- void Push(const RPCArg& arg, const size_t current_indent = 5, const OuterType outer_type = OuterType::NAMED_ARG)
+ void Push(const RPCArg& arg, const size_t current_indent = 5, const OuterType outer_type = OuterType::NONE)
{
const auto indent = std::string(current_indent, ' ');
const auto indent_next = std::string(current_indent + 2, ' ');
@@ -337,7 +327,7 @@ struct Sections {
case RPCArg::Type::AMOUNT:
case RPCArg::Type::RANGE:
case RPCArg::Type::BOOL: {
- if (outer_type == OuterType::NAMED_ARG) return; // Nothing more to do for non-recursive types on first recursion
+ if (outer_type == OuterType::NONE) return; // Nothing more to do for non-recursive types on first recursion
auto left = indent;
if (arg.m_type_str.size() != 0 && push_name) {
left += "\"" + arg.m_name + "\": " + arg.m_type_str.at(0);
@@ -350,7 +340,7 @@ struct Sections {
}
case RPCArg::Type::OBJ:
case RPCArg::Type::OBJ_USER_KEYS: {
- const auto right = outer_type == OuterType::NAMED_ARG ? "" : arg.ToDescriptionString();
+ const auto right = outer_type == OuterType::NONE ? "" : arg.ToDescriptionString();
PushSection({indent + (push_name ? "\"" + arg.m_name + "\": " : "") + "{", right});
for (const auto& arg_inner : arg.m_inner) {
Push(arg_inner, current_indent + 2, OuterType::OBJ);
@@ -358,20 +348,20 @@ struct Sections {
if (arg.m_type != RPCArg::Type::OBJ) {
PushSection({indent_next + "...", ""});
}
- PushSection({indent + "}" + (outer_type != OuterType::NAMED_ARG ? "," : ""), ""});
+ PushSection({indent + "}" + (outer_type != OuterType::NONE ? "," : ""), ""});
break;
}
case RPCArg::Type::ARR: {
auto left = indent;
left += push_name ? "\"" + arg.m_name + "\": " : "";
left += "[";
- const auto right = outer_type == OuterType::NAMED_ARG ? "" : arg.ToDescriptionString();
+ const auto right = outer_type == OuterType::NONE ? "" : arg.ToDescriptionString();
PushSection({left, right});
for (const auto& arg_inner : arg.m_inner) {
Push(arg_inner, current_indent + 2, OuterType::ARR);
}
PushSection({indent_next + "...", ""});
- PushSection({indent + "]" + (outer_type != OuterType::NAMED_ARG ? "," : ""), ""});
+ PushSection({indent + "]" + (outer_type != OuterType::NONE ? "," : ""), ""});
break;
}
@@ -443,7 +433,9 @@ std::string RPCResults::ToDescriptionString() const
} else {
result += "\nResult (" + r.m_cond + "):\n";
}
- result += r.m_result;
+ Sections sections;
+ r.ToSections(sections);
+ result += sections.ToString();
}
return result;
}
@@ -497,7 +489,7 @@ std::string RPCHelpMan::ToString() const
if (i == 0) ret += "\nArguments:\n";
// Push named argument name and description
- sections.m_sections.emplace_back(std::to_string(i + 1) + ". " + arg.m_name, arg.ToDescriptionString());
+ sections.m_sections.emplace_back(::ToString(i + 1) + ". " + arg.m_name, arg.ToDescriptionString());
sections.m_max_pad = std::max(sections.m_max_pad, sections.m_sections.back().m_left.size());
// Recursively push nested args
@@ -590,6 +582,93 @@ std::string RPCArg::ToDescriptionString() const
return ret;
}
+void RPCResult::ToSections(Sections& sections, const OuterType outer_type, const int current_indent) const
+{
+ // Indentation
+ const std::string indent(current_indent, ' ');
+ const std::string indent_next(current_indent + 2, ' ');
+
+ // Elements in a JSON structure (dictionary or array) are separated by a comma
+ const std::string maybe_separator{outer_type != OuterType::NONE ? "," : ""};
+
+ // The key name if recursed into a dictionary
+ const std::string maybe_key{
+ outer_type == OuterType::OBJ ?
+ "\"" + this->m_key_name + "\" : " :
+ ""};
+
+ // Format description with type
+ const auto Description = [&](const std::string& type) {
+ return "(" + type + (this->m_optional ? ", optional" : "") + ")" +
+ (this->m_description.empty() ? "" : " " + this->m_description);
+ };
+
+ switch (m_type) {
+ case Type::ELISION: {
+ // If the inner result is empty, use three dots for elision
+ sections.PushSection({indent_next + "...", m_description});
+ return;
+ }
+ case Type::NONE: {
+ sections.PushSection({indent + "None", Description("json null")});
+ return;
+ }
+ case Type::STR: {
+ sections.PushSection({indent + maybe_key + "\"str\"" + maybe_separator, Description("string")});
+ return;
+ }
+ case Type::STR_AMOUNT: {
+ sections.PushSection({indent + maybe_key + "n" + maybe_separator, Description("numeric")});
+ return;
+ }
+ case Type::STR_HEX: {
+ sections.PushSection({indent + maybe_key + "\"hex\"" + maybe_separator, Description("string")});
+ return;
+ }
+ case Type::NUM: {
+ sections.PushSection({indent + maybe_key + "n" + maybe_separator, Description("numeric")});
+ return;
+ }
+ case Type::NUM_TIME: {
+ sections.PushSection({indent + maybe_key + "xxx" + maybe_separator, Description("numeric")});
+ return;
+ }
+ case Type::BOOL: {
+ sections.PushSection({indent + maybe_key + "true|false" + maybe_separator, Description("boolean")});
+ return;
+ }
+ case Type::ARR_FIXED:
+ case Type::ARR: {
+ sections.PushSection({indent + maybe_key + "[", Description("json array")});
+ for (const auto& i : m_inner) {
+ i.ToSections(sections, OuterType::ARR, current_indent + 2);
+ }
+ if (m_type == Type::ARR) {
+ sections.PushSection({indent_next + "...", ""});
+ }
+ sections.PushSection({indent + "]" + maybe_separator, ""});
+ return;
+ }
+ case Type::OBJ_DYN:
+ case Type::OBJ: {
+ sections.PushSection({indent + maybe_key + "{", Description("json object")});
+ for (const auto& i : m_inner) {
+ i.ToSections(sections, OuterType::OBJ, current_indent + 2);
+ }
+ if (m_type == Type::OBJ_DYN) {
+ // If the dictionary keys are dynamic, use three dots for continuation
+ sections.PushSection({indent_next + "...", ""});
+ }
+ sections.PushSection({indent + "}" + maybe_separator, ""});
+ return;
+ }
+
+ // no default case, so the compiler can warn about missing cases
+ }
+
+ CHECK_NONFATAL(false);
+}
+
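To make the rendering concrete, here is a hand-worked example (not generated output) of the sections the switch above would emit for a small hypothetical result:

// Hypothetical input (not from this patch):
//   RPCResult{RPCResult::Type::OBJ, "", "",
//       {
//           {RPCResult::Type::STR_HEX, "hex", "The hex-encoded transaction"},
//           {RPCResult::Type::BOOL, "complete", /* optional */ true, "Whether all signatures are present"},
//       }}
//
// Approximate sections pushed by ToSections() (left column / right column):
//   {                                (json object)
//     "hex" : "hex",                 (string) The hex-encoded transaction
//     "complete" : true|false,       (boolean, optional) Whether all signatures are present
//   }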
std::string RPCArg::ToStringObj(const bool oneline) const
{
std::string res;
diff --git a/src/rpc/util.h b/src/rpc/util.h
index 065a992a88..f65ad1246b 100644
--- a/src/rpc/util.h
+++ b/src/rpc/util.h
@@ -28,9 +28,16 @@
*/
extern const std::string UNIX_EPOCH_TIME;
+/**
+ * Example bech32 addresses for the RPCExamples help documentation. They are intentionally
+ * invalid to prevent accidental transactions by users.
+ */
+extern const std::string EXAMPLE_ADDRESS[2];
+
class FillableSigningProvider;
class CPubKey;
class CScript;
+struct Sections;
/** Wrapper for UniValue::VType, which includes typeAny:
* Used to denote don't care type. */
@@ -95,6 +102,16 @@ std::vector<CScript> EvalDescriptorStringOrObject(const UniValue& scanobject, Fl
/** Returns, given services flags, a list of humanly readable (known) network services */
UniValue GetServicesNames(ServiceFlags services);
+/**
+ * Serializing JSON objects depends on the outer type. Only arrays and
+ * dictionaries can be nested in json. The top-level outer type is "NONE".
+ */
+enum class OuterType {
+ ARR,
+ OBJ,
+ NONE, // Only set on first recursion
+};
+
struct RPCArg {
enum class Type {
OBJ,
@@ -134,37 +151,37 @@ struct RPCArg {
const std::vector<std::string> m_type_str; //!< Should be empty unless it is supposed to override the auto-generated type strings. Vector length is either 0 or 2, m_type_str.at(0) will override the type of the value in a key-value pair, m_type_str.at(1) will override the type in the argument description.
RPCArg(
- const std::string& name,
- const Type& type,
- const Fallback& fallback,
- const std::string& description,
- const std::string& oneline_description = "",
- const std::vector<std::string>& type_str = {})
- : m_name{name},
- m_type{type},
- m_fallback{fallback},
- m_description{description},
- m_oneline_description{oneline_description},
- m_type_str{type_str}
+ const std::string name,
+ const Type type,
+ const Fallback fallback,
+ const std::string description,
+ const std::string oneline_description = "",
+ const std::vector<std::string> type_str = {})
+ : m_name{std::move(name)},
+ m_type{std::move(type)},
+ m_fallback{std::move(fallback)},
+ m_description{std::move(description)},
+ m_oneline_description{std::move(oneline_description)},
+ m_type_str{std::move(type_str)}
{
CHECK_NONFATAL(type != Type::ARR && type != Type::OBJ);
}
RPCArg(
- const std::string& name,
- const Type& type,
- const Fallback& fallback,
- const std::string& description,
- const std::vector<RPCArg>& inner,
- const std::string& oneline_description = "",
- const std::vector<std::string>& type_str = {})
- : m_name{name},
- m_type{type},
- m_inner{inner},
- m_fallback{fallback},
- m_description{description},
- m_oneline_description{oneline_description},
- m_type_str{type_str}
+ const std::string name,
+ const Type type,
+ const Fallback fallback,
+ const std::string description,
+ const std::vector<RPCArg> inner,
+ const std::string oneline_description = "",
+ const std::vector<std::string> type_str = {})
+ : m_name{std::move(name)},
+ m_type{std::move(type)},
+ m_inner{std::move(inner)},
+ m_fallback{std::move(fallback)},
+ m_description{std::move(description)},
+ m_oneline_description{std::move(oneline_description)},
+ m_type_str{std::move(type_str)}
{
CHECK_NONFATAL(type == Type::ARR || type == Type::OBJ);
}
@@ -189,31 +206,90 @@ struct RPCArg {
};
struct RPCResult {
+ enum class Type {
+ OBJ,
+ ARR,
+ STR,
+ NUM,
+ BOOL,
+ NONE,
+ STR_AMOUNT, //!< Special string to represent a floating point amount
+ STR_HEX, //!< Special string with only hex chars
+ OBJ_DYN, //!< Special dictionary with keys that are not literals
+ ARR_FIXED, //!< Special array that has a fixed number of entries
+ NUM_TIME, //!< Special numeric to denote unix epoch time
+ ELISION, //!< Special type to denote elision (...)
+ };
+
+ const Type m_type;
+ const std::string m_key_name; //!< Only used for dicts
+ const std::vector<RPCResult> m_inner; //!< Only used for arrays or dicts
+ const bool m_optional;
+ const std::string m_description;
const std::string m_cond;
- const std::string m_result;
- explicit RPCResult(std::string result)
- : m_cond{}, m_result{std::move(result)}
+ RPCResult(
+ const std::string cond,
+ const Type type,
+ const std::string m_key_name,
+ const bool optional,
+ const std::string description,
+ const std::vector<RPCResult> inner = {})
+ : m_type{std::move(type)},
+ m_key_name{std::move(m_key_name)},
+ m_inner{std::move(inner)},
+ m_optional{optional},
+ m_description{std::move(description)},
+ m_cond{std::move(cond)}
{
- CHECK_NONFATAL(!m_result.empty());
+ CHECK_NONFATAL(!m_cond.empty());
+ const bool inner_needed{type == Type::ARR || type == Type::ARR_FIXED || type == Type::OBJ || type == Type::OBJ_DYN};
+ CHECK_NONFATAL(inner_needed != inner.empty());
}
- RPCResult(std::string cond, std::string result)
- : m_cond{std::move(cond)}, m_result{std::move(result)}
+ RPCResult(
+ const std::string cond,
+ const Type type,
+ const std::string m_key_name,
+ const std::string description,
+ const std::vector<RPCResult> inner = {})
+ : RPCResult{cond, type, m_key_name, false, description, inner} {}
+
+ RPCResult(
+ const Type type,
+ const std::string m_key_name,
+ const bool optional,
+ const std::string description,
+ const std::vector<RPCResult> inner = {})
+ : m_type{std::move(type)},
+ m_key_name{std::move(m_key_name)},
+ m_inner{std::move(inner)},
+ m_optional{optional},
+ m_description{std::move(description)},
+ m_cond{}
{
- CHECK_NONFATAL(!m_cond.empty());
- CHECK_NONFATAL(!m_result.empty());
+ const bool inner_needed{type == Type::ARR || type == Type::ARR_FIXED || type == Type::OBJ || type == Type::OBJ_DYN};
+ CHECK_NONFATAL(inner_needed != inner.empty());
}
+
+ RPCResult(
+ const Type type,
+ const std::string m_key_name,
+ const std::string description,
+ const std::vector<RPCResult> inner = {})
+ : RPCResult{type, m_key_name, false, description, inner} {}
+
+ /** Append the sections of the result. */
+ void ToSections(Sections& sections, OuterType outer_type = OuterType::NONE, const int current_indent = 0) const;
+ /** Return the type string of the result when it is in an object (dict). */
+ std::string ToStringObj() const;
+ /** Return the description string, including the result type. */
+ std::string ToDescriptionString() const;
};
struct RPCResults {
const std::vector<RPCResult> m_results;
- RPCResults()
- : m_results{}
- {
- }
-
RPCResults(RPCResult result)
: m_results{{result}}
{
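A short sketch of the constructor contract enforced above (illustrative, not from the patch): container types must come with inner results, leaf types must not.

// OK: an OBJ with inner entries, and a STR leaf without any.
const RPCResult ok{RPCResult::Type::OBJ, "", "",
    {
        {RPCResult::Type::STR, "psbt", "The base64-encoded partially signed transaction"},
    }};

// Either of these would trip CHECK_NONFATAL(inner_needed != inner.empty()):
//   RPCResult{RPCResult::Type::ARR, "inputs", "", {}};   // container without inner results
//   RPCResult{RPCResult::Type::STR, "psbt", "", {ok}};   // leaf with inner results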
diff --git a/src/scheduler.cpp b/src/scheduler.cpp
index 927a3f3820..4cac5a54e0 100644
--- a/src/scheduler.cpp
+++ b/src/scheduler.cpp
@@ -5,33 +5,24 @@
#include <scheduler.h>
#include <random.h>
-#include <reverselock.h>
#include <assert.h>
#include <utility>
-CScheduler::CScheduler() : nThreadsServicingQueue(0), stopRequested(false), stopWhenEmpty(false)
+CScheduler::CScheduler()
{
}
CScheduler::~CScheduler()
{
assert(nThreadsServicingQueue == 0);
+ if (stopWhenEmpty) assert(taskQueue.empty());
}
-#if BOOST_VERSION < 105000
-static boost::system_time toPosixTime(const boost::chrono::system_clock::time_point& t)
-{
- // Creating the posix_time using from_time_t loses sub-second precision. So rather than exporting the time_point to time_t,
- // start with a posix_time at the epoch (0) and add the milliseconds that have passed since then.
- return boost::posix_time::from_time_t(0) + boost::posix_time::milliseconds(boost::chrono::duration_cast<boost::chrono::milliseconds>(t.time_since_epoch()).count());
-}
-#endif
-
void CScheduler::serviceQueue()
{
- boost::unique_lock<boost::mutex> lock(newTaskMutex);
+ WAIT_LOCK(newTaskMutex, lock);
++nThreadsServicingQueue;
// newTaskMutex is locked throughout this loop EXCEPT
@@ -40,7 +31,7 @@ void CScheduler::serviceQueue()
while (!shouldStop()) {
try {
if (!shouldStop() && taskQueue.empty()) {
- reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
+ REVERSE_LOCK(lock);
}
while (!shouldStop() && taskQueue.empty()) {
// Wait until there is something to do.
@@ -50,21 +41,13 @@ void CScheduler::serviceQueue()
// Wait until either there is a new task, or until
// the time of the first item on the queue:
-// wait_until needs boost 1.50 or later; older versions have timed_wait:
-#if BOOST_VERSION < 105000
- while (!shouldStop() && !taskQueue.empty() &&
- newTaskScheduled.timed_wait(lock, toPosixTime(taskQueue.begin()->first))) {
- // Keep waiting until timeout
- }
-#else
- // Some boost versions have a conflicting overload of wait_until that returns void.
- // Explicitly use a template here to avoid hitting that overload.
while (!shouldStop() && !taskQueue.empty()) {
- boost::chrono::system_clock::time_point timeToWaitFor = taskQueue.begin()->first;
- if (newTaskScheduled.wait_until<>(lock, timeToWaitFor) == boost::cv_status::timeout)
+ std::chrono::system_clock::time_point timeToWaitFor = taskQueue.begin()->first;
+ if (newTaskScheduled.wait_until(lock, timeToWaitFor) == std::cv_status::timeout) {
break; // Exit loop after timeout, it means we reached the time of the event
+ }
}
-#endif
+
// If there are multiple threads, the queue can empty while we're waiting (another
// thread may service the task we were waiting on).
if (shouldStop() || taskQueue.empty())
@@ -76,7 +59,7 @@ void CScheduler::serviceQueue()
{
// Unlock before calling f, so it can reschedule itself or another task
// without deadlocking:
- reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
+ REVERSE_LOCK(lock);
f();
}
} catch (...) {
@@ -91,7 +74,7 @@ void CScheduler::serviceQueue()
void CScheduler::stop(bool drain)
{
{
- boost::unique_lock<boost::mutex> lock(newTaskMutex);
+ LOCK(newTaskMutex);
if (drain)
stopWhenEmpty = true;
else
@@ -100,35 +83,52 @@ void CScheduler::stop(bool drain)
newTaskScheduled.notify_all();
}
-void CScheduler::schedule(CScheduler::Function f, boost::chrono::system_clock::time_point t)
+void CScheduler::schedule(CScheduler::Function f, std::chrono::system_clock::time_point t)
{
{
- boost::unique_lock<boost::mutex> lock(newTaskMutex);
+ LOCK(newTaskMutex);
taskQueue.insert(std::make_pair(t, f));
}
newTaskScheduled.notify_one();
}
-void CScheduler::scheduleFromNow(CScheduler::Function f, int64_t deltaMilliSeconds)
+void CScheduler::MockForward(std::chrono::seconds delta_seconds)
{
- schedule(f, boost::chrono::system_clock::now() + boost::chrono::milliseconds(deltaMilliSeconds));
+ assert(delta_seconds.count() > 0 && delta_seconds < std::chrono::hours{1});
+
+ {
+ LOCK(newTaskMutex);
+
+ // use temp_queue to maintain updated schedule
+ std::multimap<std::chrono::system_clock::time_point, Function> temp_queue;
+
+ for (const auto& element : taskQueue) {
+ temp_queue.emplace_hint(temp_queue.cend(), element.first - delta_seconds, element.second);
+ }
+
+ // point taskQueue to temp_queue
+ taskQueue = std::move(temp_queue);
+ }
+
+ // notify that the taskQueue needs to be processed
+ newTaskScheduled.notify_one();
}
-static void Repeat(CScheduler* s, CScheduler::Function f, int64_t deltaMilliSeconds)
+static void Repeat(CScheduler& s, CScheduler::Function f, std::chrono::milliseconds delta)
{
f();
- s->scheduleFromNow(std::bind(&Repeat, s, f, deltaMilliSeconds), deltaMilliSeconds);
+ s.scheduleFromNow([=, &s] { Repeat(s, f, delta); }, delta);
}
-void CScheduler::scheduleEvery(CScheduler::Function f, int64_t deltaMilliSeconds)
+void CScheduler::scheduleEvery(CScheduler::Function f, std::chrono::milliseconds delta)
{
- scheduleFromNow(std::bind(&Repeat, this, f, deltaMilliSeconds), deltaMilliSeconds);
+ scheduleFromNow([=] { Repeat(*this, f, delta); }, delta);
}
-size_t CScheduler::getQueueInfo(boost::chrono::system_clock::time_point &first,
- boost::chrono::system_clock::time_point &last) const
+size_t CScheduler::getQueueInfo(std::chrono::system_clock::time_point &first,
+ std::chrono::system_clock::time_point &last) const
{
- boost::unique_lock<boost::mutex> lock(newTaskMutex);
+ LOCK(newTaskMutex);
size_t result = taskQueue.size();
if (!taskQueue.empty()) {
first = taskQueue.begin()->first;
@@ -138,7 +138,7 @@ size_t CScheduler::getQueueInfo(boost::chrono::system_clock::time_point &first,
}
bool CScheduler::AreThreadsServicingQueue() const {
- boost::unique_lock<boost::mutex> lock(newTaskMutex);
+ LOCK(newTaskMutex);
return nThreadsServicingQueue;
}
@@ -152,7 +152,7 @@ void SingleThreadedSchedulerClient::MaybeScheduleProcessQueue() {
if (m_are_callbacks_running) return;
if (m_callbacks_pending.empty()) return;
}
- m_pscheduler->schedule(std::bind(&SingleThreadedSchedulerClient::ProcessQueue, this));
+ m_pscheduler->schedule(std::bind(&SingleThreadedSchedulerClient::ProcessQueue, this), std::chrono::system_clock::now());
}
void SingleThreadedSchedulerClient::ProcessQueue() {
diff --git a/src/scheduler.h b/src/scheduler.h
index 7080adf34c..1e64195484 100644
--- a/src/scheduler.h
+++ b/src/scheduler.h
@@ -7,11 +7,12 @@
//
// NOTE:
-// boost::thread / boost::chrono should be ported to std::thread / std::chrono
+// boost::thread should be ported to std::thread
// when we support C++11.
//
-#include <boost/chrono/chrono.hpp>
-#include <boost/thread.hpp>
+#include <condition_variable>
+#include <functional>
+#include <list>
#include <map>
#include <sync.h>
@@ -23,12 +24,12 @@
// Usage:
//
// CScheduler* s = new CScheduler();
-// s->scheduleFromNow(doSomething, 11); // Assuming a: void doSomething() { }
-// s->scheduleFromNow(std::bind(Class::func, this, argument), 3);
+// s->scheduleFromNow(doSomething, std::chrono::milliseconds{11}); // Assuming a: void doSomething() { }
+// s->scheduleFromNow([=] { this->func(argument); }, std::chrono::milliseconds{3});
// boost::thread* t = new boost::thread(std::bind(CScheduler::serviceQueue, s));
//
-// ... then at program shutdown, clean up the thread running serviceQueue:
-// t->interrupt();
+// ... then at program shutdown, make sure to call stop() to clean up the thread(s) running serviceQueue:
+// s->stop();
// t->join();
// delete t;
// delete s; // Must be done after thread is interrupted/joined.
@@ -43,17 +44,28 @@ public:
typedef std::function<void()> Function;
// Call func at/after time t
- void schedule(Function f, boost::chrono::system_clock::time_point t=boost::chrono::system_clock::now());
+ void schedule(Function f, std::chrono::system_clock::time_point t);
- // Convenience method: call f once deltaMilliSeconds from now
- void scheduleFromNow(Function f, int64_t deltaMilliSeconds);
+ /** Call f once after the delta has passed */
+ void scheduleFromNow(Function f, std::chrono::milliseconds delta)
+ {
+ schedule(std::move(f), std::chrono::system_clock::now() + delta);
+ }
- // Another convenience method: call f approximately
- // every deltaMilliSeconds forever, starting deltaMilliSeconds from now.
- // To be more precise: every time f is finished, it
- // is rescheduled to run deltaMilliSeconds later. If you
- // need more accurate scheduling, don't use this method.
- void scheduleEvery(Function f, int64_t deltaMilliSeconds);
+ /**
+ * Repeat f until the scheduler is stopped. First run is after delta has passed once.
+ *
+ * The timing is not exact: Every time f is finished, it is rescheduled to run again after delta. If you need more
+ * accurate scheduling, don't use this method.
+ */
+ void scheduleEvery(Function f, std::chrono::milliseconds delta);
+
+ /**
+ * Mock the scheduler to fast forward in time.
+ * Iterates through items on taskQueue and reschedules them
+ * to be delta_seconds sooner.
+ */
+ void MockForward(std::chrono::seconds delta_seconds);
// To keep things as simple as possible, there is no unschedule.
@@ -68,20 +80,20 @@ public:
// Returns number of tasks waiting to be serviced,
// and first and last task times
- size_t getQueueInfo(boost::chrono::system_clock::time_point &first,
- boost::chrono::system_clock::time_point &last) const;
+ size_t getQueueInfo(std::chrono::system_clock::time_point &first,
+ std::chrono::system_clock::time_point &last) const;
// Returns true if there are threads actively running in serviceQueue()
bool AreThreadsServicingQueue() const;
private:
- std::multimap<boost::chrono::system_clock::time_point, Function> taskQueue;
- boost::condition_variable newTaskScheduled;
- mutable boost::mutex newTaskMutex;
- int nThreadsServicingQueue;
- bool stopRequested;
- bool stopWhenEmpty;
- bool shouldStop() const { return stopRequested || (stopWhenEmpty && taskQueue.empty()); }
+ mutable Mutex newTaskMutex;
+ std::condition_variable newTaskScheduled;
+ std::multimap<std::chrono::system_clock::time_point, Function> taskQueue GUARDED_BY(newTaskMutex);
+ int nThreadsServicingQueue GUARDED_BY(newTaskMutex){0};
+ bool stopRequested GUARDED_BY(newTaskMutex){false};
+ bool stopWhenEmpty GUARDED_BY(newTaskMutex){false};
+ bool shouldStop() const EXCLUSIVE_LOCKS_REQUIRED(newTaskMutex) { return stopRequested || (stopWhenEmpty && taskQueue.empty()); }
};
/**
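A hedged, test-style sketch of the new MockForward helper together with the chrono-based scheduling API (illustrative only; requires <chrono> and <thread>, and thread handling is simplified):

// Illustrative sketch, not part of this patch.
void MockForwardExample()
{
    CScheduler scheduler;
    std::thread service_thread([&] { scheduler.serviceQueue(); });

    int counter{0};
    scheduler.scheduleFromNow([&] { ++counter; }, std::chrono::minutes{5});

    // Pull every queued task 6 minutes closer, so the task above becomes due
    // immediately. MockForward asserts 0 < delta < 1 hour.
    scheduler.MockForward(std::chrono::seconds{360});

    scheduler.stop(/* drain= */ true); // serviceQueue() exits once the queue is empty
    service_thread.join();
}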
diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp
index 773d6a55c4..83dc046ca1 100644
--- a/src/script/descriptor.cpp
+++ b/src/script/descriptor.cpp
@@ -150,10 +150,22 @@ typedef std::vector<uint32_t> KeyPath;
/** Interface for public key objects in descriptors. */
struct PubkeyProvider
{
+protected:
+ //! Index of this key expression in the descriptor
+ //! E.g. If this PubkeyProvider is key1 in multi(2, key1, key2, key3), then m_expr_index = 0
+ uint32_t m_expr_index;
+
+public:
+ PubkeyProvider(uint32_t exp_index) : m_expr_index(exp_index) {}
+
virtual ~PubkeyProvider() = default;
- /** Derive a public key. If key==nullptr, only info is desired. */
- virtual bool GetPubKey(int pos, const SigningProvider& arg, CPubKey* key, KeyOriginInfo& info) const = 0;
+ /** Derive a public key.
+ * read_cache is the cache to read keys from (if not nullptr)
+ * write_cache is the cache to write keys to (if not nullptr)
+ * Caches are not exclusive but this is not tested. Currently we use them exclusively
+ */
+ virtual bool GetPubKey(int pos, const SigningProvider& arg, CPubKey& key, KeyOriginInfo& info, const DescriptorCache* read_cache = nullptr, DescriptorCache* write_cache = nullptr) = 0;
/** Whether this represent multiple public keys at different positions. */
virtual bool IsRange() const = 0;
@@ -182,10 +194,10 @@ class OriginPubkeyProvider final : public PubkeyProvider
}
public:
- OriginPubkeyProvider(KeyOriginInfo info, std::unique_ptr<PubkeyProvider> provider) : m_origin(std::move(info)), m_provider(std::move(provider)) {}
- bool GetPubKey(int pos, const SigningProvider& arg, CPubKey* key, KeyOriginInfo& info) const override
+ OriginPubkeyProvider(uint32_t exp_index, KeyOriginInfo info, std::unique_ptr<PubkeyProvider> provider) : PubkeyProvider(exp_index), m_origin(std::move(info)), m_provider(std::move(provider)) {}
+ bool GetPubKey(int pos, const SigningProvider& arg, CPubKey& key, KeyOriginInfo& info, const DescriptorCache* read_cache = nullptr, DescriptorCache* write_cache = nullptr) override
{
- if (!m_provider->GetPubKey(pos, arg, key, info)) return false;
+ if (!m_provider->GetPubKey(pos, arg, key, info, read_cache, write_cache)) return false;
std::copy(std::begin(m_origin.fingerprint), std::end(m_origin.fingerprint), info.fingerprint);
info.path.insert(info.path.begin(), m_origin.path.begin(), m_origin.path.end());
return true;
@@ -212,10 +224,10 @@ class ConstPubkeyProvider final : public PubkeyProvider
CPubKey m_pubkey;
public:
- ConstPubkeyProvider(const CPubKey& pubkey) : m_pubkey(pubkey) {}
- bool GetPubKey(int pos, const SigningProvider& arg, CPubKey* key, KeyOriginInfo& info) const override
+ ConstPubkeyProvider(uint32_t exp_index, const CPubKey& pubkey) : PubkeyProvider(exp_index), m_pubkey(pubkey) {}
+ bool GetPubKey(int pos, const SigningProvider& arg, CPubKey& key, KeyOriginInfo& info, const DescriptorCache* read_cache = nullptr, DescriptorCache* write_cache = nullptr) override
{
- if (key) *key = m_pubkey;
+ key = m_pubkey;
info.path.clear();
CKeyID keyid = m_pubkey.GetID();
std::copy(keyid.begin(), keyid.begin() + sizeof(info.fingerprint), info.fingerprint);
@@ -246,22 +258,36 @@ enum class DeriveType {
/** An object representing a parsed extended public key in a descriptor. */
class BIP32PubkeyProvider final : public PubkeyProvider
{
- CExtPubKey m_extkey;
+ // Root xpub, path, and final derivation step type being used, if any
+ CExtPubKey m_root_extkey;
KeyPath m_path;
DeriveType m_derive;
+ // Cache of the parent of the final derived pubkeys.
+ // Primarily useful for situations when no read_cache is provided
+ CExtPubKey m_cached_xpub;
bool GetExtKey(const SigningProvider& arg, CExtKey& ret) const
{
CKey key;
- if (!arg.GetKey(m_extkey.pubkey.GetID(), key)) return false;
- ret.nDepth = m_extkey.nDepth;
- std::copy(m_extkey.vchFingerprint, m_extkey.vchFingerprint + sizeof(ret.vchFingerprint), ret.vchFingerprint);
- ret.nChild = m_extkey.nChild;
- ret.chaincode = m_extkey.chaincode;
+ if (!arg.GetKey(m_root_extkey.pubkey.GetID(), key)) return false;
+ ret.nDepth = m_root_extkey.nDepth;
+ std::copy(m_root_extkey.vchFingerprint, m_root_extkey.vchFingerprint + sizeof(ret.vchFingerprint), ret.vchFingerprint);
+ ret.nChild = m_root_extkey.nChild;
+ ret.chaincode = m_root_extkey.chaincode;
ret.key = key;
return true;
}
+ // Derives the last xprv
+ bool GetDerivedExtKey(const SigningProvider& arg, CExtKey& xprv) const
+ {
+ if (!GetExtKey(arg, xprv)) return false;
+ for (auto entry : m_path) {
+ xprv.Derive(xprv, entry);
+ }
+ return true;
+ }
+
bool IsHardened() const
{
if (m_derive == DeriveType::HARDENED) return true;
@@ -272,37 +298,77 @@ class BIP32PubkeyProvider final : public PubkeyProvider
}
public:
- BIP32PubkeyProvider(const CExtPubKey& extkey, KeyPath path, DeriveType derive) : m_extkey(extkey), m_path(std::move(path)), m_derive(derive) {}
+ BIP32PubkeyProvider(uint32_t exp_index, const CExtPubKey& extkey, KeyPath path, DeriveType derive) : PubkeyProvider(exp_index), m_root_extkey(extkey), m_path(std::move(path)), m_derive(derive) {}
bool IsRange() const override { return m_derive != DeriveType::NO; }
size_t GetSize() const override { return 33; }
- bool GetPubKey(int pos, const SigningProvider& arg, CPubKey* key, KeyOriginInfo& info) const override
+ bool GetPubKey(int pos, const SigningProvider& arg, CPubKey& key_out, KeyOriginInfo& final_info_out, const DescriptorCache* read_cache = nullptr, DescriptorCache* write_cache = nullptr) override
{
- if (key) {
- if (IsHardened()) {
- CKey priv_key;
- if (!GetPrivKey(pos, arg, priv_key)) return false;
- *key = priv_key.GetPubKey();
- } else {
- // TODO: optimize by caching
- CExtPubKey extkey = m_extkey;
- for (auto entry : m_path) {
- extkey.Derive(extkey, entry);
- }
- if (m_derive == DeriveType::UNHARDENED) extkey.Derive(extkey, pos);
- assert(m_derive != DeriveType::HARDENED);
- *key = extkey.pubkey;
+ // Info of parent of the to be derived pubkey
+ KeyOriginInfo parent_info;
+ CKeyID keyid = m_root_extkey.pubkey.GetID();
+ std::copy(keyid.begin(), keyid.begin() + sizeof(parent_info.fingerprint), parent_info.fingerprint);
+ parent_info.path = m_path;
+
+ // Info of the derived key itself which is copied out upon successful completion
+ KeyOriginInfo final_info_out_tmp = parent_info;
+ if (m_derive == DeriveType::UNHARDENED) final_info_out_tmp.path.push_back((uint32_t)pos);
+ if (m_derive == DeriveType::HARDENED) final_info_out_tmp.path.push_back(((uint32_t)pos) | 0x80000000L);
+
+ // Derive keys or fetch them from cache
+ CExtPubKey final_extkey = m_root_extkey;
+ CExtPubKey parent_extkey = m_root_extkey;
+ bool der = true;
+ if (read_cache) {
+ if (!read_cache->GetCachedDerivedExtPubKey(m_expr_index, pos, final_extkey)) {
+ if (m_derive == DeriveType::HARDENED) return false;
+ // Try to get the derivation parent
+ if (!read_cache->GetCachedParentExtPubKey(m_expr_index, parent_extkey)) return false;
+ final_extkey = parent_extkey;
+ if (m_derive == DeriveType::UNHARDENED) der = parent_extkey.Derive(final_extkey, pos);
+ }
+ } else if (m_cached_xpub.pubkey.IsValid() && m_derive != DeriveType::HARDENED) {
+ parent_extkey = final_extkey = m_cached_xpub;
+ if (m_derive == DeriveType::UNHARDENED) der = parent_extkey.Derive(final_extkey, pos);
+ } else if (IsHardened()) {
+ CExtKey xprv;
+ if (!GetDerivedExtKey(arg, xprv)) return false;
+ parent_extkey = xprv.Neuter();
+ if (m_derive == DeriveType::UNHARDENED) der = xprv.Derive(xprv, pos);
+ if (m_derive == DeriveType::HARDENED) der = xprv.Derive(xprv, pos | 0x80000000UL);
+ final_extkey = xprv.Neuter();
+ } else {
+ for (auto entry : m_path) {
+ der = parent_extkey.Derive(parent_extkey, entry);
+ assert(der);
}
+ final_extkey = parent_extkey;
+ if (m_derive == DeriveType::UNHARDENED) der = parent_extkey.Derive(final_extkey, pos);
+ assert(m_derive != DeriveType::HARDENED);
}
- CKeyID keyid = m_extkey.pubkey.GetID();
- std::copy(keyid.begin(), keyid.begin() + sizeof(info.fingerprint), info.fingerprint);
- info.path = m_path;
- if (m_derive == DeriveType::UNHARDENED) info.path.push_back((uint32_t)pos);
- if (m_derive == DeriveType::HARDENED) info.path.push_back(((uint32_t)pos) | 0x80000000L);
+ assert(der);
+
+ final_info_out = final_info_out_tmp;
+ key_out = final_extkey.pubkey;
+
+ // We rely on the consumer to check that m_derive isn't HARDENED as above
+ // But we can't have already cached something in case we read something from the cache
+ // and parent_extkey isn't actually the parent.
+ if (!m_cached_xpub.pubkey.IsValid()) m_cached_xpub = parent_extkey;
+
+ if (write_cache) {
+ // Only cache parent if there is any unhardened derivation
+ if (m_derive != DeriveType::HARDENED) {
+ write_cache->CacheParentExtPubKey(m_expr_index, parent_extkey);
+ } else if (final_info_out.path.size() > 0) {
+ write_cache->CacheDerivedExtPubKey(m_expr_index, pos, final_extkey);
+ }
+ }
+
return true;
}
std::string ToString() const override
{
- std::string ret = EncodeExtPubKey(m_extkey) + FormatHDKeypath(m_path);
+ std::string ret = EncodeExtPubKey(m_root_extkey) + FormatHDKeypath(m_path);
if (IsRange()) {
ret += "/*";
if (m_derive == DeriveType::HARDENED) ret += '\'';
@@ -323,10 +389,7 @@ public:
bool GetPrivKey(int pos, const SigningProvider& arg, CKey& key) const override
{
CExtKey extkey;
- if (!GetExtKey(arg, extkey)) return false;
- for (auto entry : m_path) {
- extkey.Derive(extkey, entry);
- }
+ if (!GetDerivedExtKey(arg, extkey)) return false;
if (m_derive == DeriveType::UNHARDENED) extkey.Derive(extkey, pos);
if (m_derive == DeriveType::HARDENED) extkey.Derive(extkey, pos | 0x80000000UL);
key = extkey.key;
@@ -339,14 +402,15 @@ class DescriptorImpl : public Descriptor
{
//! Public key arguments for this descriptor (size 1 for PK, PKH, WPKH; any size for Multisig).
const std::vector<std::unique_ptr<PubkeyProvider>> m_pubkey_args;
+ //! The string name of the descriptor function.
+ const std::string m_name;
+
+protected:
//! The sub-descriptor argument (nullptr for everything but SH and WSH).
//! In doc/descriptors.md this is referred to as SCRIPT expressions sh(SCRIPT)
//! and wsh(SCRIPT), and distinct from KEY expressions and ADDR expressions.
const std::unique_ptr<DescriptorImpl> m_subdescriptor_arg;
- //! The string name of the descriptor function.
- const std::string m_name;
-protected:
//! Return a serialization of anything except pubkey and script arguments, to be prepended to those.
virtual std::string ToStringExtra() const { return ""; }
@@ -364,7 +428,7 @@ protected:
virtual std::vector<CScript> MakeScripts(const std::vector<CPubKey>& pubkeys, const CScript* script, FlatSigningProvider& out) const = 0;
public:
- DescriptorImpl(std::vector<std::unique_ptr<PubkeyProvider>> pubkeys, std::unique_ptr<DescriptorImpl> script, const std::string& name) : m_pubkey_args(std::move(pubkeys)), m_subdescriptor_arg(std::move(script)), m_name(name) {}
+ DescriptorImpl(std::vector<std::unique_ptr<PubkeyProvider>> pubkeys, std::unique_ptr<DescriptorImpl> script, const std::string& name) : m_pubkey_args(std::move(pubkeys)), m_name(name), m_subdescriptor_arg(std::move(script)) {}
bool IsSolvable() const override
{
@@ -424,7 +488,7 @@ public:
return ret;
}
- bool ExpandHelper(int pos, const SigningProvider& arg, Span<const unsigned char>* cache_read, std::vector<CScript>& output_scripts, FlatSigningProvider& out, std::vector<unsigned char>* cache_write) const
+ bool ExpandHelper(int pos, const SigningProvider& arg, const DescriptorCache* read_cache, std::vector<CScript>& output_scripts, FlatSigningProvider& out, DescriptorCache* write_cache) const
{
std::vector<std::pair<CPubKey, KeyOriginInfo>> entries;
entries.reserve(m_pubkey_args.size());
@@ -432,27 +496,12 @@ public:
// Construct temporary data in `entries` and `subscripts`, to avoid producing output in case of failure.
for (const auto& p : m_pubkey_args) {
entries.emplace_back();
- // If we have a cache, we don't need GetPubKey to compute the public key.
- // Pass in nullptr to signify only origin info is desired.
- if (!p->GetPubKey(pos, arg, cache_read ? nullptr : &entries.back().first, entries.back().second)) return false;
- if (cache_read) {
- // Cached expanded public key exists, use it.
- if (cache_read->size() == 0) return false;
- bool compressed = ((*cache_read)[0] == 0x02 || (*cache_read)[0] == 0x03) && cache_read->size() >= 33;
- bool uncompressed = ((*cache_read)[0] == 0x04) && cache_read->size() >= 65;
- if (!(compressed || uncompressed)) return false;
- CPubKey pubkey(cache_read->begin(), cache_read->begin() + (compressed ? 33 : 65));
- entries.back().first = pubkey;
- *cache_read = cache_read->subspan(compressed ? 33 : 65);
- }
- if (cache_write) {
- cache_write->insert(cache_write->end(), entries.back().first.begin(), entries.back().first.end());
- }
+ if (!p->GetPubKey(pos, arg, entries.back().first, entries.back().second, read_cache, write_cache)) return false;
}
std::vector<CScript> subscripts;
if (m_subdescriptor_arg) {
FlatSigningProvider subprovider;
- if (!m_subdescriptor_arg->ExpandHelper(pos, arg, cache_read, subscripts, subprovider, cache_write)) return false;
+ if (!m_subdescriptor_arg->ExpandHelper(pos, arg, read_cache, subscripts, subprovider, write_cache)) return false;
out = Merge(out, subprovider);
}
@@ -476,15 +525,14 @@ public:
return true;
}
- bool Expand(int pos, const SigningProvider& provider, std::vector<CScript>& output_scripts, FlatSigningProvider& out, std::vector<unsigned char>* cache = nullptr) const final
+ bool Expand(int pos, const SigningProvider& provider, std::vector<CScript>& output_scripts, FlatSigningProvider& out, DescriptorCache* write_cache = nullptr) const final
{
- return ExpandHelper(pos, provider, nullptr, output_scripts, out, cache);
+ return ExpandHelper(pos, provider, nullptr, output_scripts, out, write_cache);
}
- bool ExpandFromCache(int pos, const std::vector<unsigned char>& cache, std::vector<CScript>& output_scripts, FlatSigningProvider& out) const final
+ bool ExpandFromCache(int pos, const DescriptorCache& read_cache, std::vector<CScript>& output_scripts, FlatSigningProvider& out) const final
{
- Span<const unsigned char> span = MakeSpan(cache);
- return ExpandHelper(pos, DUMMY_SIGNING_PROVIDER, &span, output_scripts, out, nullptr) && span.size() == 0;
+ return ExpandHelper(pos, DUMMY_SIGNING_PROVIDER, &read_cache, output_scripts, out, nullptr);
}
void ExpandPrivate(int pos, const SigningProvider& provider, FlatSigningProvider& out) const final
@@ -500,6 +548,8 @@ public:
out = Merge(out, subprovider);
}
}
+
+ Optional<OutputType> GetOutputType() const override { return nullopt; }
};
/** A parsed addr(A) descriptor. */
@@ -512,6 +562,19 @@ protected:
public:
AddressDescriptor(CTxDestination destination) : DescriptorImpl({}, {}, "addr"), m_destination(std::move(destination)) {}
bool IsSolvable() const final { return false; }
+
+ Optional<OutputType> GetOutputType() const override
+ {
+ switch (m_destination.which()) {
+ case 1 /* PKHash */:
+ case 2 /* ScriptHash */: return OutputType::LEGACY;
+ case 3 /* WitnessV0ScriptHash */:
+ case 4 /* WitnessV0KeyHash */:
+ case 5 /* WitnessUnknown */: return OutputType::BECH32;
+ case 0 /* CNoDestination */:
+ default: return nullopt;
+ }
+ }
};
/** A parsed raw(H) descriptor. */
@@ -524,6 +587,21 @@ protected:
public:
RawDescriptor(CScript script) : DescriptorImpl({}, {}, "raw"), m_script(std::move(script)) {}
bool IsSolvable() const final { return false; }
+
+ Optional<OutputType> GetOutputType() const override
+ {
+ CTxDestination dest;
+ ExtractDestination(m_script, dest);
+ switch (dest.which()) {
+ case 1 /* PKHash */:
+ case 2 /* ScriptHash */: return OutputType::LEGACY;
+ case 3 /* WitnessV0ScriptHash */:
+ case 4 /* WitnessV0KeyHash */:
+ case 5 /* WitnessUnknown */: return OutputType::BECH32;
+ case 0 /* CNoDestination */:
+ default: return nullopt;
+ }
+ }
};
/** A parsed pk(P) descriptor. */
@@ -547,6 +625,7 @@ protected:
}
public:
PKHDescriptor(std::unique_ptr<PubkeyProvider> prov) : DescriptorImpl(Vector(std::move(prov)), {}, "pkh") {}
+ Optional<OutputType> GetOutputType() const override { return OutputType::LEGACY; }
};
/** A parsed wpkh(P) descriptor. */
@@ -561,6 +640,7 @@ protected:
}
public:
WPKHDescriptor(std::unique_ptr<PubkeyProvider> prov) : DescriptorImpl(Vector(std::move(prov)), {}, "wpkh") {}
+ Optional<OutputType> GetOutputType() const override { return OutputType::BECH32; }
};
/** A parsed combo(P) descriptor. */
@@ -612,6 +692,13 @@ protected:
std::vector<CScript> MakeScripts(const std::vector<CPubKey>&, const CScript* script, FlatSigningProvider&) const override { return Vector(GetScriptForDestination(ScriptHash(*script))); }
public:
SHDescriptor(std::unique_ptr<DescriptorImpl> desc) : DescriptorImpl({}, std::move(desc), "sh") {}
+
+ Optional<OutputType> GetOutputType() const override
+ {
+ assert(m_subdescriptor_arg);
+ if (m_subdescriptor_arg->GetOutputType() == OutputType::BECH32) return OutputType::P2SH_SEGWIT;
+ return OutputType::LEGACY;
+ }
};
/** A parsed wsh(...) descriptor. */
@@ -621,6 +708,7 @@ protected:
std::vector<CScript> MakeScripts(const std::vector<CPubKey>&, const CScript* script, FlatSigningProvider&) const override { return Vector(GetScriptForDestination(WitnessV0ScriptHash(*script))); }
public:
WSHDescriptor(std::unique_ptr<DescriptorImpl> desc) : DescriptorImpl({}, std::move(desc), "wsh") {}
+ Optional<OutputType> GetOutputType() const override { return OutputType::BECH32; }
};
////////////////////////////////////////////////////////////////////////////
@@ -657,7 +745,7 @@ NODISCARD bool ParseKeyPath(const std::vector<Span<const char>>& split, KeyPath&
}
/** Parse a public key that excludes origin information. */
-std::unique_ptr<PubkeyProvider> ParsePubkeyInner(const Span<const char>& sp, bool permit_uncompressed, FlatSigningProvider& out, std::string& error)
+std::unique_ptr<PubkeyProvider> ParsePubkeyInner(uint32_t key_exp_index, const Span<const char>& sp, bool permit_uncompressed, FlatSigningProvider& out, std::string& error)
{
using namespace spanparsing;
@@ -673,7 +761,7 @@ std::unique_ptr<PubkeyProvider> ParsePubkeyInner(const Span<const char>& sp, boo
CPubKey pubkey(data);
if (pubkey.IsFullyValid()) {
if (permit_uncompressed || pubkey.IsCompressed()) {
- return MakeUnique<ConstPubkeyProvider>(pubkey);
+ return MakeUnique<ConstPubkeyProvider>(key_exp_index, pubkey);
} else {
error = "Uncompressed keys are not allowed";
return nullptr;
@@ -687,7 +775,7 @@ std::unique_ptr<PubkeyProvider> ParsePubkeyInner(const Span<const char>& sp, boo
if (permit_uncompressed || key.IsCompressed()) {
CPubKey pubkey = key.GetPubKey();
out.keys.emplace(pubkey.GetID(), key);
- return MakeUnique<ConstPubkeyProvider>(pubkey);
+ return MakeUnique<ConstPubkeyProvider>(key_exp_index, pubkey);
} else {
error = "Uncompressed keys are not allowed";
return nullptr;
@@ -714,11 +802,11 @@ std::unique_ptr<PubkeyProvider> ParsePubkeyInner(const Span<const char>& sp, boo
extpubkey = extkey.Neuter();
out.keys.emplace(extpubkey.pubkey.GetID(), extkey.key);
}
- return MakeUnique<BIP32PubkeyProvider>(extpubkey, std::move(path), type);
+ return MakeUnique<BIP32PubkeyProvider>(key_exp_index, extpubkey, std::move(path), type);
}
/** Parse a public key including origin information (if enabled). */
-std::unique_ptr<PubkeyProvider> ParsePubkey(const Span<const char>& sp, bool permit_uncompressed, FlatSigningProvider& out, std::string& error)
+std::unique_ptr<PubkeyProvider> ParsePubkey(uint32_t key_exp_index, const Span<const char>& sp, bool permit_uncompressed, FlatSigningProvider& out, std::string& error)
{
using namespace spanparsing;
@@ -727,7 +815,7 @@ std::unique_ptr<PubkeyProvider> ParsePubkey(const Span<const char>& sp, bool per
error = "Multiple ']' characters found for a single pubkey";
return nullptr;
}
- if (origin_split.size() == 1) return ParsePubkeyInner(origin_split[0], permit_uncompressed, out, error);
+ if (origin_split.size() == 1) return ParsePubkeyInner(key_exp_index, origin_split[0], permit_uncompressed, out, error);
if (origin_split[0].size() < 1 || origin_split[0][0] != '[') {
error = strprintf("Key origin start '[ character expected but not found, got '%c' instead", origin_split[0][0]);
return nullptr;
@@ -748,30 +836,30 @@ std::unique_ptr<PubkeyProvider> ParsePubkey(const Span<const char>& sp, bool per
assert(fpr_bytes.size() == 4);
std::copy(fpr_bytes.begin(), fpr_bytes.end(), info.fingerprint);
if (!ParseKeyPath(slash_split, info.path, error)) return nullptr;
- auto provider = ParsePubkeyInner(origin_split[1], permit_uncompressed, out, error);
+ auto provider = ParsePubkeyInner(key_exp_index, origin_split[1], permit_uncompressed, out, error);
if (!provider) return nullptr;
- return MakeUnique<OriginPubkeyProvider>(std::move(info), std::move(provider));
+ return MakeUnique<OriginPubkeyProvider>(key_exp_index, std::move(info), std::move(provider));
}
/** Parse a script in a particular context. */
-std::unique_ptr<DescriptorImpl> ParseScript(Span<const char>& sp, ParseScriptContext ctx, FlatSigningProvider& out, std::string& error)
+std::unique_ptr<DescriptorImpl> ParseScript(uint32_t key_exp_index, Span<const char>& sp, ParseScriptContext ctx, FlatSigningProvider& out, std::string& error)
{
using namespace spanparsing;
auto expr = Expr(sp);
bool sorted_multi = false;
if (Func("pk", expr)) {
- auto pubkey = ParsePubkey(expr, ctx != ParseScriptContext::P2WSH, out, error);
+ auto pubkey = ParsePubkey(key_exp_index, expr, ctx != ParseScriptContext::P2WSH, out, error);
if (!pubkey) return nullptr;
return MakeUnique<PKDescriptor>(std::move(pubkey));
}
if (Func("pkh", expr)) {
- auto pubkey = ParsePubkey(expr, ctx != ParseScriptContext::P2WSH, out, error);
+ auto pubkey = ParsePubkey(key_exp_index, expr, ctx != ParseScriptContext::P2WSH, out, error);
if (!pubkey) return nullptr;
return MakeUnique<PKHDescriptor>(std::move(pubkey));
}
if (ctx == ParseScriptContext::TOP && Func("combo", expr)) {
- auto pubkey = ParsePubkey(expr, true, out, error);
+ auto pubkey = ParsePubkey(key_exp_index, expr, true, out, error);
if (!pubkey) return nullptr;
return MakeUnique<ComboDescriptor>(std::move(pubkey));
} else if (ctx != ParseScriptContext::TOP && Func("combo", expr)) {
@@ -793,10 +881,11 @@ std::unique_ptr<DescriptorImpl> ParseScript(Span<const char>& sp, ParseScriptCon
return nullptr;
}
auto arg = Expr(expr);
- auto pk = ParsePubkey(arg, ctx != ParseScriptContext::P2WSH, out, error);
+ auto pk = ParsePubkey(key_exp_index, arg, ctx != ParseScriptContext::P2WSH, out, error);
if (!pk) return nullptr;
script_size += pk->GetSize() + 1;
providers.emplace_back(std::move(pk));
+ key_exp_index++;
}
if (providers.size() < 1 || providers.size() > 16) {
error = strprintf("Cannot have %u keys in multisig; must have between 1 and 16 keys, inclusive", providers.size());
@@ -823,7 +912,7 @@ std::unique_ptr<DescriptorImpl> ParseScript(Span<const char>& sp, ParseScriptCon
return MakeUnique<MultisigDescriptor>(thres, std::move(providers), sorted_multi);
}
if (ctx != ParseScriptContext::P2WSH && Func("wpkh", expr)) {
- auto pubkey = ParsePubkey(expr, false, out, error);
+ auto pubkey = ParsePubkey(key_exp_index, expr, false, out, error);
if (!pubkey) return nullptr;
return MakeUnique<WPKHDescriptor>(std::move(pubkey));
} else if (ctx == ParseScriptContext::P2WSH && Func("wpkh", expr)) {
@@ -831,7 +920,7 @@ std::unique_ptr<DescriptorImpl> ParseScript(Span<const char>& sp, ParseScriptCon
return nullptr;
}
if (ctx == ParseScriptContext::TOP && Func("sh", expr)) {
- auto desc = ParseScript(expr, ParseScriptContext::P2SH, out, error);
+ auto desc = ParseScript(key_exp_index, expr, ParseScriptContext::P2SH, out, error);
if (!desc || expr.size()) return nullptr;
return MakeUnique<SHDescriptor>(std::move(desc));
} else if (ctx != ParseScriptContext::TOP && Func("sh", expr)) {
@@ -839,7 +928,7 @@ std::unique_ptr<DescriptorImpl> ParseScript(Span<const char>& sp, ParseScriptCon
return nullptr;
}
if (ctx != ParseScriptContext::P2WSH && Func("wsh", expr)) {
- auto desc = ParseScript(expr, ParseScriptContext::P2WSH, out, error);
+ auto desc = ParseScript(key_exp_index, expr, ParseScriptContext::P2WSH, out, error);
if (!desc || expr.size()) return nullptr;
return MakeUnique<WSHDescriptor>(std::move(desc));
} else if (ctx == ParseScriptContext::P2WSH && Func("wsh", expr)) {
@@ -876,10 +965,10 @@ std::unique_ptr<DescriptorImpl> ParseScript(Span<const char>& sp, ParseScriptCon
std::unique_ptr<PubkeyProvider> InferPubkey(const CPubKey& pubkey, ParseScriptContext, const SigningProvider& provider)
{
- std::unique_ptr<PubkeyProvider> key_provider = MakeUnique<ConstPubkeyProvider>(pubkey);
+ std::unique_ptr<PubkeyProvider> key_provider = MakeUnique<ConstPubkeyProvider>(0, pubkey);
KeyOriginInfo info;
if (provider.GetKeyOrigin(pubkey.GetID(), info)) {
- return MakeUnique<OriginPubkeyProvider>(std::move(info), std::move(key_provider));
+ return MakeUnique<OriginPubkeyProvider>(0, std::move(info), std::move(key_provider));
}
return key_provider;
}
@@ -991,7 +1080,7 @@ std::unique_ptr<Descriptor> Parse(const std::string& descriptor, FlatSigningProv
{
Span<const char> sp(descriptor.data(), descriptor.size());
if (!CheckChecksum(sp, require_checksum, error)) return nullptr;
- auto ret = ParseScript(sp, ParseScriptContext::TOP, out, error);
+ auto ret = ParseScript(0, sp, ParseScriptContext::TOP, out, error);
if (sp.size() == 0 && ret) return std::unique_ptr<Descriptor>(std::move(ret));
return nullptr;
}
@@ -1009,3 +1098,42 @@ std::unique_ptr<Descriptor> InferDescriptor(const CScript& script, const Signing
{
return InferScript(script, ParseScriptContext::TOP, provider);
}
+
+void DescriptorCache::CacheParentExtPubKey(uint32_t key_exp_pos, const CExtPubKey& xpub)
+{
+ m_parent_xpubs[key_exp_pos] = xpub;
+}
+
+void DescriptorCache::CacheDerivedExtPubKey(uint32_t key_exp_pos, uint32_t der_index, const CExtPubKey& xpub)
+{
+ auto& xpubs = m_derived_xpubs[key_exp_pos];
+ xpubs[der_index] = xpub;
+}
+
+bool DescriptorCache::GetCachedParentExtPubKey(uint32_t key_exp_pos, CExtPubKey& xpub) const
+{
+ const auto& it = m_parent_xpubs.find(key_exp_pos);
+ if (it == m_parent_xpubs.end()) return false;
+ xpub = it->second;
+ return true;
+}
+
+bool DescriptorCache::GetCachedDerivedExtPubKey(uint32_t key_exp_pos, uint32_t der_index, CExtPubKey& xpub) const
+{
+ const auto& key_exp_it = m_derived_xpubs.find(key_exp_pos);
+ if (key_exp_it == m_derived_xpubs.end()) return false;
+ const auto& der_it = key_exp_it->second.find(der_index);
+ if (der_it == key_exp_it->second.end()) return false;
+ xpub = der_it->second;
+ return true;
+}
+
+const ExtPubKeyMap DescriptorCache::GetCachedParentExtPubKeys() const
+{
+ return m_parent_xpubs;
+}
+
+const std::unordered_map<uint32_t, ExtPubKeyMap> DescriptorCache::GetCachedDerivedExtPubKeys() const
+{
+ return m_derived_xpubs;
+}
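
// Editorial sketch (not part of the patch): one plausible way a caller could use the new
// DescriptorCache together with Expand()/ExpandFromCache(), assuming the surrounding
// Bitcoin Core headers. The descriptor string and the flow are illustrative only.
#include <script/descriptor.h>
#include <script/signingprovider.h>

#include <memory>
#include <string>
#include <vector>

static bool ExampleDescriptorCacheUse(const std::string& desc_str)
{
    FlatSigningProvider keys;
    std::string error;
    std::unique_ptr<Descriptor> desc = Parse(desc_str, keys, error);
    if (!desc) return false;

    // First expansion: derive the pubkeys normally and record them in the write cache.
    DescriptorCache cache;
    std::vector<CScript> scripts;
    FlatSigningProvider out;
    if (!desc->Expand(/* pos= */ 0, keys, scripts, out, &cache)) return false;

    // Later expansion at the same position: read from the cache, no key derivation needed.
    std::vector<CScript> scripts_from_cache;
    FlatSigningProvider out_from_cache;
    return desc->ExpandFromCache(0, cache, scripts_from_cache, out_from_cache);
}
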
diff --git a/src/script/descriptor.h b/src/script/descriptor.h
index a5a41d78dd..34cd5760de 100644
--- a/src/script/descriptor.h
+++ b/src/script/descriptor.h
@@ -5,12 +5,57 @@
#ifndef BITCOIN_SCRIPT_DESCRIPTOR_H
#define BITCOIN_SCRIPT_DESCRIPTOR_H
+#include <optional.h>
+#include <outputtype.h>
#include <script/script.h>
#include <script/sign.h>
#include <script/signingprovider.h>
#include <vector>
+using ExtPubKeyMap = std::unordered_map<uint32_t, CExtPubKey>;
+
+/** Cache for single descriptor's derived extended pubkeys */
+class DescriptorCache {
+private:
+ /** Map key expression index -> map of (key derivation index -> xpub) */
+ std::unordered_map<uint32_t, ExtPubKeyMap> m_derived_xpubs;
+ /** Map key expression index -> parent xpub */
+ ExtPubKeyMap m_parent_xpubs;
+
+public:
+ /** Cache a parent xpub
+ *
+ * @param[in] key_exp_pos Position of the key expression within the descriptor
+ * @param[in] xpub The CExtPubKey to cache
+ */
+ void CacheParentExtPubKey(uint32_t key_exp_pos, const CExtPubKey& xpub);
+ /** Retrieve a cached parent xpub
+ *
+ * @param[in] key_exp_pos Position of the key expression within the descriptor
+ * @param[out] xpub The CExtPubKey to get from cache
+ */
+ bool GetCachedParentExtPubKey(uint32_t key_exp_pos, CExtPubKey& xpub) const;
+ /** Cache an xpub derived at an index
+ *
+ * @param[in] key_exp_pos Position of the key expression within the descriptor
+ * @param[in] der_index Derivation index of the xpub
+ * @param[in] xpub The CExtPubKey to cache
+ */
+ void CacheDerivedExtPubKey(uint32_t key_exp_pos, uint32_t der_index, const CExtPubKey& xpub);
+ /** Retrieve a cached xpub derived at an index
+ *
+ * @param[in] key_exp_pos Position of the key expression within the descriptor
+ * @param[in] der_index Derivation index of the xpub
+ * @param[out] xpub The CExtPubKey to get from cache
+ */
+ bool GetCachedDerivedExtPubKey(uint32_t key_exp_pos, uint32_t der_index, CExtPubKey& xpub) const;
+
+ /** Retrieve all cached parent xpubs */
+ const ExtPubKeyMap GetCachedParentExtPubKeys() const;
+ /** Retrieve all cached derived xpubs */
+ const std::unordered_map<uint32_t, ExtPubKeyMap> GetCachedDerivedExtPubKeys() const;
+};
/** \brief Interface for parsed descriptor objects.
*
@@ -51,18 +96,18 @@ struct Descriptor {
* @param[in] provider The provider to query for private keys in case of hardened derivation.
* @param[out] output_scripts The expanded scriptPubKeys.
* @param[out] out Scripts and public keys necessary for solving the expanded scriptPubKeys (may be equal to `provider`).
- * @param[out] cache Cache data necessary to evaluate the descriptor at this point without access to private keys.
+ * @param[out] write_cache Cache data necessary to evaluate the descriptor at this point without access to private keys.
*/
- virtual bool Expand(int pos, const SigningProvider& provider, std::vector<CScript>& output_scripts, FlatSigningProvider& out, std::vector<unsigned char>* cache = nullptr) const = 0;
+ virtual bool Expand(int pos, const SigningProvider& provider, std::vector<CScript>& output_scripts, FlatSigningProvider& out, DescriptorCache* write_cache = nullptr) const = 0;
/** Expand a descriptor at a specified position using cached expansion data.
*
* @param[in] pos The position at which to expand the descriptor. If IsRange() is false, this is ignored.
- * @param[in] cache Cached expansion data.
+ * @param[in] read_cache Cached expansion data.
* @param[out] output_scripts The expanded scriptPubKeys.
* @param[out] out Scripts and public keys necessary for solving the expanded scriptPubKeys (may be equal to `provider`).
*/
- virtual bool ExpandFromCache(int pos, const std::vector<unsigned char>& cache, std::vector<CScript>& output_scripts, FlatSigningProvider& out) const = 0;
+ virtual bool ExpandFromCache(int pos, const DescriptorCache& read_cache, std::vector<CScript>& output_scripts, FlatSigningProvider& out) const = 0;
/** Expand the private key for a descriptor at a specified position, if possible.
*
@@ -71,6 +116,9 @@ struct Descriptor {
* @param[out] out Any private keys available for the specified `pos`.
*/
virtual void ExpandPrivate(int pos, const SigningProvider& provider, FlatSigningProvider& out) const = 0;
+
+ /** @return The OutputType of the scriptPubKey(s) produced by this descriptor. Or nullopt if indeterminate (multiple or none) */
+ virtual Optional<OutputType> GetOutputType() const = 0;
};
/** Parse a `descriptor` string. Included private keys are put in `out`.
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index d0865d2793..083022fbdd 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -278,6 +278,70 @@ int FindAndDelete(CScript& script, const CScript& b)
return nFound;
}
+namespace {
+/** A data type to abstract out the condition stack during script execution.
+ *
+ * Conceptually it acts like a vector of booleans, one for each level of nested
+ * IF/THEN/ELSE, indicating whether we're in the active or inactive branch of
+ * each.
+ *
+ * The elements on the stack cannot be observed individually; we only need to
+ * expose whether the stack is empty and whether or not any false values are
+ * present at all. To implement OP_ELSE, a toggle_top modifier is added, which
+ * flips the last value without returning it.
+ *
+ * This uses an optimized implementation that does not materialize the
+ * actual stack. Instead, it just stores the size of the would-be stack,
+ * and the position of the first false value in it.
+ */
+class ConditionStack {
+private:
+ //! A constant for m_first_false_pos to indicate there are no falses.
+ static constexpr uint32_t NO_FALSE = std::numeric_limits<uint32_t>::max();
+
+ //! The size of the implied stack.
+ uint32_t m_stack_size = 0;
+ //! The position of the first false value on the implied stack, or NO_FALSE if all true.
+ uint32_t m_first_false_pos = NO_FALSE;
+
+public:
+ bool empty() { return m_stack_size == 0; }
+ bool all_true() { return m_first_false_pos == NO_FALSE; }
+ void push_back(bool f)
+ {
+ if (m_first_false_pos == NO_FALSE && !f) {
+ // The stack consists of all true values, and a false is added.
+ // The first false value will appear at the current size.
+ m_first_false_pos = m_stack_size;
+ }
+ ++m_stack_size;
+ }
+ void pop_back()
+ {
+ assert(m_stack_size > 0);
+ --m_stack_size;
+ if (m_first_false_pos == m_stack_size) {
+ // When popping off the first false value, everything becomes true.
+ m_first_false_pos = NO_FALSE;
+ }
+ }
+ void toggle_top()
+ {
+ assert(m_stack_size > 0);
+ if (m_first_false_pos == NO_FALSE) {
+ // The current stack is all true values; the first false will be the top.
+ m_first_false_pos = m_stack_size - 1;
+ } else if (m_first_false_pos == m_stack_size - 1) {
+ // The top is the first false value; toggling it will make everything true.
+ m_first_false_pos = NO_FALSE;
+ } else {
+ // There is a false value, but not on top. No action is needed as toggling
+ // anything but the first false value is unobservable.
+ }
+ }
+};
+}
+
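
// Editorial sketch (not part of the patch): a standalone illustration of the
// size/first-false-position trick described above, checked against a plain
// std::vector<bool> model of the conceptual condition stack.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <vector>

int main()
{
    constexpr uint32_t NO_FALSE = std::numeric_limits<uint32_t>::max();
    uint32_t stack_size = 0, first_false_pos = NO_FALSE; // compact representation
    std::vector<bool> model;                             // reference representation

    auto check = [&] {
        const bool all_true = std::find(model.begin(), model.end(), false) == model.end();
        assert((first_false_pos == NO_FALSE) == all_true);
        assert(stack_size == model.size());
    };
    auto push = [&](bool f) {
        if (first_false_pos == NO_FALSE && !f) first_false_pos = stack_size;
        ++stack_size;
        model.push_back(f);
        check();
    };

    push(true);  // outer IF, condition true
    push(false); // nested IF, condition false
    // OP_ELSE on the inner level (toggle_top): only observable if it touches the first false.
    if (first_false_pos == NO_FALSE) first_false_pos = stack_size - 1;
    else if (first_false_pos == stack_size - 1) first_false_pos = NO_FALSE;
    model.back() = !model.back();
    check();
    return 0;
}
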
bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& script, unsigned int flags, const BaseSignatureChecker& checker, SigVersion sigversion, ScriptError* serror)
{
static const CScriptNum bnZero(0);
@@ -293,7 +357,7 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript&
CScript::const_iterator pbegincodehash = script.begin();
opcodetype opcode;
valtype vchPushValue;
- std::vector<bool> vfExec;
+ ConditionStack vfExec;
std::vector<valtype> altstack;
set_error(serror, SCRIPT_ERR_UNKNOWN_ERROR);
if (script.size() > MAX_SCRIPT_SIZE)
@@ -305,7 +369,7 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript&
{
while (pc < pend)
{
- bool fExec = !count(vfExec.begin(), vfExec.end(), false);
+ bool fExec = vfExec.all_true();
//
// Read instruction
@@ -494,7 +558,7 @@ bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript&
{
if (vfExec.empty())
return set_error(serror, SCRIPT_ERR_UNBALANCED_CONDITIONAL);
- vfExec.back() = !vfExec.back();
+ vfExec.toggle_top();
}
break;
@@ -1414,57 +1478,61 @@ bool GenericTransactionSignatureChecker<T>::CheckSequence(const CScriptNum& nSeq
template class GenericTransactionSignatureChecker<CTransaction>;
template class GenericTransactionSignatureChecker<CMutableTransaction>;
+static bool ExecuteWitnessScript(const Span<const valtype>& stack_span, const CScript& scriptPubKey, unsigned int flags, SigVersion sigversion, const BaseSignatureChecker& checker, ScriptError* serror)
+{
+ std::vector<valtype> stack{stack_span.begin(), stack_span.end()};
+
+ // Disallow stack item size > MAX_SCRIPT_ELEMENT_SIZE in witness stack
+ for (const valtype& elem : stack) {
+ if (elem.size() > MAX_SCRIPT_ELEMENT_SIZE) return set_error(serror, SCRIPT_ERR_PUSH_SIZE);
+ }
+
+ // Run the script interpreter.
+ if (!EvalScript(stack, scriptPubKey, flags, checker, sigversion, serror)) return false;
+
+ // Scripts inside witness implicitly require cleanstack behaviour
+ if (stack.size() != 1) return set_error(serror, SCRIPT_ERR_CLEANSTACK);
+ if (!CastToBool(stack.back())) return set_error(serror, SCRIPT_ERR_EVAL_FALSE);
+ return true;
+}
+
static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion, const std::vector<unsigned char>& program, unsigned int flags, const BaseSignatureChecker& checker, ScriptError* serror)
{
- std::vector<std::vector<unsigned char> > stack;
CScript scriptPubKey;
+ Span<const valtype> stack = MakeSpan(witness.stack);
if (witversion == 0) {
if (program.size() == WITNESS_V0_SCRIPTHASH_SIZE) {
// Version 0 segregated witness program: SHA256(CScript) inside the program, CScript + inputs in witness
- if (witness.stack.size() == 0) {
+ if (stack.size() == 0) {
return set_error(serror, SCRIPT_ERR_WITNESS_PROGRAM_WITNESS_EMPTY);
}
- scriptPubKey = CScript(witness.stack.back().begin(), witness.stack.back().end());
- stack = std::vector<std::vector<unsigned char> >(witness.stack.begin(), witness.stack.end() - 1);
+ const valtype& script_bytes = SpanPopBack(stack);
+ scriptPubKey = CScript(script_bytes.begin(), script_bytes.end());
uint256 hashScriptPubKey;
CSHA256().Write(&scriptPubKey[0], scriptPubKey.size()).Finalize(hashScriptPubKey.begin());
if (memcmp(hashScriptPubKey.begin(), program.data(), 32)) {
return set_error(serror, SCRIPT_ERR_WITNESS_PROGRAM_MISMATCH);
}
+ return ExecuteWitnessScript(stack, scriptPubKey, flags, SigVersion::WITNESS_V0, checker, serror);
} else if (program.size() == WITNESS_V0_KEYHASH_SIZE) {
// Special case for pay-to-pubkeyhash; signature + pubkey in witness
- if (witness.stack.size() != 2) {
+ if (stack.size() != 2) {
return set_error(serror, SCRIPT_ERR_WITNESS_PROGRAM_MISMATCH); // 2 items in witness
}
scriptPubKey << OP_DUP << OP_HASH160 << program << OP_EQUALVERIFY << OP_CHECKSIG;
- stack = witness.stack;
+ return ExecuteWitnessScript(stack, scriptPubKey, flags, SigVersion::WITNESS_V0, checker, serror);
} else {
return set_error(serror, SCRIPT_ERR_WITNESS_PROGRAM_WRONG_LENGTH);
}
- } else if (flags & SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM) {
- return set_error(serror, SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM);
} else {
+ if (flags & SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM) {
+ return set_error(serror, SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM);
+ }
// Higher version witness scripts return true for future softfork compatibility
- return set_success(serror);
- }
-
- // Disallow stack item size > MAX_SCRIPT_ELEMENT_SIZE in witness stack
- for (unsigned int i = 0; i < stack.size(); i++) {
- if (stack.at(i).size() > MAX_SCRIPT_ELEMENT_SIZE)
- return set_error(serror, SCRIPT_ERR_PUSH_SIZE);
- }
-
- if (!EvalScript(stack, scriptPubKey, flags, checker, SigVersion::WITNESS_V0, serror)) {
- return false;
+ return true;
}
-
- // Scripts inside witness implicitly require cleanstack behaviour
- if (stack.size() != 1)
- return set_error(serror, SCRIPT_ERR_CLEANSTACK);
- if (!CastToBool(stack.back()))
- return set_error(serror, SCRIPT_ERR_EVAL_FALSE);
- return true;
+ // There is intentionally no return statement here, to be able to use "control reaches end of non-void function" warnings to detect gaps in the logic above.
}
bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const CScriptWitness* witness, unsigned int flags, const BaseSignatureChecker& checker, ScriptError* serror)
diff --git a/src/script/script_error.cpp b/src/script/script_error.cpp
index 9d7deffc78..ff521d5860 100644
--- a/src/script/script_error.cpp
+++ b/src/script/script_error.cpp
@@ -58,7 +58,7 @@ const char* ScriptErrorString(const ScriptError serror)
case SCRIPT_ERR_MINIMALDATA:
return "Data push larger than necessary";
case SCRIPT_ERR_SIG_PUSHONLY:
- return "Only non-push operators allowed in signatures";
+ return "Only push operators allowed in signatures";
case SCRIPT_ERR_SIG_HIGH_S:
return "Non-canonical signature: S value is unnecessarily high";
case SCRIPT_ERR_SIG_NULLDUMMY:
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index 58eae3ce96..fe8292fe57 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -465,3 +465,54 @@ bool IsSegWitOutput(const SigningProvider& provider, const CScript& script)
}
return false;
}
+
+bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore, const std::map<COutPoint, Coin>& coins, int nHashType, std::map<int, std::string>& input_errors)
+{
+ bool fHashSingle = ((nHashType & ~SIGHASH_ANYONECANPAY) == SIGHASH_SINGLE);
+
+ // Use CTransaction for the constant parts of the
+ // transaction to avoid rehashing.
+ const CTransaction txConst(mtx);
+ // Sign what we can:
+ for (unsigned int i = 0; i < mtx.vin.size(); i++) {
+ CTxIn& txin = mtx.vin[i];
+ auto coin = coins.find(txin.prevout);
+ if (coin == coins.end() || coin->second.IsSpent()) {
+ input_errors[i] = "Input not found or already spent";
+ continue;
+ }
+ const CScript& prevPubKey = coin->second.out.scriptPubKey;
+ const CAmount& amount = coin->second.out.nValue;
+
+ SignatureData sigdata = DataFromTransaction(mtx, i, coin->second.out);
+ // Only sign SIGHASH_SINGLE if there's a corresponding output:
+ if (!fHashSingle || (i < mtx.vout.size())) {
+ ProduceSignature(*keystore, MutableTransactionSignatureCreator(&mtx, i, amount, nHashType), prevPubKey, sigdata);
+ }
+
+ UpdateInput(txin, sigdata);
+
+ // amount must be specified for valid segwit signature
+ if (amount == MAX_MONEY && !txin.scriptWitness.IsNull()) {
+ input_errors[i] = "Missing amount";
+ continue;
+ }
+
+ ScriptError serror = SCRIPT_ERR_OK;
+ if (!VerifyScript(txin.scriptSig, prevPubKey, &txin.scriptWitness, STANDARD_SCRIPT_VERIFY_FLAGS, TransactionSignatureChecker(&txConst, i, amount), &serror)) {
+ if (serror == SCRIPT_ERR_INVALID_STACK_OPERATION) {
+ // Unable to sign input and verification failed (possible attempt to partially sign).
+ input_errors[i] = "Unable to sign input, invalid stack size (possibly missing key)";
+ } else if (serror == SCRIPT_ERR_SIG_NULLFAIL) {
+ // Verification failed (possibly due to insufficient signatures).
+ input_errors[i] = "CHECK(MULTI)SIG failing with non-zero signature (possibly need more signatures)";
+ } else {
+ input_errors[i] = ScriptErrorString(serror);
+ }
+ } else {
+ // If this input succeeds, make sure there is no error set for it
+ input_errors.erase(i);
+ }
+ }
+ return input_errors.empty();
+}
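
// Editorial sketch (not part of the patch): how a caller might drive the new
// SignTransaction() helper with SIGHASH_ALL. The keystore/coins setup and the
// logging are illustrative and assume the surrounding Bitcoin Core headers.
#include <logging.h>
#include <script/sign.h>
#include <script/signingprovider.h>

#include <map>
#include <string>

static bool ExampleSignAllInputs(CMutableTransaction& mtx,
                                 const FlatSigningProvider& keystore,
                                 const std::map<COutPoint, Coin>& coins)
{
    std::map<int, std::string> input_errors;
    const bool complete = SignTransaction(mtx, &keystore, coins, SIGHASH_ALL, input_errors);
    for (const auto& err : input_errors) {
        // err.first is the input index, err.second the reason that input was not signed.
        LogPrintf("could not sign input %d: %s\n", err.first, err.second);
    }
    return complete;
}
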
diff --git a/src/script/sign.h b/src/script/sign.h
index 033c9ba19e..f03af0713f 100644
--- a/src/script/sign.h
+++ b/src/script/sign.h
@@ -6,6 +6,7 @@
#ifndef BITCOIN_SCRIPT_SIGN_H
#define BITCOIN_SCRIPT_SIGN_H
+#include <coins.h>
#include <hash.h>
#include <pubkey.h>
#include <script/interpreter.h>
@@ -168,4 +169,7 @@ bool IsSolvable(const SigningProvider& provider, const CScript& script);
/** Check whether a scriptPubKey is known to be segwit. */
bool IsSegWitOutput(const SigningProvider& provider, const CScript& script);
+/** Sign the CMutableTransaction */
+bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* provider, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors);
+
#endif // BITCOIN_SCRIPT_SIGN_H
diff --git a/src/script/signingprovider.h b/src/script/signingprovider.h
index 6ad20480a7..76f31d2f6f 100644
--- a/src/script/signingprovider.h
+++ b/src/script/signingprovider.h
@@ -66,7 +66,53 @@ protected:
using KeyMap = std::map<CKeyID, CKey>;
using ScriptMap = std::map<CScriptID, CScript>;
+ /**
+ * Map of key id to unencrypted private keys known by the signing provider.
+ * Map may be empty if the provider has another source of keys, like an
+ * encrypted store.
+ */
KeyMap mapKeys GUARDED_BY(cs_KeyStore);
+
+ /**
+ * Map of script id to scripts known by the signing provider.
+ *
+ * This map originally just held P2SH redeemScripts, and was used by wallet
+ * code to look up script ids referenced in "OP_HASH160 <script id>
+ * OP_EQUAL" P2SH outputs. Later in 605e8473a7d it was extended to hold
+ * P2WSH witnessScripts as well, and used to look up nested scripts
+ * referenced in "OP_0 <script hash>" P2WSH outputs. Later in commits
+ * f4691ab3a9d and 248f3a76a82, it was extended once again to hold segwit
+ * "OP_0 <key or script hash>" scriptPubKeys, in order to give the wallet a
+ * way to distinguish between segwit outputs that it generated addresses for
+ * and wanted to receive payments from, and segwit outputs that it never
+ * generated addresses for, but it could spend just because of having keys.
+ * (Before segwit activation it was also important to not treat segwit
+ * outputs to arbitrary wallet keys as payments, because these could be
+ * spent by anyone without even needing to sign with the keys.)
+ *
+ * Some of the scripts stored in mapScripts are memory-only and
+ * intentionally not saved to disk. Specifically, scripts added by
+ * ImplicitlyLearnRelatedKeyScripts(pubkey) calls are not written to disk so
+ * future wallet code can have flexibility to be more selective about what
+ * transaction outputs it recognizes as payments, instead of having to treat
+ * all outputs spending to keys it knows as payments. By contrast,
+ * mapScripts entries added by AddCScript(script),
+ * LearnRelatedScripts(pubkey, type), and LearnAllRelatedScripts(pubkey)
+ * calls are saved because they are all intentionally used to receive
+ * payments.
+ *
+ * The FillableSigningProvider::mapScripts script map should not be confused
+ * with LegacyScriptPubKeyMan::setWatchOnly script set. The two collections
+ * can hold the same scripts, but they serve different purposes. The
+ * setWatchOnly script set is intended to expand the set of outputs the
+ * wallet considers payments. Every output with a script it contains is
+ * considered to belong to the wallet, regardless of whether the script is
+ * solvable or signable. By contrast, the scripts in mapScripts are only
+ * used for solving, and to restrict which outputs are considered payments
+ * by the wallet. An output with a script in mapScripts, unlike
+ * setWatchOnly, is not automatically considered to belong to the wallet if
+ * it can't be solved and signed for.
+ */
ScriptMap mapScripts GUARDED_BY(cs_KeyStore);
void ImplicitlyLearnRelatedKeyScripts(const CPubKey& pubkey) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
diff --git a/src/serialize.h b/src/serialize.h
index 7fa669ebdb..5045cb3c7f 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -9,6 +9,7 @@
#include <compat/endian.h>
#include <algorithm>
+#include <cstring>
#include <ios>
#include <limits>
#include <map>
@@ -25,6 +26,9 @@
static const unsigned int MAX_SIZE = 0x02000000;
+/** Maximum amount of memory (in bytes) to allocate at once when deserializing vectors. */
+static const unsigned int MAX_VECTOR_ALLOCATE = 5000000;
+
/**
* Dummy data type to identify deserializing constructors.
*
@@ -136,27 +140,31 @@ template<typename Stream> inline uint64_t ser_readdata64(Stream &s)
}
inline uint64_t ser_double_to_uint64(double x)
{
- union { double x; uint64_t y; } tmp;
- tmp.x = x;
- return tmp.y;
+ uint64_t tmp;
+ std::memcpy(&tmp, &x, sizeof(x));
+ static_assert(sizeof(tmp) == sizeof(x), "double and uint64_t assumed to have the same size");
+ return tmp;
}
inline uint32_t ser_float_to_uint32(float x)
{
- union { float x; uint32_t y; } tmp;
- tmp.x = x;
- return tmp.y;
+ uint32_t tmp;
+ std::memcpy(&tmp, &x, sizeof(x));
+ static_assert(sizeof(tmp) == sizeof(x), "float and uint32_t assumed to have the same size");
+ return tmp;
}
inline double ser_uint64_to_double(uint64_t y)
{
- union { double x; uint64_t y; } tmp;
- tmp.y = y;
- return tmp.x;
+ double tmp;
+ std::memcpy(&tmp, &y, sizeof(y));
+ static_assert(sizeof(tmp) == sizeof(y), "double and uint64_t assumed to have the same size");
+ return tmp;
}
inline float ser_uint32_to_float(uint32_t y)
{
- union { float x; uint32_t y; } tmp;
- tmp.y = y;
- return tmp.x;
+ float tmp;
+ std::memcpy(&tmp, &y, sizeof(y));
+ static_assert(sizeof(tmp) == sizeof(y), "float and uint32_t assumed to have the same size");
+ return tmp;
}
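
// Editorial note (not part of the patch), as a standalone check: std::memcpy is the
// well-defined way in C++ to reinterpret an object representation (reading the inactive
// member of a union, as before, is not), and the round trip preserves the bit pattern.
#include <cassert>
#include <cstdint>
#include <cstring>

static void ExampleBitCastRoundTrip()
{
    const double d = 3.5;
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(d));        // as in ser_double_to_uint64
    double back;
    std::memcpy(&back, &bits, sizeof(bits));  // as in ser_uint64_to_double
    assert(std::memcmp(&d, &back, sizeof(d)) == 0);
}
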
@@ -490,12 +498,13 @@ public:
template<typename Formatter, typename T>
static inline Wrapper<Formatter, T&> Using(T&& t) { return Wrapper<Formatter, T&>(t); }
-#define VARINT(obj, ...) Using<VarIntFormatter<__VA_ARGS__>>(obj)
-#define COMPACTSIZE(obj) CCompactSize(REF(obj))
+#define VARINT_MODE(obj, mode) Using<VarIntFormatter<mode>>(obj)
+#define VARINT(obj) Using<VarIntFormatter<VarIntMode::DEFAULT>>(obj)
+#define COMPACTSIZE(obj) Using<CompactSizeFormatter>(obj)
#define LIMITED_STRING(obj,n) LimitedString< n >(REF(obj))
/** Serialization wrapper class for integers in VarInt format. */
-template<VarIntMode Mode=VarIntMode::DEFAULT>
+template<VarIntMode Mode>
struct VarIntFormatter
{
template<typename Stream, typename I> void Ser(Stream &s, I v)
@@ -509,6 +518,28 @@ struct VarIntFormatter
}
};
+template<int Bytes>
+struct CustomUintFormatter
+{
+ static_assert(Bytes > 0 && Bytes <= 8, "CustomUintFormatter Bytes out of range");
+ static constexpr uint64_t MAX = 0xffffffffffffffff >> (8 * (8 - Bytes));
+
+ template <typename Stream, typename I> void Ser(Stream& s, I v)
+ {
+ if (v < 0 || v > MAX) throw std::ios_base::failure("CustomUintFormatter value out of range");
+ uint64_t raw = htole64(v);
+ s.write((const char*)&raw, Bytes);
+ }
+
+ template <typename Stream, typename I> void Unser(Stream& s, I& v)
+ {
+ static_assert(std::numeric_limits<I>::max() >= MAX && std::numeric_limits<I>::min() <= 0, "CustomUintFormatter type too small");
+ uint64_t raw = 0;
+ s.read((char*)&raw, Bytes);
+ v = le64toh(raw);
+ }
+};
+
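
// Editorial sketch (not part of the patch): the byte layout CustomUintFormatter emits,
// shown standalone. The value is range-checked against the largest value that fits in
// `Bytes` bytes and its low bytes are written little-endian; names below are local to
// this sketch.
#include <cassert>
#include <cstdint>
#include <vector>

template <int Bytes>
std::vector<unsigned char> EncodeCustomUint(uint64_t v)
{
    static_assert(Bytes > 0 && Bytes <= 8, "Bytes out of range");
    const uint64_t max = uint64_t{0xffffffffffffffff} >> (8 * (8 - Bytes)); // mirrors MAX above
    assert(v <= max);
    std::vector<unsigned char> out(Bytes);
    for (int i = 0; i < Bytes; ++i) out[i] = (v >> (8 * i)) & 0xff; // little-endian
    return out;
}
// e.g. EncodeCustomUint<3>(0x0090b1) yields {0xb1, 0x90, 0x00}
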
/** Serialization wrapper class for big-endian integers.
*
* Use this wrapper around integer types that are stored in memory in native
@@ -543,21 +574,26 @@ public:
}
};
-class CCompactSize
+/** Formatter for integers in CompactSize format. */
+struct CompactSizeFormatter
{
-protected:
- uint64_t &n;
-public:
- explicit CCompactSize(uint64_t& nIn) : n(nIn) { }
-
- template<typename Stream>
- void Serialize(Stream &s) const {
- WriteCompactSize<Stream>(s, n);
+ template<typename Stream, typename I>
+ void Unser(Stream& s, I& v)
+ {
+ uint64_t n = ReadCompactSize<Stream>(s);
+ if (n < std::numeric_limits<I>::min() || n > std::numeric_limits<I>::max()) {
+ throw std::ios_base::failure("CompactSize exceeds limit of type");
+ }
+ v = n;
}
- template<typename Stream>
- void Unserialize(Stream& s) {
- n = ReadCompactSize<Stream>(s);
+ template<typename Stream, typename I>
+ void Ser(Stream& s, I v)
+ {
+ static_assert(std::is_unsigned<I>::value, "CompactSize only supported for unsigned integers");
+ static_assert(std::numeric_limits<I>::max() <= std::numeric_limits<uint64_t>::max(), "CompactSize only supports 64-bit integers and below");
+
+ WriteCompactSize<Stream>(s, v);
}
};
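
// Editorial sketch (not part of the patch): the CompactSize wire format that the
// formatter above reads and writes, as a standalone encoder for reference.
#include <cstdint>
#include <vector>

std::vector<unsigned char> EncodeCompactSize(uint64_t n)
{
    std::vector<unsigned char> out;
    auto put_le = [&out](uint64_t v, int bytes) {
        for (int i = 0; i < bytes; ++i) out.push_back((v >> (8 * i)) & 0xff);
    };
    if (n < 253) {
        out.push_back(static_cast<unsigned char>(n)); // single byte
    } else if (n <= 0xffff) {
        out.push_back(253); put_le(n, 2);             // 0xfd + uint16 (LE)
    } else if (n <= 0xffffffff) {
        out.push_back(254); put_le(n, 4);             // 0xfe + uint32 (LE)
    } else {
        out.push_back(255); put_le(n, 8);             // 0xff + uint64 (LE)
    }
    return out;
}
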
@@ -593,6 +629,54 @@ public:
template<typename I>
BigEndian<I> WrapBigEndian(I& n) { return BigEndian<I>(n); }
+/** Formatter to serialize/deserialize vector elements using another formatter
+ *
+ * Example:
+ * struct X {
+ * std::vector<uint64_t> v;
+ * SERIALIZE_METHODS(X, obj) { READWRITE(Using<VectorFormatter<VarInt>>(obj.v)); }
+ * };
+ * will define a struct that contains a vector of uint64_t, which is serialized
+ * as a vector of VarInt-encoded integers.
+ *
+ * V is not required to be an std::vector type. It works for any class that
+ * exposes a value_type, size, reserve, emplace_back, back, and const iterators.
+ */
+template<class Formatter>
+struct VectorFormatter
+{
+ template<typename Stream, typename V>
+ void Ser(Stream& s, const V& v)
+ {
+ Formatter formatter;
+ WriteCompactSize(s, v.size());
+ for (const typename V::value_type& elem : v) {
+ formatter.Ser(s, elem);
+ }
+ }
+
+ template<typename Stream, typename V>
+ void Unser(Stream& s, V& v)
+ {
+ Formatter formatter;
+ v.clear();
+ size_t size = ReadCompactSize(s);
+ size_t allocated = 0;
+ while (allocated < size) {
+ // For DoS prevention, do not blindly allocate as much as the stream claims to contain.
+ // Instead, allocate in 5MiB batches, so that an attacker actually needs to provide
+ // X MiB of data to make us allocate X+5 MiB.
+ static_assert(sizeof(typename V::value_type) <= MAX_VECTOR_ALLOCATE, "Vector element size too large");
+ allocated = std::min(size, allocated + MAX_VECTOR_ALLOCATE / sizeof(typename V::value_type));
+ v.reserve(allocated);
+ while (v.size() < allocated) {
+ v.emplace_back();
+ formatter.Unser(s, v.back());
+ }
+ }
+ };
+};
+
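
// Editorial sketch (not part of the patch): the incremental-allocation pattern used by
// VectorFormatter::Unser above, standalone. `read_element` stands in for
// formatter.Unser(s, v.back()) and is an assumption of this sketch.
#include <algorithm>
#include <cstddef>
#include <vector>

template <typename T, typename ReadElement>
void UnserializeBatched(std::vector<T>& v, size_t claimed_size, ReadElement read_element)
{
    constexpr size_t MAX_BYTES_PER_BATCH = 5000000; // mirrors MAX_VECTOR_ALLOCATE
    static_assert(sizeof(T) <= MAX_BYTES_PER_BATCH, "element too large");
    v.clear();
    size_t allocated = 0;
    while (allocated < claimed_size) {
        // Grow by at most ~5 MB of elements per round, so the stream must actually
        // supply data before any larger allocation is made (DoS prevention).
        allocated = std::min(claimed_size, allocated + MAX_BYTES_PER_BATCH / sizeof(T));
        v.reserve(allocated);
        while (v.size() < allocated) {
            v.emplace_back();
            read_element(v.back()); // expected to throw if the stream runs out of data
        }
    }
}
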
/**
* Forward declarations
*/
@@ -673,6 +757,20 @@ inline void Unserialize(Stream& is, T&& a)
a.Unserialize(is);
}
+/** Default formatter. Serializes objects as themselves.
+ *
+ * The vector/prevector serialization code passes this to VectorFormatter
+ * to enable reusing that logic. It shouldn't be needed elsewhere.
+ */
+struct DefaultFormatter
+{
+ template<typename Stream, typename T>
+ static void Ser(Stream& s, const T& t) { Serialize(s, t); }
+
+ template<typename Stream, typename T>
+ static void Unser(Stream& s, T& t) { Unserialize(s, t); }
+};
+
@@ -713,9 +811,7 @@ void Serialize_impl(Stream& os, const prevector<N, T>& v, const unsigned char&)
template<typename Stream, unsigned int N, typename T, typename V>
void Serialize_impl(Stream& os, const prevector<N, T>& v, const V&)
{
- WriteCompactSize(os, v.size());
- for (typename prevector<N, T>::const_iterator vi = v.begin(); vi != v.end(); ++vi)
- ::Serialize(os, (*vi));
+ Serialize(os, Using<VectorFormatter<DefaultFormatter>>(v));
}
template<typename Stream, unsigned int N, typename T>
@@ -744,19 +840,7 @@ void Unserialize_impl(Stream& is, prevector<N, T>& v, const unsigned char&)
template<typename Stream, unsigned int N, typename T, typename V>
void Unserialize_impl(Stream& is, prevector<N, T>& v, const V&)
{
- v.clear();
- unsigned int nSize = ReadCompactSize(is);
- unsigned int i = 0;
- unsigned int nMid = 0;
- while (nMid < nSize)
- {
- nMid += 5000000 / sizeof(T);
- if (nMid > nSize)
- nMid = nSize;
- v.resize_uninitialized(nMid);
- for (; i < nMid; ++i)
- Unserialize(is, v[i]);
- }
+ Unserialize(is, Using<VectorFormatter<DefaultFormatter>>(v));
}
template<typename Stream, unsigned int N, typename T>
@@ -793,9 +877,7 @@ void Serialize_impl(Stream& os, const std::vector<T, A>& v, const bool&)
template<typename Stream, typename T, typename A, typename V>
void Serialize_impl(Stream& os, const std::vector<T, A>& v, const V&)
{
- WriteCompactSize(os, v.size());
- for (typename std::vector<T, A>::const_iterator vi = v.begin(); vi != v.end(); ++vi)
- ::Serialize(os, (*vi));
+ Serialize(os, Using<VectorFormatter<DefaultFormatter>>(v));
}
template<typename Stream, typename T, typename A>
@@ -824,19 +906,7 @@ void Unserialize_impl(Stream& is, std::vector<T, A>& v, const unsigned char&)
template<typename Stream, typename T, typename A, typename V>
void Unserialize_impl(Stream& is, std::vector<T, A>& v, const V&)
{
- v.clear();
- unsigned int nSize = ReadCompactSize(is);
- unsigned int i = 0;
- unsigned int nMid = 0;
- while (nMid < nSize)
- {
- nMid += 5000000 / sizeof(T);
- if (nMid > nSize)
- nMid = nSize;
- v.resize(nMid);
- for (; i < nMid; i++)
- Unserialize(is, v[i]);
- }
+ Unserialize(is, Using<VectorFormatter<DefaultFormatter>>(v));
}
template<typename Stream, typename T, typename A>
diff --git a/src/span.h b/src/span.h
index 77de059fa6..9379b15c81 100644
--- a/src/span.h
+++ b/src/span.h
@@ -8,6 +8,7 @@
#include <type_traits>
#include <cstddef>
#include <algorithm>
+#include <assert.h>
/** A Span is an object that can refer to a contiguous sequence of objects.
*
@@ -27,6 +28,8 @@ public:
constexpr C* data() const noexcept { return m_data; }
constexpr C* begin() const noexcept { return m_data; }
constexpr C* end() const noexcept { return m_data + m_size; }
+ constexpr C& front() const noexcept { return m_data[0]; }
+ constexpr C& back() const noexcept { return m_data[m_size - 1]; }
constexpr std::ptrdiff_t size() const noexcept { return m_size; }
constexpr C& operator[](std::ptrdiff_t pos) const noexcept { return m_data[pos]; }
@@ -57,4 +60,15 @@ constexpr Span<A> MakeSpan(A (&a)[N]) { return Span<A>(a, N); }
template<typename V>
constexpr Span<typename std::remove_pointer<decltype(std::declval<V>().data())>::type> MakeSpan(V& v) { return Span<typename std::remove_pointer<decltype(std::declval<V>().data())>::type>(v.data(), v.size()); }
+/** Pop the last element off a span, and return a reference to that element. */
+template <typename T>
+T& SpanPopBack(Span<T>& span)
+{
+ size_t size = span.size();
+ assert(size > 0);
+ T& back = span[size - 1];
+ span = Span<T>(span.data(), size - 1);
+ return back;
+}
+
#endif
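
// Editorial sketch (not part of the patch): SpanPopBack in isolation, assuming the
// in-tree span.h. The span shrinks by one element while the returned reference still
// points into the original container, which is how VerifyWitnessProgram now peels the
// witness script off the witness stack without copying it.
#include <span.h>

#include <cassert>
#include <vector>

static void ExampleSpanPopBack()
{
    std::vector<int> v{10, 20, 30};
    Span<int> s = MakeSpan(v);
    int& last = SpanPopBack(s);
    assert(last == 30);
    assert(s.size() == 2);   // the span now covers {10, 20}
    assert(&last == &v[2]);  // no copy was made
}
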
diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp
index 6980b6c0da..f3cc12201c 100644
--- a/src/support/lockedpool.cpp
+++ b/src/support/lockedpool.cpp
@@ -253,6 +253,9 @@ void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
}
if (addr) {
*lockingSuccess = mlock(addr, len) == 0;
+#ifdef MADV_DONTDUMP
+ madvise(addr, len, MADV_DONTDUMP);
+#endif
}
return addr;
}
diff --git a/src/sync.cpp b/src/sync.cpp
index 924e7b5bb0..71657a7439 100644
--- a/src/sync.cpp
+++ b/src/sync.cpp
@@ -13,7 +13,7 @@
#include <util/strencodings.h>
#include <util/threadnames.h>
-
+#include <system_error>
#include <map>
#include <set>
@@ -60,6 +60,11 @@ struct CLockLocation {
mutexName, sourceFile, itostr(sourceLine), (fTry ? " (TRY)" : ""), m_thread_name);
}
+ std::string Name() const
+ {
+ return mutexName;
+ }
+
private:
bool fTry;
std::string mutexName;
@@ -155,6 +160,18 @@ void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs
push_lock(cs, CLockLocation(pszName, pszFile, nLine, fTry, util::ThreadGetInternalName()));
}
+void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line)
+{
+ if (!g_lockstack.empty()) {
+ const auto& lastlock = g_lockstack.back();
+ if (lastlock.first == cs) {
+ lockname = lastlock.second.Name();
+ return;
+ }
+ }
+ throw std::system_error(EPERM, std::generic_category(), strprintf("%s:%s %s was not most recent critical section locked", file, line, guardname));
+}
+
void LeaveCritical()
{
pop_lock();
diff --git a/src/sync.h b/src/sync.h
index 0cdbb59c70..ead2cdc67b 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -10,9 +10,9 @@
#include <util/macros.h>
#include <condition_variable>
-#include <thread>
#include <mutex>
-
+#include <string>
+#include <thread>
////////////////////////////////////////////////
// //
@@ -50,6 +50,7 @@ LEAVE_CRITICAL_SECTION(mutex); // no RAII
#ifdef DEBUG_LOCKORDER
void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false);
void LeaveCritical();
+void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line);
std::string LocksHeld();
void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) ASSERT_EXCLUSIVE_LOCK(cs);
void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs);
@@ -64,6 +65,7 @@ extern bool g_debug_lockorder_abort;
#else
void static inline EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
void static inline LeaveCritical() {}
+void static inline CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {}
void static inline AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) ASSERT_EXCLUSIVE_LOCK(cs) {}
void static inline AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
void static inline DeleteLock(void* cs) {}
@@ -171,8 +173,45 @@ public:
{
return Base::owns_lock();
}
+
+protected:
+ // needed for reverse_lock
+ UniqueLock() { }
+
+public:
+ /**
+ * An RAII-style reverse lock. Unlocks on construction and locks on destruction.
+ */
+ class reverse_lock {
+ public:
+ explicit reverse_lock(UniqueLock& _lock, const char* _guardname, const char* _file, int _line) : lock(_lock), file(_file), line(_line) {
+ CheckLastCritical((void*)lock.mutex(), lockname, _guardname, _file, _line);
+ lock.unlock();
+ LeaveCritical();
+ lock.swap(templock);
+ }
+
+ ~reverse_lock() {
+ templock.swap(lock);
+ EnterCritical(lockname.c_str(), file.c_str(), line, (void*)lock.mutex());
+ lock.lock();
+ }
+
+ private:
+ reverse_lock(reverse_lock const&);
+ reverse_lock& operator=(reverse_lock const&);
+
+ UniqueLock& lock;
+ UniqueLock templock;
+ std::string lockname;
+ const std::string file;
+ const int line;
+ };
+ friend class reverse_lock;
};
+#define REVERSE_LOCK(g) decltype(g)::reverse_lock PASTE2(revlock, __COUNTER__)(g, #g, __FILE__, __LINE__)
+
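
// Editorial sketch (not part of the patch): the intended use of the new REVERSE_LOCK
// macro with a named lock (here taken via the existing WAIT_LOCK macro). The mutex and
// function names are illustrative only.
#include <sync.h>

Mutex g_example_mutex;

void ExampleReverseLock()
{
    WAIT_LOCK(g_example_mutex, lock);
    // ... work that requires g_example_mutex ...
    {
        REVERSE_LOCK(lock); // g_example_mutex is released here
        // ... slow work that must not hold g_example_mutex ...
    }                       // g_example_mutex is re-acquired when the guard is destroyed
    // ... more work under g_example_mutex ...
}
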
template<typename MutexArg>
using DebugLock = UniqueLock<typename std::remove_reference<typename std::remove_pointer<MutexArg>::type>::type>;
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index 07cebeb35a..dfa8a6df21 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -6,6 +6,7 @@
#include <string>
#include <boost/test/unit_test.hpp>
#include <util/asmap.h>
+#include <util/string.h>
#include <test/data/asmap.raw.h>
#include <hash.h>
@@ -266,7 +267,7 @@ BOOST_AUTO_TEST_CASE(addrman_new_collisions)
BOOST_CHECK_EQUAL(addrman.size(), 0U);
for (unsigned int i = 1; i < 18; i++) {
- CService addr = ResolveService("250.1.1." + std::to_string(i));
+ CService addr = ResolveService("250.1.1." + ToString(i));
BOOST_CHECK(addrman.Add(CAddress(addr, NODE_NONE), source));
//Test: No collision in new table yet.
@@ -292,7 +293,7 @@ BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
BOOST_CHECK_EQUAL(addrman.size(), 0U);
for (unsigned int i = 1; i < 80; i++) {
- CService addr = ResolveService("250.1.1." + std::to_string(i));
+ CService addr = ResolveService("250.1.1." + ToString(i));
BOOST_CHECK(addrman.Add(CAddress(addr, NODE_NONE), source));
addrman.Good(CAddress(addr, NODE_NONE));
@@ -425,7 +426,7 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
for (unsigned int i = 1; i < (8 * 256); i++) {
int octet1 = i % 256;
int octet2 = i >> 8 % 256;
- std::string strAddr = std::to_string(octet1) + "." + std::to_string(octet2) + ".1.23";
+ std::string strAddr = ToString(octet1) + "." + ToString(octet2) + ".1.23";
CAddress addr = CAddress(ResolveService(strAddr), NODE_NONE);
// Ensure that for all addrs in addrman, isTerrible == false.
@@ -477,8 +478,8 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket_legacy)
std::set<int> buckets;
for (int i = 0; i < 255; i++) {
CAddrInfo infoi = CAddrInfo(
- CAddress(ResolveService("250.1.1." + std::to_string(i)), NODE_NONE),
- ResolveIP("250.1.1." + std::to_string(i)));
+ CAddress(ResolveService("250.1.1." + ToString(i)), NODE_NONE),
+ ResolveIP("250.1.1." + ToString(i)));
int bucket = infoi.GetTriedBucket(nKey1, asmap);
buckets.insert(bucket);
}
@@ -489,8 +490,8 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket_legacy)
buckets.clear();
for (int j = 0; j < 255; j++) {
CAddrInfo infoj = CAddrInfo(
- CAddress(ResolveService("250." + std::to_string(j) + ".1.1"), NODE_NONE),
- ResolveIP("250." + std::to_string(j) + ".1.1"));
+ CAddress(ResolveService("250." + ToString(j) + ".1.1"), NODE_NONE),
+ ResolveIP("250." + ToString(j) + ".1.1"));
int bucket = infoj.GetTriedBucket(nKey1, asmap);
buckets.insert(bucket);
}
@@ -531,8 +532,8 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket_legacy)
std::set<int> buckets;
for (int i = 0; i < 255; i++) {
CAddrInfo infoi = CAddrInfo(
- CAddress(ResolveService("250.1.1." + std::to_string(i)), NODE_NONE),
- ResolveIP("250.1.1." + std::to_string(i)));
+ CAddress(ResolveService("250.1.1." + ToString(i)), NODE_NONE),
+ ResolveIP("250.1.1." + ToString(i)));
int bucket = infoi.GetNewBucket(nKey1, asmap);
buckets.insert(bucket);
}
@@ -544,7 +545,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket_legacy)
for (int j = 0; j < 4 * 255; j++) {
CAddrInfo infoj = CAddrInfo(CAddress(
ResolveService(
- std::to_string(250 + (j / 255)) + "." + std::to_string(j % 256) + ".1.1"), NODE_NONE),
+ ToString(250 + (j / 255)) + "." + ToString(j % 256) + ".1.1"), NODE_NONE),
ResolveIP("251.4.1.1"));
int bucket = infoj.GetNewBucket(nKey1, asmap);
buckets.insert(bucket);
@@ -557,7 +558,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket_legacy)
for (int p = 0; p < 255; p++) {
CAddrInfo infoj = CAddrInfo(
CAddress(ResolveService("250.1.1.1"), NODE_NONE),
- ResolveIP("250." + std::to_string(p) + ".1.1"));
+ ResolveIP("250." + ToString(p) + ".1.1"));
int bucket = infoj.GetNewBucket(nKey1, asmap);
buckets.insert(bucket);
}
@@ -610,8 +611,8 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
std::set<int> buckets;
for (int j = 0; j < 255; j++) {
CAddrInfo infoj = CAddrInfo(
- CAddress(ResolveService("101." + std::to_string(j) + ".1.1"), NODE_NONE),
- ResolveIP("101." + std::to_string(j) + ".1.1"));
+ CAddress(ResolveService("101." + ToString(j) + ".1.1"), NODE_NONE),
+ ResolveIP("101." + ToString(j) + ".1.1"));
int bucket = infoj.GetTriedBucket(nKey1, asmap);
buckets.insert(bucket);
}
@@ -622,8 +623,8 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
buckets.clear();
for (int j = 0; j < 255; j++) {
CAddrInfo infoj = CAddrInfo(
- CAddress(ResolveService("250." + std::to_string(j) + ".1.1"), NODE_NONE),
- ResolveIP("250." + std::to_string(j) + ".1.1"));
+ CAddress(ResolveService("250." + ToString(j) + ".1.1"), NODE_NONE),
+ ResolveIP("250." + ToString(j) + ".1.1"));
int bucket = infoj.GetTriedBucket(nKey1, asmap);
buckets.insert(bucket);
}
@@ -664,8 +665,8 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
std::set<int> buckets;
for (int i = 0; i < 255; i++) {
CAddrInfo infoi = CAddrInfo(
- CAddress(ResolveService("250.1.1." + std::to_string(i)), NODE_NONE),
- ResolveIP("250.1.1." + std::to_string(i)));
+ CAddress(ResolveService("250.1.1." + ToString(i)), NODE_NONE),
+ ResolveIP("250.1.1." + ToString(i)));
int bucket = infoi.GetNewBucket(nKey1, asmap);
buckets.insert(bucket);
}
@@ -677,7 +678,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
for (int j = 0; j < 4 * 255; j++) {
CAddrInfo infoj = CAddrInfo(CAddress(
ResolveService(
- std::to_string(250 + (j / 255)) + "." + std::to_string(j % 256) + ".1.1"), NODE_NONE),
+ ToString(250 + (j / 255)) + "." + ToString(j % 256) + ".1.1"), NODE_NONE),
ResolveIP("251.4.1.1"));
int bucket = infoj.GetNewBucket(nKey1, asmap);
buckets.insert(bucket);
@@ -690,7 +691,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
for (int p = 0; p < 255; p++) {
CAddrInfo infoj = CAddrInfo(
CAddress(ResolveService("250.1.1.1"), NODE_NONE),
- ResolveIP("101." + std::to_string(p) + ".1.1"));
+ ResolveIP("101." + ToString(p) + ".1.1"));
int bucket = infoj.GetNewBucket(nKey1, asmap);
buckets.insert(bucket);
}
@@ -702,7 +703,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
for (int p = 0; p < 255; p++) {
CAddrInfo infoj = CAddrInfo(
CAddress(ResolveService("250.1.1.1"), NODE_NONE),
- ResolveIP("250." + std::to_string(p) + ".1.1"));
+ ResolveIP("250." + ToString(p) + ".1.1"));
int bucket = infoj.GetNewBucket(nKey1, asmap);
buckets.insert(bucket);
}
@@ -791,7 +792,7 @@ BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
// Add twenty two addresses.
CNetAddr source = ResolveIP("252.2.2.2");
for (unsigned int i = 1; i < 23; i++) {
- CService addr = ResolveService("250.1.1."+std::to_string(i));
+ CService addr = ResolveService("250.1.1."+ToString(i));
BOOST_CHECK(addrman.Add(CAddress(addr, NODE_NONE), source));
addrman.Good(addr);
@@ -802,7 +803,7 @@ BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
// Ensure Good handles duplicates well.
for (unsigned int i = 1; i < 23; i++) {
- CService addr = ResolveService("250.1.1."+std::to_string(i));
+ CService addr = ResolveService("250.1.1."+ToString(i));
addrman.Good(addr);
BOOST_CHECK(addrman.size() == 22);
@@ -818,7 +819,7 @@ BOOST_AUTO_TEST_CASE(addrman_noevict)
// Add twenty two addresses.
CNetAddr source = ResolveIP("252.2.2.2");
for (unsigned int i = 1; i < 23; i++) {
- CService addr = ResolveService("250.1.1."+std::to_string(i));
+ CService addr = ResolveService("250.1.1."+ToString(i));
BOOST_CHECK(addrman.Add(CAddress(addr, NODE_NONE), source));
addrman.Good(addr);
@@ -841,7 +842,7 @@ BOOST_AUTO_TEST_CASE(addrman_noevict)
// Lets create two collisions.
for (unsigned int i = 24; i < 33; i++) {
- CService addr = ResolveService("250.1.1."+std::to_string(i));
+ CService addr = ResolveService("250.1.1."+ToString(i));
BOOST_CHECK(addrman.Add(CAddress(addr, NODE_NONE), source));
addrman.Good(addr);
@@ -879,7 +880,7 @@ BOOST_AUTO_TEST_CASE(addrman_evictionworks)
// Add twenty two addresses.
CNetAddr source = ResolveIP("252.2.2.2");
for (unsigned int i = 1; i < 23; i++) {
- CService addr = ResolveService("250.1.1."+std::to_string(i));
+ CService addr = ResolveService("250.1.1."+ToString(i));
BOOST_CHECK(addrman.Add(CAddress(addr, NODE_NONE), source));
addrman.Good(addr);
diff --git a/src/test/blockchain_tests.cpp b/src/test/blockchain_tests.cpp
index 3b4c480f72..aa704642bf 100644
--- a/src/test/blockchain_tests.cpp
+++ b/src/test/blockchain_tests.cpp
@@ -8,6 +8,7 @@
#include <chain.h>
#include <rpc/blockchain.h>
+#include <util/string.h>
#include <test/util/setup_common.h>
/* Equality between doubles is imprecise. Comparison should be done
@@ -30,8 +31,8 @@ static CBlockIndex* CreateBlockIndexWithNbits(uint32_t nbits)
static void RejectDifficultyMismatch(double difficulty, double expected_difficulty) {
BOOST_CHECK_MESSAGE(
DoubleEquals(difficulty, expected_difficulty, 0.00001),
- "Difficulty was " + std::to_string(difficulty)
- + " but was expected to be " + std::to_string(expected_difficulty));
+ "Difficulty was " + ToString(difficulty)
+ + " but was expected to be " + ToString(expected_difficulty));
}
/* Given a BlockIndex with the provided nbits,
diff --git a/src/test/blockfilter_index_tests.cpp b/src/test/blockfilter_index_tests.cpp
index 79e18cd2c0..5e52dc268f 100644
--- a/src/test/blockfilter_index_tests.cpp
+++ b/src/test/blockfilter_index_tests.cpp
@@ -138,7 +138,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup)
int64_t time_start = GetTimeMillis();
while (!filter_index.BlockUntilSyncedToCurrentChain()) {
BOOST_REQUIRE(time_start + timeout_ms > GetTimeMillis());
- MilliSleep(100);
+ UninterruptibleSleep(std::chrono::milliseconds{100});
}
// Check that filter index has all blocks that were in the chain before it started.
diff --git a/src/test/bswap_tests.cpp b/src/test/bswap_tests.cpp
index d5e2344a8b..0b4bfdb019 100644
--- a/src/test/bswap_tests.cpp
+++ b/src/test/bswap_tests.cpp
@@ -11,16 +11,16 @@ BOOST_FIXTURE_TEST_SUITE(bswap_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(bswap_tests)
{
- // Sibling in bitcoin/src/qt/test/compattests.cpp
- uint16_t u1 = 0x1234;
- uint32_t u2 = 0x56789abc;
- uint64_t u3 = 0xdef0123456789abc;
- uint16_t e1 = 0x3412;
- uint32_t e2 = 0xbc9a7856;
- uint64_t e3 = 0xbc9a78563412f0de;
- BOOST_CHECK(bswap_16(u1) == e1);
- BOOST_CHECK(bswap_32(u2) == e2);
- BOOST_CHECK(bswap_64(u3) == e3);
+ // Sibling in bitcoin/src/qt/test/compattests.cpp
+ uint16_t u1 = 0x1234;
+ uint32_t u2 = 0x56789abc;
+ uint64_t u3 = 0xdef0123456789abc;
+ uint16_t e1 = 0x3412;
+ uint32_t e2 = 0xbc9a7856;
+ uint64_t e3 = 0xbc9a78563412f0de;
+ BOOST_CHECK(bswap_16(u1) == e1);
+ BOOST_CHECK(bswap_32(u2) == e2);
+ BOOST_CHECK(bswap_64(u3) == e3);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/checkqueue_tests.cpp b/src/test/checkqueue_tests.cpp
index 482fe3772c..a9628e85f9 100644
--- a/src/test/checkqueue_tests.cpp
+++ b/src/test/checkqueue_tests.cpp
@@ -393,7 +393,7 @@ BOOST_AUTO_TEST_CASE(test_CheckQueueControl_Locks)
CCheckQueueControl<FakeCheck> control(queue.get());
// While sleeping, no other thread should execute to this point
auto observed = ++nThreads;
- MilliSleep(10);
+ UninterruptibleSleep(std::chrono::milliseconds{10});
fails += observed != nThreads;
});
}
diff --git a/src/test/data/script_tests.json b/src/test/data/script_tests.json
index 3241f32f56..c01ef307b7 100644
--- a/src/test/data/script_tests.json
+++ b/src/test/data/script_tests.json
@@ -121,9 +121,9 @@
["8388608", "SIZE 4 EQUAL", "P2SH,STRICTENC", "OK"],
["2147483647", "SIZE 4 EQUAL", "P2SH,STRICTENC", "OK"],
["2147483648", "SIZE 5 EQUAL", "P2SH,STRICTENC", "OK"],
-["549755813887", "SIZE 5 EQUAL", "P2SH,STRICTENC", "OK"],
-["549755813888", "SIZE 6 EQUAL", "P2SH,STRICTENC", "OK"],
-["9223372036854775807", "SIZE 8 EQUAL", "P2SH,STRICTENC", "OK"],
+["0x05ffffffff7f", "SIZE 5 EQUAL", "P2SH,STRICTENC", "OK"],
+["0x06000000008000", "SIZE 6 EQUAL", "P2SH,STRICTENC", "OK"],
+["0x08ffffffffffffff7f", "SIZE 8 EQUAL", "P2SH,STRICTENC", "OK"],
["-1", "SIZE 1 EQUAL", "P2SH,STRICTENC", "OK"],
["-127", "SIZE 1 EQUAL", "P2SH,STRICTENC", "OK"],
["-128", "SIZE 2 EQUAL", "P2SH,STRICTENC", "OK"],
@@ -133,9 +133,9 @@
["-8388608", "SIZE 4 EQUAL", "P2SH,STRICTENC", "OK"],
["-2147483647", "SIZE 4 EQUAL", "P2SH,STRICTENC", "OK"],
["-2147483648", "SIZE 5 EQUAL", "P2SH,STRICTENC", "OK"],
-["-549755813887", "SIZE 5 EQUAL", "P2SH,STRICTENC", "OK"],
-["-549755813888", "SIZE 6 EQUAL", "P2SH,STRICTENC", "OK"],
-["-9223372036854775807", "SIZE 8 EQUAL", "P2SH,STRICTENC", "OK"],
+["0x05ffffffffff", "SIZE 5 EQUAL", "P2SH,STRICTENC", "OK"],
+["0x06000000008080", "SIZE 6 EQUAL", "P2SH,STRICTENC", "OK"],
+["0x08ffffffffffffffff", "SIZE 8 EQUAL", "P2SH,STRICTENC", "OK"],
["'abcdefghijklmnopqrstuvwxyz'", "SIZE 26 EQUAL", "P2SH,STRICTENC", "OK"],
["42", "SIZE 1 EQUALVERIFY 42 EQUAL", "P2SH,STRICTENC", "OK", "SIZE does not consume argument"],
@@ -360,9 +360,9 @@
["8388608", "0x04 0x00008000 EQUAL", "P2SH,STRICTENC", "OK"],
["2147483647", "0x04 0xFFFFFF7F EQUAL", "P2SH,STRICTENC", "OK"],
["2147483648", "0x05 0x0000008000 EQUAL", "P2SH,STRICTENC", "OK"],
-["549755813887", "0x05 0xFFFFFFFF7F EQUAL", "P2SH,STRICTENC", "OK"],
-["549755813888", "0x06 0x000000008000 EQUAL", "P2SH,STRICTENC", "OK"],
-["9223372036854775807", "0x08 0xFFFFFFFFFFFFFF7F EQUAL", "P2SH,STRICTENC", "OK"],
+["0x05ffffffff7f", "0x05 0xFFFFFFFF7F EQUAL", "P2SH,STRICTENC", "OK"],
+["0x06000000008000", "0x06 0x000000008000 EQUAL", "P2SH,STRICTENC", "OK"],
+["0x08ffffffffffffff7f", "0x08 0xFFFFFFFFFFFFFF7F EQUAL", "P2SH,STRICTENC", "OK"],
["-1", "0x01 0x81 EQUAL", "P2SH,STRICTENC", "OK", "Numbers are little-endian with the MSB being a sign bit"],
["-127", "0x01 0xFF EQUAL", "P2SH,STRICTENC", "OK"],
["-128", "0x02 0x8080 EQUAL", "P2SH,STRICTENC", "OK"],
@@ -373,9 +373,9 @@
["-2147483647", "0x04 0xFFFFFFFF EQUAL", "P2SH,STRICTENC", "OK"],
["-2147483648", "0x05 0x0000008080 EQUAL", "P2SH,STRICTENC", "OK"],
["-4294967295", "0x05 0xFFFFFFFF80 EQUAL", "P2SH,STRICTENC", "OK"],
-["-549755813887", "0x05 0xFFFFFFFFFF EQUAL", "P2SH,STRICTENC", "OK"],
-["-549755813888", "0x06 0x000000008080 EQUAL", "P2SH,STRICTENC", "OK"],
-["-9223372036854775807", "0x08 0xFFFFFFFFFFFFFFFF EQUAL", "P2SH,STRICTENC", "OK"],
+["0x05ffffffffff", "0x05 0xFFFFFFFFFF EQUAL", "P2SH,STRICTENC", "OK"],
+["0x06000000008080", "0x06 0x000000008080 EQUAL", "P2SH,STRICTENC", "OK"],
+["0x08ffffffffffffffff", "0x08 0xFFFFFFFFFFFFFFFF EQUAL", "P2SH,STRICTENC", "OK"],
["2147483647", "1ADD 2147483648 EQUAL", "P2SH,STRICTENC", "OK", "We can do math on 4-byte integers, and compare 5-byte ones"],
["2147483647", "1ADD 1", "P2SH,STRICTENC", "OK"],
@@ -2521,7 +2521,7 @@
["-1", "CHECKSEQUENCEVERIFY", "CHECKSEQUENCEVERIFY", "NEGATIVE_LOCKTIME", "CSV automatically fails if stack top is negative"],
["0x0100", "CHECKSEQUENCEVERIFY", "CHECKSEQUENCEVERIFY,MINIMALDATA", "UNKNOWN_ERROR", "CSV fails if stack top is not minimally encoded"],
["0", "CHECKSEQUENCEVERIFY", "CHECKSEQUENCEVERIFY", "UNSATISFIED_LOCKTIME", "CSV fails if stack top bit 1 << 31 is set and the tx version < 2"],
-["4294967296", "CHECKSEQUENCEVERIFY", "CHECKSEQUENCEVERIFY", "UNSATISFIED_LOCKTIME",
+["0x050000000001", "CHECKSEQUENCEVERIFY", "CHECKSEQUENCEVERIFY", "UNSATISFIED_LOCKTIME",
"CSV fails if stack top bit 1 << 31 is not set, and tx version < 2"],
["MINIMALIF tests"],
diff --git a/src/test/data/tx_invalid.json b/src/test/data/tx_invalid.json
index d22175d660..3b1db449b2 100644
--- a/src/test/data/tx_invalid.json
+++ b/src/test/data/tx_invalid.json
@@ -174,7 +174,7 @@
"0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ff64cd1d", "P2SH,CHECKLOCKTIMEVERIFY"],
["Argument 2^32 with nLockTime=2^32-1"],
-[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "4294967296 CHECKLOCKTIMEVERIFY 1"]],
+[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x050000000001 CHECKLOCKTIMEVERIFY 1"]],
"0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ffffffff", "P2SH,CHECKLOCKTIMEVERIFY"],
["Same, but with nLockTime=2^31-1"],
diff --git a/src/test/data/tx_valid.json b/src/test/data/tx_valid.json
index 4a1c77166d..11634c90f0 100644
--- a/src/test/data/tx_valid.json
+++ b/src/test/data/tx_valid.json
@@ -292,11 +292,11 @@
"020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff0100000000000000000000000000", "P2SH,CHECKSEQUENCEVERIFY"],
["Argument 3<<31 with various nSequence"],
-[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "6442450944 CHECKSEQUENCEVERIFY 1"]],
+[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x050000008001 CHECKSEQUENCEVERIFY 1"]],
"020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffbf7f0100000000000000000000000000", "P2SH,CHECKSEQUENCEVERIFY"],
-[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "6442450944 CHECKSEQUENCEVERIFY 1"]],
+[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x050000008001 CHECKSEQUENCEVERIFY 1"]],
"020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffff7f0100000000000000000000000000", "P2SH,CHECKSEQUENCEVERIFY"],
-[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "6442450944 CHECKSEQUENCEVERIFY 1"]],
+[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x050000008001 CHECKSEQUENCEVERIFY 1"]],
"020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff0100000000000000000000000000", "P2SH,CHECKSEQUENCEVERIFY"],
["5 byte non-minimally-encoded operandss are valid"],
diff --git a/src/test/dbwrapper_tests.cpp b/src/test/dbwrapper_tests.cpp
index b647c0f70b..3dfae29de6 100644
--- a/src/test/dbwrapper_tests.cpp
+++ b/src/test/dbwrapper_tests.cpp
@@ -399,15 +399,15 @@ BOOST_AUTO_TEST_CASE(iterator_string_ordering)
BOOST_AUTO_TEST_CASE(unicodepath)
{
- // Attempt to create a database with a utf8 character in the path.
+ // Attempt to create a database with a UTF8 character in the path.
// On Windows this test will fail if the directory is created using
- // the ANSI CreateDirectoryA call and the code page isn't UTF8.
- // It will succeed if the created with CreateDirectoryW.
+ // the ANSI CreateDirectoryA call and the code page isn't UTF8.
+ // It will succeed if created with CreateDirectoryW.
fs::path ph = GetDataDir() / "test_runner_₿_🏃_20191128_104644";
CDBWrapper dbw(ph, (1 << 20));
fs::path lockPath = ph / "LOCK";
- BOOST_CHECK(boost::filesystem::exists(lockPath));
+ BOOST_CHECK(fs::exists(lockPath));
}
diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp
index 2c2b3035e3..7310498eb6 100644
--- a/src/test/denialofservice_tests.cpp
+++ b/src/test/denialofservice_tests.cpp
@@ -13,6 +13,7 @@
#include <script/standard.h>
#include <serialize.h>
#include <util/memory.h>
+#include <util/string.h>
#include <util/system.h>
#include <util/time.h>
#include <validation.h>
@@ -78,7 +79,7 @@ BOOST_FIXTURE_TEST_SUITE(denialofservice_tests, TestingSetup)
BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
{
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), nullptr, scheduler);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), nullptr, *m_node.scheduler, *m_node.mempool);
// Mock an outbound peer
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
@@ -148,7 +149,7 @@ static void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerLogicValidat
BOOST_AUTO_TEST_CASE(stale_tip_peer_management)
{
auto connman = MakeUnique<CConnmanTest>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), nullptr, scheduler);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), nullptr, *m_node.scheduler, *m_node.mempool);
const Consensus::Params& consensusParams = Params().GetConsensus();
constexpr int max_outbound_full_relay = 8;
@@ -221,7 +222,7 @@ BOOST_AUTO_TEST_CASE(DoS_banning)
{
auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), scheduler);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler, *m_node.mempool);
banman->ClearBanned();
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
@@ -276,7 +277,7 @@ BOOST_AUTO_TEST_CASE(DoS_banscore)
{
auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), scheduler);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler, *m_node.mempool);
banman->ClearBanned();
gArgs.ForceSetArg("-banscore", "111"); // because 11 is my favorite number
@@ -313,7 +314,7 @@ BOOST_AUTO_TEST_CASE(DoS_banscore)
BOOST_CHECK(peerLogic->SendMessages(&dummyNode1));
}
BOOST_CHECK(banman->IsBanned(addr1));
- gArgs.ForceSetArg("-banscore", std::to_string(DEFAULT_BANSCORE_THRESHOLD));
+ gArgs.ForceSetArg("-banscore", ToString(DEFAULT_BANSCORE_THRESHOLD));
bool dummy;
peerLogic->FinalizeNode(dummyNode1.GetId(), dummy);
@@ -323,7 +324,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
{
auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), scheduler);
+ auto peerLogic = MakeUnique<PeerLogicValidation>(connman.get(), banman.get(), *m_node.scheduler, *m_node.mempool);
banman->ClearBanned();
int64_t nStartTime = GetTime();
diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp
index bcce8854e3..3154c619d2 100644
--- a/src/test/descriptor_tests.cpp
+++ b/src/test/descriptor_tests.cpp
@@ -29,6 +29,7 @@ constexpr int RANGE = 1; // Expected to be ranged descriptor
constexpr int HARDENED = 2; // Derivation needs access to private keys
constexpr int UNSOLVABLE = 4; // This descriptor is not expected to be solvable
constexpr int SIGNABLE = 8; // We can sign with this descriptor (this is not true when actual BIP32 derivation is used, as that's not integrated in our signing code)
+constexpr int DERIVE_HARDENED = 16; // The final derivation is hardened, i.e. ends with *' or *h
/** Compare two descriptors. If only one of them has a checksum, the checksum is ignored. */
bool EqualDescriptor(std::string a, std::string b)
@@ -62,7 +63,7 @@ std::string UseHInsteadOfApostrophe(const std::string& desc)
const std::set<std::vector<uint32_t>> ONLY_EMPTY{{}};
-void DoCheck(const std::string& prv, const std::string& pub, int flags, const std::vector<std::vector<std::string>>& scripts, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY,
+void DoCheck(const std::string& prv, const std::string& pub, int flags, const std::vector<std::vector<std::string>>& scripts, const Optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY,
bool replace_apostrophe_with_h_in_prv=false, bool replace_apostrophe_with_h_in_pub=false)
{
FlatSigningProvider keys_priv, keys_pub;
@@ -86,6 +87,10 @@ void DoCheck(const std::string& prv, const std::string& pub, int flags, const st
BOOST_CHECK(parse_priv);
BOOST_CHECK(parse_pub);
+ // Check that the correct OutputType is inferred
+ BOOST_CHECK(parse_priv->GetOutputType() == type);
+ BOOST_CHECK(parse_pub->GetOutputType() == type);
+
// Check private keys are extracted from the private version but not the public one.
BOOST_CHECK(keys_priv.keys.size());
BOOST_CHECK(!keys_pub.keys.size());
@@ -131,19 +136,82 @@ void DoCheck(const std::string& prv, const std::string& pub, int flags, const st
// Evaluate the descriptor selected by `t` in position `i`.
FlatSigningProvider script_provider, script_provider_cached;
std::vector<CScript> spks, spks_cached;
- std::vector<unsigned char> cache;
- BOOST_CHECK((t ? parse_priv : parse_pub)->Expand(i, key_provider, spks, script_provider, &cache));
+ DescriptorCache desc_cache;
+ BOOST_CHECK((t ? parse_priv : parse_pub)->Expand(i, key_provider, spks, script_provider, &desc_cache));
// Compare the output with the expected result.
BOOST_CHECK_EQUAL(spks.size(), ref.size());
// Try to expand again using cached data, and compare.
- BOOST_CHECK(parse_pub->ExpandFromCache(i, cache, spks_cached, script_provider_cached));
+ BOOST_CHECK(parse_pub->ExpandFromCache(i, desc_cache, spks_cached, script_provider_cached));
BOOST_CHECK(spks == spks_cached);
BOOST_CHECK(script_provider.pubkeys == script_provider_cached.pubkeys);
BOOST_CHECK(script_provider.scripts == script_provider_cached.scripts);
BOOST_CHECK(script_provider.origins == script_provider_cached.origins);
+ // Check whether keys are in the cache
+ const auto& der_xpub_cache = desc_cache.GetCachedDerivedExtPubKeys();
+ const auto& parent_xpub_cache = desc_cache.GetCachedParentExtPubKeys();
+ if ((flags & RANGE) && !(flags & DERIVE_HARDENED)) {
+ // For ranged, unhardened derivation, none of the keys in origins should appear in the cache, but the cache should have parent keys
+ // But we can derive one level from each of those parent keys and find them all
+ BOOST_CHECK(der_xpub_cache.empty());
+ BOOST_CHECK(parent_xpub_cache.size() > 0);
+ std::set<CPubKey> pubkeys;
+ for (const auto& xpub_pair : parent_xpub_cache) {
+ const CExtPubKey& xpub = xpub_pair.second;
+ CExtPubKey der;
+ xpub.Derive(der, i);
+ pubkeys.insert(der.pubkey);
+ }
+ for (const auto& origin_pair : script_provider_cached.origins) {
+ const CPubKey& pk = origin_pair.second.first;
+ BOOST_CHECK(pubkeys.count(pk) > 0);
+ }
+ } else if (pub1.find("xpub") != std::string::npos) {
+ // For ranged hardened derivation, or for non-ranged descriptors that contain an xpub, all of the keys should appear in the cache
+ BOOST_CHECK(der_xpub_cache.size() + parent_xpub_cache.size() == script_provider_cached.origins.size());
+ // Get all of the derived pubkeys
+ std::set<CPubKey> pubkeys;
+ for (const auto& xpub_map_pair : der_xpub_cache) {
+ for (const auto& xpub_pair : xpub_map_pair.second) {
+ const CExtPubKey& xpub = xpub_pair.second;
+ pubkeys.insert(xpub.pubkey);
+ }
+ }
+ // Derive one level from all of the parents
+ for (const auto& xpub_pair : parent_xpub_cache) {
+ const CExtPubKey& xpub = xpub_pair.second;
+ pubkeys.insert(xpub.pubkey);
+ CExtPubKey der;
+ xpub.Derive(der, i);
+ pubkeys.insert(der.pubkey);
+ }
+ for (const auto& origin_pair : script_provider_cached.origins) {
+ const CPubKey& pk = origin_pair.second.first;
+ BOOST_CHECK(pubkeys.count(pk) > 0);
+ }
+ } else {
+ // No xpub, nothing should be cached
+ BOOST_CHECK(der_xpub_cache.empty());
+ BOOST_CHECK(parent_xpub_cache.empty());
+ }
+
+ // Make sure we can expand using cached xpubs for unhardened derivation
+ if (!(flags & DERIVE_HARDENED)) {
+ // Evaluate the descriptor at i + 1
+ FlatSigningProvider script_provider1, script_provider_cached1;
+ std::vector<CScript> spks1, spk1_from_cache;
+ BOOST_CHECK((t ? parse_priv : parse_pub)->Expand(i + 1, key_provider, spks1, script_provider1, nullptr));
+
+ // Try again but use the cache from expanding i. That cache won't have the pubkeys for i + 1, but will have the parent xpub for derivation.
+ BOOST_CHECK(parse_pub->ExpandFromCache(i + 1, desc_cache, spk1_from_cache, script_provider_cached1));
+ BOOST_CHECK(spks1 == spk1_from_cache);
+ BOOST_CHECK(script_provider1.pubkeys == script_provider_cached1.pubkeys);
+ BOOST_CHECK(script_provider1.scripts == script_provider_cached1.scripts);
+ BOOST_CHECK(script_provider1.origins == script_provider_cached1.origins);
+ }
+
// For each of the produced scripts, verify solvability, and when possible, try to sign a transaction spending it.
for (size_t n = 0; n < spks.size(); ++n) {
BOOST_CHECK_EQUAL(ref[n], HexStr(spks[n].begin(), spks[n].end()));
@@ -181,29 +249,29 @@ void DoCheck(const std::string& prv, const std::string& pub, int flags, const st
BOOST_CHECK_MESSAGE(left_paths.empty(), "Not all expected key paths found: " + prv);
}
-void Check(const std::string& prv, const std::string& pub, int flags, const std::vector<std::vector<std::string>>& scripts, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY)
+void Check(const std::string& prv, const std::string& pub, int flags, const std::vector<std::vector<std::string>>& scripts, const Optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY)
{
bool found_apostrophes_in_prv = false;
bool found_apostrophes_in_pub = false;
// Do not replace apostrophes with 'h' in prv and pub
- DoCheck(prv, pub, flags, scripts, paths);
+ DoCheck(prv, pub, flags, scripts, type, paths);
// Replace apostrophes with 'h' in prv but not in pub, if apostrophes are found in prv
if (prv.find('\'') != std::string::npos) {
found_apostrophes_in_prv = true;
- DoCheck(prv, pub, flags, scripts, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */false);
+ DoCheck(prv, pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */false);
}
// Replace apostrophes with 'h' in pub but not in prv, if apostrophes are found in pub
if (pub.find('\'') != std::string::npos) {
found_apostrophes_in_pub = true;
- DoCheck(prv, pub, flags, scripts, paths, /* replace_apostrophe_with_h_in_prv = */false, /*replace_apostrophe_with_h_in_pub = */true);
+ DoCheck(prv, pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */false, /*replace_apostrophe_with_h_in_pub = */true);
}
// Replace apostrophes with 'h' both in prv and in pub, if apostrophes are found in both
if (found_apostrophes_in_prv && found_apostrophes_in_pub) {
- DoCheck(prv, pub, flags, scripts, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */true);
+ DoCheck(prv, pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */true);
}
}
@@ -214,50 +282,50 @@ BOOST_FIXTURE_TEST_SUITE(descriptor_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(descriptor_test)
{
// Basic single-key compressed
- Check("combo(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "combo(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac","76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac","00149a1c78a507689f6f54b847ad1cef1e614ee23f1e","a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}});
- Check("pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac"}});
- Check("pkh([deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh([deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac"}}, {{1,0x80000002UL,3,0x80000004UL}});
- Check("wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"00149a1c78a507689f6f54b847ad1cef1e614ee23f1e"}});
- Check("sh(wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}});
+ Check("combo(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "combo(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac","76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac","00149a1c78a507689f6f54b847ad1cef1e614ee23f1e","a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}}, nullopt);
+ Check("pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac"}}, nullopt);
+ Check("pkh([deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh([deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac"}}, OutputType::LEGACY, {{1,0x80000002UL,3,0x80000004UL}});
+ Check("wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"00149a1c78a507689f6f54b847ad1cef1e614ee23f1e"}}, OutputType::BECH32);
+ Check("sh(wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}}, OutputType::P2SH_SEGWIT);
CheckUnparsable("sh(wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY2))", "sh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5))", "Pubkey '03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5' is invalid"); // Invalid pubkey
CheckUnparsable("pkh(deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh(deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "Key origin start '[ character expected but not found, got 'd' instead"); // Missing start bracket in key origin
CheckUnparsable("pkh([deadbeef]/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh([deadbeef]/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "Multiple ']' characters found for a single pubkey"); // Multiple end brackets in key origin
// Basic single-key uncompressed
- Check("combo(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "combo(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac","76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}});
- Check("pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac"}});
- Check("pkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}});
+ Check("combo(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "combo(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac","76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}}, nullopt);
+ Check("pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac"}}, nullopt);
+ Check("pkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}}, OutputType::LEGACY);
CheckUnparsable("wpkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "wpkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "Uncompressed keys are not allowed"); // No uncompressed keys in witness
CheckUnparsable("wsh(pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss))", "wsh(pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235))", "Uncompressed keys are not allowed"); // No uncompressed keys in witness
CheckUnparsable("sh(wpkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss))", "sh(wpkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235))", "Uncompressed keys are not allowed"); // No uncompressed keys in witness
// Some unconventional single-key constructions
- Check("sh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141857af51a5e516552b3086430fd8ce55f7c1a52487"}});
- Check("sh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141a31ad23bf49c247dd531a623c2ef57da3c400c587"}});
- Check("wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"00202e271faa2325c199d25d22e1ead982e45b64eeb4f31e73dbdf41bd4b5fec23fa"}});
- Check("wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"0020338e023079b91c58571b20e602d7805fb808c22473cbc391a41b1bd3a192e75b"}});
- Check("sh(wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a91472d0c5a3bfad8c3e7bd5303a72b94240e80b6f1787"}});
- Check("sh(wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a914b61b92e2ca21bac1e72a3ab859a742982bea960a87"}});
+ Check("sh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141857af51a5e516552b3086430fd8ce55f7c1a52487"}}, OutputType::LEGACY);
+ Check("sh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141a31ad23bf49c247dd531a623c2ef57da3c400c587"}}, OutputType::LEGACY);
+ Check("wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"00202e271faa2325c199d25d22e1ead982e45b64eeb4f31e73dbdf41bd4b5fec23fa"}}, OutputType::BECH32);
+ Check("wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"0020338e023079b91c58571b20e602d7805fb808c22473cbc391a41b1bd3a192e75b"}}, OutputType::BECH32);
+ Check("sh(wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a91472d0c5a3bfad8c3e7bd5303a72b94240e80b6f1787"}}, OutputType::P2SH_SEGWIT);
+ Check("sh(wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a914b61b92e2ca21bac1e72a3ab859a742982bea960a87"}}, OutputType::P2SH_SEGWIT);
// Versions with BIP32 derivations
- Check("combo([01234567]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([01234567]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", SIGNABLE, {{"2102d2b36900396c9282fa14628566582f206a5dd0bcc8d5e892611806cafb0301f0ac","76a91431a507b815593dfc51ffc7245ae7e5aee304246e88ac","001431a507b815593dfc51ffc7245ae7e5aee304246e","a9142aafb926eb247cb18240a7f4c07983ad1f37922687"}});
- Check("pk(xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)", "pk(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)", DEFAULT, {{"210379e45b3cf75f9c5f9befd8e9506fb962f6a9d185ac87001ec44a8d3df8d4a9e3ac"}}, {{0}});
- Check("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0)", HARDENED, {{"76a914ebdc90806a9c4356c1c88e42216611e1cb4c1c1788ac"}}, {{0xFFFFFFFFUL,0}});
- Check("wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*)", "wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)", RANGE, {{"0014326b2249e3a25d5dc60935f044ee835d090ba859"},{"0014af0bd98abc2f2cae66e36896a39ffe2d32984fb7"},{"00141fa798efd1cbf95cebf912c031b8a4a6e9fb9f27"}}, {{0x8000000DUL, 1, 2, 0}, {0x8000000DUL, 1, 2, 1}, {0x8000000DUL, 1, 2, 2}});
- Check("sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", RANGE | HARDENED, {{"a9149a4d9901d6af519b2a23d4a2f51650fcba87ce7b87"},{"a914bed59fc0024fae941d6e20a3b44a109ae740129287"},{"a9148483aa1116eb9c05c482a72bada4b1db24af654387"}}, {{10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}});
- Check("combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)", "combo(xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*)", RANGE, {{"2102df12b7035bdac8e3bab862a3a83d06ea6b17b6753d52edecba9be46f5d09e076ac","76a914f90e3178ca25f2c808dc76624032d352fdbdfaf288ac","0014f90e3178ca25f2c808dc76624032d352fdbdfaf2","a91408f3ea8c68d4a7585bf9e8bda226723f70e445f087"},{"21032869a233c9adff9a994e4966e5b821fd5bac066da6c3112488dc52383b4a98ecac","76a914a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b788ac","0014a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b7","a91473e39884cb71ae4e5ac9739e9225026c99763e6687"}}, {{0}, {1}});
+ Check("combo([01234567]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([01234567]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", SIGNABLE, {{"2102d2b36900396c9282fa14628566582f206a5dd0bcc8d5e892611806cafb0301f0ac","76a91431a507b815593dfc51ffc7245ae7e5aee304246e88ac","001431a507b815593dfc51ffc7245ae7e5aee304246e","a9142aafb926eb247cb18240a7f4c07983ad1f37922687"}}, nullopt);
+ Check("pk(xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)", "pk(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)", DEFAULT, {{"210379e45b3cf75f9c5f9befd8e9506fb962f6a9d185ac87001ec44a8d3df8d4a9e3ac"}}, nullopt, {{0}});
+ Check("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0)", HARDENED, {{"76a914ebdc90806a9c4356c1c88e42216611e1cb4c1c1788ac"}}, OutputType::LEGACY, {{0xFFFFFFFFUL,0}});
+ Check("wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*)", "wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)", RANGE, {{"0014326b2249e3a25d5dc60935f044ee835d090ba859"},{"0014af0bd98abc2f2cae66e36896a39ffe2d32984fb7"},{"00141fa798efd1cbf95cebf912c031b8a4a6e9fb9f27"}}, OutputType::BECH32, {{0x8000000DUL, 1, 2, 0}, {0x8000000DUL, 1, 2, 1}, {0x8000000DUL, 1, 2, 2}});
+ Check("sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", RANGE | HARDENED | DERIVE_HARDENED, {{"a9149a4d9901d6af519b2a23d4a2f51650fcba87ce7b87"},{"a914bed59fc0024fae941d6e20a3b44a109ae740129287"},{"a9148483aa1116eb9c05c482a72bada4b1db24af654387"}}, OutputType::P2SH_SEGWIT, {{10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}});
+ Check("combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)", "combo(xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*)", RANGE, {{"2102df12b7035bdac8e3bab862a3a83d06ea6b17b6753d52edecba9be46f5d09e076ac","76a914f90e3178ca25f2c808dc76624032d352fdbdfaf288ac","0014f90e3178ca25f2c808dc76624032d352fdbdfaf2","a91408f3ea8c68d4a7585bf9e8bda226723f70e445f087"},{"21032869a233c9adff9a994e4966e5b821fd5bac066da6c3112488dc52383b4a98ecac","76a914a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b788ac","0014a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b7","a91473e39884cb71ae4e5ac9739e9225026c99763e6687"}}, nullopt, {{0}, {1}});
CheckUnparsable("combo([012345678]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([012345678]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", "Fingerprint is not 4 bytes (9 characters instead of 8 characters)"); // Too long key fingerprint
CheckUnparsable("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483648)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483648)", "Key path value 2147483648 is out of range"); // BIP 32 path element overflow
CheckUnparsable("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/1aa)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/1aa)", "Key path value '1aa' is not a valid uint32"); // Path is not valid uint
// Multisig constructions
- Check("multi(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "multi(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}});
- Check("sortedmulti(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "sortedmulti(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}});
- Check("sortedmulti(1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "sortedmulti(1,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}});
- Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, {{0x8000006FUL,222},{0}});
- Check("sortedmulti(2,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/*,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0/0/*)", "sortedmulti(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/0/*)", RANGE, {{"5221025d5fc65ebb8d44a5274b53bac21ff8307fec2334a32df05553459f8b1f7fe1b62102fbd47cc8034098f0e6a94c6aeee8528abf0a2153a5d8e46d325b7284c046784652ae"}, {"52210264fd4d1f5dea8ded94c61e9641309349b62f27fbffe807291f664e286bfbe6472103f4ece6dfccfa37b211eb3d0af4d0c61dba9ef698622dc17eecdf764beeb005a652ae"}, {"5221022ccabda84c30bad578b13c89eb3b9544ce149787e5b538175b1d1ba259cbb83321024d902e1a2fc7a8755ab5b694c575fce742c48d9ff192e63df5193e4c7afe1f9c52ae"}}, {{0}, {1}, {2}, {0, 0, 0}, {0, 0, 1}, {0, 0, 2}});
- Check("wsh(multi(2,xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", HARDENED | RANGE, {{"0020b92623201f3bb7c3771d45b2ad1d0351ea8fbf8cfe0a0e570264e1075fa1948f"},{"002036a08bbe4923af41cf4316817c93b8d37e2f635dd25cfff06bd50df6ae7ea203"},{"0020a96e7ab4607ca6b261bfe3245ffda9c746b28d3f59e83d34820ec0e2b36c139c"}}, {{0xFFFFFFFFUL,0}, {1,2,0}, {1,2,1}, {1,2,2}, {10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}});
- Check("sh(wsh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9)))","sh(wsh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232)))", SIGNABLE, {{"a9147fc63e13dc25e8a95a3cee3d9a714ac3afd96f1e87"}});
+ Check("multi(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "multi(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt);
+ Check("sortedmulti(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "sortedmulti(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt);
+ Check("sortedmulti(1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "sortedmulti(1,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt);
+ Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}});
+ Check("sortedmulti(2,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/*,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0/0/*)", "sortedmulti(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/0/*)", RANGE, {{"5221025d5fc65ebb8d44a5274b53bac21ff8307fec2334a32df05553459f8b1f7fe1b62102fbd47cc8034098f0e6a94c6aeee8528abf0a2153a5d8e46d325b7284c046784652ae"}, {"52210264fd4d1f5dea8ded94c61e9641309349b62f27fbffe807291f664e286bfbe6472103f4ece6dfccfa37b211eb3d0af4d0c61dba9ef698622dc17eecdf764beeb005a652ae"}, {"5221022ccabda84c30bad578b13c89eb3b9544ce149787e5b538175b1d1ba259cbb83321024d902e1a2fc7a8755ab5b694c575fce742c48d9ff192e63df5193e4c7afe1f9c52ae"}}, nullopt, {{0}, {1}, {2}, {0, 0, 0}, {0, 0, 1}, {0, 0, 2}});
+ Check("wsh(multi(2,xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", HARDENED | RANGE | DERIVE_HARDENED, {{"0020b92623201f3bb7c3771d45b2ad1d0351ea8fbf8cfe0a0e570264e1075fa1948f"},{"002036a08bbe4923af41cf4316817c93b8d37e2f635dd25cfff06bd50df6ae7ea203"},{"0020a96e7ab4607ca6b261bfe3245ffda9c746b28d3f59e83d34820ec0e2b36c139c"}}, OutputType::BECH32, {{0xFFFFFFFFUL,0}, {1,2,0}, {1,2,1}, {1,2,2}, {10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}});
+ Check("sh(wsh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9)))","sh(wsh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232)))", SIGNABLE, {{"a9147fc63e13dc25e8a95a3cee3d9a714ac3afd96f1e87"}}, OutputType::P2SH_SEGWIT);
CheckUnparsable("sh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9))","sh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232))", "P2SH script is too large, 547 bytes is larger than 520 bytes"); // P2SH does not fit 16 compressed pubkeys in a redeemscript
CheckUnparsable("wsh(multi(2,[aaaaaaaa][aaaaaaaa]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,[aaaaaaaa][aaaaaaaa]xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", "Multiple ']' characters found for a single pubkey"); // Double key origin descriptor
CheckUnparsable("wsh(multi(2,[aaaagaaa]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,[aaagaaaa]xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", "Fingerprint 'aaagaaaa' is not hex"); // Non hex fingerprint
@@ -280,8 +348,8 @@ BOOST_AUTO_TEST_CASE(descriptor_test)
CheckUnparsable("wsh(wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "wsh(wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", "Cannot have wsh within wsh"); // Cannot embed P2WSH inside P2WSH
// Checksums
- Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxfy", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5t", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, {{0x8000006FUL,222},{0}});
- Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, {{0x8000006FUL,222},{0}});
+ Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxfy", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5t", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}});
+ Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}});
CheckUnparsable("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#", "Expected 8 character checksum, not 0 characters"); // Empty checksum
CheckUnparsable("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxfyq", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5tq", "Expected 8 character checksum, not 9 characters"); // Too long checksum
CheckUnparsable("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxf", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5", "Expected 8 character checksum, not 7 characters"); // Too short checksum
diff --git a/src/test/fuzz/addrdb.cpp b/src/test/fuzz/addrdb.cpp
new file mode 100644
index 0000000000..f21ff3fac3
--- /dev/null
+++ b/src/test/fuzz/addrdb.cpp
@@ -0,0 +1,43 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <addrdb.h>
+#include <optional.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+
+#include <cassert>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
+ const CBanEntry ban_entry = [&] {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 3)) {
+ case 0:
+ return CBanEntry{fuzzed_data_provider.ConsumeIntegral<int64_t>()};
+ break;
+ case 1:
+ return CBanEntry{fuzzed_data_provider.ConsumeIntegral<int64_t>(), fuzzed_data_provider.PickValueInArray<BanReason>({
+ BanReason::BanReasonUnknown,
+ BanReason::BanReasonNodeMisbehaving,
+ BanReason::BanReasonManuallyAdded,
+ })};
+ break;
+ case 2: {
+ const Optional<CBanEntry> ban_entry = ConsumeDeserializable<CBanEntry>(fuzzed_data_provider);
+ if (ban_entry) {
+ return *ban_entry;
+ }
+ break;
+ }
+ }
+ return CBanEntry{};
+ }();
+ assert(!ban_entry.banReasonToString().empty());
+}
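addrdb.cpp and the other new files in this commit only supply initialize() and test_one_input(); the engine-facing entry points live in the shared harness (src/test/fuzz/fuzz.cpp). A rough sketch of that glue, assuming a libFuzzer-style driver (the real wrapper also supports standalone runs that read a single input, which is omitted here):

    #include <cstdint>
    #include <vector>

    // Declared in test/fuzz/fuzz.h; every target defines test_one_input, and
    // targets needing setup (chain params, ECC handles) define initialize() too.
    void initialize();
    void test_one_input(const std::vector<uint8_t>& buffer);

    extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv)
    {
        initialize(); // how targets without an initialize() are handled is glossed over here
        return 0;
    }

    extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size)
    {
        test_one_input(std::vector<uint8_t>(data, data + size));
        return 0;
    }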
diff --git a/src/test/fuzz/base_encode_decode.cpp b/src/test/fuzz/base_encode_decode.cpp
index cb0fbdf76f..adad6b3f96 100644
--- a/src/test/fuzz/base_encode_decode.cpp
+++ b/src/test/fuzz/base_encode_decode.cpp
@@ -5,6 +5,7 @@
#include <test/fuzz/fuzz.h>
#include <base58.h>
+#include <psbt.h>
#include <util/string.h>
#include <util/strencodings.h>
@@ -44,4 +45,8 @@ void test_one_input(const std::vector<uint8_t>& buffer)
assert(encoded_string == TrimString(encoded_string));
assert(ToLower(encoded_string) == ToLower(TrimString(random_encoded_string)));
}
+
+ PartiallySignedTransaction psbt;
+ std::string error;
+ (void)DecodeBase64PSBT(psbt, random_encoded_string, error);
}
diff --git a/src/test/fuzz/block.cpp b/src/test/fuzz/block.cpp
index 431248de4a..9d0ad369a2 100644
--- a/src/test/fuzz/block.cpp
+++ b/src/test/fuzz/block.cpp
@@ -19,7 +19,7 @@
void initialize()
{
- const static auto verify_handle = MakeUnique<ECCVerifyHandle>();
+ static const ECCVerifyHandle verify_handle;
SelectParams(CBaseChainParams::REGTEST);
}
@@ -59,5 +59,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
}
(void)GetBlockWeight(block);
(void)GetWitnessCommitmentIndex(block);
- (void)RecursiveDynamicUsage(block);
+ const size_t raw_memory_size = RecursiveDynamicUsage(block);
+ const size_t raw_memory_size_as_shared_ptr = RecursiveDynamicUsage(std::make_shared<CBlock>(block));
+ assert(raw_memory_size_as_shared_ptr > raw_memory_size);
}
diff --git a/src/test/fuzz/block_header.cpp b/src/test/fuzz/block_header.cpp
new file mode 100644
index 0000000000..92dcccc0e1
--- /dev/null
+++ b/src/test/fuzz/block_header.cpp
@@ -0,0 +1,41 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <optional.h>
+#include <primitives/block.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+#include <uint256.h>
+
+#include <cassert>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const Optional<CBlockHeader> block_header = ConsumeDeserializable<CBlockHeader>(fuzzed_data_provider);
+ if (!block_header) {
+ return;
+ }
+ {
+ const uint256 hash = block_header->GetHash();
+ static const uint256 u256_max(uint256S("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
+ assert(hash != u256_max);
+ assert(block_header->GetBlockTime() == block_header->nTime);
+ assert(block_header->IsNull() == (block_header->nBits == 0));
+ }
+ {
+ CBlockHeader mut_block_header = *block_header;
+ mut_block_header.SetNull();
+ assert(mut_block_header.IsNull());
+ CBlock block{*block_header};
+ assert(block.GetBlockHeader().GetHash() == block_header->GetHash());
+ (void)block.ToString();
+ block.SetNull();
+ assert(block.GetBlockHeader().GetHash() == mut_block_header.GetHash());
+ }
+}
diff --git a/src/test/fuzz/blockfilter.cpp b/src/test/fuzz/blockfilter.cpp
new file mode 100644
index 0000000000..be9320dcbf
--- /dev/null
+++ b/src/test/fuzz/blockfilter.cpp
@@ -0,0 +1,44 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <blockfilter.h>
+#include <optional.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const Optional<BlockFilter> block_filter = ConsumeDeserializable<BlockFilter>(fuzzed_data_provider);
+ if (!block_filter) {
+ return;
+ }
+ {
+ (void)block_filter->ComputeHeader(ConsumeUInt256(fuzzed_data_provider));
+ (void)block_filter->GetBlockHash();
+ (void)block_filter->GetEncodedFilter();
+ (void)block_filter->GetHash();
+ }
+ {
+ const BlockFilterType block_filter_type = block_filter->GetFilterType();
+ (void)BlockFilterTypeName(block_filter_type);
+ }
+ {
+ const GCSFilter gcs_filter = block_filter->GetFilter();
+ (void)gcs_filter.GetN();
+ (void)gcs_filter.GetParams();
+ (void)gcs_filter.GetEncoded();
+ (void)gcs_filter.Match(ConsumeRandomLengthByteVector(fuzzed_data_provider));
+ GCSFilter::ElementSet element_set;
+ while (fuzzed_data_provider.ConsumeBool()) {
+ element_set.insert(ConsumeRandomLengthByteVector(fuzzed_data_provider));
+ gcs_filter.MatchAny(element_set);
+ }
+ }
+}
diff --git a/src/test/fuzz/bloom_filter.cpp b/src/test/fuzz/bloom_filter.cpp
new file mode 100644
index 0000000000..d1112f8e62
--- /dev/null
+++ b/src/test/fuzz/bloom_filter.cpp
@@ -0,0 +1,80 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <bloom.h>
+#include <optional.h>
+#include <primitives/transaction.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+#include <uint256.h>
+
+#include <cassert>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
+ CBloomFilter bloom_filter{
+ fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(1, 10000000),
+ 1.0 / fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(1, std::numeric_limits<unsigned int>::max()),
+ fuzzed_data_provider.ConsumeIntegral<unsigned int>(),
+ static_cast<unsigned char>(fuzzed_data_provider.PickValueInArray({BLOOM_UPDATE_NONE, BLOOM_UPDATE_ALL, BLOOM_UPDATE_P2PUBKEY_ONLY, BLOOM_UPDATE_MASK}))};
+ while (fuzzed_data_provider.remaining_bytes() > 0) {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 6)) {
+ case 0: {
+ const std::vector<unsigned char> b = ConsumeRandomLengthByteVector(fuzzed_data_provider);
+ (void)bloom_filter.contains(b);
+ bloom_filter.insert(b);
+ const bool present = bloom_filter.contains(b);
+ assert(present);
+ break;
+ }
+ case 1: {
+ const Optional<COutPoint> out_point = ConsumeDeserializable<COutPoint>(fuzzed_data_provider);
+ if (!out_point) {
+ break;
+ }
+ (void)bloom_filter.contains(*out_point);
+ bloom_filter.insert(*out_point);
+ const bool present = bloom_filter.contains(*out_point);
+ assert(present);
+ break;
+ }
+ case 2: {
+ const Optional<uint256> u256 = ConsumeDeserializable<uint256>(fuzzed_data_provider);
+ if (!u256) {
+ break;
+ }
+ (void)bloom_filter.contains(*u256);
+ bloom_filter.insert(*u256);
+ const bool present = bloom_filter.contains(*u256);
+ assert(present);
+ break;
+ }
+ case 3:
+ bloom_filter.clear();
+ break;
+ case 4:
+ bloom_filter.reset(fuzzed_data_provider.ConsumeIntegral<unsigned int>());
+ break;
+ case 5: {
+ const Optional<CMutableTransaction> mut_tx = ConsumeDeserializable<CMutableTransaction>(fuzzed_data_provider);
+ if (!mut_tx) {
+ break;
+ }
+ const CTransaction tx{*mut_tx};
+ (void)bloom_filter.IsRelevantAndUpdate(tx);
+ break;
+ }
+ case 6:
+ bloom_filter.UpdateEmptyFull();
+ break;
+ }
+ (void)bloom_filter.IsWithinSizeConstraints();
+ }
+}
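The constructor arguments exercised above are, in order: the expected number of elements, the target false-positive rate, a tweak for the hash seeds, and the update-flags byte. A minimal usage sketch of the no-false-negatives invariant that the case-0 branch asserts:

    #include <bloom.h>

    #include <cassert>
    #include <vector>

    void BloomFilterRoundTrip()
    {
        CBloomFilter filter{/* nElements */ 100, /* nFPRate */ 0.01, /* nTweak */ 0, BLOOM_UPDATE_NONE};
        const std::vector<unsigned char> element{0xde, 0xad, 0xbe, 0xef};
        filter.insert(element);
        // A Bloom filter may report false positives, but never a false
        // negative for something that was inserted.
        assert(filter.contains(element));
    }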
diff --git a/src/test/fuzz/chain.cpp b/src/test/fuzz/chain.cpp
new file mode 100644
index 0000000000..b322516cc7
--- /dev/null
+++ b/src/test/fuzz/chain.cpp
@@ -0,0 +1,65 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <chain.h>
+#include <optional.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+
+#include <cstdint>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ Optional<CDiskBlockIndex> disk_block_index = ConsumeDeserializable<CDiskBlockIndex>(fuzzed_data_provider);
+ if (!disk_block_index) {
+ return;
+ }
+
+ const uint256 zero{};
+ disk_block_index->phashBlock = &zero;
+ (void)disk_block_index->GetBlockHash();
+ (void)disk_block_index->GetBlockPos();
+ (void)disk_block_index->GetBlockTime();
+ (void)disk_block_index->GetBlockTimeMax();
+ (void)disk_block_index->GetMedianTimePast();
+ (void)disk_block_index->GetUndoPos();
+ (void)disk_block_index->HaveTxsDownloaded();
+ (void)disk_block_index->IsValid();
+ (void)disk_block_index->ToString();
+
+ const CBlockHeader block_header = disk_block_index->GetBlockHeader();
+ (void)CDiskBlockIndex{*disk_block_index};
+ (void)disk_block_index->BuildSkip();
+
+ while (fuzzed_data_provider.ConsumeBool()) {
+ const BlockStatus block_status = fuzzed_data_provider.PickValueInArray({
+ BlockStatus::BLOCK_VALID_UNKNOWN,
+ BlockStatus::BLOCK_VALID_RESERVED,
+ BlockStatus::BLOCK_VALID_TREE,
+ BlockStatus::BLOCK_VALID_TRANSACTIONS,
+ BlockStatus::BLOCK_VALID_CHAIN,
+ BlockStatus::BLOCK_VALID_SCRIPTS,
+ BlockStatus::BLOCK_VALID_MASK,
+ BlockStatus::BLOCK_HAVE_DATA,
+ BlockStatus::BLOCK_HAVE_UNDO,
+ BlockStatus::BLOCK_HAVE_MASK,
+ BlockStatus::BLOCK_FAILED_VALID,
+ BlockStatus::BLOCK_FAILED_CHILD,
+ BlockStatus::BLOCK_FAILED_MASK,
+ BlockStatus::BLOCK_OPT_WITNESS,
+ });
+ if (block_status & ~BLOCK_VALID_MASK) {
+ continue;
+ }
+ (void)disk_block_index->RaiseValidity(block_status);
+ }
+
+ CBlockIndex block_index{block_header};
+ block_index.phashBlock = &zero;
+ (void)block_index.GetBlockHash();
+ (void)block_index.ToString();
+}
diff --git a/src/test/fuzz/descriptor_parse.cpp b/src/test/fuzz/descriptor_parse.cpp
index 47d5038c26..a0ef08cca6 100644
--- a/src/test/fuzz/descriptor_parse.cpp
+++ b/src/test/fuzz/descriptor_parse.cpp
@@ -10,7 +10,7 @@
void initialize()
{
- static const auto verify_handle = MakeUnique<ECCVerifyHandle>();
+ static const ECCVerifyHandle verify_handle;
SelectParams(CBaseChainParams::REGTEST);
}
diff --git a/src/test/fuzz/deserialize.cpp b/src/test/fuzz/deserialize.cpp
index f06f339b9d..964fc85302 100644
--- a/src/test/fuzz/deserialize.cpp
+++ b/src/test/fuzz/deserialize.cpp
@@ -13,6 +13,7 @@
#include <key.h>
#include <merkleblock.h>
#include <net.h>
+#include <node/utxo_snapshot.h>
#include <primitives/block.h>
#include <protocol.h>
#include <psbt.h>
@@ -34,7 +35,7 @@
void initialize()
{
// Fuzzers using pubkey must hold an ECCVerifyHandle.
- static const auto verify_handle = MakeUnique<ECCVerifyHandle>();
+ static const ECCVerifyHandle verify_handle;
}
namespace {
@@ -214,9 +215,24 @@ void test_one_input(const std::vector<uint8_t>& buffer)
#elif BLOCKTRANSACTIONSREQUEST_DESERIALIZE
BlockTransactionsRequest btr;
DeserializeFromFuzzingInput(buffer, btr);
+#elif SNAPSHOTMETADATA_DESERIALIZE
+ SnapshotMetadata snapshot_metadata;
+ DeserializeFromFuzzingInput(buffer, snapshot_metadata);
+#elif UINT160_DESERIALIZE
+ uint160 u160;
+ DeserializeFromFuzzingInput(buffer, u160);
+ AssertEqualAfterSerializeDeserialize(u160);
+#elif UINT256_DESERIALIZE
+ uint256 u256;
+ DeserializeFromFuzzingInput(buffer, u256);
+ AssertEqualAfterSerializeDeserialize(u256);
#else
#error Need at least one fuzz target to compile
#endif
+ // Classes intentionally not covered in this file since their deserialization code is
+ // fuzzed elsewhere:
+ // * Deserialization of CTxOut is fuzzed in src/test/fuzz/tx_out.cpp
+ // * Deserialization of CMutableTransaction is fuzzed in src/test/fuzz/transaction.cpp
} catch (const invalid_fuzzing_input_exception&) {
}
}
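Each new *_DESERIALIZE branch becomes its own fuzz binary: deserialize.cpp is compiled once per target with exactly one of these macros defined on the compiler command line (the per-target defines are expected to live in src/Makefile.test.include). The AssertEqualAfterSerializeDeserialize calls check a round-trip property along the lines of the sketch below; the real helper is defined earlier in deserialize.cpp and is only assumed to look like this:

    #include <streams.h>
    #include <version.h>

    #include <cassert>

    template <typename T>
    void AssertRoundTripSketch(const T& obj)
    {
        // Serialize, deserialize into a fresh object, and require equality.
        CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
        stream << obj;
        T obj2;
        stream >> obj2;
        assert(obj == obj2);
    }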
diff --git a/src/test/fuzz/eval_script.cpp b/src/test/fuzz/eval_script.cpp
index 7acdd76857..6a1b037630 100644
--- a/src/test/fuzz/eval_script.cpp
+++ b/src/test/fuzz/eval_script.cpp
@@ -12,7 +12,7 @@
void initialize()
{
- static const auto verify_handle = MakeUnique<ECCVerifyHandle>();
+ static const ECCVerifyHandle verify_handle;
}
void test_one_input(const std::vector<uint8_t>& buffer)
diff --git a/src/test/fuzz/fee_rate.cpp b/src/test/fuzz/fee_rate.cpp
new file mode 100644
index 0000000000..f3d44d9f93
--- /dev/null
+++ b/src/test/fuzz/fee_rate.cpp
@@ -0,0 +1,40 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <amount.h>
+#include <policy/feerate.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+
+#include <cstdint>
+#include <limits>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const CAmount satoshis_per_k = ConsumeMoney(fuzzed_data_provider);
+ const CFeeRate fee_rate{satoshis_per_k};
+
+ (void)fee_rate.GetFeePerK();
+ const size_t bytes = fuzzed_data_provider.ConsumeIntegral<size_t>();
+ if (!MultiplicationOverflow(static_cast<int64_t>(bytes), satoshis_per_k) && bytes <= static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) {
+ (void)fee_rate.GetFee(bytes);
+ }
+ (void)fee_rate.ToString();
+
+ const CAmount another_satoshis_per_k = ConsumeMoney(fuzzed_data_provider);
+ CFeeRate larger_fee_rate{another_satoshis_per_k};
+ larger_fee_rate += fee_rate;
+ if (satoshis_per_k != 0 && another_satoshis_per_k != 0) {
+ assert(fee_rate < larger_fee_rate);
+ assert(!(fee_rate > larger_fee_rate));
+ assert(!(fee_rate == larger_fee_rate));
+ assert(fee_rate <= larger_fee_rate);
+ assert(!(fee_rate >= larger_fee_rate));
+ assert(fee_rate != larger_fee_rate);
+ }
+}
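CFeeRate stores satoshis per 1000 bytes, so the GetFee call guarded against overflow above is essentially a linear scaling. A simplified sketch of that arithmetic (the real GetFee additionally rounds a truncated-to-zero fee away from zero; that detail is left out):

    #include <cstdint>

    // Assumes the multiplication cannot overflow int64_t, which is exactly what
    // the MultiplicationOverflow() guard in the fuzz target checks beforehand.
    int64_t FeeForSizeSketch(int64_t satoshis_per_1000_bytes, int64_t num_bytes)
    {
        return satoshis_per_1000_bytes * num_bytes / 1000;
    }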
diff --git a/src/test/fuzz/float.cpp b/src/test/fuzz/float.cpp
new file mode 100644
index 0000000000..a24bae5b35
--- /dev/null
+++ b/src/test/fuzz/float.cpp
@@ -0,0 +1,42 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <memusage.h>
+#include <serialize.h>
+#include <streams.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <version.h>
+
+#include <cassert>
+#include <cstdint>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
+ {
+ const double d = fuzzed_data_provider.ConsumeFloatingPoint<double>();
+ (void)memusage::DynamicUsage(d);
+ assert(ser_uint64_to_double(ser_double_to_uint64(d)) == d);
+
+ CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+ stream << d;
+ double d_deserialized;
+ stream >> d_deserialized;
+ assert(d == d_deserialized);
+ }
+
+ {
+ const float f = fuzzed_data_provider.ConsumeFloatingPoint<float>();
+ (void)memusage::DynamicUsage(f);
+ assert(ser_uint32_to_float(ser_float_to_uint32(f)) == f);
+
+ CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+ stream << f;
+ float f_deserialized;
+ stream >> f_deserialized;
+ assert(f == f_deserialized);
+ }
+}
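The round-trip assertions above only require that ser_double_to_uint64/ser_uint64_to_double (and the float equivalents) be bit-preserving. A self-contained illustration of such a bit-preserving round trip, using memcpy-based type punning rather than the project's own helpers:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    uint64_t DoubleToBits(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof(bits)); // bit-for-bit copy, no value conversion
        return bits;
    }

    double BitsToDouble(uint64_t bits)
    {
        double d;
        std::memcpy(&d, &bits, sizeof(d));
        return d;
    }

    void RoundTripExample()
    {
        const double d = 1.5;
        assert(BitsToDouble(DoubleToBits(d)) == d);
    }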
diff --git a/src/test/fuzz/hex.cpp b/src/test/fuzz/hex.cpp
index 54693180be..3bbf0084c2 100644
--- a/src/test/fuzz/hex.cpp
+++ b/src/test/fuzz/hex.cpp
@@ -2,8 +2,13 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <core_io.h>
+#include <pubkey.h>
+#include <primitives/block.h>
+#include <rpc/util.h>
#include <test/fuzz/fuzz.h>
-
+#include <uint256.h>
+#include <univalue.h>
#include <util/strencodings.h>
#include <cassert>
@@ -11,6 +16,10 @@
#include <string>
#include <vector>
+void initialize() {
+ static const ECCVerifyHandle verify_handle;
+}
+
void test_one_input(const std::vector<uint8_t>& buffer)
{
const std::string random_hex_string(buffer.begin(), buffer.end());
@@ -19,4 +28,16 @@ void test_one_input(const std::vector<uint8_t>& buffer)
if (IsHex(random_hex_string)) {
assert(ToLower(random_hex_string) == hex_data);
}
+ (void)IsHexNumber(random_hex_string);
+ uint256 result;
+ (void)ParseHashStr(random_hex_string, result);
+ (void)uint256S(random_hex_string);
+ try {
+ (void)HexToPubKey(random_hex_string);
+ } catch (const UniValue&) {
+ }
+ CBlockHeader block_header;
+ (void)DecodeHexBlockHeader(block_header, random_hex_string);
+ CBlock block;
+ (void)DecodeHexBlk(block, random_hex_string);
}
diff --git a/src/test/fuzz/integer.cpp b/src/test/fuzz/integer.cpp
index 723938bcdb..63b9296574 100644
--- a/src/test/fuzz/integer.cpp
+++ b/src/test/fuzz/integer.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <amount.h>
#include <arith_uint256.h>
#include <compressor.h>
#include <consensus/merkle.h>
@@ -13,19 +14,25 @@
#include <netbase.h>
#include <policy/settings.h>
#include <pow.h>
+#include <protocol.h>
#include <pubkey.h>
#include <rpc/util.h>
#include <script/signingprovider.h>
#include <script/standard.h>
#include <serialize.h>
+#include <streams.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
+#include <time.h>
#include <uint256.h>
+#include <util/moneystr.h>
#include <util/strencodings.h>
#include <util/system.h>
#include <util/time.h>
+#include <version.h>
#include <cassert>
+#include <chrono>
#include <limits>
#include <vector>
@@ -53,10 +60,18 @@ void test_one_input(const std::vector<uint8_t>& buffer)
// We cannot assume a specific value of std::is_signed<char>::value:
// ConsumeIntegral<char>() instead of casting from {u,}int8_t.
const char ch = fuzzed_data_provider.ConsumeIntegral<char>();
+ const bool b = fuzzed_data_provider.ConsumeBool();
const Consensus::Params& consensus_params = Params().GetConsensus();
(void)CheckProofOfWork(u256, u32, consensus_params);
- (void)CompressAmount(u64);
+ if (u64 <= MAX_MONEY) {
+ const uint64_t compressed_money_amount = CompressAmount(u64);
+ assert(u64 == DecompressAmount(compressed_money_amount));
+ static const uint64_t compressed_money_amount_max = CompressAmount(MAX_MONEY - 1);
+ assert(compressed_money_amount <= compressed_money_amount_max);
+ } else {
+ (void)CompressAmount(u64);
+ }
static const uint256 u256_min(uint256S("0000000000000000000000000000000000000000000000000000000000000000"));
static const uint256 u256_max(uint256S("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
const std::vector<uint256> v256{u256, u256_min, u256_max};
@@ -65,11 +80,19 @@ void test_one_input(const std::vector<uint8_t>& buffer)
(void)DecompressAmount(u64);
(void)FormatISO8601Date(i64);
(void)FormatISO8601DateTime(i64);
+ // FormatMoney(i) not defined when i == std::numeric_limits<int64_t>::min()
+ if (i64 != std::numeric_limits<int64_t>::min()) {
+ int64_t parsed_money;
+ if (ParseMoney(FormatMoney(i64), parsed_money)) {
+ assert(parsed_money == i64);
+ }
+ }
(void)GetSizeOfCompactSize(u64);
(void)GetSpecialScriptSize(u32);
// (void)GetVirtualTransactionSize(i64, i64); // function defined only for a subset of int64_t inputs
// (void)GetVirtualTransactionSize(i64, i64, u32); // function defined only for a subset of int64_t/uint32_t inputs
(void)HexDigit(ch);
+ (void)MoneyRange(i64);
(void)i64tostr(i64);
(void)IsDigit(ch);
(void)IsSpace(ch);
@@ -95,6 +118,16 @@ void test_one_input(const std::vector<uint8_t>& buffer)
(void)SipHashUint256(u64, u64, u256);
(void)SipHashUint256Extra(u64, u64, u256, u32);
(void)ToLower(ch);
+ (void)ToUpper(ch);
+ // ValueFromAmount(i) not defined when i == std::numeric_limits<int64_t>::min()
+ if (i64 != std::numeric_limits<int64_t>::min()) {
+ int64_t parsed_money;
+ if (ParseMoney(ValueFromAmount(i64).getValStr(), parsed_money)) {
+ assert(parsed_money == i64);
+ }
+ }
+ const std::chrono::seconds seconds{i64};
+ assert(count_seconds(seconds) == i64);
const arith_uint256 au256 = UintToArith256(u256);
assert(ArithToUint256(au256) == u256);
@@ -124,4 +157,114 @@ void test_one_input(const std::vector<uint8_t>& buffer)
(void)GetScriptForDestination(destination);
(void)IsValidDestination(destination);
}
+
+ {
+ CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+
+ uint256 deserialized_u256;
+ stream << u256;
+ stream >> deserialized_u256;
+ assert(u256 == deserialized_u256 && stream.empty());
+
+ uint160 deserialized_u160;
+ stream << u160;
+ stream >> deserialized_u160;
+ assert(u160 == deserialized_u160 && stream.empty());
+
+ uint64_t deserialized_u64;
+ stream << u64;
+ stream >> deserialized_u64;
+ assert(u64 == deserialized_u64 && stream.empty());
+
+ int64_t deserialized_i64;
+ stream << i64;
+ stream >> deserialized_i64;
+ assert(i64 == deserialized_i64 && stream.empty());
+
+ uint32_t deserialized_u32;
+ stream << u32;
+ stream >> deserialized_u32;
+ assert(u32 == deserialized_u32 && stream.empty());
+
+ int32_t deserialized_i32;
+ stream << i32;
+ stream >> deserialized_i32;
+ assert(i32 == deserialized_i32 && stream.empty());
+
+ uint16_t deserialized_u16;
+ stream << u16;
+ stream >> deserialized_u16;
+ assert(u16 == deserialized_u16 && stream.empty());
+
+ int16_t deserialized_i16;
+ stream << i16;
+ stream >> deserialized_i16;
+ assert(i16 == deserialized_i16 && stream.empty());
+
+ uint8_t deserialized_u8;
+ stream << u8;
+ stream >> deserialized_u8;
+ assert(u8 == deserialized_u8 && stream.empty());
+
+ int8_t deserialized_i8;
+ stream << i8;
+ stream >> deserialized_i8;
+ assert(i8 == deserialized_i8 && stream.empty());
+
+ char deserialized_ch;
+ stream << ch;
+ stream >> deserialized_ch;
+ assert(ch == deserialized_ch && stream.empty());
+
+ bool deserialized_b;
+ stream << b;
+ stream >> deserialized_b;
+ assert(b == deserialized_b && stream.empty());
+ }
+
+ {
+ const ServiceFlags service_flags = (ServiceFlags)u64;
+ (void)HasAllDesirableServiceFlags(service_flags);
+ (void)MayHaveUsefulAddressDB(service_flags);
+ }
+
+ {
+ CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+
+ ser_writedata64(stream, u64);
+ const uint64_t deserialized_u64 = ser_readdata64(stream);
+ assert(u64 == deserialized_u64 && stream.empty());
+
+ ser_writedata32(stream, u32);
+ const uint32_t deserialized_u32 = ser_readdata32(stream);
+ assert(u32 == deserialized_u32 && stream.empty());
+
+ ser_writedata32be(stream, u32);
+ const uint32_t deserialized_u32be = ser_readdata32be(stream);
+ assert(u32 == deserialized_u32be && stream.empty());
+
+ ser_writedata16(stream, u16);
+ const uint16_t deserialized_u16 = ser_readdata16(stream);
+ assert(u16 == deserialized_u16 && stream.empty());
+
+ ser_writedata16be(stream, u16);
+ const uint16_t deserialized_u16be = ser_readdata16be(stream);
+ assert(u16 == deserialized_u16be && stream.empty());
+
+ ser_writedata8(stream, u8);
+ const uint8_t deserialized_u8 = ser_readdata8(stream);
+ assert(u8 == deserialized_u8 && stream.empty());
+ }
+
+ {
+ CDataStream stream(SER_NETWORK, INIT_PROTO_VERSION);
+
+ WriteCompactSize(stream, u64);
+ try {
+ const uint64_t deserialized_u64 = ReadCompactSize(stream);
+ assert(u64 == deserialized_u64 && stream.empty());
+ }
+ catch (const std::ios_base::failure&) {
+ }
+ }
}
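The WriteCompactSize/ReadCompactSize round trip at the end relies on the variable-length CompactSize encoding. For reference, the on-wire length that GetSizeOfCompactSize(u64) reports follows this scheme (sketch only; the actual encoder and decoder live in serialize.h):

    #include <cstdint>

    unsigned int CompactSizeLengthSketch(uint64_t n)
    {
        if (n < 253) return 1;              // value fits directly in a single byte
        if (n <= 0xFFFF) return 1 + 2;      // 0xFD marker followed by a uint16
        if (n <= 0xFFFFFFFF) return 1 + 4;  // 0xFE marker followed by a uint32
        return 1 + 8;                       // 0xFF marker followed by a uint64
    }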
diff --git a/src/test/fuzz/key.cpp b/src/test/fuzz/key.cpp
new file mode 100644
index 0000000000..1919a5f881
--- /dev/null
+++ b/src/test/fuzz/key.cpp
@@ -0,0 +1,309 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <chainparams.h>
+#include <chainparamsbase.h>
+#include <key.h>
+#include <key_io.h>
+#include <outputtype.h>
+#include <policy/policy.h>
+#include <pubkey.h>
+#include <rpc/util.h>
+#include <script/keyorigin.h>
+#include <script/script.h>
+#include <script/sign.h>
+#include <script/signingprovider.h>
+#include <script/standard.h>
+#include <streams.h>
+#include <test/fuzz/fuzz.h>
+#include <util/memory.h>
+#include <util/strencodings.h>
+
+#include <cassert>
+#include <cstdint>
+#include <numeric>
+#include <string>
+#include <vector>
+
+void initialize()
+{
+ static const ECCVerifyHandle ecc_verify_handle;
+ ECC_Start();
+ SelectParams(CBaseChainParams::REGTEST);
+}
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ const CKey key = [&] {
+ CKey k;
+ k.Set(buffer.begin(), buffer.end(), true);
+ return k;
+ }();
+ if (!key.IsValid()) {
+ return;
+ }
+
+ {
+ assert(key.begin() + key.size() == key.end());
+ assert(key.IsCompressed());
+ assert(key.size() == 32);
+ assert(DecodeSecret(EncodeSecret(key)) == key);
+ }
+
+ {
+ CKey invalid_key;
+ assert(!(invalid_key == key));
+ assert(!invalid_key.IsCompressed());
+ assert(!invalid_key.IsValid());
+ assert(invalid_key.size() == 0);
+ }
+
+ {
+ CKey uncompressed_key;
+ uncompressed_key.Set(buffer.begin(), buffer.end(), false);
+ assert(!(uncompressed_key == key));
+ assert(!uncompressed_key.IsCompressed());
+ assert(key.size() == 32);
+ assert(uncompressed_key.begin() + uncompressed_key.size() == uncompressed_key.end());
+ assert(uncompressed_key.IsValid());
+ }
+
+ {
+ CKey copied_key;
+ copied_key.Set(key.begin(), key.end(), key.IsCompressed());
+ assert(copied_key == key);
+ }
+
+ {
+ CKey negated_key = key;
+ negated_key.Negate();
+ assert(negated_key.IsValid());
+ assert(!(negated_key == key));
+
+ negated_key.Negate();
+ assert(negated_key == key);
+ }
+
+ const uint256 random_uint256 = Hash(buffer.begin(), buffer.end());
+
+ {
+ CKey child_key;
+ ChainCode child_chaincode;
+ const bool ok = key.Derive(child_key, child_chaincode, 0, random_uint256);
+ assert(ok);
+ assert(child_key.IsValid());
+ assert(!(child_key == key));
+ assert(child_chaincode != random_uint256);
+ }
+
+ const CPubKey pubkey = key.GetPubKey();
+
+ {
+ assert(pubkey.size() == 33);
+ assert(key.VerifyPubKey(pubkey));
+ assert(pubkey.GetHash() != random_uint256);
+ assert(pubkey.begin() + pubkey.size() == pubkey.end());
+ assert(pubkey.data() == pubkey.begin());
+ assert(pubkey.IsCompressed());
+ assert(pubkey.IsValid());
+ assert(pubkey.IsFullyValid());
+ assert(HexToPubKey(HexStr(pubkey.begin(), pubkey.end())) == pubkey);
+ assert(GetAllDestinationsForKey(pubkey).size() == 3);
+ }
+
+ {
+ CDataStream data_stream{SER_NETWORK, INIT_PROTO_VERSION};
+ pubkey.Serialize(data_stream);
+
+ CPubKey pubkey_deserialized;
+ pubkey_deserialized.Unserialize(data_stream);
+ assert(pubkey_deserialized == pubkey);
+ }
+
+ {
+ const CScript tx_pubkey_script = GetScriptForRawPubKey(pubkey);
+ assert(!tx_pubkey_script.IsPayToScriptHash());
+ assert(!tx_pubkey_script.IsPayToWitnessScriptHash());
+ assert(!tx_pubkey_script.IsPushOnly());
+ assert(!tx_pubkey_script.IsUnspendable());
+ assert(tx_pubkey_script.HasValidOps());
+ assert(tx_pubkey_script.size() == 35);
+
+ const CScript tx_multisig_script = GetScriptForMultisig(1, {pubkey});
+ assert(!tx_multisig_script.IsPayToScriptHash());
+ assert(!tx_multisig_script.IsPayToWitnessScriptHash());
+ assert(!tx_multisig_script.IsPushOnly());
+ assert(!tx_multisig_script.IsUnspendable());
+ assert(tx_multisig_script.HasValidOps());
+ assert(tx_multisig_script.size() == 37);
+
+ FillableSigningProvider fillable_signing_provider;
+ assert(IsSolvable(fillable_signing_provider, tx_pubkey_script));
+ assert(IsSolvable(fillable_signing_provider, tx_multisig_script));
+ assert(!IsSegWitOutput(fillable_signing_provider, tx_pubkey_script));
+ assert(!IsSegWitOutput(fillable_signing_provider, tx_multisig_script));
+ assert(fillable_signing_provider.GetKeys().size() == 0);
+ assert(!fillable_signing_provider.HaveKey(pubkey.GetID()));
+
+ const bool ok_add_key = fillable_signing_provider.AddKey(key);
+ assert(ok_add_key);
+ assert(fillable_signing_provider.HaveKey(pubkey.GetID()));
+
+ FillableSigningProvider fillable_signing_provider_pub;
+ assert(!fillable_signing_provider_pub.HaveKey(pubkey.GetID()));
+
+ const bool ok_add_key_pubkey = fillable_signing_provider_pub.AddKeyPubKey(key, pubkey);
+ assert(ok_add_key_pubkey);
+ assert(fillable_signing_provider_pub.HaveKey(pubkey.GetID()));
+
+ txnouttype which_type_tx_pubkey;
+ const bool is_standard_tx_pubkey = IsStandard(tx_pubkey_script, which_type_tx_pubkey);
+ assert(is_standard_tx_pubkey);
+ assert(which_type_tx_pubkey == txnouttype::TX_PUBKEY);
+
+ txnouttype which_type_tx_multisig;
+ const bool is_standard_tx_multisig = IsStandard(tx_multisig_script, which_type_tx_multisig);
+ assert(is_standard_tx_multisig);
+ assert(which_type_tx_multisig == txnouttype::TX_MULTISIG);
+
+ std::vector<std::vector<unsigned char>> v_solutions_ret_tx_pubkey;
+ const txnouttype outtype_tx_pubkey = Solver(tx_pubkey_script, v_solutions_ret_tx_pubkey);
+ assert(outtype_tx_pubkey == txnouttype::TX_PUBKEY);
+ assert(v_solutions_ret_tx_pubkey.size() == 1);
+ assert(v_solutions_ret_tx_pubkey[0].size() == 33);
+
+ std::vector<std::vector<unsigned char>> v_solutions_ret_tx_multisig;
+ const txnouttype outtype_tx_multisig = Solver(tx_multisig_script, v_solutions_ret_tx_multisig);
+ assert(outtype_tx_multisig == txnouttype::TX_MULTISIG);
+ assert(v_solutions_ret_tx_multisig.size() == 3);
+ assert(v_solutions_ret_tx_multisig[0].size() == 1);
+ assert(v_solutions_ret_tx_multisig[1].size() == 33);
+ assert(v_solutions_ret_tx_multisig[2].size() == 1);
+
+ OutputType output_type{};
+ const CTxDestination tx_destination = GetDestinationForKey(pubkey, output_type);
+ assert(output_type == OutputType::LEGACY);
+ assert(IsValidDestination(tx_destination));
+ assert(CTxDestination{PKHash{pubkey}} == tx_destination);
+
+ const CScript script_for_destination = GetScriptForDestination(tx_destination);
+ assert(script_for_destination.size() == 25);
+
+ const std::string destination_address = EncodeDestination(tx_destination);
+ assert(DecodeDestination(destination_address) == tx_destination);
+
+ const CPubKey pubkey_from_address_string = AddrToPubKey(fillable_signing_provider, destination_address);
+ assert(pubkey_from_address_string == pubkey);
+
+ CKeyID key_id = pubkey.GetID();
+ assert(!key_id.IsNull());
+ assert(key_id == CKeyID{key_id});
+ assert(key_id == GetKeyForDestination(fillable_signing_provider, tx_destination));
+
+ CPubKey pubkey_out;
+ const bool ok_get_pubkey = fillable_signing_provider.GetPubKey(key_id, pubkey_out);
+ assert(ok_get_pubkey);
+
+ CKey key_out;
+ const bool ok_get_key = fillable_signing_provider.GetKey(key_id, key_out);
+ assert(ok_get_key);
+ assert(fillable_signing_provider.GetKeys().size() == 1);
+ assert(fillable_signing_provider.HaveKey(key_id));
+
+ KeyOriginInfo key_origin_info;
+ const bool ok_get_key_origin = fillable_signing_provider.GetKeyOrigin(key_id, key_origin_info);
+ assert(!ok_get_key_origin);
+ }
+
+ {
+ const std::vector<unsigned char> vch_pubkey{pubkey.begin(), pubkey.end()};
+ assert(CPubKey::ValidSize(vch_pubkey));
+ assert(!CPubKey::ValidSize({pubkey.begin(), pubkey.begin() + pubkey.size() - 1}));
+
+ const CPubKey pubkey_ctor_1{vch_pubkey};
+ assert(pubkey == pubkey_ctor_1);
+
+ const CPubKey pubkey_ctor_2{vch_pubkey.begin(), vch_pubkey.end()};
+ assert(pubkey == pubkey_ctor_2);
+
+ CPubKey pubkey_set;
+ pubkey_set.Set(vch_pubkey.begin(), vch_pubkey.end());
+ assert(pubkey == pubkey_set);
+ }
+
+ {
+ const CPubKey invalid_pubkey{};
+ assert(!invalid_pubkey.IsValid());
+ assert(!invalid_pubkey.IsFullyValid());
+ assert(!(pubkey == invalid_pubkey));
+ assert(pubkey != invalid_pubkey);
+ assert(pubkey < invalid_pubkey);
+ }
+
+ {
+ // Cover CPubKey's operator[](unsigned int pos)
+ unsigned int sum = 0;
+ for (size_t i = 0; i < pubkey.size(); ++i) {
+ sum += pubkey[i];
+ }
+ assert(std::accumulate(pubkey.begin(), pubkey.end(), 0U) == sum);
+ }
+
+ {
+ CPubKey decompressed_pubkey = pubkey;
+ assert(decompressed_pubkey.IsCompressed());
+
+ const bool ok = decompressed_pubkey.Decompress();
+ assert(ok);
+ assert(!decompressed_pubkey.IsCompressed());
+ assert(decompressed_pubkey.size() == 65);
+ }
+
+ {
+ std::vector<unsigned char> vch_sig;
+ const bool ok = key.Sign(random_uint256, vch_sig, false);
+ assert(ok);
+ assert(pubkey.Verify(random_uint256, vch_sig));
+ assert(CPubKey::CheckLowS(vch_sig));
+
+ const std::vector<unsigned char> vch_invalid_sig{vch_sig.begin(), vch_sig.begin() + vch_sig.size() - 1};
+ assert(!pubkey.Verify(random_uint256, vch_invalid_sig));
+ assert(!CPubKey::CheckLowS(vch_invalid_sig));
+ }
+
+ {
+ std::vector<unsigned char> vch_compact_sig;
+ const bool ok_sign_compact = key.SignCompact(random_uint256, vch_compact_sig);
+ assert(ok_sign_compact);
+
+ CPubKey recover_pubkey;
+ const bool ok_recover_compact = recover_pubkey.RecoverCompact(random_uint256, vch_compact_sig);
+ assert(ok_recover_compact);
+ assert(recover_pubkey == pubkey);
+ }
+
+ {
+ CPubKey child_pubkey;
+ ChainCode child_chaincode;
+ const bool ok = pubkey.Derive(child_pubkey, child_chaincode, 0, random_uint256);
+ assert(ok);
+ assert(child_pubkey != pubkey);
+ assert(child_pubkey.IsCompressed());
+ assert(child_pubkey.IsFullyValid());
+ assert(child_pubkey.IsValid());
+ assert(child_pubkey.size() == 33);
+ assert(child_chaincode != random_uint256);
+ }
+
+ const CPrivKey priv_key = key.GetPrivKey();
+
+ {
+ for (const bool skip_check : {true, false}) {
+ CKey loaded_key;
+ const bool ok = loaded_key.Load(priv_key, pubkey, skip_check);
+ assert(ok);
+ assert(key == loaded_key);
+ }
+ }
+}
diff --git a/src/test/fuzz/key_io.cpp b/src/test/fuzz/key_io.cpp
new file mode 100644
index 0000000000..62aefb650d
--- /dev/null
+++ b/src/test/fuzz/key_io.cpp
@@ -0,0 +1,50 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <chainparams.h>
+#include <key_io.h>
+#include <rpc/util.h>
+#include <script/signingprovider.h>
+#include <script/standard.h>
+#include <test/fuzz/fuzz.h>
+
+#include <cassert>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void initialize()
+{
+ static const ECCVerifyHandle verify_handle;
+ ECC_Start();
+ SelectParams(CBaseChainParams::MAIN);
+}
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ const std::string random_string(buffer.begin(), buffer.end());
+
+ const CKey key = DecodeSecret(random_string);
+ if (key.IsValid()) {
+ assert(key == DecodeSecret(EncodeSecret(key)));
+ }
+
+ const CExtKey ext_key = DecodeExtKey(random_string);
+ if (ext_key.key.size() == 32) {
+ assert(ext_key == DecodeExtKey(EncodeExtKey(ext_key)));
+ }
+
+ const CExtPubKey ext_pub_key = DecodeExtPubKey(random_string);
+ if (ext_pub_key.pubkey.size() == CPubKey::COMPRESSED_SIZE) {
+ assert(ext_pub_key == DecodeExtPubKey(EncodeExtPubKey(ext_pub_key)));
+ }
+
+ const CTxDestination tx_destination = DecodeDestination(random_string);
+ (void)DescribeAddress(tx_destination);
+ (void)GetKeyForDestination(/* store */ {}, tx_destination);
+ (void)GetScriptForDestination(tx_destination);
+ (void)IsValidDestination(tx_destination);
+
+ (void)IsValidDestinationString(random_string);
+}
diff --git a/src/test/fuzz/locale.cpp b/src/test/fuzz/locale.cpp
new file mode 100644
index 0000000000..c8288123e8
--- /dev/null
+++ b/src/test/fuzz/locale.cpp
@@ -0,0 +1,96 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <tinyformat.h>
+#include <util/strencodings.h>
+
+#include <cassert>
+#include <clocale>
+#include <cstdint>
+#include <locale>
+#include <string>
+#include <vector>
+
+namespace {
+const std::string locale_identifiers[] = {
+ "C", "C.UTF-8", "aa_DJ", "aa_DJ.ISO-8859-1", "aa_DJ.UTF-8", "aa_ER", "aa_ER.UTF-8", "aa_ET", "aa_ET.UTF-8", "af_ZA", "af_ZA.ISO-8859-1", "af_ZA.UTF-8", "agr_PE", "agr_PE.UTF-8", "ak_GH", "ak_GH.UTF-8", "am_ET", "am_ET.UTF-8", "an_ES", "an_ES.ISO-8859-15", "an_ES.UTF-8", "anp_IN", "anp_IN.UTF-8", "ar_AE", "ar_AE.ISO-8859-6", "ar_AE.UTF-8", "ar_BH", "ar_BH.ISO-8859-6", "ar_BH.UTF-8", "ar_DZ", "ar_DZ.ISO-8859-6", "ar_DZ.UTF-8", "ar_EG", "ar_EG.ISO-8859-6", "ar_EG.UTF-8", "ar_IN", "ar_IN.UTF-8", "ar_IQ", "ar_IQ.ISO-8859-6", "ar_IQ.UTF-8", "ar_JO", "ar_JO.ISO-8859-6", "ar_JO.UTF-8", "ar_KW", "ar_KW.ISO-8859-6", "ar_KW.UTF-8", "ar_LB", "ar_LB.ISO-8859-6", "ar_LB.UTF-8", "ar_LY", "ar_LY.ISO-8859-6", "ar_LY.UTF-8", "ar_MA", "ar_MA.ISO-8859-6", "ar_MA.UTF-8", "ar_OM", "ar_OM.ISO-8859-6", "ar_OM.UTF-8", "ar_QA", "ar_QA.ISO-8859-6", "ar_QA.UTF-8", "ar_SA", "ar_SA.ISO-8859-6", "ar_SA.UTF-8", "ar_SD", "ar_SD.ISO-8859-6", "ar_SD.UTF-8", "ar_SS", "ar_SS.UTF-8", "ar_SY", "ar_SY.ISO-8859-6", "ar_SY.UTF-8", "ar_TN", "ar_TN.ISO-8859-6", "ar_TN.UTF-8", "ar_YE", "ar_YE.ISO-8859-6", "ar_YE.UTF-8", "as_IN", "as_IN.UTF-8", "ast_ES", "ast_ES.ISO-8859-15", "ast_ES.UTF-8", "ayc_PE", "ayc_PE.UTF-8", "az_AZ", "az_AZ.UTF-8", "az_IR", "az_IR.UTF-8", "be_BY", "be_BY.CP1251", "be_BY.UTF-8", "bem_ZM", "bem_ZM.UTF-8", "ber_DZ", "ber_DZ.UTF-8", "ber_MA", "ber_MA.UTF-8", "bg_BG", "bg_BG.CP1251", "bg_BG.UTF-8", "bho_IN", "bho_IN.UTF-8", "bho_NP", "bho_NP.UTF-8", "bi_VU", "bi_VU.UTF-8", "bn_BD", "bn_BD.UTF-8", "bn_IN", "bn_IN.UTF-8", "bo_CN", "bo_CN.UTF-8", "bo_IN", "bo_IN.UTF-8", "br_FR", "br_FR.ISO-8859-1", "br_FR.UTF-8", "brx_IN", "brx_IN.UTF-8", "bs_BA", "bs_BA.ISO-8859-2", "bs_BA.UTF-8", "byn_ER", "byn_ER.UTF-8", "ca_AD", "ca_AD.ISO-8859-15", "ca_AD.UTF-8", "ca_ES", "ca_ES.ISO-8859-1", "ca_ES.UTF-8", "ca_FR", "ca_FR.ISO-8859-15", "ca_FR.UTF-8", "ca_IT", "ca_IT.ISO-8859-15", "ca_IT.UTF-8", "ce_RU", "ce_RU.UTF-8", "chr_US", "chr_US.UTF-8", "ckb_IQ", "ckb_IQ.UTF-8", "cmn_TW", "cmn_TW.UTF-8", "crh_UA", "crh_UA.UTF-8", "csb_PL", "csb_PL.UTF-8", "cs_CZ", "cs_CZ.ISO-8859-2", "cs_CZ.UTF-8", "cv_RU", "cv_RU.UTF-8", "cy_GB", "cy_GB.ISO-8859-14", "cy_GB.UTF-8", "da_DK", "da_DK.ISO-8859-1", "da_DK.UTF-8", "de_AT", "de_AT.ISO-8859-1", "de_AT.UTF-8", "de_BE", "de_BE.ISO-8859-1", "de_BE.UTF-8", "de_CH", "de_CH.ISO-8859-1", "de_CH.UTF-8", "de_DE", "de_DE.ISO-8859-1", "de_DE.UTF-8", "de_IT", "de_IT.ISO-8859-1", "de_IT.UTF-8", "de_LU", "de_LU.ISO-8859-1", "de_LU.UTF-8", "doi_IN", "doi_IN.UTF-8", "dv_MV", "dv_MV.UTF-8", "dz_BT", "dz_BT.UTF-8", "el_CY", "el_CY.ISO-8859-7", "el_CY.UTF-8", "el_GR", "el_GR.ISO-8859-7", "el_GR.UTF-8", "en_AG", "en_AG.UTF-8", "en_AU", "en_AU.ISO-8859-1", "en_AU.UTF-8", "en_BW", "en_BW.ISO-8859-1", "en_BW.UTF-8", "en_CA", "en_CA.ISO-8859-1", "en_CA.UTF-8", "en_DK", "en_DK.ISO-8859-1", "en_DK.ISO-8859-15", "en_DK.UTF-8", "en_GB", "en_GB.ISO-8859-1", "en_GB.ISO-8859-15", "en_GB.UTF-8", "en_HK", "en_HK.ISO-8859-1", "en_HK.UTF-8", "en_IE", "en_IE.ISO-8859-1", "en_IE.UTF-8", "en_IL", "en_IL.UTF-8", "en_IN", "en_IN.UTF-8", "en_NG", "en_NG.UTF-8", "en_NZ", "en_NZ.ISO-8859-1", "en_NZ.UTF-8", "en_PH", "en_PH.ISO-8859-1", "en_PH.UTF-8", "en_SG", "en_SG.ISO-8859-1", "en_SG.UTF-8", "en_US", "en_US.ISO-8859-1", "en_US.ISO-8859-15", "en_US.UTF-8", "en_ZA", "en_ZA.ISO-8859-1", "en_ZA.UTF-8", "en_ZM", "en_ZM.UTF-8", "en_ZW", "en_ZW.ISO-8859-1", "en_ZW.UTF-8", "es_AR", "es_AR.ISO-8859-1", "es_AR.UTF-8", "es_BO", "es_BO.ISO-8859-1", "es_BO.UTF-8", "es_CL", "es_CL.ISO-8859-1", "es_CL.UTF-8", "es_CO", "es_CO.ISO-8859-1", 
"es_CO.UTF-8", "es_CR", "es_CR.ISO-8859-1", "es_CR.UTF-8", "es_CU", "es_CU.UTF-8", "es_DO", "es_DO.ISO-8859-1", "es_DO.UTF-8", "es_EC", "es_EC.ISO-8859-1", "es_EC.UTF-8", "es_ES", "es_ES.ISO-8859-1", "es_ES.UTF-8", "es_GT", "es_GT.ISO-8859-1", "es_GT.UTF-8", "es_HN", "es_HN.ISO-8859-1", "es_HN.UTF-8", "es_MX", "es_MX.ISO-8859-1", "es_MX.UTF-8", "es_NI", "es_NI.ISO-8859-1", "es_NI.UTF-8", "es_PA", "es_PA.ISO-8859-1", "es_PA.UTF-8", "es_PE", "es_PE.ISO-8859-1", "es_PE.UTF-8", "es_PR", "es_PR.ISO-8859-1", "es_PR.UTF-8", "es_PY", "es_PY.ISO-8859-1", "es_PY.UTF-8", "es_SV", "es_SV.ISO-8859-1", "es_SV.UTF-8", "es_US", "es_US.ISO-8859-1", "es_US.UTF-8", "es_UY", "es_UY.ISO-8859-1", "es_UY.UTF-8", "es_VE", "es_VE.ISO-8859-1", "es_VE.UTF-8", "et_EE", "et_EE.ISO-8859-1", "et_EE.ISO-8859-15", "et_EE.UTF-8", "eu_ES", "eu_ES.ISO-8859-1", "eu_ES.UTF-8", "eu_FR", "eu_FR.ISO-8859-1", "eu_FR.UTF-8", "fa_IR", "fa_IR.UTF-8", "ff_SN", "ff_SN.UTF-8", "fi_FI", "fi_FI.ISO-8859-1", "fi_FI.UTF-8", "fil_PH", "fil_PH.UTF-8", "fo_FO", "fo_FO.ISO-8859-1", "fo_FO.UTF-8", "fr_BE", "fr_BE.ISO-8859-1", "fr_BE.UTF-8", "fr_CA", "fr_CA.ISO-8859-1", "fr_CA.UTF-8", "fr_CH", "fr_CH.ISO-8859-1", "fr_CH.UTF-8", "fr_FR", "fr_FR.ISO-8859-1", "fr_FR.UTF-8", "fr_LU", "fr_LU.ISO-8859-1", "fr_LU.UTF-8", "fur_IT", "fur_IT.UTF-8", "fy_DE", "fy_DE.UTF-8", "fy_NL", "fy_NL.UTF-8", "ga_IE", "ga_IE.ISO-8859-1", "ga_IE.UTF-8", "gd_GB", "gd_GB.ISO-8859-15", "gd_GB.UTF-8", "gez_ER", "gez_ER.UTF-8", "gez_ET", "gez_ET.UTF-8", "gl_ES", "gl_ES.ISO-8859-1", "gl_ES.UTF-8", "gu_IN", "gu_IN.UTF-8", "gv_GB", "gv_GB.ISO-8859-1", "gv_GB.UTF-8", "hak_TW", "hak_TW.UTF-8", "ha_NG", "ha_NG.UTF-8", "he_IL", "he_IL.ISO-8859-8", "he_IL.UTF-8", "hif_FJ", "hif_FJ.UTF-8", "hi_IN", "hi_IN.UTF-8", "hne_IN", "hne_IN.UTF-8", "hr_HR", "hr_HR.ISO-8859-2", "hr_HR.UTF-8", "hsb_DE", "hsb_DE.ISO-8859-2", "hsb_DE.UTF-8", "ht_HT", "ht_HT.UTF-8", "hu_HU", "hu_HU.ISO-8859-2", "hu_HU.UTF-8", "hy_AM", "hy_AM.ARMSCII-8", "hy_AM.UTF-8", "ia_FR", "ia_FR.UTF-8", "id_ID", "id_ID.ISO-8859-1", "id_ID.UTF-8", "ig_NG", "ig_NG.UTF-8", "ik_CA", "ik_CA.UTF-8", "is_IS", "is_IS.ISO-8859-1", "is_IS.UTF-8", "it_CH", "it_CH.ISO-8859-1", "it_CH.UTF-8", "it_IT", "it_IT.ISO-8859-1", "it_IT.UTF-8", "iu_CA", "iu_CA.UTF-8", "kab_DZ", "kab_DZ.UTF-8", "ka_GE", "ka_GE.GEORGIAN-PS", "ka_GE.UTF-8", "kk_KZ", "kk_KZ.PT154", "kk_KZ.RK1048", "kk_KZ.UTF-8", "kl_GL", "kl_GL.ISO-8859-1", "kl_GL.UTF-8", "km_KH", "km_KH.UTF-8", "kn_IN", "kn_IN.UTF-8", "kok_IN", "kok_IN.UTF-8", "ks_IN", "ks_IN.UTF-8", "ku_TR", "ku_TR.ISO-8859-9", "ku_TR.UTF-8", "kw_GB", "kw_GB.ISO-8859-1", "kw_GB.UTF-8", "ky_KG", "ky_KG.UTF-8", "lb_LU", "lb_LU.UTF-8", "lg_UG", "lg_UG.ISO-8859-10", "lg_UG.UTF-8", "li_BE", "li_BE.UTF-8", "lij_IT", "lij_IT.UTF-8", "li_NL", "li_NL.UTF-8", "ln_CD", "ln_CD.UTF-8", "lo_LA", "lo_LA.UTF-8", "lt_LT", "lt_LT.ISO-8859-13", "lt_LT.UTF-8", "lv_LV", "lv_LV.ISO-8859-13", "lv_LV.UTF-8", "lzh_TW", "lzh_TW.UTF-8", "mag_IN", "mag_IN.UTF-8", "mai_IN", "mai_IN.UTF-8", "mai_NP", "mai_NP.UTF-8", "mfe_MU", "mfe_MU.UTF-8", "mg_MG", "mg_MG.ISO-8859-15", "mg_MG.UTF-8", "mhr_RU", "mhr_RU.UTF-8", "mi_NZ", "mi_NZ.ISO-8859-13", "mi_NZ.UTF-8", "miq_NI", "miq_NI.UTF-8", "mjw_IN", "mjw_IN.UTF-8", "mk_MK", "mk_MK.ISO-8859-5", "mk_MK.UTF-8", "ml_IN", "ml_IN.UTF-8", "mni_IN", "mni_IN.UTF-8", "mn_MN", "mn_MN.UTF-8", "mr_IN", "mr_IN.UTF-8", "ms_MY", "ms_MY.ISO-8859-1", "ms_MY.UTF-8", "mt_MT", "mt_MT.ISO-8859-3", "mt_MT.UTF-8", "my_MM", "my_MM.UTF-8", "nan_TW", "nan_TW.UTF-8", "nb_NO", "nb_NO.ISO-8859-1", "nb_NO.UTF-8", "nds_DE", 
"nds_DE.UTF-8", "nds_NL", "nds_NL.UTF-8", "ne_NP", "ne_NP.UTF-8", "nhn_MX", "nhn_MX.UTF-8", "niu_NU", "niu_NU.UTF-8", "niu_NZ", "niu_NZ.UTF-8", "nl_AW", "nl_AW.UTF-8", "nl_BE", "nl_BE.ISO-8859-1", "nl_BE.UTF-8", "nl_NL", "nl_NL.ISO-8859-1", "nl_NL.UTF-8", "nn_NO", "nn_NO.ISO-8859-1", "nn_NO.UTF-8", "nr_ZA", "nr_ZA.UTF-8", "nso_ZA", "nso_ZA.UTF-8", "oc_FR", "oc_FR.ISO-8859-1", "oc_FR.UTF-8", "om_ET", "om_ET.UTF-8", "om_KE", "om_KE.ISO-8859-1", "om_KE.UTF-8", "or_IN", "or_IN.UTF-8", "os_RU", "os_RU.UTF-8", "pa_IN", "pa_IN.UTF-8", "pap_AW", "pap_AW.UTF-8", "pap_CW", "pap_CW.UTF-8", "pa_PK", "pa_PK.UTF-8", "pl_PL", "pl_PL.ISO-8859-2", "pl_PL.UTF-8", "ps_AF", "ps_AF.UTF-8", "pt_BR", "pt_BR.ISO-8859-1", "pt_BR.UTF-8", "pt_PT", "pt_PT.ISO-8859-1", "pt_PT.UTF-8", "quz_PE", "quz_PE.UTF-8", "raj_IN", "raj_IN.UTF-8", "ro_RO", "ro_RO.ISO-8859-2", "ro_RO.UTF-8", "ru_RU", "ru_RU.CP1251", "ru_RU.ISO-8859-5", "ru_RU.KOI8-R", "ru_RU.UTF-8", "ru_UA", "ru_UA.KOI8-U", "ru_UA.UTF-8", "rw_RW", "rw_RW.UTF-8", "sa_IN", "sa_IN.UTF-8", "sat_IN", "sat_IN.UTF-8", "sc_IT", "sc_IT.UTF-8", "sd_IN", "sd_IN.UTF-8", "sd_PK", "sd_PK.UTF-8", "se_NO", "se_NO.UTF-8", "sgs_LT", "sgs_LT.UTF-8", "shn_MM", "shn_MM.UTF-8", "shs_CA", "shs_CA.UTF-8", "sid_ET", "sid_ET.UTF-8", "si_LK", "si_LK.UTF-8", "sk_SK", "sk_SK.ISO-8859-2", "sk_SK.UTF-8", "sl_SI", "sl_SI.ISO-8859-2", "sl_SI.UTF-8", "sm_WS", "sm_WS.UTF-8", "so_DJ", "so_DJ.ISO-8859-1", "so_DJ.UTF-8", "so_ET", "so_ET.UTF-8", "so_KE", "so_KE.ISO-8859-1", "so_KE.UTF-8", "so_SO", "so_SO.ISO-8859-1", "so_SO.UTF-8", "sq_AL", "sq_AL.ISO-8859-1", "sq_AL.UTF-8", "sq_MK", "sq_MK.UTF-8", "sr_ME", "sr_ME.UTF-8", "sr_RS", "sr_RS.UTF-8", "ss_ZA", "ss_ZA.UTF-8", "st_ZA", "st_ZA.ISO-8859-1", "st_ZA.UTF-8", "sv_FI", "sv_FI.ISO-8859-1", "sv_FI.UTF-8", "sv_SE", "sv_SE.ISO-8859-1", "sv_SE.ISO-8859-15", "sv_SE.UTF-8", "sw_KE", "sw_KE.UTF-8", "sw_TZ", "sw_TZ.UTF-8", "szl_PL", "szl_PL.UTF-8", "ta_IN", "ta_IN.UTF-8", "ta_LK", "ta_LK.UTF-8", "te_IN", "te_IN.UTF-8", "tg_TJ", "tg_TJ.KOI8-T", "tg_TJ.UTF-8", "the_NP", "the_NP.UTF-8", "th_TH", "th_TH.TIS-620", "th_TH.UTF-8", "ti_ER", "ti_ER.UTF-8", "ti_ET", "ti_ET.UTF-8", "tig_ER", "tig_ER.UTF-8", "tk_TM", "tk_TM.UTF-8", "tl_PH", "tl_PH.ISO-8859-1", "tl_PH.UTF-8", "tn_ZA", "tn_ZA.UTF-8", "to_TO", "to_TO.UTF-8", "tpi_PG", "tpi_PG.UTF-8", "tr_CY", "tr_CY.ISO-8859-9", "tr_CY.UTF-8", "tr_TR", "tr_TR.ISO-8859-9", "tr_TR.UTF-8", "ts_ZA", "ts_ZA.UTF-8", "tt_RU", "tt_RU.UTF-8", "ug_CN", "ug_CN.UTF-8", "uk_UA", "uk_UA.KOI8-U", "uk_UA.UTF-8", "unm_US", "unm_US.UTF-8", "ur_IN", "ur_IN.UTF-8", "ur_PK", "ur_PK.UTF-8", "uz_UZ", "uz_UZ.ISO-8859-1", "uz_UZ.UTF-8", "ve_ZA", "ve_ZA.UTF-8", "vi_VN", "vi_VN.UTF-8", "wa_BE", "wa_BE.ISO-8859-1", "wa_BE.UTF-8", "wae_CH", "wae_CH.UTF-8", "wal_ET", "wal_ET.UTF-8", "wo_SN", "wo_SN.UTF-8", "xh_ZA", "xh_ZA.ISO-8859-1", "xh_ZA.UTF-8", "yi_US", "yi_US.CP1255", "yi_US.UTF-8", "yo_NG", "yo_NG.UTF-8", "yue_HK", "yue_HK.UTF-8", "yuw_PG", "yuw_PG.UTF-8", "zh_CN", "zh_CN.GB18030", "zh_CN.GB2312", "zh_CN.GBK", "zh_CN.UTF-8", "zh_HK", "zh_HK.BIG5-HKSCS", "zh_HK.UTF-8", "zh_SG", "zh_SG.GB2312", "zh_SG.GBK", "zh_SG.UTF-8", "zh_TW", "zh_TW.BIG5", "zh_TW.EUC-TW", "zh_TW.UTF-8", "zu_ZA", "zu_ZA.ISO-8859-1", "zu_ZA.UTF-8"
+};
+
+std::string ConsumeLocaleIdentifier(FuzzedDataProvider& fuzzed_data_provider)
+{
+ return fuzzed_data_provider.PickValueInArray<std::string>(locale_identifiers);
+}
+
+bool IsAvailableLocale(const std::string& locale_identifier)
+{
+ try {
+ (void)std::locale(locale_identifier);
+ } catch (const std::runtime_error&) {
+ return false;
+ }
+ return true;
+}
+} // namespace
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const std::string locale_identifier = ConsumeLocaleIdentifier(fuzzed_data_provider);
+ if (!IsAvailableLocale(locale_identifier)) {
+ return;
+ }
+ const char* c_locale = std::setlocale(LC_ALL, "C");
+ assert(c_locale != nullptr);
+
+ const std::string random_string = fuzzed_data_provider.ConsumeRandomLengthString(5);
+ int32_t parseint32_out_without_locale;
+ const bool parseint32_without_locale = ParseInt32(random_string, &parseint32_out_without_locale);
+ int64_t parseint64_out_without_locale;
+ const bool parseint64_without_locale = ParseInt64(random_string, &parseint64_out_without_locale);
+ const int64_t atoi64_without_locale = atoi64(random_string);
+ const int atoi_without_locale = atoi(random_string);
+ const int64_t atoi64c_without_locale = atoi64(random_string.c_str());
+ const int64_t random_int64 = fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ const std::string i64tostr_without_locale = i64tostr(random_int64);
+ const int32_t random_int32 = fuzzed_data_provider.ConsumeIntegral<int32_t>();
+ const std::string itostr_without_locale = itostr(random_int32);
+ const std::string strprintf_int_without_locale = strprintf("%d", random_int64);
+ const double random_double = fuzzed_data_provider.ConsumeFloatingPoint<double>();
+ const std::string strprintf_double_without_locale = strprintf("%f", random_double);
+
+ const char* new_locale = std::setlocale(LC_ALL, locale_identifier.c_str());
+ assert(new_locale != nullptr);
+
+ int32_t parseint32_out_with_locale;
+ const bool parseint32_with_locale = ParseInt32(random_string, &parseint32_out_with_locale);
+ assert(parseint32_without_locale == parseint32_with_locale);
+ if (parseint32_without_locale) {
+ assert(parseint32_out_without_locale == parseint32_out_with_locale);
+ }
+ int64_t parseint64_out_with_locale;
+ const bool parseint64_with_locale = ParseInt64(random_string, &parseint64_out_with_locale);
+ assert(parseint64_without_locale == parseint64_with_locale);
+ if (parseint64_without_locale) {
+ assert(parseint64_out_without_locale == parseint64_out_with_locale);
+ }
+ const int64_t atoi64_with_locale = atoi64(random_string);
+ assert(atoi64_without_locale == atoi64_with_locale);
+ const int64_t atoi64c_with_locale = atoi64(random_string.c_str());
+ assert(atoi64c_without_locale == atoi64c_with_locale);
+ const int atoi_with_locale = atoi(random_string);
+ assert(atoi_without_locale == atoi_with_locale);
+ const std::string i64tostr_with_locale = i64tostr(random_int64);
+ assert(i64tostr_without_locale == i64tostr_with_locale);
+ const std::string itostr_with_locale = itostr(random_int32);
+ assert(itostr_without_locale == itostr_with_locale);
+ const std::string strprintf_int_with_locale = strprintf("%d", random_int64);
+ assert(strprintf_int_without_locale == strprintf_int_with_locale);
+ const std::string strprintf_double_with_locale = strprintf("%f", random_double);
+ assert(strprintf_double_without_locale == strprintf_double_with_locale);
+
+ const std::locale current_cpp_locale;
+ assert(current_cpp_locale == std::locale::classic());
+}
diff --git a/src/test/fuzz/multiplication_overflow.cpp b/src/test/fuzz/multiplication_overflow.cpp
new file mode 100644
index 0000000000..a4b158c18b
--- /dev/null
+++ b/src/test/fuzz/multiplication_overflow.cpp
@@ -0,0 +1,55 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+#if defined(__has_builtin)
+#if __has_builtin(__builtin_mul_overflow)
+#define HAVE_BUILTIN_MUL_OVERFLOW
+#endif
+#elif defined(__GNUC__) && (__GNUC__ >= 5)
+#define HAVE_BUILTIN_MUL_OVERFLOW
+#endif
+
+namespace {
+template <typename T>
+void TestMultiplicationOverflow(FuzzedDataProvider& fuzzed_data_provider)
+{
+ const T i = fuzzed_data_provider.ConsumeIntegral<T>();
+ const T j = fuzzed_data_provider.ConsumeIntegral<T>();
+ const bool is_multiplication_overflow_custom = MultiplicationOverflow(i, j);
+#if defined(HAVE_BUILTIN_MUL_OVERFLOW)
+ T result_builtin;
+ const bool is_multiplication_overflow_builtin = __builtin_mul_overflow(i, j, &result_builtin);
+ assert(is_multiplication_overflow_custom == is_multiplication_overflow_builtin);
+ if (!is_multiplication_overflow_custom) {
+ assert(i * j == result_builtin);
+ }
+#else
+ if (!is_multiplication_overflow_custom) {
+ (void)(i * j);
+ }
+#endif
+}
+} // namespace
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ TestMultiplicationOverflow<int64_t>(fuzzed_data_provider);
+ TestMultiplicationOverflow<uint64_t>(fuzzed_data_provider);
+ TestMultiplicationOverflow<int32_t>(fuzzed_data_provider);
+ TestMultiplicationOverflow<uint32_t>(fuzzed_data_provider);
+ TestMultiplicationOverflow<int16_t>(fuzzed_data_provider);
+ TestMultiplicationOverflow<uint16_t>(fuzzed_data_provider);
+ TestMultiplicationOverflow<char>(fuzzed_data_provider);
+ TestMultiplicationOverflow<unsigned char>(fuzzed_data_provider);
+ TestMultiplicationOverflow<signed char>(fuzzed_data_provider);
+}
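When __builtin_mul_overflow is unavailable, the custom MultiplicationOverflow() from test/fuzz/util.h is the only oracle, so its correctness matters. A sketch of the kind of division-based, UB-free check such a helper performs (presented as an assumption about its shape, not a verbatim copy):

    #include <limits>
    #include <type_traits>

    template <typename T>
    bool MulOverflowSketch(const T i, const T j) noexcept
    {
        static_assert(std::is_integral<T>::value, "Integral required.");
        if (std::numeric_limits<T>::is_signed) {
            if (i > 0) {
                if (j > 0) return i > std::numeric_limits<T>::max() / j;
                return j < std::numeric_limits<T>::min() / i;
            }
            if (j > 0) return i < std::numeric_limits<T>::min() / j;
            return i != 0 && j < std::numeric_limits<T>::max() / i;
        }
        return j != 0 && i > std::numeric_limits<T>::max() / j;
    }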
diff --git a/src/test/fuzz/net_permissions.cpp b/src/test/fuzz/net_permissions.cpp
new file mode 100644
index 0000000000..bfc5d21427
--- /dev/null
+++ b/src/test/fuzz/net_permissions.cpp
@@ -0,0 +1,51 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <net_permissions.h>
+#include <optional.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+
+#include <cassert>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const std::string s = fuzzed_data_provider.ConsumeRandomLengthString(32);
+ const NetPermissionFlags net_permission_flags = fuzzed_data_provider.ConsumeBool() ? fuzzed_data_provider.PickValueInArray<NetPermissionFlags>({
+ NetPermissionFlags::PF_NONE,
+ NetPermissionFlags::PF_BLOOMFILTER,
+ NetPermissionFlags::PF_RELAY,
+ NetPermissionFlags::PF_FORCERELAY,
+ NetPermissionFlags::PF_NOBAN,
+ NetPermissionFlags::PF_MEMPOOL,
+ NetPermissionFlags::PF_ISIMPLICIT,
+ NetPermissionFlags::PF_ALL,
+ }) :
+ static_cast<NetPermissionFlags>(fuzzed_data_provider.ConsumeIntegral<uint32_t>());
+
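+ // For both whitebind and whitelist permissions: if parsing succeeds, verify that a flag added via AddFlag is reported by HasFlag before being cleared again.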
+ NetWhitebindPermissions net_whitebind_permissions;
+ std::string error_net_whitebind_permissions;
+ if (NetWhitebindPermissions::TryParse(s, net_whitebind_permissions, error_net_whitebind_permissions)) {
+ (void)NetPermissions::ToStrings(net_whitebind_permissions.m_flags);
+ (void)NetPermissions::AddFlag(net_whitebind_permissions.m_flags, net_permission_flags);
+ assert(NetPermissions::HasFlag(net_whitebind_permissions.m_flags, net_permission_flags));
+ (void)NetPermissions::ClearFlag(net_whitebind_permissions.m_flags, net_permission_flags);
+ (void)NetPermissions::ToStrings(net_whitebind_permissions.m_flags);
+ }
+
+ NetWhitelistPermissions net_whitelist_permissions;
+ std::string error_net_whitelist_permissions;
+ if (NetWhitelistPermissions::TryParse(s, net_whitelist_permissions, error_net_whitelist_permissions)) {
+ (void)NetPermissions::ToStrings(net_whitelist_permissions.m_flags);
+ (void)NetPermissions::AddFlag(net_whitelist_permissions.m_flags, net_permission_flags);
+ assert(NetPermissions::HasFlag(net_whitelist_permissions.m_flags, net_permission_flags));
+ (void)NetPermissions::ClearFlag(net_whitelist_permissions.m_flags, net_permission_flags);
+ (void)NetPermissions::ToStrings(net_whitelist_permissions.m_flags);
+ }
+}
diff --git a/src/test/fuzz/netaddress.cpp b/src/test/fuzz/netaddress.cpp
new file mode 100644
index 0000000000..d8d53566c7
--- /dev/null
+++ b/src/test/fuzz/netaddress.cpp
@@ -0,0 +1,134 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <netaddress.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+#include <netinet/in.h>
+#include <vector>
+
+namespace {
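+// Construct a CNetAddr of a fuzzer-chosen network type (IPv4, IPv6, internal or onion) from fuzzer-provided bytes.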
+CNetAddr ConsumeNetAddr(FuzzedDataProvider& fuzzed_data_provider) noexcept
+{
+ const Network network = fuzzed_data_provider.PickValueInArray({Network::NET_IPV4, Network::NET_IPV6, Network::NET_INTERNAL, Network::NET_ONION});
+ if (network == Network::NET_IPV4) {
+ const in_addr v4_addr = {
+ .s_addr = fuzzed_data_provider.ConsumeIntegral<uint32_t>()};
+ return CNetAddr{v4_addr};
+ } else if (network == Network::NET_IPV6) {
+ if (fuzzed_data_provider.remaining_bytes() < 16) {
+ return CNetAddr{};
+ }
+ in6_addr v6_addr = {};
+ memcpy(v6_addr.s6_addr, fuzzed_data_provider.ConsumeBytes<uint8_t>(16).data(), 16);
+ return CNetAddr{v6_addr, fuzzed_data_provider.ConsumeIntegral<uint32_t>()};
+ } else if (network == Network::NET_INTERNAL) {
+ CNetAddr net_addr;
+ net_addr.SetInternal(fuzzed_data_provider.ConsumeBytesAsString(32));
+ return net_addr;
+ } else if (network == Network::NET_ONION) {
+ CNetAddr net_addr;
+ net_addr.SetSpecial(fuzzed_data_provider.ConsumeBytesAsString(32));
+ return net_addr;
+ } else {
+ assert(false);
+ }
+}
+} // namespace
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
+ const CNetAddr net_addr = ConsumeNetAddr(fuzzed_data_provider);
+ for (int i = 0; i < 15; ++i) {
+ (void)net_addr.GetByte(i);
+ }
+ (void)net_addr.GetHash();
+ (void)net_addr.GetNetClass();
+ if (net_addr.GetNetwork() == Network::NET_IPV4) {
+ assert(net_addr.IsIPv4());
+ }
+ if (net_addr.GetNetwork() == Network::NET_IPV6) {
+ assert(net_addr.IsIPv6());
+ }
+ if (net_addr.GetNetwork() == Network::NET_ONION) {
+ assert(net_addr.IsTor());
+ }
+ if (net_addr.GetNetwork() == Network::NET_INTERNAL) {
+ assert(net_addr.IsInternal());
+ }
+ if (net_addr.GetNetwork() == Network::NET_UNROUTABLE) {
+ assert(!net_addr.IsRoutable());
+ }
+ (void)net_addr.IsBindAny();
+ if (net_addr.IsInternal()) {
+ assert(net_addr.GetNetwork() == Network::NET_INTERNAL);
+ }
+ if (net_addr.IsIPv4()) {
+ assert(net_addr.GetNetwork() == Network::NET_IPV4 || net_addr.GetNetwork() == Network::NET_UNROUTABLE);
+ }
+ if (net_addr.IsIPv6()) {
+ assert(net_addr.GetNetwork() == Network::NET_IPV6 || net_addr.GetNetwork() == Network::NET_UNROUTABLE);
+ }
+ (void)net_addr.IsLocal();
+ if (net_addr.IsRFC1918() || net_addr.IsRFC2544() || net_addr.IsRFC6598() || net_addr.IsRFC5737() || net_addr.IsRFC3927()) {
+ assert(net_addr.IsIPv4());
+ }
+ (void)net_addr.IsRFC2544();
+ if (net_addr.IsRFC3849() || net_addr.IsRFC3964() || net_addr.IsRFC4380() || net_addr.IsRFC4843() || net_addr.IsRFC7343() || net_addr.IsRFC4862() || net_addr.IsRFC6052() || net_addr.IsRFC6145()) {
+ assert(net_addr.IsIPv6());
+ }
+ (void)net_addr.IsRFC3927();
+ (void)net_addr.IsRFC3964();
+ if (net_addr.IsRFC4193()) {
+ assert(net_addr.GetNetwork() == Network::NET_ONION || net_addr.GetNetwork() == Network::NET_INTERNAL || net_addr.GetNetwork() == Network::NET_UNROUTABLE);
+ }
+ (void)net_addr.IsRFC4380();
+ (void)net_addr.IsRFC4843();
+ (void)net_addr.IsRFC4862();
+ (void)net_addr.IsRFC5737();
+ (void)net_addr.IsRFC6052();
+ (void)net_addr.IsRFC6145();
+ (void)net_addr.IsRFC6598();
+ (void)net_addr.IsRFC7343();
+ if (!net_addr.IsRoutable()) {
+ assert(net_addr.GetNetwork() == Network::NET_UNROUTABLE || net_addr.GetNetwork() == Network::NET_INTERNAL);
+ }
+ if (net_addr.IsTor()) {
+ assert(net_addr.GetNetwork() == Network::NET_ONION);
+ }
+ (void)net_addr.IsValid();
+ (void)net_addr.ToString();
+ (void)net_addr.ToStringIP();
+
+ const CSubNet sub_net{net_addr, fuzzed_data_provider.ConsumeIntegral<int32_t>()};
+ (void)sub_net.IsValid();
+ (void)sub_net.ToString();
+
+ const CService service{net_addr, fuzzed_data_provider.ConsumeIntegral<uint16_t>()};
+ (void)service.GetKey();
+ (void)service.GetPort();
+ (void)service.ToString();
+ (void)service.ToStringIPPort();
+ (void)service.ToStringPort();
+
+ const CNetAddr other_net_addr = ConsumeNetAddr(fuzzed_data_provider);
+ (void)net_addr.GetReachabilityFrom(&other_net_addr);
+ (void)sub_net.Match(other_net_addr);
+
+ const CService other_service{net_addr, fuzzed_data_provider.ConsumeIntegral<uint16_t>()};
+ assert((service == other_service) != (service != other_service));
+ (void)(service < other_service);
+
+ const CSubNet sub_net_copy_1{net_addr, other_net_addr};
+ const CSubNet sub_net_copy_2{net_addr};
+
+ CNetAddr mutable_net_addr;
+ mutable_net_addr.SetIP(net_addr);
+ assert(net_addr == mutable_net_addr);
+}
diff --git a/src/test/fuzz/p2p_transport_deserializer.cpp b/src/test/fuzz/p2p_transport_deserializer.cpp
new file mode 100644
index 0000000000..57393fed45
--- /dev/null
+++ b/src/test/fuzz/p2p_transport_deserializer.cpp
@@ -0,0 +1,47 @@
+// Copyright (c) 2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <chainparams.h>
+#include <net.h>
+#include <protocol.h>
+#include <test/fuzz/fuzz.h>
+
+#include <cassert>
+#include <cstdint>
+#include <limits>
+#include <vector>
+
+void initialize()
+{
+ SelectParams(CBaseChainParams::REGTEST);
+}
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ V1TransportDeserializer deserializer{Params().MessageStart(), SER_NETWORK, INIT_PROTO_VERSION};
+ const char* pch = (const char*)buffer.data();
+ size_t n_bytes = buffer.size();
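+ // Feed the buffer to the deserializer, advancing by the number of bytes it reports as handled, and check header and size invariants for every completed message.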
+ while (n_bytes > 0) {
+ const int handled = deserializer.Read(pch, n_bytes);
+ if (handled < 0) {
+ break;
+ }
+ pch += handled;
+ n_bytes -= handled;
+ if (deserializer.Complete()) {
+ const int64_t m_time = std::numeric_limits<int64_t>::max();
+ const CNetMessage msg = deserializer.GetMessage(Params().MessageStart(), m_time);
+ assert(msg.m_command.size() <= CMessageHeader::COMMAND_SIZE);
+ assert(msg.m_raw_message_size <= buffer.size());
+ assert(msg.m_raw_message_size == CMessageHeader::HEADER_SIZE + msg.m_message_size);
+ assert(msg.m_time == m_time);
+ if (msg.m_valid_header) {
+ assert(msg.m_valid_netmagic);
+ }
+ if (!msg.m_valid_netmagic) {
+ assert(!msg.m_valid_header);
+ }
+ }
+ }
+}
diff --git a/src/test/fuzz/parse_univalue.cpp b/src/test/fuzz/parse_univalue.cpp
index 3ad112dbad..571364aaa6 100644
--- a/src/test/fuzz/parse_univalue.cpp
+++ b/src/test/fuzz/parse_univalue.cpp
@@ -14,7 +14,7 @@
void initialize()
{
- static const auto verify_handle = MakeUnique<ECCVerifyHandle>();
+ static const ECCVerifyHandle verify_handle;
SelectParams(CBaseChainParams::REGTEST);
}
@@ -35,21 +35,31 @@ void test_one_input(const std::vector<uint8_t>& buffer)
}
try {
(void)ParseHashO(univalue, "A");
+ } catch (const UniValue&) {
+ } catch (const std::runtime_error&) {
+ }
+ try {
(void)ParseHashO(univalue, random_string);
} catch (const UniValue&) {
} catch (const std::runtime_error&) {
}
try {
(void)ParseHashV(univalue, "A");
+ } catch (const UniValue&) {
+ } catch (const std::runtime_error&) {
+ }
+ try {
(void)ParseHashV(univalue, random_string);
} catch (const UniValue&) {
} catch (const std::runtime_error&) {
}
try {
(void)ParseHexO(univalue, "A");
+ } catch (const UniValue&) {
+ }
+ try {
(void)ParseHexO(univalue, random_string);
} catch (const UniValue&) {
- } catch (const std::runtime_error&) {
}
try {
(void)ParseHexUV(univalue, "A");
@@ -59,6 +69,10 @@ void test_one_input(const std::vector<uint8_t>& buffer)
}
try {
(void)ParseHexV(univalue, "A");
+ } catch (const UniValue&) {
+ } catch (const std::runtime_error&) {
+ }
+ try {
(void)ParseHexV(univalue, random_string);
} catch (const UniValue&) {
} catch (const std::runtime_error&) {
diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp
new file mode 100644
index 0000000000..dc49dd499a
--- /dev/null
+++ b/src/test/fuzz/process_message.cpp
@@ -0,0 +1,98 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <banman.h>
+#include <chainparams.h>
+#include <consensus/consensus.h>
+#include <net.h>
+#include <net_processing.h>
+#include <protocol.h>
+#include <scheduler.h>
+#include <script/script.h>
+#include <streams.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/util/mining.h>
+#include <test/util/setup_common.h>
+#include <util/memory.h>
+#include <validationinterface.h>
+#include <version.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cassert>
+#include <chrono>
+#include <cstdint>
+#include <iosfwd>
+#include <iostream>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+bool ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CTxMemPool& mempool, CConnman* connman, BanMan* banman, const std::atomic<bool>& interruptMsgProc);
+
+namespace {
+
+#ifdef MESSAGE_TYPE
+#define TO_STRING_(s) #s
+#define TO_STRING(s) TO_STRING_(s)
+const std::string LIMIT_TO_MESSAGE_TYPE{TO_STRING(MESSAGE_TYPE)};
+#else
+const std::string LIMIT_TO_MESSAGE_TYPE;
+#endif
+
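+// Deserialization exception messages that are expected during fuzzing, mapped to the message types that are allowed to throw them.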
+const std::map<std::string, std::set<std::string>> EXPECTED_DESERIALIZATION_EXCEPTIONS = {
+ {"CDataStream::read(): end of data: iostream error", {"addr", "block", "blocktxn", "cmpctblock", "feefilter", "filteradd", "filterload", "getblocks", "getblocktxn", "getdata", "getheaders", "headers", "inv", "notfound", "ping", "sendcmpct", "tx"}},
+ {"CompactSize exceeds limit of type: iostream error", {"cmpctblock"}},
+ {"differential value overflow: iostream error", {"getblocktxn"}},
+ {"index overflowed 16 bits: iostream error", {"getblocktxn"}},
+ {"index overflowed 16-bits: iostream error", {"cmpctblock"}},
+ {"indexes overflowed 16 bits: iostream error", {"getblocktxn"}},
+ {"non-canonical ReadCompactSize(): iostream error", {"addr", "block", "blocktxn", "cmpctblock", "filteradd", "filterload", "getblocks", "getblocktxn", "getdata", "getheaders", "headers", "inv", "notfound", "tx"}},
+ {"ReadCompactSize(): size too large: iostream error", {"addr", "block", "blocktxn", "cmpctblock", "filteradd", "filterload", "getblocks", "getblocktxn", "getdata", "getheaders", "headers", "inv", "notfound", "tx"}},
+ {"Superfluous witness record: iostream error", {"block", "blocktxn", "cmpctblock", "tx"}},
+ {"Unknown transaction optional data: iostream error", {"block", "blocktxn", "cmpctblock", "tx"}},
+};
+
+const RegTestingSetup* g_setup;
+} // namespace
+
+void initialize()
+{
+ static RegTestingSetup setup{};
+ g_setup = &setup;
+
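+ // Mine a chain long enough for the early coinbase outputs to mature.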
+ for (int i = 0; i < 2 * COINBASE_MATURITY; i++) {
+ MineBlock(g_setup->m_node, CScript() << OP_TRUE);
+ }
+ SyncWithValidationInterfaceQueue();
+}
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const std::string random_message_type{fuzzed_data_provider.ConsumeBytesAsString(CMessageHeader::COMMAND_SIZE).c_str()};
+ if (!LIMIT_TO_MESSAGE_TYPE.empty() && random_message_type != LIMIT_TO_MESSAGE_TYPE) {
+ return;
+ }
+ CDataStream random_bytes_data_stream{fuzzed_data_provider.ConsumeRemainingBytes<unsigned char>(), SER_NETWORK, PROTOCOL_VERSION};
+ CNode p2p_node{0, ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_BLOOM), 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, false};
+ p2p_node.fSuccessfullyConnected = true;
+ p2p_node.nVersion = PROTOCOL_VERSION;
+ p2p_node.SetSendVersion(PROTOCOL_VERSION);
+ g_setup->m_node.peer_logic->InitializeNode(&p2p_node);
+ try {
+ (void)ProcessMessage(&p2p_node, random_message_type, random_bytes_data_stream, GetTimeMillis(), Params(), *g_setup->m_node.mempool, g_setup->m_node.connman.get(), g_setup->m_node.banman.get(), std::atomic<bool>{false});
+ } catch (const std::ios_base::failure& e) {
+ const std::string exception_message{e.what()};
+ const auto p = EXPECTED_DESERIALIZATION_EXCEPTIONS.find(exception_message);
+ if (p == EXPECTED_DESERIALIZATION_EXCEPTIONS.cend() || p->second.count(random_message_type) == 0) {
+ std::cout << "Unexpected exception when processing message type \"" << random_message_type << "\": " << exception_message << std::endl;
+ assert(false);
+ }
+ }
+ SyncWithValidationInterfaceQueue();
+}
diff --git a/src/test/fuzz/protocol.cpp b/src/test/fuzz/protocol.cpp
new file mode 100644
index 0000000000..954471de6c
--- /dev/null
+++ b/src/test/fuzz/protocol.cpp
@@ -0,0 +1,32 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <optional.h>
+#include <protocol.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+
+#include <cstdint>
+#include <stdexcept>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const Optional<CInv> inv = ConsumeDeserializable<CInv>(fuzzed_data_provider);
+ if (!inv) {
+ return;
+ }
+ try {
+ (void)inv->GetCommand();
+ } catch (const std::out_of_range&) {
+ }
+ (void)inv->ToString();
+ const Optional<CInv> another_inv = ConsumeDeserializable<CInv>(fuzzed_data_provider);
+ if (!another_inv) {
+ return;
+ }
+ (void)(*inv < *another_inv);
+}
diff --git a/src/test/fuzz/psbt.cpp b/src/test/fuzz/psbt.cpp
index 1ce28f9a6d..ca3e0b8586 100644
--- a/src/test/fuzz/psbt.cpp
+++ b/src/test/fuzz/psbt.cpp
@@ -19,7 +19,7 @@
void initialize()
{
- static const auto verify_handle = MakeUnique<ECCVerifyHandle>();
+ static const ECCVerifyHandle verify_handle;
}
void test_one_input(const std::vector<uint8_t>& buffer)
diff --git a/src/test/fuzz/rolling_bloom_filter.cpp b/src/test/fuzz/rolling_bloom_filter.cpp
new file mode 100644
index 0000000000..3b37321977
--- /dev/null
+++ b/src/test/fuzz/rolling_bloom_filter.cpp
@@ -0,0 +1,50 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <bloom.h>
+#include <optional.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+#include <uint256.h>
+
+#include <cassert>
+#include <cstdint>
+#include <limits>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+
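+ // Construct the filter with a fuzzer-chosen element count in [1, 1000] and a false positive rate no greater than 0.999.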
+ CRollingBloomFilter rolling_bloom_filter{
+ fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(1, 1000),
+ 0.999 / fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(1, std::numeric_limits<unsigned int>::max())};
+ while (fuzzed_data_provider.remaining_bytes() > 0) {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 2)) {
+ case 0: {
+ const std::vector<unsigned char> b = ConsumeRandomLengthByteVector(fuzzed_data_provider);
+ (void)rolling_bloom_filter.contains(b);
+ rolling_bloom_filter.insert(b);
+ const bool present = rolling_bloom_filter.contains(b);
+ assert(present);
+ break;
+ }
+ case 1: {
+ const Optional<uint256> u256 = ConsumeDeserializable<uint256>(fuzzed_data_provider);
+ if (!u256) {
+ break;
+ }
+ (void)rolling_bloom_filter.contains(*u256);
+ rolling_bloom_filter.insert(*u256);
+ const bool present = rolling_bloom_filter.contains(*u256);
+ assert(present);
+ break;
+ }
+ case 2:
+ rolling_bloom_filter.reset();
+ break;
+ }
+ }
+}
diff --git a/src/test/fuzz/script.cpp b/src/test/fuzz/script.cpp
index 0469e87de6..2f50f1b838 100644
--- a/src/test/fuzz/script.cpp
+++ b/src/test/fuzz/script.cpp
@@ -14,13 +14,18 @@
#include <script/signingprovider.h>
#include <script/standard.h>
#include <streams.h>
+#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+#include <univalue.h>
#include <util/memory.h>
void initialize()
{
// Fuzzers using pubkey must hold an ECCVerifyHandle.
- static const auto verify_handle = MakeUnique<ECCVerifyHandle>();
+ static const ECCVerifyHandle verify_handle;
+
+ SelectParams(CBaseChainParams::REGTEST);
}
void test_one_input(const std::vector<uint8_t>& buffer)
@@ -28,7 +33,15 @@ void test_one_input(const std::vector<uint8_t>& buffer)
const CScript script(buffer.begin(), buffer.end());
std::vector<unsigned char> compressed;
- (void)CompressScript(script, compressed);
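+ // If the script is compressible, check that it round-trips through DecompressScript unchanged.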
+ if (CompressScript(script, compressed)) {
+ const unsigned int size = compressed[0];
+ compressed.erase(compressed.begin());
+ assert(size <= 5);
+ CScript decompressed_script;
+ const bool ok = DecompressScript(decompressed_script, size, compressed);
+ assert(ok);
+ assert(script == decompressed_script);
+ }
CTxDestination address;
(void)ExtractDestination(script, address);
@@ -61,4 +74,27 @@ void test_one_input(const std::vector<uint8_t>& buffer)
(void)script.IsPushOnly();
(void)script.IsUnspendable();
(void)script.GetSigOpCount(/* fAccurate= */ false);
+
+ (void)FormatScript(script);
+ (void)ScriptToAsmStr(script, false);
+ (void)ScriptToAsmStr(script, true);
+
+ UniValue o1(UniValue::VOBJ);
+ ScriptPubKeyToUniv(script, o1, true);
+ UniValue o2(UniValue::VOBJ);
+ ScriptPubKeyToUniv(script, o2, false);
+ UniValue o3(UniValue::VOBJ);
+ ScriptToUniv(script, o3, true);
+ UniValue o4(UniValue::VOBJ);
+ ScriptToUniv(script, o4, false);
+
+ {
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const std::vector<uint8_t> bytes = ConsumeRandomLengthByteVector(fuzzed_data_provider);
+ // DecompressScript(..., ..., bytes) is not guaranteed to be defined if bytes.size() <= 23.
+ if (bytes.size() >= 24) {
+ CScript decompressed_script;
+ DecompressScript(decompressed_script, fuzzed_data_provider.ConsumeIntegral<unsigned int>(), bytes);
+ }
+ }
}
diff --git a/src/test/fuzz/script_flags.cpp b/src/test/fuzz/script_flags.cpp
index 08622d0979..3d8ece7c61 100644
--- a/src/test/fuzz/script_flags.cpp
+++ b/src/test/fuzz/script_flags.cpp
@@ -15,7 +15,7 @@ static bool IsValidFlagCombination(unsigned flags);
void initialize()
{
- static const auto verify_handle = MakeUnique<ECCVerifyHandle>();
+ static const ECCVerifyHandle verify_handle;
}
void test_one_input(const std::vector<uint8_t>& buffer)
diff --git a/src/test/fuzz/script_ops.cpp b/src/test/fuzz/script_ops.cpp
new file mode 100644
index 0000000000..0cd129ba7a
--- /dev/null
+++ b/src/test/fuzz/script_ops.cpp
@@ -0,0 +1,67 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <script/script.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ CScript script = ConsumeScript(fuzzed_data_provider);
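+ // Randomly grow, rewrite, clear and inspect the script, driven by fuzzer input.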
+ while (fuzzed_data_provider.remaining_bytes() > 0) {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 7)) {
+ case 0:
+ script += ConsumeScript(fuzzed_data_provider);
+ break;
+ case 1:
+ script = script + ConsumeScript(fuzzed_data_provider);
+ break;
+ case 2:
+ script << fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ break;
+ case 3:
+ script << ConsumeOpcodeType(fuzzed_data_provider);
+ break;
+ case 4:
+ script << ConsumeScriptNum(fuzzed_data_provider);
+ break;
+ case 5:
+ script << ConsumeRandomLengthByteVector(fuzzed_data_provider);
+ break;
+ case 6:
+ script.clear();
+ break;
+ case 7: {
+ (void)script.GetSigOpCount(false);
+ (void)script.GetSigOpCount(true);
+ (void)script.GetSigOpCount(script);
+ (void)script.HasValidOps();
+ (void)script.IsPayToScriptHash();
+ (void)script.IsPayToWitnessScriptHash();
+ (void)script.IsPushOnly();
+ (void)script.IsUnspendable();
+ {
+ CScript::const_iterator pc = script.begin();
+ opcodetype opcode;
+ (void)script.GetOp(pc, opcode);
+ std::vector<uint8_t> data;
+ (void)script.GetOp(pc, opcode, data);
+ (void)script.IsPushOnly(pc);
+ }
+ {
+ int version;
+ std::vector<uint8_t> program;
+ (void)script.IsWitnessProgram(version, program);
+ }
+ break;
+ }
+ }
+ }
+}
diff --git a/src/test/fuzz/scriptnum_ops.cpp b/src/test/fuzz/scriptnum_ops.cpp
new file mode 100644
index 0000000000..db44bb9e19
--- /dev/null
+++ b/src/test/fuzz/scriptnum_ops.cpp
@@ -0,0 +1,137 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <script/script.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+
+#include <cassert>
+#include <cstdint>
+#include <limits>
+#include <vector>
+
+namespace {
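+// Mirror the overflow preconditions asserted by CScriptNum::operator+= and operator-= so that only defined arithmetic is exercised.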
+bool IsValidAddition(const CScriptNum& lhs, const CScriptNum& rhs)
+{
+ return rhs == 0 || (rhs > 0 && lhs <= CScriptNum{std::numeric_limits<int64_t>::max()} - rhs) || (rhs < 0 && lhs >= CScriptNum{std::numeric_limits<int64_t>::min()} - rhs);
+}
+
+bool IsValidSubtraction(const CScriptNum& lhs, const CScriptNum& rhs)
+{
+ return rhs == 0 || (rhs > 0 && lhs >= CScriptNum{std::numeric_limits<int64_t>::min()} + rhs) || (rhs < 0 && lhs <= CScriptNum{std::numeric_limits<int64_t>::max()} + rhs);
+}
+} // namespace
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ CScriptNum script_num = ConsumeScriptNum(fuzzed_data_provider);
+ while (fuzzed_data_provider.remaining_bytes() > 0) {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 11)) {
+ case 0: {
+ const int64_t i = fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ assert((script_num == i) != (script_num != i));
+ assert((script_num <= i) != (script_num > i));
+ assert((script_num >= i) != (script_num < i));
+ // Avoid signed integer overflow:
+ // script/script.h:264:93: runtime error: signed integer overflow: -2261405121394637306 + -9223372036854775802 cannot be represented in type 'long'
+ if (IsValidAddition(script_num, CScriptNum{i})) {
+ assert((script_num + i) - i == script_num);
+ }
+ // Avoid signed integer overflow:
+ // script/script.h:265:93: runtime error: signed integer overflow: 9223371895120855039 - -9223372036854710486 cannot be represented in type 'long'
+ if (IsValidSubtraction(script_num, CScriptNum{i})) {
+ assert((script_num - i) + i == script_num);
+ }
+ break;
+ }
+ case 1: {
+ const CScriptNum random_script_num = ConsumeScriptNum(fuzzed_data_provider);
+ assert((script_num == random_script_num) != (script_num != random_script_num));
+ assert((script_num <= random_script_num) != (script_num > random_script_num));
+ assert((script_num >= random_script_num) != (script_num < random_script_num));
+ // Avoid signed integer overflow:
+ // script/script.h:264:93: runtime error: signed integer overflow: -9223126527765971126 + -9223372036854756825 cannot be represented in type 'long'
+ if (IsValidAddition(script_num, random_script_num)) {
+ assert((script_num + random_script_num) - random_script_num == script_num);
+ }
+ // Avoid signed integer overflow:
+ // script/script.h:265:93: runtime error: signed integer overflow: 6052837899185946624 - -9223372036854775808 cannot be represented in type 'long'
+ if (IsValidSubtraction(script_num, random_script_num)) {
+ assert((script_num - random_script_num) + random_script_num == script_num);
+ }
+ break;
+ }
+ case 2: {
+ const CScriptNum random_script_num = ConsumeScriptNum(fuzzed_data_provider);
+ if (!IsValidAddition(script_num, random_script_num)) {
+ // Avoid assertion failure:
+ // ./script/script.h:292: CScriptNum &CScriptNum::operator+=(const int64_t &): Assertion `rhs == 0 || (rhs > 0 && m_value <= std::numeric_limits<int64_t>::max() - rhs) || (rhs < 0 && m_value >= std::numeric_limits<int64_t>::min() - rhs)' failed.
+ break;
+ }
+ script_num += random_script_num;
+ break;
+ }
+ case 3: {
+ const CScriptNum random_script_num = ConsumeScriptNum(fuzzed_data_provider);
+ if (!IsValidSubtraction(script_num, random_script_num)) {
+ // Avoid assertion failure:
+ // ./script/script.h:300: CScriptNum &CScriptNum::operator-=(const int64_t &): Assertion `rhs == 0 || (rhs > 0 && m_value >= std::numeric_limits<int64_t>::min() + rhs) || (rhs < 0 && m_value <= std::numeric_limits<int64_t>::max() + rhs)' failed.
+ break;
+ }
+ script_num -= random_script_num;
+ break;
+ }
+ case 4:
+ script_num = script_num & fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ break;
+ case 5:
+ script_num = script_num & ConsumeScriptNum(fuzzed_data_provider);
+ break;
+ case 6:
+ script_num &= ConsumeScriptNum(fuzzed_data_provider);
+ break;
+ case 7:
+ if (script_num == CScriptNum{std::numeric_limits<int64_t>::min()}) {
+ // Avoid assertion failure:
+ // ./script/script.h:279: CScriptNum CScriptNum::operator-() const: Assertion `m_value != std::numeric_limits<int64_t>::min()' failed.
+ break;
+ }
+ script_num = -script_num;
+ break;
+ case 8:
+ script_num = fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ break;
+ case 9: {
+ const int64_t random_integer = fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ if (!IsValidAddition(script_num, CScriptNum{random_integer})) {
+ // Avoid assertion failure:
+ // ./script/script.h:292: CScriptNum &CScriptNum::operator+=(const int64_t &): Assertion `rhs == 0 || (rhs > 0 && m_value <= std::numeric_limits<int64_t>::max() - rhs) || (rhs < 0 && m_value >= std::numeric_limits<int64_t>::min() - rhs)' failed.
+ break;
+ }
+ script_num += random_integer;
+ break;
+ }
+ case 10: {
+ const int64_t random_integer = fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ if (!IsValidSubtraction(script_num, CScriptNum{random_integer})) {
+ // Avoid assertion failure:
+ // ./script/script.h:300: CScriptNum &CScriptNum::operator-=(const int64_t &): Assertion `rhs == 0 || (rhs > 0 && m_value >= std::numeric_limits<int64_t>::min() + rhs) || (rhs < 0 && m_value <= std::numeric_limits<int64_t>::max() + rhs)' failed.
+ break;
+ }
+ script_num -= random_integer;
+ break;
+ }
+ case 11:
+ script_num &= fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ break;
+ }
+ // Avoid negation failure:
+ // script/script.h:332:35: runtime error: negation of -9223372036854775808 cannot be represented in type 'int64_t' (aka 'long'); cast to an unsigned type to negate this value to itself
+ if (script_num != CScriptNum{std::numeric_limits<int64_t>::min()}) {
+ (void)script_num.getvch();
+ }
+ }
+}
diff --git a/src/test/fuzz/signature_checker.cpp b/src/test/fuzz/signature_checker.cpp
new file mode 100644
index 0000000000..312db27adc
--- /dev/null
+++ b/src/test/fuzz/signature_checker.cpp
@@ -0,0 +1,68 @@
+// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <pubkey.h>
+#include <script/interpreter.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <util/memory.h>
+
+#include <cstdint>
+#include <limits>
+#include <string>
+#include <vector>
+
+void initialize()
+{
+ static const auto verify_handle = MakeUnique<ECCVerifyHandle>();
+}
+
+namespace {
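+// A signature checker whose verdicts are taken from fuzzer input instead of performing real signature, locktime or sequence validation.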
+class FuzzedSignatureChecker : public BaseSignatureChecker
+{
+ FuzzedDataProvider& m_fuzzed_data_provider;
+
+public:
+ FuzzedSignatureChecker(FuzzedDataProvider& fuzzed_data_provider) : m_fuzzed_data_provider(fuzzed_data_provider)
+ {
+ }
+
+ virtual bool CheckSig(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const
+ {
+ return m_fuzzed_data_provider.ConsumeBool();
+ }
+
+ virtual bool CheckLockTime(const CScriptNum& nLockTime) const
+ {
+ return m_fuzzed_data_provider.ConsumeBool();
+ }
+
+ virtual bool CheckSequence(const CScriptNum& nSequence) const
+ {
+ return m_fuzzed_data_provider.ConsumeBool();
+ }
+
+ virtual ~FuzzedSignatureChecker() {}
+};
+} // namespace
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const unsigned int flags = fuzzed_data_provider.ConsumeIntegral<unsigned int>();
+ const SigVersion sig_version = fuzzed_data_provider.PickValueInArray({SigVersion::BASE, SigVersion::WITNESS_V0});
+ const std::string script_string_1 = fuzzed_data_provider.ConsumeRandomLengthString(65536);
+ const std::vector<uint8_t> script_bytes_1{script_string_1.begin(), script_string_1.end()};
+ const std::string script_string_2 = fuzzed_data_provider.ConsumeRandomLengthString(65536);
+ const std::vector<uint8_t> script_bytes_2{script_string_2.begin(), script_string_2.end()};
+ std::vector<std::vector<unsigned char>> stack;
+ (void)EvalScript(stack, {script_bytes_1.begin(), script_bytes_1.end()}, flags, FuzzedSignatureChecker(fuzzed_data_provider), sig_version, nullptr);
+ if ((flags & SCRIPT_VERIFY_CLEANSTACK) != 0 && ((flags & SCRIPT_VERIFY_P2SH) == 0 || (flags & SCRIPT_VERIFY_WITNESS) == 0)) {
+ return;
+ }
+ if ((flags & SCRIPT_VERIFY_WITNESS) != 0 && (flags & SCRIPT_VERIFY_P2SH) == 0) {
+ return;
+ }
+ (void)VerifyScript({script_bytes_1.begin(), script_bytes_1.end()}, {script_bytes_2.begin(), script_bytes_2.end()}, nullptr, flags, FuzzedSignatureChecker(fuzzed_data_provider), nullptr);
+}
diff --git a/src/test/fuzz/string.cpp b/src/test/fuzz/string.cpp
new file mode 100644
index 0000000000..bb583885ba
--- /dev/null
+++ b/src/test/fuzz/string.cpp
@@ -0,0 +1,89 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <blockfilter.h>
+#include <clientversion.h>
+#include <logging.h>
+#include <netbase.h>
+#include <outputtype.h>
+#include <rpc/client.h>
+#include <rpc/request.h>
+#include <rpc/server.h>
+#include <rpc/util.h>
+#include <script/descriptor.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+#include <util/error.h>
+#include <util/fees.h>
+#include <util/message.h>
+#include <util/settings.h>
+#include <util/strencodings.h>
+#include <util/string.h>
+#include <util/system.h>
+#include <util/translation.h>
+#include <util/url.h>
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const std::string random_string_1 = fuzzed_data_provider.ConsumeRandomLengthString(32);
+ const std::string random_string_2 = fuzzed_data_provider.ConsumeRandomLengthString(32);
+ const std::vector<std::string> random_string_vector = ConsumeRandomLengthStringVector(fuzzed_data_provider);
+
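+ // Exercise assorted string formatting, parsing and sanitizing helpers; helpers that may throw are wrapped in try/catch below.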
+ (void)AmountErrMsg(random_string_1, random_string_2);
+ (void)AmountHighWarn(random_string_1);
+ BlockFilterType block_filter_type;
+ (void)BlockFilterTypeByName(random_string_1, block_filter_type);
+ (void)Capitalize(random_string_1);
+ (void)CopyrightHolders(random_string_1);
+ FeeEstimateMode fee_estimate_mode;
+ (void)FeeModeFromString(random_string_1, fee_estimate_mode);
+ (void)FormatParagraph(random_string_1, fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 1000), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 1000));
+ (void)FormatSubVersion(random_string_1, fuzzed_data_provider.ConsumeIntegral<int>(), random_string_vector);
+ (void)GetDescriptorChecksum(random_string_1);
+ (void)HelpExampleCli(random_string_1, random_string_2);
+ (void)HelpExampleRpc(random_string_1, random_string_2);
+ (void)HelpMessageGroup(random_string_1);
+ (void)HelpMessageOpt(random_string_1, random_string_2);
+ (void)IsDeprecatedRPCEnabled(random_string_1);
+ (void)Join(random_string_vector, random_string_1);
+ (void)JSONRPCError(fuzzed_data_provider.ConsumeIntegral<int>(), random_string_1);
+ const util::Settings settings;
+ (void)OnlyHasDefaultSectionSetting(settings, random_string_1, random_string_2);
+ (void)ParseNetwork(random_string_1);
+ try {
+ (void)ParseNonRFCJSONValue(random_string_1);
+ } catch (const std::runtime_error&) {
+ }
+ OutputType output_type;
+ (void)ParseOutputType(random_string_1, output_type);
+ (void)ResolveErrMsg(random_string_1, random_string_2);
+ try {
+ (void)RPCConvertNamedValues(random_string_1, random_string_vector);
+ } catch (const std::runtime_error&) {
+ }
+ try {
+ (void)RPCConvertValues(random_string_1, random_string_vector);
+ } catch (const std::runtime_error&) {
+ }
+ (void)SanitizeString(random_string_1);
+ (void)SanitizeString(random_string_1, fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 3));
+ (void)ShellEscape(random_string_1);
+ int port_out;
+ std::string host_out;
+ SplitHostPort(random_string_1, port_out, host_out);
+ (void)TimingResistantEqual(random_string_1, random_string_2);
+ (void)ToLower(random_string_1);
+ (void)ToUpper(random_string_1);
+ (void)TrimString(random_string_1);
+ (void)TrimString(random_string_1, random_string_2);
+ (void)urlDecode(random_string_1);
+ (void)ValidAsCString(random_string_1);
+ (void)_(random_string_1.c_str());
+}
diff --git a/src/test/fuzz/strprintf.cpp b/src/test/fuzz/strprintf.cpp
index 0de21f0e7c..d5be1070bd 100644
--- a/src/test/fuzz/strprintf.cpp
+++ b/src/test/fuzz/strprintf.cpp
@@ -8,7 +8,6 @@
#include <util/strencodings.h>
#include <algorithm>
-#include <cassert>
#include <cstdint>
#include <string>
#include <vector>
@@ -27,7 +26,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
// * strprintf("%.222222200000000$", 1.1);
//
// Upstream bug report: https://github.com/c42f/tinyformat/issues/70
- if (format_string.find("%") != std::string::npos && digits_in_format_specifier >= 7) {
+ if (format_string.find('%') != std::string::npos && digits_in_format_specifier >= 7) {
return;
}
@@ -35,7 +34,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
// * strprintf("%1$*1$*", -11111111);
//
// Upstream bug report: https://github.com/c42f/tinyformat/issues/70
- if (format_string.find("%") != std::string::npos && format_string.find("$") != std::string::npos && format_string.find("*") != std::string::npos && digits_in_format_specifier > 0) {
+ if (format_string.find('%') != std::string::npos && format_string.find('$') != std::string::npos && format_string.find('*') != std::string::npos && digits_in_format_specifier > 0) {
return;
}
@@ -96,7 +95,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
}
try {
- switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 13)) {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 5)) {
case 0:
(void)strprintf(format_string, fuzzed_data_provider.ConsumeRandomLengthString(32));
break;
@@ -115,32 +114,52 @@ void test_one_input(const std::vector<uint8_t>& buffer)
case 5:
(void)strprintf(format_string, fuzzed_data_provider.ConsumeBool());
break;
- case 6:
+ }
+ } catch (const tinyformat::format_error&) {
+ }
+
+ if (format_string.find('%') != std::string::npos && format_string.find('c') != std::string::npos) {
+ // Avoid triggering the following:
+ // * strprintf("%c", 1.31783e+38);
+ // tinyformat.h:244:36: runtime error: 1.31783e+38 is outside the range of representable values of type 'char'
+ return;
+ }
+
+ if (format_string.find('%') != std::string::npos && format_string.find('*') != std::string::npos) {
+ // Avoid triggering the following:
+ // * strprintf("%*", -2.33527e+38);
+ // tinyformat.h:283:65: runtime error: -2.33527e+38 is outside the range of representable values of type 'int'
+ // * strprintf("%*", -2147483648);
+ // tinyformat.h:763:25: runtime error: negation of -2147483648 cannot be represented in type 'int'; cast to an unsigned type to negate this value to itself
+ return;
+ }
+
+ try {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 7)) {
+ case 0:
(void)strprintf(format_string, fuzzed_data_provider.ConsumeFloatingPoint<float>());
break;
- case 7:
+ case 1:
(void)strprintf(format_string, fuzzed_data_provider.ConsumeFloatingPoint<double>());
break;
- case 8:
+ case 2:
(void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<int16_t>());
break;
- case 9:
+ case 3:
(void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<uint16_t>());
break;
- case 10:
+ case 4:
(void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<int32_t>());
break;
- case 11:
+ case 5:
(void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<uint32_t>());
break;
- case 12:
+ case 6:
(void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<int64_t>());
break;
- case 13:
+ case 7:
(void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<uint64_t>());
break;
- default:
- assert(false);
}
} catch (const tinyformat::format_error&) {
}
diff --git a/src/test/fuzz/timedata.cpp b/src/test/fuzz/timedata.cpp
new file mode 100644
index 0000000000..a0e579a88f
--- /dev/null
+++ b/src/test/fuzz/timedata.cpp
@@ -0,0 +1,29 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/util.h>
+#include <timedata.h>
+
+#include <cassert>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
+ const unsigned int max_size = fuzzed_data_provider.ConsumeIntegralInRange<unsigned int>(0, 1000);
+ // Divide by 2 to avoid signed integer overflow in .median()
+ const int64_t initial_value = fuzzed_data_provider.ConsumeIntegral<int64_t>() / 2;
+ CMedianFilter<int64_t> median_filter{max_size, initial_value};
+ while (fuzzed_data_provider.remaining_bytes() > 0) {
+ (void)median_filter.median();
+ assert(median_filter.size() > 0);
+ assert(static_cast<size_t>(median_filter.size()) == median_filter.sorted().size());
+ assert(static_cast<unsigned int>(median_filter.size()) <= max_size || max_size == 0);
+ // Divide by 2 to avoid signed integer overflow in .median()
+ median_filter.input(fuzzed_data_provider.ConsumeIntegral<int64_t>() / 2);
+ }
+}
diff --git a/src/test/fuzz/transaction.cpp b/src/test/fuzz/transaction.cpp
index fefafda36b..d8e84f1a0f 100644
--- a/src/test/fuzz/transaction.cpp
+++ b/src/test/fuzz/transaction.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <chainparams.h>
#include <coins.h>
#include <consensus/tx_check.h>
#include <consensus/tx_verify.h>
@@ -13,12 +14,18 @@
#include <primitives/transaction.h>
#include <streams.h>
#include <test/fuzz/fuzz.h>
+#include <univalue.h>
#include <util/rbf.h>
#include <validation.h>
#include <version.h>
#include <cassert>
+void initialize()
+{
+ SelectParams(CBaseChainParams::REGTEST);
+}
+
void test_one_input(const std::vector<uint8_t>& buffer)
{
CDataStream ds(buffer, SER_NETWORK, INIT_PROTO_VERSION);
@@ -85,4 +92,23 @@ void test_one_input(const std::vector<uint8_t>& buffer)
(void)IsStandardTx(tx, reason);
(void)RecursiveDynamicUsage(tx);
(void)SignalsOptInRBF(tx);
+
+ CCoinsView coins_view;
+ const CCoinsViewCache coins_view_cache(&coins_view);
+ (void)AreInputsStandard(tx, coins_view_cache);
+ (void)IsWitnessStandard(tx, coins_view_cache);
+
+ UniValue u(UniValue::VOBJ);
+ // ValueFromAmount(i) not defined when i == std::numeric_limits<int64_t>::min()
+ bool skip_tx_to_univ = false;
+ for (const CTxOut& txout : tx.vout) {
+ if (txout.nValue == std::numeric_limits<int64_t>::min()) {
+ skip_tx_to_univ = true;
+ }
+ }
+ if (!skip_tx_to_univ) {
+ TxToUniv(tx, /* hashBlock */ {}, u);
+ static const uint256 u256_max(uint256S("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
+ TxToUniv(tx, u256_max, u);
+ }
}
diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h
new file mode 100644
index 0000000000..10be2ebaf7
--- /dev/null
+++ b/src/test/fuzz/util.h
@@ -0,0 +1,106 @@
+// Copyright (c) 2009-2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_TEST_FUZZ_UTIL_H
+#define BITCOIN_TEST_FUZZ_UTIL_H
+
+#include <amount.h>
+#include <attributes.h>
+#include <optional.h>
+#include <script/script.h>
+#include <serialize.h>
+#include <streams.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/fuzz.h>
+#include <uint256.h>
+#include <version.h>
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+NODISCARD inline std::vector<uint8_t> ConsumeRandomLengthByteVector(FuzzedDataProvider& fuzzed_data_provider, size_t max_length = 4096) noexcept
+{
+ const std::string s = fuzzed_data_provider.ConsumeRandomLengthString(max_length);
+ return {s.begin(), s.end()};
+}
+
+NODISCARD inline std::vector<std::string> ConsumeRandomLengthStringVector(FuzzedDataProvider& fuzzed_data_provider, size_t max_vector_size = 16, size_t max_string_length = 16) noexcept
+{
+ const size_t n_elements = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, max_vector_size);
+ std::vector<std::string> r;
+ for (size_t i = 0; i < n_elements; ++i) {
+ r.push_back(fuzzed_data_provider.ConsumeRandomLengthString(max_string_length));
+ }
+ return r;
+}
+
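+// Attempt to deserialize a T from fuzzer-provided bytes; returns nullopt if deserialization throws.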
+template <typename T>
+NODISCARD inline Optional<T> ConsumeDeserializable(FuzzedDataProvider& fuzzed_data_provider, size_t max_length = 4096) noexcept
+{
+ const std::vector<uint8_t> buffer = ConsumeRandomLengthByteVector(fuzzed_data_provider, max_length);
+ CDataStream ds{buffer, SER_NETWORK, INIT_PROTO_VERSION};
+ T obj;
+ try {
+ ds >> obj;
+ } catch (const std::ios_base::failure&) {
+ return nullopt;
+ }
+ return obj;
+}
+
+NODISCARD inline opcodetype ConsumeOpcodeType(FuzzedDataProvider& fuzzed_data_provider) noexcept
+{
+ return static_cast<opcodetype>(fuzzed_data_provider.ConsumeIntegralInRange<uint32_t>(0, MAX_OPCODE));
+}
+
+NODISCARD inline CAmount ConsumeMoney(FuzzedDataProvider& fuzzed_data_provider) noexcept
+{
+ return fuzzed_data_provider.ConsumeIntegralInRange<CAmount>(0, MAX_MONEY);
+}
+
+NODISCARD inline CScript ConsumeScript(FuzzedDataProvider& fuzzed_data_provider) noexcept
+{
+ const std::vector<uint8_t> b = ConsumeRandomLengthByteVector(fuzzed_data_provider);
+ return {b.begin(), b.end()};
+}
+
+NODISCARD inline CScriptNum ConsumeScriptNum(FuzzedDataProvider& fuzzed_data_provider) noexcept
+{
+ return CScriptNum{fuzzed_data_provider.ConsumeIntegral<int64_t>()};
+}
+
+NODISCARD inline uint256 ConsumeUInt256(FuzzedDataProvider& fuzzed_data_provider) noexcept
+{
+ const std::vector<unsigned char> v256 = fuzzed_data_provider.ConsumeBytes<unsigned char>(sizeof(uint256));
+ if (v256.size() != sizeof(uint256)) {
+ return {};
+ }
+ return uint256{v256};
+}
+
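+// Return true if i * j would overflow T. The operands are compared against the relevant numeric limit divided by the other operand, so the potentially overflowing multiplication is never performed.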
+template <typename T>
+bool MultiplicationOverflow(T i, T j)
+{
+ static_assert(std::is_integral<T>::value, "Integral required.");
+ if (std::numeric_limits<T>::is_signed) {
+ if (i > 0) {
+ if (j > 0) {
+ return i > (std::numeric_limits<T>::max() / j);
+ } else {
+ return j < (std::numeric_limits<T>::min() / i);
+ }
+ } else {
+ if (j > 0) {
+ return i < (std::numeric_limits<T>::min() / j);
+ } else {
+ return i != 0 && (j < (std::numeric_limits<T>::max() / i));
+ }
+ }
+ } else {
+ return j != 0 && i > std::numeric_limits<T>::max() / j;
+ }
+}
+
+#endif // BITCOIN_TEST_FUZZ_UTIL_H
diff --git a/src/test/key_tests.cpp b/src/test/key_tests.cpp
index 85dc961bea..034b7938f9 100644
--- a/src/test/key_tests.cpp
+++ b/src/test/key_tests.cpp
@@ -8,6 +8,7 @@
#include <uint256.h>
#include <util/system.h>
#include <util/strencodings.h>
+#include <util/string.h>
#include <test/util/setup_common.h>
#include <string>
@@ -176,7 +177,7 @@ BOOST_AUTO_TEST_CASE(key_signature_tests)
bool found_small = false;
for (int i = 0; i < 256; ++i) {
sig.clear();
- std::string msg = "A message to be signed" + std::to_string(i);
+ std::string msg = "A message to be signed" + ToString(i);
msg_hash = Hash(msg.begin(), msg.end());
BOOST_CHECK(key.Sign(msg_hash, sig));
found = sig[3] == 0x20;
diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp
index cb1ef5dcf3..9b5a86fef2 100644
--- a/src/test/net_tests.cpp
+++ b/src/test/net_tests.cpp
@@ -15,6 +15,7 @@
#include <chainparams.h>
#include <util/memory.h>
#include <util/system.h>
+#include <util/string.h>
#include <memory>
@@ -85,7 +86,7 @@ BOOST_AUTO_TEST_CASE(cnode_listen_port)
BOOST_CHECK(port == Params().GetDefaultPort());
// test set port
unsigned short altPort = 12345;
- BOOST_CHECK(gArgs.SoftSetArg("-port", std::to_string(altPort)));
+ BOOST_CHECK(gArgs.SoftSetArg("-port", ToString(altPort)));
port = GetListenPort();
BOOST_CHECK(port == altPort);
}
diff --git a/src/test/reverselock_tests.cpp b/src/test/reverselock_tests.cpp
index 532fe143ae..4e51b8c02a 100644
--- a/src/test/reverselock_tests.cpp
+++ b/src/test/reverselock_tests.cpp
@@ -2,7 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <reverselock.h>
+#include <sync.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
@@ -11,21 +11,50 @@ BOOST_FIXTURE_TEST_SUITE(reverselock_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(reverselock_basics)
{
- boost::mutex mutex;
- boost::unique_lock<boost::mutex> lock(mutex);
+ Mutex mutex;
+ WAIT_LOCK(mutex, lock);
BOOST_CHECK(lock.owns_lock());
{
- reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
+ REVERSE_LOCK(lock);
BOOST_CHECK(!lock.owns_lock());
}
BOOST_CHECK(lock.owns_lock());
}
+BOOST_AUTO_TEST_CASE(reverselock_multiple)
+{
+ Mutex mutex2;
+ Mutex mutex;
+ WAIT_LOCK(mutex2, lock2);
+ WAIT_LOCK(mutex, lock);
+
+ // Make sure undoing two locks succeeds
+ {
+ REVERSE_LOCK(lock);
+ BOOST_CHECK(!lock.owns_lock());
+ REVERSE_LOCK(lock2);
+ BOOST_CHECK(!lock2.owns_lock());
+ }
+ BOOST_CHECK(lock.owns_lock());
+ BOOST_CHECK(lock2.owns_lock());
+}
+
BOOST_AUTO_TEST_CASE(reverselock_errors)
{
- boost::mutex mutex;
- boost::unique_lock<boost::mutex> lock(mutex);
+ Mutex mutex2;
+ Mutex mutex;
+ WAIT_LOCK(mutex2, lock2);
+ WAIT_LOCK(mutex, lock);
+
+#ifdef DEBUG_LOCKORDER
+ // Make sure trying to reverse lock a previous lock fails
+ try {
+ REVERSE_LOCK(lock2);
+ BOOST_CHECK(false); // REVERSE_LOCK(lock2) succeeded
+ } catch(...) { }
+ BOOST_CHECK(lock2.owns_lock());
+#endif
// Make sure trying to reverse lock an unlocked lock fails
lock.unlock();
@@ -34,7 +63,7 @@ BOOST_AUTO_TEST_CASE(reverselock_errors)
bool failed = false;
try {
- reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
+ REVERSE_LOCK(lock);
} catch(...) {
failed = true;
}
@@ -49,7 +78,7 @@ BOOST_AUTO_TEST_CASE(reverselock_errors)
lock.lock();
BOOST_CHECK(lock.owns_lock());
{
- reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
+ REVERSE_LOCK(lock);
BOOST_CHECK(!lock.owns_lock());
}
diff --git a/src/test/scheduler_tests.cpp b/src/test/scheduler_tests.cpp
index b292d5b0d0..801cf8e5d1 100644
--- a/src/test/scheduler_tests.cpp
+++ b/src/test/scheduler_tests.cpp
@@ -4,39 +4,26 @@
#include <random.h>
#include <scheduler.h>
-
-#include <test/util/setup_common.h>
+#include <util/time.h>
#include <boost/thread.hpp>
#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_SUITE(scheduler_tests)
-static void microTask(CScheduler& s, boost::mutex& mutex, int& counter, int delta, boost::chrono::system_clock::time_point rescheduleTime)
+static void microTask(CScheduler& s, boost::mutex& mutex, int& counter, int delta, std::chrono::system_clock::time_point rescheduleTime)
{
{
boost::unique_lock<boost::mutex> lock(mutex);
counter += delta;
}
- boost::chrono::system_clock::time_point noTime = boost::chrono::system_clock::time_point::min();
+ std::chrono::system_clock::time_point noTime = std::chrono::system_clock::time_point::min();
if (rescheduleTime != noTime) {
CScheduler::Function f = std::bind(&microTask, std::ref(s), std::ref(mutex), std::ref(counter), -delta + 1, noTime);
s.schedule(f, rescheduleTime);
}
}
-static void MicroSleep(uint64_t n)
-{
-#if defined(HAVE_WORKING_BOOST_SLEEP_FOR)
- boost::this_thread::sleep_for(boost::chrono::microseconds(n));
-#elif defined(HAVE_WORKING_BOOST_SLEEP)
- boost::this_thread::sleep(boost::posix_time::microseconds(n));
-#else
- //should never get here
- #error missing boost sleep implementation
-#endif
-}
-
BOOST_AUTO_TEST_CASE(manythreads)
{
// Stress test: hundreds of microsecond-scheduled tasks,
@@ -58,15 +45,15 @@ BOOST_AUTO_TEST_CASE(manythreads)
auto randomMsec = [](FastRandomContext& rc) -> int { return -11 + (int)rc.randrange(1012); }; // [-11, 1000]
auto randomDelta = [](FastRandomContext& rc) -> int { return -1000 + (int)rc.randrange(2001); }; // [-1000, 1000]
- boost::chrono::system_clock::time_point start = boost::chrono::system_clock::now();
- boost::chrono::system_clock::time_point now = start;
- boost::chrono::system_clock::time_point first, last;
+ std::chrono::system_clock::time_point start = std::chrono::system_clock::now();
+ std::chrono::system_clock::time_point now = start;
+ std::chrono::system_clock::time_point first, last;
size_t nTasks = microTasks.getQueueInfo(first, last);
BOOST_CHECK(nTasks == 0);
for (int i = 0; i < 100; ++i) {
- boost::chrono::system_clock::time_point t = now + boost::chrono::microseconds(randomMsec(rng));
- boost::chrono::system_clock::time_point tReschedule = now + boost::chrono::microseconds(500 + randomMsec(rng));
+ std::chrono::system_clock::time_point t = now + std::chrono::microseconds(randomMsec(rng));
+ std::chrono::system_clock::time_point tReschedule = now + std::chrono::microseconds(500 + randomMsec(rng));
int whichCounter = zeroToNine(rng);
CScheduler::Function f = std::bind(&microTask, std::ref(microTasks),
std::ref(counterMutex[whichCounter]), std::ref(counter[whichCounter]),
@@ -83,15 +70,15 @@ BOOST_AUTO_TEST_CASE(manythreads)
for (int i = 0; i < 5; i++)
microThreads.create_thread(std::bind(&CScheduler::serviceQueue, &microTasks));
- MicroSleep(600);
- now = boost::chrono::system_clock::now();
+ UninterruptibleSleep(std::chrono::microseconds{600});
+ now = std::chrono::system_clock::now();
// More threads and more tasks:
for (int i = 0; i < 5; i++)
microThreads.create_thread(std::bind(&CScheduler::serviceQueue, &microTasks));
for (int i = 0; i < 100; i++) {
- boost::chrono::system_clock::time_point t = now + boost::chrono::microseconds(randomMsec(rng));
- boost::chrono::system_clock::time_point tReschedule = now + boost::chrono::microseconds(500 + randomMsec(rng));
+ std::chrono::system_clock::time_point t = now + std::chrono::microseconds(randomMsec(rng));
+ std::chrono::system_clock::time_point tReschedule = now + std::chrono::microseconds(500 + randomMsec(rng));
int whichCounter = zeroToNine(rng);
CScheduler::Function f = std::bind(&microTask, std::ref(microTasks),
std::ref(counterMutex[whichCounter]), std::ref(counter[whichCounter]),
@@ -111,6 +98,24 @@ BOOST_AUTO_TEST_CASE(manythreads)
BOOST_CHECK_EQUAL(counterSum, 200);
}
+BOOST_AUTO_TEST_CASE(wait_until_past)
+{
+ std::condition_variable condvar;
+ Mutex mtx;
+ WAIT_LOCK(mtx, lock);
+
+ const auto no_wait = [&](const std::chrono::seconds& d) {
+ return condvar.wait_until(lock, std::chrono::system_clock::now() - d);
+ };
+
+ BOOST_CHECK(std::cv_status::timeout == no_wait(std::chrono::seconds{1}));
+ BOOST_CHECK(std::cv_status::timeout == no_wait(std::chrono::minutes{1}));
+ BOOST_CHECK(std::cv_status::timeout == no_wait(std::chrono::hours{1}));
+ BOOST_CHECK(std::cv_status::timeout == no_wait(std::chrono::hours{10}));
+ BOOST_CHECK(std::cv_status::timeout == no_wait(std::chrono::hours{100}));
+ BOOST_CHECK(std::cv_status::timeout == no_wait(std::chrono::hours{1000}));
+}
+
BOOST_AUTO_TEST_CASE(singlethreadedscheduler_ordered)
{
CScheduler scheduler;
@@ -155,4 +160,45 @@ BOOST_AUTO_TEST_CASE(singlethreadedscheduler_ordered)
BOOST_CHECK_EQUAL(counter2, 100);
}
+BOOST_AUTO_TEST_CASE(mockforward)
+{
+ CScheduler scheduler;
+
+ int counter{0};
+ CScheduler::Function dummy = [&counter]{counter++;};
+
+ // schedule jobs for 2, 5 & 8 minutes into the future
+
+ scheduler.scheduleFromNow(dummy, std::chrono::minutes{2});
+ scheduler.scheduleFromNow(dummy, std::chrono::minutes{5});
+ scheduler.scheduleFromNow(dummy, std::chrono::minutes{8});
+
+ // check taskQueue
+ std::chrono::system_clock::time_point first, last;
+ size_t num_tasks = scheduler.getQueueInfo(first, last);
+ BOOST_CHECK_EQUAL(num_tasks, 3ul);
+
+ std::thread scheduler_thread([&]() { scheduler.serviceQueue(); });
+
+ // bump the scheduler forward 5 minutes
+ scheduler.MockForward(std::chrono::minutes{5});
+
+ // ensure the scheduler has a chance to process all tasks queued for before 1 ms from now.
+ scheduler.scheduleFromNow([&scheduler] { scheduler.stop(false); }, std::chrono::milliseconds{1});
+ scheduler_thread.join();
+
+ // check that the queue only has one job remaining
+ num_tasks = scheduler.getQueueInfo(first, last);
+ BOOST_CHECK_EQUAL(num_tasks, 1ul);
+
+ // check that the dummy function actually ran
+ BOOST_CHECK_EQUAL(counter, 2);
+
+ // check that the time of the remaining job has been updated
+ std::chrono::system_clock::time_point now = std::chrono::system_clock::now();
+ int delta = std::chrono::duration_cast<std::chrono::seconds>(first - now).count();
+ // should be between 2 & 3 minutes from now
+ BOOST_CHECK(delta > 2*60 && delta < 3*60);
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp
index 303bb9b88c..ea600499ca 100644
--- a/src/test/serialize_tests.cpp
+++ b/src/test/serialize_tests.cpp
@@ -182,8 +182,8 @@ BOOST_AUTO_TEST_CASE(varints)
CDataStream ss(SER_DISK, 0);
CDataStream::size_type size = 0;
for (int i = 0; i < 100000; i++) {
- ss << VARINT(i, VarIntMode::NONNEGATIVE_SIGNED);
- size += ::GetSerializeSize(VARINT(i, VarIntMode::NONNEGATIVE_SIGNED), 0);
+ ss << VARINT_MODE(i, VarIntMode::NONNEGATIVE_SIGNED);
+ size += ::GetSerializeSize(VARINT_MODE(i, VarIntMode::NONNEGATIVE_SIGNED), 0);
BOOST_CHECK(size == ss.size());
}
@@ -196,7 +196,7 @@ BOOST_AUTO_TEST_CASE(varints)
// decode
for (int i = 0; i < 100000; i++) {
int j = -1;
- ss >> VARINT(j, VarIntMode::NONNEGATIVE_SIGNED);
+ ss >> VARINT_MODE(j, VarIntMode::NONNEGATIVE_SIGNED);
BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i);
}
@@ -210,21 +210,21 @@ BOOST_AUTO_TEST_CASE(varints)
BOOST_AUTO_TEST_CASE(varints_bitpatterns)
{
CDataStream ss(SER_DISK, 0);
- ss << VARINT(0, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "00"); ss.clear();
- ss << VARINT(0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear();
- ss << VARINT((int8_t)0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear();
- ss << VARINT(0x80, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "8000"); ss.clear();
+ ss << VARINT_MODE(0, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "00"); ss.clear();
+ ss << VARINT_MODE(0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear();
+ ss << VARINT_MODE((int8_t)0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear();
+ ss << VARINT_MODE(0x80, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "8000"); ss.clear();
ss << VARINT((uint8_t)0x80); BOOST_CHECK_EQUAL(HexStr(ss), "8000"); ss.clear();
- ss << VARINT(0x1234, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "a334"); ss.clear();
- ss << VARINT((int16_t)0x1234, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "a334"); ss.clear();
- ss << VARINT(0xffff, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "82fe7f"); ss.clear();
+ ss << VARINT_MODE(0x1234, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "a334"); ss.clear();
+ ss << VARINT_MODE((int16_t)0x1234, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "a334"); ss.clear();
+ ss << VARINT_MODE(0xffff, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "82fe7f"); ss.clear();
ss << VARINT((uint16_t)0xffff); BOOST_CHECK_EQUAL(HexStr(ss), "82fe7f"); ss.clear();
- ss << VARINT(0x123456, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "c7e756"); ss.clear();
- ss << VARINT((int32_t)0x123456, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "c7e756"); ss.clear();
+ ss << VARINT_MODE(0x123456, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "c7e756"); ss.clear();
+ ss << VARINT_MODE((int32_t)0x123456, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "c7e756"); ss.clear();
ss << VARINT(0x80123456U); BOOST_CHECK_EQUAL(HexStr(ss), "86ffc7e756"); ss.clear();
ss << VARINT((uint32_t)0x80123456U); BOOST_CHECK_EQUAL(HexStr(ss), "86ffc7e756"); ss.clear();
ss << VARINT(0xffffffff); BOOST_CHECK_EQUAL(HexStr(ss), "8efefefe7f"); ss.clear();
- ss << VARINT(0x7fffffffffffffffLL, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "fefefefefefefefe7f"); ss.clear();
+ ss << VARINT_MODE(0x7fffffffffffffffLL, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "fefefefefefefefe7f"); ss.clear();
ss << VARINT(0xffffffffffffffffULL); BOOST_CHECK_EQUAL(HexStr(ss), "80fefefefefefefefe7f"); ss.clear();
}
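
The serialize_tests churn above is mechanical: the two-argument form of the VARINT macro becomes VARINT_MODE, while the plain one-argument (unsigned, default-mode) form keeps the VARINT name. A minimal sketch of the resulting convention, using the same macros as the patch:

    CDataStream ss(SER_DISK, 0);
    uint64_t u = 0x80;
    int64_t i = 0x1234;
    ss << VARINT(u);                                      // unsigned value, default mode
    ss << VARINT_MODE(i, VarIntMode::NONNEGATIVE_SIGNED); // signed type, non-negative values only
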
diff --git a/src/test/settings_tests.cpp b/src/test/settings_tests.cpp
index 45644834a5..10b161aa80 100644
--- a/src/test/settings_tests.cpp
+++ b/src/test/settings_tests.cpp
@@ -11,6 +11,7 @@
#include <boost/test/unit_test.hpp>
#include <univalue.h>
#include <util/strencodings.h>
+#include <util/string.h>
#include <vector>
BOOST_FIXTURE_TEST_SUITE(settings_tests, BasicTestingSetup)
@@ -114,7 +115,7 @@ BOOST_FIXTURE_TEST_CASE(Merge, MergeTestingSetup)
std::vector<util::SettingsValue>& dest) {
if (action == SET || action == SECTION_SET) {
for (int i = 0; i < 2; ++i) {
- dest.push_back(value_prefix + std::to_string(++value_suffix));
+ dest.push_back(value_prefix + ToString(++value_suffix));
desc += " " + name_prefix + name + "=" + dest.back().get_str();
}
} else if (action == NEGATE || action == SECTION_NEGATE) {
diff --git a/src/test/timedata_tests.cpp b/src/test/timedata_tests.cpp
index 19bd0d142f..29b43e9bec 100644
--- a/src/test/timedata_tests.cpp
+++ b/src/test/timedata_tests.cpp
@@ -5,6 +5,7 @@
#include <netaddress.h>
#include <noui.h>
+#include <util/string.h>
#include <test/util/logging.h>
#include <test/util/setup_common.h>
#include <timedata.h>
@@ -46,7 +47,7 @@ static void MultiAddTimeData(int n, int64_t offset)
static int cnt = 0;
for (int i = 0; i < n; ++i) {
CNetAddr addr;
- addr.SetInternal(std::to_string(++cnt));
+ addr.SetInternal(ToString(++cnt));
AddTimeData(addr, offset);
}
}
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index fb45ce0ee6..96520079d7 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -22,6 +22,7 @@
#include <script/standard.h>
#include <streams.h>
#include <util/strencodings.h>
+#include <test/util/transaction_utils.h>
#include <map>
#include <string>
@@ -122,10 +123,9 @@ BOOST_AUTO_TEST_CASE(tx_valid)
std::map<COutPoint, int64_t> mapprevOutValues;
UniValue inputs = test[0].get_array();
bool fValid = true;
- for (unsigned int inpIdx = 0; inpIdx < inputs.size(); inpIdx++) {
- const UniValue& input = inputs[inpIdx];
- if (!input.isArray())
- {
+ for (unsigned int inpIdx = 0; inpIdx < inputs.size(); inpIdx++) {
+ const UniValue& input = inputs[inpIdx];
+ if (!input.isArray()) {
fValid = false;
break;
}
@@ -209,10 +209,9 @@ BOOST_AUTO_TEST_CASE(tx_invalid)
std::map<COutPoint, int64_t> mapprevOutValues;
UniValue inputs = test[0].get_array();
bool fValid = true;
- for (unsigned int inpIdx = 0; inpIdx < inputs.size(); inpIdx++) {
- const UniValue& input = inputs[inpIdx];
- if (!input.isArray())
- {
+ for (unsigned int inpIdx = 0; inpIdx < inputs.size(); inpIdx++) {
+ const UniValue& input = inputs[inpIdx];
+ if (!input.isArray()) {
fValid = false;
break;
}
@@ -282,50 +281,13 @@ BOOST_AUTO_TEST_CASE(basic_transaction_tests)
BOOST_CHECK_MESSAGE(!CheckTransaction(CTransaction(tx), state) || !state.IsValid(), "Transaction with duplicate txins should be invalid.");
}
-//
-// Helper: create two dummy transactions, each with
-// two outputs. The first has 11 and 50 CENT outputs
-// paid to a TX_PUBKEY, the second 21 and 22 CENT outputs
-// paid to a TX_PUBKEYHASH.
-//
-static std::vector<CMutableTransaction>
-SetupDummyInputs(FillableSigningProvider& keystoreRet, CCoinsViewCache& coinsRet)
-{
- std::vector<CMutableTransaction> dummyTransactions;
- dummyTransactions.resize(2);
-
- // Add some keys to the keystore:
- CKey key[4];
- for (int i = 0; i < 4; i++)
- {
- key[i].MakeNewKey(i % 2);
- keystoreRet.AddKey(key[i]);
- }
-
- // Create some dummy input transactions
- dummyTransactions[0].vout.resize(2);
- dummyTransactions[0].vout[0].nValue = 11*CENT;
- dummyTransactions[0].vout[0].scriptPubKey << ToByteVector(key[0].GetPubKey()) << OP_CHECKSIG;
- dummyTransactions[0].vout[1].nValue = 50*CENT;
- dummyTransactions[0].vout[1].scriptPubKey << ToByteVector(key[1].GetPubKey()) << OP_CHECKSIG;
- AddCoins(coinsRet, CTransaction(dummyTransactions[0]), 0);
-
- dummyTransactions[1].vout.resize(2);
- dummyTransactions[1].vout[0].nValue = 21*CENT;
- dummyTransactions[1].vout[0].scriptPubKey = GetScriptForDestination(PKHash(key[2].GetPubKey()));
- dummyTransactions[1].vout[1].nValue = 22*CENT;
- dummyTransactions[1].vout[1].scriptPubKey = GetScriptForDestination(PKHash(key[3].GetPubKey()));
- AddCoins(coinsRet, CTransaction(dummyTransactions[1]), 0);
-
- return dummyTransactions;
-}
-
BOOST_AUTO_TEST_CASE(test_Get)
{
FillableSigningProvider keystore;
CCoinsView coinsDummy;
CCoinsViewCache coins(&coinsDummy);
- std::vector<CMutableTransaction> dummyTransactions = SetupDummyInputs(keystore, coins);
+ std::vector<CMutableTransaction> dummyTransactions =
+ SetupDummyInputs(keystore, coins, {11*CENT, 50*CENT, 21*CENT, 22*CENT});
CMutableTransaction t1;
t1.vin.resize(3);
@@ -685,7 +647,8 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
FillableSigningProvider keystore;
CCoinsView coinsDummy;
CCoinsViewCache coins(&coinsDummy);
- std::vector<CMutableTransaction> dummyTransactions = SetupDummyInputs(keystore, coins);
+ std::vector<CMutableTransaction> dummyTransactions =
+ SetupDummyInputs(keystore, coins, {11*CENT, 50*CENT, 21*CENT, 22*CENT});
CMutableTransaction t;
t.vin.resize(1);
@@ -821,9 +784,63 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
BOOST_CHECK_EQUAL(reason, "scriptsig-size");
+ // Check scriptSig format (non-standard if it contains any ops other than pushes)
+ t.vin[0].scriptSig = CScript()
+ << OP_TRUE << OP_0 << OP_1NEGATE << OP_16 // OP_n (single byte pushes: n = 1, 0, -1, 16)
+ << std::vector<unsigned char>(75, 0) // OP_PUSHx [...x bytes...]
+ << std::vector<unsigned char>(235, 0) // OP_PUSHDATA1 x [...x bytes...]
+ << std::vector<unsigned char>(1234, 0) // OP_PUSHDATA2 x [...x bytes...]
+ << OP_9;
+ BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+
+ const std::vector<unsigned char> non_push_ops = { // arbitrary set of non-push operations
+ OP_NOP, OP_VERIFY, OP_IF, OP_ROT, OP_3DUP, OP_SIZE, OP_EQUAL, OP_ADD, OP_SUB,
+ OP_HASH256, OP_CODESEPARATOR, OP_CHECKSIG, OP_CHECKLOCKTIMEVERIFY };
+
+ CScript::const_iterator pc = t.vin[0].scriptSig.begin();
+ while (pc < t.vin[0].scriptSig.end()) {
+ opcodetype opcode;
+ CScript::const_iterator prev_pc = pc;
+ t.vin[0].scriptSig.GetOp(pc, opcode); // advance to next op
+ // for the sake of simplicity, we only replace single-byte push operations
+ if (opcode >= 1 && opcode <= OP_PUSHDATA4)
+ continue;
+
+ int index = prev_pc - t.vin[0].scriptSig.begin();
+ unsigned char orig_op = *prev_pc; // save op
+ // replace current push-op with each non-push-op
+ for (auto op : non_push_ops) {
+ t.vin[0].scriptSig[index] = op;
+ BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
+ BOOST_CHECK_EQUAL(reason, "scriptsig-not-pushonly");
+ }
+ t.vin[0].scriptSig[index] = orig_op; // restore op
+ BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+ }
+
+ // Check tx-size (non-standard if transaction weight is > MAX_STANDARD_TX_WEIGHT)
+ t.vin.clear();
+ t.vin.resize(2438); // size per input (empty scriptSig): 41 bytes
+ t.vout[0].scriptPubKey = CScript() << OP_RETURN << std::vector<unsigned char>(19, 0); // output size: 30 bytes
+ // tx header: 12 bytes => 48 weight units
+ // 2438 inputs: 2438*41 = 99958 bytes => 399832 weight units
+ // 1 output: 30 bytes => 120 weight units
+ // ===============================
+ // total: 400000 weight units
+ BOOST_CHECK_EQUAL(GetTransactionWeight(CTransaction(t)), 400000);
+ BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
+
+ // increase output size by one byte, so we end up with 400004 weight units
+ t.vout[0].scriptPubKey = CScript() << OP_RETURN << std::vector<unsigned char>(20, 0); // output size: 31 bytes
+ BOOST_CHECK_EQUAL(GetTransactionWeight(CTransaction(t)), 400004);
+ reason.clear();
+ BOOST_CHECK(!IsStandardTx(CTransaction(t), reason));
+ BOOST_CHECK_EQUAL(reason, "tx-size");
+
// Check bare multisig (standard if policy flag fIsBareMultisigStd is set)
fIsBareMultisigStd = true;
t.vout[0].scriptPubKey = GetScriptForMultisig(1, {key.GetPubKey()}); // simple 1-of-1
+ t.vin.resize(1);
t.vin[0].scriptSig = CScript() << std::vector<unsigned char>(65, 0);
BOOST_CHECK(IsStandardTx(CTransaction(t), reason));
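
As a cross-check of the byte counts in the new tx-size test: for a transaction without witness data the weight equals four times the serialized size, so the numbers line up as follows (arithmetic only, mirroring the test's comments):

    // 12 (header) + 2438 * 41 (inputs) + 30 (output) = 100000 serialized bytes
    // no witness data  =>  weight = 4 * 100000 = 400000 = MAX_STANDARD_TX_WEIGHT
    static_assert(12 + 2438 * 41 + 30 == 100000, "byte counts used in the tx-size test");
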
diff --git a/src/test/txindex_tests.cpp b/src/test/txindex_tests.cpp
index 4b0214a15a..3550a02316 100644
--- a/src/test/txindex_tests.cpp
+++ b/src/test/txindex_tests.cpp
@@ -34,7 +34,7 @@ BOOST_FIXTURE_TEST_CASE(txindex_initial_sync, TestChain100Setup)
int64_t time_start = GetTimeMillis();
while (!txindex.BlockUntilSyncedToCurrentChain()) {
BOOST_REQUIRE(time_start + timeout_ms > GetTimeMillis());
- MilliSleep(100);
+ UninterruptibleSleep(std::chrono::milliseconds{100});
}
// Check that txindex excludes genesis block transactions.
@@ -70,6 +70,8 @@ BOOST_FIXTURE_TEST_CASE(txindex_initial_sync, TestChain100Setup)
// shutdown sequence (c.f. Shutdown() in init.cpp)
txindex.Stop();
+ // a txindex job may be scheduled, so stop the scheduler before destructing it
+ m_node.scheduler->stop();
threadGroup.interrupt_all();
threadGroup.join_all();
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index ccb3064d59..d684b97787 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -13,6 +13,7 @@
#include <init.h>
#include <miner.h>
#include <net.h>
+#include <net_processing.h>
#include <noui.h>
#include <pow.h>
#include <rpc/blockchain.h>
@@ -23,9 +24,9 @@
#include <txdb.h>
#include <util/memory.h>
#include <util/strencodings.h>
+#include <util/string.h>
#include <util/time.h>
#include <util/translation.h>
-#include <util/validation.h>
#include <validation.h>
#include <validationinterface.h>
@@ -63,7 +64,7 @@ std::ostream& operator<<(std::ostream& os, const uint256& num)
}
BasicTestingSetup::BasicTestingSetup(const std::string& chainName)
- : m_path_root{fs::temp_directory_path() / "test_common_" PACKAGE_NAME / std::to_string(g_insecure_rand_ctx_temp_path.rand32())}
+ : m_path_root{fs::temp_directory_path() / "test_common_" PACKAGE_NAME / g_insecure_rand_ctx_temp_path.rand256().ToString()}
{
fs::create_directories(m_path_root);
gArgs.ForceSetArg("-datadir", m_path_root.string());
@@ -103,10 +104,12 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha
g_rpc_node = &m_node;
RegisterAllCoreRPCCommands(tableRPC);
+ m_node.scheduler = MakeUnique<CScheduler>();
+
// We have to run a scheduler thread to prevent ActivateBestChain
// from blocking due to queue overrun.
- threadGroup.create_thread(std::bind(&CScheduler::serviceQueue, &scheduler));
- GetMainSignals().RegisterBackgroundSignalScheduler(scheduler);
+ threadGroup.create_thread([&]{ m_node.scheduler->serviceQueue(); });
+ GetMainSignals().RegisterBackgroundSignalScheduler(*g_rpc_node->scheduler);
pblocktree.reset(new CBlockTreeDB(1 << 20, true));
g_chainstate = MakeUnique<CChainState>();
@@ -121,7 +124,7 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha
BlockValidationState state;
if (!ActivateBestChain(state, chainparams)) {
- throw std::runtime_error(strprintf("ActivateBestChain failed. (%s)", FormatStateMessage(state)));
+ throw std::runtime_error(strprintf("ActivateBestChain failed. (%s)", state.ToString()));
}
// Start script-checking threads. Set g_parallel_script_checks to true so they are used.
@@ -135,10 +138,12 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha
m_node.mempool->setSanityCheck(1.0);
m_node.banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
m_node.connman = MakeUnique<CConnman>(0x1337, 0x1337); // Deterministic randomness for tests.
+ m_node.peer_logic = MakeUnique<PeerLogicValidation>(m_node.connman.get(), m_node.banman.get(), *m_node.scheduler, *m_node.mempool);
}
TestingSetup::~TestingSetup()
{
+ if (m_node.scheduler) m_node.scheduler->stop();
threadGroup.interrupt_all();
threadGroup.join_all();
GetMainSignals().FlushBackgroundCallbacks();
@@ -147,6 +152,7 @@ TestingSetup::~TestingSetup()
m_node.connman.reset();
m_node.banman.reset();
m_node.mempool = nullptr;
+ m_node.scheduler.reset();
UnloadBlockIndex();
g_chainstate.reset();
pblocktree.reset();
diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h
index 6741be8480..0930309c3a 100644
--- a/src/test/util/setup_common.h
+++ b/src/test/util/setup_common.h
@@ -13,6 +13,7 @@
#include <random.h>
#include <scheduler.h>
#include <txmempool.h>
+#include <util/string.h>
#include <type_traits>
@@ -85,7 +86,6 @@ private:
struct TestingSetup : public BasicTestingSetup {
NodeContext m_node;
boost::thread_group threadGroup;
- CScheduler scheduler;
explicit TestingSetup(const std::string& chainName = CBaseChainParams::MAIN);
~TestingSetup();
diff --git a/src/test/util/transaction_utils.cpp b/src/test/util/transaction_utils.cpp
index 90b78effb0..999b803a8d 100644
--- a/src/test/util/transaction_utils.cpp
+++ b/src/test/util/transaction_utils.cpp
@@ -3,6 +3,8 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <test/util/transaction_utils.h>
+#include <coins.h>
+#include <script/signingprovider.h>
CMutableTransaction BuildCreditingTransaction(const CScript& scriptPubKey, int nValue)
{
@@ -37,3 +39,33 @@ CMutableTransaction BuildSpendingTransaction(const CScript& scriptSig, const CSc
return txSpend;
}
+
+std::vector<CMutableTransaction> SetupDummyInputs(FillableSigningProvider& keystoreRet, CCoinsViewCache& coinsRet, const std::array<CAmount,4>& nValues)
+{
+ std::vector<CMutableTransaction> dummyTransactions;
+ dummyTransactions.resize(2);
+
+ // Add some keys to the keystore:
+ CKey key[4];
+ for (int i = 0; i < 4; i++) {
+ key[i].MakeNewKey(i % 2);
+ keystoreRet.AddKey(key[i]);
+ }
+
+ // Create some dummy input transactions
+ dummyTransactions[0].vout.resize(2);
+ dummyTransactions[0].vout[0].nValue = nValues[0];
+ dummyTransactions[0].vout[0].scriptPubKey << ToByteVector(key[0].GetPubKey()) << OP_CHECKSIG;
+ dummyTransactions[0].vout[1].nValue = nValues[1];
+ dummyTransactions[0].vout[1].scriptPubKey << ToByteVector(key[1].GetPubKey()) << OP_CHECKSIG;
+ AddCoins(coinsRet, CTransaction(dummyTransactions[0]), 0);
+
+ dummyTransactions[1].vout.resize(2);
+ dummyTransactions[1].vout[0].nValue = nValues[2];
+ dummyTransactions[1].vout[0].scriptPubKey = GetScriptForDestination(PKHash(key[2].GetPubKey()));
+ dummyTransactions[1].vout[1].nValue = nValues[3];
+ dummyTransactions[1].vout[1].scriptPubKey = GetScriptForDestination(PKHash(key[3].GetPubKey()));
+ AddCoins(coinsRet, CTransaction(dummyTransactions[1]), 0);
+
+ return dummyTransactions;
+}
diff --git a/src/test/util/transaction_utils.h b/src/test/util/transaction_utils.h
index 57604646e7..f843928a5f 100644
--- a/src/test/util/transaction_utils.h
+++ b/src/test/util/transaction_utils.h
@@ -7,6 +7,11 @@
#include <primitives/transaction.h>
+#include <array>
+
+class FillableSigningProvider;
+class CCoinsViewCache;
+
// create crediting transaction
// [1 coinbase input => 1 output with given scriptPubkey and value]
CMutableTransaction BuildCreditingTransaction(const CScript& scriptPubKey, int nValue = 0);
@@ -16,4 +21,9 @@ CMutableTransaction BuildCreditingTransaction(const CScript& scriptPubKey, int n
// 1 output with empty scriptPubKey, full value of referenced transaction]
CMutableTransaction BuildSpendingTransaction(const CScript& scriptSig, const CScriptWitness& scriptWitness, const CTransaction& txCredit);
+// Helper: create two dummy transactions, each with two outputs.
+// The first has nValues[0] and nValues[1] outputs paid to a TX_PUBKEY,
+// the second nValues[2] and nValues[3] outputs paid to a TX_PUBKEYHASH.
+std::vector<CMutableTransaction> SetupDummyInputs(FillableSigningProvider& keystoreRet, CCoinsViewCache& coinsRet, const std::array<CAmount,4>& nValues);
+
#endif // BITCOIN_TEST_UTIL_TRANSACTION_UTILS_H
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index 42c2c50fa5..73b37f909f 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -5,10 +5,14 @@
#include <util/system.h>
#include <clientversion.h>
+#include <hash.h> // For Hash()
+#include <key.h> // For CKey
#include <optional.h>
#include <sync.h>
#include <test/util/setup_common.h>
#include <test/util/str.h>
+#include <uint256.h>
+#include <util/message.h> // For MessageSign(), MessageVerify(), MESSAGE_MAGIC
#include <util/moneystr.h>
#include <util/strencodings.h>
#include <util/string.h>
@@ -16,6 +20,7 @@
#include <util/spanparsing.h>
#include <util/vector.h>
+#include <array>
#include <stdint.h>
#include <thread>
#include <univalue.h>
@@ -889,7 +894,7 @@ struct ArgsMergeTestingSetup : public BasicTestingSetup {
if (action == SECTION_SET || action == SECTION_NEGATE) prefix = section + ".";
if (action == SET || action == SECTION_SET) {
for (int i = 0; i < 2; ++i) {
- values.push_back(prefix + name + "=" + value_prefix + std::to_string(++suffix));
+ values.push_back(prefix + name + "=" + value_prefix + ToString(++suffix));
}
}
if (action == NEGATE || action == SECTION_NEGATE) {
@@ -1177,6 +1182,12 @@ BOOST_AUTO_TEST_CASE(util_ParseMoney)
BOOST_CHECK_EQUAL(ret, COIN);
BOOST_CHECK(ParseMoney("1", ret));
BOOST_CHECK_EQUAL(ret, COIN);
+ BOOST_CHECK(ParseMoney(" 1", ret));
+ BOOST_CHECK_EQUAL(ret, COIN);
+ BOOST_CHECK(ParseMoney("1 ", ret));
+ BOOST_CHECK_EQUAL(ret, COIN);
+ BOOST_CHECK(ParseMoney(" 1 ", ret));
+ BOOST_CHECK_EQUAL(ret, COIN);
BOOST_CHECK(ParseMoney("0.1", ret));
BOOST_CHECK_EQUAL(ret, COIN/10);
BOOST_CHECK(ParseMoney("0.01", ret));
@@ -1193,6 +1204,26 @@ BOOST_AUTO_TEST_CASE(util_ParseMoney)
BOOST_CHECK_EQUAL(ret, COIN/10000000);
BOOST_CHECK(ParseMoney("0.00000001", ret));
BOOST_CHECK_EQUAL(ret, COIN/100000000);
+ BOOST_CHECK(ParseMoney(" 0.00000001 ", ret));
+ BOOST_CHECK_EQUAL(ret, COIN/100000000);
+ BOOST_CHECK(ParseMoney("0.00000001 ", ret));
+ BOOST_CHECK_EQUAL(ret, COIN/100000000);
+ BOOST_CHECK(ParseMoney(" 0.00000001", ret));
+ BOOST_CHECK_EQUAL(ret, COIN/100000000);
+
+ // Parsing an amount that cannot be represented in ret should fail
+ BOOST_CHECK(!ParseMoney("0.000000001", ret));
+
+ // Parsing empty string should fail
+ BOOST_CHECK(!ParseMoney("", ret));
+ BOOST_CHECK(!ParseMoney(" ", ret));
+ BOOST_CHECK(!ParseMoney(" ", ret));
+
+ // Parsing two numbers should fail
+ BOOST_CHECK(!ParseMoney("1 2", ret));
+ BOOST_CHECK(!ParseMoney(" 1 2 ", ret));
+ BOOST_CHECK(!ParseMoney(" 1.2 3 ", ret));
+ BOOST_CHECK(!ParseMoney(" 1 2.3 ", ret));
// Attempted 63 bit overflow should fail
BOOST_CHECK(!ParseMoney("92233720368.54775808", ret));
@@ -1322,7 +1353,7 @@ BOOST_AUTO_TEST_CASE(util_time_GetTime)
SetMockTime(111);
// Check that mock time does not change after a sleep
for (const auto& num_sleep : {0, 1}) {
- MilliSleep(num_sleep);
+ UninterruptibleSleep(std::chrono::milliseconds{num_sleep});
BOOST_CHECK_EQUAL(111, GetTime()); // Deprecated time getter
BOOST_CHECK_EQUAL(111, GetTime<std::chrono::seconds>().count());
BOOST_CHECK_EQUAL(111000, GetTime<std::chrono::milliseconds>().count());
@@ -1333,7 +1364,7 @@ BOOST_AUTO_TEST_CASE(util_time_GetTime)
// Check that system time changes after a sleep
const auto ms_0 = GetTime<std::chrono::milliseconds>();
const auto us_0 = GetTime<std::chrono::microseconds>();
- MilliSleep(1);
+ UninterruptibleSleep(std::chrono::milliseconds{1});
BOOST_CHECK(ms_0 < GetTime<std::chrono::milliseconds>());
BOOST_CHECK(us_0 < GetTime<std::chrono::microseconds>());
}
@@ -2025,4 +2056,109 @@ BOOST_AUTO_TEST_CASE(test_tracked_vector)
BOOST_CHECK_EQUAL(v8[2].copies, 0);
}
+BOOST_AUTO_TEST_CASE(message_sign)
+{
+ const std::array<unsigned char, 32> privkey_bytes = {
+ // just some random data
+ // derived address from this private key: 15CRxFdyRpGZLW9w8HnHvVduizdL5jKNbs
+ 0xD9, 0x7F, 0x51, 0x08, 0xF1, 0x1C, 0xDA, 0x6E,
+ 0xEE, 0xBA, 0xAA, 0x42, 0x0F, 0xEF, 0x07, 0x26,
+ 0xB1, 0xF8, 0x98, 0x06, 0x0B, 0x98, 0x48, 0x9F,
+ 0xA3, 0x09, 0x84, 0x63, 0xC0, 0x03, 0x28, 0x66
+ };
+
+ const std::string message = "Trust no one";
+
+ const std::string expected_signature =
+ "IPojfrX2dfPnH26UegfbGQQLrdK844DlHq5157/P6h57WyuS/Qsl+h/WSVGDF4MUi4rWSswW38oimDYfNNUBUOk=";
+
+ CKey privkey;
+ std::string generated_signature;
+
+ BOOST_REQUIRE_MESSAGE(!privkey.IsValid(),
+ "Confirm the private key is invalid");
+
+ BOOST_CHECK_MESSAGE(!MessageSign(privkey, message, generated_signature),
+ "Sign with an invalid private key");
+
+ privkey.Set(privkey_bytes.begin(), privkey_bytes.end(), true);
+
+ BOOST_REQUIRE_MESSAGE(privkey.IsValid(),
+ "Confirm the private key is valid");
+
+ BOOST_CHECK_MESSAGE(MessageSign(privkey, message, generated_signature),
+ "Sign with a valid private key");
+
+ BOOST_CHECK_EQUAL(expected_signature, generated_signature);
+}
+
+BOOST_AUTO_TEST_CASE(message_verify)
+{
+ BOOST_CHECK_EQUAL(
+ MessageVerify(
+ "invalid address",
+ "signature should be irrelevant",
+ "message too"),
+ MessageVerificationResult::ERR_INVALID_ADDRESS);
+
+ BOOST_CHECK_EQUAL(
+ MessageVerify(
+ "3B5fQsEXEaV8v6U3ejYc8XaKXAkyQj2MjV",
+ "signature should be irrelevant",
+ "message too"),
+ MessageVerificationResult::ERR_ADDRESS_NO_KEY);
+
+ BOOST_CHECK_EQUAL(
+ MessageVerify(
+ "1KqbBpLy5FARmTPD4VZnDDpYjkUvkr82Pm",
+ "invalid signature, not in base64 encoding",
+ "message should be irrelevant"),
+ MessageVerificationResult::ERR_MALFORMED_SIGNATURE);
+
+ BOOST_CHECK_EQUAL(
+ MessageVerify(
+ "1KqbBpLy5FARmTPD4VZnDDpYjkUvkr82Pm",
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=",
+ "message should be irrelevant"),
+ MessageVerificationResult::ERR_PUBKEY_NOT_RECOVERED);
+
+ BOOST_CHECK_EQUAL(
+ MessageVerify(
+ "15CRxFdyRpGZLW9w8HnHvVduizdL5jKNbs",
+ "IPojfrX2dfPnH26UegfbGQQLrdK844DlHq5157/P6h57WyuS/Qsl+h/WSVGDF4MUi4rWSswW38oimDYfNNUBUOk=",
+ "I never signed this"),
+ MessageVerificationResult::ERR_NOT_SIGNED);
+
+ BOOST_CHECK_EQUAL(
+ MessageVerify(
+ "15CRxFdyRpGZLW9w8HnHvVduizdL5jKNbs",
+ "IPojfrX2dfPnH26UegfbGQQLrdK844DlHq5157/P6h57WyuS/Qsl+h/WSVGDF4MUi4rWSswW38oimDYfNNUBUOk=",
+ "Trust no one"),
+ MessageVerificationResult::OK);
+
+ BOOST_CHECK_EQUAL(
+ MessageVerify(
+ "11canuhp9X2NocwCq7xNrQYTmUgZAnLK3",
+ "IIcaIENoYW5jZWxsb3Igb24gYnJpbmsgb2Ygc2Vjb25kIGJhaWxvdXQgZm9yIGJhbmtzIAaHRtbCeDZINyavx14=",
+ "Trust me"),
+ MessageVerificationResult::OK);
+}
+
+BOOST_AUTO_TEST_CASE(message_hash)
+{
+ const std::string unsigned_tx = "...";
+ const std::string prefixed_message =
+ std::string(1, (char)MESSAGE_MAGIC.length()) +
+ MESSAGE_MAGIC +
+ std::string(1, (char)unsigned_tx.length()) +
+ unsigned_tx;
+
+ const uint256 signature_hash = Hash(unsigned_tx.begin(), unsigned_tx.end());
+ const uint256 message_hash1 = Hash(prefixed_message.begin(), prefixed_message.end());
+ const uint256 message_hash2 = MessageHash(unsigned_tx);
+
+ BOOST_CHECK_EQUAL(message_hash1, message_hash2);
+ BOOST_CHECK_NE(message_hash1, signature_hash);
+}
+
BOOST_AUTO_TEST_SUITE_END()
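
The message_hash test builds the hash preimage by hand; schematically, the layout it checks MessageHash() against is the following (both lengths here are below 253, so each CompactSize prefix is the single byte the test writes):

    // MessageHash(msg) == double-SHA256( CompactSize(MESSAGE_MAGIC.size()) || MESSAGE_MAGIC
    //                                 || CompactSize(msg.size())           || msg )
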
diff --git a/src/test/util_threadnames_tests.cpp b/src/test/util_threadnames_tests.cpp
index 78dbf848bb..cee4e0ce3c 100644
--- a/src/test/util_threadnames_tests.cpp
+++ b/src/test/util_threadnames_tests.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <util/string.h>
#include <util/threadnames.h>
#include <test/util/setup_common.h>
@@ -32,7 +33,7 @@ std::set<std::string> RenameEnMasse(int num_threads)
std::mutex lock;
auto RenameThisThread = [&](int i) {
- util::ThreadRename(TEST_THREAD_NAME_BASE + std::to_string(i));
+ util::ThreadRename(TEST_THREAD_NAME_BASE + ToString(i));
std::lock_guard<std::mutex> guard(lock);
names.insert(util::ThreadGetInternalName());
};
@@ -65,7 +66,7 @@ BOOST_AUTO_TEST_CASE(util_threadnames_test_rename_threaded)
// Names "test_thread.[n]" should exist for n = [0, 99]
for (int i = 0; i < 100; ++i) {
- BOOST_CHECK(names.find(TEST_THREAD_NAME_BASE + std::to_string(i)) != names.end());
+ BOOST_CHECK(names.find(TEST_THREAD_NAME_BASE + ToString(i)) != names.end());
}
}
diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp
index dae389a167..afb3db36a2 100644
--- a/src/test/validation_block_tests.cpp
+++ b/src/test/validation_block_tests.cpp
@@ -42,7 +42,7 @@ struct TestSubscriber : public CValidationInterface {
BOOST_CHECK_EQUAL(m_expected_tip, pindexNew->GetBlockHash());
}
- void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex, const std::vector<CTransactionRef>& txnConflicted) override
+ void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex) override
{
BOOST_CHECK_EQUAL(m_expected_tip, block->hashPrevBlock);
BOOST_CHECK_EQUAL(m_expected_tip, pindex->pprev->GetBlockHash());
@@ -205,7 +205,7 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering)
t.join();
}
while (GetMainSignals().CallbacksPending() > 0) {
- MilliSleep(100);
+ UninterruptibleSleep(std::chrono::milliseconds{100});
}
UnregisterValidationInterface(&sub);
diff --git a/src/test/validation_flush_tests.cpp b/src/test/validation_flush_tests.cpp
index ab8b957f7d..c24164528f 100644
--- a/src/test/validation_flush_tests.cpp
+++ b/src/test/validation_flush_tests.cpp
@@ -2,10 +2,10 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
//
-#include <txmempool.h>
-#include <validation.h>
#include <sync.h>
#include <test/util/setup_common.h>
+#include <txmempool.h>
+#include <validation.h>
#include <boost/test/unit_test.hpp>
@@ -85,12 +85,7 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
// This is contingent not only on the dynamic memory usage of the Coins
// that we're adding (COIN_SIZE bytes per), but also on how much memory the
// cacheCoins (unordered_map) preallocates.
- //
- // I came up with the count by examining the printed memory usage of the
- // CCoinsCacheView, so it's sort of arbitrary - but it shouldn't change
- // unless we somehow change the way the cacheCoins map allocates memory.
- //
- constexpr int COINS_UNTIL_CRITICAL = is_64_bit ? 4 : 5;
+ constexpr int COINS_UNTIL_CRITICAL{3};
for (int i{0}; i < COINS_UNTIL_CRITICAL; ++i) {
COutPoint res = add_coin(view);
@@ -101,17 +96,14 @@ BOOST_AUTO_TEST_CASE(getcoinscachesizestate)
CoinsCacheSizeState::OK);
}
- // Adding an additional coin will push us over the edge to CRITICAL.
- add_coin(view);
- print_view_mem_usage(view);
-
- auto size_state = chainstate.GetCoinsCacheSizeState(
- tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0);
-
- if (!is_64_bit && size_state == CoinsCacheSizeState::LARGE) {
- // On 32 bit hosts, we may hit LARGE before CRITICAL.
+ // Adding some additional coins will push us over the edge to CRITICAL.
+ for (int i{0}; i < 4; ++i) {
add_coin(view);
print_view_mem_usage(view);
+ if (chainstate.GetCoinsCacheSizeState(tx_pool, MAX_COINS_CACHE_BYTES, /*max_mempool_size_bytes*/ 0) ==
+ CoinsCacheSizeState::CRITICAL) {
+ break;
+ }
}
BOOST_CHECK_EQUAL(
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 35bbdab00d..acc47ab45e 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -339,7 +339,7 @@ public:
::Unserialize(s, Using<TxOutCompression>(vout[i]));
}
// coinbase height
- ::Unserialize(s, VARINT(nHeight, VarIntMode::NONNEGATIVE_SIGNED));
+ ::Unserialize(s, VARINT_MODE(nHeight, VarIntMode::NONNEGATIVE_SIGNED));
}
};
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 5768219f3a..47b0d39ea4 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -355,7 +355,6 @@ void CTxMemPool::AddTransactionsUpdated(unsigned int n)
void CTxMemPool::addUnchecked(const CTxMemPoolEntry &entry, setEntries &setAncestors, bool validFeeEstimate)
{
- NotifyEntryAdded(entry.GetSharedTx());
// Add to memory pool without checking anything.
// Used by AcceptToMemoryPool(), which DOES do
// all the appropriate checks.
@@ -406,10 +405,12 @@ void CTxMemPool::addUnchecked(const CTxMemPoolEntry &entry, setEntries &setAnces
void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason)
{
- CTransactionRef ptx = it->GetSharedTx();
- NotifyEntryRemoved(ptx, reason);
- if (reason != MemPoolRemovalReason::BLOCK && reason != MemPoolRemovalReason::CONFLICT) {
- GetMainSignals().TransactionRemovedFromMempool(ptx);
+ if (reason != MemPoolRemovalReason::BLOCK) {
+ // Notify clients that a transaction has been removed from the mempool
+ // for any reason except being included in a block. Clients interested
+ // in transactions included in blocks can subscribe to the BlockConnected
+ // notification.
+ GetMainSignals().TransactionRemovedFromMempool(it->GetSharedTx());
}
const uint256 hash = it->GetTx().GetHash();
diff --git a/src/txmempool.h b/src/txmempool.h
index de11d626b4..3dae0a04c7 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -27,7 +27,6 @@
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/sequenced_index.hpp>
-#include <boost/signals2/signal.hpp>
class CBlockIndex;
extern RecursiveMutex cs_main;
@@ -699,9 +698,6 @@ public:
size_t DynamicMemoryUsage() const;
- boost::signals2::signal<void (CTransactionRef)> NotifyEntryAdded;
- boost::signals2::signal<void (CTransactionRef, MemPoolRemovalReason)> NotifyEntryRemoved;
-
private:
/** UpdateForDescendants is used by UpdateTransactionsFromBlock to update
* the descendants for a single transaction that has been added to the
@@ -753,7 +749,7 @@ public:
* determine if that transaction has not yet been visited during the current
* traversal's epoch.
* Algorithms using std::set can be replaced on a one by one basis.
- * Both techniques are not fundamentally incomaptible across the codebase.
+ * Both techniques are not fundamentally incompatible across the codebase.
* Generally speaking, however, the remaining use of std::set for mempool
* traversal should be viewed as a TODO for replacement with an epoch based
* traversal, rather than a preference for std::set over epochs in that
diff --git a/src/undo.h b/src/undo.h
index 2009c721ab..47f132c7d8 100644
--- a/src/undo.h
+++ b/src/undo.h
@@ -13,58 +13,42 @@
#include <serialize.h>
#include <version.h>
-/** Undo information for a CTxIn
+/** Formatter for undo information for a CTxIn
*
* Contains the prevout's CTxOut being spent, and its metadata as well
* (coinbase or not, height). The serialization contains a dummy value of
* zero. This is compatible with older versions which expect to see
* the transaction version there.
*/
-class TxInUndoSerializer
+struct TxInUndoFormatter
{
- const Coin* txout;
-
-public:
template<typename Stream>
- void Serialize(Stream &s) const {
- ::Serialize(s, VARINT(txout->nHeight * 2 + (txout->fCoinBase ? 1u : 0u)));
- if (txout->nHeight > 0) {
+ void Ser(Stream &s, const Coin& txout) {
+ ::Serialize(s, VARINT(txout.nHeight * 2 + (txout.fCoinBase ? 1u : 0u)));
+ if (txout.nHeight > 0) {
// Required to maintain compatibility with older undo format.
::Serialize(s, (unsigned char)0);
}
- ::Serialize(s, Using<TxOutCompression>(REF(txout->out)));
+ ::Serialize(s, Using<TxOutCompression>(txout.out));
}
- explicit TxInUndoSerializer(const Coin* coin) : txout(coin) {}
-};
-
-class TxInUndoDeserializer
-{
- Coin* txout;
-
-public:
template<typename Stream>
- void Unserialize(Stream &s) {
+ void Unser(Stream &s, Coin& txout) {
unsigned int nCode = 0;
::Unserialize(s, VARINT(nCode));
- txout->nHeight = nCode / 2;
- txout->fCoinBase = nCode & 1;
- if (txout->nHeight > 0) {
+ txout.nHeight = nCode / 2;
+ txout.fCoinBase = nCode & 1;
+ if (txout.nHeight > 0) {
// Old versions stored the version number for the last spend of
// a transaction's outputs. Non-final spends were indicated with
// height = 0.
unsigned int nVersionDummy;
::Unserialize(s, VARINT(nVersionDummy));
}
- ::Unserialize(s, Using<TxOutCompression>(REF(txout->out)));
+ ::Unserialize(s, Using<TxOutCompression>(txout.out));
}
-
- explicit TxInUndoDeserializer(Coin* coin) : txout(coin) {}
};
-static const size_t MIN_TRANSACTION_INPUT_WEIGHT = WITNESS_SCALE_FACTOR * ::GetSerializeSize(CTxIn(), PROTOCOL_VERSION);
-static const size_t MAX_INPUTS_PER_BLOCK = MAX_BLOCK_WEIGHT / MIN_TRANSACTION_INPUT_WEIGHT;
-
/** Undo information for a CTransaction */
class CTxUndo
{
@@ -72,29 +56,7 @@ public:
// undo information for all txins
std::vector<Coin> vprevout;
- template <typename Stream>
- void Serialize(Stream& s) const {
- // TODO: avoid reimplementing vector serializer
- uint64_t count = vprevout.size();
- ::Serialize(s, COMPACTSIZE(REF(count)));
- for (const auto& prevout : vprevout) {
- ::Serialize(s, TxInUndoSerializer(&prevout));
- }
- }
-
- template <typename Stream>
- void Unserialize(Stream& s) {
- // TODO: avoid reimplementing vector deserializer
- uint64_t count = 0;
- ::Unserialize(s, COMPACTSIZE(count));
- if (count > MAX_INPUTS_PER_BLOCK) {
- throw std::ios_base::failure("Too many input undo records");
- }
- vprevout.resize(count);
- for (auto& prevout : vprevout) {
- ::Unserialize(s, TxInUndoDeserializer(&prevout));
- }
- }
+ SERIALIZE_METHODS(CTxUndo, obj) { READWRITE(Using<VectorFormatter<TxInUndoFormatter>>(obj.vprevout)); }
};
/** Undo information for a CBlock */
@@ -103,12 +65,7 @@ class CBlockUndo
public:
std::vector<CTxUndo> vtxundo; // for all but the coinbase
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITE(vtxundo);
- }
+ SERIALIZE_METHODS(CBlockUndo, obj) { READWRITE(obj.vtxundo); }
};
#endif // BITCOIN_UNDO_H
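
The undo.h change replaces the paired serializer/deserializer classes with a single formatter that serialize.h applies element-wise via VectorFormatter. A minimal sketch of the same pattern with a hypothetical payload (names below are illustrative, not from the patch):

    struct HeightFormatter {
        template <typename Stream>
        void Ser(Stream& s, const int& height) { ::Serialize(s, VARINT_MODE(height, VarIntMode::NONNEGATIVE_SIGNED)); }
        template <typename Stream>
        void Unser(Stream& s, int& height) { ::Unserialize(s, VARINT_MODE(height, VarIntMode::NONNEGATIVE_SIGNED)); }
    };

    struct Heights {
        std::vector<int> v;
        // VectorFormatter writes the CompactSize element count, then applies
        // HeightFormatter to each element.
        SERIALIZE_METHODS(Heights, obj) { READWRITE(Using<VectorFormatter<HeightFormatter>>(obj.v)); }
    };
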
diff --git a/src/univalue/Makefile.am b/src/univalue/Makefile.am
index e283fc890e..0f5ba59954 100644
--- a/src/univalue/Makefile.am
+++ b/src/univalue/Makefile.am
@@ -95,6 +95,7 @@ TEST_FILES = \
$(TEST_DATA_DIR)/fail41.json \
$(TEST_DATA_DIR)/fail42.json \
$(TEST_DATA_DIR)/fail44.json \
+ $(TEST_DATA_DIR)/fail45.json \
$(TEST_DATA_DIR)/fail3.json \
$(TEST_DATA_DIR)/fail4.json \
$(TEST_DATA_DIR)/fail5.json \
@@ -105,6 +106,7 @@ TEST_FILES = \
$(TEST_DATA_DIR)/pass1.json \
$(TEST_DATA_DIR)/pass2.json \
$(TEST_DATA_DIR)/pass3.json \
+ $(TEST_DATA_DIR)/pass4.json \
$(TEST_DATA_DIR)/round1.json \
$(TEST_DATA_DIR)/round2.json \
$(TEST_DATA_DIR)/round3.json \
diff --git a/src/univalue/lib/univalue_read.cpp b/src/univalue/lib/univalue_read.cpp
index 14834db24d..5c6a1acf75 100644
--- a/src/univalue/lib/univalue_read.cpp
+++ b/src/univalue/lib/univalue_read.cpp
@@ -8,6 +8,14 @@
#include "univalue.h"
#include "univalue_utffilter.h"
+/*
+ * According to stackexchange, the original json test suite wanted
+ * to limit depth to 22. Widely-deployed PHP bails at depth 512,
+ * so we will follow PHP's lead, which should be more than sufficient
+ * (further stackexchange comments indicate depth > 32 rarely occurs).
+ */
+static const size_t MAX_JSON_DEPTH = 512;
+
static bool json_isdigit(int ch)
{
return ((ch >= '0') && (ch <= '9'));
@@ -323,6 +331,9 @@ bool UniValue::read(const char *raw, size_t size)
stack.push_back(newTop);
}
+ if (stack.size() > MAX_JSON_DEPTH)
+ return false;
+
if (utyp == VOBJ)
setExpect(OBJ_NAME);
else
diff --git a/src/univalue/test/fail45.json b/src/univalue/test/fail45.json
new file mode 100644
index 0000000000..03a30d8800
--- /dev/null
+++ b/src/univalue/test/fail45.json
@@ -0,0 +1 @@
+[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
diff --git a/src/univalue/test/pass4.json b/src/univalue/test/pass4.json
new file mode 100644
index 0000000000..f5a680b31c
--- /dev/null
+++ b/src/univalue/test/pass4.json
@@ -0,0 +1 @@
+[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
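
fail45.json and pass4.json probe the new nesting bound from either side. An equivalent programmatic check, assuming the MAX_JSON_DEPTH of 512 introduced in univalue_read.cpp (sketch, not part of the test suite):

    UniValue v;
    const std::string ok   = std::string(512, '[') + std::string(512, ']'); // depth 512: accepted
    const std::string deep = std::string(513, '[') + std::string(513, ']'); // depth 513: rejected
    assert(v.read(ok));
    assert(!v.read(deep));
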
diff --git a/src/univalue/test/unitester.cpp b/src/univalue/test/unitester.cpp
index 75c0dc225a..2308afbcdf 100644
--- a/src/univalue/test/unitester.cpp
+++ b/src/univalue/test/unitester.cpp
@@ -114,6 +114,7 @@ static const char *filenames[] = {
"fail41.json", // invalid unicode: unfinished UTF-8
"fail42.json", // valid json with garbage following a nul byte
"fail44.json", // unterminated string
+ "fail45.json", // nested beyond max depth
"fail3.json",
"fail4.json", // extra comma
"fail5.json",
@@ -124,6 +125,7 @@ static const char *filenames[] = {
"pass1.json",
"pass2.json",
"pass3.json",
+ "pass4.json",
"round1.json", // round-trip test
"round2.json", // unicode
"round3.json", // bare string
diff --git a/src/util/message.cpp b/src/util/message.cpp
new file mode 100644
index 0000000000..1e7128d225
--- /dev/null
+++ b/src/util/message.cpp
@@ -0,0 +1,92 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <hash.h> // For CHashWriter
+#include <key.h> // For CKey
+#include <key_io.h> // For DecodeDestination()
+#include <pubkey.h> // For CPubKey
+#include <script/standard.h> // For CTxDestination, IsValidDestination(), PKHash
+#include <serialize.h> // For SER_GETHASH
+#include <util/message.h>
+#include <util/strencodings.h> // For DecodeBase64()
+
+#include <string>
+#include <vector>
+
+/**
+ * Text used to signify that a signed message follows and to prevent
+ * inadvertently signing a transaction.
+ */
+const std::string MESSAGE_MAGIC = "Bitcoin Signed Message:\n";
+
+MessageVerificationResult MessageVerify(
+ const std::string& address,
+ const std::string& signature,
+ const std::string& message)
+{
+ CTxDestination destination = DecodeDestination(address);
+ if (!IsValidDestination(destination)) {
+ return MessageVerificationResult::ERR_INVALID_ADDRESS;
+ }
+
+ if (boost::get<PKHash>(&destination) == nullptr) {
+ return MessageVerificationResult::ERR_ADDRESS_NO_KEY;
+ }
+
+ bool invalid = false;
+ std::vector<unsigned char> signature_bytes = DecodeBase64(signature.c_str(), &invalid);
+ if (invalid) {
+ return MessageVerificationResult::ERR_MALFORMED_SIGNATURE;
+ }
+
+ CPubKey pubkey;
+ if (!pubkey.RecoverCompact(MessageHash(message), signature_bytes)) {
+ return MessageVerificationResult::ERR_PUBKEY_NOT_RECOVERED;
+ }
+
+ if (!(CTxDestination(PKHash(pubkey)) == destination)) {
+ return MessageVerificationResult::ERR_NOT_SIGNED;
+ }
+
+ return MessageVerificationResult::OK;
+}
+
+bool MessageSign(
+ const CKey& privkey,
+ const std::string& message,
+ std::string& signature)
+{
+ std::vector<unsigned char> signature_bytes;
+
+ if (!privkey.SignCompact(MessageHash(message), signature_bytes)) {
+ return false;
+ }
+
+ signature = EncodeBase64(signature_bytes.data(), signature_bytes.size());
+
+ return true;
+}
+
+uint256 MessageHash(const std::string& message)
+{
+ CHashWriter hasher(SER_GETHASH, 0);
+ hasher << MESSAGE_MAGIC << message;
+
+ return hasher.GetHash();
+}
+
+std::string SigningResultString(const SigningResult res)
+{
+ switch (res) {
+ case SigningResult::OK:
+ return "No error";
+ case SigningResult::PRIVATE_KEY_NOT_AVAILABLE:
+ return "Private key not available";
+ case SigningResult::SIGNING_FAILED:
+ return "Sign failed";
+ // no default case, so the compiler can warn about missing cases
+ }
+ assert(false);
+}
diff --git a/src/util/message.h b/src/util/message.h
new file mode 100644
index 0000000000..b31c5f5761
--- /dev/null
+++ b/src/util/message.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_UTIL_MESSAGE_H
+#define BITCOIN_UTIL_MESSAGE_H
+
+#include <key.h> // For CKey
+#include <uint256.h>
+
+#include <string>
+
+extern const std::string MESSAGE_MAGIC;
+
+/** The result of a signed message verification.
+ * Message verification takes as an input:
+ * - address (with whose private key the message is supposed to have been signed)
+ * - signature
+ * - message
+ */
+enum class MessageVerificationResult {
+ //! The provided address is invalid.
+ ERR_INVALID_ADDRESS,
+
+ //! The provided address is valid but does not refer to a public key.
+ ERR_ADDRESS_NO_KEY,
+
+ //! The provided signature couldn't be parsed (maybe invalid base64).
+ ERR_MALFORMED_SIGNATURE,
+
+ //! A public key could not be recovered from the provided signature and message.
+ ERR_PUBKEY_NOT_RECOVERED,
+
+ //! The message was not signed with the private key of the provided address.
+ ERR_NOT_SIGNED,
+
+ //! The message verification was successful.
+ OK
+};
+
+enum class SigningResult {
+ OK, //!< No error
+ PRIVATE_KEY_NOT_AVAILABLE,
+ SIGNING_FAILED,
+};
+
+/** Verify a signed message.
+ * @param[in] address Signer's bitcoin address; it must refer to a public key.
+ * @param[in] signature The signature in base64 format.
+ * @param[in] message The message that was signed.
+ * @return result code */
+MessageVerificationResult MessageVerify(
+ const std::string& address,
+ const std::string& signature,
+ const std::string& message);
+
+/** Sign a message.
+ * @param[in] privkey Private key to sign with.
+ * @param[in] message The message to sign.
+ * @param[out] signature Signature, base64 encoded, only set if true is returned.
+ * @return true if signing was successful. */
+bool MessageSign(
+ const CKey& privkey,
+ const std::string& message,
+ std::string& signature);
+
+/**
+ * Hashes a message for signing and verification in a manner that prevents
+ * inadvertently signing a transaction.
+ */
+uint256 MessageHash(const std::string& message);
+
+std::string SigningResultString(const SigningResult res);
+
+#endif // BITCOIN_UTIL_MESSAGE_H
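
Taken together, the new util/message API is used roughly as follows (sketch; `key` stands for a valid CKey and `address` for its P2PKH address, as exercised by the util_tests additions above):

    std::string signature;
    if (MessageSign(key, "Trust no one", signature)) {
        assert(MessageVerify(address, signature, "Trust no one") == MessageVerificationResult::OK);
    }
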
diff --git a/src/util/moneystr.cpp b/src/util/moneystr.cpp
index 2797f450ca..544cfb58f9 100644
--- a/src/util/moneystr.cpp
+++ b/src/util/moneystr.cpp
@@ -31,21 +31,19 @@ std::string FormatMoney(const CAmount& n)
}
-bool ParseMoney(const std::string& str, CAmount& nRet)
+bool ParseMoney(const std::string& money_string, CAmount& nRet)
{
- if (!ValidAsCString(str)) {
+ if (!ValidAsCString(money_string)) {
+ return false;
+ }
+ const std::string str = TrimString(money_string);
+ if (str.empty()) {
return false;
}
- return ParseMoney(str.c_str(), nRet);
-}
-bool ParseMoney(const char* pszIn, CAmount& nRet)
-{
std::string strWhole;
int64_t nUnits = 0;
- const char* p = pszIn;
- while (IsSpace(*p))
- p++;
+ const char* p = str.c_str();
for (; *p; p++)
{
if (*p == '.')
@@ -60,14 +58,14 @@ bool ParseMoney(const char* pszIn, CAmount& nRet)
break;
}
if (IsSpace(*p))
- break;
+ return false;
if (!IsDigit(*p))
return false;
strWhole.insert(strWhole.end(), *p);
}
- for (; *p; p++)
- if (!IsSpace(*p))
- return false;
+ if (*p) {
+ return false;
+ }
if (strWhole.size() > 10) // guard against 63 bit overflow
return false;
if (nUnits < 0 || nUnits > COIN)
diff --git a/src/util/moneystr.h b/src/util/moneystr.h
index 027c7e2e53..d8b08adc24 100644
--- a/src/util/moneystr.h
+++ b/src/util/moneystr.h
@@ -18,7 +18,7 @@
* JSON but use AmountFromValue and ValueFromAmount for that.
*/
std::string FormatMoney(const CAmount& n);
+/** Parse an amount denoted in full coins. E.g. "0.0034" supplied on the command line. **/
NODISCARD bool ParseMoney(const std::string& str, CAmount& nRet);
-NODISCARD bool ParseMoney(const char* pszIn, CAmount& nRet);
#endif // BITCOIN_UTIL_MONEYSTR_H
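
With the moneystr change, ParseMoney() trims surrounding whitespace itself and rejects embedded whitespace, so the single std::string overload now covers the former const char* callers. Behaviour as pinned down by the util_tests additions:

    CAmount ret;
    ParseMoney(" 1 ", ret); // true, ret == COIN
    ParseMoney("1 2", ret); // false: embedded whitespace
    ParseMoney("", ret);    // false: empty after trimming
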
diff --git a/src/util/string.h b/src/util/string.h
index 3db8fc8b98..694f0a1ca4 100644
--- a/src/util/string.h
+++ b/src/util/string.h
@@ -8,6 +8,8 @@
#include <attributes.h>
#include <cstring>
+#include <locale>
+#include <sstream>
#include <string>
#include <vector>
@@ -52,4 +54,16 @@ NODISCARD inline bool ValidAsCString(const std::string& str) noexcept
return str.size() == strlen(str.c_str());
}
+/**
+ * Locale-independent version of std::to_string
+ */
+template <typename T>
+std::string ToString(const T& t)
+{
+ std::ostringstream oss;
+ oss.imbue(std::locale::classic());
+ oss << t;
+ return oss.str();
+}
+
#endif // BITCOIN_UTIL_STRENCODINGS_H
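
The new ToString() exists so these call sites cannot be affected by the process locale: imbuing std::locale::classic() pins the formatting, whereas std::to_string is specified in terms of sprintf and therefore follows the current C locale. A small example:

    // Always "1.5", even under a locale whose decimal separator is ',',
    // where std::to_string(1.5) could yield "1,5".
    const std::string s = ToString(1.5);
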
diff --git a/src/util/system.cpp b/src/util/system.cpp
index 13ff99c663..b0a538b527 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -63,6 +63,7 @@
#include <malloc.h>
#endif
+#include <boost/algorithm/string/replace.hpp>
#include <thread>
#include <typeinfo>
#include <univalue.h>
@@ -1047,6 +1048,15 @@ fs::path GetSpecialFolderPath(int nFolder, bool fCreate)
}
#endif
+#ifndef WIN32
+std::string ShellEscape(const std::string& arg)
+{
+ std::string escaped = arg;
+ boost::replace_all(escaped, "'", "'\"'\"'");
+ return "'" + escaped + "'";
+}
+#endif
+
#if HAVE_SYSTEM
void runCommand(const std::string& strCommand)
{
diff --git a/src/util/system.h b/src/util/system.h
index bb69181de9..3138522b5c 100644
--- a/src/util/system.h
+++ b/src/util/system.h
@@ -81,6 +81,9 @@ fs::path GetConfigFile(const std::string& confPath);
#ifdef WIN32
fs::path GetSpecialFolderPath(int nFolder, bool fCreate = true);
#endif
+#ifndef WIN32
+std::string ShellEscape(const std::string& arg);
+#endif
#if HAVE_SYSTEM
void runCommand(const std::string& strCommand);
#endif
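
ShellEscape() wraps its argument in single quotes and splices any embedded single quote using the close-quote / double-quoted-quote / reopen idiom visible in the implementation. For example:

    // The shell sees the literal argument  it's
    const std::string escaped = ShellEscape("it's"); // == "'it'\"'\"'s'"
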
diff --git a/src/util/time.cpp b/src/util/time.cpp
index 2afff2626b..14937b985e 100644
--- a/src/util/time.cpp
+++ b/src/util/time.cpp
@@ -11,10 +11,13 @@
#include <atomic>
#include <boost/date_time/posix_time/posix_time.hpp>
-#include <boost/thread.hpp>
#include <ctime>
+#include <thread>
+
#include <tinyformat.h>
+void UninterruptibleSleep(const std::chrono::microseconds& n) { std::this_thread::sleep_for(n); }
+
static std::atomic<int64_t> nMockTime(0); //!< For unit testing
int64_t GetTime()
@@ -72,32 +75,16 @@ int64_t GetSystemTimeInSeconds()
return GetTimeMicros()/1000000;
}
-void MilliSleep(int64_t n)
-{
-
-/**
- * Boost's sleep_for was uninterruptible when backed by nanosleep from 1.50
- * until fixed in 1.52. Use the deprecated sleep method for the broken case.
- * See: https://svn.boost.org/trac/boost/ticket/7238
- */
-#if defined(HAVE_WORKING_BOOST_SLEEP_FOR)
- boost::this_thread::sleep_for(boost::chrono::milliseconds(n));
-#elif defined(HAVE_WORKING_BOOST_SLEEP)
- boost::this_thread::sleep(boost::posix_time::milliseconds(n));
-#else
-//should never get here
-#error missing boost sleep implementation
-#endif
-}
-
std::string FormatISO8601DateTime(int64_t nTime) {
struct tm ts;
time_t time_val = nTime;
#ifdef _MSC_VER
- gmtime_s(&ts, &time_val);
+ if (gmtime_s(&ts, &time_val) != 0) {
#else
- gmtime_r(&time_val, &ts);
+ if (gmtime_r(&time_val, &ts) == nullptr) {
#endif
+ return {};
+ }
return strprintf("%04i-%02i-%02iT%02i:%02i:%02iZ", ts.tm_year + 1900, ts.tm_mon + 1, ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec);
}
@@ -105,10 +92,12 @@ std::string FormatISO8601Date(int64_t nTime) {
struct tm ts;
time_t time_val = nTime;
#ifdef _MSC_VER
- gmtime_s(&ts, &time_val);
+ if (gmtime_s(&ts, &time_val) != 0) {
#else
- gmtime_r(&time_val, &ts);
+ if (gmtime_r(&time_val, &ts) == nullptr) {
#endif
+ return {};
+ }
return strprintf("%04i-%02i-%02i", ts.tm_year + 1900, ts.tm_mon + 1, ts.tm_mday);
}
@@ -124,4 +113,4 @@ int64_t ParseISO8601DateTime(const std::string& str)
if (ptime.is_not_a_date_time() || epoch > ptime)
return 0;
return (ptime - epoch).total_seconds();
-} \ No newline at end of file
+}
diff --git a/src/util/time.h b/src/util/time.h
index af4390aa1c..77de1e047d 100644
--- a/src/util/time.h
+++ b/src/util/time.h
@@ -10,6 +10,8 @@
#include <string>
#include <chrono>
+void UninterruptibleSleep(const std::chrono::microseconds& n);
+
/**
* Helper to count the seconds of a duration.
*
@@ -36,8 +38,6 @@ void SetMockTime(int64_t nMockTimeIn);
/** For testing */
int64_t GetMockTime();
-void MilliSleep(int64_t n);
-
/** Return system time (or mocked time, if set) */
template <typename T>
T GetTime();
diff --git a/src/util/validation.cpp b/src/util/validation.cpp
deleted file mode 100644
index 89bc6665a4..0000000000
--- a/src/util/validation.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2020 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include <util/validation.h>
-
-#include <consensus/validation.h>
-#include <tinyformat.h>
-
-std::string FormatStateMessage(const ValidationState &state)
-{
- if (state.IsValid()) {
- return "Valid";
- }
-
- const std::string debug_message = state.GetDebugMessage();
- if (!debug_message.empty()) {
- return strprintf("%s, %s", state.GetRejectReason(), debug_message);
- }
-
- return state.GetRejectReason();
-}
-
-const std::string strMessageMagic = "Bitcoin Signed Message:\n";
diff --git a/src/util/validation.h b/src/util/validation.h
deleted file mode 100644
index da2cf9f102..0000000000
--- a/src/util/validation.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2019 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef BITCOIN_UTIL_VALIDATION_H
-#define BITCOIN_UTIL_VALIDATION_H
-
-#include <string>
-
-class ValidationState;
-
-/** Convert ValidationState to a human-readable message for logging */
-std::string FormatStateMessage(const ValidationState &state);
-
-extern const std::string strMessageMagic;
-
-#endif // BITCOIN_UTIL_VALIDATION_H
diff --git a/src/validation.cpp b/src/validation.cpp
index bab04b8e34..7ee94f8657 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -43,7 +43,6 @@
#include <util/strencodings.h>
#include <util/system.h>
#include <util/translation.h>
-#include <util/validation.h>
#include <validationinterface.h>
#include <warnings.h>
@@ -662,7 +661,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
CAmount nFees = 0;
if (!Consensus::CheckTxInputs(tx, state, m_view, GetSpendHeight(m_view), nFees)) {
- return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
+ return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
}
// Check for non-standard pay-to-script-hash in inputs
@@ -951,7 +950,7 @@ bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs& args, Workspace& ws, Precomp
unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus());
if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata)) {
return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s",
- __func__, hash.ToString(), FormatStateMessage(state));
+ __func__, hash.ToString(), state.ToString());
}
return true;
@@ -975,7 +974,7 @@ bool MemPoolAccept::Finalize(ATMPArgs& args, Workspace& ws)
// Remove conflicting transactions from the mempool
for (CTxMemPool::txiter it : allConflicting)
{
- LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
+ LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s additional fees, %d delta bytes\n",
it->GetTx().GetHash().ToString(),
hash.ToString(),
FormatMoney(nModifiedFees - nConflictingFees),
@@ -1921,13 +1920,15 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
// problems.
return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down");
}
- return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
+ return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
}
// verify that the view's current state corresponds to the previous block
uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
assert(hashPrevBlock == view.GetBestBlock());
+ nBlocksTotal++;
+
// Special case for the genesis block, skipping connection of its transactions
// (its coinbase is unspendable)
if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
@@ -1936,8 +1937,6 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
return true;
}
- nBlocksTotal++;
-
bool fScriptChecks = true;
if (!hashAssumeValid.IsNull()) {
// We've been configured with the hash of a block which has been externally verified to have a valid history.
@@ -2099,7 +2098,7 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
// Any transaction validation failure in ConnectBlock is a block consensus failure
state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
tx_state.GetRejectReason(), tx_state.GetDebugMessage());
- return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
+ return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
}
nFees += txfee;
if (!MoneyRange(nFees)) {
@@ -2142,7 +2141,7 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
tx_state.GetRejectReason(), tx_state.GetDebugMessage());
return error("ConnectBlock(): CheckInputScripts on %s failed with %s",
- tx.GetHash().ToString(), FormatStateMessage(state));
+ tx.GetHash().ToString(), state.ToString());
}
control.Add(vChecks);
}
@@ -2359,7 +2358,7 @@ void CChainState::ForceFlushStateToDisk() {
BlockValidationState state;
const CChainParams& chainparams = Params();
if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
- LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state));
+ LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
}
}
@@ -2369,7 +2368,7 @@ void CChainState::PruneAndFlush() {
const CChainParams& chainparams = Params();
if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) {
- LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state));
+ LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
}
}
@@ -2505,35 +2504,21 @@ static int64_t nTimePostConnect = 0;
struct PerBlockConnectTrace {
CBlockIndex* pindex = nullptr;
std::shared_ptr<const CBlock> pblock;
- std::shared_ptr<std::vector<CTransactionRef>> conflictedTxs;
- PerBlockConnectTrace() : conflictedTxs(std::make_shared<std::vector<CTransactionRef>>()) {}
+ PerBlockConnectTrace() {}
};
/**
* Used to track blocks whose transactions were applied to the UTXO state as a
* part of a single ActivateBestChainStep call.
*
- * This class also tracks transactions that are removed from the mempool as
- * conflicts (per block) and can be used to pass all those transactions
- * through SyncTransaction.
- *
- * This class assumes (and asserts) that the conflicted transactions for a given
- * block are added via mempool callbacks prior to the BlockConnected() associated
- * with those transactions. If any transactions are marked conflicted, it is
- * assumed that an associated block will always be added.
- *
* This class is single-use, once you call GetBlocksConnected() you have to throw
* it away and make a new one.
*/
class ConnectTrace {
private:
std::vector<PerBlockConnectTrace> blocksConnected;
- CTxMemPool &pool;
- boost::signals2::scoped_connection m_connNotifyEntryRemoved;
public:
- explicit ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) {
- m_connNotifyEntryRemoved = pool.NotifyEntryRemoved.connect(std::bind(&ConnectTrace::NotifyEntryRemoved, this, std::placeholders::_1, std::placeholders::_2));
- }
+ explicit ConnectTrace() : blocksConnected(1) {}
void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) {
assert(!blocksConnected.back().pindex);
@@ -2551,17 +2536,9 @@ public:
// one waiting for the transactions from the next block. We pop
// the last entry here to make sure the list we return is sane.
assert(!blocksConnected.back().pindex);
- assert(blocksConnected.back().conflictedTxs->empty());
blocksConnected.pop_back();
return blocksConnected;
}
-
- void NotifyEntryRemoved(CTransactionRef txRemoved, MemPoolRemovalReason reason) {
- assert(!blocksConnected.back().pindex);
- if (reason == MemPoolRemovalReason::CONFLICT) {
- blocksConnected.back().conflictedTxs->emplace_back(std::move(txRemoved));
- }
- }
};
/**
@@ -2596,9 +2573,10 @@ bool CChainState::ConnectTip(BlockValidationState& state, const CChainParams& ch
if (!rv) {
if (state.IsInvalid())
InvalidBlockFound(pindexNew, state);
- return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), FormatStateMessage(state));
+ return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
}
nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
+ assert(nBlocksTotal > 0);
LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
bool flushed = view.Flush();
assert(flushed);
@@ -2854,7 +2832,7 @@ bool CChainState::ActivateBestChain(BlockValidationState &state, const CChainPar
do {
// We absolutely may not unlock cs_main until we've made forward progress
// (with the exception of shutdown due to hardware issues, low disk space, etc).
- ConnectTrace connectTrace(mempool); // Destructed before cs_main is unlocked
+ ConnectTrace connectTrace; // Destructed before cs_main is unlocked
if (pindexMostWork == nullptr) {
pindexMostWork = FindMostWorkChain();
@@ -2881,7 +2859,7 @@ bool CChainState::ActivateBestChain(BlockValidationState &state, const CChainPar
for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
assert(trace.pblock && trace.pindex);
- GetMainSignals().BlockConnected(trace.pblock, trace.pindex, trace.conflictedTxs);
+ GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
}
} while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip)));
if (!blocks_connected) return true;
@@ -3601,7 +3579,7 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS
}
if (!CheckBlockHeader(block, state, chainparams.GetConsensus()))
- return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
+ return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString());
// Get prev block index
CBlockIndex* pindexPrev = nullptr;
@@ -3616,7 +3594,7 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS
return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
}
if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
- return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
+ return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString());
/* Determine if this block descends from any block which has been found
* invalid (m_failed_blocks), then mark pindexPrev and any blocks between
@@ -3766,7 +3744,7 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, Block
pindex->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(pindex);
}
- return error("%s: %s", __func__, FormatStateMessage(state));
+ return error("%s: %s", __func__, state.ToString());
}
// Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
@@ -3816,7 +3794,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
}
if (!ret) {
GetMainSignals().BlockChecked(*pblock, state);
- return error("%s: AcceptBlock FAILED (%s)", __func__, FormatStateMessage(state));
+ return error("%s: AcceptBlock FAILED (%s)", __func__, state.ToString());
}
}
@@ -3824,7 +3802,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
BlockValidationState state; // Only used to report errors, not invalidity - ignore it
if (!::ChainstateActive().ActivateBestChain(state, chainparams, pblock))
- return error("%s: ActivateBestChain failed (%s)", __func__, FormatStateMessage(state));
+ return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString());
return true;
}
@@ -3842,11 +3820,11 @@ bool TestBlockValidity(BlockValidationState& state, const CChainParams& chainpar
// NOTE: CheckBlockHeader is called by CheckBlock
if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
- return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state));
+ return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString());
if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
- return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
+ return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
- return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state));
+ return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString());
if (!::ChainstateActive().ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
return false;
assert(state.IsValid());
@@ -3944,7 +3922,7 @@ void PruneBlockFilesManual(int nManualPruneHeight)
const CChainParams& chainparams = Params();
if (!::ChainstateActive().FlushStateToDisk(
chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
- LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state));
+ LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
}
}
@@ -4262,7 +4240,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
// check level 1: verify block validity
if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
- pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
+ pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
// check level 2: verify undo validity
if (nCheckLevel >= 2 && pindex) {
CBlockUndo undo;
@@ -4311,7 +4289,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
if (!::ChainstateActive().ConnectBlock(block, state, pindex, coins, chainparams))
- return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
+ return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
}
}
@@ -4499,7 +4477,7 @@ bool CChainState::RewindBlockIndex(const CChainParams& params)
// Disconnect block
if (!DisconnectTip(state, params, nullptr)) {
- return error("RewindBlockIndex: unable to disconnect block at height %i (%s)", tip->nHeight, FormatStateMessage(state));
+ return error("RewindBlockIndex: unable to disconnect block at height %i (%s)", tip->nHeight, state.ToString());
}
// Reduce validity flag and have-data flags.
@@ -4519,7 +4497,7 @@ bool CChainState::RewindBlockIndex(const CChainParams& params)
// Occasionally flush state to disk.
if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) {
- LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", FormatStateMessage(state));
+ LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", state.ToString());
return false;
}
}
@@ -4550,7 +4528,7 @@ bool RewindBlockIndex(const CChainParams& params) {
// it'll get called a bunch real soon.
BlockValidationState state;
if (!::ChainstateActive().FlushStateToDisk(params, state, FlushStateMode::ALWAYS)) {
- LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", FormatStateMessage(state));
+ LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", state.ToString());
return false;
}
}
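
The hunks above drop the mempool-conflict tracking from ConnectTrace and route state formatting through BlockValidationState::ToString(). A minimal sketch of the resulting call pattern, assembled from the changed lines above and not itself part of the patch:

    // ConnectTrace no longer takes a CTxMemPool and no longer carries conflicted txs.
    ConnectTrace connectTrace;                        // was: ConnectTrace connectTrace(mempool);
    connectTrace.BlockConnected(pindexNew, pblock);   // record each connected block
    for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
        // The BlockConnected signal is now two-argument; conflicted transactions are
        // reported separately via TransactionRemovedFromMempool.
        GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
    }
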
diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp
index 0f513c065f..c895904b19 100644
--- a/src/validationinterface.cpp
+++ b/src/validationinterface.cpp
@@ -11,7 +11,6 @@
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <scheduler.h>
-#include <util/validation.h>
#include <future>
#include <unordered_map>
@@ -33,7 +32,7 @@ struct ValidationInterfaceConnections {
struct MainSignalsInstance {
boost::signals2::signal<void (const CBlockIndex *, const CBlockIndex *, bool fInitialDownload)> UpdatedBlockTip;
boost::signals2::signal<void (const CTransactionRef &)> TransactionAddedToMempool;
- boost::signals2::signal<void (const std::shared_ptr<const CBlock> &, const CBlockIndex *pindex, const std::vector<CTransactionRef>&)> BlockConnected;
+ boost::signals2::signal<void (const std::shared_ptr<const CBlock> &, const CBlockIndex *pindex)> BlockConnected;
boost::signals2::signal<void (const std::shared_ptr<const CBlock>&, const CBlockIndex* pindex)> BlockDisconnected;
boost::signals2::signal<void (const CTransactionRef &)> TransactionRemovedFromMempool;
boost::signals2::signal<void (const CBlockLocator &)> ChainStateFlushed;
@@ -80,7 +79,7 @@ void RegisterValidationInterface(CValidationInterface* pwalletIn) {
ValidationInterfaceConnections& conns = g_signals.m_internals->m_connMainSignals[pwalletIn];
conns.UpdatedBlockTip = g_signals.m_internals->UpdatedBlockTip.connect(std::bind(&CValidationInterface::UpdatedBlockTip, pwalletIn, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
conns.TransactionAddedToMempool = g_signals.m_internals->TransactionAddedToMempool.connect(std::bind(&CValidationInterface::TransactionAddedToMempool, pwalletIn, std::placeholders::_1));
- conns.BlockConnected = g_signals.m_internals->BlockConnected.connect(std::bind(&CValidationInterface::BlockConnected, pwalletIn, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
+ conns.BlockConnected = g_signals.m_internals->BlockConnected.connect(std::bind(&CValidationInterface::BlockConnected, pwalletIn, std::placeholders::_1, std::placeholders::_2));
conns.BlockDisconnected = g_signals.m_internals->BlockDisconnected.connect(std::bind(&CValidationInterface::BlockDisconnected, pwalletIn, std::placeholders::_1, std::placeholders::_2));
conns.TransactionRemovedFromMempool = g_signals.m_internals->TransactionRemovedFromMempool.connect(std::bind(&CValidationInterface::TransactionRemovedFromMempool, pwalletIn, std::placeholders::_1));
conns.ChainStateFlushed = g_signals.m_internals->ChainStateFlushed.connect(std::bind(&CValidationInterface::ChainStateFlushed, pwalletIn, std::placeholders::_1));
@@ -164,9 +163,9 @@ void CMainSignals::TransactionRemovedFromMempool(const CTransactionRef &ptx) {
ptx->GetWitnessHash().ToString());
}
-void CMainSignals::BlockConnected(const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex, const std::shared_ptr<const std::vector<CTransactionRef>>& pvtxConflicted) {
- auto event = [pblock, pindex, pvtxConflicted, this] {
- m_internals->BlockConnected(pblock, pindex, *pvtxConflicted);
+void CMainSignals::BlockConnected(const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex) {
+ auto event = [pblock, pindex, this] {
+ m_internals->BlockConnected(pblock, pindex);
};
ENQUEUE_AND_LOG_EVENT(event, "%s: block hash=%s block height=%d", __func__,
pblock->GetHash().ToString(),
@@ -193,7 +192,7 @@ void CMainSignals::ChainStateFlushed(const CBlockLocator &locator) {
void CMainSignals::BlockChecked(const CBlock& block, const BlockValidationState& state) {
LOG_EVENT("%s: block hash=%s state=%s", __func__,
- block.GetHash().ToString(), FormatStateMessage(state));
+ block.GetHash().ToString(), state.ToString());
m_internals->BlockChecked(block, state);
}
diff --git a/src/validationinterface.h b/src/validationinterface.h
index ed6c560944..5707422635 100644
--- a/src/validationinterface.h
+++ b/src/validationinterface.h
@@ -92,10 +92,32 @@ protected:
/**
* Notifies listeners of a transaction leaving mempool.
*
- * This only fires for transactions which leave mempool because of expiry,
- * size limiting, reorg (changes in lock times/coinbase maturity), or
- * replacement. This does not include any transactions which are included
- * in BlockConnectedDisconnected either in block->vtx or in txnConflicted.
+ * This notification fires for transactions that are removed from the
+ * mempool for the following reasons:
+ *
+ * - EXPIRY (expired from mempool after -mempoolexpiry hours)
+ * - SIZELIMIT (removed in size limiting if the mempool exceeds -maxmempool megabytes)
+ * - REORG (removed during a reorg)
+ * - CONFLICT (removed because it conflicts with an in-block transaction)
+ * - REPLACED (removed due to RBF replacement)
+ *
+ * This does not fire for transactions that are removed from the mempool
+ * because they have been included in a block. Any client that is interested
+ * in transactions removed from the mempool for inclusion in a block can learn
+ * about those transactions from the BlockConnected notification.
+ *
+ * Transactions that are removed from the mempool because they conflict
+ * with a transaction in the new block will have
+ * TransactionRemovedFromMempool events fired *before* the BlockConnected
+ * event is fired. If multiple blocks are connected in one step, then the
+ * ordering could be:
+ *
+ * - TransactionRemovedFromMempool(tx1 from block A)
+ * - TransactionRemovedFromMempool(tx2 from block A)
+ * - TransactionRemovedFromMempool(tx1 from block B)
+ * - TransactionRemovedFromMempool(tx2 from block B)
+ * - BlockConnected(A)
+ * - BlockConnected(B)
*
* Called on a background thread.
*/
@@ -106,7 +128,7 @@ protected:
*
* Called on a background thread.
*/
- virtual void BlockConnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex, const std::vector<CTransactionRef> &txnConflicted) {}
+ virtual void BlockConnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex) {}
/**
* Notifies listeners of a block being disconnected
*
@@ -170,7 +192,7 @@ public:
void UpdatedBlockTip(const CBlockIndex *, const CBlockIndex *, bool fInitialDownload);
void TransactionAddedToMempool(const CTransactionRef &);
void TransactionRemovedFromMempool(const CTransactionRef &);
- void BlockConnected(const std::shared_ptr<const CBlock> &, const CBlockIndex *pindex, const std::shared_ptr<const std::vector<CTransactionRef>> &);
+ void BlockConnected(const std::shared_ptr<const CBlock> &, const CBlockIndex *pindex);
void BlockDisconnected(const std::shared_ptr<const CBlock> &, const CBlockIndex* pindex);
void ChainStateFlushed(const CBlockLocator &);
void BlockChecked(const CBlock&, const BlockValidationState&);
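
The expanded comment above describes the event ordering a subscriber now sees. A sketch of a listener under the new interface; the class name ExampleListener is hypothetical and not part of this patch:

    class ExampleListener : public CValidationInterface {
    protected:
        void TransactionRemovedFromMempool(const CTransactionRef& ptx) override
        {
            // Fired for EXPIRY, SIZELIMIT, REORG, CONFLICT and REPLACED removals;
            // for CONFLICT this arrives before the corresponding BlockConnected.
            LogPrintf("tx %s left the mempool\n", ptx->GetHash().ToString());
        }
        void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex) override
        {
            // Conflicted transactions are no longer passed to this callback.
            LogPrintf("connected block %s at height %d\n",
                      block->GetHash().ToString(), pindex->nHeight);
        }
    };
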
diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp
index 8b042162d8..67be4d85d2 100644
--- a/src/wallet/db.cpp
+++ b/src/wallet/db.cpp
@@ -756,7 +756,7 @@ bool BerkeleyBatch::Rewrite(BerkeleyDatabase& database, const char* pszSkip)
return fSuccess;
}
}
- MilliSleep(100);
+ UninterruptibleSleep(std::chrono::milliseconds{100});
}
}
@@ -850,7 +850,7 @@ bool BerkeleyDatabase::Rewrite(const char* pszSkip)
return BerkeleyBatch::Rewrite(*this, pszSkip);
}
-bool BerkeleyDatabase::Backup(const std::string& strDest)
+bool BerkeleyDatabase::Backup(const std::string& strDest) const
{
if (IsDummy()) {
return false;
@@ -887,7 +887,7 @@ bool BerkeleyDatabase::Backup(const std::string& strDest)
}
}
}
- MilliSleep(100);
+ UninterruptibleSleep(std::chrono::milliseconds{100});
}
}
diff --git a/src/wallet/db.h b/src/wallet/db.h
index abec3ae4e2..bebaa55d05 100644
--- a/src/wallet/db.h
+++ b/src/wallet/db.h
@@ -157,7 +157,7 @@ public:
/** Back up the entire database to a file.
*/
- bool Backup(const std::string& strDest);
+ bool Backup(const std::string& strDest) const;
/** Make sure all changes are flushed to disk.
*/
@@ -193,7 +193,7 @@ private:
* Only to be used at a low level, application should ideally not care
* about this.
*/
- bool IsDummy() { return env == nullptr; }
+ bool IsDummy() const { return env == nullptr; }
};
/** RAII class that provides access to a Berkeley database */
diff --git a/src/wallet/feebumper.cpp b/src/wallet/feebumper.cpp
index b93b9ef1bc..1623ab9074 100644
--- a/src/wallet/feebumper.cpp
+++ b/src/wallet/feebumper.cpp
@@ -12,7 +12,6 @@
#include <util/moneystr.h>
#include <util/rbf.h>
#include <util/system.h>
-#include <util/validation.h>
//! Check whether transaction has descendant in wallet or mempool, or has been
//! mined, or conflicts with a mined transaction. Return a feebumper::Result.
@@ -61,7 +60,7 @@ static feebumper::Result PreconditionChecks(const CWallet& wallet, const CWallet
static feebumper::Result CheckFeeRate(const CWallet& wallet, const CWalletTx& wtx, const CFeeRate& newFeerate, const int64_t maxTxSize, std::vector<std::string>& errors) {
// check that fee rate is higher than mempool's minimum fee
// (no point in bumping fee if we know that the new tx won't be accepted to the mempool)
- // This may occur if the user set FeeRate, TotalFee or paytxfee too low, if fallbackfee is too low, or, perhaps,
+ // This may occur if the user set fee_rate or paytxfee too low, if fallbackfee is too low, or, perhaps,
// in a rare situation where the mempool minimum fee increased significantly since the fee estimation just a
// moment earlier. In this case, we report an error to the user, who may adjust the fee.
CFeeRate minMempoolFeeRate = wallet.chain().mempoolMinFee();
@@ -151,132 +150,6 @@ bool TransactionCanBeBumped(const CWallet& wallet, const uint256& txid)
return res == feebumper::Result::OK;
}
-Result CreateTotalBumpTransaction(const CWallet* wallet, const uint256& txid, const CCoinControl& coin_control, CAmount total_fee, std::vector<std::string>& errors,
- CAmount& old_fee, CAmount& new_fee, CMutableTransaction& mtx)
-{
- new_fee = total_fee;
-
- auto locked_chain = wallet->chain().lock();
- LOCK(wallet->cs_wallet);
- errors.clear();
- auto it = wallet->mapWallet.find(txid);
- if (it == wallet->mapWallet.end()) {
- errors.push_back("Invalid or non-wallet transaction id");
- return Result::INVALID_ADDRESS_OR_KEY;
- }
- const CWalletTx& wtx = it->second;
-
- Result result = PreconditionChecks(*wallet, wtx, errors);
- if (result != Result::OK) {
- return result;
- }
-
- // figure out which output was change
- // if there was no change output or multiple change outputs, fail
- int nOutput = -1;
- for (size_t i = 0; i < wtx.tx->vout.size(); ++i) {
- if (wallet->IsChange(wtx.tx->vout[i])) {
- if (nOutput != -1) {
- errors.push_back("Transaction has multiple change outputs");
- return Result::WALLET_ERROR;
- }
- nOutput = i;
- }
- }
- if (nOutput == -1) {
- errors.push_back("Transaction does not have a change output");
- return Result::WALLET_ERROR;
- }
-
- // Calculate the expected size of the new transaction.
- int64_t txSize = GetVirtualTransactionSize(*(wtx.tx));
- const int64_t maxNewTxSize = CalculateMaximumSignedTxSize(*wtx.tx, wallet);
- if (maxNewTxSize < 0) {
- errors.push_back("Transaction contains inputs that cannot be signed");
- return Result::INVALID_ADDRESS_OR_KEY;
- }
-
- // calculate the old fee and fee-rate
- isminefilter filter = wallet->GetLegacyScriptPubKeyMan() && wallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) ? ISMINE_WATCH_ONLY : ISMINE_SPENDABLE;
- old_fee = wtx.GetDebit(filter) - wtx.tx->GetValueOut();
- CFeeRate nOldFeeRate(old_fee, txSize);
- // The wallet uses a conservative WALLET_INCREMENTAL_RELAY_FEE value to
- // future proof against changes to network wide policy for incremental relay
- // fee that our node may not be aware of.
- CFeeRate nodeIncrementalRelayFee = wallet->chain().relayIncrementalFee();
- CFeeRate walletIncrementalRelayFee = CFeeRate(WALLET_INCREMENTAL_RELAY_FEE);
- if (nodeIncrementalRelayFee > walletIncrementalRelayFee) {
- walletIncrementalRelayFee = nodeIncrementalRelayFee;
- }
-
- CAmount minTotalFee = nOldFeeRate.GetFee(maxNewTxSize) + nodeIncrementalRelayFee.GetFee(maxNewTxSize);
- if (total_fee < minTotalFee) {
- errors.push_back(strprintf("Insufficient totalFee, must be at least %s (oldFee %s + incrementalFee %s)",
- FormatMoney(minTotalFee), FormatMoney(nOldFeeRate.GetFee(maxNewTxSize)), FormatMoney(nodeIncrementalRelayFee.GetFee(maxNewTxSize))));
- return Result::INVALID_PARAMETER;
- }
- CAmount requiredFee = GetRequiredFee(*wallet, maxNewTxSize);
- if (total_fee < requiredFee) {
- errors.push_back(strprintf("Insufficient totalFee (cannot be less than required fee %s)",
- FormatMoney(requiredFee)));
- return Result::INVALID_PARAMETER;
- }
-
- // Check that in all cases the new fee doesn't violate maxTxFee
- const CAmount max_tx_fee = wallet->m_default_max_tx_fee;
- if (new_fee > max_tx_fee) {
- errors.push_back(strprintf("Specified or calculated fee %s is too high (cannot be higher than -maxtxfee %s)",
- FormatMoney(new_fee), FormatMoney(max_tx_fee)));
- return Result::WALLET_ERROR;
- }
-
- // check that fee rate is higher than mempool's minimum fee
- // (no point in bumping fee if we know that the new tx won't be accepted to the mempool)
- // This may occur if the user set TotalFee or paytxfee too low, if fallbackfee is too low, or, perhaps,
- // in a rare situation where the mempool minimum fee increased significantly since the fee estimation just a
- // moment earlier. In this case, we report an error to the user, who may use total_fee to make an adjustment.
- CFeeRate minMempoolFeeRate = wallet->chain().mempoolMinFee();
- CFeeRate nNewFeeRate = CFeeRate(total_fee, maxNewTxSize);
- if (nNewFeeRate.GetFeePerK() < minMempoolFeeRate.GetFeePerK()) {
- errors.push_back(strprintf(
- "New fee rate (%s) is lower than the minimum fee rate (%s) to get into the mempool -- "
- "the totalFee value should be at least %s to add transaction",
- FormatMoney(nNewFeeRate.GetFeePerK()),
- FormatMoney(minMempoolFeeRate.GetFeePerK()),
- FormatMoney(minMempoolFeeRate.GetFee(maxNewTxSize))));
- return Result::WALLET_ERROR;
- }
-
- // Now modify the output to increase the fee.
- // If the output is not large enough to pay the fee, fail.
- CAmount nDelta = new_fee - old_fee;
- assert(nDelta > 0);
- mtx = CMutableTransaction{*wtx.tx};
- CTxOut* poutput = &(mtx.vout[nOutput]);
- if (poutput->nValue < nDelta) {
- errors.push_back("Change output is too small to bump the fee");
- return Result::WALLET_ERROR;
- }
-
- // If the output would become dust, discard it (converting the dust to fee)
- poutput->nValue -= nDelta;
- if (poutput->nValue <= GetDustThreshold(*poutput, GetDiscardRate(*wallet))) {
- wallet->WalletLogPrintf("Bumping fee and discarding dust output\n");
- new_fee += poutput->nValue;
- mtx.vout.erase(mtx.vout.begin() + nOutput);
- }
-
- // Mark new tx not replaceable, if requested.
- if (!coin_control.m_signal_bip125_rbf.get_value_or(wallet->m_signal_rbf)) {
- for (auto& input : mtx.vin) {
- if (input.nSequence < 0xfffffffe) input.nSequence = 0xfffffffe;
- }
- }
-
- return Result::OK;
-}
-
-
Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCoinControl& coin_control, std::vector<std::string>& errors,
CAmount& old_fee, CAmount& new_fee, CMutableTransaction& mtx)
{
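
CreateTotalBumpTransaction and its totalFee-based checks are removed above; only the feerate-based path remains. A hedged sketch of calling the surviving API, assuming a CWallet& wallet and a uint256 txid are in scope (variable names are illustrative only):

    std::vector<std::string> errors;
    CAmount old_fee = 0;
    CAmount new_fee = 0;
    CMutableTransaction mtx;
    CCoinControl coin_control;   // may carry a requested feerate and the RBF signal
    const feebumper::Result res = feebumper::CreateRateBumpTransaction(
        wallet, txid, coin_control, errors, old_fee, new_fee, mtx);
    if (res != feebumper::Result::OK) {
        // 'errors' holds human-readable reasons, e.g. a fee rate below the mempool minimum.
    }
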
diff --git a/src/wallet/feebumper.h b/src/wallet/feebumper.h
index 9357397606..859f754761 100644
--- a/src/wallet/feebumper.h
+++ b/src/wallet/feebumper.h
@@ -28,16 +28,6 @@ enum class Result
//! Return whether transaction can be bumped.
bool TransactionCanBeBumped(const CWallet& wallet, const uint256& txid);
-//! Create bumpfee transaction based on total amount.
-Result CreateTotalBumpTransaction(const CWallet* wallet,
- const uint256& txid,
- const CCoinControl& coin_control,
- CAmount total_fee,
- std::vector<std::string>& errors,
- CAmount& old_fee,
- CAmount& new_fee,
- CMutableTransaction& mtx);
-
//! Create bumpfee transaction based on feerate estimates.
Result CreateRateBumpTransaction(CWallet& wallet,
const uint256& txid,
diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp
index 2ebc9aba39..50f064b305 100644
--- a/src/wallet/init.cpp
+++ b/src/wallet/init.cpp
@@ -62,7 +62,7 @@ void WalletInit::AddWalletOptions() const
gArgs.AddArg("-walletbroadcast", strprintf("Make the wallet broadcast transactions (default: %u)", DEFAULT_WALLETBROADCAST), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
gArgs.AddArg("-walletdir=<dir>", "Specify directory to hold wallets (default: <datadir>/wallets if it exists, otherwise <datadir>)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET);
#if HAVE_SYSTEM
- gArgs.AddArg("-walletnotify=<cmd>", "Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
+ gArgs.AddArg("-walletnotify=<cmd>", "Execute command when a wallet transaction changes. %s in cmd is replaced by TxID and %w is replaced by wallet name. %w is not currently implemented on windows. On systems where %w is supported, it should NOT be quoted because this would break shell escaping used to invoke the command.", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
#endif
gArgs.AddArg("-walletrbf", strprintf("Send transactions with full-RBF opt-in enabled (RPC only, default: %u)", DEFAULT_WALLET_RBF), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET);
gArgs.AddArg("-zapwallettxes=<mode>", "Delete all wallet transactions and only recover those parts of the blockchain through -rescan on startup"
diff --git a/src/wallet/load.cpp b/src/wallet/load.cpp
index 071befaebf..3e92c07d64 100644
--- a/src/wallet/load.cpp
+++ b/src/wallet/load.cpp
@@ -88,8 +88,8 @@ void StartWallets(CScheduler& scheduler)
}
// Schedule periodic wallet flushes and tx rebroadcasts
- scheduler.scheduleEvery(MaybeCompactWalletDB, 500);
- scheduler.scheduleEvery(MaybeResendWalletTxs, 1000);
+ scheduler.scheduleEvery(MaybeCompactWalletDB, std::chrono::milliseconds{500});
+ scheduler.scheduleEvery(MaybeResendWalletTxs, std::chrono::milliseconds{1000});
}
void FlushWallets()
diff --git a/src/wallet/psbtwallet.cpp b/src/wallet/psbtwallet.cpp
deleted file mode 100644
index d995fb06d4..0000000000
--- a/src/wallet/psbtwallet.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2009-2019 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include <wallet/psbtwallet.h>
-
-TransactionError FillPSBT(const CWallet* pwallet, PartiallySignedTransaction& psbtx, bool& complete, int sighash_type, bool sign, bool bip32derivs)
-{
- LOCK(pwallet->cs_wallet);
- // Get all of the previous transactions
- complete = true;
- for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) {
- const CTxIn& txin = psbtx.tx->vin[i];
- PSBTInput& input = psbtx.inputs.at(i);
-
- if (PSBTInputSigned(input)) {
- continue;
- }
-
- // Verify input looks sane. This will check that we have at most one uxto, witness or non-witness.
- if (!input.IsSane()) {
- return TransactionError::INVALID_PSBT;
- }
-
- // If we have no utxo, grab it from the wallet.
- if (!input.non_witness_utxo && input.witness_utxo.IsNull()) {
- const uint256& txhash = txin.prevout.hash;
- const auto it = pwallet->mapWallet.find(txhash);
- if (it != pwallet->mapWallet.end()) {
- const CWalletTx& wtx = it->second;
- // We only need the non_witness_utxo, which is a superset of the witness_utxo.
- // The signing code will switch to the smaller witness_utxo if this is ok.
- input.non_witness_utxo = wtx.tx;
- }
- }
-
- // Get the Sighash type
- if (sign && input.sighash_type > 0 && input.sighash_type != sighash_type) {
- return TransactionError::SIGHASH_MISMATCH;
- }
-
- // Get the scriptPubKey to know which SigningProvider to use
- CScript script;
- if (!input.witness_utxo.IsNull()) {
- script = input.witness_utxo.scriptPubKey;
- } else if (input.non_witness_utxo) {
- if (txin.prevout.n >= input.non_witness_utxo->vout.size()) {
- return TransactionError::MISSING_INPUTS;
- }
- script = input.non_witness_utxo->vout[txin.prevout.n].scriptPubKey;
- } else {
- // There's no UTXO so we can just skip this now
- complete = false;
- continue;
- }
- SignatureData sigdata;
- input.FillSignatureData(sigdata);
- std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(script, sigdata);
- if (!provider) {
- complete = false;
- continue;
- }
-
- complete &= SignPSBTInput(HidingSigningProvider(provider.get(), !sign, !bip32derivs), psbtx, i, sighash_type);
- }
-
- // Fill in the bip32 keypaths and redeemscripts for the outputs so that hardware wallets can identify change
- for (unsigned int i = 0; i < psbtx.tx->vout.size(); ++i) {
- const CTxOut& out = psbtx.tx->vout.at(i);
- std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(out.scriptPubKey);
- if (provider) {
- UpdatePSBTOutput(HidingSigningProvider(provider.get(), true, !bip32derivs), psbtx, i);
- }
- }
-
- return TransactionError::OK;
-}
diff --git a/src/wallet/psbtwallet.h b/src/wallet/psbtwallet.h
deleted file mode 100644
index b35a0a58d1..0000000000
--- a/src/wallet/psbtwallet.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2009-2019 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef BITCOIN_WALLET_PSBTWALLET_H
-#define BITCOIN_WALLET_PSBTWALLET_H
-
-#include <psbt.h>
-#include <wallet/wallet.h>
-
-/**
- * Fills out a PSBT with information from the wallet. Fills in UTXOs if we have
- * them. Tries to sign if sign=true. Sets `complete` if the PSBT is now complete
- * (i.e. has all required signatures or signature-parts, and is ready to
- * finalize.) Sets `error` and returns false if something goes wrong.
- *
- * @param[in] pwallet pointer to a wallet
- * @param[in] psbtx PartiallySignedTransaction to fill in
- * @param[out] complete indicates whether the PSBT is now complete
- * @param[in] sighash_type the sighash type to use when signing (if PSBT does not specify)
- * @param[in] sign whether to sign or not
- * @param[in] bip32derivs whether to fill in bip32 derivation information if available
- * return error
- */
-NODISCARD TransactionError FillPSBT(const CWallet* pwallet,
- PartiallySignedTransaction& psbtx,
- bool& complete,
- int sighash_type = 1 /* SIGHASH_ALL */,
- bool sign = true,
- bool bip32derivs = false);
-
-#endif // BITCOIN_WALLET_PSBTWALLET_H
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index 7e704a95fe..e4d0a3fa6d 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -54,7 +54,7 @@ static std::string DecodeDumpString(const std::string &str) {
return ret.str();
}
-static bool GetWalletAddressesForKey(LegacyScriptPubKeyMan* spk_man, CWallet* const pwallet, const CKeyID& keyid, std::string& strAddr, std::string& strLabel) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
+static bool GetWalletAddressesForKey(LegacyScriptPubKeyMan* spk_man, const CWallet* const pwallet, const CKeyID& keyid, std::string& strAddr, std::string& strLabel) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
{
bool fLabelFound = false;
CKey key;
@@ -65,7 +65,7 @@ static bool GetWalletAddressesForKey(LegacyScriptPubKeyMan* spk_man, CWallet* co
strAddr += ",";
}
strAddr += EncodeDestination(dest);
- strLabel = EncodeDumpString(pwallet->mapAddressBook[dest].name);
+ strLabel = EncodeDumpString(pwallet->mapAddressBook.at(dest).name);
fLabelFound = true;
}
}
@@ -106,7 +106,7 @@ UniValue importprivkey(const JSONRPCRequest& request)
{"label", RPCArg::Type::STR, /* default */ "current label if address exists, otherwise \"\"", "An optional label"},
{"rescan", RPCArg::Type::BOOL, /* default */ "true", "Rescan the wallet for transactions"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
"\nDump a private key\n"
+ HelpExampleCli("dumpprivkey", "\"myaddress\"") +
@@ -203,7 +203,7 @@ UniValue abortrescan(const JSONRPCRequest& request)
"\nStops current wallet rescan triggered by an RPC call, e.g. by an importprivkey call.\n"
"Note: Use \"getwalletinfo\" to query the scanning progress.\n",
{},
- RPCResults{},
+ RPCResult{RPCResult::Type::BOOL, "", "Whether the abort was successful"},
RPCExamples{
"\nImport a private key\n"
+ HelpExampleCli("importprivkey", "\"mykey\"") +
@@ -242,7 +242,7 @@ UniValue importaddress(const JSONRPCRequest& request)
{"rescan", RPCArg::Type::BOOL, /* default */ "true", "Rescan the wallet for transactions"},
{"p2sh", RPCArg::Type::BOOL, /* default */ "false", "Add the P2SH version of the script as well"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
"\nImport an address with rescan\n"
+ HelpExampleCli("importaddress", "\"myaddress\"") +
@@ -337,7 +337,7 @@ UniValue importprunedfunds(const JSONRPCRequest& request)
{"rawtransaction", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "A raw transaction in hex funding an already-existing address in wallet"},
{"txoutproof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The hex output from gettxoutproof that contains the transaction"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{""},
}.Check(request);
@@ -397,7 +397,7 @@ UniValue removeprunedfunds(const JSONRPCRequest& request)
{
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The hex-encoded id of the transaction you are deleting"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("removeprunedfunds", "\"a8d0c0184dde994a09ec054286f1ce581bebf46446a512166eae7628734ea0a5\"") +
"\nAs a JSON-RPC call\n"
@@ -443,7 +443,7 @@ UniValue importpubkey(const JSONRPCRequest& request)
{"label", RPCArg::Type::STR, /* default */ "\"\"", "An optional label"},
{"rescan", RPCArg::Type::BOOL, /* default */ "true", "Rescan the wallet for transactions"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
"\nImport a public key with rescan\n"
+ HelpExampleCli("importpubkey", "\"mypubkey\"") +
@@ -527,7 +527,7 @@ UniValue importwallet(const JSONRPCRequest& request)
{
{"filename", RPCArg::Type::STR, RPCArg::Optional::NO, "The wallet file"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
"\nDump the wallet\n"
+ HelpExampleCli("dumpwallet", "\"test\"") +
@@ -676,7 +676,7 @@ UniValue importwallet(const JSONRPCRequest& request)
UniValue dumpprivkey(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
}
@@ -688,7 +688,7 @@ UniValue dumpprivkey(const JSONRPCRequest& request)
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The bitcoin address for the private key"},
},
RPCResult{
- "\"key\" (string) The private key\n"
+ RPCResult::Type::STR, "key", "The private key"
},
RPCExamples{
HelpExampleCli("dumpprivkey", "\"myaddress\"")
@@ -724,7 +724,7 @@ UniValue dumpprivkey(const JSONRPCRequest& request)
UniValue dumpwallet(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
}
@@ -738,9 +738,10 @@ UniValue dumpwallet(const JSONRPCRequest& request)
{"filename", RPCArg::Type::STR, RPCArg::Optional::NO, "The filename with path (either absolute or relative to bitcoind)"},
},
RPCResult{
- "{ (json object)\n"
- " \"filename\" : { (string) The filename with full absolute path\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "filename", "The filename with full absolute path"},
+ }
},
RPCExamples{
HelpExampleCli("dumpwallet", "\"test\"")
@@ -1321,19 +1322,21 @@ UniValue importmulti(const JSONRPCRequest& mainRequest)
"\"options\""},
},
RPCResult{
- "[ (json array) Response is an array with the same size as the input that has the execution result\n"
- " { (json object)\n"
- " \"success\" : true|false, (boolean)\n"
- " \"warnings\" : [ (json array, optional)\n"
- " \"str\", (string)\n"
- " ...\n"
- " ],\n"
- " \"error\" : { (json object, optional)\n"
- " ... JSONRPC error\n"
- " },\n"
- " },\n"
- " ...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "Response is an array with the same size as the input that has the execution result",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "success", ""},
+ {RPCResult::Type::ARR, "warnings", /* optional */ true, "",
+ {
+ {RPCResult::Type::STR, "", ""},
+ }},
+ {RPCResult::Type::OBJ, "error", /* optional */ true, "",
+ {
+ {RPCResult::Type::ELISION, "", "JSONRPC error"},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("importmulti", "'[{ \"scriptPubKey\": { \"address\": \"<my address>\" }, \"timestamp\":1455191478 }, "
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 00c4a7427a..0eb7ed2b71 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -19,14 +19,14 @@
#include <script/sign.h>
#include <util/bip32.h>
#include <util/fees.h>
+#include <util/message.h> // For MessageSign()
#include <util/moneystr.h>
#include <util/string.h>
#include <util/system.h>
#include <util/url.h>
-#include <util/validation.h>
+#include <util/vector.h>
#include <wallet/coincontrol.h>
#include <wallet/feebumper.h>
-#include <wallet/psbtwallet.h>
#include <wallet/rpcwallet.h>
#include <wallet/wallet.h>
#include <wallet/walletdb.h>
@@ -39,7 +39,7 @@
static const std::string WALLET_ENDPOINT_BASE = "/wallet/";
-static inline bool GetAvoidReuseFlag(CWallet * const pwallet, const UniValue& param) {
+static inline bool GetAvoidReuseFlag(const CWallet* const pwallet, const UniValue& param) {
bool can_avoid_reuse = pwallet->IsWalletFlagSet(WALLET_FLAG_AVOID_REUSE);
bool avoid_reuse = param.isNull() ? can_avoid_reuse : param.get_bool();
@@ -205,7 +205,7 @@ static UniValue getnewaddress(const JSONRPCRequest& request)
{"address_type", RPCArg::Type::STR, /* default */ "set by -addresstype", "The address type to use. Options are \"legacy\", \"p2sh-segwit\", and \"bech32\"."},
},
RPCResult{
- "\"address\" (string) The new bitcoin address\n"
+ RPCResult::Type::STR, "address", "The new bitcoin address"
},
RPCExamples{
HelpExampleCli("getnewaddress", "")
@@ -256,7 +256,7 @@ static UniValue getrawchangeaddress(const JSONRPCRequest& request)
{"address_type", RPCArg::Type::STR, /* default */ "set by -changetype", "The address type to use. Options are \"legacy\", \"p2sh-segwit\", and \"bech32\"."},
},
RPCResult{
- "\"address\" (string) The address\n"
+ RPCResult::Type::STR, "address", "The address"
},
RPCExamples{
HelpExampleCli("getrawchangeaddress", "")
@@ -301,10 +301,10 @@ static UniValue setlabel(const JSONRPCRequest& request)
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The bitcoin address to be associated with a label."},
{"label", RPCArg::Type::STR, RPCArg::Optional::NO, "The label to assign to the address."},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
- HelpExampleCli("setlabel", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\" \"tabby\"")
- + HelpExampleRpc("setlabel", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\", \"tabby\"")
+ HelpExampleCli("setlabel", "\"" + EXAMPLE_ADDRESS[0] + "\" \"tabby\"")
+ + HelpExampleRpc("setlabel", "\"" + EXAMPLE_ADDRESS[0] + "\", \"tabby\"")
},
}.Check(request);
@@ -342,7 +342,7 @@ static CTransactionRef SendMoney(interfaces::Chain::Lock& locked_chain, CWallet
CScript scriptPubKey = GetScriptForDestination(address);
// Create and send the transaction
- CAmount nFeeRequired;
+ CAmount nFeeRequired = 0;
std::string strError;
std::vector<CRecipient> vecSend;
int nChangePosRet = -1;
@@ -390,13 +390,13 @@ static UniValue sendtoaddress(const JSONRPCRequest& request)
" dirty if they have previously been used in a transaction."},
},
RPCResult{
- "\"txid\" (string) The transaction id.\n"
+ RPCResult::Type::STR_HEX, "txid", "The transaction id."
},
RPCExamples{
- HelpExampleCli("sendtoaddress", "\"1M72Sfpbz1BPpXFHz9m3CdqATR44Jvaydd\" 0.1")
- + HelpExampleCli("sendtoaddress", "\"1M72Sfpbz1BPpXFHz9m3CdqATR44Jvaydd\" 0.1 \"donation\" \"seans outpost\"")
- + HelpExampleCli("sendtoaddress", "\"1M72Sfpbz1BPpXFHz9m3CdqATR44Jvaydd\" 0.1 \"\" \"\" true")
- + HelpExampleRpc("sendtoaddress", "\"1M72Sfpbz1BPpXFHz9m3CdqATR44Jvaydd\", 0.1, \"donation\", \"seans outpost\"")
+ HelpExampleCli("sendtoaddress", "\"" + EXAMPLE_ADDRESS[0] + "\" 0.1")
+ + HelpExampleCli("sendtoaddress", "\"" + EXAMPLE_ADDRESS[0] + "\" 0.1 \"donation\" \"seans outpost\"")
+ + HelpExampleCli("sendtoaddress", "\"" + EXAMPLE_ADDRESS[0] + "\" 0.1 \"\" \"\" true")
+ + HelpExampleRpc("sendtoaddress", "\"" + EXAMPLE_ADDRESS[0] + "\", 0.1, \"donation\", \"seans outpost\"")
},
}.Check(request);
@@ -457,7 +457,7 @@ static UniValue sendtoaddress(const JSONRPCRequest& request)
static UniValue listaddressgroupings(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -469,17 +469,18 @@ static UniValue listaddressgroupings(const JSONRPCRequest& request)
"in past transactions\n",
{},
RPCResult{
- "[\n"
- " [\n"
- " [\n"
- " \"address\", (string) The bitcoin address\n"
- " amount, (numeric) The amount in " + CURRENCY_UNIT + "\n"
- " \"label\" (string, optional) The label\n"
- " ]\n"
- " ,...\n"
- " ]\n"
- " ,...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::STR, "address", "The bitcoin address"},
+ {RPCResult::Type::STR_AMOUNT, "amount", "The amount in " + CURRENCY_UNIT},
+ {RPCResult::Type::STR, "label", /* optional */ true, "The label"},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("listaddressgroupings", "")
@@ -518,7 +519,7 @@ static UniValue listaddressgroupings(const JSONRPCRequest& request)
static UniValue signmessage(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -532,7 +533,7 @@ static UniValue signmessage(const JSONRPCRequest& request)
{"message", RPCArg::Type::STR, RPCArg::Optional::NO, "The message to create a signature of."},
},
RPCResult{
- "\"signature\" (string) The signature of the message encoded in base 64\n"
+ RPCResult::Type::STR, "signature", "The signature of the message encoded in base 64"
},
RPCExamples{
"\nUnlock the wallet for 30 seconds\n"
@@ -564,33 +565,21 @@ static UniValue signmessage(const JSONRPCRequest& request)
throw JSONRPCError(RPC_TYPE_ERROR, "Address does not refer to key");
}
- CScript script_pub_key = GetScriptForDestination(*pkhash);
- std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(script_pub_key);
- if (!provider) {
- throw JSONRPCError(RPC_WALLET_ERROR, "Private key not available");
- }
-
- CKey key;
- CKeyID keyID(*pkhash);
- if (!provider->GetKey(keyID, key)) {
- throw JSONRPCError(RPC_WALLET_ERROR, "Private key not available");
+ std::string signature;
+ SigningResult err = pwallet->SignMessage(strMessage, *pkhash, signature);
+ if (err == SigningResult::SIGNING_FAILED) {
+ throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, SigningResultString(err));
+ } else if (err != SigningResult::OK){
+ throw JSONRPCError(RPC_WALLET_ERROR, SigningResultString(err));
}
- CHashWriter ss(SER_GETHASH, 0);
- ss << strMessageMagic;
- ss << strMessage;
-
- std::vector<unsigned char> vchSig;
- if (!key.SignCompact(ss.GetHash(), vchSig))
- throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Sign failed");
-
- return EncodeBase64(vchSig.data(), vchSig.size());
+ return signature;
}
static UniValue getreceivedbyaddress(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -603,17 +592,17 @@ static UniValue getreceivedbyaddress(const JSONRPCRequest& request)
{"minconf", RPCArg::Type::NUM, /* default */ "1", "Only include transactions confirmed at least this many times."},
},
RPCResult{
- "amount (numeric) The total amount in " + CURRENCY_UNIT + " received at this address.\n"
+ RPCResult::Type::STR_AMOUNT, "amount", "The total amount in " + CURRENCY_UNIT + " received at this address."
},
RPCExamples{
"\nThe amount from transactions with at least 1 confirmation\n"
- + HelpExampleCli("getreceivedbyaddress", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\"") +
+ + HelpExampleCli("getreceivedbyaddress", "\"" + EXAMPLE_ADDRESS[0] + "\"") +
"\nThe amount including unconfirmed transactions, zero confirmations\n"
- + HelpExampleCli("getreceivedbyaddress", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\" 0") +
+ + HelpExampleCli("getreceivedbyaddress", "\"" + EXAMPLE_ADDRESS[0] + "\" 0") +
"\nThe amount with at least 6 confirmations\n"
- + HelpExampleCli("getreceivedbyaddress", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\" 6") +
+ + HelpExampleCli("getreceivedbyaddress", "\"" + EXAMPLE_ADDRESS[0] + "\" 6") +
"\nAs a JSON-RPC call\n"
- + HelpExampleRpc("getreceivedbyaddress", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\", 6")
+ + HelpExampleRpc("getreceivedbyaddress", "\"" + EXAMPLE_ADDRESS[0] + "\", 6")
},
}.Check(request);
@@ -660,7 +649,7 @@ static UniValue getreceivedbyaddress(const JSONRPCRequest& request)
static UniValue getreceivedbylabel(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -673,7 +662,7 @@ static UniValue getreceivedbylabel(const JSONRPCRequest& request)
{"minconf", RPCArg::Type::NUM, /* default */ "1", "Only include transactions confirmed at least this many times."},
},
RPCResult{
- "amount (numeric) The total amount in " + CURRENCY_UNIT + " received for this label.\n"
+ RPCResult::Type::STR_AMOUNT, "amount", "The total amount in " + CURRENCY_UNIT + " received for this label."
},
RPCExamples{
"\nAmount received by the default label with at least 1 confirmation\n"
@@ -728,7 +717,7 @@ static UniValue getreceivedbylabel(const JSONRPCRequest& request)
static UniValue getbalance(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -745,7 +734,7 @@ static UniValue getbalance(const JSONRPCRequest& request)
{"avoid_reuse", RPCArg::Type::BOOL, /* default */ "true", "(only available if avoid_reuse wallet flag is set) Do not include balance in dirty outputs; addresses are considered dirty if they have previously been used in a transaction."},
},
RPCResult{
- "amount (numeric) The total amount in " + CURRENCY_UNIT + " received for this wallet.\n"
+ RPCResult::Type::STR_AMOUNT, "amount", "The total amount in " + CURRENCY_UNIT + " received for this wallet."
},
RPCExamples{
"\nThe total amount in the wallet with 1 or more confirmations\n"
@@ -786,7 +775,7 @@ static UniValue getbalance(const JSONRPCRequest& request)
static UniValue getunconfirmedbalance(const JSONRPCRequest &request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -795,7 +784,7 @@ static UniValue getunconfirmedbalance(const JSONRPCRequest &request)
RPCHelpMan{"getunconfirmedbalance",
"DEPRECATED\nIdentical to getbalances().mine.untrusted_pending\n",
{},
- RPCResults{},
+ RPCResult{RPCResult::Type::NUM, "", "The balance"},
RPCExamples{""},
}.Check(request);
@@ -824,14 +813,14 @@ static UniValue sendmany(const JSONRPCRequest& request)
HelpRequiringPassphrase(pwallet) + "\n",
{
{"dummy", RPCArg::Type::STR, RPCArg::Optional::NO, "Must be set to \"\" for backwards compatibility.", "\"\""},
- {"amounts", RPCArg::Type::OBJ, RPCArg::Optional::NO, "A json object with addresses and amounts",
+ {"amounts", RPCArg::Type::OBJ, RPCArg::Optional::NO, "The addresses and amounts",
{
{"address", RPCArg::Type::AMOUNT, RPCArg::Optional::NO, "The bitcoin address is the key, the numeric amount (can be string) in " + CURRENCY_UNIT + " is the value"},
},
},
{"minconf", RPCArg::Type::NUM, RPCArg::Optional::OMITTED_NAMED_ARG, "Ignored dummy value"},
{"comment", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "A comment"},
- {"subtractfeefrom", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "A json array with addresses.\n"
+ {"subtractfeefrom", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "The addresses.\n"
" The fee will be equally deducted from the amount of each selected address.\n"
" Those recipients will receive less bitcoins than you enter in their corresponding amount field.\n"
" If no addresses are specified here, the sender pays the fee.",
@@ -847,18 +836,18 @@ static UniValue sendmany(const JSONRPCRequest& request)
" \"CONSERVATIVE\""},
},
RPCResult{
- "\"txid\" (string) The transaction id for the send. Only 1 transaction is created regardless of \n"
- " the number of addresses.\n"
+ RPCResult::Type::STR_HEX, "txid", "The transaction id for the send. Only 1 transaction is created regardless of\n"
+ "the number of addresses."
},
RPCExamples{
"\nSend two amounts to two different addresses:\n"
- + HelpExampleCli("sendmany", "\"\" \"{\\\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\\\":0.01,\\\"1353tsE8YMTA4EuV7dgUXGjNFf9KpVvKHz\\\":0.02}\"") +
+ + HelpExampleCli("sendmany", "\"\" \"{\\\"" + EXAMPLE_ADDRESS[0] + "\\\":0.01,\\\"" + EXAMPLE_ADDRESS[1] + "\\\":0.02}\"") +
"\nSend two amounts to two different addresses setting the confirmation and comment:\n"
- + HelpExampleCli("sendmany", "\"\" \"{\\\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\\\":0.01,\\\"1353tsE8YMTA4EuV7dgUXGjNFf9KpVvKHz\\\":0.02}\" 6 \"testing\"") +
+ + HelpExampleCli("sendmany", "\"\" \"{\\\"" + EXAMPLE_ADDRESS[0] + "\\\":0.01,\\\"" + EXAMPLE_ADDRESS[1] + "\\\":0.02}\" 6 \"testing\"") +
"\nSend two amounts to two different addresses, subtract fee from amount:\n"
- + HelpExampleCli("sendmany", "\"\" \"{\\\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\\\":0.01,\\\"1353tsE8YMTA4EuV7dgUXGjNFf9KpVvKHz\\\":0.02}\" 1 \"\" \"[\\\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\\\",\\\"1353tsE8YMTA4EuV7dgUXGjNFf9KpVvKHz\\\"]\"") +
+ + HelpExampleCli("sendmany", "\"\" \"{\\\"" + EXAMPLE_ADDRESS[0] + "\\\":0.01,\\\"" + EXAMPLE_ADDRESS[1] + "\\\":0.02}\" 1 \"\" \"[\\\"" + EXAMPLE_ADDRESS[0] + "\\\",\\\"" + EXAMPLE_ADDRESS[1] + "\\\"]\"") +
"\nAs a JSON-RPC call\n"
- + HelpExampleRpc("sendmany", "\"\", {\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\":0.01,\"1353tsE8YMTA4EuV7dgUXGjNFf9KpVvKHz\":0.02}, 6, \"testing\"")
+ + HelpExampleRpc("sendmany", "\"\", {\"" + EXAMPLE_ADDRESS[0] + "\":0.01,\"" + EXAMPLE_ADDRESS[1] + "\":0.02}, 6, \"testing\"")
},
}.Check(request);
@@ -962,7 +951,7 @@ static UniValue addmultisigaddress(const JSONRPCRequest& request)
"If 'label' is specified, assign address to that label.\n",
{
{"nrequired", RPCArg::Type::NUM, RPCArg::Optional::NO, "The number of required signatures out of the n keys or addresses."},
- {"keys", RPCArg::Type::ARR, RPCArg::Optional::NO, "A json array of bitcoin addresses or hex-encoded public keys",
+ {"keys", RPCArg::Type::ARR, RPCArg::Optional::NO, "The bitcoin addresses or hex-encoded public keys",
{
{"key", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "bitcoin address or hex-encoded public key"},
},
@@ -971,16 +960,18 @@ static UniValue addmultisigaddress(const JSONRPCRequest& request)
{"address_type", RPCArg::Type::STR, /* default */ "set by -addresstype", "The address type to use. Options are \"legacy\", \"p2sh-segwit\", and \"bech32\"."},
},
RPCResult{
- "{\n"
- " \"address\":\"multisigaddress\", (string) The value of the new multisig address.\n"
- " \"redeemScript\":\"script\" (string) The string value of the hex-encoded redemption script.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "address", "The value of the new multisig address"},
+ {RPCResult::Type::STR_HEX, "redeemScript", "The string value of the hex-encoded redemption script"},
+ {RPCResult::Type::STR, "descriptor", "The descriptor for this multisig"},
+ }
},
RPCExamples{
"\nAdd a multisig address from 2 addresses\n"
- + HelpExampleCli("addmultisigaddress", "2 \"[\\\"16sSauSf5pF2UkUwvKGq4qjNRzBZYqgEL5\\\",\\\"171sgjn4YtPu27adkKGrdDwzRTxnRkBfKV\\\"]\"") +
+ + HelpExampleCli("addmultisigaddress", "2 \"[\\\"" + EXAMPLE_ADDRESS[0] + "\\\",\\\"" + EXAMPLE_ADDRESS[1] + "\\\"]\"") +
"\nAs a JSON-RPC call\n"
- + HelpExampleRpc("addmultisigaddress", "2, \"[\\\"16sSauSf5pF2UkUwvKGq4qjNRzBZYqgEL5\\\",\\\"171sgjn4YtPu27adkKGrdDwzRTxnRkBfKV\\\"]\"")
+ + HelpExampleRpc("addmultisigaddress", "2, \"[\\\"" + EXAMPLE_ADDRESS[0] + "\\\",\\\"" + EXAMPLE_ADDRESS[1] + "\\\"]\"")
},
}.Check(request);
@@ -1018,9 +1009,13 @@ static UniValue addmultisigaddress(const JSONRPCRequest& request)
CTxDestination dest = AddAndGetMultisigDestination(required, pubkeys, output_type, spk_man, inner);
pwallet->SetAddressBook(dest, label, "send");
+ // Make the descriptor
+ std::unique_ptr<Descriptor> descriptor = InferDescriptor(GetScriptForDestination(dest), spk_man);
+
UniValue result(UniValue::VOBJ);
result.pushKV("address", EncodeDestination(dest));
result.pushKV("redeemScript", HexStr(inner.begin(), inner.end()));
+ result.pushKV("descriptor", descriptor->ToString());
return result;
}
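
The new "descriptor" field returns the multisig in output-descriptor form, inferred from the script that was just added to the wallet. As a rough illustration of the shape of such a string only (a toy formatter, not InferDescriptor; real descriptors are built from public keys and carry a checksum suffix):

    #include <iostream>
    #include <string>
    #include <vector>

    // Toy illustration only: formats an n-of-m multisig wrapped in P2SH as a
    // descriptor-like string. The real wallet infers this from the actual script.
    static std::string ToyMultisigDescriptor(int required, const std::vector<std::string>& keys)
    {
        std::string inner = "multi(" + std::to_string(required);
        for (const auto& key : keys) inner += "," + key;
        inner += ")";
        return "sh(" + inner + ")";
    }

    int main()
    {
        std::cout << ToyMultisigDescriptor(2, {"<pubkey_1>", "<pubkey_2>"}) << "\n";
        // Prints: sh(multi(2,<pubkey_1>,<pubkey_2>))
    }
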
@@ -1035,7 +1030,7 @@ struct tallyitem
}
};
-static UniValue ListReceived(interfaces::Chain::Lock& locked_chain, CWallet * const pwallet, const UniValue& params, bool by_label) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
+static UniValue ListReceived(interfaces::Chain::Lock& locked_chain, const CWallet* const pwallet, const UniValue& params, bool by_label) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
{
// Minimum confirmations
int nMinDepth = 1;
@@ -1184,7 +1179,7 @@ static UniValue ListReceived(interfaces::Chain::Lock& locked_chain, CWallet * co
static UniValue listreceivedbyaddress(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -1199,26 +1194,27 @@ static UniValue listreceivedbyaddress(const JSONRPCRequest& request)
{"address_filter", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "If present, only return information on this address."},
},
RPCResult{
- "[\n"
- " {\n"
- " \"involvesWatchonly\" : true, (bool) Only returns true if imported addresses were involved in transaction.\n"
- " \"address\" : \"receivingaddress\", (string) The receiving address\n"
- " \"amount\" : x.xxx, (numeric) The total amount in " + CURRENCY_UNIT + " received by the address\n"
- " \"confirmations\" : n, (numeric) The number of confirmations of the most recent transaction included\n"
- " \"label\" : \"label\", (string) The label of the receiving address. The default label is \"\".\n"
- " \"txids\": [\n"
- " \"txid\", (string) The ids of transactions received with the address \n"
- " ...\n"
- " ]\n"
- " }\n"
- " ,...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "involvesWatchonly", "Only returns true if imported addresses were involved in transaction"},
+ {RPCResult::Type::STR, "address", "The receiving address"},
+ {RPCResult::Type::STR_AMOUNT, "amount", "The total amount in " + CURRENCY_UNIT + " received by the address"},
+ {RPCResult::Type::NUM, "confirmations", "The number of confirmations of the most recent transaction included"},
+ {RPCResult::Type::STR, "label", "The label of the receiving address. The default label is \"\""},
+ {RPCResult::Type::ARR, "txids", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The ids of transactions received with the address"},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("listreceivedbyaddress", "")
+ HelpExampleCli("listreceivedbyaddress", "6 true")
+ HelpExampleRpc("listreceivedbyaddress", "6, true, true")
- + HelpExampleRpc("listreceivedbyaddress", "6, true, true, \"1M72Sfpbz1BPpXFHz9m3CdqATR44Jvaydd\"")
+ + HelpExampleRpc("listreceivedbyaddress", "6, true, true, \"" + EXAMPLE_ADDRESS[0] + "\"")
},
}.Check(request);
@@ -1235,7 +1231,7 @@ static UniValue listreceivedbyaddress(const JSONRPCRequest& request)
static UniValue listreceivedbylabel(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -1249,15 +1245,16 @@ static UniValue listreceivedbylabel(const JSONRPCRequest& request)
{"include_watchonly", RPCArg::Type::BOOL, /* default */ "true for watch-only wallets, otherwise false", "Whether to include watch-only addresses (see 'importaddress')"},
},
RPCResult{
- "[\n"
- " {\n"
- " \"involvesWatchonly\" : true, (bool) Only returns true if imported addresses were involved in transaction.\n"
- " \"amount\" : x.xxx, (numeric) The total amount received by addresses with this label\n"
- " \"confirmations\" : n, (numeric) The number of confirmations of the most recent transaction included\n"
- " \"label\" : \"label\" (string) The label of the receiving address. The default label is \"\".\n"
- " }\n"
- " ,...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "involvesWatchonly", "Only returns true if imported addresses were involved in transaction"},
+ {RPCResult::Type::STR_AMOUNT, "amount", "The total amount received by addresses with this label"},
+ {RPCResult::Type::NUM, "confirmations", "The number of confirmations of the most recent transaction included"},
+ {RPCResult::Type::STR, "label", "The label of the receiving address. The default label is \"\""},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("listreceivedbylabel", "")
@@ -1294,7 +1291,7 @@ static void MaybePushAddress(UniValue & entry, const CTxDestination &dest)
* @param filter_ismine The "is mine" filter flags.
* @param filter_label Optional label string to filter incoming transactions.
*/
-static void ListTransactions(interfaces::Chain::Lock& locked_chain, CWallet* const pwallet, const CWalletTx& wtx, int nMinDepth, bool fLong, UniValue& ret, const isminefilter& filter_ismine, const std::string* filter_label) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
+static void ListTransactions(interfaces::Chain::Lock& locked_chain, const CWallet* const pwallet, const CWalletTx& wtx, int nMinDepth, bool fLong, UniValue& ret, const isminefilter& filter_ismine, const std::string* filter_label) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
{
CAmount nFee;
std::list<COutputEntry> listReceived;
@@ -1317,7 +1314,7 @@ static void ListTransactions(interfaces::Chain::Lock& locked_chain, CWallet* con
entry.pushKV("category", "send");
entry.pushKV("amount", ValueFromAmount(-s.amount));
if (pwallet->mapAddressBook.count(s.destination)) {
- entry.pushKV("label", pwallet->mapAddressBook[s.destination].name);
+ entry.pushKV("label", pwallet->mapAddressBook.at(s.destination).name);
}
entry.pushKV("vout", s.vout);
entry.pushKV("fee", ValueFromAmount(-nFee));
@@ -1334,7 +1331,7 @@ static void ListTransactions(interfaces::Chain::Lock& locked_chain, CWallet* con
{
std::string label;
if (pwallet->mapAddressBook.count(r.destination)) {
- label = pwallet->mapAddressBook[r.destination].name;
+ label = pwallet->mapAddressBook.at(r.destination).name;
}
if (filter_label && label != *filter_label) {
continue;
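
The operator[]-to-.at() changes in the two hunks above are forced by pwallet now being a pointer to const: std::map::operator[] may insert a default-constructed entry and is therefore non-const, while .at() is a read-only lookup. A tiny self-contained illustration:

    #include <iostream>
    #include <map>
    #include <string>

    static std::string LookupLabel(const std::map<std::string, std::string>& book, const std::string& dest)
    {
        // book[dest] would not compile on a const map (operator[] may insert a default entry);
        // .at() only reads and throws std::out_of_range if the key is missing, which is why
        // the callers above keep their count() guard.
        return book.count(dest) ? book.at(dest) : std::string{};
    }

    int main()
    {
        const std::map<std::string, std::string> book{{"bc1_example", "savings"}};
        std::cout << LookupLabel(book, "bc1_example") << "\n"; // prints "savings"
    }
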
@@ -1369,32 +1366,32 @@ static void ListTransactions(interfaces::Chain::Lock& locked_chain, CWallet* con
}
}
-static const std::string TransactionDescriptionString()
+static const std::vector<RPCResult> TransactionDescriptionString()
{
- return " \"confirmations\": n, (numeric) The number of confirmations for the transaction. Negative confirmations means the\n"
- " transaction conflicted that many blocks ago.\n"
- " \"generated\": xxx, (bool) Only present if transaction only input is a coinbase one.\n"
- " \"trusted\": xxx, (bool) Only present if we consider transaction to be trusted and so safe to spend from.\n"
- " \"blockhash\": \"hashvalue\", (string) The block hash containing the transaction.\n"
- " \"blockheight\": n, (numeric) The block height containing the transaction.\n"
- " \"blockindex\": n, (numeric) The index of the transaction in the block that includes it.\n"
- " \"blocktime\": xxx, (numeric) The block time expressed in " + UNIX_EPOCH_TIME + ".\n"
- " \"txid\": \"transactionid\", (string) The transaction id.\n"
- " \"walletconflicts\": [ (array) Conflicting transaction ids.\n"
- " \"txid\", (string) The transaction id.\n"
- " ...\n"
- " ],\n"
- " \"time\": xxx, (numeric) The transaction time expressed in " + UNIX_EPOCH_TIME + ".\n"
- " \"timereceived\": xxx, (numeric) The time received expressed in " + UNIX_EPOCH_TIME + ".\n"
- " \"comment\": \"...\", (string) If a comment is associated with the transaction, only present if not empty.\n"
- " \"bip125-replaceable\" : \"str\", (string) (\"yes|no|unknown\") Whether this transaction could be replaced due to BIP125 (replace-by-fee);\n"
- " may be unknown for unconfirmed transactions not in the mempool\n";
+ return {{RPCResult::Type::NUM, "confirmations", "The number of confirmations for the transaction. Negative confirmations means the\n"
+ "transaction conflicted that many blocks ago."},
+ {RPCResult::Type::BOOL, "generated", "Only present if transaction only input is a coinbase one."},
+ {RPCResult::Type::BOOL, "trusted", "Only present if we consider transaction to be trusted and so safe to spend from."},
+ {RPCResult::Type::STR_HEX, "blockhash", "The block hash containing the transaction."},
+ {RPCResult::Type::NUM, "blockheight", "The block height containing the transaction."},
+ {RPCResult::Type::NUM, "blockindex", "The index of the transaction in the block that includes it."},
+ {RPCResult::Type::NUM_TIME, "blocktime", "The block time expressed in " + UNIX_EPOCH_TIME + "."},
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id."},
+ {RPCResult::Type::ARR, "walletconflicts", "Conflicting transaction ids.",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id."},
+ }},
+ {RPCResult::Type::NUM_TIME, "time", "The transaction time expressed in " + UNIX_EPOCH_TIME + "."},
+ {RPCResult::Type::NUM_TIME, "timereceived", "The time received expressed in " + UNIX_EPOCH_TIME + "."},
+ {RPCResult::Type::STR, "comment", "If a comment is associated with the transaction, only present if not empty."},
+ {RPCResult::Type::STR, "bip125-replaceable", "(\"yes|no|unknown\") Whether this transaction could be replaced due to BIP125 (replace-by-fee);\n"
+ "may be unknown for unconfirmed transactions not in the mempool"}};
}
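
Returning a vector of structured entries instead of one preformatted string is what lets listtransactions, listsinceblock and gettransaction below splice these shared fields into their own schemas via Cat(). A minimal standalone model of that composition; Field, SharedTransactionFields and this local Cat are simplified stand-ins for the real RPCResult, TransactionDescriptionString and Cat helpers:

    #include <iostream>
    #include <string>
    #include <vector>

    struct Field { std::string name; std::string description; }; // stand-in for the real RPCResult

    static std::vector<Field> SharedTransactionFields()
    {
        return {{"confirmations", "number of confirmations"},
                {"txid", "the transaction id"},
                {"time", "transaction time"}};
    }

    // Analogous to Cat(): append one field list to another and return the result.
    static std::vector<Field> Cat(std::vector<Field> a, const std::vector<Field>& b)
    {
        a.insert(a.end(), b.begin(), b.end());
        return a;
    }

    int main()
    {
        // Per-RPC schema = RPC-specific leading fields + shared block + RPC-specific trailing fields.
        const std::vector<Field> schema =
            Cat(Cat(std::vector<Field>{{"address", "the address"}, {"amount", "the amount"}},
                    SharedTransactionFields()),
                {{"abandoned", "whether the transaction was abandoned"}});
        for (const auto& f : schema) std::cout << f.name << ": " << f.description << "\n";
    }
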
UniValue listtransactions(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -1411,27 +1408,31 @@ UniValue listtransactions(const JSONRPCRequest& request)
{"include_watchonly", RPCArg::Type::BOOL, /* default */ "true for watch-only wallets, otherwise false", "Include transactions to watch-only addresses (see 'importaddress')"},
},
RPCResult{
- "[\n"
- " {\n"
- " \"involvesWatchonly\": xxx, (bool) Only returns true if imported addresses were involved in transaction.\n"
- " \"address\":\"address\", (string) The bitcoin address of the transaction.\n"
- " \"category\": (string) The transaction category.\n"
- " \"send\" Transactions sent.\n"
- " \"receive\" Non-coinbase transactions received.\n"
- " \"generate\" Coinbase transactions received with more than 100 confirmations.\n"
- " \"immature\" Coinbase transactions received with 100 or fewer confirmations.\n"
- " \"orphan\" Orphaned coinbase transactions received.\n"
- " \"amount\": x.xxx, (numeric) The amount in " + CURRENCY_UNIT + ". This is negative for the 'send' category, and is positive\n"
- " for all other categories\n"
- " \"label\": \"label\", (string) A comment for the address/transaction, if any\n"
- " \"vout\": n, (numeric) the vout value\n"
- " \"fee\": x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n"
- " 'send' category of transactions.\n"
- + TransactionDescriptionString()
- + " \"abandoned\": xxx (bool) 'true' if the transaction has been abandoned (inputs are respendable). Only available for the \n"
- " 'send' category of transactions.\n"
- " }\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "", Cat(Cat<std::vector<RPCResult>>(
+ {
+ {RPCResult::Type::BOOL, "involvesWatchonly", "Only returns true if imported addresses were involved in transaction."},
+ {RPCResult::Type::STR, "address", "The bitcoin address of the transaction."},
+ {RPCResult::Type::STR, "category", "The transaction category.\n"
+ "\"send\" Transactions sent.\n"
+ "\"receive\" Non-coinbase transactions received.\n"
+ "\"generate\" Coinbase transactions received with more than 100 confirmations.\n"
+ "\"immature\" Coinbase transactions received with 100 or fewer confirmations.\n"
+ "\"orphan\" Orphaned coinbase transactions received."},
+ {RPCResult::Type::STR_AMOUNT, "amount", "The amount in " + CURRENCY_UNIT + ". This is negative for the 'send' category, and is positive\n"
+ "for all other categories"},
+ {RPCResult::Type::STR, "label", "A comment for the address/transaction, if any"},
+ {RPCResult::Type::NUM, "vout", "the vout value"},
+ {RPCResult::Type::STR_AMOUNT, "fee", "The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the\n"
+ "'send' category of transactions."},
+ },
+ TransactionDescriptionString()),
+ {
+ {RPCResult::Type::BOOL, "abandoned", "'true' if the transaction has been abandoned (inputs are respendable). Only available for the \n"
+ "'send' category of transactions."},
+ })},
+ }
},
RPCExamples{
"\nList the most recent 10 transactions in the systems\n"
@@ -1495,29 +1496,16 @@ UniValue listtransactions(const JSONRPCRequest& request)
if ((nFrom + nCount) > (int)ret.size())
nCount = ret.size() - nFrom;
- std::vector<UniValue> arrTmp = ret.getValues();
-
- std::vector<UniValue>::iterator first = arrTmp.begin();
- std::advance(first, nFrom);
- std::vector<UniValue>::iterator last = arrTmp.begin();
- std::advance(last, nFrom+nCount);
-
- if (last != arrTmp.end()) arrTmp.erase(last, arrTmp.end());
- if (first != arrTmp.begin()) arrTmp.erase(arrTmp.begin(), first);
-
- std::reverse(arrTmp.begin(), arrTmp.end()); // Return oldest to newest
-
- ret.clear();
- ret.setArray();
- ret.push_backV(arrTmp);
-
- return ret;
+ const std::vector<UniValue>& txs = ret.getValues();
+ UniValue result{UniValue::VARR};
+ result.push_backV({ txs.rend() - nFrom - nCount, txs.rend() - nFrom }); // Return oldest to newest
+ return result;
}
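
The replacement body above picks the requested window and emits it oldest-first in a single construction by slicing with reverse iterators, instead of the old erase-and-reverse sequence. The same arithmetic on a plain vector (assumes nFrom + nCount has already been clamped to the list size, as the code above guarantees):

    #include <iostream>
    #include <vector>

    int main()
    {
        // ret mimics the wallet's list: newest transaction first, oldest last.
        const std::vector<int> ret{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
        const int nFrom = 2;  // skip the 2 newest entries
        const int nCount = 3; // then take 3 entries

        // rend() - nFrom - nCount points at the oldest wanted entry and
        // rend() - nFrom at one past the newest wanted entry; iterating the
        // reverse-iterator range walks back toward the front, i.e. oldest to newest.
        const std::vector<int> window(ret.rend() - nFrom - nCount, ret.rend() - nFrom);

        for (int v : window) std::cout << v << ' '; // prints: 5 4 3
        std::cout << '\n';
    }
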
static UniValue listsinceblock(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -1535,34 +1523,39 @@ static UniValue listsinceblock(const JSONRPCRequest& request)
" (not guaranteed to work on pruned nodes)"},
},
RPCResult{
- "{ (json object)\n"
- " \"transactions\" : [ (json array)\n"
- " { (json object)\n"
- " \"involvesWatchonly\": xxx, (bool) Only returns true if imported addresses were involved in transaction.\n"
- " \"address\" : \"str\", (string) The bitcoin address of the transaction.\n"
- " \"category\" : \"str\", (string) The transaction category.\n"
- " \"send\" Transactions sent.\n"
- " \"receive\" Non-coinbase transactions received.\n"
- " \"generate\" Coinbase transactions received with more than 100 confirmations.\n"
- " \"immature\" Coinbase transactions received with 100 or fewer confirmations.\n"
- " \"orphan\" Orphaned coinbase transactions received.\n"
- " \"amount\": x.xxx, (numeric) The amount in " + CURRENCY_UNIT + ". This is negative for the 'send' category, and is positive\n"
- " for all other categories\n"
- " \"vout\" : n, (numeric) the vout value\n"
- " \"fee\": x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the 'send' category of transactions.\n"
- + TransactionDescriptionString()
- + " \"abandoned\": xxx, (bool) 'true' if the transaction has been abandoned (inputs are respendable). Only available for the 'send' category of transactions.\n"
- " \"label\" : \"label\" (string) A comment for the address/transaction, if any\n"
- " \"to\": \"...\", (string) If a comment to is associated with the transaction.\n"
- " },\n"
- " ...\n"
- " ],\n"
- " \"removed\": [ (json array)\n"
- " <structure is the same as \"transactions\" above, only present if include_removed=true>\n"
- " Note: transactions that were re-added in the active chain will appear as-is in this array, and may thus have a positive confirmation count.\n"
- " ],\n"
- " \"lastblock\": \"hex\" (string) The hash of the block (target_confirmations-1) from the best block on the main chain. This is typically used to feed back into listsinceblock the next time you call it. So you would generally use a target_confirmations of say 6, so you will be continually re-notified of transactions until they've reached 6 confirmations plus any new ones\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ARR, "transactions", "",
+ {
+ {RPCResult::Type::OBJ, "", "", Cat(Cat<std::vector<RPCResult>>(
+ {
+ {RPCResult::Type::BOOL, "involvesWatchonly", "Only returns true if imported addresses were involved in transaction."},
+ {RPCResult::Type::STR, "address", "The bitcoin address of the transaction."},
+ {RPCResult::Type::STR, "category", "The transaction category.\n"
+ "\"send\" Transactions sent.\n"
+ "\"receive\" Non-coinbase transactions received.\n"
+ "\"generate\" Coinbase transactions received with more than 100 confirmations.\n"
+ "\"immature\" Coinbase transactions received with 100 or fewer confirmations.\n"
+ "\"orphan\" Orphaned coinbase transactions received."},
+ {RPCResult::Type::STR_AMOUNT, "amount", "The amount in " + CURRENCY_UNIT + ". This is negative for the 'send' category, and is positive\n"
+ "for all other categories"},
+ {RPCResult::Type::NUM, "vout", "the vout value"},
+ {RPCResult::Type::STR_AMOUNT, "fee", "The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the\n"
+ "'send' category of transactions."},
+ },
+ TransactionDescriptionString()),
+ {
+ {RPCResult::Type::BOOL, "abandoned", "'true' if the transaction has been abandoned (inputs are respendable). Only available for the \n"
+ "'send' category of transactions."},
+ {RPCResult::Type::STR, "label", "A comment for the address/transaction, if any"},
+ {RPCResult::Type::STR, "to", "If a comment to is associated with the transaction."},
+ })},
+ }},
+ {RPCResult::Type::ARR, "removed", "<structure is the same as \"transactions\" above, only present if include_removed=true>\n"
+ "Note: transactions that were re-added in the active chain will appear as-is in this array, and may thus have a positive confirmation count."
+ , {{RPCResult::Type::ELISION, "", ""},}},
+ {RPCResult::Type::STR_HEX, "lastblock", "The hash of the block (target_confirmations-1) from the best block on the main chain. This is typically used to feed back into listsinceblock the next time you call it. So you would generally use a target_confirmations of say 6, so you will be continually re-notified of transactions until they've reached 6 confirmations plus any new ones"},
+ }
},
RPCExamples{
HelpExampleCli("listsinceblock", "")
@@ -1654,7 +1647,7 @@ static UniValue listsinceblock(const JSONRPCRequest& request)
static UniValue gettransaction(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -1670,35 +1663,41 @@ static UniValue gettransaction(const JSONRPCRequest& request)
"Whether to include a `decoded` field containing the decoded transaction (equivalent to RPC decoderawtransaction)"},
},
RPCResult{
- "{\n"
- " \"amount\" : x.xxx, (numeric) The transaction amount in " + CURRENCY_UNIT + "\n"
- " \"fee\": x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n"
- " 'send' category of transactions.\n"
- + TransactionDescriptionString()
- + " \"details\" : [\n"
- " {\n"
- " \"involvesWatchonly\": xxx, (bool) Only returns true if imported addresses were involved in transaction.\n"
- " \"address\" : \"address\", (string) The bitcoin address involved in the transaction\n"
- " \"category\" : (string) The transaction category.\n"
- " \"send\" Transactions sent.\n"
- " \"receive\" Non-coinbase transactions received.\n"
- " \"generate\" Coinbase transactions received with more than 100 confirmations.\n"
- " \"immature\" Coinbase transactions received with 100 or fewer confirmations.\n"
- " \"orphan\" Orphaned coinbase transactions received.\n"
- " \"amount\" : x.xxx, (numeric) The amount in " + CURRENCY_UNIT + "\n"
- " \"label\" : \"label\", (string) A comment for the address/transaction, if any\n"
- " \"vout\" : n, (numeric) the vout value\n"
- " \"fee\": x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n"
- " 'send' category of transactions.\n"
- " \"abandoned\": xxx (bool) 'true' if the transaction has been abandoned (inputs are respendable). Only available for the \n"
- " 'send' category of transactions.\n"
- " }\n"
- " ,...\n"
- " ],\n"
- " \"hex\" : \"data\" (string) Raw data for transaction\n"
- " \"decoded\" : transaction (json object) Optional, the decoded transaction (only present when `verbose` is passed), equivalent to the\n"
- " RPC decoderawtransaction method, or the RPC getrawtransaction method when `verbose` is passed.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "", Cat(Cat<std::vector<RPCResult>>(
+ {
+ {RPCResult::Type::STR_AMOUNT, "amount", "The amount in " + CURRENCY_UNIT},
+ {RPCResult::Type::STR_AMOUNT, "fee", "The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the\n"
+ "'send' category of transactions."},
+ },
+ TransactionDescriptionString()),
+ {
+ {RPCResult::Type::ARR, "details", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::BOOL, "involvesWatchonly", "Only returns true if imported addresses were involved in transaction."},
+ {RPCResult::Type::STR, "address", "The bitcoin address involved in the transaction."},
+ {RPCResult::Type::STR, "category", "The transaction category.\n"
+ "\"send\" Transactions sent.\n"
+ "\"receive\" Non-coinbase transactions received.\n"
+ "\"generate\" Coinbase transactions received with more than 100 confirmations.\n"
+ "\"immature\" Coinbase transactions received with 100 or fewer confirmations.\n"
+ "\"orphan\" Orphaned coinbase transactions received."},
+ {RPCResult::Type::STR_AMOUNT, "amount", "The amount in " + CURRENCY_UNIT},
+ {RPCResult::Type::STR, "label", "A comment for the address/transaction, if any"},
+ {RPCResult::Type::NUM, "vout", "the vout value"},
+ {RPCResult::Type::STR_AMOUNT, "fee", "The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n"
+ "'send' category of transactions."},
+ {RPCResult::Type::BOOL, "abandoned", "'true' if the transaction has been abandoned (inputs are respendable). Only available for the \n"
+ "'send' category of transactions."},
+ }},
+ }},
+ {RPCResult::Type::STR_HEX, "hex", "Raw data for transaction"},
+ {RPCResult::Type::OBJ, "decoded", "Optional, the decoded transaction (only present when `verbose` is passed)",
+ {
+ {RPCResult::Type::ELISION, "", "Equivalent to the RPC decoderawtransaction method, or the RPC getrawtransaction method when `verbose` is passed."},
+ }},
+ })
},
RPCExamples{
HelpExampleCli("gettransaction", "\"1075db55d416d3ca199f55b6084e2115b9345e16c5cf302fc80e9d5fbf5d48d\"")
@@ -1777,7 +1776,7 @@ static UniValue abandontransaction(const JSONRPCRequest& request)
{
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("abandontransaction", "\"1075db55d416d3ca199f55b6084e2115b9345e16c5cf302fc80e9d5fbf5d48d\"")
+ HelpExampleRpc("abandontransaction", "\"1075db55d416d3ca199f55b6084e2115b9345e16c5cf302fc80e9d5fbf5d48d\"")
@@ -1807,7 +1806,7 @@ static UniValue abandontransaction(const JSONRPCRequest& request)
static UniValue backupwallet(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -1818,7 +1817,7 @@ static UniValue backupwallet(const JSONRPCRequest& request)
{
{"destination", RPCArg::Type::STR, RPCArg::Optional::NO, "The destination directory or file"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("backupwallet", "\"backup.dat\"")
+ HelpExampleRpc("backupwallet", "\"backup.dat\"")
@@ -1856,7 +1855,7 @@ static UniValue keypoolrefill(const JSONRPCRequest& request)
{
{"newsize", RPCArg::Type::NUM, /* default */ "100", "The new keypool size"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("keypoolrefill", "")
+ HelpExampleRpc("keypoolrefill", "")
@@ -1908,7 +1907,7 @@ static UniValue walletpassphrase(const JSONRPCRequest& request)
{"passphrase", RPCArg::Type::STR, RPCArg::Optional::NO, "The wallet passphrase"},
{"timeout", RPCArg::Type::NUM, RPCArg::Optional::NO, "The time to keep the decryption key in seconds; capped at 100000000 (~3 years)."},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
"\nUnlock the wallet for 60 seconds\n"
+ HelpExampleCli("walletpassphrase", "\"my pass phrase\" 60") +
@@ -1988,7 +1987,7 @@ static UniValue walletpassphrasechange(const JSONRPCRequest& request)
{"oldpassphrase", RPCArg::Type::STR, RPCArg::Optional::NO, "The current passphrase"},
{"newpassphrase", RPCArg::Type::STR, RPCArg::Optional::NO, "The new passphrase"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("walletpassphrasechange", "\"old one\" \"new one\"")
+ HelpExampleRpc("walletpassphrasechange", "\"old one\", \"new one\"")
@@ -2038,12 +2037,12 @@ static UniValue walletlock(const JSONRPCRequest& request)
"After calling this method, you will need to call walletpassphrase again\n"
"before being able to call any methods which require the wallet to be unlocked.\n",
{},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
"\nSet the passphrase for 2 minutes to perform a transaction\n"
+ HelpExampleCli("walletpassphrase", "\"my pass phrase\" 120") +
"\nPerform a send (requires passphrase set)\n"
- + HelpExampleCli("sendtoaddress", "\"1M72Sfpbz1BPpXFHz9m3CdqATR44Jvaydd\" 1.0") +
+ + HelpExampleCli("sendtoaddress", "\"" + EXAMPLE_ADDRESS[0] + "\" 1.0") +
"\nClear the passphrase since we are done before 2 minutes is up\n"
+ HelpExampleCli("walletlock", "") +
"\nAs a JSON-RPC call\n"
@@ -2083,7 +2082,7 @@ static UniValue encryptwallet(const JSONRPCRequest& request)
{
{"passphrase", RPCArg::Type::STR, RPCArg::Optional::NO, "The pass phrase to encrypt the wallet with. It must be at least 1 character, but should be long."},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::STR, "", "A string with further instructions"},
RPCExamples{
"\nEncrypt your wallet\n"
+ HelpExampleCli("encryptwallet", "\"my pass phrase\"") +
@@ -2145,7 +2144,7 @@ static UniValue lockunspent(const JSONRPCRequest& request)
"Also see the listunspent call\n",
{
{"unlock", RPCArg::Type::BOOL, RPCArg::Optional::NO, "Whether to unlock (true) or lock (false) the specified transactions"},
- {"transactions", RPCArg::Type::ARR, /* default */ "empty array", "A json array of objects. Each object the txid (string) vout (numeric).",
+ {"transactions", RPCArg::Type::ARR, /* default */ "empty array", "The transaction outputs and within each, the txid (string) vout (numeric).",
{
{"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
{
@@ -2157,7 +2156,7 @@ static UniValue lockunspent(const JSONRPCRequest& request)
},
},
RPCResult{
- "true|false (boolean) Whether the command was successful or not\n"
+ RPCResult::Type::BOOL, "", "Whether the command was successful or not"
},
RPCExamples{
"\nList the unspent transactions\n"
@@ -2256,7 +2255,7 @@ static UniValue lockunspent(const JSONRPCRequest& request)
static UniValue listlockunspent(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -2267,13 +2266,14 @@ static UniValue listlockunspent(const JSONRPCRequest& request)
"See the lockunspent call to lock and unlock transactions for spending.\n",
{},
RPCResult{
- "[\n"
- " {\n"
- " \"txid\" : \"transactionid\", (string) The transaction id locked\n"
- " \"vout\" : n (numeric) The vout value\n"
- " }\n"
- " ,...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The transaction id locked"},
+ {RPCResult::Type::NUM, "vout", "The vout value"},
+ }},
+ }
},
RPCExamples{
"\nList the unspent transactions\n"
@@ -2323,7 +2323,7 @@ static UniValue settxfee(const JSONRPCRequest& request)
{"amount", RPCArg::Type::AMOUNT, RPCArg::Optional::NO, "The transaction fee in " + CURRENCY_UNIT + "/kB"},
},
RPCResult{
- "true|false (boolean) Returns true if successful\n"
+ RPCResult::Type::BOOL, "", "Returns true if successful"
},
RPCExamples{
HelpExampleCli("settxfee", "0.00001")
@@ -2361,19 +2361,23 @@ static UniValue getbalances(const JSONRPCRequest& request)
"Returns an object with all balances in " + CURRENCY_UNIT + ".\n",
{},
RPCResult{
- "{\n"
- " \"mine\": { (object) balances from outputs that the wallet can sign\n"
- " \"trusted\": xxx (numeric) trusted balance (outputs created by the wallet or confirmed outputs)\n"
- " \"untrusted_pending\": xxx (numeric) untrusted pending balance (outputs created by others that are in the mempool)\n"
- " \"immature\": xxx (numeric) balance from immature coinbase outputs\n"
- " \"used\": xxx (numeric) (only present if avoid_reuse is set) balance from coins sent to addresses that were previously spent from (potentially privacy violating)\n"
- " },\n"
- " \"watchonly\": { (object) watchonly balances (not present if wallet does not watch anything)\n"
- " \"trusted\": xxx (numeric) trusted balance (outputs created by the wallet or confirmed outputs)\n"
- " \"untrusted_pending\": xxx (numeric) untrusted pending balance (outputs created by others that are in the mempool)\n"
- " \"immature\": xxx (numeric) balance from immature coinbase outputs\n"
- " },\n"
- "}\n"},
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::OBJ, "mine", "balances from outputs that the wallet can sign",
+ {
+ {RPCResult::Type::STR_AMOUNT, "trusted", "trusted balance (outputs created by the wallet or confirmed outputs)"},
+ {RPCResult::Type::STR_AMOUNT, "untrusted_pending", "untrusted pending balance (outputs created by others that are in the mempool)"},
+ {RPCResult::Type::STR_AMOUNT, "immature", "balance from immature coinbase outputs"},
+ {RPCResult::Type::STR_AMOUNT, "used", "(only present if avoid_reuse is set) balance from coins sent to addresses that were previously spent from (potentially privacy violating)"},
+ }},
+ {RPCResult::Type::OBJ, "watchonly", "watchonly balances (not present if wallet does not watch anything)",
+ {
+ {RPCResult::Type::STR_AMOUNT, "trusted", "trusted balance (outputs created by the wallet or confirmed outputs)"},
+ {RPCResult::Type::STR_AMOUNT, "untrusted_pending", "untrusted pending balance (outputs created by others that are in the mempool)"},
+ {RPCResult::Type::STR_AMOUNT, "immature", "balance from immature coinbase outputs"},
+ }},
+ }
+ },
RPCExamples{
HelpExampleCli("getbalances", "") +
HelpExampleRpc("getbalances", "")},
@@ -2417,7 +2421,7 @@ static UniValue getbalances(const JSONRPCRequest& request)
static UniValue getwalletinfo(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -2427,27 +2431,29 @@ static UniValue getwalletinfo(const JSONRPCRequest& request)
"Returns an object containing various wallet state info.\n",
{},
RPCResult{
- "{\n"
- " \"walletname\": xxxxx, (string) the wallet name\n"
- " \"walletversion\": xxxxx, (numeric) the wallet version\n"
- " \"balance\": xxxxxxx, (numeric) DEPRECATED. Identical to getbalances().mine.trusted\n"
- " \"unconfirmed_balance\": xxx, (numeric) DEPRECATED. Identical to getbalances().mine.untrusted_pending\n"
- " \"immature_balance\": xxxxxx, (numeric) DEPRECATED. Identical to getbalances().mine.immature\n"
- " \"txcount\": xxxxxxx, (numeric) the total number of transactions in the wallet\n"
- " \"keypoololdest\": xxxxxx, (numeric) the " + UNIX_EPOCH_TIME + " of the oldest pre-generated key in the key pool\n"
- " \"keypoolsize\": xxxx, (numeric) how many new keys are pre-generated (only counts external keys)\n"
- " \"keypoolsize_hd_internal\": xxxx, (numeric) how many new keys are pre-generated for internal use (used for change outputs, only appears if the wallet is using this feature, otherwise external keys are used)\n"
- " \"unlocked_until\": ttt, (numeric) the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked\n"
- " \"paytxfee\": x.xxxx, (numeric) the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB\n"
- " \"hdseedid\": \"<hash160>\" (string, optional) the Hash160 of the HD seed (only present when HD is enabled)\n"
- " \"private_keys_enabled\": true|false (boolean) false if privatekeys are disabled for this wallet (enforced watch-only wallet)\n"
- " \"avoid_reuse\": true|false (boolean) whether this wallet tracks clean/dirty coins in terms of reuse\n"
- " \"scanning\": (json object) current scanning details, or false if no scan is in progress\n"
- " {\n"
- " \"duration\" : xxxx (numeric) elapsed seconds since scan start\n"
- " \"progress\" : x.xxxx, (numeric) scanning progress percentage [0.0, 1.0]\n"
- " }\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {
+ {RPCResult::Type::STR, "walletname", "the wallet name"},
+ {RPCResult::Type::NUM, "walletversion", "the wallet version"},
+ {RPCResult::Type::STR_AMOUNT, "balance", "DEPRECATED. Identical to getbalances().mine.trusted"},
+ {RPCResult::Type::STR_AMOUNT, "unconfirmed_balance", "DEPRECATED. Identical to getbalances().mine.untrusted_pending"},
+ {RPCResult::Type::STR_AMOUNT, "immature_balance", "DEPRECATED. Identical to getbalances().mine.immature"},
+ {RPCResult::Type::NUM, "txcount", "the total number of transactions in the wallet"},
+ {RPCResult::Type::NUM_TIME, "keypoololdest", "the " + UNIX_EPOCH_TIME + " of the oldest pre-generated key in the key pool"},
+ {RPCResult::Type::NUM, "keypoolsize", "how many new keys are pre-generated (only counts external keys)"},
+ {RPCResult::Type::NUM, "keypoolsize_hd_internal", "how many new keys are pre-generated for internal use (used for change outputs, only appears if the wallet is using this feature, otherwise external keys are used)"},
+ {RPCResult::Type::NUM_TIME, "unlocked_until", "the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked"},
+ {RPCResult::Type::STR_AMOUNT, "paytxfee", "the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB"},
+ {RPCResult::Type::STR_HEX, "hdseedid", /* optional */ true, "the Hash160 of the HD seed (only present when HD is enabled)"},
+ {RPCResult::Type::BOOL, "private_keys_enabled", "false if privatekeys are disabled for this wallet (enforced watch-only wallet)"},
+ {RPCResult::Type::BOOL, "avoid_reuse", "whether this wallet tracks clean/dirty coins in terms of reuse"},
+ {RPCResult::Type::OBJ, "scanning", "current scanning details, or false if no scan is in progress",
+ {
+ {RPCResult::Type::NUM, "duration", "elapsed seconds since scan start"},
+ {RPCResult::Type::NUM, "progress", "scanning progress percentage [0.0, 1.0]"},
+ }},
+ }},
},
RPCExamples{
HelpExampleCli("getwalletinfo", "")
@@ -2509,14 +2515,16 @@ static UniValue listwalletdir(const JSONRPCRequest& request)
"Returns a list of wallets in the wallet directory.\n",
{},
RPCResult{
- "{\n"
- " \"wallets\" : [ (json array of objects)\n"
- " {\n"
- " \"name\" : \"name\" (string) The wallet name\n"
- " }\n"
- " ,...\n"
- " ]\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ARR, "wallets", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "name", "The wallet name"},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("listwalletdir", "")
@@ -2543,10 +2551,10 @@ static UniValue listwallets(const JSONRPCRequest& request)
"For full information on the wallet, use \"getwalletinfo\"\n",
{},
RPCResult{
- "[ (json array of strings)\n"
- " \"walletname\" (string) the wallet name\n"
- " ...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::STR, "walletname", "the wallet name"},
+ }
},
RPCExamples{
HelpExampleCli("listwallets", "")
@@ -2579,10 +2587,11 @@ static UniValue loadwallet(const JSONRPCRequest& request)
{"filename", RPCArg::Type::STR, RPCArg::Optional::NO, "The wallet directory or .dat file."},
},
RPCResult{
- "{\n"
- " \"name\" : <wallet_name>, (string) The wallet name if loaded successfully.\n"
- " \"warning\" : <warning>, (string) Warning message if wallet was not loaded cleanly.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "name", "The wallet name if loaded successfully."},
+ {RPCResult::Type::STR, "warning", "Warning message if wallet was not loaded cleanly."},
+ }
},
RPCExamples{
HelpExampleCli("loadwallet", "\"test.dat\"")
@@ -2634,11 +2643,12 @@ static UniValue setwalletflag(const JSONRPCRequest& request)
{"value", RPCArg::Type::BOOL, /* default */ "true", "The new state."},
},
RPCResult{
- "{\n"
- " \"flag_name\": string (string) The name of the flag that was modified\n"
- " \"flag_state\": bool (bool) The new state of the flag\n"
- " \"warnings\": string (string) Any warnings associated with the change\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "flag_name", "The name of the flag that was modified"},
+ {RPCResult::Type::BOOL, "flag_state", "The new state of the flag"},
+ {RPCResult::Type::STR, "warnings", "Any warnings associated with the change"},
+ }
},
RPCExamples{
HelpExampleCli("setwalletflag", "avoid_reuse")
@@ -2694,10 +2704,11 @@ static UniValue createwallet(const JSONRPCRequest& request)
{"avoid_reuse", RPCArg::Type::BOOL, /* default */ "false", "Keep track of coin reuse, and treat dirty and clean coins differently with privacy considerations in mind."},
},
RPCResult{
- "{\n"
- " \"name\" : <wallet_name>, (string) The wallet name if created successfully. If the wallet was created using a full path, the wallet_name will be the full path.\n"
- " \"warning\" : <warning>, (string) Warning message if wallet was not loaded cleanly.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "name", "The wallet name if created successfully. If the wallet was created using a full path, the wallet_name will be the full path."},
+ {RPCResult::Type::STR, "warning", "Warning message if wallet was not loaded cleanly."},
+ }
},
RPCExamples{
HelpExampleCli("createwallet", "\"testwallet\"")
@@ -2756,7 +2767,7 @@ static UniValue unloadwallet(const JSONRPCRequest& request)
{
{"wallet_name", RPCArg::Type::STR, /* default */ "the wallet name from the RPC request", "The name of the wallet to unload."},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("unloadwallet", "wallet_name")
+ HelpExampleRpc("unloadwallet", "wallet_name")
@@ -2792,7 +2803,7 @@ static UniValue unloadwallet(const JSONRPCRequest& request)
static UniValue listunspent(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -2806,7 +2817,7 @@ static UniValue listunspent(const JSONRPCRequest& request)
{
{"minconf", RPCArg::Type::NUM, /* default */ "1", "The minimum confirmations to filter"},
{"maxconf", RPCArg::Type::NUM, /* default */ "9999999", "The maximum confirmations to filter"},
- {"addresses", RPCArg::Type::ARR, /* default */ "empty array", "A json array of bitcoin addresses to filter",
+ {"addresses", RPCArg::Type::ARR, /* default */ "empty array", "The bitcoin addresses to filter",
{
{"address", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "bitcoin address"},
},
@@ -2823,32 +2834,33 @@ static UniValue listunspent(const JSONRPCRequest& request)
"query_options"},
},
RPCResult{
- "[ (array of json object)\n"
- " {\n"
- " \"txid\" : \"txid\", (string) the transaction id \n"
- " \"vout\" : n, (numeric) the vout value\n"
- " \"address\" : \"address\", (string) the bitcoin address\n"
- " \"label\" : \"label\", (string) The associated label, or \"\" for the default label\n"
- " \"scriptPubKey\" : \"key\", (string) the script key\n"
- " \"amount\" : x.xxx, (numeric) the transaction output amount in " + CURRENCY_UNIT + "\n"
- " \"confirmations\" : n, (numeric) The number of confirmations\n"
- " \"redeemScript\" : \"script\" (string) The redeemScript if scriptPubKey is P2SH\n"
- " \"witnessScript\" : \"script\" (string) witnessScript if the scriptPubKey is P2WSH or P2SH-P2WSH\n"
- " \"spendable\" : xxx, (bool) Whether we have the private keys to spend this output\n"
- " \"solvable\" : xxx, (bool) Whether we know how to spend this output, ignoring the lack of keys\n"
- " \"reused\" : xxx, (bool) (only present if avoid_reuse is set) Whether this output is reused/dirty (sent to an address that was previously spent from)\n"
- " \"desc\" : xxx, (string, only when solvable) A descriptor for spending this output\n"
- " \"safe\" : xxx (bool) Whether this output is considered safe to spend. Unconfirmed transactions\n"
- " from outside keys and unconfirmed replacement transactions are considered unsafe\n"
- " and are not eligible for spending by fundrawtransaction and sendtoaddress.\n"
- " }\n"
- " ,...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "the transaction id"},
+ {RPCResult::Type::NUM, "vout", "the vout value"},
+ {RPCResult::Type::STR, "address", "the bitcoin address"},
+ {RPCResult::Type::STR, "label", "The associated label, or \"\" for the default label"},
+ {RPCResult::Type::STR, "scriptPubKey", "the script key"},
+ {RPCResult::Type::STR_AMOUNT, "amount", "the transaction output amount in " + CURRENCY_UNIT},
+ {RPCResult::Type::NUM, "confirmations", "The number of confirmations"},
+ {RPCResult::Type::STR_HEX, "redeemScript", "The redeemScript if scriptPubKey is P2SH"},
+ {RPCResult::Type::STR, "witnessScript", "witnessScript if the scriptPubKey is P2WSH or P2SH-P2WSH"},
+ {RPCResult::Type::BOOL, "spendable", "Whether we have the private keys to spend this output"},
+ {RPCResult::Type::BOOL, "solvable", "Whether we know how to spend this output, ignoring the lack of keys"},
+ {RPCResult::Type::BOOL, "reused", "(only present if avoid_reuse is set) Whether this output is reused/dirty (sent to an address that was previously spent from)"},
+ {RPCResult::Type::STR, "desc", "(only when solvable) A descriptor for spending this output"},
+ {RPCResult::Type::BOOL, "safe", "Whether this output is considered safe to spend. Unconfirmed transactions\n"
+ "from outside keys and unconfirmed replacement transactions are considered unsafe\n"
+ "and are not eligible for spending by fundrawtransaction and sendtoaddress."},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("listunspent", "")
- + HelpExampleCli("listunspent", "6 9999999 \"[\\\"1PGFqEzfmQch1gKD3ra4k18PNj3tTUUSqg\\\",\\\"1LtvqCaApEdUGFkpKMM4MstjcaL4dKg8SP\\\"]\"")
- + HelpExampleRpc("listunspent", "6, 9999999 \"[\\\"1PGFqEzfmQch1gKD3ra4k18PNj3tTUUSqg\\\",\\\"1LtvqCaApEdUGFkpKMM4MstjcaL4dKg8SP\\\"]\"")
+ + HelpExampleCli("listunspent", "6 9999999 \"[\\\"" + EXAMPLE_ADDRESS[0] + "\\\",\\\"" + EXAMPLE_ADDRESS[1] + "\\\"]\"")
+ + HelpExampleRpc("listunspent", "6, 9999999 \"[\\\"" + EXAMPLE_ADDRESS[0] + "\\\",\\\"" + EXAMPLE_ADDRESS[1] + "\\\"]\"")
+ HelpExampleCli("listunspent", "6 9999999 '[]' true '{ \"minimumAmount\": 0.005 }'")
+ HelpExampleRpc("listunspent", "6, 9999999, [] , true, { \"minimumAmount\": 0.005 } ")
},
@@ -2933,7 +2945,7 @@ static UniValue listunspent(const JSONRPCRequest& request)
CTxDestination address;
const CScript& scriptPubKey = out.tx->tx->vout[out.i].scriptPubKey;
bool fValidAddress = ExtractDestination(scriptPubKey, address);
- bool reused = avoid_reuse && pwallet->IsUsedDestination(out.tx->GetHash(), out.i);
+ bool reused = avoid_reuse && pwallet->IsSpentKey(out.tx->GetHash(), out.i);
if (destinations.size() && (!fValidAddress || !destinations.count(address)))
continue;
@@ -2950,7 +2962,7 @@ static UniValue listunspent(const JSONRPCRequest& request)
entry.pushKV("label", i->second.name);
}
- std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(scriptPubKey);
+ std::unique_ptr<SigningProvider> provider = pwallet->GetSolvingProvider(scriptPubKey);
if (provider) {
if (scriptPubKey.IsPayToScriptHash()) {
const CScriptID& hash = CScriptID(boost::get<ScriptHash>(address));
@@ -2990,7 +3002,7 @@ static UniValue listunspent(const JSONRPCRequest& request)
entry.pushKV("spendable", out.fSpendable);
entry.pushKV("solvable", out.fSolvable);
if (out.fSolvable) {
- std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(scriptPubKey);
+ std::unique_ptr<SigningProvider> provider = pwallet->GetSolvingProvider(scriptPubKey);
if (provider) {
auto descriptor = InferDescriptor(scriptPubKey, *provider);
entry.pushKV("desc", descriptor->ToString());
@@ -3155,7 +3167,7 @@ static UniValue fundrawtransaction(const JSONRPCRequest& request)
"e.g. with 'importpubkey' or 'importmulti' with the 'pubkeys' or 'desc' field."},
{"lockUnspents", RPCArg::Type::BOOL, /* default */ "false", "Lock selected unspent outputs"},
{"feeRate", RPCArg::Type::AMOUNT, /* default */ "not set: makes wallet determine the fee", "Set a specific fee rate in " + CURRENCY_UNIT + "/kB"},
- {"subtractFeeFromOutputs", RPCArg::Type::ARR, /* default */ "empty array", "A json array of integers.\n"
+ {"subtractFeeFromOutputs", RPCArg::Type::ARR, /* default */ "empty array", "The integers.\n"
" The fee will be equally deducted from the amount of each specified output.\n"
" Those recipients will receive less bitcoins than you enter in their corresponding amount field.\n"
" If no outputs are specified here, the sender pays the fee.",
@@ -3181,11 +3193,12 @@ static UniValue fundrawtransaction(const JSONRPCRequest& request)
},
},
RPCResult{
- "{\n"
- " \"hex\": \"value\", (string) The resulting raw transaction (hex-encoded string)\n"
- " \"fee\": n, (numeric) Fee in " + CURRENCY_UNIT + " the resulting transaction pays\n"
- " \"changepos\": n (numeric) The position of the added change output, or -1\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hex", "The resulting raw transaction (hex-encoded string)"},
+ {RPCResult::Type::STR_AMOUNT, "fee", "Fee in " + CURRENCY_UNIT + " the resulting transaction pays"},
+ {RPCResult::Type::NUM, "changepos", "The position of the added change output, or -1"},
+ }
},
RPCExamples{
"\nCreate a transaction with no inputs\n"
@@ -3224,7 +3237,7 @@ static UniValue fundrawtransaction(const JSONRPCRequest& request)
UniValue signrawtransactionwithwallet(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -3237,7 +3250,7 @@ UniValue signrawtransactionwithwallet(const JSONRPCRequest& request)
HelpRequiringPassphrase(pwallet) + "\n",
{
{"hexstring", RPCArg::Type::STR, RPCArg::Optional::NO, "The transaction hex string"},
- {"prevtxs", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "A json array of previous dependent transaction outputs",
+ {"prevtxs", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "The previous dependent transaction outputs",
{
{"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
{
@@ -3260,20 +3273,22 @@ UniValue signrawtransactionwithwallet(const JSONRPCRequest& request)
" \"SINGLE|ANYONECANPAY\""},
},
RPCResult{
- "{\n"
- " \"hex\" : \"value\", (string) The hex-encoded raw transaction with signature(s)\n"
- " \"complete\" : true|false, (boolean) If the transaction has a complete set of signatures\n"
- " \"errors\" : [ (json array of objects) Script verification errors (if there are any)\n"
- " {\n"
- " \"txid\" : \"hash\", (string) The hash of the referenced, previous transaction\n"
- " \"vout\" : n, (numeric) The index of the output to spent and used as input\n"
- " \"scriptSig\" : \"hex\", (string) The hex-encoded signature script\n"
- " \"sequence\" : n, (numeric) Script sequence number\n"
- " \"error\" : \"text\" (string) Verification or signing error related to the input\n"
- " }\n"
- " ,...\n"
- " ]\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "hex", "The hex-encoded raw transaction with signature(s)"},
+ {RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"},
+ {RPCResult::Type::ARR, "errors", "Script verification errors (if there are any)",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR_HEX, "txid", "The hash of the referenced, previous transaction"},
+ {RPCResult::Type::NUM, "vout", "The index of the output to spent and used as input"},
+ {RPCResult::Type::STR_HEX, "scriptSig", "The hex-encoded signature script"},
+ {RPCResult::Type::NUM, "sequence", "Script sequence number"},
+ {RPCResult::Type::STR, "error", "Verification or signing error related to the input"},
+ }},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("signrawtransactionwithwallet", "\"myhex\"")
@@ -3303,23 +3318,15 @@ UniValue signrawtransactionwithwallet(const JSONRPCRequest& request)
// Parse the prevtxs array
ParsePrevouts(request.params[1], nullptr, coins);
- std::set<std::shared_ptr<SigningProvider>> providers;
- for (const std::pair<COutPoint, Coin> coin_pair : coins) {
- std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(coin_pair.second.out.scriptPubKey);
- if (provider) {
- providers.insert(std::move(provider));
- }
- }
- if (providers.size() == 0) {
- // When there are no available providers, use a dummy SigningProvider so we can check if the tx is complete
- providers.insert(std::make_shared<SigningProvider>());
- }
+ int nHashType = ParseSighashString(request.params[2]);
+
+ // Script verification errors
+ std::map<int, std::string> input_errors;
+ bool complete = pwallet->SignTransaction(mtx, coins, nHashType, input_errors);
UniValue result(UniValue::VOBJ);
- for (std::shared_ptr<SigningProvider> provider : providers) {
- SignTransaction(mtx, provider.get(), coins, request.params[2], result);
- }
- return result;
+ SignTransactionResultToJSON(mtx, complete, coins, input_errors, result);
+ return result;
}
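
The rewritten block hands signing to the wallet in one call and collects per-input failures keyed by input index, instead of juggling a set of signing providers. A standalone sketch of that error-collection pattern; ToyInput and SignAllInputs are invented for illustration and only mirror the shape of the call above:

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    struct ToyInput { std::string txid; bool have_key; };

    // Try to "sign" every input; record a message for each one that fails
    // and report overall completeness.
    static bool SignAllInputs(const std::vector<ToyInput>& inputs, std::map<int, std::string>& input_errors)
    {
        bool complete = true;
        for (size_t i = 0; i < inputs.size(); ++i) {
            if (!inputs[i].have_key) {
                input_errors[static_cast<int>(i)] = "Unable to sign input, missing key";
                complete = false;
            }
        }
        return complete;
    }

    int main()
    {
        std::map<int, std::string> input_errors;
        const bool complete = SignAllInputs({{"aa..", true}, {"bb..", false}}, input_errors);
        std::cout << "complete: " << std::boolalpha << complete << "\n";
        for (const auto& entry : input_errors) {
            std::cout << "input " << entry.first << ": " << entry.second << "\n";
        }
    }
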
static UniValue bumpfee(const JSONRPCRequest& request)
@@ -3335,12 +3342,11 @@ static UniValue bumpfee(const JSONRPCRequest& request)
"\nBumps the fee of an opt-in-RBF transaction T, replacing it with a new transaction B.\n"
"An opt-in RBF transaction with the given txid must be in the wallet.\n"
"The command will pay the additional fee by reducing change outputs or adding inputs when necessary. It may add a new change output if one does not already exist.\n"
- "If `totalFee` (DEPRECATED) is given, adding inputs is not supported, so there must be a single change output that is big enough or it will fail.\n"
"All inputs in the original transaction will be included in the replacement transaction.\n"
"The command will fail if the wallet or mempool contains a transaction that spends one of T's outputs.\n"
"By default, the new fee will be calculated automatically using estimatesmartfee.\n"
"The user can specify a confirmation target for estimatesmartfee.\n"
- "Alternatively, the user can specify totalFee (DEPRECATED), or fee_rate (" + CURRENCY_UNIT + " per kB) for the new transaction .\n"
+ "Alternatively, the user can specify a fee_rate (" + CURRENCY_UNIT + " per kB) for the new transaction.\n"
"At a minimum, the new fee rate must be high enough to pay an additional new relay fee (incrementalfee\n"
"returned by getnetworkinfo) to enter the node's mempool.\n",
{
@@ -3348,13 +3354,9 @@ static UniValue bumpfee(const JSONRPCRequest& request)
{"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "",
{
{"confTarget", RPCArg::Type::NUM, /* default */ "wallet default", "Confirmation target (in blocks)"},
- {"totalFee", RPCArg::Type::NUM, /* default */ "fallback to 'confTarget'", "Total fee (NOT feerate) to pay, in satoshis. (DEPRECATED)\n"
- " In rare cases, the actual fee paid might be slightly higher than the specified\n"
- " totalFee if the tx change output has to be removed because it is too close to\n"
- " the dust threshold."},
- {"fee_rate", RPCArg::Type::NUM, /* default */ "fallback to 'confTarget'", "FeeRate (NOT total fee) to pay, in " + CURRENCY_UNIT + " per kB\n"
+ {"fee_rate", RPCArg::Type::NUM, /* default */ "fall back to 'confTarget'", "fee rate (NOT total fee) to pay, in " + CURRENCY_UNIT + " per kB\n"
" Specify a fee rate instead of relying on the built-in fee estimator.\n"
- " Must be at least 0.0001 BTC per kB higher than the current transaction fee rate.\n"},
+ "Must be at least 0.0001 " + CURRENCY_UNIT + " per kB higher than the current transaction fee rate.\n"},
{"replaceable", RPCArg::Type::BOOL, /* default */ "true", "Whether the new transaction should still be\n"
" marked bip-125 replaceable. If true, the sequence numbers in the transaction will\n"
" be left unchanged from the original. If false, any input sequence numbers in the\n"
@@ -3370,13 +3372,16 @@ static UniValue bumpfee(const JSONRPCRequest& request)
"options"},
},
RPCResult{
- "{\n"
- " \"psbt\": \"psbt\", (string) The base64-encoded unsigned PSBT of the new transaction. Only returned when wallet private keys are disabled.\n"
- " \"txid\": \"value\", (string) The id of the new transaction. Only returned when wallet private keys are enabled.\n"
- " \"origfee\": n, (numeric) The fee of the replaced transaction.\n"
- " \"fee\": n, (numeric) The fee of the new transaction.\n"
- " \"errors\": [ str... ] (json array of strings) Errors encountered during processing (may be empty).\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "", {
+ {RPCResult::Type::STR, "psbt", "The base64-encoded unsigned PSBT of the new transaction. Only returned when wallet private keys are disabled."},
+ {RPCResult::Type::STR_HEX, "txid", "The id of the new transaction. Only returned when wallet private keys are enabled."},
+ {RPCResult::Type::STR_AMOUNT, "origfee", "The fee of the replaced transaction."},
+ {RPCResult::Type::STR_AMOUNT, "fee", "The fee of the new transaction."},
+ {RPCResult::Type::ARR, "errors", "Errors encountered during processing (may be empty).",
+ {
+ {RPCResult::Type::STR, "", ""},
+ }},
+ }
},
RPCExamples{
"\nBump the fee, get the new transaction\'s txid\n" +
@@ -3390,7 +3395,6 @@ static UniValue bumpfee(const JSONRPCRequest& request)
CCoinControl coin_control;
coin_control.fAllowWatchOnly = pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
// optional parameters
- CAmount totalFee = 0;
coin_control.m_signal_bip125_rbf = true;
if (!request.params[1].isNull()) {
@@ -3398,26 +3402,15 @@ static UniValue bumpfee(const JSONRPCRequest& request)
RPCTypeCheckObj(options,
{
{"confTarget", UniValueType(UniValue::VNUM)},
- {"totalFee", UniValueType(UniValue::VNUM)},
{"fee_rate", UniValueType(UniValue::VNUM)},
{"replaceable", UniValueType(UniValue::VBOOL)},
{"estimate_mode", UniValueType(UniValue::VSTR)},
},
true, true);
- if (options.exists("confTarget") && (options.exists("totalFee") || options.exists("fee_rate"))) {
- throw JSONRPCError(RPC_INVALID_PARAMETER, "confTarget can't be set with totalFee or fee_rate. Please provide either a confirmation target in blocks for automatic fee estimation, or an explicit fee rate.");
- } else if (options.exists("fee_rate") && options.exists("totalFee")) {
- throw JSONRPCError(RPC_INVALID_PARAMETER, "fee_rate can't be set along with totalFee.");
+ if (options.exists("confTarget") && options.exists("fee_rate")) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "confTarget can't be set with fee_rate. Please provide either a confirmation target in blocks for automatic fee estimation, or an explicit fee rate.");
} else if (options.exists("confTarget")) { // TODO: alias this to conf_target
coin_control.m_confirm_target = ParseConfirmTarget(options["confTarget"], pwallet->chain().estimateMaxBlocks());
- } else if (options.exists("totalFee")) {
- if (!pwallet->chain().rpcEnableDeprecated("totalFee")) {
- throw JSONRPCError(RPC_INVALID_PARAMETER, "totalFee argument has been deprecated and will be removed in 0.20. Please use -deprecatedrpc=totalFee to continue using this argument until removal.");
- }
- totalFee = options["totalFee"].get_int64();
- if (totalFee <= 0) {
- throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Invalid totalFee %s (must be greater than 0)", FormatMoney(totalFee)));
- }
} else if (options.exists("fee_rate")) {
CFeeRate fee_rate(AmountFromValue(options["fee_rate"]));
if (fee_rate <= CFeeRate(0)) {
@@ -3450,13 +3443,8 @@ static UniValue bumpfee(const JSONRPCRequest& request)
CAmount new_fee;
CMutableTransaction mtx;
feebumper::Result res;
- if (totalFee > 0) {
- // Targeting total fee bump. Requires a change output of sufficient size.
- res = feebumper::CreateTotalBumpTransaction(pwallet, hash, coin_control, totalFee, errors, old_fee, new_fee, mtx);
- } else {
- // Targeting feerate bump.
- res = feebumper::CreateRateBumpTransaction(*pwallet, hash, coin_control, errors, old_fee, new_fee, mtx);
- }
+ // Targeting feerate bump.
+ res = feebumper::CreateRateBumpTransaction(*pwallet, hash, coin_control, errors, old_fee, new_fee, mtx);
if (res != feebumper::Result::OK) {
switch(res) {
case feebumper::Result::INVALID_ADDRESS_OR_KEY:
@@ -3495,7 +3483,7 @@ static UniValue bumpfee(const JSONRPCRequest& request)
} else {
PartiallySignedTransaction psbtx(mtx);
bool complete = false;
- const TransactionError err = FillPSBT(pwallet, psbtx, complete, SIGHASH_ALL, false /* sign */, true /* bip32derivs */);
+ const TransactionError err = pwallet->FillPSBT(psbtx, complete, SIGHASH_ALL, false /* sign */, true /* bip32derivs */);
CHECK_NONFATAL(err == TransactionError::OK);
CHECK_NONFATAL(!complete);
CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
@@ -3531,10 +3519,11 @@ UniValue rescanblockchain(const JSONRPCRequest& request)
{"stop_height", RPCArg::Type::NUM, RPCArg::Optional::OMITTED_NAMED_ARG, "the last block height that should be scanned. If none is provided it will rescan up to the tip at return time of this call."},
},
RPCResult{
- "{\n"
- " \"start_height\" (numeric) The block height where the rescan started (the requested height or 0)\n"
- " \"stop_height\" (numeric) The height of the last rescanned block. May be null in rare cases if there was a reorg and the call didn't scan any blocks because they were already scanned in the background.\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::NUM, "start_height", "The block height where the rescan started (the requested height or 0)"},
+ {RPCResult::Type::NUM, "stop_height", "The height of the last rescanned block. May be null in rare cases if there was a reorg and the call didn't scan any blocks because they were already scanned in the background."},
+ }
},
RPCExamples{
HelpExampleCli("rescanblockchain", "100000 120000")
@@ -3698,14 +3687,14 @@ public:
UniValue operator()(const WitnessUnknown& id) const { return UniValue(UniValue::VOBJ); }
};
-static UniValue DescribeWalletAddress(CWallet* pwallet, const CTxDestination& dest)
+static UniValue DescribeWalletAddress(const CWallet* const pwallet, const CTxDestination& dest)
{
UniValue ret(UniValue::VOBJ);
UniValue detail = DescribeAddress(dest);
CScript script = GetScriptForDestination(dest);
std::unique_ptr<SigningProvider> provider = nullptr;
if (pwallet) {
- provider = pwallet->GetSigningProvider(script);
+ provider = pwallet->GetSolvingProvider(script);
}
ret.pushKVs(detail);
ret.pushKVs(boost::apply_visitor(DescribeWalletAddressVisitor(provider.get()), dest));
@@ -3726,14 +3715,12 @@ static UniValue AddressBookDataToJSON(const CAddressBookData& data, const bool v
UniValue getaddressinfo(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
}
- const std::string example_address = "\"bc1q09vm5lfy0j5reeulh4x5752q25uqqvz34hufdl\"";
-
RPCHelpMan{"getaddressinfo",
"\nReturn information about the given bitcoin address.\n"
"Some of the information will only be present if the address is in the active wallet.\n",
@@ -3741,53 +3728,56 @@ UniValue getaddressinfo(const JSONRPCRequest& request)
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The bitcoin address for which to get information."},
},
RPCResult{
- "{\n"
- " \"address\" : \"address\", (string) The bitcoin address validated.\n"
- " \"scriptPubKey\" : \"hex\", (string) The hex-encoded scriptPubKey generated by the address.\n"
- " \"ismine\" : true|false, (boolean) If the address is yours.\n"
- " \"iswatchonly\" : true|false, (boolean) If the address is watchonly.\n"
- " \"solvable\" : true|false, (boolean) If we know how to spend coins sent to this address, ignoring the possible lack of private keys.\n"
- " \"desc\" : \"desc\", (string, optional) A descriptor for spending coins sent to this address (only when solvable).\n"
- " \"isscript\" : true|false, (boolean) If the key is a script.\n"
- " \"ischange\" : true|false, (boolean) If the address was used for change output.\n"
- " \"iswitness\" : true|false, (boolean) If the address is a witness address.\n"
- " \"witness_version\" : version (numeric, optional) The version number of the witness program.\n"
- " \"witness_program\" : \"hex\" (string, optional) The hex value of the witness program.\n"
- " \"script\" : \"type\" (string, optional) The output script type. Only if isscript is true and the redeemscript is known. Possible\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "address", "The bitcoin address validated."},
+ {RPCResult::Type::STR_HEX, "scriptPubKey", "The hex-encoded scriptPubKey generated by the address."},
+ {RPCResult::Type::BOOL, "ismine", "If the address is yours."},
+ {RPCResult::Type::BOOL, "iswatchonly", "If the address is watchonly."},
+ {RPCResult::Type::BOOL, "solvable", "If we know how to spend coins sent to this address, ignoring the possible lack of private keys."},
+ {RPCResult::Type::STR, "desc", /* optional */ true, "A descriptor for spending coins sent to this address (only when solvable)."},
+ {RPCResult::Type::BOOL, "isscript", "If the key is a script."},
+ {RPCResult::Type::BOOL, "ischange", "If the address was used for change output."},
+ {RPCResult::Type::BOOL, "iswitness", "If the address is a witness address."},
+ {RPCResult::Type::NUM, "witness_version", /* optional */ true, "The version number of the witness program."},
+ {RPCResult::Type::STR_HEX, "witness_program", /* optional */ true, "The hex value of the witness program."},
+ {RPCResult::Type::STR, "script", /* optional */ true, "The output script type. Only if isscript is true and the redeemscript is known. Possible\n"
" types: nonstandard, pubkey, pubkeyhash, scripthash, multisig, nulldata, witness_v0_keyhash,\n"
- " witness_v0_scripthash, witness_unknown.\n"
- " \"hex\" : \"hex\", (string, optional) The redeemscript for the p2sh address.\n"
- " \"pubkeys\" (array, optional) Array of pubkeys associated with the known redeemscript (only if script is multisig).\n"
- " [\n"
- " \"pubkey\" (string)\n"
- " ,...\n"
- " ]\n"
- " \"sigsrequired\" : xxxxx (numeric, optional) The number of signatures required to spend multisig output (only if script is multisig).\n"
- " \"pubkey\" : \"publickeyhex\", (string, optional) The hex value of the raw public key for single-key addresses (possibly embedded in P2SH or P2WSH).\n"
- " \"embedded\" : {...}, (object, optional) Information about the address embedded in P2SH or P2WSH, if relevant and known. Includes all\n"
+ "witness_v0_scripthash, witness_unknown."},
+ {RPCResult::Type::STR_HEX, "hex", /* optional */ true, "The redeemscript for the p2sh address."},
+ {RPCResult::Type::ARR, "pubkeys", /* optional */ true, "Array of pubkeys associated with the known redeemscript (only if script is multisig).",
+ {
+ {RPCResult::Type::STR, "pubkey", ""},
+ }},
+ {RPCResult::Type::NUM, "sigsrequired", /* optional */ true, "The number of signatures required to spend multisig output (only if script is multisig)."},
+ {RPCResult::Type::STR_HEX, "pubkey", /* optional */ true, "The hex value of the raw public key for single-key addresses (possibly embedded in P2SH or P2WSH)."},
+ {RPCResult::Type::OBJ, "embedded", /* optional */ true, "Information about the address embedded in P2SH or P2WSH, if relevant and known.",
+ {
+ {RPCResult::Type::ELISION, "", "Includes all\n"
" getaddressinfo output fields for the embedded address, excluding metadata (timestamp, hdkeypath,\n"
- " hdseedid) and relation to the wallet (ismine, iswatchonly).\n"
- " \"iscompressed\" : true|false, (boolean, optional) If the pubkey is compressed.\n"
- " \"label\" : \"label\" (string) DEPRECATED. The label associated with the address. Defaults to \"\". Replaced by the labels array below.\n"
- " \"timestamp\" : timestamp, (number, optional) The creation time of the key, if available, expressed in " + UNIX_EPOCH_TIME + ".\n"
- " \"hdkeypath\" : \"keypath\" (string, optional) The HD keypath, if the key is HD and available.\n"
- " \"hdseedid\" : \"<hash160>\" (string, optional) The Hash160 of the HD seed.\n"
- " \"hdmasterfingerprint\" : \"<hash160>\" (string, optional) The fingerprint of the master key.\n"
- " \"labels\" (array) Array of labels associated with the address. Currently limited to one label but returned\n"
- " as an array to keep the API stable if multiple labels are enabled in the future.\n"
- " [\n"
- " \"label name\" (string) The label name. Defaults to \"\".\n"
- " DEPRECATED, will be removed in 0.21. To re-enable, launch bitcoind with `-deprecatedrpc=labelspurpose`:\n"
- " {\n"
- " \"name\" : \"label name\" (string) The label name. Defaults to \"\".\n"
- " \"purpose\" : \"purpose\" (string) The purpose of the associated address (send or receive).\n"
- " }\n"
- " ]\n"
- "}\n"
+ "hdseedid) and relation to the wallet (ismine, iswatchonly)."},
+ }},
+ {RPCResult::Type::BOOL, "iscompressed", /* optional */ true, "If the pubkey is compressed."},
+ {RPCResult::Type::STR, "label", "DEPRECATED. The label associated with the address. Defaults to \"\". Replaced by the labels array below."},
+ {RPCResult::Type::NUM_TIME, "timestamp", /* optional */ true, "The creation time of the key, if available, expressed in " + UNIX_EPOCH_TIME + "."},
+ {RPCResult::Type::STR, "hdkeypath", /* optional */ true, "The HD keypath, if the key is HD and available."},
+ {RPCResult::Type::STR_HEX, "hdseedid", /* optional */ true, "The Hash160 of the HD seed."},
+ {RPCResult::Type::STR_HEX, "hdmasterfingerprint", /* optional */ true, "The fingerprint of the master key."},
+ {RPCResult::Type::ARR, "labels", "Array of labels associated with the address. Currently limited to one label but returned\n"
+ "as an array to keep the API stable if multiple labels are enabled in the future.",
+ {
+ {RPCResult::Type::STR, "label name", "The label name. Defaults to \"\"."},
+ {RPCResult::Type::OBJ, "", "label data, DEPRECATED, will be removed in 0.21. To re-enable, launch bitcoind with `-deprecatedrpc=labelspurpose`",
+ {
+ {RPCResult::Type::STR, "name", "The label name. Defaults to \"\"."},
+ {RPCResult::Type::STR, "purpose", "The purpose of the associated address (send or receive)."},
+ }},
+ }},
+ }
},
RPCExamples{
- HelpExampleCli("getaddressinfo", example_address) +
- HelpExampleRpc("getaddressinfo", example_address)
+ HelpExampleCli("getaddressinfo", "\"" + EXAMPLE_ADDRESS[0] + "\"") +
+ HelpExampleRpc("getaddressinfo", "\"" + EXAMPLE_ADDRESS[0] + "\"")
},
}.Check(request);
@@ -3806,7 +3796,7 @@ UniValue getaddressinfo(const JSONRPCRequest& request)
CScript scriptPubKey = GetScriptForDestination(dest);
ret.pushKV("scriptPubKey", HexStr(scriptPubKey.begin(), scriptPubKey.end()));
- std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(scriptPubKey);
+ std::unique_ptr<SigningProvider> provider = pwallet->GetSolvingProvider(scriptPubKey);
isminetype mine = pwallet->IsMine(dest);
ret.pushKV("ismine", bool(mine & ISMINE_SPENDABLE));
@@ -3827,7 +3817,7 @@ UniValue getaddressinfo(const JSONRPCRequest& request)
// be associated with an address, so the label should be equivalent to the
// value of the name key/value pair in the labels array below.
if ((pwallet->chain().rpcEnableDeprecated("label")) && (pwallet->mapAddressBook.count(dest))) {
- ret.pushKV("label", pwallet->mapAddressBook[dest].name);
+ ret.pushKV("label", pwallet->mapAddressBook.at(dest).name);
}
ret.pushKV("ischange", pwallet->IsChange(scriptPubKey));
@@ -3850,7 +3840,7 @@ UniValue getaddressinfo(const JSONRPCRequest& request)
// stable if we allow multiple labels to be associated with an address in
// the future.
UniValue labels(UniValue::VARR);
- std::map<CTxDestination, CAddressBookData>::iterator mi = pwallet->mapAddressBook.find(dest);
+ std::map<CTxDestination, CAddressBookData>::const_iterator mi = pwallet->mapAddressBook.find(dest);
if (mi != pwallet->mapAddressBook.end()) {
// DEPRECATED: The previous behavior of returning an array containing a
// JSON object of `name` and `purpose` key/value pairs is deprecated.
@@ -3868,7 +3858,7 @@ UniValue getaddressinfo(const JSONRPCRequest& request)
static UniValue getaddressesbylabel(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -3880,11 +3870,13 @@ static UniValue getaddressesbylabel(const JSONRPCRequest& request)
{"label", RPCArg::Type::STR, RPCArg::Optional::NO, "The label."},
},
RPCResult{
- "{ (json object with addresses as keys)\n"
- " \"address\": { (json object with information about address)\n"
- " \"purpose\": \"string\" (string) Purpose of address (\"send\" for sending address, \"receive\" for receiving address)\n"
- " },...\n"
- "}\n"
+ RPCResult::Type::OBJ_DYN, "", "json object with addresses as keys",
+ {
+ {RPCResult::Type::OBJ, "address", "json object with information about address",
+ {
+ {RPCResult::Type::STR, "purpose", "Purpose of address (\"send\" for sending address, \"receive\" for receiving address)"},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("getaddressesbylabel", "\"tabby\"")
@@ -3925,7 +3917,7 @@ static UniValue getaddressesbylabel(const JSONRPCRequest& request)
static UniValue listlabels(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -3937,10 +3929,10 @@ static UniValue listlabels(const JSONRPCRequest& request)
{"purpose", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "Address purpose to list labels for ('send','receive'). An empty string is the same as not providing this argument."},
},
RPCResult{
- "[ (json array of string)\n"
- " \"label\", (string) Label name\n"
- " ...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::STR, "label", "Label name"},
+ }
},
RPCExamples{
"\nList all labels\n"
@@ -3999,7 +3991,7 @@ UniValue sethdseed(const JSONRPCRequest& request)
{"seed", RPCArg::Type::STR, /* default */ "random seed", "The WIF private key to use as the new HD seed.\n"
" The seed value can be retrieved using the dumpwallet command. It is the private key marked hdseed=1"},
},
- RPCResults{},
+ RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
HelpExampleCli("sethdseed", "")
+ HelpExampleCli("sethdseed", "false")
@@ -4058,7 +4050,7 @@ UniValue sethdseed(const JSONRPCRequest& request)
UniValue walletprocesspsbt(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
- CWallet* const pwallet = wallet.get();
+ const CWallet* const pwallet = wallet.get();
if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
return NullUniValue;
@@ -4078,13 +4070,14 @@ UniValue walletprocesspsbt(const JSONRPCRequest& request)
" \"ALL|ANYONECANPAY\"\n"
" \"NONE|ANYONECANPAY\"\n"
" \"SINGLE|ANYONECANPAY\""},
- {"bip32derivs", RPCArg::Type::BOOL, /* default */ "false", "If true, includes the BIP 32 derivation paths for public keys if we know them"},
+ {"bip32derivs", RPCArg::Type::BOOL, /* default */ "true", "Include BIP 32 derivation paths for public keys if we know them"},
},
RPCResult{
- "{ (json object)\n"
- " \"psbt\" : \"str\", (string) The base64-encoded partially signed transaction\n"
- " \"complete\" : true|false, (boolean) If the transaction has a complete set of signatures\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "psbt", "The base64-encoded partially signed transaction"},
+ {RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"},
+ }
},
RPCExamples{
HelpExampleCli("walletprocesspsbt", "\"psbt\"")
@@ -4105,9 +4098,9 @@ UniValue walletprocesspsbt(const JSONRPCRequest& request)
// Fill transaction with our data and also sign
bool sign = request.params[1].isNull() ? true : request.params[1].get_bool();
- bool bip32derivs = request.params[3].isNull() ? false : request.params[3].get_bool();
+ bool bip32derivs = request.params[3].isNull() ? true : request.params[3].get_bool();
bool complete = true;
- const TransactionError err = FillPSBT(pwallet, psbtx, complete, nHashType, sign, bip32derivs);
+ const TransactionError err = pwallet->FillPSBT(psbtx, complete, nHashType, sign, bip32derivs);
if (err != TransactionError::OK) {
throw JSONRPCTransactionError(err);
}
@@ -4134,7 +4127,7 @@ UniValue walletcreatefundedpsbt(const JSONRPCRequest& request)
"\nCreates and funds a transaction in the Partially Signed Transaction format. Inputs will be added if supplied inputs are not enough\n"
"Implements the Creator and Updater roles.\n",
{
- {"inputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "A json array of json objects",
+ {"inputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "The inputs",
{
{"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
{
@@ -4145,7 +4138,7 @@ UniValue walletcreatefundedpsbt(const JSONRPCRequest& request)
},
},
},
- {"outputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "a json array with outputs (key-value pairs), where none of the keys are duplicated.\n"
+ {"outputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "The outputs (key-value pairs), where none of the keys are duplicated.\n"
"That is, each address can only appear once and there can only be one 'data' object.\n"
"For compatibility reasons, a dictionary, which holds the key-value pairs directly, is also\n"
" accepted as second parameter.",
@@ -4171,7 +4164,7 @@ UniValue walletcreatefundedpsbt(const JSONRPCRequest& request)
{"includeWatching", RPCArg::Type::BOOL, /* default */ "true for watch-only wallets, otherwise false", "Also select inputs which are watch only"},
{"lockUnspents", RPCArg::Type::BOOL, /* default */ "false", "Lock selected unspent outputs"},
{"feeRate", RPCArg::Type::AMOUNT, /* default */ "not set: makes wallet determine the fee", "Set a specific fee rate in " + CURRENCY_UNIT + "/kB"},
- {"subtractFeeFromOutputs", RPCArg::Type::ARR, /* default */ "empty array", "A json array of integers.\n"
+ {"subtractFeeFromOutputs", RPCArg::Type::ARR, /* default */ "empty array", "The outputs to subtract the fee from.\n"
" The fee will be equally deducted from the amount of each specified output.\n"
" Those recipients will receive less bitcoins than you enter in their corresponding amount field.\n"
" If no outputs are specified here, the sender pays the fee.",
@@ -4181,21 +4174,22 @@ UniValue walletcreatefundedpsbt(const JSONRPCRequest& request)
},
{"replaceable", RPCArg::Type::BOOL, /* default */ "wallet default", "Marks this transaction as BIP125 replaceable.\n"
" Allows this transaction to be replaced by a transaction with higher fees"},
- {"conf_target", RPCArg::Type::NUM, /* default */ "Fallback to wallet's confirmation target", "Confirmation target (in blocks)"},
+ {"conf_target", RPCArg::Type::NUM, /* default */ "fall back to wallet's confirmation target (txconfirmtarget)", "Confirmation target (in blocks)"},
{"estimate_mode", RPCArg::Type::STR, /* default */ "UNSET", "The fee estimate mode, must be one of:\n"
" \"UNSET\"\n"
" \"ECONOMICAL\"\n"
" \"CONSERVATIVE\""},
},
"options"},
- {"bip32derivs", RPCArg::Type::BOOL, /* default */ "false", "If true, includes the BIP 32 derivation paths for public keys if we know them"},
+ {"bip32derivs", RPCArg::Type::BOOL, /* default */ "true", "Include BIP 32 derivation paths for public keys if we know them"},
},
RPCResult{
- "{\n"
- " \"psbt\": \"value\", (string) The resulting raw transaction (base64-encoded string)\n"
- " \"fee\": n, (numeric) Fee in " + CURRENCY_UNIT + " the resulting transaction pays\n"
- " \"changepos\": n (numeric) The position of the added change output, or -1\n"
- "}\n"
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "psbt", "The resulting raw transaction (base64-encoded string)"},
+ {RPCResult::Type::STR_AMOUNT, "fee", "Fee in " + CURRENCY_UNIT + " the resulting transaction pays"},
+ {RPCResult::Type::NUM, "changepos", "The position of the added change output, or -1"},
+ }
},
RPCExamples{
"\nCreate a transaction with no inputs\n"
@@ -4227,9 +4221,9 @@ UniValue walletcreatefundedpsbt(const JSONRPCRequest& request)
PartiallySignedTransaction psbtx(rawTx);
// Fill transaction with our data but don't sign
- bool bip32derivs = request.params[4].isNull() ? false : request.params[4].get_bool();
+ bool bip32derivs = request.params[4].isNull() ? true : request.params[4].get_bool();
bool complete = true;
- const TransactionError err = FillPSBT(pwallet, psbtx, complete, 1, false, bip32derivs);
+ const TransactionError err = pwallet->FillPSBT(psbtx, complete, 1, false, bip32derivs);
if (err != TransactionError::OK) {
throw JSONRPCTransactionError(err);
}
diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp
index 4c9d88973e..b96cb0aa1a 100644
--- a/src/wallet/scriptpubkeyman.cpp
+++ b/src/wallet/scriptpubkeyman.cpp
@@ -5,8 +5,10 @@
#include <key_io.h>
#include <outputtype.h>
#include <script/descriptor.h>
+#include <script/sign.h>
#include <util/bip32.h>
#include <util/strencodings.h>
+#include <util/string.h>
#include <util/translation.h>
#include <wallet/scriptpubkeyman.h>
@@ -70,7 +72,15 @@ bool HaveKeys(const std::vector<valtype>& pubkeys, const LegacyScriptPubKeyMan&
return true;
}
-IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& scriptPubKey, IsMineSigVersion sigversion)
+//! Recursively solve script and return spendable/watchonly/invalid status.
+//!
+//! @param keystore legacy key and script store
+//! @param script script to solve
+//! @param sigversion script type (top-level / redeemscript / witnessscript)
+//! @param recurse_scripthash whether to recurse into nested p2sh and p2wsh
+//! scripts or simply treat any script that has been
+//! stored in the keystore as spendable
+IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& scriptPubKey, IsMineSigVersion sigversion, bool recurse_scripthash=true)
{
IsMineResult ret = IsMineResult::NO;
@@ -129,7 +139,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s
CScriptID scriptID = CScriptID(uint160(vSolutions[0]));
CScript subscript;
if (keystore.GetCScript(scriptID, subscript)) {
- ret = std::max(ret, IsMineInner(keystore, subscript, IsMineSigVersion::P2SH));
+ ret = std::max(ret, recurse_scripthash ? IsMineInner(keystore, subscript, IsMineSigVersion::P2SH) : IsMineResult::SPENDABLE);
}
break;
}
@@ -147,7 +157,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s
CScriptID scriptID = CScriptID(hash);
CScript subscript;
if (keystore.GetCScript(scriptID, subscript)) {
- ret = std::max(ret, IsMineInner(keystore, subscript, IsMineSigVersion::WITNESS_V0));
+ ret = std::max(ret, recurse_scripthash ? IsMineInner(keystore, subscript, IsMineSigVersion::WITNESS_V0) : IsMineResult::SPENDABLE);
}
break;
}
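To make the new recurse_scripthash flag concrete, here is a minimal sketch of how CanProvide (further down) uses the non-recursive mode; the helper name is hypothetical and it would live next to IsMineInner in scriptpubkeyman.cpp:

    // Sketch only: with recursion (the default) the stored redeemscript's inner
    // keys decide the result; with recurse_scripthash=false, merely having the
    // redeemscript in the keystore already reports SPENDABLE.
    static bool KnowsHowToSolve(const LegacyScriptPubKeyMan& keystore, const CScript& script_pub_key)
    {
        const IsMineResult ret = IsMineInner(keystore, script_pub_key, IsMineSigVersion::TOP, /* recurse_scripthash= */ false);
        return ret == IsMineResult::SPENDABLE || ret == IsMineResult::WATCH_ONLY;
    }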
@@ -350,7 +360,7 @@ bool LegacyScriptPubKeyMan::IsHDEnabled() const
return !hdChain.seed_id.IsNull();
}
-bool LegacyScriptPubKeyMan::CanGetAddresses(bool internal)
+bool LegacyScriptPubKeyMan::CanGetAddresses(bool internal) const
{
LOCK(cs_KeyStore);
// Check if the keypool has keys
@@ -433,7 +443,7 @@ static int64_t GetOldestKeyTimeInPool(const std::set<int64_t>& setKeyPool, Walle
return keypool.nTime;
}
-int64_t LegacyScriptPubKeyMan::GetOldestKeyPoolTime()
+int64_t LegacyScriptPubKeyMan::GetOldestKeyPoolTime() const
{
LOCK(cs_KeyStore);
@@ -451,7 +461,7 @@ int64_t LegacyScriptPubKeyMan::GetOldestKeyPoolTime()
return oldestKey;
}
-size_t LegacyScriptPubKeyMan::KeypoolCountExternalKeys()
+size_t LegacyScriptPubKeyMan::KeypoolCountExternalKeys() const
{
LOCK(cs_KeyStore);
return setExternalKeyPool.size() + set_pre_split_keypool.size();
@@ -469,18 +479,18 @@ int64_t LegacyScriptPubKeyMan::GetTimeFirstKey() const
return nTimeFirstKey;
}
-std::unique_ptr<SigningProvider> LegacyScriptPubKeyMan::GetSigningProvider(const CScript& script) const
+std::unique_ptr<SigningProvider> LegacyScriptPubKeyMan::GetSolvingProvider(const CScript& script) const
{
return MakeUnique<LegacySigningProvider>(*this);
}
bool LegacyScriptPubKeyMan::CanProvide(const CScript& script, SignatureData& sigdata)
{
- if (IsMine(script) != ISMINE_NO) {
- // If it IsMine, we can always provide in some way
- return true;
- } else if (HaveCScript(CScriptID(script))) {
- // We can still provide some stuff if we have the script, but IsMine failed because we don't have keys
+ IsMineResult ismine = IsMineInner(*this, script, IsMineSigVersion::TOP, /* recurse_scripthash= */ false);
+ if (ismine == IsMineResult::SPENDABLE || ismine == IsMineResult::WATCH_ONLY) {
+ // If ismine, it means we recognize keys or script ids in the script, or
+ // are watching the script itself, and we can at least provide metadata
+ // or solving information, even if not able to sign fully.
return true;
} else {
// If, given the stuff in sigdata, we could make a valid signature, then we can provide for this script
@@ -497,6 +507,67 @@ bool LegacyScriptPubKeyMan::CanProvide(const CScript& script, SignatureData& sig
}
}
+bool LegacyScriptPubKeyMan::SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const
+{
+ return ::SignTransaction(tx, this, coins, sighash, input_errors);
+}
+
+SigningResult LegacyScriptPubKeyMan::SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const
+{
+ CKeyID key_id(pkhash);
+ CKey key;
+ if (!GetKey(key_id, key)) {
+ return SigningResult::PRIVATE_KEY_NOT_AVAILABLE;
+ }
+
+ if (MessageSign(key, message, str_sig)) {
+ return SigningResult::OK;
+ }
+ return SigningResult::SIGNING_FAILED;
+}
+
+TransactionError LegacyScriptPubKeyMan::FillPSBT(PartiallySignedTransaction& psbtx, int sighash_type, bool sign, bool bip32derivs) const
+{
+ for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) {
+ const CTxIn& txin = psbtx.tx->vin[i];
+ PSBTInput& input = psbtx.inputs.at(i);
+
+ if (PSBTInputSigned(input)) {
+ continue;
+ }
+
+ // Verify input looks sane. This will check that we have at most one UTXO, witness or non-witness.
+ if (!input.IsSane()) {
+ return TransactionError::INVALID_PSBT;
+ }
+
+ // Get the Sighash type
+ if (sign && input.sighash_type > 0 && input.sighash_type != sighash_type) {
+ return TransactionError::SIGHASH_MISMATCH;
+ }
+
+ // Check non_witness_utxo has specified prevout
+ if (input.non_witness_utxo) {
+ if (txin.prevout.n >= input.non_witness_utxo->vout.size()) {
+ return TransactionError::MISSING_INPUTS;
+ }
+ } else if (input.witness_utxo.IsNull()) {
+ // There's no UTXO so we can just skip this now
+ continue;
+ }
+ SignatureData sigdata;
+ input.FillSignatureData(sigdata);
+ SignPSBTInput(HidingSigningProvider(this, !sign, !bip32derivs), psbtx, i, sighash_type);
+ }
+
+ // Fill in the bip32 keypaths and redeemscripts for the outputs so that hardware wallets can identify change
+ for (unsigned int i = 0; i < psbtx.tx->vout.size(); ++i) {
+ UpdatePSBTOutput(HidingSigningProvider(this, true, !bip32derivs), psbtx, i);
+ }
+
+ return TransactionError::OK;
+}
+
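A minimal usage sketch for the new per-ScriptPubKeyMan PSBT filler, mirroring how the updated psbt_wallet_tests.cpp below calls it; spk_man and a decoded psbtx are assumed to exist:

    // Sketch only: ask a single ScriptPubKeyMan to add scripts, key-origin data
    // and (optionally) signatures to a PSBT whose inputs it can solve.
    TransactionError FillWithSpkMan(const LegacyScriptPubKeyMan& spk_man, PartiallySignedTransaction& psbtx)
    {
        // sign=true also produces signatures where keys are available;
        // bip32derivs=true adds key origins so hardware wallets can identify change.
        return spk_man.FillPSBT(psbtx, SIGHASH_ALL, /* sign */ true, /* bip32derivs */ true);
    }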
const CKeyMetadata* LegacyScriptPubKeyMan::GetMetadata(const CTxDestination& dest) const
{
LOCK(cs_KeyStore);
@@ -919,7 +990,7 @@ void LegacyScriptPubKeyMan::DeriveNewChildKey(WalletBatch &batch, CKeyMetadata&
// example: 1 | BIP32_HARDENED_KEY_LIMIT == 0x80000001 == 2147483649
if (internal) {
chainChildKey.Derive(childKey, hdChain.nInternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
- metadata.hdKeypath = "m/0'/1'/" + std::to_string(hdChain.nInternalChainCounter) + "'";
+ metadata.hdKeypath = "m/0'/1'/" + ToString(hdChain.nInternalChainCounter) + "'";
metadata.key_origin.path.push_back(0 | BIP32_HARDENED_KEY_LIMIT);
metadata.key_origin.path.push_back(1 | BIP32_HARDENED_KEY_LIMIT);
metadata.key_origin.path.push_back(hdChain.nInternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
@@ -927,7 +998,7 @@ void LegacyScriptPubKeyMan::DeriveNewChildKey(WalletBatch &batch, CKeyMetadata&
}
else {
chainChildKey.Derive(childKey, hdChain.nExternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
- metadata.hdKeypath = "m/0'/0'/" + std::to_string(hdChain.nExternalChainCounter) + "'";
+ metadata.hdKeypath = "m/0'/0'/" + ToString(hdChain.nExternalChainCounter) + "'";
metadata.key_origin.path.push_back(0 | BIP32_HARDENED_KEY_LIMIT);
metadata.key_origin.path.push_back(0 | BIP32_HARDENED_KEY_LIMIT);
metadata.key_origin.path.push_back(hdChain.nExternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
@@ -965,7 +1036,7 @@ void LegacyScriptPubKeyMan::LoadKeyPool(int64_t nIndex, const CKeyPool &keypool)
mapKeyMetadata[keyid] = CKeyMetadata(keypool.nTime);
}
-bool LegacyScriptPubKeyMan::CanGenerateKeys()
+bool LegacyScriptPubKeyMan::CanGenerateKeys() const
{
// A wallet can generate keys if it has an HD seed (IsHDEnabled) or it is a non-HD wallet (pre FEATURE_HD)
LOCK(cs_KeyStore);
diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h
index 7b1c023bc9..8512eadf31 100644
--- a/src/wallet/scriptpubkeyman.h
+++ b/src/wallet/scriptpubkeyman.h
@@ -5,8 +5,11 @@
#ifndef BITCOIN_WALLET_SCRIPTPUBKEYMAN_H
#define BITCOIN_WALLET_SCRIPTPUBKEYMAN_H
+#include <psbt.h>
#include <script/signingprovider.h>
#include <script/standard.h>
+#include <util/error.h>
+#include <util/message.h>
#include <wallet/crypter.h>
#include <wallet/ismine.h>
#include <wallet/walletdb.h>
@@ -184,7 +187,7 @@ public:
virtual bool IsHDEnabled() const { return false; }
/* Returns true if the wallet can give out new addresses. This means it has keys in the keypool or can generate new keys */
- virtual bool CanGetAddresses(bool internal = false) { return false; }
+ virtual bool CanGetAddresses(bool internal = false) const { return false; }
/** Upgrades the wallet to the specified version */
virtual bool Upgrade(int prev_version, std::string& error) { return false; }
@@ -194,22 +197,29 @@ public:
//! The action to do when the DB needs rewrite
virtual void RewriteDB() {}
- virtual int64_t GetOldestKeyPoolTime() { return GetTime(); }
+ virtual int64_t GetOldestKeyPoolTime() const { return GetTime(); }
- virtual size_t KeypoolCountExternalKeys() { return 0; }
+ virtual size_t KeypoolCountExternalKeys() const { return 0; }
virtual unsigned int GetKeyPoolSize() const { return 0; }
virtual int64_t GetTimeFirstKey() const { return 0; }
virtual const CKeyMetadata* GetMetadata(const CTxDestination& dest) const { return nullptr; }
- virtual std::unique_ptr<SigningProvider> GetSigningProvider(const CScript& script) const { return nullptr; }
+ virtual std::unique_ptr<SigningProvider> GetSolvingProvider(const CScript& script) const { return nullptr; }
- /** Whether this ScriptPubKeyMan can provide a SigningProvider (via GetSigningProvider) that, combined with
- * sigdata, can produce a valid signature.
+ /** Whether this ScriptPubKeyMan can provide a SigningProvider (via GetSolvingProvider) that, combined with
+ * sigdata, can produce solving data.
*/
virtual bool CanProvide(const CScript& script, SignatureData& sigdata) { return false; }
+ /** Creates new signatures and adds them to the transaction. Returns whether all inputs were signed */
+ virtual bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const { return false; }
+ /** Sign a message with the given script */
+ virtual SigningResult SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const { return SigningResult::SIGNING_FAILED; };
+ /** Adds script and derivation path information to a PSBT, and optionally signs it. */
+ virtual TransactionError FillPSBT(PartiallySignedTransaction& psbt, int sighash_type = 1 /* SIGHASH_ALL */, bool sign = true, bool bip32derivs = false) const { return TransactionError::INVALID_PSBT; }
+
virtual uint256 GetID() const { return uint256(); }
/** Prepends the wallet name in logging output to ease debugging in multi-wallet use cases */
@@ -336,20 +346,24 @@ public:
void RewriteDB() override;
- int64_t GetOldestKeyPoolTime() override;
- size_t KeypoolCountExternalKeys() override;
+ int64_t GetOldestKeyPoolTime() const override;
+ size_t KeypoolCountExternalKeys() const override;
unsigned int GetKeyPoolSize() const override;
int64_t GetTimeFirstKey() const override;
const CKeyMetadata* GetMetadata(const CTxDestination& dest) const override;
- bool CanGetAddresses(bool internal = false) override;
+ bool CanGetAddresses(bool internal = false) const override;
- std::unique_ptr<SigningProvider> GetSigningProvider(const CScript& script) const override;
+ std::unique_ptr<SigningProvider> GetSolvingProvider(const CScript& script) const override;
bool CanProvide(const CScript& script, SignatureData& sigdata) override;
+ bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const override;
+ SigningResult SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const override;
+ TransactionError FillPSBT(PartiallySignedTransaction& psbt, int sighash_type = 1 /* SIGHASH_ALL */, bool sign = true, bool bip32derivs = false) const override;
+
uint256 GetID() const override;
// Map from Key ID to key metadata.
@@ -410,7 +424,7 @@ public:
bool ImportScriptPubKeys(const std::set<CScript>& script_pub_keys, const bool have_solving_data, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore);
/* Returns true if the wallet can generate new keys */
- bool CanGenerateKeys();
+ bool CanGenerateKeys() const;
/* Generates a new HD seed (will not be activated) */
CPubKey GenerateNewSeed();
@@ -447,7 +461,7 @@ public:
std::set<CKeyID> GetKeys() const override;
};
-/** Wraps a LegacyScriptPubKeyMan so that it can be returned in a new unique_ptr */
+/** Wraps a LegacyScriptPubKeyMan so that it can be returned in a new unique_ptr. Does not provide privkeys */
class LegacySigningProvider : public SigningProvider
{
private:
@@ -458,8 +472,8 @@ public:
bool GetCScript(const CScriptID &scriptid, CScript& script) const override { return m_spk_man.GetCScript(scriptid, script); }
bool HaveCScript(const CScriptID &scriptid) const override { return m_spk_man.HaveCScript(scriptid); }
bool GetPubKey(const CKeyID &address, CPubKey& pubkey) const override { return m_spk_man.GetPubKey(address, pubkey); }
- bool GetKey(const CKeyID &address, CKey& key) const override { return m_spk_man.GetKey(address, key); }
- bool HaveKey(const CKeyID &address) const override { return m_spk_man.HaveKey(address); }
+ bool GetKey(const CKeyID &address, CKey& key) const override { return false; }
+ bool HaveKey(const CKeyID &address) const override { return false; }
bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const override { return m_spk_man.GetKeyOrigin(keyid, info); }
};
diff --git a/src/wallet/test/coinselector_tests.cpp b/src/wallet/test/coinselector_tests.cpp
index d65a0e9075..21d57cb898 100644
--- a/src/wallet/test/coinselector_tests.cpp
+++ b/src/wallet/test/coinselector_tests.cpp
@@ -189,6 +189,19 @@ BOOST_AUTO_TEST_CASE(bnb_search_test)
actual_selection.clear();
selection.clear();
+ // Cost of change is greater than the difference between target value and utxo sum
+ add_coin(1 * CENT, 1, actual_selection);
+ BOOST_CHECK(SelectCoinsBnB(GroupCoins(utxo_pool), 0.9 * CENT, 0.5 * CENT, selection, value_ret, not_input_fees));
+ BOOST_CHECK_EQUAL(value_ret, 1 * CENT);
+ BOOST_CHECK(equal_sets(selection, actual_selection));
+ actual_selection.clear();
+ selection.clear();
+
+ // Cost of change is less than the difference between target value and utxo sum
+ BOOST_CHECK(!SelectCoinsBnB(GroupCoins(utxo_pool), 0.9 * CENT, 0, selection, value_ret, not_input_fees));
+ actual_selection.clear();
+ selection.clear();
+
// Select 10 Cent
add_coin(5 * CENT, 5, utxo_pool);
add_coin(4 * CENT, 4, actual_selection);
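A quick check of the arithmetic behind the two cost-of-change cases added above: the search finds a 1 CENT solution for a 0.9 CENT target, leaving 0.1 CENT of excess. With cost_of_change = 0.5 CENT that excess is within the allowed waste, so selection succeeds; with cost_of_change = 0 it is not, and SelectCoinsBnB reports failure.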
diff --git a/src/wallet/test/psbt_wallet_tests.cpp b/src/wallet/test/psbt_wallet_tests.cpp
index f923de6178..8b7b7af21d 100644
--- a/src/wallet/test/psbt_wallet_tests.cpp
+++ b/src/wallet/test/psbt_wallet_tests.cpp
@@ -5,7 +5,6 @@
#include <key_io.h>
#include <util/bip32.h>
#include <util/strencodings.h>
-#include <wallet/psbtwallet.h>
#include <wallet/wallet.h>
#include <boost/test/unit_test.hpp>
@@ -61,7 +60,7 @@ BOOST_AUTO_TEST_CASE(psbt_updater_test)
// Fill transaction with our data
bool complete = true;
- BOOST_REQUIRE_EQUAL(TransactionError::OK, FillPSBT(&m_wallet, psbtx, complete, SIGHASH_ALL, false, true));
+ BOOST_REQUIRE_EQUAL(TransactionError::OK, m_wallet.FillPSBT(psbtx, complete, SIGHASH_ALL, false, true));
// Get the final tx
CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
@@ -74,9 +73,7 @@ BOOST_AUTO_TEST_CASE(psbt_updater_test)
// Try to sign the mutated input
SignatureData sigdata;
- psbtx.inputs[0].FillSignatureData(sigdata);
- const std::unique_ptr<SigningProvider> provider = m_wallet.GetSigningProvider(ws1, sigdata);
- BOOST_CHECK(!SignPSBTInput(*provider, psbtx, 0, SIGHASH_ALL));
+ BOOST_CHECK(spk_man->FillPSBT(psbtx, SIGHASH_ALL, true, true) != TransactionError::OK);
}
BOOST_AUTO_TEST_CASE(parse_hd_keypath)
diff --git a/src/wallet/test/scriptpubkeyman_tests.cpp b/src/wallet/test/scriptpubkeyman_tests.cpp
new file mode 100644
index 0000000000..757865ea37
--- /dev/null
+++ b/src/wallet/test/scriptpubkeyman_tests.cpp
@@ -0,0 +1,43 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <key.h>
+#include <script/standard.h>
+#include <test/util/setup_common.h>
+#include <wallet/scriptpubkeyman.h>
+#include <wallet/wallet.h>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(scriptpubkeyman_tests, BasicTestingSetup)
+
+// Test LegacyScriptPubKeyMan::CanProvide behavior, making sure it returns true
+// for recognized scripts even when keys may not be available for signing.
+BOOST_AUTO_TEST_CASE(CanProvide)
+{
+ // Set up wallet and keyman variables.
+ NodeContext node;
+ std::unique_ptr<interfaces::Chain> chain = interfaces::MakeChain(node);
+ CWallet wallet(chain.get(), WalletLocation(), WalletDatabase::CreateDummy());
+ LegacyScriptPubKeyMan& keyman = *wallet.GetOrCreateLegacyScriptPubKeyMan();
+
+ // Make a 1 of 2 multisig script
+ std::vector<CKey> keys(2);
+ std::vector<CPubKey> pubkeys;
+ for (CKey& key : keys) {
+ key.MakeNewKey(true);
+ pubkeys.emplace_back(key.GetPubKey());
+ }
+ CScript multisig_script = GetScriptForMultisig(1, pubkeys);
+ CScript p2sh_script = GetScriptForDestination(ScriptHash(multisig_script));
+ SignatureData data;
+
+ // Verify the p2sh(multisig) script is not recognized until the multisig
+ // script is added to the keystore to make it solvable
+ BOOST_CHECK(!keyman.CanProvide(p2sh_script, data));
+ keyman.AddCScript(multisig_script);
+ BOOST_CHECK(keyman.CanProvide(p2sh_script, data));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 405afb6d8d..9a972febab 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -27,7 +27,6 @@
#include <util/moneystr.h>
#include <util/rbf.h>
#include <util/translation.h>
-#include <util/validation.h>
#include <wallet/coincontrol.h>
#include <wallet/fees.h>
@@ -345,7 +344,7 @@ bool CWallet::ChangeWalletPassphrase(const SecureString& strOldWalletPassphrase,
return false;
}
-void CWallet::ChainStateFlushed(const CBlockLocator& loc)
+void CWallet::chainStateFlushed(const CBlockLocator& loc)
{
WalletBatch batch(*database);
batch.WriteBestBlock(loc);
@@ -714,7 +713,7 @@ bool CWallet::MarkReplaced(const uint256& originalHash, const uint256& newHash)
return success;
}
-void CWallet::SetUsedDestinationState(WalletBatch& batch, const uint256& hash, unsigned int n, bool used, std::set<CTxDestination>& tx_destinations)
+void CWallet::SetSpentKeyState(WalletBatch& batch, const uint256& hash, unsigned int n, bool used, std::set<CTxDestination>& tx_destinations)
{
AssertLockHeld(cs_wallet);
const CWalletTx* srctx = GetWalletTx(hash);
@@ -734,7 +733,7 @@ void CWallet::SetUsedDestinationState(WalletBatch& batch, const uint256& hash, u
}
}
-bool CWallet::IsUsedDestination(const uint256& hash, unsigned int n) const
+bool CWallet::IsSpentKey(const uint256& hash, unsigned int n) const
{
AssertLockHeld(cs_wallet);
CTxDestination dst;
@@ -777,7 +776,7 @@ bool CWallet::AddToWallet(const CWalletTx& wtxIn, bool fFlushOnClose)
for (const CTxIn& txin : wtxIn.tx->vin) {
const COutPoint& op = txin.prevout;
- SetUsedDestinationState(batch, op.hash, op.n, true, tx_destinations);
+ SetSpentKeyState(batch, op.hash, op.n, true, tx_destinations);
}
MarkDestinationsDirty(tx_destinations);
@@ -847,6 +846,14 @@ bool CWallet::AddToWallet(const CWalletTx& wtxIn, bool fFlushOnClose)
if (!strCmd.empty())
{
boost::replace_all(strCmd, "%s", wtxIn.GetHash().GetHex());
+#ifndef WIN32
+ // Substituting the wallet name isn't currently supported on Windows
+ // because Windows shell escaping has not been implemented yet:
+ // https://github.com/bitcoin/bitcoin/pull/13339#issuecomment-537384875
+ // A few ways it could be implemented in the future are described in:
+ // https://github.com/bitcoin/bitcoin/pull/13339#issuecomment-461288094
+ boost::replace_all(strCmd, "%w", ShellEscape(GetName()));
+#endif
std::thread t(runCommand, strCmd);
t.detach(); // thread runs free
}
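For context, the %w substitution means a -walletnotify command can now receive the wallet name alongside the txid on non-Windows platforms. A hypothetical bitcoin.conf entry (the script path is made up for illustration):

    walletnotify=/usr/local/bin/tx-notify.sh %s %w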
@@ -1082,7 +1089,7 @@ void CWallet::SyncTransaction(const CTransactionRef& ptx, CWalletTx::Confirmatio
MarkInputsDirty(ptx);
}
-void CWallet::TransactionAddedToMempool(const CTransactionRef& ptx) {
+void CWallet::transactionAddedToMempool(const CTransactionRef& ptx) {
auto locked_chain = chain().lock();
LOCK(cs_wallet);
CWalletTx::Confirmation confirm(CWalletTx::Status::UNCONFIRMED, /* block_height */ 0, {}, /* nIndex */ 0);
@@ -1094,7 +1101,7 @@ void CWallet::TransactionAddedToMempool(const CTransactionRef& ptx) {
}
}
-void CWallet::TransactionRemovedFromMempool(const CTransactionRef &ptx) {
+void CWallet::transactionRemovedFromMempool(const CTransactionRef &ptx) {
LOCK(cs_wallet);
auto it = mapWallet.find(ptx->GetHash());
if (it != mapWallet.end()) {
@@ -1102,7 +1109,7 @@ void CWallet::TransactionRemovedFromMempool(const CTransactionRef &ptx) {
}
}
-void CWallet::BlockConnected(const CBlock& block, const std::vector<CTransactionRef>& vtxConflicted, int height)
+void CWallet::blockConnected(const CBlock& block, int height)
{
const uint256& block_hash = block.GetHash();
auto locked_chain = chain().lock();
@@ -1113,14 +1120,11 @@ void CWallet::BlockConnected(const CBlock& block, const std::vector<CTransaction
for (size_t index = 0; index < block.vtx.size(); index++) {
CWalletTx::Confirmation confirm(CWalletTx::Status::CONFIRMED, height, block_hash, index);
SyncTransaction(block.vtx[index], confirm);
- TransactionRemovedFromMempool(block.vtx[index]);
- }
- for (const CTransactionRef& ptx : vtxConflicted) {
- TransactionRemovedFromMempool(ptx);
+ transactionRemovedFromMempool(block.vtx[index]);
}
}
-void CWallet::BlockDisconnected(const CBlock& block, int height)
+void CWallet::blockDisconnected(const CBlock& block, int height)
{
auto locked_chain = chain().lock();
LOCK(cs_wallet);
@@ -1137,13 +1141,13 @@ void CWallet::BlockDisconnected(const CBlock& block, int height)
}
}
-void CWallet::UpdatedBlockTip()
+void CWallet::updatedBlockTip()
{
m_best_block_time = GetTime();
}
-void CWallet::BlockUntilSyncedToCurrentChain() {
+void CWallet::BlockUntilSyncedToCurrentChain() const {
AssertLockNotHeld(cs_wallet);
// Skip the queue-draining stuff if we know we're caught up with
// ::ChainActive().Tip(), otherwise put a callback in the validation interface queue and wait
@@ -1326,7 +1330,7 @@ bool CWallet::IsHDEnabled() const
return result;
}
-bool CWallet::CanGetAddresses(bool internal)
+bool CWallet::CanGetAddresses(bool internal) const
{
LOCK(cs_wallet);
if (m_spk_managers.empty()) return false;
@@ -1400,7 +1404,7 @@ bool CWallet::DummySignInput(CTxIn &tx_in, const CTxOut &txout, bool use_max_sig
const CScript& scriptPubKey = txout.scriptPubKey;
SignatureData sigdata;
- std::unique_ptr<SigningProvider> provider = GetSigningProvider(scriptPubKey);
+ std::unique_ptr<SigningProvider> provider = GetSolvingProvider(scriptPubKey);
if (!provider) {
// We don't know about this scriptPubKey;
return false;
@@ -1781,7 +1785,7 @@ bool CWalletTx::SubmitMemoryPoolAndRelay(std::string& err_string, bool relay)
// Irrespective of the failure reason, un-marking fInMempool
// out-of-order is incorrect - it should be unmarked when
// TransactionRemovedFromMempool fires.
- bool ret = pwallet->chain().broadcastTransaction(tx, err_string, pwallet->m_default_max_tx_fee, relay);
+ bool ret = pwallet->chain().broadcastTransaction(tx, pwallet->m_default_max_tx_fee, relay, err_string);
fInMempool |= ret;
return ret;
}
@@ -1870,7 +1874,7 @@ CAmount CWalletTx::GetAvailableCredit(bool fUseCache, const isminefilter& filter
uint256 hashTx = GetHash();
for (unsigned int i = 0; i < tx->vout.size(); i++)
{
- if (!pwallet->IsSpent(hashTx, i) && (allow_used_addresses || !pwallet->IsUsedDestination(hashTx, i))) {
+ if (!pwallet->IsSpent(hashTx, i) && (allow_used_addresses || !pwallet->IsSpentKey(hashTx, i))) {
const CTxOut &txout = tx->vout[i];
nCredit += pwallet->GetCredit(txout, filter);
if (!MoneyRange(nCredit))
@@ -2160,11 +2164,11 @@ void CWallet::AvailableCoins(interfaces::Chain::Lock& locked_chain, std::vector<
continue;
}
- if (!allow_used_addresses && IsUsedDestination(wtxid, i)) {
+ if (!allow_used_addresses && IsSpentKey(wtxid, i)) {
continue;
}
- std::unique_ptr<SigningProvider> provider = GetSigningProvider(wtx.tx->vout[i].scriptPubKey);
+ std::unique_ptr<SigningProvider> provider = GetSolvingProvider(wtx.tx->vout[i].scriptPubKey);
bool solvable = provider ? IsSolvable(*provider, wtx.tx->vout[i].scriptPubKey) : false;
bool spendable = ((mine & ISMINE_SPENDABLE) != ISMINE_NO) || (((mine & ISMINE_WATCH_ONLY) != ISMINE_NO) && (coinControl && coinControl->fAllowWatchOnly && solvable));
@@ -2403,34 +2407,172 @@ bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAm
return res;
}
-bool CWallet::SignTransaction(CMutableTransaction& tx)
+bool CWallet::SignTransaction(CMutableTransaction& tx) const
{
AssertLockHeld(cs_wallet);
- // sign the new tx
- int nIn = 0;
+ // Build coins map
+ std::map<COutPoint, Coin> coins;
for (auto& input : tx.vin) {
std::map<uint256, CWalletTx>::const_iterator mi = mapWallet.find(input.prevout.hash);
if(mi == mapWallet.end() || input.prevout.n >= mi->second.tx->vout.size()) {
return false;
}
- const CScript& scriptPubKey = mi->second.tx->vout[input.prevout.n].scriptPubKey;
- const CAmount& amount = mi->second.tx->vout[input.prevout.n].nValue;
+ const CWalletTx& wtx = mi->second;
+ coins[input.prevout] = Coin(wtx.tx->vout[input.prevout.n], wtx.m_confirm.block_height, wtx.IsCoinBase());
+ }
+ std::map<int, std::string> input_errors;
+ return SignTransaction(tx, coins, SIGHASH_ALL, input_errors);
+}
+
+bool CWallet::SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const
+{
+ // Sign the tx with ScriptPubKeyMans
+ // Because each ScriptPubKeyMan can sign more than one input, we need to keep track of each ScriptPubKeyMan that has signed this transaction.
+ // In each iteration we may end up signing more inputs than just the one being considered.
+ // We assume that each input is signed by only one ScriptPubKeyMan.
+ std::set<uint256> visited_spk_mans;
+ for (unsigned int i = 0; i < tx.vin.size(); i++) {
+ // Get the prevout
+ CTxIn& txin = tx.vin[i];
+ auto coin = coins.find(txin.prevout);
+ if (coin == coins.end() || coin->second.IsSpent()) {
+ input_errors[i] = "Input not found or already spent";
+ continue;
+ }
+
+ // Check if this input is complete
+ SignatureData sigdata = DataFromTransaction(tx, i, coin->second.out);
+ if (sigdata.complete) {
+ continue;
+ }
+
+ // Input needs to be signed, find the right ScriptPubKeyMan
+ std::set<ScriptPubKeyMan*> spk_mans = GetScriptPubKeyMans(coin->second.out.scriptPubKey, sigdata);
+ if (spk_mans.size() == 0) {
+ input_errors[i] = "Unable to sign input, missing keys";
+ continue;
+ }
+
+ for (auto& spk_man : spk_mans) {
+ // If we've already been signed by this spk_man, skip it
+ if (visited_spk_mans.count(spk_man->GetID()) > 0) {
+ continue;
+ }
+
+ // Sign the tx.
+ // spk_man->SignTransaction will return true if the transaction is complete,
+ // so we can exit early and return true if that happens.
+ if (spk_man->SignTransaction(tx, coins, sighash, input_errors)) {
+ return true;
+ }
+
+ // Add this spk_man to visited_spk_mans so we can skip it later
+ visited_spk_mans.insert(spk_man->GetID());
+ }
+ }
+ return false;
+}
+
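A minimal sketch of calling the new coins-map overload directly (the single-argument wrapper above builds the same map from mapWallet); wallet, mtx and the prevout data are assumed to be available to the caller:

    // Sketch only: sign whatever inputs the wallet's ScriptPubKeyMans can cover
    // and report per-input failures instead of aborting on the first one.
    static bool TrySign(const CWallet& wallet, CMutableTransaction& mtx, const std::map<COutPoint, Coin>& coins)
    {
        std::map<int, std::string> input_errors;
        const bool complete = wallet.SignTransaction(mtx, coins, SIGHASH_ALL, input_errors);
        for (const auto& err : input_errors) {
            LogPrintf("input %d could not be signed: %s\n", err.first, err.second);
        }
        return complete;
    }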
+TransactionError CWallet::FillPSBT(PartiallySignedTransaction& psbtx, bool& complete, int sighash_type, bool sign, bool bip32derivs) const
+{
+ LOCK(cs_wallet);
+ // Get all of the previous transactions
+ for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) {
+ const CTxIn& txin = psbtx.tx->vin[i];
+ PSBTInput& input = psbtx.inputs.at(i);
+
+ if (PSBTInputSigned(input)) {
+ continue;
+ }
+
+ // Verify input looks sane. This will check that we have at most one UTXO, witness or non-witness.
+ if (!input.IsSane()) {
+ return TransactionError::INVALID_PSBT;
+ }
+
+ // If we have no utxo, grab it from the wallet.
+ if (!input.non_witness_utxo && input.witness_utxo.IsNull()) {
+ const uint256& txhash = txin.prevout.hash;
+ const auto it = mapWallet.find(txhash);
+ if (it != mapWallet.end()) {
+ const CWalletTx& wtx = it->second;
+ // We only need the non_witness_utxo, which is a superset of the witness_utxo.
+ // The signing code will switch to the smaller witness_utxo if this is ok.
+ input.non_witness_utxo = wtx.tx;
+ }
+ }
+ }
+
+ // Fill in information from ScriptPubKeyMans
+ // Because each ScriptPubKeyMan may be able to fill more than one input, we need to keep track of each ScriptPubKeyMan that has filled this psbt.
+ // In each iteration we may end up filling more inputs than just the one being considered.
+ // We assume that each input is filled by only one ScriptPubKeyMan
+ std::set<uint256> visited_spk_mans;
+ for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) {
+ const CTxIn& txin = psbtx.tx->vin[i];
+ PSBTInput& input = psbtx.inputs.at(i);
+
+ if (PSBTInputSigned(input)) {
+ continue;
+ }
+
+ // Get the scriptPubKey to know which ScriptPubKeyMan to use
+ CScript script;
+ if (!input.witness_utxo.IsNull()) {
+ script = input.witness_utxo.scriptPubKey;
+ } else if (input.non_witness_utxo) {
+ if (txin.prevout.n >= input.non_witness_utxo->vout.size()) {
+ return TransactionError::MISSING_INPUTS;
+ }
+ script = input.non_witness_utxo->vout[txin.prevout.n].scriptPubKey;
+ } else {
+ // There's no UTXO so we can just skip this now
+ continue;
+ }
SignatureData sigdata;
+ input.FillSignatureData(sigdata);
+ std::set<ScriptPubKeyMan*> spk_mans = GetScriptPubKeyMans(script, sigdata);
+ if (spk_mans.size() == 0) {
+ continue;
+ }
- std::unique_ptr<SigningProvider> provider = GetSigningProvider(scriptPubKey);
- if (!provider) {
- // We don't know about this scriptpbuKey;
- return false;
+ for (auto& spk_man : spk_mans) {
+ // If we've already been signed by this spk_man, skip it
+ if (visited_spk_mans.count(spk_man->GetID()) > 0) {
+ continue;
+ }
+
+ // Fill in the information from the spk_man
+ TransactionError res = spk_man->FillPSBT(psbtx, sighash_type, sign, bip32derivs);
+ if (res != TransactionError::OK) {
+ return res;
+ }
+
+ // Add this spk_man to visited_spk_mans so we can skip it later
+ visited_spk_mans.insert(spk_man->GetID());
}
+ }
- if (!ProduceSignature(*provider, MutableTransactionSignatureCreator(&tx, nIn, amount, SIGHASH_ALL), scriptPubKey, sigdata)) {
- return false;
+ // Complete if every input is now signed
+ complete = true;
+ for (const auto& input : psbtx.inputs) {
+ complete &= PSBTInputSigned(input);
+ }
+
+ return TransactionError::OK;
+}
+
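A minimal sketch of the wallet-level entry point, which is what walletprocesspsbt and walletcreatefundedpsbt above now call; wallet and a decoded psbtx are assumed:

    // Sketch only: pull missing UTXOs from the wallet, let every capable
    // ScriptPubKeyMan fill (and optionally sign) the PSBT, and report whether
    // the result is complete.
    static bool ProcessWithWallet(const CWallet& wallet, PartiallySignedTransaction& psbtx, bool sign)
    {
        bool complete = false;
        const TransactionError err = wallet.FillPSBT(psbtx, complete, SIGHASH_ALL, sign, /* bip32derivs */ true);
        return err == TransactionError::OK && complete;
    }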
+SigningResult CWallet::SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const
+{
+ SignatureData sigdata;
+ CScript script_pub_key = GetScriptForDestination(pkhash);
+ for (const auto& spk_man_pair : m_spk_managers) {
+ if (spk_man_pair.second->CanProvide(script_pub_key, sigdata)) {
+ return spk_man_pair.second->SignMessage(message, pkhash, str_sig);
}
- UpdateInput(input, sigdata);
- nIn++;
}
- return true;
+ return SigningResult::PRIVATE_KEY_NOT_AVAILABLE;
}
bool CWallet::FundTransaction(CMutableTransaction& tx, CAmount& nFeeRet, int& nChangePosInOut, std::string& strFailReason, bool lockUnspents, const std::set<int>& setSubtractFeeFromOutputs, CCoinControl coinControl)
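And a minimal sketch of the new message-signing path, roughly what a signmessage-style caller would do; wallet, message and a decoded destination dest are assumed, and only PKHash destinations can be signed for:

    // Sketch only: route message signing through whichever ScriptPubKeyMan can
    // provide for the destination's script.
    static bool SignWithDest(const CWallet& wallet, const CTxDestination& dest, const std::string& message, std::string& signature)
    {
        const PKHash* pkhash = boost::get<PKHash>(&dest);
        if (!pkhash) return false; // not a key-hash destination
        return wallet.SignMessage(message, *pkhash, signature) == SigningResult::OK;
    }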
@@ -2879,25 +3021,9 @@ bool CWallet::CreateTransaction(interfaces::Chain::Lock& locked_chain, const std
txNew.vin.push_back(CTxIn(coin.outpoint, CScript(), nSequence));
}
- if (sign)
- {
- int nIn = 0;
- for (const auto& coin : selected_coins)
- {
- const CScript& scriptPubKey = coin.txout.scriptPubKey;
- SignatureData sigdata;
-
- std::unique_ptr<SigningProvider> provider = GetSigningProvider(scriptPubKey);
- if (!provider || !ProduceSignature(*provider, MutableTransactionSignatureCreator(&txNew, nIn, coin.txout.nValue, SIGHASH_ALL), scriptPubKey, sigdata))
- {
- strFailReason = _("Signing transaction failed").translated;
- return false;
- } else {
- UpdateInput(txNew.vin.at(nIn), sigdata);
- }
-
- nIn++;
- }
+ if (sign && !SignTransaction(txNew)) {
+ strFailReason = _("Signing transaction failed").translated;
+ return false;
}
// Return the constructed transaction data.
@@ -3105,7 +3231,7 @@ bool CWallet::DelAddressBook(const CTxDestination& address)
return WalletBatch(*database).EraseName(EncodeDestination(address));
}
-size_t CWallet::KeypoolCountExternalKeys()
+size_t CWallet::KeypoolCountExternalKeys() const
{
AssertLockHeld(cs_wallet);
@@ -3170,7 +3296,7 @@ bool CWallet::GetNewChangeDestination(const OutputType type, CTxDestination& des
return true;
}
-int64_t CWallet::GetOldestKeyPoolTime()
+int64_t CWallet::GetOldestKeyPoolTime() const
{
LOCK(cs_wallet);
int64_t oldestKey = std::numeric_limits<int64_t>::max();
@@ -3194,7 +3320,7 @@ void CWallet::MarkDestinationsDirty(const std::set<CTxDestination>& destinations
}
}
-std::map<CTxDestination, CAmount> CWallet::GetAddressBalances(interfaces::Chain::Lock& locked_chain)
+std::map<CTxDestination, CAmount> CWallet::GetAddressBalances(interfaces::Chain::Lock& locked_chain) const
{
std::map<CTxDestination, CAmount> balances;
@@ -3235,7 +3361,7 @@ std::map<CTxDestination, CAmount> CWallet::GetAddressBalances(interfaces::Chain:
return balances;
}
-std::set< std::set<CTxDestination> > CWallet::GetAddressGroupings()
+std::set< std::set<CTxDestination> > CWallet::GetAddressGroupings() const
{
AssertLockHeld(cs_wallet);
std::set< std::set<CTxDestination> > groupings;
@@ -3749,7 +3875,7 @@ std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain,
}
auto locked_chain = chain.lock();
- walletInstance->ChainStateFlushed(locked_chain->getTipLocator());
+ walletInstance->chainStateFlushed(locked_chain->getTipLocator());
} else if (wallet_creation_flags & WALLET_FLAG_DISABLE_PRIVATE_KEYS) {
// Make it impossible to disable private keys after creation
error = strprintf(_("Error loading %s: Private keys can only be disabled during creation").translated, walletFile);
@@ -3930,7 +4056,7 @@ std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain,
return nullptr;
}
}
- walletInstance->ChainStateFlushed(locked_chain->getTipLocator());
+ walletInstance->chainStateFlushed(locked_chain->getTipLocator());
walletInstance->database->IncrementUpdateCounter();
// Restore wallet transaction metadata after -zapwallettxes=1
@@ -3997,7 +4123,7 @@ void CWallet::postInitProcess()
chain().requestMempoolTransactions(*this);
}
-bool CWallet::BackupWallet(const std::string& strDest)
+bool CWallet::BackupWallet(const std::string& strDest) const
{
return database->Backup(strDest);
}
@@ -4148,6 +4274,17 @@ ScriptPubKeyMan* CWallet::GetScriptPubKeyMan(const OutputType& type, bool intern
return it->second;
}
+std::set<ScriptPubKeyMan*> CWallet::GetScriptPubKeyMans(const CScript& script, SignatureData& sigdata) const
+{
+ std::set<ScriptPubKeyMan*> spk_mans;
+ for (const auto& spk_man_pair : m_spk_managers) {
+ if (spk_man_pair.second->CanProvide(script, sigdata)) {
+ spk_mans.insert(spk_man_pair.second.get());
+ }
+ }
+ return spk_mans;
+}
+
ScriptPubKeyMan* CWallet::GetScriptPubKeyMan(const CScript& script) const
{
SignatureData sigdata;
@@ -4167,17 +4304,17 @@ ScriptPubKeyMan* CWallet::GetScriptPubKeyMan(const uint256& id) const
return nullptr;
}
-std::unique_ptr<SigningProvider> CWallet::GetSigningProvider(const CScript& script) const
+std::unique_ptr<SigningProvider> CWallet::GetSolvingProvider(const CScript& script) const
{
SignatureData sigdata;
- return GetSigningProvider(script, sigdata);
+ return GetSolvingProvider(script, sigdata);
}
-std::unique_ptr<SigningProvider> CWallet::GetSigningProvider(const CScript& script, SignatureData& sigdata) const
+std::unique_ptr<SigningProvider> CWallet::GetSolvingProvider(const CScript& script, SignatureData& sigdata) const
{
for (const auto& spk_man_pair : m_spk_managers) {
if (spk_man_pair.second->CanProvide(script, sigdata)) {
- return spk_man_pair.second->GetSigningProvider(script);
+ return spk_man_pair.second->GetSolvingProvider(script);
}
}
return nullptr;
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index a918bb8833..75fd14a80e 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -11,8 +11,10 @@
#include <interfaces/handler.h>
#include <outputtype.h>
#include <policy/feerate.h>
+#include <psbt.h>
#include <tinyformat.h>
#include <ui_interface.h>
+#include <util/message.h>
#include <util/strencodings.h>
#include <util/system.h>
#include <validationinterface.h>
@@ -141,7 +143,7 @@ class ReserveDestination
{
protected:
//! The wallet to reserve from
- CWallet* const pwallet;
+ const CWallet* const pwallet;
//! The ScriptPubKeyMan to reserve from. Based on type when GetReservedDestination is called
ScriptPubKeyMan* m_spk_man{nullptr};
OutputType const type;
@@ -817,8 +819,8 @@ public:
bool IsSpent(const uint256& hash, unsigned int n) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
// Whether this or any known UTXO with the same single key has been spent.
- bool IsUsedDestination(const uint256& hash, unsigned int n) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- void SetUsedDestinationState(WalletBatch& batch, const uint256& hash, unsigned int n, bool used, std::set<CTxDestination>& tx_destinations) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ bool IsSpentKey(const uint256& hash, unsigned int n) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ void SetSpentKeyState(WalletBatch& batch, const uint256& hash, unsigned int n, bool used, std::set<CTxDestination>& tx_destinations) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
std::vector<OutputGroup> GroupOutputs(const std::vector<COutput>& outputs, bool single_coin) const;
@@ -832,8 +834,8 @@ public:
* Rescan abort properties
*/
void AbortRescan() { fAbortRescan = true; }
- bool IsAbortingRescan() { return fAbortRescan; }
- bool IsScanning() { return fScanningWallet; }
+ bool IsAbortingRescan() const { return fAbortRescan; }
+ bool IsScanning() const { return fScanningWallet; }
int64_t ScanningDuration() const { return fScanningWallet ? GetTimeMillis() - m_scanning_start : 0; }
double ScanningProgress() const { return fScanningWallet ? (double) m_scanning_progress : 0; }
@@ -873,10 +875,10 @@ public:
void MarkDirty();
bool AddToWallet(const CWalletTx& wtxIn, bool fFlushOnClose=true);
void LoadToWallet(CWalletTx& wtxIn) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- void TransactionAddedToMempool(const CTransactionRef& tx) override;
- void BlockConnected(const CBlock& block, const std::vector<CTransactionRef>& vtxConflicted, int height) override;
- void BlockDisconnected(const CBlock& block, int height) override;
- void UpdatedBlockTip() override;
+ void transactionAddedToMempool(const CTransactionRef& tx) override;
+ void blockConnected(const CBlock& block, int height) override;
+ void blockDisconnected(const CBlock& block, int height) override;
+ void updatedBlockTip() override;
int64_t RescanFromTime(int64_t startTime, const WalletRescanReserver& reserver, bool update);
struct ScanResult {
@@ -895,7 +897,7 @@ public:
uint256 last_failed_block;
};
ScanResult ScanForWalletTransactions(const uint256& first_block, const uint256& last_block, const WalletRescanReserver& reserver, bool fUpdate);
- void TransactionRemovedFromMempool(const CTransactionRef &ptx) override;
+ void transactionRemovedFromMempool(const CTransactionRef &ptx) override;
void ReacceptWalletTransactions() EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
void ResendWalletTransactions();
struct Balance {
@@ -916,7 +918,30 @@ public:
* calling CreateTransaction();
*/
bool FundTransaction(CMutableTransaction& tx, CAmount& nFeeRet, int& nChangePosInOut, std::string& strFailReason, bool lockUnspents, const std::set<int>& setSubtractFeeFromOutputs, CCoinControl);
- bool SignTransaction(CMutableTransaction& tx) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ // Fetch the inputs and sign with SIGHASH_ALL.
+ bool SignTransaction(CMutableTransaction& tx) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ // Sign the tx given the input coins and sighash.
+ bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const;
+ SigningResult SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const;
+
+ /**
+ * Fills out a PSBT with information from the wallet. Fills in UTXOs if we have
+ * them. Tries to sign if sign=true. Sets `complete` if the PSBT is now complete
+ * (i.e. has all required signatures or signature-parts, and is ready to
+ * finalize.) Returns an error if something goes wrong.
+ *
+ * @param[in] psbtx PartiallySignedTransaction to fill in
+ * @param[out] complete indicates whether the PSBT is now complete
+ * @param[in] sighash_type the sighash type to use when signing (if PSBT does not specify)
+ * @param[in] sign whether to sign or not
+ * @param[in] bip32derivs whether to fill in bip32 derivation information if available
+ * @return error
+ */
+ TransactionError FillPSBT(PartiallySignedTransaction& psbtx,
+ bool& complete,
+ int sighash_type = 1 /* SIGHASH_ALL */,
+ bool sign = true,
+ bool bip32derivs = true) const;
/**
* Create a new transaction paying the recipients with a set of coins
@@ -968,13 +993,13 @@ public:
/** Absolute maximum transaction fee (in satoshis) used by default for the wallet */
CAmount m_default_max_tx_fee{DEFAULT_TRANSACTION_MAXFEE};
- size_t KeypoolCountExternalKeys() EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ size_t KeypoolCountExternalKeys() const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
bool TopUpKeyPool(unsigned int kpSize = 0);
- int64_t GetOldestKeyPoolTime();
+ int64_t GetOldestKeyPoolTime() const;
- std::set<std::set<CTxDestination>> GetAddressGroupings() EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
- std::map<CTxDestination, CAmount> GetAddressBalances(interfaces::Chain::Lock& locked_chain);
+ std::set<std::set<CTxDestination>> GetAddressGroupings() const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ std::map<CTxDestination, CAmount> GetAddressBalances(interfaces::Chain::Lock& locked_chain) const;
std::set<CTxDestination> GetLabelAddresses(const std::string& label) const;
@@ -1008,7 +1033,7 @@ public:
bool IsAllFromMe(const CTransaction& tx, const isminefilter& filter) const;
CAmount GetCredit(const CTransaction& tx, const isminefilter& filter) const;
CAmount GetChange(const CTransaction& tx) const;
- void ChainStateFlushed(const CBlockLocator& loc) override;
+ void chainStateFlushed(const CBlockLocator& loc) override;
DBErrors LoadWallet(bool& fFirstRunRet);
DBErrors ZapWalletTx(std::vector<CWalletTx>& vWtx);
@@ -1027,7 +1052,7 @@ public:
bool SetMaxVersion(int nVersion);
//! get the current wallet format (the oldest client version guaranteed to understand this wallet)
- int GetVersion() { LOCK(cs_wallet); return nWalletVersion; }
+ int GetVersion() const { LOCK(cs_wallet); return nWalletVersion; }
//! Get wallet transactions that conflict with given transaction (spend same outputs)
std::set<uint256> GetConflicts(const uint256& txid) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
@@ -1098,13 +1123,13 @@ public:
*/
void postInitProcess();
- bool BackupWallet(const std::string& strDest);
+ bool BackupWallet(const std::string& strDest) const;
/* Returns true if HD is enabled */
bool IsHDEnabled() const;
/* Returns true if the wallet can give out new addresses. This means it has keys in the keypool or can generate new keys */
- bool CanGetAddresses(bool internal = false);
+ bool CanGetAddresses(bool internal = false) const;
/**
* Blocks until the wallet state is up-to-date to /at least/ the current
@@ -1112,7 +1137,7 @@ public:
* Obviously holding cs_main/cs_wallet when going into this call may cause
* deadlock
*/
- void BlockUntilSyncedToCurrentChain() LOCKS_EXCLUDED(cs_main, cs_wallet);
+ void BlockUntilSyncedToCurrentChain() const LOCKS_EXCLUDED(cs_main, cs_wallet);
/** set a single wallet flag */
void SetWalletFlag(uint64_t flags);
@@ -1153,9 +1178,12 @@ public:
//! Get the ScriptPubKeyMan by id
ScriptPubKeyMan* GetScriptPubKeyMan(const uint256& id) const;
+ //! Get all of the ScriptPubKeyMans for a script given additional information in sigdata (populated by e.g. a psbt)
+ std::set<ScriptPubKeyMan*> GetScriptPubKeyMans(const CScript& script, SignatureData& sigdata) const;
+
//! Get the SigningProvider for a script
- std::unique_ptr<SigningProvider> GetSigningProvider(const CScript& script) const;
- std::unique_ptr<SigningProvider> GetSigningProvider(const CScript& script, SignatureData& sigdata) const;
+ std::unique_ptr<SigningProvider> GetSolvingProvider(const CScript& script) const;
+ std::unique_ptr<SigningProvider> GetSolvingProvider(const CScript& script, SignatureData& sigdata) const;
//! Get the LegacyScriptPubKeyMan which is used for all types, internal, and external.
LegacyScriptPubKeyMan* GetLegacyScriptPubKeyMan() const;
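(Illustrative note, not part of the diff: the `FillPSBT`, `SignTransaction` and `SignMessage` members declared above are the wallet-side signing entry points added by this change. Below is a minimal functional-test-style sketch of the PSBT flow they back, assuming the standard test framework and the existing `walletcreatefundedpsbt`/`walletprocesspsbt` RPCs; the exact RPC-to-`FillPSBT` wiring is outside this diff.)

```python
# Hedged sketch only: exercises the wallet's PSBT-filling/signing path from a
# functional test; assumes `self.nodes[0]` is a wallet-enabled node with funds.
node = self.nodes[0]
# Create and fund a PSBT; the wallet fills in UTXO data for its own inputs.
funded = node.walletcreatefundedpsbt([], {node.getnewaddress(): 1.0})
# Sign with the default SIGHASH_ALL and include BIP32 derivation info,
# mirroring FillPSBT's sighash_type/sign/bip32derivs parameters.
processed = node.walletprocesspsbt(funded["psbt"], True, "ALL", True)
# 'complete' mirrors FillPSBT's `complete` out-parameter; it becomes True once
# the wallet can provide every required signature.
assert processed["complete"]
```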
diff --git a/src/zmq/zmqnotificationinterface.cpp b/src/zmq/zmqnotificationinterface.cpp
index 0ce14f232e..d55b106e04 100644
--- a/src/zmq/zmqnotificationinterface.cpp
+++ b/src/zmq/zmqnotificationinterface.cpp
@@ -177,7 +177,7 @@ void CZMQNotificationInterface::TransactionAddedToMempool(const CTransactionRef&
}
}
-void CZMQNotificationInterface::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected, const std::vector<CTransactionRef>& vtxConflicted)
+void CZMQNotificationInterface::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected)
{
for (const CTransactionRef& ptx : pblock->vtx) {
// Do a normal notify for each transaction added in the block
diff --git a/src/zmq/zmqnotificationinterface.h b/src/zmq/zmqnotificationinterface.h
index c820865497..60f3b6148a 100644
--- a/src/zmq/zmqnotificationinterface.h
+++ b/src/zmq/zmqnotificationinterface.h
@@ -26,7 +26,7 @@ protected:
// CValidationInterface
void TransactionAddedToMempool(const CTransactionRef& tx) override;
- void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected, const std::vector<CTransactionRef>& vtxConflicted) override;
+ void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override;
void BlockDisconnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexDisconnected) override;
void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override;
diff --git a/src/zmq/zmqrpc.cpp b/src/zmq/zmqrpc.cpp
index 5652877f3c..0fbefb6023 100644
--- a/src/zmq/zmqrpc.cpp
+++ b/src/zmq/zmqrpc.cpp
@@ -19,14 +19,15 @@ UniValue getzmqnotifications(const JSONRPCRequest& request)
"\nReturns information about the active ZeroMQ notifications.\n",
{},
RPCResult{
- "[\n"
- " { (json object)\n"
- " \"type\": \"pubhashtx\", (string) Type of notification\n"
- " \"address\": \"...\", (string) Address of the publisher\n"
- " \"hwm\": n (numeric) Outbound message high water mark\n"
- " },\n"
- " ...\n"
- "]\n"
+ RPCResult::Type::ARR, "", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "type", "Type of notification"},
+ {RPCResult::Type::STR, "address", "Address of the publisher"},
+ {RPCResult::Type::NUM, "hwm", "Outbound message high water mark"},
+ }},
+ }
},
RPCExamples{
HelpExampleCli("getzmqnotifications", "")
diff --git a/test/README.md b/test/README.md
index c3e4ae9ad2..e1dab92a06 100644
--- a/test/README.md
+++ b/test/README.md
@@ -145,7 +145,7 @@ levels using the logger included in the test_framework, e.g.
`test_framework.log` and no logs are output to the console.
- when run directly, *all* logs are written to `test_framework.log` and INFO
level and above are output to the console.
-- when run on Travis, no logs are output to the console. However, if a test
+- when run by [our CI (Continuous Integration)](/ci/README.md), no logs are output to the console. However, if a test
fails, the `test_framework.log` and bitcoind `debug.log`s will all be dumped
to the console to help troubleshooting.
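(Illustrative sketch, not part of the diff: the logging behaviour described above comes from the framework logger exposed as `self.log` inside a test; the messages below are made up.)

```python
# Inside a functional test's run_test(): everything (including DEBUG) is written
# to test_framework.log; INFO and above also reach the console when the test is
# run directly, and the CI only dumps the log files if the test fails.
self.log.info("Mine a block and check the wallet balance")
self.nodes[0].generate(1)
self.log.debug("balance after mining: %s", self.nodes[0].getbalance())
```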
diff --git a/test/functional/README.md b/test/functional/README.md
index 77a9ce9acb..004e0afb1d 100644
--- a/test/functional/README.md
+++ b/test/functional/README.md
@@ -51,10 +51,13 @@ don't have test cases for.
#### General test-writing advice
+- Instead of inline comments or no test documentation at all, log the comments to the test log, e.g.
+ `self.log.info('Create enough transactions to fill a block')`. Logs make the test code easier to read and the test
+ logic easier [to debug](/test/README.md#test-logging).
- Set `self.num_nodes` to the minimum number of nodes necessary for the test.
Having additional unrequired nodes adds to the execution time of the test as
well as memory/CPU/disk requirements (which is important when running tests in
- parallel or on Travis).
+ parallel).
- Avoid stop-starting the nodes multiple times during the test if possible. A
stop-start takes several seconds, so doing it several times blows up the
runtime of the test.
@@ -131,9 +134,6 @@ Utilities for manipulating transaction scripts (originally from python-bitcoinli
#### [key.py](test_framework/key.py)
Test-only secp256k1 elliptic curve implementation
-#### [bignum.py](test_framework/bignum.py)
-Helpers for script.py
-
#### [blocktools.py](test_framework/blocktools.py)
Helper functions for creating blocks and transactions.
diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py
index 99c88bbcc0..ce14998fd1 100644
--- a/test/functional/data/invalid_txs.py
+++ b/test/functional/data/invalid_txs.py
@@ -21,7 +21,13 @@ Invalid tx cases not covered here can be found by running:
"""
import abc
-from test_framework.messages import CTransaction, CTxIn, CTxOut, COutPoint
+from test_framework.messages import (
+ COutPoint,
+ CTransaction,
+ CTxIn,
+ CTxOut,
+ MAX_MONEY,
+)
from test_framework import script as sc
from test_framework.blocktools import create_tx_with_script, MAX_BLOCK_SIGOPS
from test_framework.script import (
@@ -166,7 +172,7 @@ class SpendTooMuch(BadTxTemplate):
self.spend_tx, 0, script_pub_key=basic_p2sh, amount=(self.spend_avail + 1))
-class SpendNegative(BadTxTemplate):
+class CreateNegative(BadTxTemplate):
reject_reason = 'bad-txns-vout-negative'
expect_disconnect = True
@@ -174,6 +180,25 @@ class SpendNegative(BadTxTemplate):
return create_tx_with_script(self.spend_tx, 0, amount=-1)
+class CreateTooLarge(BadTxTemplate):
+ reject_reason = 'bad-txns-vout-toolarge'
+ expect_disconnect = True
+
+ def get_tx(self):
+ return create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY + 1)
+
+
+class CreateSumTooLarge(BadTxTemplate):
+ reject_reason = 'bad-txns-txouttotal-toolarge'
+ expect_disconnect = True
+
+ def get_tx(self):
+ tx = create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY)
+ tx.vout = [tx.vout[0]] * 2
+ tx.calc_sha256()
+ return tx
+
+
class InvalidOPIFConstruction(BadTxTemplate):
reject_reason = "mandatory-script-verify-flag-failed (Invalid OP_IF construction)"
expect_disconnect = True
@@ -237,4 +262,3 @@ DisabledOpcodeTemplates = [getDisabledOpcodeTemplate(opcode) for opcode in [
def iter_all_templates():
"""Iterate through all bad transaction template types."""
return BadTxTemplate.__subclasses__()
-
diff --git a/test/functional/feature_abortnode.py b/test/functional/feature_abortnode.py
index 9b878e8bf8..e47e709431 100755
--- a/test/functional/feature_abortnode.py
+++ b/test/functional/feature_abortnode.py
@@ -14,11 +14,12 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until, get_datadir_path, connect_nodes
import os
-class AbortNodeTest(BitcoinTestFramework):
+class AbortNodeTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
+ self.rpc_timeout = 240
def setup_network(self):
self.setup_nodes()
@@ -44,5 +45,6 @@ class AbortNodeTest(BitcoinTestFramework):
self.log.info("Node crashed - now verifying restart fails")
self.nodes[0].assert_start_raises_init_error()
+
if __name__ == '__main__':
AbortNodeTest().main()
diff --git a/test/functional/feature_asmap.py b/test/functional/feature_asmap.py
new file mode 100755
index 0000000000..2c6553fbe2
--- /dev/null
+++ b/test/functional/feature_asmap.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test asmap config argument for ASN-based IP bucketing.
+
+Verify node behaviour and debug log when launching bitcoind in these cases:
+
+1. `bitcoind` with no -asmap arg, using /16 prefix for IP bucketing
+
+2. `bitcoind -asmap=<absolute path>`, using the unit test skeleton asmap
+
+3. `bitcoind -asmap=<relative path>`, using the unit test skeleton asmap
+
+4. `bitcoind -asmap/-asmap=` with no file specified, using the default asmap
+
+5. `bitcoind -asmap` with no file specified and a missing default asmap file
+
+6. `bitcoind -asmap` with an empty (unparsable) default asmap file
+
+The tests are order-independent.
+
+"""
+import os
+import shutil
+
+from test_framework.test_framework import BitcoinTestFramework
+
+DEFAULT_ASMAP_FILENAME = 'ip_asn.map' # defined in src/init.cpp
+ASMAP = '../../src/test/data/asmap.raw' # path to unit test skeleton asmap
+VERSION = 'fec61fa21a9f46f3b17bdcd660d7f4cd90b966aad3aec593c99b35f0aca15853'
+
+def expected_messages(filename):
+ return ['Opened asmap file "{}" (59 bytes) from disk'.format(filename),
+ 'Using asmap version {} for IP bucketing'.format(VERSION)]
+
+class AsmapTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = False
+ self.num_nodes = 1
+
+ def test_without_asmap_arg(self):
+ self.log.info('Test bitcoind with no -asmap arg passed')
+ self.stop_node(0)
+ with self.node.assert_debug_log(['Using /16 prefix for IP bucketing']):
+ self.start_node(0)
+
+ def test_asmap_with_absolute_path(self):
+ self.log.info('Test bitcoind -asmap=<absolute path>')
+ self.stop_node(0)
+ filename = os.path.join(self.datadir, 'my-map-file.map')
+ shutil.copyfile(self.asmap_raw, filename)
+ with self.node.assert_debug_log(expected_messages(filename)):
+ self.start_node(0, ['-asmap={}'.format(filename)])
+ os.remove(filename)
+
+ def test_asmap_with_relative_path(self):
+ self.log.info('Test bitcoind -asmap=<relative path>')
+ self.stop_node(0)
+ name = 'ASN_map'
+ filename = os.path.join(self.datadir, name)
+ shutil.copyfile(self.asmap_raw, filename)
+ with self.node.assert_debug_log(expected_messages(filename)):
+ self.start_node(0, ['-asmap={}'.format(name)])
+ os.remove(filename)
+
+ def test_default_asmap(self):
+ shutil.copyfile(self.asmap_raw, self.default_asmap)
+ for arg in ['-asmap', '-asmap=']:
+ self.log.info('Test bitcoind {} (using default map file)'.format(arg))
+ self.stop_node(0)
+ with self.node.assert_debug_log(expected_messages(self.default_asmap)):
+ self.start_node(0, [arg])
+ os.remove(self.default_asmap)
+
+ def test_default_asmap_with_missing_file(self):
+ self.log.info('Test bitcoind -asmap with missing default map file')
+ self.stop_node(0)
+ msg = "Error: Could not find asmap file \"{}\"".format(self.default_asmap)
+ self.node.assert_start_raises_init_error(extra_args=['-asmap'], expected_msg=msg)
+
+ def test_empty_asmap(self):
+ self.log.info('Test bitcoind -asmap with empty map file')
+ self.stop_node(0)
+ with open(self.default_asmap, "w", encoding="utf-8") as f:
+ f.write("")
+ msg = "Error: Could not parse asmap file \"{}\"".format(self.default_asmap)
+ self.node.assert_start_raises_init_error(extra_args=['-asmap'], expected_msg=msg)
+ os.remove(self.default_asmap)
+
+ def run_test(self):
+ self.node = self.nodes[0]
+ self.datadir = os.path.join(self.node.datadir, self.chain)
+ self.default_asmap = os.path.join(self.datadir, DEFAULT_ASMAP_FILENAME)
+ self.asmap_raw = os.path.join(os.path.dirname(os.path.realpath(__file__)), ASMAP)
+
+ self.test_without_asmap_arg()
+ self.test_asmap_with_absolute_path()
+ self.test_asmap_with_relative_path()
+ self.test_default_asmap()
+ self.test_default_asmap_with_missing_file()
+ self.test_empty_asmap()
+
+
+if __name__ == '__main__':
+ AsmapTest().main()
diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py
index 1b434c4485..ef4d9411c5 100755
--- a/test/functional/feature_assumevalid.py
+++ b/test/functional/feature_assumevalid.py
@@ -47,16 +47,19 @@ from test_framework.script import (CScript, OP_TRUE)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
+
class BaseNode(P2PInterface):
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
+
class AssumeValidTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
+ self.rpc_timeout = 120
def setup_network(self):
self.add_nodes(3)
@@ -187,5 +190,6 @@ class AssumeValidTest(BitcoinTestFramework):
self.send_blocks_until_disconnected(p2p2)
self.assert_blockchain_height(self.nodes[2], 101)
+
if __name__ == '__main__':
AssumeValidTest().main()
diff --git a/test/functional/feature_backwards_compatibility.py b/test/functional/feature_backwards_compatibility.py
new file mode 100755
index 0000000000..0db74432e2
--- /dev/null
+++ b/test/functional/feature_backwards_compatibility.py
@@ -0,0 +1,347 @@
+#!/usr/bin/env python3
+# Copyright (c) 2018-2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Backwards compatibility functional test
+
+Test various backwards compatibility scenarios. Download the previous node binaries:
+
+contrib/devtools/previous_release.sh -b v0.19.0.1 v0.18.1 v0.17.1
+
+Due to RPC changes introduced in various versions, the tests below
+won't work for older versions without some patches or workarounds.
+
+Use only the latest patch version of each release, unless a test specifically
+needs an older patch version.
+"""
+
+import os
+import shutil
+
+from test_framework.test_framework import BitcoinTestFramework, SkipTest
+from test_framework.descriptors import descsum_create
+
+from test_framework.util import (
+ assert_equal,
+ sync_blocks,
+ sync_mempools
+)
+
+class BackwardsCompatibilityTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 5
+ # Add new version after each release:
+ self.extra_args = [
+ ["-addresstype=bech32"], # Pre-release: use to mine blocks
+ ["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # Pre-release: use to receive coins, swap wallets, etc
+ ["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # v0.19.0.1
+ ["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # v0.18.1
+ ["-nowallet", "-walletrbf=1", "-addresstype=bech32"] # v0.17.1
+ ]
+
+ def setup_nodes(self):
+ if os.getenv("TEST_PREVIOUS_RELEASES") == "false":
+ raise SkipTest("backwards compatibility tests")
+
+ releases_path = os.getenv("PREVIOUS_RELEASES_DIR") or os.getcwd() + "/releases"
+ if not os.path.isdir(releases_path):
+ if os.getenv("TEST_PREVIOUS_RELEASES") == "true":
+ raise AssertionError("TEST_PREVIOUS_RELEASES=1 but releases missing: " + releases_path)
+ raise SkipTest("This test requires binaries for previous releases")
+
+ self.add_nodes(self.num_nodes, extra_args=self.extra_args, versions=[
+ None,
+ None,
+ 190000,
+ 180100,
+ 170100
+ ], binary=[
+ self.options.bitcoind,
+ self.options.bitcoind,
+ releases_path + "/v0.19.0.1/bin/bitcoind",
+ releases_path + "/v0.18.1/bin/bitcoind",
+ releases_path + "/v0.17.1/bin/bitcoind"
+ ], binary_cli=[
+ self.options.bitcoincli,
+ self.options.bitcoincli,
+ releases_path + "/v0.19.0.1/bin/bitcoin-cli",
+ releases_path + "/v0.18.1/bin/bitcoin-cli",
+ releases_path + "/v0.17.1/bin/bitcoin-cli"
+ ])
+
+ self.start_nodes()
+
+ def run_test(self):
+ self.nodes[0].generatetoaddress(101, self.nodes[0].getnewaddress())
+
+ sync_blocks(self.nodes)
+
+ # Sanity check the test framework:
+ res = self.nodes[self.num_nodes - 1].getblockchaininfo()
+ assert_equal(res['blocks'], 101)
+
+ node_master = self.nodes[self.num_nodes - 4]
+ node_v19 = self.nodes[self.num_nodes - 3]
+ node_v18 = self.nodes[self.num_nodes - 2]
+ node_v17 = self.nodes[self.num_nodes - 1]
+
+ self.log.info("Test wallet backwards compatibility...")
+ # Create a number of wallets and open them in older versions:
+
+ # w1: regular wallet, created on master: update this test when default
+ # wallets can no longer be opened by older versions.
+ node_master.createwallet(wallet_name="w1")
+ wallet = node_master.get_wallet_rpc("w1")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] > 0
+ # Create a confirmed transaction, receiving coins
+ address = wallet.getnewaddress()
+ self.nodes[0].sendtoaddress(address, 10)
+ sync_mempools(self.nodes)
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+ # Create a conflicting transaction using RBF
+ return_address = self.nodes[0].getnewaddress()
+ tx1_id = self.nodes[1].sendtoaddress(return_address, 1)
+ tx2_id = self.nodes[1].bumpfee(tx1_id)["txid"]
+ # Confirm the transaction
+ sync_mempools(self.nodes)
+ self.nodes[0].generate(1)
+ sync_blocks(self.nodes)
+ # Create another conflicting transaction using RBF
+ tx3_id = self.nodes[1].sendtoaddress(return_address, 1)
+ tx4_id = self.nodes[1].bumpfee(tx3_id)["txid"]
+ # Abandon transaction, but don't confirm
+ self.nodes[1].abandontransaction(tx3_id)
+
+ # w1_v19: regular wallet, created with v0.19
+ node_v19.createwallet(wallet_name="w1_v19")
+ wallet = node_v19.get_wallet_rpc("w1_v19")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] > 0
+ # Use addmultisigaddress (see #18075)
+ address_18075 = wallet.addmultisigaddress(1, ["0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52", "037211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073"], "", "legacy")["address"]
+ assert wallet.getaddressinfo(address_18075)["solvable"]
+
+ # w1_v18: regular wallet, created with v0.18
+ node_v18.createwallet(wallet_name="w1_v18")
+ wallet = node_v18.get_wallet_rpc("w1_v18")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] > 0
+
+ # w2: wallet with private keys disabled, created on master: update this
+ # test when default wallets private keys disabled can no longer be
+ # opened by older versions.
+ node_master.createwallet(wallet_name="w2", disable_private_keys=True)
+ wallet = node_master.get_wallet_rpc("w2")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled'] == False
+ assert info['keypoolsize'] == 0
+
+ # w2_v19: wallet with private keys disabled, created with v0.19
+ node_v19.createwallet(wallet_name="w2_v19", disable_private_keys=True)
+ wallet = node_v19.get_wallet_rpc("w2_v19")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled'] == False
+ assert info['keypoolsize'] == 0
+
+ # w2_v18: wallet with private keys disabled, created with v0.18
+ node_v18.createwallet(wallet_name="w2_v18", disable_private_keys=True)
+ wallet = node_v18.get_wallet_rpc("w2_v18")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled'] == False
+ assert info['keypoolsize'] == 0
+
+ # w3: blank wallet, created on master: update this
+ # test when default blank wallets can no longer be opened by older versions.
+ node_master.createwallet(wallet_name="w3", blank=True)
+ wallet = node_master.get_wallet_rpc("w3")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] == 0
+
+ # w3_v19: blank wallet, created with v0.19
+ node_v19.createwallet(wallet_name="w3_v19", blank=True)
+ wallet = node_v19.get_wallet_rpc("w3_v19")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] == 0
+
+ # w3_v18: blank wallet, created with v0.18
+ node_v18.createwallet(wallet_name="w3_v18", blank=True)
+ wallet = node_v18.get_wallet_rpc("w3_v18")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] == 0
+
+ # Copy the wallets to older nodes:
+ node_master_wallets_dir = os.path.join(node_master.datadir, "regtest/wallets")
+ node_v19_wallets_dir = os.path.join(node_v19.datadir, "regtest/wallets")
+ node_v18_wallets_dir = os.path.join(node_v18.datadir, "regtest/wallets")
+ node_v17_wallets_dir = os.path.join(node_v17.datadir, "regtest/wallets")
+ node_master.unloadwallet("w1")
+ node_master.unloadwallet("w2")
+ node_v19.unloadwallet("w1_v19")
+ node_v19.unloadwallet("w2_v19")
+ node_v18.unloadwallet("w1_v18")
+ node_v18.unloadwallet("w2_v18")
+
+ # Copy wallets to v0.17
+ for wallet in os.listdir(node_master_wallets_dir):
+ shutil.copytree(
+ os.path.join(node_master_wallets_dir, wallet),
+ os.path.join(node_v17_wallets_dir, wallet)
+ )
+ for wallet in os.listdir(node_v18_wallets_dir):
+ shutil.copytree(
+ os.path.join(node_v18_wallets_dir, wallet),
+ os.path.join(node_v17_wallets_dir, wallet)
+ )
+
+ # Copy wallets to v0.18
+ for wallet in os.listdir(node_master_wallets_dir):
+ shutil.copytree(
+ os.path.join(node_master_wallets_dir, wallet),
+ os.path.join(node_v18_wallets_dir, wallet)
+ )
+
+ # Copy wallets to v0.19
+ for wallet in os.listdir(node_master_wallets_dir):
+ shutil.copytree(
+ os.path.join(node_master_wallets_dir, wallet),
+ os.path.join(node_v19_wallets_dir, wallet)
+ )
+
+ # Open the wallets in v0.19
+ node_v19.loadwallet("w1")
+ wallet = node_v19.get_wallet_rpc("w1")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] > 0
+ txs = wallet.listtransactions()
+ assert_equal(len(txs), 5)
+ assert_equal(txs[1]["txid"], tx1_id)
+ assert_equal(txs[2]["walletconflicts"], [tx1_id])
+ assert_equal(txs[1]["replaced_by_txid"], tx2_id)
+ assert not(txs[1]["abandoned"])
+ assert_equal(txs[1]["confirmations"], -1)
+ assert_equal(txs[2]["blockindex"], 1)
+ assert txs[3]["abandoned"]
+ assert_equal(txs[4]["walletconflicts"], [tx3_id])
+ assert_equal(txs[3]["replaced_by_txid"], tx4_id)
+ assert not(hasattr(txs[3], "blockindex"))
+
+ node_v19.loadwallet("w2")
+ wallet = node_v19.get_wallet_rpc("w2")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled'] == False
+ assert info['keypoolsize'] == 0
+
+ node_v19.loadwallet("w3")
+ wallet = node_v19.get_wallet_rpc("w3")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] == 0
+
+ # Open the wallets in v0.18
+ node_v18.loadwallet("w1")
+ wallet = node_v18.get_wallet_rpc("w1")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] > 0
+ txs = wallet.listtransactions()
+ assert_equal(len(txs), 5)
+ assert_equal(txs[1]["txid"], tx1_id)
+ assert_equal(txs[2]["walletconflicts"], [tx1_id])
+ assert_equal(txs[1]["replaced_by_txid"], tx2_id)
+ assert not(txs[1]["abandoned"])
+ assert_equal(txs[1]["confirmations"], -1)
+ assert_equal(txs[2]["blockindex"], 1)
+ assert txs[3]["abandoned"]
+ assert_equal(txs[4]["walletconflicts"], [tx3_id])
+ assert_equal(txs[3]["replaced_by_txid"], tx4_id)
+ assert not(hasattr(txs[3], "blockindex"))
+
+ node_v18.loadwallet("w2")
+ wallet = node_v18.get_wallet_rpc("w2")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled'] == False
+ assert info['keypoolsize'] == 0
+
+ node_v18.loadwallet("w3")
+ wallet = node_v18.get_wallet_rpc("w3")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] == 0
+
+ # Open the wallets in v0.17
+ node_v17.loadwallet("w1_v18")
+ wallet = node_v17.get_wallet_rpc("w1_v18")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] > 0
+
+ node_v17.loadwallet("w1")
+ wallet = node_v17.get_wallet_rpc("w1")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled']
+ assert info['keypoolsize'] > 0
+
+ node_v17.loadwallet("w2_v18")
+ wallet = node_v17.get_wallet_rpc("w2_v18")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled'] == False
+ assert info['keypoolsize'] == 0
+
+ node_v17.loadwallet("w2")
+ wallet = node_v17.get_wallet_rpc("w2")
+ info = wallet.getwalletinfo()
+ assert info['private_keys_enabled'] == False
+ assert info['keypoolsize'] == 0
+
+ # RPC loadwallet failure causes bitcoind to exit, in addition to the RPC
+ # call failure, so the following test won't work:
+ # assert_raises_rpc_error(-4, "Wallet loading failed.", node_v17.loadwallet, 'w3_v18')
+
+ # Instead, we stop node and try to launch it with the wallet:
+ self.stop_node(self.num_nodes - 1)
+ node_v17.assert_start_raises_init_error(["-wallet=w3_v18"], "Error: Error loading w3_v18: Wallet requires newer version of Bitcoin Core")
+ node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: Error loading w3: Wallet requires newer version of Bitcoin Core")
+ self.start_node(self.num_nodes - 1)
+
+ self.log.info("Test wallet upgrade path...")
+ # u1: regular wallet, created with v0.17
+ node_v17.createwallet(wallet_name="u1_v17")
+ wallet = node_v17.get_wallet_rpc("u1_v17")
+ address = wallet.getnewaddress("bech32")
+ info = wallet.getaddressinfo(address)
+ hdkeypath = info["hdkeypath"]
+ pubkey = info["pubkey"]
+
+ # Copy the 0.17 wallet to the last Bitcoin Core version and open it:
+ node_v17.unloadwallet("u1_v17")
+ shutil.copytree(
+ os.path.join(node_v17_wallets_dir, "u1_v17"),
+ os.path.join(node_master_wallets_dir, "u1_v17")
+ )
+ node_master.loadwallet("u1_v17")
+ wallet = node_master.get_wallet_rpc("u1_v17")
+ info = wallet.getaddressinfo(address)
+ descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + pubkey + ")"
+ assert_equal(info["desc"], descsum_create(descriptor))
+
+ # Copy the 0.19 wallet to the last Bitcoin Core version and open it:
+ shutil.copytree(
+ os.path.join(node_v19_wallets_dir, "w1_v19"),
+ os.path.join(node_master_wallets_dir, "w1_v19")
+ )
+ node_master.loadwallet("w1_v19")
+ wallet = node_master.get_wallet_rpc("w1_v19")
+ assert wallet.getaddressinfo(address_18075)["solvable"]
+
+if __name__ == '__main__':
+ BackwardsCompatibilityTest().main()
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
index 95905f477b..0c591de869 100755
--- a/test/functional/feature_block.py
+++ b/test/functional/feature_block.py
@@ -630,17 +630,19 @@ class FullBlockTest(BitcoinTestFramework):
self.log.info("Reject a block with invalid work")
self.move_tip(44)
- b47 = self.next_block(47, solve=False)
+ b47 = self.next_block(47)
target = uint256_from_compact(b47.nBits)
- while b47.sha256 < target:
+ while b47.sha256 <= target:
+ # Rehash nonces until an invalid too-high-hash block is found.
b47.nNonce += 1
b47.rehash()
self.send_blocks([b47], False, force_send=True, reject_reason='high-hash', reconnect=True)
self.log.info("Reject a block with a timestamp >2 hours in the future")
self.move_tip(44)
- b48 = self.next_block(48, solve=False)
+ b48 = self.next_block(48)
b48.nTime = int(time.time()) + 60 * 60 * 3
+ # Header timestamp has changed. Re-solve the block.
b48.solve()
self.send_blocks([b48], False, force_send=True, reject_reason='time-too-new')
@@ -1261,7 +1263,7 @@ class FullBlockTest(BitcoinTestFramework):
self.save_spendable_output()
spend = self.get_spendable_output()
- self.send_blocks(blocks, True, timeout=960)
+ self.send_blocks(blocks, True, timeout=2440)
chain1_tip = i
# now create alt chain of same length
@@ -1273,14 +1275,14 @@ class FullBlockTest(BitcoinTestFramework):
# extend alt chain to trigger re-org
block = self.next_block("alt" + str(chain1_tip + 1), version=4)
- self.send_blocks([block], True, timeout=960)
+ self.send_blocks([block], True, timeout=2440)
# ... and re-org back to the first chain
self.move_tip(chain1_tip)
block = self.next_block(chain1_tip + 1, version=4)
self.send_blocks([block], False, force_send=True)
block = self.next_block(chain1_tip + 2, version=4)
- self.send_blocks([block], True, timeout=960)
+ self.send_blocks([block], True, timeout=2440)
self.log.info("Reject a block with an invalid block header version")
b_v1 = self.next_block('b_v1', version=1)
@@ -1321,7 +1323,7 @@ class FullBlockTest(BitcoinTestFramework):
tx.rehash()
return tx
- def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True, *, version=1):
+ def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), *, version=1):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
@@ -1343,8 +1345,8 @@ class FullBlockTest(BitcoinTestFramework):
self.sign_tx(tx, spend)
self.add_transactions_to_block(block, [tx])
block.hashMerkleRoot = block.calc_merkle_root()
- if solve:
- block.solve()
+ # Block is created. Find a valid nonce.
+ block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py
index 2c6f2e733b..073ed8d7c7 100755
--- a/test/functional/feature_cltv.py
+++ b/test/functional/feature_cltv.py
@@ -55,12 +55,12 @@ class BIP65Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
- '-whitelist=127.0.0.1',
+ '-whitelist=noban@127.0.0.1',
'-par=1', # Use only one script thread to get the exact reject reason for testing
'-acceptnonstdtxn=1', # cltv_invalidate is nonstandard
]]
self.setup_clean_chain = True
- self.rpc_timeout = 120
+ self.rpc_timeout = 480
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py
index c2b4de54f2..a98480a6dd 100755
--- a/test/functional/feature_csv_activation.py
+++ b/test/functional/feature_csv_activation.py
@@ -35,6 +35,7 @@ bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evalu
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
+bip112tx_emptystack - test empty stack (= no argument) OP_CSV
"""
from decimal import Decimal
from itertools import product
@@ -56,6 +57,8 @@ from test_framework.util import (
softfork_active,
)
+TESTING_TX_COUNT = 83 # Number of testing transactions: 1 BIP113 tx, 16 BIP68 txs, 66 BIP112 txs (see comments above)
+COINBASE_BLOCK_COUNT = TESTING_TX_COUNT # Number of coinbase blocks we need to generate as inputs for our txs
BASE_RELATIVE_LOCKTIME = 10
CSV_ACTIVATION_HEIGHT = 432
SEQ_DISABLE_FLAG = 1 << 31
@@ -95,6 +98,13 @@ def create_bip112special(node, input, txversion, address):
signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
return signtx
+def create_bip112emptystack(node, input, txversion, address):
+ tx = create_transaction(node, input, address, amount=Decimal("49.98"))
+ tx.nVersion = txversion
+ signtx = sign_transaction(node, tx)
+ signtx.vin[0].scriptSig = CScript([OP_CHECKSEQUENCEVERIFY] + list(CScript(signtx.vin[0].scriptSig)))
+ return signtx
+
def send_generic_input_tx(node, coinbases, address):
return node.sendrawtransaction(ToHex(sign_transaction(node, create_transaction(node, node.getblock(coinbases.pop())['tx'][0], address, amount=Decimal("49.99")))))
@@ -138,7 +148,12 @@ class BIP68_112_113Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
- self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4', '-addresstype=legacy']]
+ self.extra_args = [[
+ '-whitelist=noban@127.0.0.1',
+ '-blockversion=4',
+ '-addresstype=legacy',
+ '-par=1', # Use only one script thread to get the exact reject reason for testing
+ ]]
self.supports_cli = False
def skip_test_if_missing_module(self):
@@ -163,11 +178,11 @@ class BIP68_112_113Test(BitcoinTestFramework):
block.solve()
return block
- def send_blocks(self, blocks, success=True):
+ def send_blocks(self, blocks, success=True, reject_reason=None):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
Call with success = False if the tip shouldn't advance to the most recent block."""
- self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success)
+ self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason)
def run_test(self):
self.nodes[0].add_p2p_connection(P2PDataStore())
@@ -175,15 +190,16 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.log.info("Generate blocks in the past for coinbase outputs.")
long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
- self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2 * 32 + 1) # 82 blocks generated for inputs
+ self.coinbase_blocks = self.nodes[0].generate(COINBASE_BLOCK_COUNT) # blocks generated for inputs
self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
- self.tipheight = 82 # height of the next block to build
+ self.tipheight = COINBASE_BLOCK_COUNT # height of the next block to build
self.last_block_time = long_past_time
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.nodeaddress = self.nodes[0].getnewaddress()
# Activation height is hardcoded
- test_blocks = self.generate_blocks(345)
+ # We advance to block height five below BIP112 activation for the following tests
+ test_blocks = self.generate_blocks(CSV_ACTIVATION_HEIGHT - 5 - COINBASE_BLOCK_COUNT)
self.send_blocks(test_blocks)
assert not softfork_active(self.nodes[0], 'csv')
@@ -214,6 +230,8 @@ class BIP68_112_113Test(BitcoinTestFramework):
# 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112specialinput = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
+ # 1 special input with (empty stack) OP_CSV (actually will be prepended to spending scriptSig)
+ bip112emptystackinput = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
# 1 normal input
bip113input = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
@@ -224,7 +242,7 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.tip = int(inputblockhash, 16)
self.tipheight += 1
self.last_block_time += 600
- assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), 82 + 1)
+ assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), TESTING_TX_COUNT + 1)
# 2 more version 4 blocks
test_blocks = self.generate_blocks(2)
@@ -263,6 +281,9 @@ class BIP68_112_113Test(BitcoinTestFramework):
# -1 OP_CSV OP_DROP input
bip112tx_special_v1 = create_bip112special(self.nodes[0], bip112specialinput, 1, self.nodeaddress)
bip112tx_special_v2 = create_bip112special(self.nodes[0], bip112specialinput, 2, self.nodeaddress)
+ # (empty stack) OP_CSV input
+ bip112tx_emptystack_v1 = create_bip112emptystack(self.nodes[0], bip112emptystackinput, 1, self.nodeaddress)
+ bip112tx_emptystack_v2 = create_bip112emptystack(self.nodes[0], bip112emptystackinput, 2, self.nodeaddress)
self.log.info("TESTING")
@@ -270,11 +291,12 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.log.info("Test version 1 txs")
success_txs = []
- # add BIP113 tx and -1 CSV tx
+ # BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
success_txs.append(bip113signed1)
success_txs.append(bip112tx_special_v1)
+ success_txs.append(bip112tx_emptystack_v1)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v1))
# add BIP 112 with seq=10 txs
@@ -289,11 +311,12 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.log.info("Test version 2 txs")
success_txs = []
- # add BIP113 tx and -1 CSV tx
+ # BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
success_txs.append(bip113signed2)
success_txs.append(bip112tx_special_v2)
+ success_txs.append(bip112tx_emptystack_v2)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v2))
# add BIP 112 with seq=10 txs
@@ -320,7 +343,7 @@ class BIP68_112_113Test(BitcoinTestFramework):
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
- self.send_blocks([self.create_test_block([bip113tx])], success=False)
+ self.send_blocks([self.create_test_block([bip113tx])], success=False, reject_reason='bad-txns-nonfinal')
# BIP 113 tests should now pass if the locktime is < MTP
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
@@ -352,11 +375,11 @@ class BIP68_112_113Test(BitcoinTestFramework):
# All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
bip68timetxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']]
for tx in bip68timetxs:
- self.send_blocks([self.create_test_block([tx])], success=False)
+ self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')
bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']]
for tx in bip68heighttxs:
- self.send_blocks([self.create_test_block([tx])], success=False)
+ self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')
# Advance one block to 438
test_blocks = self.generate_blocks(1)
@@ -367,7 +390,7 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.send_blocks([self.create_test_block(bip68success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
for tx in bip68heighttxs:
- self.send_blocks([self.create_test_block([tx])], success=False)
+ self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')
# Advance one block to 439
test_blocks = self.generate_blocks(1)
@@ -381,8 +404,11 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.log.info("BIP 112 tests")
self.log.info("Test version 1 txs")
- # -1 OP_CSV tx should fail
- self.send_blocks([self.create_test_block([bip112tx_special_v1])], success=False)
+ # -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
+ self.send_blocks([self.create_test_block([bip112tx_special_v1])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Negative locktime)')
+ self.send_blocks([self.create_test_block([bip112tx_emptystack_v1])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']]
@@ -393,15 +419,19 @@ class BIP68_112_113Test(BitcoinTestFramework):
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1)
fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1)
- fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']]
+ fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if not tx['sdf']]
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']]
for tx in fail_txs:
- self.send_blocks([self.create_test_block([tx])], success=False)
+ self.send_blocks([self.create_test_block([tx])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
self.log.info("Test version 2 txs")
- # -1 OP_CSV tx should fail
- self.send_blocks([self.create_test_block([bip112tx_special_v2])], success=False)
+ # -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
+ self.send_blocks([self.create_test_block([bip112tx_special_v2])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Negative locktime)')
+ self.send_blocks([self.create_test_block([bip112tx_emptystack_v2])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']]
@@ -416,18 +446,21 @@ class BIP68_112_113Test(BitcoinTestFramework):
fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2)
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']]
for tx in fail_txs:
- self.send_blocks([self.create_test_block([tx])], success=False)
+ self.send_blocks([self.create_test_block([tx])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']]
for tx in fail_txs:
- self.send_blocks([self.create_test_block([tx])], success=False)
+ self.send_blocks([self.create_test_block([tx])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
# If sequencelock types mismatch, tx should fail
fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']]
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]
for tx in fail_txs:
- self.send_blocks([self.create_test_block([tx])], success=False)
+ self.send_blocks([self.create_test_block([tx])], success=False,
+ reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
# Remaining txs should pass, just test masking works properly
success_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']]
@@ -445,7 +478,5 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.send_blocks([self.create_test_block(time_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
- # TODO: Test empty stack fails
-
if __name__ == '__main__':
BIP68_112_113Test().main()
diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py
index 27da49cf24..38cdf0501e 100755
--- a/test/functional/feature_dersig.py
+++ b/test/functional/feature_dersig.py
@@ -36,13 +36,15 @@ def unDERify(tx):
tx.vin[0].scriptSig = CScript(newscript)
-
class BIP66Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
- self.extra_args = [['-whitelist=127.0.0.1', '-par=1']] # Use only one script thread to get the exact log msg for testing
+ self.extra_args = [[
+ '-whitelist=noban@127.0.0.1',
+ '-par=1', # Use only one script thread to get the exact log msg for testing
+ ]]
self.setup_clean_chain = True
- self.rpc_timeout = 120
+ self.rpc_timeout = 240
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index d2d41b1206..5128485ec0 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -99,8 +99,20 @@ def split_inputs(from_node, txins, txouts, initial_split=False):
txouts.append({"txid": txid, "vout": 0, "amount": half_change})
txouts.append({"txid": txid, "vout": 1, "amount": rem_change})
+def check_raw_estimates(node, fees_seen):
+ """Call estimaterawfee and verify that the estimates meet certain invariants."""
-def check_estimates(node, fees_seen):
+ delta = 1.0e-6 # account for rounding error
+ for i in range(1, 26):
+ for _, e in node.estimaterawfee(i).items():
+ feerate = float(e["feerate"])
+ assert_greater_than(feerate, 0)
+
+ if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
+ raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
+ % (feerate, min(fees_seen), max(fees_seen)))
+
+def check_smart_estimates(node, fees_seen):
"""Call estimatesmartfee and verify that the estimates meet certain invariants."""
delta = 1.0e-6 # account for rounding error
@@ -123,16 +135,19 @@ def check_estimates(node, fees_seen):
else:
assert_greater_than_or_equal(i + 1, e["blocks"])
+def check_estimates(node, fees_seen):
+ check_raw_estimates(node, fees_seen)
+ check_smart_estimates(node, fees_seen)
class EstimateFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
# mine non-standard txs (e.g. txs with "dust" outputs)
- # Force fSendTrickle to true (via whitelist)
+ # Force fSendTrickle to true (via whitelist.noban)
self.extra_args = [
- ["-acceptnonstdtxn", "-whitelist=127.0.0.1"],
- ["-acceptnonstdtxn", "-whitelist=127.0.0.1", "-blockmaxweight=68000"],
- ["-acceptnonstdtxn", "-whitelist=127.0.0.1", "-blockmaxweight=32000"],
+ ["-acceptnonstdtxn", "-whitelist=noban@127.0.0.1"],
+ ["-acceptnonstdtxn", "-whitelist=noban@127.0.0.1", "-blockmaxweight=68000"],
+ ["-acceptnonstdtxn", "-whitelist=noban@127.0.0.1", "-blockmaxweight=32000"],
]
def skip_test_if_missing_module(self):
diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py
index 8d200915bd..974388d798 100755
--- a/test/functional/feature_maxuploadtarget.py
+++ b/test/functional/feature_maxuploadtarget.py
@@ -140,10 +140,9 @@ class MaxUploadTest(BitcoinTestFramework):
self.nodes[0].disconnect_p2ps()
- #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
- self.log.info("Restarting nodes with -whitelist=127.0.0.1")
+ self.log.info("Restarting node 0 with noban permission and 1MB maxuploadtarget")
self.stop_node(0)
- self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1"])
+ self.start_node(0, ["-whitelist=noban@127.0.0.1", "-maxuploadtarget=1"])
# Reconnect to self.nodes[0]
self.nodes[0].add_p2p_connection(TestP2PConn())
diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py
index da00b773ad..b110a559c0 100755
--- a/test/functional/feature_notifications.py
+++ b/test/functional/feature_notifications.py
@@ -13,6 +13,16 @@ from test_framework.util import (
connect_nodes,
)
+# Linux allow all characters other than \x00
+# Windows disallow control characters (0-31) and /\?%:|"<>
+FILE_CHAR_START = 32 if os.name == 'nt' else 1
+FILE_CHAR_END = 128
+FILE_CHAR_BLACKLIST = '/\\?%*:|"<>' if os.name == 'nt' else '/'
+
+
+def notify_outputname(walletname, txid):
+ return txid if os.name == 'nt' else '{}_{}'.format(walletname, txid)
+
class NotificationsTest(BitcoinTestFramework):
def set_test_params(self):
@@ -20,6 +30,7 @@ class NotificationsTest(BitcoinTestFramework):
self.setup_clean_chain = True
def setup_network(self):
+ self.wallet = ''.join(chr(i) for i in range(FILE_CHAR_START, FILE_CHAR_END) if chr(i) not in FILE_CHAR_BLACKLIST)
self.alertnotify_dir = os.path.join(self.options.tmpdir, "alertnotify")
self.blocknotify_dir = os.path.join(self.options.tmpdir, "blocknotify")
self.walletnotify_dir = os.path.join(self.options.tmpdir, "walletnotify")
@@ -33,7 +44,8 @@ class NotificationsTest(BitcoinTestFramework):
"-blocknotify=echo > {}".format(os.path.join(self.blocknotify_dir, '%s'))],
["-blockversion=211",
"-rescan",
- "-walletnotify=echo > {}".format(os.path.join(self.walletnotify_dir, '%s'))]]
+ "-wallet={}".format(self.wallet),
+ "-walletnotify=echo > {}".format(os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s')))]]
super().setup_network()
def run_test(self):
@@ -53,7 +65,7 @@ class NotificationsTest(BitcoinTestFramework):
wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
# directory content should equal the generated transaction hashes
- txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
+ txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))
assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir)))
self.stop_node(1)
for tx_file in os.listdir(self.walletnotify_dir):
@@ -67,7 +79,7 @@ class NotificationsTest(BitcoinTestFramework):
wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
# directory content should equal the generated transaction hashes
- txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
+ txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))
assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir)))
# TODO: add test for `-alertnotify` large fork notifications
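A sketch of how the walletnotify filename above is put together: bitcoind substitutes %s with the txid and, with this change, %w with the wallet name, so on non-Windows platforms the notification file becomes "<wallet>_<txid>" (the wallet name and txid below are placeholders):

    import os

    def notify_outputname(walletname, txid):
        return txid if os.name == 'nt' else '{}_{}'.format(walletname, txid)

    # On Linux/macOS this yields "testwallet_deadbeef"; the test expects bare txids on Windows.
    print(notify_outputname('testwallet', 'deadbeef'))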
diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py
index aaf56a42d0..9c92ee7f90 100755
--- a/test/functional/feature_nulldummy.py
+++ b/test/functional/feature_nulldummy.py
@@ -41,7 +41,10 @@ class NULLDUMMYTest(BitcoinTestFramework):
self.setup_clean_chain = True
# This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
# normal segwit activation here (and don't use the default always-on behaviour).
- self.extra_args = [['-whitelist=127.0.0.1', '-segwitheight=432', '-addresstype=legacy']]
+ self.extra_args = [[
+ '-segwitheight=432',
+ '-addresstype=legacy',
+ ]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py
index 82c7e55245..909a43c8d9 100755
--- a/test/functional/feature_segwit.py
+++ b/test/functional/feature_segwit.py
@@ -72,6 +72,7 @@ class SegWitTest(BitcoinTestFramework):
"-addresstype=legacy",
],
]
+ self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/framework_test_script.py b/test/functional/framework_test_script.py
new file mode 100755
index 0000000000..9d916c0022
--- /dev/null
+++ b/test/functional/framework_test_script.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Tests for test_framework.script."""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.script import bn2vch
+from test_framework.util import assert_equal
+
+def test_bn2vch():
+ assert_equal(bn2vch(0), bytes([]))
+ assert_equal(bn2vch(1), bytes([0x01]))
+ assert_equal(bn2vch(-1), bytes([0x81]))
+ assert_equal(bn2vch(0x7F), bytes([0x7F]))
+ assert_equal(bn2vch(-0x7F), bytes([0xFF]))
+ assert_equal(bn2vch(0x80), bytes([0x80, 0x00]))
+ assert_equal(bn2vch(-0x80), bytes([0x80, 0x80]))
+ assert_equal(bn2vch(0xFF), bytes([0xFF, 0x00]))
+ assert_equal(bn2vch(-0xFF), bytes([0xFF, 0x80]))
+ assert_equal(bn2vch(0x100), bytes([0x00, 0x01]))
+ assert_equal(bn2vch(-0x100), bytes([0x00, 0x81]))
+ assert_equal(bn2vch(0x7FFF), bytes([0xFF, 0x7F]))
+ assert_equal(bn2vch(-0x8000), bytes([0x00, 0x80, 0x80]))
+ assert_equal(bn2vch(-0x7FFFFF), bytes([0xFF, 0xFF, 0xFF]))
+ assert_equal(bn2vch(0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x00]))
+ assert_equal(bn2vch(-0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x80]))
+ assert_equal(bn2vch(0xFFFFFFFF), bytes([0xFF, 0xFF, 0xFF, 0xFF, 0x00]))
+
+ assert_equal(bn2vch(123456789), bytes([0x15, 0xCD, 0x5B, 0x07]))
+ assert_equal(bn2vch(-54321), bytes([0x31, 0xD4, 0x80]))
+
+class FrameworkTestScript(BitcoinTestFramework):
+ def setup_network(self):
+ pass
+
+ def set_test_params(self):
+ self.num_nodes = 0
+
+ def run_test(self):
+ test_bn2vch()
+
+if __name__ == '__main__':
+ FrameworkTestScript().main()
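One of the cases above worked by hand (the bn2vch implementation itself lands in test_framework/script.py further down in this diff): -0x80 needs 8 magnitude bits plus a sign bit, so it takes two bytes, serialized little-endian with the sign in the top bit of the last byte:

    # |v| = 0x80 -> 8 bits, plus 1 sign bit -> 9 bits -> 2 bytes.
    # Magnitude with the sign bit set: 0x80 | (1 << 15) = 0x8080.
    # Little-endian that is b'\x80\x80', matching bn2vch(-0x80) above.
    assert (0x80 | (1 << 15)).to_bytes(2, 'little') == bytes([0x80, 0x80])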
diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py
index 900cabccda..9ade22a7eb 100755
--- a/test/functional/mempool_accept.py
+++ b/test/functional/mempool_accept.py
@@ -16,6 +16,7 @@ from test_framework.messages import (
CTransaction,
CTxOut,
MAX_BLOCK_BASE_SIZE,
+ MAX_MONEY,
)
from test_framework.script import (
hash160,
@@ -220,7 +221,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
# The following two validations prevent overflow of the output amounts (see CVE-2010-5139).
self.log.info('A transaction with too large output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
- tx.vout[0].nValue = 21000000 * COIN + 1
+ tx.vout[0].nValue = MAX_MONEY + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-toolarge'}],
rawtxs=[tx.serialize().hex()],
@@ -229,7 +230,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
self.log.info('A transaction with too large sum of output values')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = [tx.vout[0]] * 2
- tx.vout[0].nValue = 21000000 * COIN
+ tx.vout[0].nValue = MAX_MONEY
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-txouttotal-toolarge'}],
rawtxs=[tx.serialize().hex()],
diff --git a/test/functional/mempool_expiry.py b/test/functional/mempool_expiry.py
new file mode 100755
index 0000000000..8b9b7b155a
--- /dev/null
+++ b/test/functional/mempool_expiry.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Tests that a mempool transaction expires after a given timeout and that its
+children are removed as well.
+
+Both the default expiry timeout defined by DEFAULT_MEMPOOL_EXPIRY and a user
+definable expiry timeout via the '-mempoolexpiry=<n>' command line argument
+(<n> is the timeout in hours) are tested.
+"""
+
+from datetime import timedelta
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+ assert_raises_rpc_error,
+ find_vout_for_address,
+)
+
+DEFAULT_MEMPOOL_EXPIRY = 336 # hours
+CUSTOM_MEMPOOL_EXPIRY = 10 # hours
+
+
+class MempoolExpiryTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+
+ def test_transaction_expiry(self, timeout):
+ """Tests that a transaction expires after the expiry timeout and its
+ children are removed as well."""
+ node = self.nodes[0]
+
+ # Send a parent transaction that will expire.
+ parent_address = node.getnewaddress()
+ parent_txid = node.sendtoaddress(parent_address, 1.0)
+
+ # Set the mocktime to the arrival time of the parent transaction.
+ entry_time = node.getmempoolentry(parent_txid)['time']
+ node.setmocktime(entry_time)
+
+ # Create child transaction spending the parent transaction
+ vout = find_vout_for_address(node, parent_txid, parent_address)
+ inputs = [{'txid': parent_txid, 'vout': vout}]
+ outputs = {node.getnewaddress(): 0.99}
+ child_raw = node.createrawtransaction(inputs, outputs)
+ child_signed = node.signrawtransactionwithwallet(child_raw)['hex']
+
+ # Let half of the timeout elapse and broadcast the child transaction.
+ half_expiry_time = entry_time + int(60 * 60 * timeout/2)
+ node.setmocktime(half_expiry_time)
+ child_txid = node.sendrawtransaction(child_signed)
+ self.log.info('Broadcast child transaction after {} hours.'.format(
+ timedelta(seconds=(half_expiry_time-entry_time))))
+
+ # Let most of the timeout elapse and check that the parent tx is still
+ # in the mempool.
+ nearly_expiry_time = entry_time + 60 * 60 * timeout - 5
+ node.setmocktime(nearly_expiry_time)
+ # Expiry of mempool transactions is only checked when a new transaction
+ # is added to the mempool.
+ node.sendtoaddress(node.getnewaddress(), 1.0)
+ self.log.info('Test parent tx not expired after {} hours.'.format(
+ timedelta(seconds=(nearly_expiry_time-entry_time))))
+ assert_equal(entry_time, node.getmempoolentry(parent_txid)['time'])
+
+ # Transaction should be evicted from the mempool after the expiry time
+ # has passed.
+ expiry_time = entry_time + 60 * 60 * timeout + 5
+ node.setmocktime(expiry_time)
+ # Expiry of mempool transactions is only checked when a new transaction
+ # is added to the mempool.
+ node.sendtoaddress(node.getnewaddress(), 1.0)
+ self.log.info('Test parent tx expiry after {} hours.'.format(
+ timedelta(seconds=(expiry_time-entry_time))))
+ assert_raises_rpc_error(-5, 'Transaction not in mempool',
+ node.getmempoolentry, parent_txid)
+
+ # The child transaction should be removed from the mempool as well.
+ self.log.info('Test child tx is evicted as well.')
+ assert_raises_rpc_error(-5, 'Transaction not in mempool',
+ node.getmempoolentry, child_txid)
+
+ def run_test(self):
+ self.log.info('Test default mempool expiry timeout of %d hours.' %
+ DEFAULT_MEMPOOL_EXPIRY)
+ self.test_transaction_expiry(DEFAULT_MEMPOOL_EXPIRY)
+
+ self.log.info('Test custom mempool expiry timeout of %d hours.' %
+ CUSTOM_MEMPOOL_EXPIRY)
+ self.restart_node(0, ['-mempoolexpiry=%d' % CUSTOM_MEMPOOL_EXPIRY])
+ self.test_transaction_expiry(CUSTOM_MEMPOOL_EXPIRY)
+
+
+if __name__ == '__main__':
+ MempoolExpiryTest().main()
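The hour-based timeouts above are turned into mocktime offsets in seconds; for the default of 336 hours that is 14 days, so the child transaction is broadcast 7 days after the parent entered the mempool. A quick sanity check of that arithmetic:

    from datetime import timedelta

    DEFAULT_MEMPOOL_EXPIRY = 336  # hours
    print(timedelta(seconds=60 * 60 * DEFAULT_MEMPOOL_EXPIRY))           # 14 days, 0:00:00
    print(timedelta(seconds=int(60 * 60 * DEFAULT_MEMPOOL_EXPIRY / 2)))  # 7 days, 0:00:00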
diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py
index 7014105d88..a07dad18d6 100755
--- a/test/functional/mempool_packages.py
+++ b/test/functional/mempool_packages.py
@@ -12,6 +12,7 @@ from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
satoshi_round,
+ wait_until,
)
# default limits
@@ -19,13 +20,22 @@ MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
# custom limits for node1
MAX_ANCESTORS_CUSTOM = 5
+MAX_DESCENDANTS_CUSTOM = 10
+assert MAX_DESCENDANTS_CUSTOM >= MAX_ANCESTORS_CUSTOM
class MempoolPackagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [
- ["-maxorphantx=1000"],
- ["-maxorphantx=1000", "-limitancestorcount={}".format(MAX_ANCESTORS_CUSTOM)],
+ [
+ "-maxorphantx=1000",
+ "-whitelist=noban@127.0.0.1", # immediate tx relay
+ ],
+ [
+ "-maxorphantx=1000",
+ "-limitancestorcount={}".format(MAX_ANCESTORS_CUSTOM),
+ "-limitdescendantcount={}".format(MAX_DESCENDANTS_CUSTOM),
+ ],
]
def skip_test_if_missing_module(self):
@@ -219,9 +229,11 @@ class MempoolPackagesTest(BitcoinTestFramework):
transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
# Sign and send up to MAX_DESCENDANT transactions chained off the parent tx
+ chain = [] # save sent txs for the purpose of checking node1's mempool later (see below)
for i in range(MAX_DESCENDANTS - 1):
utxo = transaction_package.pop(0)
(txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
+ chain.append(txid)
if utxo['txid'] is parent_transaction:
tx_children.append(txid)
for j in range(10):
@@ -238,7 +250,21 @@ class MempoolPackagesTest(BitcoinTestFramework):
utxo = transaction_package.pop(0)
assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
- # TODO: check that node1's mempool is as expected
+ # Check that node1's mempool is as expected, containing:
+ # - txs from previous ancestor test (-> custom ancestor limit)
+ # - parent tx for descendant test
+ # - txs chained off parent tx (-> custom descendant limit)
+ wait_until(lambda: len(self.nodes[1].getrawmempool(False)) ==
+ MAX_ANCESTORS_CUSTOM + 1 + MAX_DESCENDANTS_CUSTOM, timeout=10)
+ mempool0 = self.nodes[0].getrawmempool(False)
+ mempool1 = self.nodes[1].getrawmempool(False)
+ assert set(mempool1).issubset(set(mempool0))
+ assert parent_transaction in mempool1
+ for tx in chain[:MAX_DESCENDANTS_CUSTOM]:
+ assert tx in mempool1
+ for tx in chain[MAX_DESCENDANTS_CUSTOM:]:
+ assert tx not in mempool1
+ # TODO: more detailed check of node1's mempool (fees etc.)
# TODO: test descendant size limits
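The expected size of node1's mempool in the new wait_until check is the custom ancestor chain, plus the parent of the descendant package, plus the descendants node1 accepts under its custom limit:

    MAX_ANCESTORS_CUSTOM = 5
    MAX_DESCENDANTS_CUSTOM = 10
    # 5 ancestor-test txs + 1 parent tx + 10 descendant-test txs = 16 entries
    assert MAX_ANCESTORS_CUSTOM + 1 + MAX_DESCENDANTS_CUSTOM == 16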
diff --git a/test/functional/mempool_reorg.py b/test/functional/mempool_reorg.py
index 3b148d5cf0..d797dff134 100755
--- a/test/functional/mempool_reorg.py
+++ b/test/functional/mempool_reorg.py
@@ -76,7 +76,7 @@ class MempoolCoinbaseTest(BitcoinTestFramework):
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
- self.sync_all(timeout=360)
+ self.sync_all(timeout=720)
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
@@ -91,10 +91,11 @@ class MempoolCoinbaseTest(BitcoinTestFramework):
for node in self.nodes:
node.invalidateblock(new_blocks[0])
- self.sync_all(timeout=360)
+ self.sync_all(timeout=720)
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
+
if __name__ == '__main__':
MempoolCoinbaseTest().main()
diff --git a/test/functional/p2p_invalid_block.py b/test/functional/p2p_invalid_block.py
index 905534b862..801407757f 100755
--- a/test/functional/p2p_invalid_block.py
+++ b/test/functional/p2p_invalid_block.py
@@ -22,7 +22,7 @@ class InvalidBlockRequestTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
- self.extra_args = [["-whitelist=127.0.0.1"]]
+ self.extra_args = [["-whitelist=noban@127.0.0.1"]]
def run_test(self):
# Add p2p connection to node0
diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py
index 93e2957fd0..3a7bf4bfc3 100755
--- a/test/functional/p2p_permissions.py
+++ b/test/functional/p2p_permissions.py
@@ -132,6 +132,16 @@ class P2PPermissionsTests(BitcoinTestFramework):
p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
wait_until(lambda: txid in self.nodes[0].getrawmempool())
+ self.log.debug("Check that node[1] will not send an invalid tx to node[0]")
+ tx.vout[0].nValue += 1
+ txid = tx.rehash()
+ p2p_rebroadcast_wallet.send_txs_and_test(
+ [tx],
+ self.nodes[1],
+ success=False,
+ reject_reason='Not relaying non-mempool transaction {} from whitelisted peer=0'.format(txid),
+ )
+
def checkpermission(self, args, expectedPermissions, whitelisted):
self.restart_node(1, args)
connect_nodes(self.nodes[0], 1)
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
index 3223c27e0b..785c476e19 100755
--- a/test/functional/p2p_segwit.py
+++ b/test/functional/p2p_segwit.py
@@ -147,6 +147,11 @@ class TestP2PConn(P2PInterface):
super().__init__()
self.getdataset = set()
+ # Avoid sending out msg_getdata in the mininode thread as a reply to invs.
+ # They are not needed and would only lead to races because we send msg_getdata out in the test thread
+ def on_inv(self, message):
+ pass
+
def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
@@ -188,9 +193,9 @@ class SegWitTest(BitcoinTestFramework):
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
- ["-whitelist=127.0.0.1", "-acceptnonstdtxn=1", "-segwitheight={}".format(SEGWIT_HEIGHT)],
- ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-segwitheight={}".format(SEGWIT_HEIGHT)],
- ["-whitelist=127.0.0.1", "-acceptnonstdtxn=1", "-segwitheight=-1"]
+ ["-acceptnonstdtxn=1", "-segwitheight={}".format(SEGWIT_HEIGHT), "-whitelist=noban@127.0.0.1"],
+ ["-acceptnonstdtxn=0", "-segwitheight={}".format(SEGWIT_HEIGHT)],
+ ["-acceptnonstdtxn=1", "-segwitheight=-1"],
]
self.supports_cli = False
diff --git a/test/functional/rpc_createmultisig.py b/test/functional/rpc_createmultisig.py
index aa7f12848c..a983716177 100755
--- a/test/functional/rpc_createmultisig.py
+++ b/test/functional/rpc_createmultisig.py
@@ -4,7 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multisig RPCs"""
-from test_framework.descriptors import descsum_create
+from test_framework.descriptors import descsum_create, drop_origins
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_raises_rpc_error,
@@ -116,9 +116,20 @@ class RpcCreateMultiSigTest(BitcoinTestFramework):
def do_multisig(self):
node0, node1, node2 = self.nodes
+ # Construct the expected descriptor
+ desc = 'multi({},{})'.format(self.nsigs, ','.join(self.pub))
+ if self.output_type == 'legacy':
+ desc = 'sh({})'.format(desc)
+ elif self.output_type == 'p2sh-segwit':
+ desc = 'sh(wsh({}))'.format(desc)
+ elif self.output_type == 'bech32':
+ desc = 'wsh({})'.format(desc)
+ desc = descsum_create(desc)
+
msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
madd = msig["address"]
mredeem = msig["redeemScript"]
+ assert_equal(desc, msig['descriptor'])
if self.output_type == 'bech32':
assert madd[0:4] == "bcrt" # actually a bech32 address
@@ -126,6 +137,7 @@ class RpcCreateMultiSigTest(BitcoinTestFramework):
msigw = node1.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
maddw = msigw["address"]
mredeemw = msigw["redeemScript"]
+ assert_equal(desc, drop_origins(msigw['descriptor']))
# addmultisigaddress and createmultisig work the same
assert maddw == madd
assert mredeemw == mredeem
diff --git a/test/functional/rpc_estimatefee.py b/test/functional/rpc_estimatefee.py
new file mode 100755
index 0000000000..8bdecfc8cd
--- /dev/null
+++ b/test/functional/rpc_estimatefee.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+# Copyright (c) 2018 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the estimatefee RPCs.
+
+Test the following RPCs:
+ - estimatesmartfee
+ - estimaterawfee
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_raises_rpc_error
+
+class EstimateFeeTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = False
+ self.num_nodes = 1
+
+ def run_test(self):
+ # missing required params
+ assert_raises_rpc_error(-1, "estimatesmartfee", self.nodes[0].estimatesmartfee)
+ assert_raises_rpc_error(-1, "estimaterawfee", self.nodes[0].estimaterawfee)
+
+ # wrong type for conf_target
+ assert_raises_rpc_error(-3, "Expected type number, got string", self.nodes[0].estimatesmartfee, 'foo')
+ assert_raises_rpc_error(-3, "Expected type number, got string", self.nodes[0].estimaterawfee, 'foo')
+
+ # wrong type for estimatesmartfee(estimate_mode)
+ assert_raises_rpc_error(-3, "Expected type string, got number", self.nodes[0].estimatesmartfee, 1, 1)
+ assert_raises_rpc_error(-8, "Invalid estimate_mode parameter", self.nodes[0].estimatesmartfee, 1, 'foo')
+
+ # wrong type for estimaterawfee(threshold)
+ assert_raises_rpc_error(-3, "Expected type number, got string", self.nodes[0].estimaterawfee, 1, 'foo')
+
+ # extra params
+ assert_raises_rpc_error(-1, "estimatesmartfee", self.nodes[0].estimatesmartfee, 1, 'ECONOMICAL', 1)
+ assert_raises_rpc_error(-1, "estimaterawfee", self.nodes[0].estimaterawfee, 1, 1, 1)
+
+ # valid calls
+ self.nodes[0].estimatesmartfee(1)
+ # self.nodes[0].estimatesmartfee(1, None)
+ self.nodes[0].estimatesmartfee(1, 'ECONOMICAL')
+
+ self.nodes[0].estimaterawfee(1)
+ self.nodes[0].estimaterawfee(1, None)
+ self.nodes[0].estimaterawfee(1, 1)
+
+
+if __name__ == '__main__':
+ EstimateFeeTest().main()
diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py
index 6f1ae0d3ba..c435ef24ce 100755
--- a/test/functional/rpc_fundrawtransaction.py
+++ b/test/functional/rpc_fundrawtransaction.py
@@ -30,7 +30,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.setup_clean_chain = True
# This test isn't testing tx relay. Set whitelist on the peers for
# instant tx relay.
- self.extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
+ self.extra_args = [['-whitelist=noban@127.0.0.1']] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/rpc_getdescriptorinfo.py b/test/functional/rpc_getdescriptorinfo.py
new file mode 100755
index 0000000000..977dc805ef
--- /dev/null
+++ b/test/functional/rpc_getdescriptorinfo.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+# Copyright (c) 2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test getdescriptorinfo RPC.
+"""
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.descriptors import descsum_create
+from test_framework.util import (
+ assert_equal,
+ assert_raises_rpc_error,
+)
+
+
+class DescriptorTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+ self.extra_args = [["-disablewallet"]]
+
+ def test_desc(self, desc, isrange, issolvable, hasprivatekeys):
+ info = self.nodes[0].getdescriptorinfo(desc)
+ assert_equal(info, self.nodes[0].getdescriptorinfo(descsum_create(desc)))
+ assert_equal(info['descriptor'], descsum_create(desc))
+ assert_equal(info['isrange'], isrange)
+ assert_equal(info['issolvable'], issolvable)
+ assert_equal(info['hasprivatekeys'], hasprivatekeys)
+
+ def run_test(self):
+ assert_raises_rpc_error(-1, 'getdescriptorinfo', self.nodes[0].getdescriptorinfo)
+ assert_raises_rpc_error(-3, 'Expected type string', self.nodes[0].getdescriptorinfo, 1)
+ assert_raises_rpc_error(-5, 'is not a valid descriptor function', self.nodes[0].getdescriptorinfo, '')
+
+ # P2PK output with the specified public key.
+ self.test_desc('pk(0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798)', isrange=False, issolvable=True, hasprivatekeys=False)
+ # P2PKH output with the specified public key.
+ self.test_desc('pkh(02c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5)', isrange=False, issolvable=True, hasprivatekeys=False)
+ # P2WPKH output with the specified public key.
+ self.test_desc('wpkh(02f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9)', isrange=False, issolvable=True, hasprivatekeys=False)
+ # P2SH-P2WPKH output with the specified public key.
+ self.test_desc('sh(wpkh(03fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556))', isrange=False, issolvable=True, hasprivatekeys=False)
+ # Any P2PK, P2PKH, P2WPKH, or P2SH-P2WPKH output with the specified public key.
+ self.test_desc('combo(0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798)', isrange=False, issolvable=True, hasprivatekeys=False)
+ # An (overly complicated) P2SH-P2WSH-P2PKH output with the specified public key.
+ self.test_desc('sh(wsh(pkh(02e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd13)))', isrange=False, issolvable=True, hasprivatekeys=False)
+ # A bare *1-of-2* multisig output with keys in the specified order.
+ self.test_desc('multi(1,022f8bde4d1a07209355b4a7250a5c5128e88b84bddc619ab7cba8d569b240efe4,025cbdf0646e5db4eaa398f365f2ea7a0e3d419b7e0330e39ce92bddedcac4f9bc)', isrange=False, issolvable=True, hasprivatekeys=False)
+ # A P2SH *2-of-2* multisig output with keys in the specified order.
+ self.test_desc('sh(multi(2,022f01e5e15cca351daff3843fb70f3c2f0a1bdd05e5af888a67784ef3e10a2a01,03acd484e2f0c7f65309ad178a9f559abde09796974c57e714c35f110dfc27ccbe))', isrange=False, issolvable=True, hasprivatekeys=False)
+ # A P2WSH *2-of-3* multisig output with keys in the specified order.
+ self.test_desc('wsh(multi(2,03a0434d9e47f3c86235477c7b1ae6ae5d3442d49b1943c2b752a68e2a47e247c7,03774ae7f858a9411e5ef4246b70c65aac5649980be5c17891bbec17895da008cb,03d01115d548e7561b15c38f004d734633687cf4419620095bc5b0f47070afe85a))', isrange=False, issolvable=True, hasprivatekeys=False)
+ # A P2SH-P2WSH *1-of-3* multisig output with keys in the specified order.
+ self.test_desc('sh(wsh(multi(1,03f28773c2d975288bc7d1d205c3748651b075fbc6610e58cddeeddf8f19405aa8,03499fdf9e895e719cfd64e67f07d38e3226aa7b63678949e6e49b241a60e823e4,02d7924d4f7d43ea965a465ae3095ff41131e5946f3c85f79e44adbcf8e27e080e)))', isrange=False, issolvable=True, hasprivatekeys=False)
+ # A P2PK output with the public key of the specified xpub.
+ self.test_desc('pk(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B)', isrange=False, issolvable=True, hasprivatekeys=False)
+ # A P2PKH output with child key *1'/2* of the specified xpub.
+ self.test_desc("pkh(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1'/2)", isrange=False, issolvable=True, hasprivatekeys=False)
+ # A set of P2PKH outputs, but additionally specifies that the specified xpub is a child of a master with fingerprint `d34db33f`, and derived using path `44'/0'/0'`.
+ self.test_desc("pkh([d34db33f/44'/0'/0']tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/*)", isrange=True, issolvable=True, hasprivatekeys=False)
+ # A set of *1-of-2* P2WSH multisig outputs where the first multisig key is the *1/0/`i`* child of the first specified xpub and the second multisig key is the *0/0/`i`* child of the second specified xpub, and `i` is any number in a configurable range (`0-1000` by default).
+ self.test_desc("wsh(multi(1,tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/0/*,tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/0/0/*))", isrange=True, issolvable=True, hasprivatekeys=False)
+
+
+if __name__ == '__main__':
+ DescriptorTest().main()
diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py
index 33af819d34..3a63377545 100755
--- a/test/functional/rpc_psbt.py
+++ b/test/functional/rpc_psbt.py
@@ -193,12 +193,20 @@ class PSBTTest(BitcoinTestFramework):
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
# Update psbts, should only have data for one input and not the other
- psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
+ psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig, False, "ALL")['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
- psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
+ # Check that BIP32 path was added
+ assert "bip32_derivs" in psbt1_decoded['inputs'][0]
+ psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig, False, "ALL", False)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
+ # Check that BIP32 paths were not added
+ assert "bip32_derivs" not in psbt2_decoded['inputs'][1]
+
+ # Sign PSBTs (workaround issue #18039)
+ psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
+ psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])
@@ -231,16 +239,18 @@ class PSBTTest(BitcoinTestFramework):
# Same construction without optional arguments
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
- for tx_in in decoded_psbt["tx"]["vin"]:
+ for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
+ assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], 0)
# Same construction without optional arguments, for a node with -walletrbf=0
unspent1 = self.nodes[1].listunspent()[0]
psbtx_info = self.nodes[1].walletcreatefundedpsbt([{"txid":unspent1["txid"], "vout":unspent1["vout"]}], [{self.nodes[2].getnewaddress():unspent1["amount"]+1}], block_height)
decoded_psbt = self.nodes[1].decodepsbt(psbtx_info["psbt"])
- for tx_in in decoded_psbt["tx"]["vin"]:
+ for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
+ assert "bip32_derivs" in psbt_in
# Make sure that a change address wallet without P2SH innerscript access still results in success
# when attempting BnB coin selection
@@ -427,6 +437,10 @@ class PSBTTest(BitcoinTestFramework):
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 has invalid value')
+ self.log.info("PSBT with signed, but not finalized, inputs should have Finalizer as next")
+ analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAZYezcxdnbXoQCmrD79t/LzDgtUo9ERqixk8wgioAobrAAAAAAD9////AlDDAAAAAAAAFgAUy/UxxZuzZswcmFnN/E9DGSiHLUsuGPUFAAAAABYAFLsH5o0R38wXx+X2cCosTMCZnQ4baAAAAAABAR8A4fUFAAAAABYAFOBI2h5thf3+Lflb2LGCsVSZwsltIgIC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnJHMEQCIGx7zKcMIGr7cEES9BR4Kdt/pzPTK3fKWcGyCJXb7MVnAiALOBgqlMH4GbC1HDh/HmylmO54fyEy4lKde7/BT/PWxwEBAwQBAAAAIgYC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnIYDwVpQ1QAAIABAACAAAAAgAAAAAAAAAAAAAAiAgL+CIiB59NSCssOJRGiMYQK1chahgAaaJpIXE41Cyir+xgPBWlDVAAAgAEAAIAAAACAAQAAAAAAAAAA')
+ assert_equal(analysis['next'], 'finalizer')
+
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgCAgWrj0AcAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8A8gUqAQAAABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Output amount invalid')
diff --git a/test/functional/test_framework/bignum.py b/test/functional/test_framework/bignum.py
deleted file mode 100644
index db5ccd62c2..0000000000
--- a/test/functional/test_framework/bignum.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python3
-#
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Big number routines.
-
-This file is copied from python-bitcoinlib.
-"""
-
-import struct
-
-
-# generic big endian MPI format
-
-def bn_bytes(v, have_ext=False):
- ext = 0
- if have_ext:
- ext = 1
- return ((v.bit_length()+7)//8) + ext
-
-def bn2bin(v):
- s = bytearray()
- i = bn_bytes(v)
- while i > 0:
- s.append((v >> ((i-1) * 8)) & 0xff)
- i -= 1
- return s
-
-def bn2mpi(v):
- have_ext = False
- if v.bit_length() > 0:
- have_ext = (v.bit_length() & 0x07) == 0
-
- neg = False
- if v < 0:
- neg = True
- v = -v
-
- s = struct.pack(b">I", bn_bytes(v, have_ext))
- ext = bytearray()
- if have_ext:
- ext.append(0)
- v_bin = bn2bin(v)
- if neg:
- if have_ext:
- ext[0] |= 0x80
- else:
- v_bin[0] |= 0x80
- return s + ext + v_bin
-
-# bitcoin-specific little endian format, with implicit size
-def mpi2vch(s):
- r = s[4:] # strip size
- r = r[::-1] # reverse string, converting BE->LE
- return r
-
-def bn2vch(v):
- return bytes(mpi2vch(bn2mpi(v)))
diff --git a/test/functional/test_framework/descriptors.py b/test/functional/test_framework/descriptors.py
index 29482ce01e..46b405749b 100644
--- a/test/functional/test_framework/descriptors.py
+++ b/test/functional/test_framework/descriptors.py
@@ -4,6 +4,8 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utility functions related to output descriptors"""
+import re
+
INPUT_CHARSET = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "
CHECKSUM_CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
GENERATOR = [0xf5dee51989, 0xa9fdca3312, 0x1bab10e32d, 0x3706b1677a, 0x644d626ffd]
@@ -53,3 +55,10 @@ def descsum_check(s, require=True):
return False
symbols = descsum_expand(s[:-9]) + [CHECKSUM_CHARSET.find(x) for x in s[-8:]]
return descsum_polymod(symbols) == 1
+
+def drop_origins(s):
+ '''Drop the key origins from a descriptor'''
+ desc = re.sub(r'\[.+?\]', '', s)
+ if '#' in s:
+ desc = desc[:desc.index('#')]
+ return descsum_create(desc)
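A sketch of drop_origins on a descriptor that carries key origin information (the keys are placeholders; the real function re-appends a fresh checksum via descsum_create, which is skipped here):

    import re

    desc = "wsh(multi(1,[d34db33f/44'/0'/0']xpubA/0/*,[cafecafe/44'/0'/1']xpubB/0/*))#stalesum"
    stripped = re.sub(r'\[.+?\]', '', desc)
    stripped = stripped[:stripped.index('#')]  # drop the now-stale checksum too
    print(stripped)  # wsh(multi(1,xpubA/0/*,xpubB/0/*))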
diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py
index 4f7a9a8b13..285a3fbbf4 100755
--- a/test/functional/test_framework/messages.py
+++ b/test/functional/test_framework/messages.py
@@ -39,6 +39,7 @@ MAX_LOCATOR_SZ = 101
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
+MAX_MONEY = 21000000 * COIN
BIP125_SEQUENCE_NUMBER = 0xfffffffd # Sequence number that is BIP 125 opt-in and BIP 68-opt-out
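The new MAX_MONEY constant simply spells out the 21 million coin supply cap in satoshis, which is what the overflow checks in mempool_accept.py above compare against:

    COIN = 100000000  # 1 BTC in satoshis
    MAX_MONEY = 21000000 * COIN
    assert MAX_MONEY == 2_100_000_000_000_000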
diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py
index 51aa9057f7..92725dfcf4 100644
--- a/test/functional/test_framework/script.py
+++ b/test/functional/test_framework/script.py
@@ -6,21 +6,35 @@
This file is modified from python-bitcoinlib.
"""
-
-from .messages import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string
-
import hashlib
import struct
-from .bignum import bn2vch
+from .messages import (
+ CTransaction,
+ CTxOut,
+ hash256,
+ ser_string,
+ ser_uint256,
+ sha256,
+ uint256_from_str,
+)
MAX_SCRIPT_ELEMENT_SIZE = 520
-
OPCODE_NAMES = {}
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
+def bn2vch(v):
+ """Convert number to bitcoin-specific little endian format."""
+ # We need v.bit_length() bits, plus a sign bit for every nonzero number.
+ n_bits = v.bit_length() + (v != 0)
+ # The number of bytes for that is:
+ n_bytes = (n_bits + 7) // 8
+ # Convert number to absolute value + sign in top bit.
+ encoded_v = 0 if v == 0 else abs(v) | ((v < 0) << (n_bytes * 8 - 1))
+ # Serialize to bytes
+ return encoded_v.to_bytes(n_bytes, 'little')
_opcode_instances = []
class CScriptOp(int):
@@ -31,13 +45,13 @@ class CScriptOp(int):
def encode_op_pushdata(d):
"""Encode a PUSHDATA op, returning bytes"""
if len(d) < 0x4c:
- return b'' + bytes([len(d)]) + d # OP_PUSHDATA
+ return b'' + bytes([len(d)]) + d # OP_PUSHDATA
elif len(d) <= 0xff:
- return b'\x4c' + bytes([len(d)]) + d # OP_PUSHDATA1
+ return b'\x4c' + bytes([len(d)]) + d # OP_PUSHDATA1
elif len(d) <= 0xffff:
- return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
+ return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
elif len(d) <= 0xffffffff:
- return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
+ return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@@ -50,7 +64,7 @@ class CScriptOp(int):
if n == 0:
return OP_0
else:
- return CScriptOp(OP_1 + n-1)
+ return CScriptOp(OP_1 + n - 1)
def decode_op_n(self):
"""Decode a small integer opcode, returning an integer"""
@@ -60,7 +74,7 @@ class CScriptOp(int):
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
- return int(self - OP_1+1)
+ return int(self - OP_1 + 1)
def is_small_int(self):
"""Return true if the op pushes a small integer to the stack"""
@@ -87,7 +101,7 @@ class CScriptOp(int):
return _opcode_instances[n]
# Populate opcode instance table
-for n in range(0xff+1):
+for n in range(0xff + 1):
CScriptOp(n)
@@ -100,7 +114,7 @@ OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
-OP_TRUE=OP_1
+OP_TRUE = OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
@@ -232,122 +246,122 @@ OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
OPCODE_NAMES.update({
- OP_0 : 'OP_0',
- OP_PUSHDATA1 : 'OP_PUSHDATA1',
- OP_PUSHDATA2 : 'OP_PUSHDATA2',
- OP_PUSHDATA4 : 'OP_PUSHDATA4',
- OP_1NEGATE : 'OP_1NEGATE',
- OP_RESERVED : 'OP_RESERVED',
- OP_1 : 'OP_1',
- OP_2 : 'OP_2',
- OP_3 : 'OP_3',
- OP_4 : 'OP_4',
- OP_5 : 'OP_5',
- OP_6 : 'OP_6',
- OP_7 : 'OP_7',
- OP_8 : 'OP_8',
- OP_9 : 'OP_9',
- OP_10 : 'OP_10',
- OP_11 : 'OP_11',
- OP_12 : 'OP_12',
- OP_13 : 'OP_13',
- OP_14 : 'OP_14',
- OP_15 : 'OP_15',
- OP_16 : 'OP_16',
- OP_NOP : 'OP_NOP',
- OP_VER : 'OP_VER',
- OP_IF : 'OP_IF',
- OP_NOTIF : 'OP_NOTIF',
- OP_VERIF : 'OP_VERIF',
- OP_VERNOTIF : 'OP_VERNOTIF',
- OP_ELSE : 'OP_ELSE',
- OP_ENDIF : 'OP_ENDIF',
- OP_VERIFY : 'OP_VERIFY',
- OP_RETURN : 'OP_RETURN',
- OP_TOALTSTACK : 'OP_TOALTSTACK',
- OP_FROMALTSTACK : 'OP_FROMALTSTACK',
- OP_2DROP : 'OP_2DROP',
- OP_2DUP : 'OP_2DUP',
- OP_3DUP : 'OP_3DUP',
- OP_2OVER : 'OP_2OVER',
- OP_2ROT : 'OP_2ROT',
- OP_2SWAP : 'OP_2SWAP',
- OP_IFDUP : 'OP_IFDUP',
- OP_DEPTH : 'OP_DEPTH',
- OP_DROP : 'OP_DROP',
- OP_DUP : 'OP_DUP',
- OP_NIP : 'OP_NIP',
- OP_OVER : 'OP_OVER',
- OP_PICK : 'OP_PICK',
- OP_ROLL : 'OP_ROLL',
- OP_ROT : 'OP_ROT',
- OP_SWAP : 'OP_SWAP',
- OP_TUCK : 'OP_TUCK',
- OP_CAT : 'OP_CAT',
- OP_SUBSTR : 'OP_SUBSTR',
- OP_LEFT : 'OP_LEFT',
- OP_RIGHT : 'OP_RIGHT',
- OP_SIZE : 'OP_SIZE',
- OP_INVERT : 'OP_INVERT',
- OP_AND : 'OP_AND',
- OP_OR : 'OP_OR',
- OP_XOR : 'OP_XOR',
- OP_EQUAL : 'OP_EQUAL',
- OP_EQUALVERIFY : 'OP_EQUALVERIFY',
- OP_RESERVED1 : 'OP_RESERVED1',
- OP_RESERVED2 : 'OP_RESERVED2',
- OP_1ADD : 'OP_1ADD',
- OP_1SUB : 'OP_1SUB',
- OP_2MUL : 'OP_2MUL',
- OP_2DIV : 'OP_2DIV',
- OP_NEGATE : 'OP_NEGATE',
- OP_ABS : 'OP_ABS',
- OP_NOT : 'OP_NOT',
- OP_0NOTEQUAL : 'OP_0NOTEQUAL',
- OP_ADD : 'OP_ADD',
- OP_SUB : 'OP_SUB',
- OP_MUL : 'OP_MUL',
- OP_DIV : 'OP_DIV',
- OP_MOD : 'OP_MOD',
- OP_LSHIFT : 'OP_LSHIFT',
- OP_RSHIFT : 'OP_RSHIFT',
- OP_BOOLAND : 'OP_BOOLAND',
- OP_BOOLOR : 'OP_BOOLOR',
- OP_NUMEQUAL : 'OP_NUMEQUAL',
- OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
- OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
- OP_LESSTHAN : 'OP_LESSTHAN',
- OP_GREATERTHAN : 'OP_GREATERTHAN',
- OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
- OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
- OP_MIN : 'OP_MIN',
- OP_MAX : 'OP_MAX',
- OP_WITHIN : 'OP_WITHIN',
- OP_RIPEMD160 : 'OP_RIPEMD160',
- OP_SHA1 : 'OP_SHA1',
- OP_SHA256 : 'OP_SHA256',
- OP_HASH160 : 'OP_HASH160',
- OP_HASH256 : 'OP_HASH256',
- OP_CODESEPARATOR : 'OP_CODESEPARATOR',
- OP_CHECKSIG : 'OP_CHECKSIG',
- OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
- OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
- OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
- OP_NOP1 : 'OP_NOP1',
- OP_CHECKLOCKTIMEVERIFY : 'OP_CHECKLOCKTIMEVERIFY',
- OP_CHECKSEQUENCEVERIFY : 'OP_CHECKSEQUENCEVERIFY',
- OP_NOP4 : 'OP_NOP4',
- OP_NOP5 : 'OP_NOP5',
- OP_NOP6 : 'OP_NOP6',
- OP_NOP7 : 'OP_NOP7',
- OP_NOP8 : 'OP_NOP8',
- OP_NOP9 : 'OP_NOP9',
- OP_NOP10 : 'OP_NOP10',
- OP_SMALLINTEGER : 'OP_SMALLINTEGER',
- OP_PUBKEYS : 'OP_PUBKEYS',
- OP_PUBKEYHASH : 'OP_PUBKEYHASH',
- OP_PUBKEY : 'OP_PUBKEY',
- OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
+ OP_0: 'OP_0',
+ OP_PUSHDATA1: 'OP_PUSHDATA1',
+ OP_PUSHDATA2: 'OP_PUSHDATA2',
+ OP_PUSHDATA4: 'OP_PUSHDATA4',
+ OP_1NEGATE: 'OP_1NEGATE',
+ OP_RESERVED: 'OP_RESERVED',
+ OP_1: 'OP_1',
+ OP_2: 'OP_2',
+ OP_3: 'OP_3',
+ OP_4: 'OP_4',
+ OP_5: 'OP_5',
+ OP_6: 'OP_6',
+ OP_7: 'OP_7',
+ OP_8: 'OP_8',
+ OP_9: 'OP_9',
+ OP_10: 'OP_10',
+ OP_11: 'OP_11',
+ OP_12: 'OP_12',
+ OP_13: 'OP_13',
+ OP_14: 'OP_14',
+ OP_15: 'OP_15',
+ OP_16: 'OP_16',
+ OP_NOP: 'OP_NOP',
+ OP_VER: 'OP_VER',
+ OP_IF: 'OP_IF',
+ OP_NOTIF: 'OP_NOTIF',
+ OP_VERIF: 'OP_VERIF',
+ OP_VERNOTIF: 'OP_VERNOTIF',
+ OP_ELSE: 'OP_ELSE',
+ OP_ENDIF: 'OP_ENDIF',
+ OP_VERIFY: 'OP_VERIFY',
+ OP_RETURN: 'OP_RETURN',
+ OP_TOALTSTACK: 'OP_TOALTSTACK',
+ OP_FROMALTSTACK: 'OP_FROMALTSTACK',
+ OP_2DROP: 'OP_2DROP',
+ OP_2DUP: 'OP_2DUP',
+ OP_3DUP: 'OP_3DUP',
+ OP_2OVER: 'OP_2OVER',
+ OP_2ROT: 'OP_2ROT',
+ OP_2SWAP: 'OP_2SWAP',
+ OP_IFDUP: 'OP_IFDUP',
+ OP_DEPTH: 'OP_DEPTH',
+ OP_DROP: 'OP_DROP',
+ OP_DUP: 'OP_DUP',
+ OP_NIP: 'OP_NIP',
+ OP_OVER: 'OP_OVER',
+ OP_PICK: 'OP_PICK',
+ OP_ROLL: 'OP_ROLL',
+ OP_ROT: 'OP_ROT',
+ OP_SWAP: 'OP_SWAP',
+ OP_TUCK: 'OP_TUCK',
+ OP_CAT: 'OP_CAT',
+ OP_SUBSTR: 'OP_SUBSTR',
+ OP_LEFT: 'OP_LEFT',
+ OP_RIGHT: 'OP_RIGHT',
+ OP_SIZE: 'OP_SIZE',
+ OP_INVERT: 'OP_INVERT',
+ OP_AND: 'OP_AND',
+ OP_OR: 'OP_OR',
+ OP_XOR: 'OP_XOR',
+ OP_EQUAL: 'OP_EQUAL',
+ OP_EQUALVERIFY: 'OP_EQUALVERIFY',
+ OP_RESERVED1: 'OP_RESERVED1',
+ OP_RESERVED2: 'OP_RESERVED2',
+ OP_1ADD: 'OP_1ADD',
+ OP_1SUB: 'OP_1SUB',
+ OP_2MUL: 'OP_2MUL',
+ OP_2DIV: 'OP_2DIV',
+ OP_NEGATE: 'OP_NEGATE',
+ OP_ABS: 'OP_ABS',
+ OP_NOT: 'OP_NOT',
+ OP_0NOTEQUAL: 'OP_0NOTEQUAL',
+ OP_ADD: 'OP_ADD',
+ OP_SUB: 'OP_SUB',
+ OP_MUL: 'OP_MUL',
+ OP_DIV: 'OP_DIV',
+ OP_MOD: 'OP_MOD',
+ OP_LSHIFT: 'OP_LSHIFT',
+ OP_RSHIFT: 'OP_RSHIFT',
+ OP_BOOLAND: 'OP_BOOLAND',
+ OP_BOOLOR: 'OP_BOOLOR',
+ OP_NUMEQUAL: 'OP_NUMEQUAL',
+ OP_NUMEQUALVERIFY: 'OP_NUMEQUALVERIFY',
+ OP_NUMNOTEQUAL: 'OP_NUMNOTEQUAL',
+ OP_LESSTHAN: 'OP_LESSTHAN',
+ OP_GREATERTHAN: 'OP_GREATERTHAN',
+ OP_LESSTHANOREQUAL: 'OP_LESSTHANOREQUAL',
+ OP_GREATERTHANOREQUAL: 'OP_GREATERTHANOREQUAL',
+ OP_MIN: 'OP_MIN',
+ OP_MAX: 'OP_MAX',
+ OP_WITHIN: 'OP_WITHIN',
+ OP_RIPEMD160: 'OP_RIPEMD160',
+ OP_SHA1: 'OP_SHA1',
+ OP_SHA256: 'OP_SHA256',
+ OP_HASH160: 'OP_HASH160',
+ OP_HASH256: 'OP_HASH256',
+ OP_CODESEPARATOR: 'OP_CODESEPARATOR',
+ OP_CHECKSIG: 'OP_CHECKSIG',
+ OP_CHECKSIGVERIFY: 'OP_CHECKSIGVERIFY',
+ OP_CHECKMULTISIG: 'OP_CHECKMULTISIG',
+ OP_CHECKMULTISIGVERIFY: 'OP_CHECKMULTISIGVERIFY',
+ OP_NOP1: 'OP_NOP1',
+ OP_CHECKLOCKTIMEVERIFY: 'OP_CHECKLOCKTIMEVERIFY',
+ OP_CHECKSEQUENCEVERIFY: 'OP_CHECKSEQUENCEVERIFY',
+ OP_NOP4: 'OP_NOP4',
+ OP_NOP5: 'OP_NOP5',
+ OP_NOP6: 'OP_NOP6',
+ OP_NOP7: 'OP_NOP7',
+ OP_NOP8: 'OP_NOP8',
+ OP_NOP9: 'OP_NOP9',
+ OP_NOP10: 'OP_NOP10',
+ OP_SMALLINTEGER: 'OP_SMALLINTEGER',
+ OP_PUBKEYS: 'OP_PUBKEYS',
+ OP_PUBKEYHASH: 'OP_PUBKEYHASH',
+ OP_PUBKEY: 'OP_PUBKEY',
+ OP_INVALIDOPCODE: 'OP_INVALIDOPCODE',
})
class CScriptInvalidError(Exception):
@@ -392,10 +406,10 @@ class CScriptNum:
if len(value) == 0:
return result
for i, byte in enumerate(value):
- result |= int(byte) << 8*i
+ result |= int(byte) << 8 * i
if value[-1] >= 0x80:
# Mask for all but the highest result bit
- num_mask = (2**(len(value)*8) - 1) >> 1
+ num_mask = (2**(len(value) * 8) - 1) >> 1
result &= num_mask
result *= -1
return result
@@ -493,21 +507,20 @@ class CScript(bytes):
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
- datasize = self[i] + (self[i+1] << 8)
+ datasize = self[i] + (self[i + 1] << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
- datasize = self[i] + (self[i+1] << 8) + (self[i+2] << 16) + (self[i+3] << 24)
+ datasize = self[i] + (self[i + 1] << 8) + (self[i + 2] << 16) + (self[i + 3] << 24)
i += 4
else:
- assert False # shouldn't happen
-
+ assert False # shouldn't happen
- data = bytes(self[i:i+datasize])
+ data = bytes(self[i:i + datasize])
# Check for truncation
if len(data) < datasize:
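A standalone walk-through of the CScriptNum decoding touched in the hunk above: bytes are little-endian, and if the top bit of the last byte is set, the magnitude is masked out and negated (the helper below mirrors that logic outside the class):

    def decode_scriptnum(value):
        result = 0
        for i, byte in enumerate(value):
            result |= int(byte) << 8 * i
        if value and value[-1] >= 0x80:
            num_mask = (2**(len(value) * 8) - 1) >> 1  # every bit except the sign bit
            result &= num_mask
            result *= -1
        return result

    assert decode_scriptnum(bytes([0x81])) == -1          # 0x81 -> magnitude 1, sign bit set
    assert decode_scriptnum(bytes([0x00, 0x81])) == -256  # matches bn2vch(-0x100) above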
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index da92c6325a..e36fb350c6 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -369,7 +369,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
# Public helper methods. These can be accessed by the subclass test scripts.
- def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
+ def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None, binary_cli=None, versions=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
@@ -380,11 +380,17 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
+ if versions is None:
+ versions = [None] * num_nodes
if binary is None:
binary = [self.options.bitcoind] * num_nodes
+ if binary_cli is None:
+ binary_cli = [self.options.bitcoincli] * num_nodes
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
+ assert_equal(len(versions), num_nodes)
assert_equal(len(binary), num_nodes)
+ assert_equal(len(binary_cli), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(
i,
@@ -393,7 +399,8 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
rpchost=rpchost,
timewait=self.rpc_timeout,
bitcoind=binary[i],
- bitcoin_cli=self.options.bitcoincli,
+ bitcoin_cli=binary_cli[i],
+ version=versions[i],
coverage_dir=self.options.coveragedir,
cwd=self.options.tmpdir,
extra_conf=extra_confs[i],
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 0742dbe617..c7559ac7c8 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -60,7 +60,7 @@ class TestNode():
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
- def __init__(self, i, datadir, *, chain, rpchost, timewait, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False):
+ def __init__(self, i, datadir, *, chain, rpchost, timewait, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None):
"""
Kwargs:
start_perf (bool): If True, begin profiling the node with `perf` as soon as
@@ -84,6 +84,7 @@ class TestNode():
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
+ self.version = version
# Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
# This means that starting a bitcoind using the temp dir to debug a failed test won't
# spam debug.log.
@@ -91,7 +92,6 @@ class TestNode():
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
- "-logthreadnames",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
@@ -107,6 +107,9 @@ class TestNode():
"--gen-suppressions=all", "--exit-on-first-error=yes",
"--error-exitcode=1", "--quiet"] + self.args
+ if self.version is None or self.version >= 190000:
+ self.args.append("-logthreadnames")
+
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.start_perf = start_perf
@@ -254,7 +257,11 @@ class TestNode():
return
self.log.debug("Stopping node")
try:
- self.stop(wait=wait)
+ # Do not use wait argument when testing older nodes, e.g. in feature_backwards_compatibility.py
+ if self.version is None or self.version >= 180000:
+ self.stop(wait=wait)
+ else:
+ self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 8b527bcff0..ce9b37edfb 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -19,9 +19,8 @@ import datetime
import os
import time
import shutil
-import signal
-import sys
import subprocess
+import sys
import tempfile
import re
import logging
@@ -158,6 +157,7 @@ BASE_SCRIPTS = [
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
+ 'feature_backwards_compatibility.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_getblockfilter.py',
@@ -174,6 +174,7 @@ BASE_SCRIPTS = [
'wallet_balance.py',
'feature_nulldummy.py',
'mempool_accept.py',
+ 'mempool_expiry.py',
'wallet_import_rescan.py',
'wallet_import_with_label.py',
'rpc_bind.py --ipv4',
@@ -181,7 +182,6 @@ BASE_SCRIPTS = [
'rpc_bind.py --nonloopback',
'mining_basic.py',
'wallet_bumpfee.py',
- 'wallet_bumpfee_totalfee_deprecation.py',
'wallet_implicitsegwit.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
@@ -194,6 +194,7 @@ BASE_SCRIPTS = [
'wallet_fallbackfee.py',
'rpc_dumptxoutset.py',
'feature_minchainwork.py',
+ 'rpc_estimatefee.py',
'rpc_getblockstats.py',
'wallet_create_tx.py',
'p2p_fingerprint.py',
@@ -204,6 +205,7 @@ BASE_SCRIPTS = [
'p2p_dos_header_tree.py',
'p2p_unrequested_blocks.py',
'feature_includeconf.py',
+ 'feature_asmap.py',
'rpc_deriveaddresses.py',
'rpc_deriveaddresses.py --usecli',
'rpc_scantxoutset.py',
@@ -214,9 +216,11 @@ BASE_SCRIPTS = [
'feature_config_args.py',
'rpc_getaddressinfo_labels_purpose_deprecation.py',
'rpc_getaddressinfo_label_deprecation.py',
+ 'rpc_getdescriptorinfo.py',
'rpc_help.py',
'feature_help.py',
'feature_shutdown.py',
+ 'framework_test_script.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
@@ -360,11 +364,10 @@ def main():
args=passon_args,
combined_logs_len=args.combinedlogslen,
failfast=args.failfast,
- runs_ci=args.ci,
use_term_control=args.ansi,
)
-def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, runs_ci, use_term_control):
+def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, use_term_control):
args = args or []
# Warn if bitcoind is already running
@@ -406,7 +409,6 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=
tmpdir=tmpdir,
test_list=test_list,
flags=flags,
- timeout_duration=40 * 60 if runs_ci else float('inf'), # in seconds
use_term_control=use_term_control,
)
start_time = time.time()
@@ -491,12 +493,11 @@ class TestHandler:
Trigger the test scripts passed in via the list.
"""
- def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, timeout_duration, use_term_control):
+ def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, use_term_control):
assert num_tests_parallel >= 1
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
- self.timeout_duration = timeout_duration
self.test_list = test_list
self.flags = flags
self.num_running = 0
@@ -537,10 +538,6 @@ class TestHandler:
time.sleep(.5)
for job in self.jobs:
(name, start_time, proc, testdir, log_out, log_err) = job
- if int(time.time() - start_time) > self.timeout_duration:
- # Timeout individual tests if timeout is specified (to stop
- # tests hanging and not providing useful output).
- proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
@@ -609,7 +606,7 @@ class TestResult():
def check_script_prefixes():
"""Check that test scripts start with one of the allowed name prefixes."""
- good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool)_")
+ good_prefixes_re = re.compile("^(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool|framework_test)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if bad_script_names:
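A quick check of the widened prefix regex (the filenames below are just examples): it now anchors at the start of the name and accepts the new framework_test prefix used by framework_test_script.py:

    import re

    good_prefixes_re = re.compile("^(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool|framework_test)_")
    assert good_prefixes_re.match("framework_test_script.py")
    assert good_prefixes_re.match("mempool_expiry.py")
    assert good_prefixes_re.match("notaprefix_test.py") is None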
diff --git a/test/functional/wallet_address_types.py b/test/functional/wallet_address_types.py
index f08606e622..79b6db986b 100755
--- a/test/functional/wallet_address_types.py
+++ b/test/functional/wallet_address_types.py
@@ -82,7 +82,7 @@ class AddressTypeTest(BitcoinTestFramework):
]
# whitelist all peers to speed up tx relay / mempool sync
for args in self.extra_args:
- args.append("-whitelist=127.0.0.1")
+ args.append("-whitelist=noban@127.0.0.1")
self.supports_cli = False
def skip_test_if_missing_module(self):
diff --git a/test/functional/wallet_avoidreuse.py b/test/functional/wallet_avoidreuse.py
index 1ca02a695c..8e2dc03ac2 100755
--- a/test/functional/wallet_avoidreuse.py
+++ b/test/functional/wallet_avoidreuse.py
@@ -70,7 +70,7 @@ class AvoidReuseTest(BitcoinTestFramework):
self.num_nodes = 2
# This test isn't testing txn relay/timing, so set whitelist on the
# peers for instant txn relay. This speeds up the test run time 2-3x.
- self.extra_args = [["-whitelist=127.0.0.1"]] * self.num_nodes
+ self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py
index 16c69f304a..fb80a06433 100755
--- a/test/functional/wallet_backup.py
+++ b/test/functional/wallet_backup.py
@@ -50,10 +50,10 @@ class WalletBackupTest(BitcoinTestFramework):
# nodes 1, 2, 3 are spenders, let's give them a keypool=100
# whitelist all peers to speed up tx relay / mempool sync
self.extra_args = [
- ["-keypool=100", "-whitelist=127.0.0.1"],
- ["-keypool=100", "-whitelist=127.0.0.1"],
- ["-keypool=100", "-whitelist=127.0.0.1"],
- ["-whitelist=127.0.0.1"]
+ ["-whitelist=noban@127.0.0.1", "-keypool=100"],
+ ["-whitelist=noban@127.0.0.1", "-keypool=100"],
+ ["-whitelist=noban@127.0.0.1", "-keypool=100"],
+ ["-whitelist=noban@127.0.0.1"],
]
self.rpc_timeout = 120
diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py
index 4eb0d19a4f..38c9807757 100755
--- a/test/functional/wallet_bumpfee.py
+++ b/test/functional/wallet_bumpfee.py
@@ -30,6 +30,13 @@ from test_framework.util import (
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
+# Fee rates (in BTC per 1000 vbytes)
+INSUFFICIENT = 0.00001000
+ECONOMICAL = 0.00050000
+NORMAL = 0.00100000
+HIGH = 0.00500000
+TOO_HIGH = 1.00000000
+
class BumpFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
@@ -37,7 +44,6 @@ class BumpFeeTest(BitcoinTestFramework):
self.extra_args = [[
"-walletrbf={}".format(i),
"-mintxfee=0.00002",
- "-deprecatedrpc=totalFee",
"-addresstype=bech32",
] for i in range(self.num_nodes)]
@@ -71,34 +77,34 @@ class BumpFeeTest(BitcoinTestFramework):
test_simple_bumpfee_succeeds(self, "default", rbf_node, peer_node, dest_address)
test_simple_bumpfee_succeeds(self, "fee_rate", rbf_node, peer_node, dest_address)
test_feerate_args(self, rbf_node, peer_node, dest_address)
- test_segwit_bumpfee_succeeds(rbf_node, dest_address)
- test_nonrbf_bumpfee_fails(peer_node, dest_address)
- test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
- test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
- test_small_output_fails(rbf_node, dest_address)
- test_dust_to_fee(rbf_node, dest_address)
- test_settxfee(rbf_node, dest_address)
+ test_segwit_bumpfee_succeeds(self, rbf_node, dest_address)
+ test_nonrbf_bumpfee_fails(self, peer_node, dest_address)
+ test_notmine_bumpfee_fails(self, rbf_node, peer_node, dest_address)
+ test_bumpfee_with_descendant_fails(self, rbf_node, rbf_node_address, dest_address)
+ test_dust_to_fee(self, rbf_node, dest_address)
+ test_settxfee(self, rbf_node, dest_address)
test_watchonly_psbt(self, peer_node, rbf_node, dest_address)
- test_rebumping(rbf_node, dest_address)
- test_rebumping_not_replaceable(rbf_node, dest_address)
- test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
- test_bumpfee_metadata(rbf_node, dest_address)
- test_locked_wallet_fails(rbf_node, dest_address)
- test_change_script_match(rbf_node, dest_address)
+ test_rebumping(self, rbf_node, dest_address)
+ test_rebumping_not_replaceable(self, rbf_node, dest_address)
+ test_unconfirmed_not_spendable(self, rbf_node, rbf_node_address)
+ test_bumpfee_metadata(self, rbf_node, dest_address)
+ test_locked_wallet_fails(self, rbf_node, dest_address)
+ test_change_script_match(self, rbf_node, dest_address)
test_maxtxfee_fails(self, rbf_node, dest_address)
# These tests wipe out a number of utxos that are expected in other tests
- test_small_output_with_feerate_succeeds(rbf_node, dest_address)
- test_no_more_inputs_fails(rbf_node, dest_address)
+ test_small_output_with_feerate_succeeds(self, rbf_node, dest_address)
+ test_no_more_inputs_fails(self, rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
+ self.log.info('Test simple bumpfee: {}'.format(mode))
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
self.sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
if mode == "fee_rate":
- bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate":0.0015})
+ bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": NORMAL})
else:
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
@@ -119,23 +125,25 @@ def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_feerate_args(self, rbf_node, peer_node, dest_address):
+ self.log.info('Test fee_rate args')
rbfid = spend_one_input(rbf_node, dest_address)
self.sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
- assert_raises_rpc_error(-8, "confTarget can't be set with totalFee or fee_rate. Please provide either a confirmation target in blocks for automatic fee estimation, or an explicit fee rate.", rbf_node.bumpfee, rbfid, {"fee_rate":0.00001, "confTarget":1})
- assert_raises_rpc_error(-8, "confTarget can't be set with totalFee or fee_rate. Please provide either a confirmation target in blocks for automatic fee estimation, or an explicit fee rate.", rbf_node.bumpfee, rbfid, {"totalFee":0.00001, "confTarget":1})
- assert_raises_rpc_error(-8, "fee_rate can't be set along with totalFee.", rbf_node.bumpfee, rbfid, {"fee_rate":0.00001, "totalFee":0.001})
+ assert_raises_rpc_error(-8, "confTarget can't be set with fee_rate. Please provide either a confirmation target in blocks for automatic fee estimation, or an explicit fee rate.", rbf_node.bumpfee, rbfid, {"fee_rate": NORMAL, "confTarget": 1})
+
+ assert_raises_rpc_error(-3, "Unexpected key totalFee", rbf_node.bumpfee, rbfid, {"totalFee": NORMAL})
# Bumping to just above minrelay should fail to increase total fee enough, at least
- assert_raises_rpc_error(-8, "Insufficient total fee", rbf_node.bumpfee, rbfid, {"fee_rate":0.00001000})
+ assert_raises_rpc_error(-8, "Insufficient total fee", rbf_node.bumpfee, rbfid, {"fee_rate": INSUFFICIENT})
assert_raises_rpc_error(-3, "Amount out of range", rbf_node.bumpfee, rbfid, {"fee_rate":-1})
- assert_raises_rpc_error(-4, "is too high (cannot be higher than", rbf_node.bumpfee, rbfid, {"fee_rate":1})
+ assert_raises_rpc_error(-4, "is too high (cannot be higher than", rbf_node.bumpfee, rbfid, {"fee_rate": TOO_HIGH})
-def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
+def test_segwit_bumpfee_succeeds(self, rbf_node, dest_address):
+ self.log.info('Test that bumpfee works on a transaction funded by a segwit output')
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
@@ -165,14 +173,14 @@ def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
assert rbfid not in rbf_node.getrawmempool()
-def test_nonrbf_bumpfee_fails(peer_node, dest_address):
- # cannot replace a non RBF transaction (from node which did not enable RBF)
+def test_nonrbf_bumpfee_fails(self, peer_node, dest_address):
+ self.log.info('Test that we cannot replace a non-RBF transaction')
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
-def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
- # cannot bump fee unless the tx has only inputs that we own.
+def test_notmine_bumpfee_fails(self, rbf_node, peer_node, dest_address):
+ self.log.info('Test that fee cannot be bumped if the transaction includes non-owned inputs')
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
@@ -192,8 +200,8 @@ def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
rbf_node.bumpfee, rbfid)
-def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
- # cannot bump fee if the transaction has a descendant
+def test_bumpfee_with_descendant_fails(self, rbf_node, rbf_node_address, dest_address):
+ self.log.info('Test that fee cannot be bumped when the transaction has a descendant')
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
@@ -201,15 +209,8 @@ def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
-def test_small_output_fails(rbf_node, dest_address):
- # cannot bump fee with a too-small output
- rbfid = spend_one_input(rbf_node, dest_address)
- rbf_node.bumpfee(rbfid, {"totalFee": 50000})
-
- rbfid = spend_one_input(rbf_node, dest_address)
- assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
-
-def test_small_output_with_feerate_succeeds(rbf_node, dest_address):
+def test_small_output_with_feerate_succeeds(self, rbf_node, dest_address):
+ self.log.info('Test that a small output with a fee rate bump succeeds')
# Make sure additional inputs exist
rbf_node.generatetoaddress(101, rbf_node.getnewaddress())
@@ -217,9 +218,9 @@ def test_small_output_with_feerate_succeeds(rbf_node, dest_address):
input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
assert_equal(len(input_list), 1)
original_txin = input_list[0]
- # Keep bumping until we out-spend change output
+ self.log.info('Keep bumping until the transaction fee exceeds the non-destination value')
tx_fee = 0
- while tx_fee < Decimal("0.0005"):
+ while True:
input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
new_item = list(input_list)[0]
assert_equal(len(input_list), 1)
@@ -231,7 +232,11 @@ def test_small_output_with_feerate_succeeds(rbf_node, dest_address):
assert rbfid not in raw_pool
assert rbfid_new in raw_pool
rbfid = rbfid_new
- tx_fee = rbfid_new_details["origfee"]
+ tx_fee = rbfid_new_details["fee"]
+
+ # Total value from input not going to destination
+ if tx_fee > Decimal('0.00050000'):
+ break
# input(s) have been added
final_input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
@@ -244,20 +249,25 @@ def test_small_output_with_feerate_succeeds(rbf_node, dest_address):
rbf_node.generatetoaddress(1, rbf_node.getnewaddress())
assert_equal(rbf_node.gettransaction(rbfid)["confirmations"], 1)
-def test_dust_to_fee(rbf_node, dest_address):
- # check that if output is reduced to dust, it will be converted to fee
- # the bumped tx sets fee=49,900, but it converts to 50,000
+def test_dust_to_fee(self, rbf_node, dest_address):
+ self.log.info('Test that a bumped output reduced to dust is converted to fee')
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
- # (31-vbyte p2wpkh output size + 67-vbyte p2wpkh spend estimate) * 10k(discard_rate) / 1000 = 980
- bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 50000 - 980})
+ # size of transaction (p2wpkh, 1 input, 2 outputs): 141 vbytes
+ assert_equal(fulltx["vsize"], 141)
+ # bump with fee_rate of 0.00350000 BTC per 1000 vbytes
+ # expected bump fee of 141 vbytes * fee_rate 0.00350000 BTC / 1000 vbytes = 0.00049350 BTC
+ # but dust is dropped, so actual bump fee is 0.00050000
+ bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": 0.0035})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) # change output is eliminated
+ assert_equal(full_bumped_tx["vout"][0]['value'], Decimal("0.00050000"))
-def test_settxfee(rbf_node, dest_address):
+def test_settxfee(self, rbf_node, dest_address):
+ self.log.info('Test settxfee')
assert_raises_rpc_error(-8, "txfee cannot be less than min relay tx fee", rbf_node.settxfee, Decimal('0.000005'))
assert_raises_rpc_error(-8, "txfee cannot be less than wallet min fee", rbf_node.settxfee, Decimal('0.000015'))
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
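For the dust-to-fee case above, the expected numbers can be reproduced with a short sketch, assuming the 0.00100000 BTC input, 0.00050000 BTC destination output and roughly 0.00049000 BTC change that spend_one_input builds (variable names are illustrative, not part of the patch):
from decimal import Decimal
vsize = 141                              # p2wpkh, 1 input, 2 outputs
fee_rate = Decimal("0.0035")             # BTC per 1000 vbytes
requested_fee = vsize * fee_rate / 1000  # 0.00049350 BTC
change = Decimal("0.001") - Decimal("0.0005") - requested_fee  # 0.00000650 BTC would remain as change
# That remainder is below the wallet's discard threshold, so the change output is dropped
# and its value is added to the fee: the whole non-destination amount becomes the fee.
actual_fee = Decimal("0.001") - Decimal("0.0005")
assert requested_fee == Decimal("0.0004935")
assert actual_fee == Decimal("0.0005")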
@@ -272,17 +282,20 @@ def test_settxfee(rbf_node, dest_address):
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
-def test_maxtxfee_fails(test, rbf_node, dest_address):
+def test_maxtxfee_fails(self, rbf_node, dest_address):
+ self.log.info('Test that bumpfee fails when it exceeds -maxtxfee')
# size of bumped transaction (p2wpkh, 1 input, 2 outputs): 141 vbytes
- # expected bumping feerate of 20 sats/vbyte => 141*20 sats = 0.00002820 btc
- test.restart_node(1, ['-maxtxfee=0.000025'] + test.extra_args[1])
+ # expected bump fee of 141 vbytes * 0.00020000 BTC / 1000 vbytes = 0.00002820 BTC
+ # which exceeds maxtxfee and is expected to raise
+ self.restart_node(1, ['-maxtxfee=0.000025'] + self.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Unable to create transaction: Fee exceeds maximum configured by -maxtxfee", rbf_node.bumpfee, rbfid)
- test.restart_node(1, test.extra_args[1])
+ self.restart_node(1, self.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
-def test_watchonly_psbt(test, peer_node, rbf_node, dest_address):
+def test_watchonly_psbt(self, peer_node, rbf_node, dest_address):
+ self.log.info('Test that a PSBT is returned when bumpfee is called on a watchonly wallet')
priv_rec_desc = "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0/*)#rweraev0"
pub_rec_desc = rbf_node.getdescriptorinfo(priv_rec_desc)["descriptor"]
priv_change_desc = "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/*)#j6uzqvuh"
@@ -334,7 +347,7 @@ def test_watchonly_psbt(test, peer_node, rbf_node, dest_address):
funding_address2 = watcher.getnewaddress(address_type='bech32')
peer_node.sendmany("", {funding_address1: 0.001, funding_address2: 0.001})
peer_node.generate(1)
- test.sync_all()
+ self.sync_all()
# Create single-input PSBT for transaction to be bumped
psbt = watcher.walletcreatefundedpsbt([], {dest_address:0.0005}, 0, {"feeRate": 0.00001}, True)['psbt']
@@ -344,7 +357,7 @@ def test_watchonly_psbt(test, peer_node, rbf_node, dest_address):
assert_equal(len(watcher.decodepsbt(psbt)["tx"]["vin"]), 1)
# Bump fee, obnoxiously high to add additional watchonly input
- bumped_psbt = watcher.bumpfee(original_txid, {"fee_rate":0.005})
+ bumped_psbt = watcher.bumpfee(original_txid, {"fee_rate": HIGH})
assert_greater_than(len(watcher.decodepsbt(bumped_psbt['psbt'])["tx"]["vin"]), 1)
assert "txid" not in bumped_psbt
assert_equal(bumped_psbt["origfee"], -watcher.gettransaction(original_txid)["fee"])
@@ -363,24 +376,24 @@ def test_watchonly_psbt(test, peer_node, rbf_node, dest_address):
rbf_node.unloadwallet("watcher")
rbf_node.unloadwallet("signer")
-def test_rebumping(rbf_node, dest_address):
- # check that re-bumping the original tx fails, but bumping the bumper succeeds
+def test_rebumping(self, rbf_node, dest_address):
+ self.log.info('Test that re-bumping the original tx fails, but bumping its successor works')
rbfid = spend_one_input(rbf_node, dest_address)
- bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
- assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
- rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
+ bumped = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL})
+ assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"fee_rate": NORMAL})
+ rbf_node.bumpfee(bumped["txid"], {"fee_rate": NORMAL})
-def test_rebumping_not_replaceable(rbf_node, dest_address):
- # check that re-bumping a non-replaceable bump tx fails
+def test_rebumping_not_replaceable(self, rbf_node, dest_address):
+ self.log.info('Test that re-bumping a non-replaceable bump fails')
rbfid = spend_one_input(rbf_node, dest_address)
- bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
+ bumped = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
- {"totalFee": 20000})
+ {"fee_rate": NORMAL})
-def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
- # check that unconfirmed outputs from bumped transactions are not spendable
+def test_unconfirmed_not_spendable(self, rbf_node, rbf_node_address):
+ self.log.info('Test that unconfirmed outputs from bumped txns are not spendable')
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
@@ -418,7 +431,8 @@ def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
-def test_bumpfee_metadata(rbf_node, dest_address):
+def test_bumpfee_metadata(self, rbf_node, dest_address):
+ self.log.info('Test that bumped txn metadata persists to the new txn record')
assert(rbf_node.getbalance() < 49)
rbf_node.generatetoaddress(101, rbf_node.getnewaddress())
rbfid = rbf_node.sendtoaddress(dest_address, 49, "comment value", "to value")
@@ -428,15 +442,17 @@ def test_bumpfee_metadata(rbf_node, dest_address):
assert_equal(bumped_wtx["to"], "to value")
-def test_locked_wallet_fails(rbf_node, dest_address):
+def test_locked_wallet_fails(self, rbf_node, dest_address):
+ self.log.info('Test that a locked wallet cannot bump a txn')
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
-def test_change_script_match(rbf_node, dest_address):
- """Test that the same change addresses is used for the replacement transaction when possible."""
+def test_change_script_match(self, rbf_node, dest_address):
+ self.log.info('Test that the same change address is used for the replacement transaction when possible')
+
def get_change_address(tx):
tx_details = rbf_node.getrawtransaction(tx, 1)
txout_addresses = [txout['scriptPubKey']['addresses'][0] for txout in tx_details["vout"]]
@@ -448,7 +464,7 @@ def test_change_script_match(rbf_node, dest_address):
assert_equal(len(change_addresses), 1)
# Now find that address in each subsequent tx, and no other change
- bumped_total_tx = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
+ bumped_total_tx = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL})
assert_equal(change_addresses, get_change_address(bumped_total_tx['txid']))
bumped_rate_tx = rbf_node.bumpfee(bumped_total_tx["txid"])
assert_equal(change_addresses, get_change_address(bumped_rate_tx['txid']))
@@ -480,12 +496,14 @@ def submit_block_with_tx(node, tx):
node.submitblock(block.serialize().hex())
return block
-def test_no_more_inputs_fails(rbf_node, dest_address):
+def test_no_more_inputs_fails(self, rbf_node, dest_address):
+ self.log.info('Test that bumpfee fails when there are no available confirmed outputs')
# feerate rbf requires confirmed outputs when change output doesn't exist or is insufficient
rbf_node.generatetoaddress(1, dest_address)
# spend all funds, no change output
rbfid = rbf_node.sendtoaddress(rbf_node.getnewaddress(), rbf_node.getbalance(), "", "", True)
assert_raises_rpc_error(-4, "Unable to create transaction: Insufficient funds", rbf_node.bumpfee, rbfid)
+
if __name__ == "__main__":
BumpFeeTest().main()
diff --git a/test/functional/wallet_bumpfee_totalfee_deprecation.py b/test/functional/wallet_bumpfee_totalfee_deprecation.py
deleted file mode 100755
index b8e097c32e..0000000000
--- a/test/functional/wallet_bumpfee_totalfee_deprecation.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2019 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test deprecation of passing `totalFee` to the bumpfee RPC."""
-from decimal import Decimal
-
-from test_framework.messages import BIP125_SEQUENCE_NUMBER
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_raises_rpc_error
-
-class BumpFeeWithTotalFeeArgumentDeprecationTest(BitcoinTestFramework):
- def set_test_params(self):
- self.num_nodes = 2
- self.extra_args = [[
- "-walletrbf={}".format(i),
- "-mintxfee=0.00002",
- ] for i in range(self.num_nodes)]
-
- def skip_test_if_missing_module(self):
- self.skip_if_no_wallet()
-
- def run_test(self):
- peer_node, rbf_node = self.nodes
- peer_node.generate(110)
- self.sync_all()
- peer_node.sendtoaddress(rbf_node.getnewaddress(), 0.001)
- self.sync_all()
- peer_node.generate(1)
- self.sync_all()
- rbfid = spend_one_input(rbf_node, peer_node.getnewaddress())
-
- self.log.info("Testing bumpfee with totalFee argument raises RPC error with deprecation message")
- assert_raises_rpc_error(
- -8,
- "totalFee argument has been deprecated and will be removed in 0.20. " +
- "Please use -deprecatedrpc=totalFee to continue using this argument until removal.",
- rbf_node.bumpfee, rbfid, {"totalFee": 2000})
-
- self.log.info("Testing bumpfee without totalFee argument does not raise")
- rbf_node.bumpfee(rbfid)
-
-def spend_one_input(node, dest_address, change_size=Decimal("0.00049000")):
- tx_input = dict(sequence=BIP125_SEQUENCE_NUMBER,
- **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
- destinations = {dest_address: Decimal("0.00050000")}
- destinations[node.getrawchangeaddress()] = change_size
- rawtx = node.createrawtransaction([tx_input], destinations)
- signedtx = node.signrawtransactionwithwallet(rawtx)
- txid = node.sendrawtransaction(signedtx["hex"])
- return txid
-
-if __name__ == "__main__":
- BumpFeeWithTotalFeeArgumentDeprecationTest().main()
diff --git a/test/functional/wallet_createwallet.py b/test/functional/wallet_createwallet.py
index 048b3127ff..b24d312e27 100755
--- a/test/functional/wallet_createwallet.py
+++ b/test/functional/wallet_createwallet.py
@@ -79,7 +79,7 @@ class CreateWalletTest(BitcoinTestFramework):
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress)
# Now set a seed and it should work. Wallet should also be encrypted
- w4.walletpassphrase('pass', 2)
+ w4.walletpassphrase('pass', 60)
w4.sethdseed()
w4.getnewaddress()
w4.getrawchangeaddress()
@@ -99,7 +99,7 @@ class CreateWalletTest(BitcoinTestFramework):
self.nodes[0].createwallet(wallet_name='wblank', disable_private_keys=False, blank=True, passphrase='thisisapassphrase')
wblank = node.get_wallet_rpc('wblank')
assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", wblank.signmessage, "needanargument", "test")
- wblank.walletpassphrase('thisisapassphrase', 10)
+ wblank.walletpassphrase('thisisapassphrase', 60)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getrawchangeaddress)
@@ -108,7 +108,7 @@ class CreateWalletTest(BitcoinTestFramework):
self.nodes[0].createwallet(wallet_name='w6', disable_private_keys=False, blank=False, passphrase='thisisapassphrase')
w6 = node.get_wallet_rpc('w6')
assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", w6.signmessage, "needanargument", "test")
- w6.walletpassphrase('thisisapassphrase', 10)
+ w6.walletpassphrase('thisisapassphrase', 60)
w6.signmessage(w6.getnewaddress('', 'legacy'), "test")
w6.keypoolrefill(1)
# There should only be 1 key
@@ -119,12 +119,12 @@ class CreateWalletTest(BitcoinTestFramework):
resp = self.nodes[0].createwallet(wallet_name='w7', disable_private_keys=False, blank=False, passphrase='')
assert_equal(resp['warning'], 'Empty string given as passphrase, wallet will not be encrypted.')
w7 = node.get_wallet_rpc('w7')
- assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 10)
+ assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 60)
self.log.info('Test making a wallet with avoid reuse flag')
self.nodes[0].createwallet('w8', False, False, '', True) # Use positional arguments to check for bug where avoid_reuse could not be set for wallets without needing them to be encrypted
w8 = node.get_wallet_rpc('w8')
- assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 10)
+ assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 60)
assert_equal(w8.getwalletinfo()["avoid_reuse"], True)
self.log.info('Using a passphrase with private keys disabled returns error')
diff --git a/test/functional/wallet_groups.py b/test/functional/wallet_groups.py
index f2fa1d3e40..261a43472b 100755
--- a/test/functional/wallet_groups.py
+++ b/test/functional/wallet_groups.py
@@ -11,12 +11,13 @@ from test_framework.util import (
assert_equal,
)
+
class WalletGroupTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], [], ['-avoidpartialspends']]
- self.rpc_timeout = 240
+ self.rpc_timeout = 480
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
@@ -87,5 +88,6 @@ class WalletGroupTest(BitcoinTestFramework):
# is way too big.
assert self.nodes[2].sendtoaddress(address=addr2[0], amount=5)
+
if __name__ == '__main__':
- WalletGroupTest().main ()
+ WalletGroupTest().main()
diff --git a/test/functional/wallet_listsinceblock.py b/test/functional/wallet_listsinceblock.py
index 6f248c9bd3..229eda9806 100755
--- a/test/functional/wallet_listsinceblock.py
+++ b/test/functional/wallet_listsinceblock.py
@@ -2,7 +2,7 @@
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test the listsincelast RPC."""
+"""Test the listsinceblock RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import BIP125_SEQUENCE_NUMBER
@@ -38,6 +38,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
self.double_spends_filtered()
def test_no_blockhash(self):
+ self.log.info("Test no blockhash")
txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
blockhash, = self.nodes[2].generate(1)
blockheight = self.nodes[2].getblockheader(blockhash)['height']
@@ -63,6 +64,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
"transactions": txs})
def test_invalid_blockhash(self):
+ self.log.info("Test invalid blockhash")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
@@ -100,6 +102,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
This test only checks that [tx0] is present.
'''
+ self.log.info("Test reorg")
# Split network into two
self.split_network()
@@ -110,7 +113,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
# generate on both sides
lastblockhash = self.nodes[1].generate(6)[5]
self.nodes[2].generate(7)
- self.log.info('lastblockhash=%s' % (lastblockhash))
+ self.log.debug('lastblockhash={}'.format(lastblockhash))
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
@@ -155,6 +158,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
until the fork point, and to include all transactions that relate to the
node wallet.
'''
+ self.log.info("Test double spend")
self.sync_all()
@@ -234,6 +238,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
3. It is listed with a confirmation count of 2 (bb3, bb4), not
3 (aa1, aa2, aa3).
'''
+ self.log.info("Test double send")
self.sync_all()
@@ -302,6 +307,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
`listsinceblock` was returning conflicted transactions even if they
occurred before the specified cutoff blockhash
'''
+ self.log.info("Test spends filtered")
spending_node = self.nodes[2]
dest_address = spending_node.getnewaddress()
diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py
index f2fa41b647..78ead514a5 100755
--- a/test/functional/wallet_multiwallet.py
+++ b/test/functional/wallet_multiwallet.py
@@ -24,6 +24,7 @@ class MultiWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
+ self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/wallet_resendwallettransactions.py b/test/functional/wallet_resendwallettransactions.py
index 91d26e9cb3..d122e3db52 100755
--- a/test/functional/wallet_resendwallettransactions.py
+++ b/test/functional/wallet_resendwallettransactions.py
@@ -12,6 +12,7 @@ from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, wait_until
+
class P2PStoreTxInvs(P2PInterface):
def __init__(self):
super().__init__()
@@ -24,6 +25,7 @@ class P2PStoreTxInvs(P2PInterface):
# save txid
self.tx_invs_received[i.hash] += 1
+
class ResendWalletTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
@@ -63,6 +65,7 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
node.submitblock(ToHex(block))
# Transaction should not be rebroadcast
+ node.syncwithvalidationinterfacequeue()
node.p2ps[1].sync_with_ping()
assert_equal(node.p2ps[1].tx_invs_received[txid], 0)
@@ -72,5 +75,6 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
node.setmocktime(rebroadcast_time)
wait_until(lambda: node.p2ps[1].tx_invs_received[txid] >= 1, lock=mininode_lock)
+
if __name__ == '__main__':
ResendWalletTransactionsTest().main()
diff --git a/test/fuzz/test_runner.py b/test/fuzz/test_runner.py
index 50e86cf9dc..1786c39c36 100755
--- a/test/fuzz/test_runner.py
+++ b/test/fuzz/test_runner.py
@@ -12,43 +12,12 @@ import sys
import subprocess
import logging
-# Fuzzers known to lack a seed corpus in https://github.com/bitcoin-core/qa-assets/tree/master/fuzz_seed_corpus
-FUZZERS_MISSING_CORPORA = [
- "addr_info_deserialize",
- "asmap",
- "base_encode_decode",
- "block",
- "block_file_info_deserialize",
- "block_filter_deserialize",
- "block_header_and_short_txids_deserialize",
- "decode_tx",
- "fee_rate_deserialize",
- "flat_file_pos_deserialize",
- "hex",
- "integer",
- "key_origin_info_deserialize",
- "merkle_block_deserialize",
- "out_point_deserialize",
- "parse_hd_keypath",
- "parse_numbers",
- "parse_script",
- "parse_univalue",
- "partial_merkle_tree_deserialize",
- "partially_signed_transaction_deserialize",
- "prefilled_transaction_deserialize",
- "psbt_input_deserialize",
- "psbt_output_deserialize",
- "pub_key_deserialize",
- "script_deserialize",
- "strprintf",
- "sub_net_deserialize",
- "tx_in",
- "tx_in_deserialize",
- "tx_out",
-]
def main():
- parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ description='''Run the fuzz targets with all inputs from the seed_dir once.''',
+ )
parser.add_argument(
"-l",
"--loglevel",
@@ -57,9 +26,14 @@ def main():
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console.",
)
parser.add_argument(
- '--export_coverage',
+ '--valgrind',
action='store_true',
- help='If true, export coverage information to files in the seed corpus',
+ help='If true, run fuzzing binaries under the valgrind memory error detector',
+ )
+ parser.add_argument(
+ '-x',
+ '--exclude',
+ help="A comma-separated list of targets to exclude",
)
parser.add_argument(
'seed_dir',
@@ -70,6 +44,10 @@ def main():
nargs='*',
help='The target(s) to run. Default is to run all targets.',
)
+ parser.add_argument(
+ '--m_dir',
+ help='Merge inputs from this directory into the seed_dir. Needs /target subdirectory.',
+ )
args = parser.parse_args()
@@ -95,7 +73,7 @@ def main():
logging.error("No fuzz targets found")
sys.exit(1)
- logging.info("Fuzz targets found: {}".format(test_list_all))
+ logging.debug("{} fuzz target(s) found: {}".format(len(test_list_all), " ".join(sorted(test_list_all))))
args.target = args.target or test_list_all # By default run all
test_list_error = list(set(args.target).difference(set(test_list_all)))
@@ -104,7 +82,29 @@ def main():
test_list_selection = list(set(test_list_all).intersection(set(args.target)))
if not test_list_selection:
logging.error("No fuzz targets selected")
- logging.info("Fuzz targets selected: {}".format(test_list_selection))
+ if args.exclude:
+ for excluded_target in args.exclude.split(","):
+ if excluded_target not in test_list_selection:
+ logging.error("Target \"{}\" not found in current target list.".format(excluded_target))
+ continue
+ test_list_selection.remove(excluded_target)
+ test_list_selection.sort()
+
+ logging.info("{} of {} detected fuzz target(s) selected: {}".format(len(test_list_selection), len(test_list_all), " ".join(test_list_selection)))
+
+ test_list_seedless = []
+ for t in test_list_selection:
+ corpus_path = os.path.join(args.seed_dir, t)
+ if not os.path.exists(corpus_path) or len(os.listdir(corpus_path)) == 0:
+ test_list_seedless.append(t)
+ test_list_seedless.sort()
+ if test_list_seedless:
+ logging.info(
+ "Fuzzing harnesses lacking a seed corpus: {}".format(
+ " ".join(test_list_seedless)
+ )
+ )
+ logging.info("Please consider adding a fuzz seed corpus at https://github.com/bitcoin-core/qa-assets")
try:
help_output = subprocess.run(
@@ -112,7 +112,7 @@ def main():
os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', test_list_selection[0]),
'-help=1',
],
- timeout=10,
+ timeout=20,
check=True,
stderr=subprocess.PIPE,
universal_newlines=True,
@@ -124,37 +124,62 @@ def main():
logging.error("subprocess timed out: Currently only libFuzzer is supported")
sys.exit(1)
+ if args.m_dir:
+ merge_inputs(
+ corpus=args.seed_dir,
+ test_list=test_list_selection,
+ build_dir=config["environment"]["BUILDDIR"],
+ merge_dir=args.m_dir,
+ )
+
run_once(
corpus=args.seed_dir,
test_list=test_list_selection,
build_dir=config["environment"]["BUILDDIR"],
- export_coverage=args.export_coverage,
+ use_valgrind=args.valgrind,
)
-def run_once(*, corpus, test_list, build_dir, export_coverage):
+def merge_inputs(*, corpus, test_list, build_dir, merge_dir):
+ logging.info("Merge the inputs in the passed dir into the seed_dir. Passed dir {}".format(merge_dir))
+ for t in test_list:
+ args = [
+ os.path.join(build_dir, 'src', 'test', 'fuzz', t),
+ '-merge=1',
+ os.path.join(corpus, t),
+ os.path.join(merge_dir, t),
+ ]
+ os.makedirs(os.path.join(corpus, t), exist_ok=True)
+ os.makedirs(os.path.join(merge_dir, t), exist_ok=True)
+ logging.debug('Run {} with args {}'.format(t, args))
+ output = subprocess.run(args, check=True, stderr=subprocess.PIPE, universal_newlines=True).stderr
+ logging.debug('Output: {}'.format(output))
+
+
+def run_once(*, corpus, test_list, build_dir, use_valgrind):
for t in test_list:
corpus_path = os.path.join(corpus, t)
- if t in FUZZERS_MISSING_CORPORA:
- os.makedirs(corpus_path, exist_ok=True)
+ os.makedirs(corpus_path, exist_ok=True)
args = [
os.path.join(build_dir, 'src', 'test', 'fuzz', t),
'-runs=1',
- '-detect_leaks=0',
corpus_path,
]
+ if use_valgrind:
+ args = ['valgrind', '--quiet', '--error-exitcode=1'] + args
logging.debug('Run {} with args {}'.format(t, args))
result = subprocess.run(args, stderr=subprocess.PIPE, universal_newlines=True)
output = result.stderr
logging.debug('Output: {}'.format(output))
- result.check_returncode()
- if not export_coverage:
- continue
- for l in output.splitlines():
- if 'INITED' in l:
- with open(os.path.join(corpus, t + '_coverage'), 'w', encoding='utf-8') as cov_file:
- cov_file.write(l)
- break
+ try:
+ result.check_returncode()
+ except subprocess.CalledProcessError as e:
+ if e.stdout:
+ logging.info(e.stdout)
+ if e.stderr:
+ logging.info(e.stderr)
+ logging.info("Target \"{}\" failed with exit code {}: {}".format(t, e.returncode, " ".join(args)))
+ sys.exit(1)
def parse_test_list(makefile):
diff --git a/test/lint/README.md b/test/lint/README.md
index f415d619ee..6b95cc3540 100644
--- a/test/lint/README.md
+++ b/test/lint/README.md
@@ -21,6 +21,7 @@ maintained:
* for `src/leveldb`: https://github.com/bitcoin-core/leveldb.git (branch bitcoin-fork)
* for `src/univalue`: https://github.com/bitcoin-core/univalue.git (branch master)
* for `src/crypto/ctaes`: https://github.com/bitcoin-core/ctaes.git (branch master)
+* for `src/crc32c`: https://github.com/google/crc32c.git (branch master)
Usage: `git-subtree-check.sh DIR (COMMIT)`
diff --git a/test/lint/extended-lint-cppcheck.sh b/test/lint/extended-lint-cppcheck.sh
index 47df25ba6b..ae18d74ebf 100755
--- a/test/lint/extended-lint-cppcheck.sh
+++ b/test/lint/extended-lint-cppcheck.sh
@@ -65,8 +65,8 @@ function join_array {
ENABLED_CHECKS_REGEXP=$(join_array "|" "${ENABLED_CHECKS[@]}")
IGNORED_WARNINGS_REGEXP=$(join_array "|" "${IGNORED_WARNINGS[@]}")
-WARNINGS=$(git ls-files -- "*.cpp" "*.h" ":(exclude)src/leveldb/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" | \
- xargs cppcheck --enable=all -j "$(getconf _NPROCESSORS_ONLN)" --language=c++ --std=c++11 --template=gcc -D__cplusplus -DCLIENT_VERSION_BUILD -DCLIENT_VERSION_IS_RELEASE -DCLIENT_VERSION_MAJOR -DCLIENT_VERSION_MINOR -DCLIENT_VERSION_REVISION -DCOPYRIGHT_YEAR -DDEBUG -DHAVE_WORKING_BOOST_SLEEP_FOR -I src/ -q 2>&1 | sort -u | \
+WARNINGS=$(git ls-files -- "*.cpp" "*.h" ":(exclude)src/leveldb/" ":(exclude)src/crc32c/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" | \
+ xargs cppcheck --enable=all -j "$(getconf _NPROCESSORS_ONLN)" --language=c++ --std=c++11 --template=gcc -D__cplusplus -DCLIENT_VERSION_BUILD -DCLIENT_VERSION_IS_RELEASE -DCLIENT_VERSION_MAJOR -DCLIENT_VERSION_MINOR -DCLIENT_VERSION_REVISION -DCOPYRIGHT_YEAR -DDEBUG -I src/ -q 2>&1 | sort -u | \
grep -E "${ENABLED_CHECKS_REGEXP}" | \
grep -vE "${IGNORED_WARNINGS_REGEXP}")
if [[ ${WARNINGS} != "" ]]; then
diff --git a/test/lint/lint-include-guards.sh b/test/lint/lint-include-guards.sh
index 2d3beaf582..3a0494c190 100755
--- a/test/lint/lint-include-guards.sh
+++ b/test/lint/lint-include-guards.sh
@@ -10,7 +10,7 @@ export LC_ALL=C
HEADER_ID_PREFIX="BITCOIN_"
HEADER_ID_SUFFIX="_H"
-REGEXP_EXCLUDE_FILES_WITH_PREFIX="src/(crypto/ctaes/|leveldb/|secp256k1/|test/fuzz/FuzzedDataProvider.h|tinyformat.h|univalue/)"
+REGEXP_EXCLUDE_FILES_WITH_PREFIX="src/(crypto/ctaes/|leveldb/|crc32c/|secp256k1/|test/fuzz/FuzzedDataProvider.h|tinyformat.h|univalue/)"
EXIT_CODE=0
for HEADER_FILE in $(git ls-files -- "*.h" | grep -vE "^${REGEXP_EXCLUDE_FILES_WITH_PREFIX}")
diff --git a/test/lint/lint-includes.sh b/test/lint/lint-includes.sh
index bb2bd4e56c..1cece6a525 100755
--- a/test/lint/lint-includes.sh
+++ b/test/lint/lint-includes.sh
@@ -9,7 +9,7 @@
# Check includes: Check for duplicate includes. Enforce bracket syntax includes.
export LC_ALL=C
-IGNORE_REGEXP="/(leveldb|secp256k1|univalue)/"
+IGNORE_REGEXP="/(leveldb|secp256k1|univalue|crc32c)/"
# cd to root folder of git repo for git ls-files to work properly
cd "$(dirname $0)/../.." || exit 1
@@ -53,7 +53,6 @@ EXPECTED_BOOST_INCLUDES=(
boost/algorithm/string/classification.hpp
boost/algorithm/string/replace.hpp
boost/algorithm/string/split.hpp
- boost/chrono/chrono.hpp
boost/date_time/posix_time/posix_time.hpp
boost/filesystem.hpp
boost/filesystem/fstream.hpp
diff --git a/test/lint/lint-locale-dependence.sh b/test/lint/lint-locale-dependence.sh
index 35e58c2df6..70410d7405 100755
--- a/test/lint/lint-locale-dependence.sh
+++ b/test/lint/lint-locale-dependence.sh
@@ -6,30 +6,16 @@
export LC_ALL=C
KNOWN_VIOLATIONS=(
"src/bitcoin-tx.cpp.*stoul"
- "src/bitcoin-tx.cpp.*std::to_string"
"src/bitcoin-tx.cpp.*trim_right"
"src/dbwrapper.cpp.*stoul"
"src/dbwrapper.cpp:.*vsnprintf"
"src/httprpc.cpp.*trim"
"src/init.cpp:.*atoi"
- "src/qt/optionsmodel.cpp.*std::to_string"
"src/qt/rpcconsole.cpp:.*atoi"
"src/rest.cpp:.*strtol"
- "src/rpc/net.cpp.*std::to_string"
- "src/rpc/rawtransaction.cpp.*std::to_string"
- "src/rpc/util.cpp.*std::to_string"
- "src/test/addrman_tests.cpp.*std::to_string"
- "src/test/blockchain_tests.cpp.*std::to_string"
"src/test/dbwrapper_tests.cpp:.*snprintf"
- "src/test/denialofservice_tests.cpp.*std::to_string"
+ "src/test/fuzz/locale.cpp"
"src/test/fuzz/parse_numbers.cpp:.*atoi"
- "src/test/key_tests.cpp.*std::to_string"
- "src/test/net_tests.cpp.*std::to_string"
- "src/test/settings_tests.cpp.*std::to_string"
- "src/test/timedata_tests.cpp.*std::to_string"
- "src/test/util/setup_common.cpp.*std::to_string"
- "src/test/util_tests.cpp.*std::to_string"
- "src/test/util_threadnames_tests.cpp.*std::to_string"
"src/torcontrol.cpp:.*atoi"
"src/torcontrol.cpp:.*strtol"
"src/util/strencodings.cpp:.*atoi"
@@ -37,7 +23,6 @@ KNOWN_VIOLATIONS=(
"src/util/strencodings.cpp:.*strtoul"
"src/util/strencodings.h:.*atoi"
"src/util/system.cpp:.*atoi"
- "src/wallet/scriptpubkeyman.cpp.*std::to_string"
)
REGEXP_IGNORE_EXTERNAL_DEPENDENCIES="^src/(crypto/ctaes/|leveldb/|secp256k1/|tinyformat.h|univalue/)"
diff --git a/test/lint/lint-python-utf8-encoding.sh b/test/lint/lint-python-utf8-encoding.sh
index d03c20205d..773855bed1 100755
--- a/test/lint/lint-python-utf8-encoding.sh
+++ b/test/lint/lint-python-utf8-encoding.sh
@@ -9,7 +9,7 @@
export LC_ALL=C
EXIT_CODE=0
-OUTPUT=$(git grep " open(" -- "*.py" | grep -vE "encoding=.(ascii|utf8|utf-8)." | grep -vE "open\([^,]*, ['\"][^'\"]*b[^'\"]*['\"]")
+OUTPUT=$(git grep " open(" -- "*.py" ":(exclude)src/crc32c/" | grep -vE "encoding=.(ascii|utf8|utf-8)." | grep -vE "open\([^,]*, ['\"][^'\"]*b[^'\"]*['\"]")
if [[ ${OUTPUT} != "" ]]; then
echo "Python's open(...) seems to be used to open text files without explicitly"
echo "specifying encoding=\"utf8\":"
@@ -17,7 +17,7 @@ if [[ ${OUTPUT} != "" ]]; then
echo "${OUTPUT}"
EXIT_CODE=1
fi
-OUTPUT=$(git grep "check_output(" -- "*.py" | grep "universal_newlines=True" | grep -vE "encoding=.(ascii|utf8|utf-8).")
+OUTPUT=$(git grep "check_output(" -- "*.py" ":(exclude)src/crc32c/"| grep "universal_newlines=True" | grep -vE "encoding=.(ascii|utf8|utf-8).")
if [[ ${OUTPUT} != "" ]]; then
echo "Python's check_output(...) seems to be used to get program outputs without explicitly"
echo "specifying encoding=\"utf8\":"
diff --git a/test/lint/lint-shell.sh b/test/lint/lint-shell.sh
index 63624e3ae0..f59b2c9945 100755
--- a/test/lint/lint-shell.sh
+++ b/test/lint/lint-shell.sh
@@ -41,7 +41,7 @@ if ! shellcheck "$EXCLUDE" $(git ls-files -- '*.sh' | grep -vE 'src/(leveldb|sec
fi
if ! command -v yq > /dev/null; then
- echo "Skipping Gitian desriptor scripts checking since yq is not installed."
+ echo "Skipping Gitian descriptor scripts checking since yq is not installed."
exit $EXIT_CODE
fi
diff --git a/test/lint/lint-spelling.ignore-words.txt b/test/lint/lint-spelling.ignore-words.txt
index 576ae94098..a7a97eb41f 100644
--- a/test/lint/lint-spelling.ignore-words.txt
+++ b/test/lint/lint-spelling.ignore-words.txt
@@ -12,3 +12,5 @@ keyserver
homogenous
setban
hist
+ser
+unselect
diff --git a/test/lint/lint-spelling.sh b/test/lint/lint-spelling.sh
index c7a3d0de44..cb84727ba5 100755
--- a/test/lint/lint-spelling.sh
+++ b/test/lint/lint-spelling.sh
@@ -15,6 +15,6 @@ if ! command -v codespell > /dev/null; then
fi
IGNORE_WORDS_FILE=test/lint/lint-spelling.ignore-words.txt
-if ! codespell --check-filenames --disable-colors --quiet-level=7 --ignore-words=${IGNORE_WORDS_FILE} $(git ls-files -- ":(exclude)build-aux/m4/" ":(exclude)contrib/seeds/*.txt" ":(exclude)depends/" ":(exclude)doc/release-notes/" ":(exclude)src/leveldb/" ":(exclude)src/qt/locale/" ":(exclude)src/qt/*.qrc" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/"); then
+if ! codespell --check-filenames --disable-colors --quiet-level=7 --ignore-words=${IGNORE_WORDS_FILE} $(git ls-files -- ":(exclude)build-aux/m4/" ":(exclude)contrib/seeds/*.txt" ":(exclude)depends/" ":(exclude)doc/release-notes/" ":(exclude)src/leveldb/" ":(exclude)src/crc32c/" ":(exclude)src/qt/locale/" ":(exclude)src/qt/*.qrc" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/"); then
echo "^ Warning: codespell identified likely spelling errors. Any false positives? Add them to the list of ignored words in ${IGNORE_WORDS_FILE}"
fi
diff --git a/test/lint/lint-submodule.sh b/test/lint/lint-submodule.sh
new file mode 100755
index 0000000000..d9aa021df7
--- /dev/null
+++ b/test/lint/lint-submodule.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+# This script checks for git modules
+export LC_ALL=C
+EXIT_CODE=0
+
+CMD=$(git submodule status --recursive)
+if test -n "$CMD";
+then
+ echo These submodules were found, delete them:
+ echo "$CMD"
+ EXIT_CODE=1
+fi
+
+exit $EXIT_CODE
+
diff --git a/test/lint/lint-whitespace.sh b/test/lint/lint-whitespace.sh
index 861faf8516..d8bdb0a8d7 100755
--- a/test/lint/lint-whitespace.sh
+++ b/test/lint/lint-whitespace.sh
@@ -31,14 +31,14 @@ if [ -z "${TRAVIS_COMMIT_RANGE}" ]; then
fi
showdiff() {
- if ! git diff -U0 "${TRAVIS_COMMIT_RANGE}" -- "." ":(exclude)depends/patches/" ":(exclude)src/leveldb/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" ":(exclude)doc/release-notes/" ":(exclude)src/qt/locale/"; then
+ if ! git diff -U0 "${TRAVIS_COMMIT_RANGE}" -- "." ":(exclude)depends/patches/" ":(exclude)src/leveldb/" ":(exclude)src/crc32c/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" ":(exclude)doc/release-notes/" ":(exclude)src/qt/locale/"; then
echo "Failed to get a diff"
exit 1
fi
}
showcodediff() {
- if ! git diff -U0 "${TRAVIS_COMMIT_RANGE}" -- *.cpp *.h *.md *.py *.sh ":(exclude)src/leveldb/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" ":(exclude)doc/release-notes/" ":(exclude)src/qt/locale/"; then
+ if ! git diff -U0 "${TRAVIS_COMMIT_RANGE}" -- *.cpp *.h *.md *.py *.sh ":(exclude)src/leveldb/" ":(exclude)src/crc32c/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" ":(exclude)doc/release-notes/" ":(exclude)src/qt/locale/"; then
echo "Failed to get a diff"
exit 1
fi
diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan
index f5de358bcb..b3d9b9e6ec 100644
--- a/test/sanitizer_suppressions/ubsan
+++ b/test/sanitizer_suppressions/ubsan
@@ -1,7 +1,5 @@
# -fsanitize=undefined suppressions
# =================================
-alignment:move.h
-alignment:prevector.h
float-divide-by-zero:policy/fees.cpp
float-divide-by-zero:validation.cpp
float-divide-by-zero:wallet/wallet.cpp
@@ -84,3 +82,4 @@ implicit-signed-integer-truncation:test/skiplist_tests.cpp
implicit-signed-integer-truncation:torcontrol.cpp
implicit-unsigned-integer-truncation:crypto/*
implicit-unsigned-integer-truncation:leveldb/*
+implicit-integer-sign-change:crc32c/*
diff --git a/test/util/data/bitcoin-util-test.json b/test/util/data/bitcoin-util-test.json
index 761923a818..99cd4ab695 100644
--- a/test/util/data/bitcoin-util-test.json
+++ b/test/util/data/bitcoin-util-test.json
@@ -219,6 +219,12 @@
"description": "Parses a transaction with no inputs and a single output script (output in json)"
},
{ "exec": "./bitcoin-tx",
+ "args": ["-create", "outscript=0:123badscript"],
+ "return_code": 1,
+ "error_txt": "error: script parse error",
+ "description": "Create a new transaction with an invalid output script"
+ },
+ { "exec": "./bitcoin-tx",
"args": ["-create", "outscript=0:OP_DROP", "nversion=1"],
"output_cmp": "txcreatescript1.hex",
"description": "Create a new transaction with a single output script (OP_DROP)"
@@ -259,6 +265,40 @@
"description": "Create a new transaction with a single output script (OP_DROP) in a P2SH, wrapped in a P2SH (output as json)"
},
{ "exec": "./bitcoin-tx",
+ "args": ["-create", "outscript=0:9999999999"],
+ "return_code": 1,
+ "error_txt": "error: script parse error: decimal numeric value only allowed in the range -0xFFFFFFFF...0xFFFFFFFF",
+ "description": "Try to parse an output script with a decimal number above the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outscript=0:4294967296"],
+ "return_code": 1,
+ "error_txt": "error: script parse error: decimal numeric value only allowed in the range -0xFFFFFFFF...0xFFFFFFFF",
+ "description": "Try to parse an output script with a decimal number just above the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outscript=0:4294967295"],
+ "output_cmp": "txcreatescript5.hex",
+ "description": "Try to parse an output script with a decimal number at the upper limit of the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outscript=0:-9999999999"],
+ "return_code": 1,
+ "error_txt": "error: script parse error: decimal numeric value only allowed in the range -0xFFFFFFFF...0xFFFFFFFF",
+ "description": "Try to parse an output script with a decimal number below the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outscript=0:-4294967296"],
+ "return_code": 1,
+ "error_txt": "error: script parse error: decimal numeric value only allowed in the range -0xFFFFFFFF...0xFFFFFFFF",
+ "description": "Try to parse an output script with a decimal number just below the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outscript=0:-4294967295"],
+ "output_cmp": "txcreatescript6.hex",
+ "description": "Try to parse an output script with a decimal number at the lower limit of the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
"args":
["-create", "nversion=1",
"in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0",
diff --git a/test/util/data/txcreatescript5.hex b/test/util/data/txcreatescript5.hex
new file mode 100644
index 0000000000..48e0a12b0c
--- /dev/null
+++ b/test/util/data/txcreatescript5.hex
@@ -0,0 +1 @@
+02000000000100000000000000000605ffffffff0000000000
diff --git a/test/util/data/txcreatescript6.hex b/test/util/data/txcreatescript6.hex
new file mode 100644
index 0000000000..b98293813d
--- /dev/null
+++ b/test/util/data/txcreatescript6.hex
@@ -0,0 +1 @@
+02000000000100000000000000000605ffffffff8000000000
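The two new fixtures above encode the boundary values 4294967295 (0xFFFFFFFF) and -4294967295 accepted by the range checks added to bitcoin-util-test.json. A minimal sketch of CScriptNum-style serialization that yields the 5-byte payloads visible in those hex dumps (ffffffff00 and ffffffff80); the helper is illustrative, not part of the patch:
def script_num_encode(n):
    # Little-endian magnitude; if the high bit of the last byte is set, append an
    # extra sign byte, otherwise a negative value sets the high bit of the last byte.
    if n == 0:
        return b""
    negative = n < 0
    magnitude = abs(n)
    out = bytearray()
    while magnitude:
        out.append(magnitude & 0xFF)
        magnitude >>= 8
    if out[-1] & 0x80:
        out.append(0x80 if negative else 0x00)
    elif negative:
        out[-1] |= 0x80
    return bytes(out)
assert script_num_encode(4294967295).hex() == "ffffffff00"   # payload inside txcreatescript5.hex
assert script_num_encode(-4294967295).hex() == "ffffffff80"  # payload inside txcreatescript6.hex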