Diffstat (limited to 'src')
278 files changed, 13103 insertions, 8831 deletions
diff --git a/src/Makefile.am b/src/Makefile.am index e58a89ca03..6edd5e75b7 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -718,6 +718,7 @@ if HARDEN endif if EMBEDDED_LEVELDB +include Makefile.crc32c.include include Makefile.leveldb.include endif diff --git a/src/Makefile.crc32c.include b/src/Makefile.crc32c.include new file mode 100644 index 0000000000..802b3a2e4b --- /dev/null +++ b/src/Makefile.crc32c.include @@ -0,0 +1,75 @@ +# Copyright (c) 2019 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +LIBCRC32C_INT = crc32c/libcrc32c.a +LIBLEVELDB_SSE42_INT = leveldb/libleveldb_sse42.a + +EXTRA_LIBRARIES += $(LIBCRC32C_INT) + +LIBCRC32C = $(LIBCRC32C_INT) + +CRC32C_CPPFLAGS_INT = +CRC32C_CPPFLAGS_INT += -I$(srcdir)/crc32c/include +CRC32C_CPPFLAGS_INT += -DHAVE_BUILTIN_PREFETCH=@HAVE_BUILTIN_PREFETCH@ +CRC32C_CPPFLAGS_INT += -DHAVE_MM_PREFETCH=@HAVE_MM_PREFETCH@ +CRC32C_CPPFLAGS_INT += -DHAVE_STRONG_GETAUXVAL=@HAVE_STRONG_GETAUXVAL@ +CRC32C_CPPFLAGS_INT += -DHAVE_WEAK_GETAUXVAL=@HAVE_WEAK_GETAUXVAL@ +CRC32C_CPPFLAGS_INT += -DCRC32C_TESTS_BUILT_WITH_GLOG=0 + +if ENABLE_SSE42 +CRC32C_CPPFLAGS_INT += -DHAVE_SSE42=1 +else +CRC32C_CPPFLAGS_INT += -DHAVE_SSE42=0 +endif + +if ENABLE_ARM_CRC +CRC32C_CPPFLAGS_INT += -DHAVE_ARM64_CRC32C=1 +else +CRC32C_CPPFLAGS_INT += -DHAVE_ARM64_CRC32C=0 +endif + +if WORDS_BIGENDIAN +CRC32C_CPPFLAGS_INT += -DBYTE_ORDER_BIG_ENDIAN=1 +else +CRC32C_CPPFLAGS_INT += -DBYTE_ORDER_BIG_ENDIAN=0 +endif + +crc32c_libcrc32c_a_CPPFLAGS = $(AM_CPPFLAGS) $(CRC32C_CPPFLAGS_INT) $(CRC32C_CPPFLAGS) +crc32c_libcrc32c_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) + +crc32c_libcrc32c_a_SOURCES = +crc32c_libcrc32c_a_SOURCES += crc32c/include/crc32c/crc32c.h +crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_arm64.h +crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_arm64_linux_check.h +crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_internal.h +crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_prefetch.h +crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_read_le.h +crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_round_up.h +crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_sse42_check.h +crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_sse42.h + +crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c.cc +crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_portable.cc + +if ENABLE_SSE42 +LIBCRC32C_SSE42_INT = crc32c/libcrc32c_sse42.a +EXTRA_LIBRARIES += $(LIBCRC32C_SSE42_INT) +LIBCRC32C += $(LIBCRC32C_SSE42_INT) + +crc32c_libcrc32c_sse42_a_CPPFLAGS = $(crc32c_libcrc32c_a_CPPFLAGS) +crc32c_libcrc32c_sse42_a_CXXFLAGS = $(crc32c_libcrc32c_a_CXXFLAGS) $(SSE42_CXXFLAGS) + +crc32c_libcrc32c_sse42_a_SOURCES = crc32c/src/crc32c_sse42.cc +endif + +if ENABLE_ARM_CRC +LIBCRC32C_ARM_CRC_INT = crc32c/libcrc32c_arm_crc.a +EXTRA_LIBRARIES += $(LIBCRC32C_ARM_CRC_INT) +LIBCRC32C += $(LIBCRC32C_ARM_CRC_INT) + +crc32c_libcrc32c_arm_crc_a_CPPFLAGS = $(crc32c_libcrc32c_a_CPPFLAGS) +crc32c_libcrc32c_arm_crc_a_CXXFLAGS = $(crc32c_libcrc32c_a_CXXFLAGS) $(ARM_CRC_CXXFLAGS) + +crc32c_libcrc32c_arm_crc_a_SOURCES = crc32c/src/crc32c_arm64.cc +endif diff --git a/src/Makefile.leveldb.include b/src/Makefile.leveldb.include index bd08bcb4ed..04b53471e4 100644 --- a/src/Makefile.leveldb.include +++ b/src/Makefile.leveldb.include @@ -4,27 +4,33 @@ LIBLEVELDB_INT = leveldb/libleveldb.a LIBMEMENV_INT = leveldb/libmemenv.a -LIBLEVELDB_SSE42_INT = leveldb/libleveldb_sse42.a EXTRA_LIBRARIES += $(LIBLEVELDB_INT) EXTRA_LIBRARIES 
+= $(LIBMEMENV_INT) -EXTRA_LIBRARIES += $(LIBLEVELDB_SSE42_INT) -LIBLEVELDB += $(LIBLEVELDB_INT) +LIBLEVELDB += $(LIBLEVELDB_INT) $(LIBCRC32C) LIBMEMENV += $(LIBMEMENV_INT) -LIBLEVELDB_SSE42 = $(LIBLEVELDB_SSE42_INT) LEVELDB_CPPFLAGS += -I$(srcdir)/leveldb/include LEVELDB_CPPFLAGS += -I$(srcdir)/leveldb/helpers/memenv LEVELDB_CPPFLAGS_INT = LEVELDB_CPPFLAGS_INT += -I$(srcdir)/leveldb -LEVELDB_CPPFLAGS_INT += $(LEVELDB_TARGET_FLAGS) -LEVELDB_CPPFLAGS_INT += -DLEVELDB_ATOMIC_PRESENT +LEVELDB_CPPFLAGS_INT += -I$(srcdir)/crc32c/include LEVELDB_CPPFLAGS_INT += -D__STDC_LIMIT_MACROS +LEVELDB_CPPFLAGS_INT += -DHAVE_SNAPPY=0 -DHAVE_CRC32C=1 +LEVELDB_CPPFLAGS_INT += -DHAVE_FDATASYNC=@HAVE_FDATASYNC@ +LEVELDB_CPPFLAGS_INT += -DHAVE_FULLFSYNC=@HAVE_FULLFSYNC@ +LEVELDB_CPPFLAGS_INT += -DHAVE_O_CLOEXEC=@HAVE_O_CLOEXEC@ + +if WORDS_BIGENDIAN +LEVELDB_CPPFLAGS_INT += -DLEVELDB_IS_BIG_ENDIAN=1 +else +LEVELDB_CPPFLAGS_INT += -DLEVELDB_IS_BIG_ENDIAN=0 +endif if TARGET_WINDOWS -LEVELDB_CPPFLAGS_INT += -DLEVELDB_PLATFORM_WINDOWS -D__USE_MINGW_ANSI_STDIO=1 +LEVELDB_CPPFLAGS_INT += -DLEVELDB_PLATFORM_WINDOWS -D_UNICODE -DUNICODE -D__USE_MINGW_ANSI_STDIO=1 else LEVELDB_CPPFLAGS_INT += -DLEVELDB_PLATFORM_POSIX endif @@ -33,12 +39,8 @@ leveldb_libleveldb_a_CPPFLAGS = $(AM_CPPFLAGS) $(LEVELDB_CPPFLAGS_INT) $(LEVELDB leveldb_libleveldb_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) leveldb_libleveldb_a_SOURCES= -leveldb_libleveldb_a_SOURCES += leveldb/port/atomic_pointer.h -leveldb_libleveldb_a_SOURCES += leveldb/port/port_example.h -leveldb_libleveldb_a_SOURCES += leveldb/port/port_posix.h -leveldb_libleveldb_a_SOURCES += leveldb/port/win/stdint.h +leveldb_libleveldb_a_SOURCES += leveldb/port/port_stdcxx.h leveldb_libleveldb_a_SOURCES += leveldb/port/port.h -leveldb_libleveldb_a_SOURCES += leveldb/port/port_win.h leveldb_libleveldb_a_SOURCES += leveldb/port/thread_annotations.h leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/db.h leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/options.h @@ -47,6 +49,7 @@ leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/filter_policy.h leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/slice.h leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/table_builder.h leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/env.h +leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/export.h leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/c.h leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/iterator.h leveldb_libleveldb_a_SOURCES += leveldb/include/leveldb/cache.h @@ -78,6 +81,7 @@ leveldb_libleveldb_a_SOURCES += leveldb/table/format.h leveldb_libleveldb_a_SOURCES += leveldb/table/iterator_wrapper.h leveldb_libleveldb_a_SOURCES += leveldb/util/crc32c.h leveldb_libleveldb_a_SOURCES += leveldb/util/env_posix_test_helper.h +leveldb_libleveldb_a_SOURCES += leveldb/util/env_windows_test_helper.h leveldb_libleveldb_a_SOURCES += leveldb/util/arena.h leveldb_libleveldb_a_SOURCES += leveldb/util/random.h leveldb_libleveldb_a_SOURCES += leveldb/util/posix_logger.h @@ -87,7 +91,9 @@ leveldb_libleveldb_a_SOURCES += leveldb/util/coding.h leveldb_libleveldb_a_SOURCES += leveldb/util/testutil.h leveldb_libleveldb_a_SOURCES += leveldb/util/mutexlock.h leveldb_libleveldb_a_SOURCES += leveldb/util/logging.h +leveldb_libleveldb_a_SOURCES += leveldb/util/no_destructor.h leveldb_libleveldb_a_SOURCES += leveldb/util/testharness.h +leveldb_libleveldb_a_SOURCES += leveldb/util/windows_logger.h leveldb_libleveldb_a_SOURCES += leveldb/db/builder.cc 
leveldb_libleveldb_a_SOURCES += leveldb/db/c.cc @@ -120,7 +126,6 @@ leveldb_libleveldb_a_SOURCES += leveldb/util/coding.cc leveldb_libleveldb_a_SOURCES += leveldb/util/comparator.cc leveldb_libleveldb_a_SOURCES += leveldb/util/crc32c.cc leveldb_libleveldb_a_SOURCES += leveldb/util/env.cc -leveldb_libleveldb_a_SOURCES += leveldb/util/env_posix.cc leveldb_libleveldb_a_SOURCES += leveldb/util/filter_policy.cc leveldb_libleveldb_a_SOURCES += leveldb/util/hash.cc leveldb_libleveldb_a_SOURCES += leveldb/util/histogram.cc @@ -129,21 +134,12 @@ leveldb_libleveldb_a_SOURCES += leveldb/util/options.cc leveldb_libleveldb_a_SOURCES += leveldb/util/status.cc if TARGET_WINDOWS -leveldb_libleveldb_a_SOURCES += leveldb/util/env_win.cc -leveldb_libleveldb_a_SOURCES += leveldb/port/port_win.cc +leveldb_libleveldb_a_SOURCES += leveldb/util/env_windows.cc else -leveldb_libleveldb_a_SOURCES += leveldb/port/port_posix.cc +leveldb_libleveldb_a_SOURCES += leveldb/util/env_posix.cc endif leveldb_libmemenv_a_CPPFLAGS = $(leveldb_libleveldb_a_CPPFLAGS) leveldb_libmemenv_a_CXXFLAGS = $(leveldb_libleveldb_a_CXXFLAGS) leveldb_libmemenv_a_SOURCES = leveldb/helpers/memenv/memenv.cc leveldb_libmemenv_a_SOURCES += leveldb/helpers/memenv/memenv.h - -leveldb_libleveldb_sse42_a_CPPFLAGS = $(leveldb_libleveldb_a_CPPFLAGS) -leveldb_libleveldb_sse42_a_CXXFLAGS = $(leveldb_libleveldb_a_CXXFLAGS) -if ENABLE_HWCRC32 -leveldb_libleveldb_sse42_a_CPPFLAGS += -DLEVELDB_PLATFORM_POSIX_SSE -leveldb_libleveldb_sse42_a_CXXFLAGS += $(SSE42_CXXFLAGS) -endif -leveldb_libleveldb_sse42_a_SOURCES = leveldb/port/port_posix_sse.cc diff --git a/src/Makefile.test.include b/src/Makefile.test.include index ed81622717..c76f30de8e 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -7,6 +7,7 @@ FUZZ_TARGETS = \ test/fuzz/addr_info_deserialize \ test/fuzz/address_deserialize \ test/fuzz/addrman_deserialize \ + test/fuzz/asmap \ test/fuzz/banentry_deserialize \ test/fuzz/base_encode_decode \ test/fuzz/bech32 \ @@ -54,6 +55,7 @@ FUZZ_TARGETS = \ test/fuzz/script_flags \ test/fuzz/service_deserialize \ test/fuzz/spanparsing \ + test/fuzz/strprintf \ test/fuzz/sub_net_deserialize \ test/fuzz/transaction \ test/fuzz/tx_in \ @@ -254,6 +256,12 @@ test_fuzz_addrman_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON) test_fuzz_addrman_deserialize_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) test_fuzz_addrman_deserialize_SOURCES = $(FUZZ_SUITE) test/fuzz/deserialize.cpp +test_fuzz_asmap_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_asmap_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_asmap_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_asmap_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +test_fuzz_asmap_SOURCES = $(FUZZ_SUITE) test/fuzz/asmap.cpp + test_fuzz_banentry_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DBANENTRY_DESERIALIZE=1 test_fuzz_banentry_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_banentry_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON) @@ -536,6 +544,12 @@ test_fuzz_spanparsing_LDADD = $(FUZZ_SUITE_LD_COMMON) test_fuzz_spanparsing_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) test_fuzz_spanparsing_SOURCES = $(FUZZ_SUITE) test/fuzz/spanparsing.cpp +test_fuzz_strprintf_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) +test_fuzz_strprintf_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) +test_fuzz_strprintf_LDADD = $(FUZZ_SUITE_LD_COMMON) +test_fuzz_strprintf_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +test_fuzz_strprintf_SOURCES = $(FUZZ_SUITE) 
test/fuzz/strprintf.cpp + test_fuzz_sub_net_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DSUB_NET_DESERIALIZE=1 test_fuzz_sub_net_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_sub_net_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON) diff --git a/src/bench/coin_selection.cpp b/src/bench/coin_selection.cpp index de8e2e5e8f..d6d5e67c5b 100644 --- a/src/bench/coin_selection.cpp +++ b/src/bench/coin_selection.cpp @@ -31,7 +31,8 @@ static void CoinSelection(benchmark::State& state) { NodeContext node; auto chain = interfaces::MakeChain(node); - const CWallet wallet(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); + CWallet wallet(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); + wallet.SetupLegacyScriptPubKeyMan(); std::vector<std::unique_ptr<CWalletTx>> wtxs; LOCK(wallet.cs_wallet); @@ -64,7 +65,7 @@ static void CoinSelection(benchmark::State& state) typedef std::set<CInputCoin> CoinSet; static NodeContext testNode; static auto testChain = interfaces::MakeChain(testNode); -static const CWallet testWallet(testChain.get(), WalletLocation(), WalletDatabase::CreateDummy()); +static CWallet testWallet(testChain.get(), WalletLocation(), WalletDatabase::CreateDummy()); std::vector<std::unique_ptr<CWalletTx>> wtxn; // Copied from src/wallet/test/coinselector_tests.cpp @@ -93,6 +94,7 @@ static CAmount make_hard_case(int utxos, std::vector<OutputGroup>& utxo_pool) static void BnBExhaustion(benchmark::State& state) { // Setup + testWallet.SetupLegacyScriptPubKeyMan(); std::vector<OutputGroup> utxo_pool; CoinSet selection; CAmount value_ret = 0; diff --git a/src/bench/wallet_balance.cpp b/src/bench/wallet_balance.cpp index da94afd62b..62568a9da5 100644 --- a/src/bench/wallet_balance.cpp +++ b/src/bench/wallet_balance.cpp @@ -20,6 +20,7 @@ static void WalletBalance(benchmark::State& state, const bool set_dirty, const b std::unique_ptr<interfaces::Chain> chain = interfaces::MakeChain(node); CWallet wallet{chain.get(), WalletLocation(), WalletDatabase::CreateMock()}; { + wallet.SetupLegacyScriptPubKeyMan(); bool first_run; if (wallet.LoadWallet(first_run) != DBErrors::LOAD_OK) assert(false); wallet.handleNotifications(); diff --git a/src/chain.h b/src/chain.h index 48bcb8bfdd..64c016a1d6 100644 --- a/src/chain.h +++ b/src/chain.h @@ -333,12 +333,12 @@ public: SERIALIZE_METHODS(CDiskBlockIndex, obj) { int _nVersion = s.GetVersion(); - if (!(s.GetType() & SER_GETHASH)) READWRITE(VARINT(_nVersion, VarIntMode::NONNEGATIVE_SIGNED)); + if (!(s.GetType() & SER_GETHASH)) READWRITE(VARINT_MODE(_nVersion, VarIntMode::NONNEGATIVE_SIGNED)); - READWRITE(VARINT(obj.nHeight, VarIntMode::NONNEGATIVE_SIGNED)); + READWRITE(VARINT_MODE(obj.nHeight, VarIntMode::NONNEGATIVE_SIGNED)); READWRITE(VARINT(obj.nStatus)); READWRITE(VARINT(obj.nTx)); - if (obj.nStatus & (BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO)) READWRITE(VARINT(obj.nFile, VarIntMode::NONNEGATIVE_SIGNED)); + if (obj.nStatus & (BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO)) READWRITE(VARINT_MODE(obj.nFile, VarIntMode::NONNEGATIVE_SIGNED)); if (obj.nStatus & BLOCK_HAVE_DATA) READWRITE(VARINT(obj.nDataPos)); if (obj.nStatus & BLOCK_HAVE_UNDO) READWRITE(VARINT(obj.nUndoPos)); diff --git a/src/crc32c/.appveyor.yml b/src/crc32c/.appveyor.yml new file mode 100644 index 0000000000..7345746750 --- /dev/null +++ b/src/crc32c/.appveyor.yml @@ -0,0 +1,37 @@ +# Build matrix / environment variables are explained on: +# https://www.appveyor.com/docs/appveyor-yml/ +# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml + 
+version: "{build}" + +environment: + matrix: + # AppVeyor currently has no custom job name feature. + # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs + - JOB: Visual Studio 2017 + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 + CMAKE_GENERATOR: Visual Studio 15 2017 + +platform: + - x86 + - x64 + +configuration: + - RelWithDebInfo + - Debug + +build_script: + - git submodule update --init --recursive + - mkdir build + - cd build + - if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64 + - cmake --version + - cmake .. -G "%CMAKE_GENERATOR%" -DCRC32C_USE_GLOG=0 + -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%" + - cmake --build . --config "%CONFIGURATION%" + - cd .. + +test_script: + - build\%CONFIGURATION%\crc32c_tests.exe + - build\%CONFIGURATION%\crc32c_capi_tests.exe + - build\%CONFIGURATION%\crc32c_bench.exe diff --git a/src/crc32c/.clang-format b/src/crc32c/.clang-format new file mode 100644 index 0000000000..be9b80799f --- /dev/null +++ b/src/crc32c/.clang-format @@ -0,0 +1,3 @@ +--- +Language: Cpp +BasedOnStyle: Google diff --git a/src/crc32c/.clang_complete b/src/crc32c/.clang_complete new file mode 100644 index 0000000000..fa6757c6f3 --- /dev/null +++ b/src/crc32c/.clang_complete @@ -0,0 +1,8 @@ +-Ibuild/include/ +-Ibuild/third_party/glog/ +-Iinclude/ +-Ithird_party/benchmark/include/ +-Ithird_party/googletest/googletest/include/ +-Ithird_party/googletest/googlemock/include/ +-Ithird_party/glog/src/ +-std=c++11 diff --git a/src/crc32c/.gitignore b/src/crc32c/.gitignore new file mode 100644 index 0000000000..61769727e3 --- /dev/null +++ b/src/crc32c/.gitignore @@ -0,0 +1,8 @@ +# Editors. +*.sw* +.DS_Store +/.vscode + +# Build directory. +build/ +out/ diff --git a/src/crc32c/.gitmodules b/src/crc32c/.gitmodules new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/src/crc32c/.gitmodules diff --git a/src/crc32c/.travis.yml b/src/crc32c/.travis.yml new file mode 100644 index 0000000000..d990a89f07 --- /dev/null +++ b/src/crc32c/.travis.yml @@ -0,0 +1,76 @@ +# Build matrix / environment variables are explained on: +# http://about.travis-ci.org/docs/user/build-configuration/ +# This file can be validated on: http://lint.travis-ci.org/ + +language: cpp +dist: bionic +osx_image: xcode10.3 + +compiler: +- gcc +- clang +os: +- linux +- osx + +env: +- GLOG=1 SHARED_LIB=0 BUILD_TYPE=Debug +- GLOG=1 SHARED_LIB=0 BUILD_TYPE=RelWithDebInfo +- GLOG=0 SHARED_LIB=0 BUILD_TYPE=Debug +- GLOG=0 SHARED_LIB=0 BUILD_TYPE=RelWithDebInfo +- GLOG=0 SHARED_LIB=1 BUILD_TYPE=Debug +- GLOG=0 SHARED_LIB=1 BUILD_TYPE=RelWithDebInfo + +addons: + apt: + sources: + - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main' + key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key' + - sourceline: 'ppa:ubuntu-toolchain-r/test' + packages: + - clang-9 + - cmake + - gcc-9 + - g++-9 + - ninja-build + homebrew: + packages: + - cmake + - gcc@9 + - llvm@9 + - ninja + update: true + +install: +# The following Homebrew packages aren't linked by default, and need to be +# prepended to the path explicitly. +- if [ "$TRAVIS_OS_NAME" = "osx" ]; then + export PATH="$(brew --prefix llvm)/bin:$PATH"; + fi +# /usr/bin/gcc points to an older compiler on both Linux and macOS. +- if [ "$CXX" = "g++" ]; then export CXX="g++-9" CC="gcc-9"; fi +# /usr/bin/clang points to an older compiler on both Linux and macOS. +# +# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values +# below don't work on macOS. 
Fortunately, the path change above makes the +# default values (clang and clang++) resolve to the correct compiler on macOS. +- if [ "$TRAVIS_OS_NAME" = "linux" ]; then + if [ "$CXX" = "clang++" ]; then export CXX="clang++-9" CC="clang-9"; fi; + fi +- echo ${CC} +- echo ${CXX} +- ${CXX} --version +- cmake --version + +before_script: +- mkdir -p build && cd build +- cmake .. -G Ninja -DCRC32C_USE_GLOG=$GLOG -DCMAKE_BUILD_TYPE=$BUILD_TYPE + -DBUILD_SHARED_LIBS=$SHARED_LIB -DCMAKE_INSTALL_PREFIX=$HOME/.local +- cmake --build . +- cd .. + +script: +- build/crc32c_tests +- build/crc32c_capi_tests +- build/crc32c_bench +- cd build && cmake --build . --target install diff --git a/src/crc32c/.ycm_extra_conf.py b/src/crc32c/.ycm_extra_conf.py new file mode 100644 index 0000000000..536aadcec8 --- /dev/null +++ b/src/crc32c/.ycm_extra_conf.py @@ -0,0 +1,142 @@ +# Copyright 2017 The CRC32C Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +"""YouCompleteMe configuration that interprets a .clang_complete file. + +This module implementes the YouCompleteMe configuration API documented at: +https://github.com/Valloric/ycmd#ycm_extra_confpy-specification + +The implementation loads and processes a .clang_complete file, documented at: +https://github.com/Rip-Rip/clang_complete/blob/master/README.md +""" + +import os + +# Flags added to the list in .clang_complete. +BASE_FLAGS = [ + '-Werror', # Unlike clang_complete, YCM can also be used as a linter. + '-DUSE_CLANG_COMPLETER', # YCM needs this. + '-xc++', # YCM needs this to avoid compiling headers as C code. +] + +# Clang flags that take in paths. +# See https://clang.llvm.org/docs/ClangCommandLineReference.html +PATH_FLAGS = [ + '-isystem', + '-I', + '-iquote', + '--sysroot=' +] + + +def DirectoryOfThisScript(): + """Returns the absolute path to the directory containing this script.""" + return os.path.dirname(os.path.abspath(__file__)) + + +def MakeRelativePathsInFlagsAbsolute(flags, build_root): + """Expands relative paths in a list of Clang command-line flags. + + Args: + flags: The list of flags passed to Clang. + build_root: The current directory when running the Clang compiler. Should be + an absolute path. + + Returns: + A list of flags with relative paths replaced by absolute paths. + """ + new_flags = [] + make_next_absolute = False + for flag in flags: + new_flag = flag + + if make_next_absolute: + make_next_absolute = False + if not flag.startswith('/'): + new_flag = os.path.join(build_root, flag) + + for path_flag in PATH_FLAGS: + if flag == path_flag: + make_next_absolute = True + break + + if flag.startswith(path_flag): + path = flag[len(path_flag):] + new_flag = path_flag + os.path.join(build_root, path) + break + + if new_flag: + new_flags.append(new_flag) + return new_flags + + +def FindNearest(target, path, build_root): + """Looks for a file with a specific name closest to a project path. + + This is similar to the logic used by a version-control system (like git) to + find its configuration directory (.git) based on the current directory when a + command is invoked. + + Args: + target: The file name to search for. + path: The directory where the search starts. The search will explore the + given directory's ascendants using the parent relationship. Should be an + absolute path. + build_root: A directory that acts as a fence for the search. If the search + reaches this directory, it will not advance to its parent. Should be an + absolute path. 
+ + Returns: + The path to a file with the desired name. None if the search failed. + """ + candidate = os.path.join(path, target) + if os.path.isfile(candidate): + return candidate + + if path == build_root: + return None + + parent = os.path.dirname(path) + if parent == path: + return None + + return FindNearest(target, parent, build_root) + + +def FlagsForClangComplete(file_path, build_root): + """Reads the .clang_complete flags for a source file. + + Args: + file_path: The path to the source file. Should be inside the project. Used + to locate the relevant .clang_complete file. + build_root: The current directory when running the Clang compiler for this + file. Should be an absolute path. + + Returns: + A list of strings, where each element is a Clang command-line flag. + """ + clang_complete_path = FindNearest('.clang_complete', file_path, build_root) + if clang_complete_path is None: + return None + clang_complete_flags = open(clang_complete_path, 'r').read().splitlines() + return clang_complete_flags + + +def FlagsForFile(filename, **kwargs): + """Implements the YouCompleteMe API.""" + + # kwargs can be used to pass 'client_data' to the YCM configuration. This + # configuration script does not need any extra information, so + # pylint: disable=unused-argument + + build_root = DirectoryOfThisScript() + file_path = os.path.realpath(filename) + + flags = BASE_FLAGS + clang_flags = FlagsForClangComplete(file_path, build_root) + if clang_flags: + flags += clang_flags + + final_flags = MakeRelativePathsInFlagsAbsolute(flags, build_root) + + return {'flags': final_flags} diff --git a/src/crc32c/AUTHORS b/src/crc32c/AUTHORS new file mode 100644 index 0000000000..6f1f6871a6 --- /dev/null +++ b/src/crc32c/AUTHORS @@ -0,0 +1,9 @@ +# This is the list of CRC32C authors for copyright purposes. +# +# This does not necessarily list everyone who has contributed code, since in +# some cases, their employer may be the copyright holder. To see the full list +# of contributors, see the revision history in source control. +Google Inc. + +Fangming Fang <Fangming.Fang@arm.com> +Vadim Skipin <vadim.skipin@gmail.com> diff --git a/src/crc32c/CMakeLists.txt b/src/crc32c/CMakeLists.txt new file mode 100644 index 0000000000..111a3e3614 --- /dev/null +++ b/src/crc32c/CMakeLists.txt @@ -0,0 +1,423 @@ +# Copyright 2017 The CRC32C Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. See the AUTHORS file for names of contributors. + +cmake_minimum_required(VERSION 3.1) +project(Crc32c VERSION 1.1.0 LANGUAGES C CXX) + +# This project can use C11, but will gracefully decay down to C89. +set(CMAKE_C_STANDARD 11) +set(CMAKE_C_STANDARD_REQUIRED OFF) +set(CMAKE_C_EXTENSIONS OFF) + +# This project requires C++11. +set(CMAKE_CXX_STANDARD 11) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS OFF) + +# https://github.com/izenecloud/cmake/blob/master/SetCompilerWarningAll.cmake +if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + # Use the highest warning level for Visual Studio. + set(CMAKE_CXX_WARNING_LEVEL 4) + if(CMAKE_CXX_FLAGS MATCHES "/W[0-4]") + string(REGEX REPLACE "/W[0-4]" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + else(CMAKE_CXX_FLAGS MATCHES "/W[0-4]") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4") + endif(CMAKE_CXX_FLAGS MATCHES "/W[0-4]") + + # Disable C++ exceptions. 
+ string(REGEX REPLACE "/EH[a-z]+" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHs-c-") + add_definitions(-D_HAS_EXCEPTIONS=0) + + # Disable RTTI. + string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-") +else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + # Use -Wall for clang and gcc. + if(NOT CMAKE_CXX_FLAGS MATCHES "-Wall") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall") + endif(NOT CMAKE_CXX_FLAGS MATCHES "-Wall") + + # Use -Wextra for clang and gcc. + if(NOT CMAKE_CXX_FLAGS MATCHES "-Wextra") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra") + endif(NOT CMAKE_CXX_FLAGS MATCHES "-Wextra") + + # Use -Werror for clang and gcc. + if(NOT CMAKE_CXX_FLAGS MATCHES "-Werror") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") + endif(NOT CMAKE_CXX_FLAGS MATCHES "-Werror") + + # Disable C++ exceptions. + string(REGEX REPLACE "-fexceptions" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions") + + # Disable RTTI. + string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") +endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + +option(CRC32C_BUILD_TESTS "Build CRC32C's unit tests" ON) +option(CRC32C_BUILD_BENCHMARKS "Build CRC32C's benchmarks" ON) +option(CRC32C_USE_GLOG "Build CRC32C's tests with Google Logging" ON) +option(CRC32C_INSTALL "Install CRC32C's header and library" ON) + +include(TestBigEndian) +test_big_endian(BYTE_ORDER_BIG_ENDIAN) + +include(CheckCXXCompilerFlag) +# Used by glog. +check_cxx_compiler_flag(-Wno-deprecated CRC32C_HAVE_NO_DEPRECATED) +# Used by glog. +check_cxx_compiler_flag(-Wno-sign-compare CRC32C_HAVE_NO_SIGN_COMPARE) +# Used by glog. +check_cxx_compiler_flag(-Wno-unused-parameter CRC32C_HAVE_NO_UNUSED_PARAMETER) +# Used by googletest. +check_cxx_compiler_flag(-Wno-missing-field-initializers + CRC32C_HAVE_NO_MISSING_FIELD_INITIALIZERS) + +# Check for __builtin_prefetch support in the compiler. +include(CheckCXXSourceCompiles) +check_cxx_source_compiles(" +int main() { + char data = 0; + const char* address = &data; + __builtin_prefetch(address, 0, 0); + return 0; +} +" HAVE_BUILTIN_PREFETCH) + +# Check for _mm_prefetch support in the compiler. +include(CheckCXXSourceCompiles) +check_cxx_source_compiles(" +#if defined(_MSC_VER) +#include <intrin.h> +#else // !defined(_MSC_VER) +#include <xmmintrin.h> +#endif // defined(_MSC_VER) + +int main() { + char data = 0; + const char* address = &data; + _mm_prefetch(address, _MM_HINT_NTA); + return 0; +} +" HAVE_MM_PREFETCH) + +# Check for SSE4.2 support in the compiler. +set(OLD_CMAKE_REQURED_FLAGS ${CMAKE_REQUIRED_FLAGS}) +if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} /arch:AVX") +else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -msse4.2") +endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") +check_cxx_source_compiles(" +#if defined(_MSC_VER) +#include <intrin.h> +#else // !defined(_MSC_VER) +#include <cpuid.h> +#include <nmmintrin.h> +#endif // defined(_MSC_VER) + +int main() { + _mm_crc32_u8(0, 0); _mm_crc32_u32(0, 0); +#if defined(_M_X64) || defined(__x86_64__) + _mm_crc32_u64(0, 0); +#endif // defined(_M_X64) || defined(__x86_64__) + return 0; +} +" HAVE_SSE42) +set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQURED_FLAGS}) + +# Check for ARMv8 w/ CRC and CRYPTO extensions support in the compiler. 
+set(OLD_CMAKE_REQURED_FLAGS ${CMAKE_REQUIRED_FLAGS}) +if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + # TODO(pwnall): Insert correct flag when VS gets ARM CRC32C support. + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} /arch:NOTYET") +else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -march=armv8-a+crc+crypto") +endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") +check_cxx_source_compiles(" +#include <arm_acle.h> +#include <arm_neon.h> + +int main() { + __crc32cb(0, 0); __crc32ch(0, 0); __crc32cw(0, 0); __crc32cd(0, 0); + vmull_p64(0, 0); + return 0; +} +" HAVE_ARM64_CRC32C) +set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQURED_FLAGS}) + +# Check for strong getauxval() support in the system headers. +check_cxx_source_compiles(" +#include <arm_acle.h> +#include <arm_neon.h> +#include <sys/auxv.h> + +int main() { + getauxval(AT_HWCAP); + return 0; +} +" HAVE_STRONG_GETAUXVAL) + +# Check for weak getauxval() support in the compiler. +check_cxx_source_compiles(" +unsigned long getauxval(unsigned long type) __attribute__((weak)); +#define AT_HWCAP 16 + +int main() { + getauxval(AT_HWCAP); + return 0; +} +" HAVE_WEAK_GETAUXVAL) + +if(CRC32C_USE_GLOG) + # glog requires this setting to avoid using dynamic_cast. + set(DISABLE_RTTI ON CACHE BOOL "" FORCE) + + # glog's test targets trigger deprecation warnings, and compiling them burns + # CPU cycles on the CI. + set(BUILD_TESTING_SAVED "${BUILD_TESTING}") + set(BUILD_TESTING OFF CACHE BOOL "" FORCE) + add_subdirectory("third_party/glog" EXCLUDE_FROM_ALL) + set(BUILD_TESTING "${BUILD_TESTING_SAVED}" CACHE BOOL "" FORCE) + + # glog triggers deprecation warnings on OSX. + # https://github.com/google/glog/issues/185 + if(CRC32C_HAVE_NO_DEPRECATED) + set_property(TARGET glog APPEND PROPERTY COMPILE_OPTIONS -Wno-deprecated) + endif(CRC32C_HAVE_NO_DEPRECATED) + + # glog triggers sign comparison warnings on gcc. + if(CRC32C_HAVE_NO_SIGN_COMPARE) + set_property(TARGET glog APPEND PROPERTY COMPILE_OPTIONS -Wno-sign-compare) + endif(CRC32C_HAVE_NO_SIGN_COMPARE) + + # glog triggers unused parameter warnings on clang. + if(CRC32C_HAVE_NO_UNUSED_PARAMETER) + set_property(TARGET glog + APPEND PROPERTY COMPILE_OPTIONS -Wno-unused-parameter) + endif(CRC32C_HAVE_NO_UNUSED_PARAMETER) + + set(CRC32C_TESTS_BUILT_WITH_GLOG 1) +endif(CRC32C_USE_GLOG) + +configure_file( + "src/crc32c_config.h.in" + "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h" +) + +include_directories("${PROJECT_BINARY_DIR}/include") + +# ARM64 CRC32C code is built separately, so we don't accidentally compile +# unsupported instructions into code that gets run without ARM32 support. +add_library(crc32c_arm64 OBJECT "") +target_sources(crc32c_arm64 + PRIVATE + "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h" + "src/crc32c_arm64.cc" + "src/crc32c_arm64.h" +) +if(HAVE_ARM64_CRC32C) + if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + # TODO(pwnall): Insert correct flag when VS gets ARM64 CRC32C support. + target_compile_options(crc32c_arm64 PRIVATE "/arch:NOTYET") + else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + target_compile_options(crc32c_arm64 PRIVATE "-march=armv8-a+crc+crypto") + endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") +endif(HAVE_ARM64_CRC32C) + +# CMake only enables PIC by default in SHARED and MODULE targets. 
+if(BUILD_SHARED_LIBS) + set_property(TARGET crc32c_arm64 PROPERTY POSITION_INDEPENDENT_CODE TRUE) +endif(BUILD_SHARED_LIBS) + +# SSE4.2 code is built separately, so we don't accidentally compile unsupported +# instructions into code that gets run without SSE4.2 support. +add_library(crc32c_sse42 OBJECT "") +target_sources(crc32c_sse42 + PRIVATE + "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h" + "src/crc32c_sse42.cc" + "src/crc32c_sse42.h" +) +if(HAVE_SSE42) + if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + target_compile_options(crc32c_sse42 PRIVATE "/arch:AVX") + else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + target_compile_options(crc32c_sse42 PRIVATE "-msse4.2") + endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") +endif(HAVE_SSE42) + +# CMake only enables PIC by default in SHARED and MODULE targets. +if(BUILD_SHARED_LIBS) + set_property(TARGET crc32c_sse42 PROPERTY POSITION_INDEPENDENT_CODE TRUE) +endif(BUILD_SHARED_LIBS) + +# Must be included before CMAKE_INSTALL_INCLUDEDIR is used. +include(GNUInstallDirs) + +add_library(crc32c "" + # TODO(pwnall): Move the TARGET_OBJECTS generator expressions to the PRIVATE + # section of target_sources when cmake_minimum_required becomes 3.9 or above. + $<TARGET_OBJECTS:crc32c_arm64> + $<TARGET_OBJECTS:crc32c_sse42> +) +target_sources(crc32c + PRIVATE + "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h" + "src/crc32c_arm64.h" + "src/crc32c_arm64_linux_check.h" + "src/crc32c_internal.h" + "src/crc32c_portable.cc" + "src/crc32c_prefetch.h" + "src/crc32c_read_le.h" + "src/crc32c_round_up.h" + "src/crc32c_sse42.h" + "src/crc32c_sse42_check.h" + "src/crc32c.cc" + + # Only CMake 3.3+ supports PUBLIC sources in targets exported by "install". + $<$<VERSION_GREATER:CMAKE_VERSION,3.2>:PUBLIC> + "include/crc32c/crc32c.h" +) + +target_include_directories(crc32c + PUBLIC + $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include> + $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}> +) + +target_compile_definitions(crc32c +PRIVATE + CRC32C_HAVE_CONFIG_H=1 +) + +set_target_properties(crc32c + PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR}) + +# Warnings as errors in Visual Studio for this project's targets. +if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + set_property(TARGET crc32c APPEND PROPERTY COMPILE_OPTIONS "/WX") + set_property(TARGET crc32c_arm64 APPEND PROPERTY COMPILE_OPTIONS "/WX") + set_property(TARGET crc32c_sse42 APPEND PROPERTY COMPILE_OPTIONS "/WX") +endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + +if(CRC32C_BUILD_TESTS) + enable_testing() + + # Prevent overriding the parent project's compiler/linker settings on Windows. + set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + set(install_gtest OFF) + set(install_gmock OFF) + + # This project is tested using GoogleTest. + add_subdirectory("third_party/googletest") + + # GoogleTest triggers a missing field initializers warning. 
+ if(CRC32C_HAVE_NO_MISSING_FIELD_INITIALIZERS) + set_property(TARGET gtest + APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers) + set_property(TARGET gmock + APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers) + endif(CRC32C_HAVE_NO_MISSING_FIELD_INITIALIZERS) + + add_executable(crc32c_tests "") + target_sources(crc32c_tests + PRIVATE + "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h" + "src/crc32c_arm64_unittest.cc" + "src/crc32c_extend_unittests.h" + "src/crc32c_portable_unittest.cc" + "src/crc32c_prefetch_unittest.cc" + "src/crc32c_read_le_unittest.cc" + "src/crc32c_round_up_unittest.cc" + "src/crc32c_sse42_unittest.cc" + "src/crc32c_unittest.cc" + "src/crc32c_test_main.cc" + ) + target_link_libraries(crc32c_tests crc32c gtest) + + # Warnings as errors in Visual Studio for this project's targets. + if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + set_property(TARGET crc32c_tests APPEND PROPERTY COMPILE_OPTIONS "/WX") + endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + + if(CRC32C_USE_GLOG) + target_link_libraries(crc32c_tests glog) + endif(CRC32C_USE_GLOG) + + add_test(NAME crc32c_tests COMMAND crc32c_tests) + + add_executable(crc32c_capi_tests "") + target_sources(crc32c_capi_tests + PRIVATE + "src/crc32c_capi_unittest.c" + ) + target_link_libraries(crc32c_capi_tests crc32c) + + # Warnings as errors in Visual Studio for this project's targets. + if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + set_property(TARGET crc32c_capi_tests APPEND PROPERTY COMPILE_OPTIONS "/WX") + endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + + add_test(NAME crc32c_capi_tests COMMAND crc32c_capi_tests) +endif(CRC32C_BUILD_TESTS) + +if(CRC32C_BUILD_BENCHMARKS) + add_executable(crc32c_bench "") + target_sources(crc32c_bench + PRIVATE + "${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h" + "src/crc32c_benchmark.cc" + ) + target_link_libraries(crc32c_bench crc32c) + + # This project uses Google benchmark for benchmarking. + set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE) + set(BENCHMARK_ENABLE_EXCEPTIONS OFF CACHE BOOL "" FORCE) + add_subdirectory("third_party/benchmark") + target_link_libraries(crc32c_bench benchmark) + + if(CRC32C_USE_GLOG) + target_link_libraries(crc32c_bench glog) + endif(CRC32C_USE_GLOG) + + # Warnings as errors in Visual Studio for this project's targets. 
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + set_property(TARGET crc32c_bench APPEND PROPERTY COMPILE_OPTIONS "/WX") + endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") +endif(CRC32C_BUILD_BENCHMARKS) + +if(CRC32C_INSTALL) + install(TARGETS crc32c + EXPORT Crc32cTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + ) + install( + FILES + "include/crc32c/crc32c.h" + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/crc32c" + ) + + include(CMakePackageConfigHelpers) + write_basic_package_version_file( + "${PROJECT_BINARY_DIR}/Crc32cConfigVersion.cmake" + COMPATIBILITY SameMajorVersion + ) + install( + EXPORT Crc32cTargets + NAMESPACE Crc32c:: + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/Crc32c" + ) + install( + FILES + "Crc32cConfig.cmake" + "${PROJECT_BINARY_DIR}/Crc32cConfigVersion.cmake" + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/Crc32c" + ) +endif(CRC32C_INSTALL) diff --git a/src/crc32c/CONTRIBUTING.md b/src/crc32c/CONTRIBUTING.md new file mode 100644 index 0000000000..ae319c70ac --- /dev/null +++ b/src/crc32c/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution, +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to <https://cla.developers.google.com/> to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. diff --git a/src/crc32c/Crc32cConfig.cmake b/src/crc32c/Crc32cConfig.cmake new file mode 100644 index 0000000000..4d6057ec26 --- /dev/null +++ b/src/crc32c/Crc32cConfig.cmake @@ -0,0 +1,5 @@ +# Copyright 2017 The CRC32C Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. See the AUTHORS file for names of contributors. + +include("${CMAKE_CURRENT_LIST_DIR}/Crc32cTargets.cmake") diff --git a/src/crc32c/LICENSE b/src/crc32c/LICENSE new file mode 100644 index 0000000000..8c8735cf12 --- /dev/null +++ b/src/crc32c/LICENSE @@ -0,0 +1,28 @@ +Copyright 2017, The CRC32C Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/crc32c/README.md b/src/crc32c/README.md new file mode 100644 index 0000000000..0bd69f7f09 --- /dev/null +++ b/src/crc32c/README.md @@ -0,0 +1,125 @@ +# CRC32C + +[![Build Status](https://travis-ci.org/google/crc32c.svg?branch=master)](https://travis-ci.org/google/crc32c) +[![Build Status](https://ci.appveyor.com/api/projects/status/moiq7331pett4xuj/branch/master?svg=true)](https://ci.appveyor.com/project/pwnall/crc32c) + +New file format authors should consider +[HighwayHash](https://github.com/google/highwayhash). The initial version of +this code was extracted from [LevelDB](https://github.com/google/leveldb), which +is a stable key-value store that is widely used at Google. + +This project collects a few CRC32C implementations under an umbrella that +dispatches to a suitable implementation based on the host computer's hardware +capabilities. + +CRC32C is specified as the CRC that uses the iSCSI polynomial in +[RFC 3720](https://tools.ietf.org/html/rfc3720#section-12.1). The polynomial was +introduced by G. Castagnoli, S. Braeuer and M. Herrmann. CRC32C is used in +software such as Btrfs, ext4, Ceph and leveldb. + + +## Usage + +```cpp +#include "crc32c/crc32c.h" + +int main() { + const std::uint8_t buffer[] = {0, 0, 0, 0}; + std::uint32_t result; + + // Process a raw buffer. + result = crc32c::Crc32c(buffer, 4); + + // Process a std::string. + std::string string; + string.resize(4); + result = crc32c::Crc32c(string); + + // If you have C++17 support, process a std::string_view. + std::string_view string_view(string); + result = crc32c::Crc32c(string_view); + + return 0; +} +``` + + +## Prerequisites + +This project uses [CMake](https://cmake.org/) for building and testing. CMake is +available in all popular Linux distributions, as well as in +[Homebrew](https://brew.sh/). + +This project uses submodules for dependency management. + +```bash +git submodule update --init --recursive +``` + +If you're using [Atom](https://atom.io/), the following packages can help. + +```bash +apm install autocomplete-clang build build-cmake clang-format language-cmake \ + linter linter-clang +``` + +If you don't mind more setup in return for more speed, replace +`autocomplete-clang` and `linter-clang` with `you-complete-me`. This requires +[setting up ycmd](https://github.com/Valloric/ycmd#building). + +```bash +apm install autocomplete-plus build build-cmake clang-format language-cmake \ + linter you-complete-me +``` + +## Building + +The following commands build and install the project. + +```bash +mkdir build +cd build +cmake -DCRC32C_BUILD_TESTS=0 -DCRC32C_BUILD_BENCHMARKS=0 .. && make all install +``` + + +## Development + +The following command (when executed from `build/`) (re)builds the project and +runs the tests. 
+ +```bash +cmake .. && cmake --build . && ctest --output-on-failure +``` + + +### Android testing + +The following command builds the project against the Android NDK, which is +useful for benchmarking against ARM processors. + +```bash +cmake .. -DCMAKE_SYSTEM_NAME=Android -DCMAKE_ANDROID_ARCH_ABI=arm64-v8a \ + -DCMAKE_ANDROID_NDK=$HOME/Library/Android/sdk/ndk-bundle \ + -DCMAKE_ANDROID_NDK_TOOLCHAIN_VERSION=clang \ + -DCMAKE_ANDROID_STL_TYPE=c++_static -DCRC32C_USE_GLOG=0 \ + -DCMAKE_BUILD_TYPE=Release && cmake --build . +``` + +The following commands install and run the benchmarks. + +```bash +adb push crc32c_bench /data/local/tmp +adb shell chmod +x /data/local/tmp/crc32c_bench +adb shell 'cd /data/local/tmp && ./crc32c_bench' +adb shell rm /data/local/tmp/crc32c_bench +``` + +The following commands install and run the tests. + +```bash +adb push crc32c_tests /data/local/tmp +adb shell chmod +x /data/local/tmp/crc32c_tests +adb shell 'cd /data/local/tmp && ./crc32c_tests' +adb shell rm /data/local/tmp/crc32c_tests +``` diff --git a/src/crc32c/include/crc32c/crc32c.h b/src/crc32c/include/crc32c/crc32c.h new file mode 100644 index 0000000000..e8a78170a9 --- /dev/null +++ b/src/crc32c/include/crc32c/crc32c.h @@ -0,0 +1,89 @@ +/* Copyright 2017 The CRC32C Authors. All rights reserved. + Use of this source code is governed by a BSD-style license that can be + found in the LICENSE file. See the AUTHORS file for names of contributors. */ + +#ifndef CRC32C_CRC32C_H_ +#define CRC32C_CRC32C_H_ + +/* The API exported by the CRC32C project. */ + +#if defined(__cplusplus) + +#include <cstddef> +#include <cstdint> +#include <string> + +#else /* !defined(__cplusplus) */ + +#include <stddef.h> +#include <stdint.h> + +#endif /* !defined(__cplusplus) */ + + +/* The C API. */ + +#if defined(__cplusplus) +extern "C" { +#endif /* defined(__cplusplus) */ + +/* Extends "crc" with the CRC32C of "count" bytes in the buffer pointed by + "data" */ +uint32_t crc32c_extend(uint32_t crc, const uint8_t* data, size_t count); + +/* Computes the CRC32C of "count" bytes in the buffer pointed by "data". */ +uint32_t crc32c_value(const uint8_t* data, size_t count); + +#ifdef __cplusplus +} /* end extern "C" */ +#endif /* defined(__cplusplus) */ + + +/* The C++ API. */ + +#if defined(__cplusplus) + +namespace crc32c { + +// Extends "crc" with the CRC32C of "count" bytes in the buffer pointed by +// "data". +uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count); + +// Computes the CRC32C of "count" bytes in the buffer pointed by "data". +inline uint32_t Crc32c(const uint8_t* data, size_t count) { + return Extend(0, data, count); +} + +// Computes the CRC32C of "count" bytes in the buffer pointed by "data". +inline uint32_t Crc32c(const char* data, size_t count) { + return Extend(0, reinterpret_cast<const uint8_t*>(data), count); +} + +// Computes the CRC32C of the string's content. +inline uint32_t Crc32c(const std::string& string) { + return Crc32c(reinterpret_cast<const uint8_t*>(string.data()), + string.size()); +} + +} // namespace crc32c + +#if __cplusplus > 201402L +#if __has_include(<string_view>) +#include <string_view> + +namespace crc32c { + +// Computes the CRC32C of the bytes in the string_view. 
+inline uint32_t Crc32c(const std::string_view& string_view) { + return Crc32c(reinterpret_cast<const uint8_t*>(string_view.data()), + string_view.size()); +} + +} // namespace crc32c + +#endif // __has_include(<string_view>) +#endif // __cplusplus > 201402L + +#endif /* defined(__cplusplus) */ + +#endif // CRC32C_CRC32C_H_ diff --git a/src/crc32c/src/crc32c.cc b/src/crc32c/src/crc32c.cc new file mode 100644 index 0000000000..4d3018af47 --- /dev/null +++ b/src/crc32c/src/crc32c.cc @@ -0,0 +1,39 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "crc32c/crc32c.h" + +#include <cstddef> +#include <cstdint> + +#include "./crc32c_arm64.h" +#include "./crc32c_arm64_linux_check.h" +#include "./crc32c_internal.h" +#include "./crc32c_sse42.h" +#include "./crc32c_sse42_check.h" + +namespace crc32c { + +uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) { +#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__)) + static bool can_use_sse42 = CanUseSse42(); + if (can_use_sse42) return ExtendSse42(crc, data, count); +#elif HAVE_ARM64_CRC32C + static bool can_use_arm_linux = CanUseArm64Linux(); + if (can_use_arm_linux) return ExtendArm64(crc, data, count); +#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__)) + + return ExtendPortable(crc, data, count); +} + +extern "C" uint32_t crc32c_extend(uint32_t crc, const uint8_t* data, + size_t count) { + return crc32c::Extend(crc, data, count); +} + +extern "C" uint32_t crc32c_value(const uint8_t* data, size_t count) { + return crc32c::Crc32c(data, count); +} + +} // namespace crc32c diff --git a/src/crc32c/src/crc32c_arm64.cc b/src/crc32c/src/crc32c_arm64.cc new file mode 100644 index 0000000000..b872245f95 --- /dev/null +++ b/src/crc32c/src/crc32c_arm64.cc @@ -0,0 +1,126 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "./crc32c_arm64.h" + +// In a separate source file to allow this accelerated CRC32C function to be +// compiled with the appropriate compiler flags to enable ARM NEON CRC32C +// instructions. + +// This implementation is based on https://github.com/google/leveldb/pull/490. 
+ +#include <cstddef> +#include <cstdint> + +#include "./crc32c_internal.h" +#ifdef CRC32C_HAVE_CONFIG_H +#include "crc32c/crc32c_config.h" +#endif + +#if HAVE_ARM64_CRC32C + +#include <arm_acle.h> +#include <arm_neon.h> + +#define KBYTES 1032 +#define SEGMENTBYTES 256 + +// compute 8bytes for each segment parallelly +#define CRC32C32BYTES(P, IND) \ + do { \ + crc1 = __crc32cd( \ + crc1, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 1 + (IND))); \ + crc2 = __crc32cd( \ + crc2, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 2 + (IND))); \ + crc3 = __crc32cd( \ + crc3, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 3 + (IND))); \ + crc0 = __crc32cd( \ + crc0, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 0 + (IND))); \ + } while (0); + +// compute 8*8 bytes for each segment parallelly +#define CRC32C256BYTES(P, IND) \ + do { \ + CRC32C32BYTES((P), (IND)*8 + 0) \ + CRC32C32BYTES((P), (IND)*8 + 1) \ + CRC32C32BYTES((P), (IND)*8 + 2) \ + CRC32C32BYTES((P), (IND)*8 + 3) \ + CRC32C32BYTES((P), (IND)*8 + 4) \ + CRC32C32BYTES((P), (IND)*8 + 5) \ + CRC32C32BYTES((P), (IND)*8 + 6) \ + CRC32C32BYTES((P), (IND)*8 + 7) \ + } while (0); + +// compute 4*8*8 bytes for each segment parallelly +#define CRC32C1024BYTES(P) \ + do { \ + CRC32C256BYTES((P), 0) \ + CRC32C256BYTES((P), 1) \ + CRC32C256BYTES((P), 2) \ + CRC32C256BYTES((P), 3) \ + (P) += 4 * SEGMENTBYTES; \ + } while (0) + +namespace crc32c { + +uint32_t ExtendArm64(uint32_t crc, const uint8_t *buf, size_t size) { + int64_t length = size; + uint32_t crc0, crc1, crc2, crc3; + uint64_t t0, t1, t2; + + // k0=CRC(x^(3*SEGMENTBYTES*8)), k1=CRC(x^(2*SEGMENTBYTES*8)), + // k2=CRC(x^(SEGMENTBYTES*8)) + const poly64_t k0 = 0x8d96551c, k1 = 0xbd6f81f8, k2 = 0xdcb17aa4; + + crc = crc ^ kCRC32Xor; + const uint8_t *p = reinterpret_cast<const uint8_t *>(buf); + + while (length >= KBYTES) { + crc0 = crc; + crc1 = 0; + crc2 = 0; + crc3 = 0; + + // Process 1024 bytes in parallel. + CRC32C1024BYTES(p); + + // Merge the 4 partial CRC32C values. + t2 = (uint64_t)vmull_p64(crc2, k2); + t1 = (uint64_t)vmull_p64(crc1, k1); + t0 = (uint64_t)vmull_p64(crc0, k0); + crc = __crc32cd(crc3, *(uint64_t *)p); + p += sizeof(uint64_t); + crc ^= __crc32cd(0, t2); + crc ^= __crc32cd(0, t1); + crc ^= __crc32cd(0, t0); + + length -= KBYTES; + } + + while (length >= 8) { + crc = __crc32cd(crc, *(uint64_t *)p); + p += 8; + length -= 8; + } + + if (length & 4) { + crc = __crc32cw(crc, *(uint32_t *)p); + p += 4; + } + + if (length & 2) { + crc = __crc32ch(crc, *(uint16_t *)p); + p += 2; + } + + if (length & 1) { + crc = __crc32cb(crc, *p); + } + + return crc ^ kCRC32Xor; +} + +} // namespace crc32c + +#endif // HAVE_ARM64_CRC32C diff --git a/src/crc32c/src/crc32c_arm64.h b/src/crc32c/src/crc32c_arm64.h new file mode 100644 index 0000000000..100cd56ec8 --- /dev/null +++ b/src/crc32c/src/crc32c_arm64.h @@ -0,0 +1,27 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +// Linux-specific code checking the availability for ARM CRC32C instructions. 
+ +#ifndef CRC32C_CRC32C_ARM_LINUX_H_ +#define CRC32C_CRC32C_ARM_LINUX_H_ + +#include <cstddef> +#include <cstdint> + +#ifdef CRC32C_HAVE_CONFIG_H +#include "crc32c/crc32c_config.h" +#endif + +#if HAVE_ARM64_CRC32C + +namespace crc32c { + +uint32_t ExtendArm64(uint32_t crc, const uint8_t* data, size_t count); + +} // namespace crc32c + +#endif // HAVE_ARM64_CRC32C + +#endif // CRC32C_CRC32C_ARM_LINUX_H_ diff --git a/src/crc32c/src/crc32c_arm64_linux_check.h b/src/crc32c/src/crc32c_arm64_linux_check.h new file mode 100644 index 0000000000..1a20a757bb --- /dev/null +++ b/src/crc32c/src/crc32c_arm64_linux_check.h @@ -0,0 +1,50 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +// ARM Linux-specific code checking for the availability of CRC32C instructions. + +#ifndef CRC32C_CRC32C_ARM_LINUX_CHECK_H_ +#define CRC32C_CRC32C_ARM_LINUX_CHECK_H_ + +// X86-specific code checking for the availability of SSE4.2 instructions. + +#include <cstddef> +#include <cstdint> + +#ifdef CRC32C_HAVE_CONFIG_H +#include "crc32c/crc32c_config.h" +#endif + +#if HAVE_ARM64_CRC32C + +#if HAVE_STRONG_GETAUXVAL +#include <sys/auxv.h> +#elif HAVE_WEAK_GETAUXVAL +// getauxval() is not available on Android until API level 20. Link it as a weak +// symbol. +extern "C" unsigned long getauxval(unsigned long type) __attribute__((weak)); + +#define AT_HWCAP 16 +#endif // HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL + +namespace crc32c { + +inline bool CanUseArm64Linux() { +#if HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL + // From 'arch/arm64/include/uapi/asm/hwcap.h' in Linux kernel source code. + constexpr unsigned long kHWCAP_PMULL = 1 << 4; + constexpr unsigned long kHWCAP_CRC32 = 1 << 7; + unsigned long hwcap = (&getauxval != nullptr) ? getauxval(AT_HWCAP) : 0; + return (hwcap & (kHWCAP_PMULL | kHWCAP_CRC32)) == + (kHWCAP_PMULL | kHWCAP_CRC32); +#else + return false; +#endif // HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL +} + +} // namespace crc32c + +#endif // HAVE_ARM64_CRC32C + +#endif // CRC32C_CRC32C_ARM_LINUX_CHECK_H_ diff --git a/src/crc32c/src/crc32c_arm64_unittest.cc b/src/crc32c/src/crc32c_arm64_unittest.cc new file mode 100644 index 0000000000..6f917d9c0c --- /dev/null +++ b/src/crc32c/src/crc32c_arm64_unittest.cc @@ -0,0 +1,24 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "gtest/gtest.h" + +#include "./crc32c_arm64.h" +#include "./crc32c_extend_unittests.h" + +namespace crc32c { + +#if HAVE_ARM64_CRC32C + +struct Arm64TestTraits { + static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) { + return ExtendArm64(crc, data, count); + } +}; + +INSTANTIATE_TYPED_TEST_SUITE_P(Arm64, ExtendTest, Arm64TestTraits); + +#endif // HAVE_ARM64_CRC32C + +} // namespace crc32c diff --git a/src/crc32c/src/crc32c_benchmark.cc b/src/crc32c/src/crc32c_benchmark.cc new file mode 100644 index 0000000000..c464304b3f --- /dev/null +++ b/src/crc32c/src/crc32c_benchmark.cc @@ -0,0 +1,106 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+ +#include <cstddef> +#include <cstdint> + +#ifdef CRC32C_HAVE_CONFIG_H +#include "crc32c/crc32c_config.h" +#endif + +#include "benchmark/benchmark.h" + +#if CRC32C_TESTS_BUILT_WITH_GLOG +#include "glog/logging.h" +#endif // CRC32C_TESTS_BUILT_WITH_GLOG + +#include "./crc32c_arm64.h" +#include "./crc32c_arm64_linux_check.h" +#include "./crc32c_internal.h" +#include "./crc32c_sse42.h" +#include "./crc32c_sse42_check.h" +#include "crc32c/crc32c.h" + +class CRC32CBenchmark : public benchmark::Fixture { + public: + void SetUp(const benchmark::State& state) override { + block_size_ = static_cast<size_t>(state.range(0)); + block_data_ = std::string(block_size_, 'x'); + block_buffer_ = reinterpret_cast<const uint8_t*>(block_data_.data()); + } + + protected: + std::string block_data_; + const uint8_t* block_buffer_; + size_t block_size_; +}; + +BENCHMARK_DEFINE_F(CRC32CBenchmark, Public)(benchmark::State& state) { + uint32_t crc = 0; + for (auto _ : state) + crc = crc32c::Extend(crc, block_buffer_, block_size_); + state.SetBytesProcessed(state.iterations() * block_size_); +} +BENCHMARK_REGISTER_F(CRC32CBenchmark, Public) + ->RangeMultiplier(16) + ->Range(256, 16777216); // Block size. + +BENCHMARK_DEFINE_F(CRC32CBenchmark, Portable)(benchmark::State& state) { + uint32_t crc = 0; + for (auto _ : state) + crc = crc32c::ExtendPortable(crc, block_buffer_, block_size_); + state.SetBytesProcessed(state.iterations() * block_size_); +} +BENCHMARK_REGISTER_F(CRC32CBenchmark, Portable) + ->RangeMultiplier(16) + ->Range(256, 16777216); // Block size. + +#if HAVE_ARM64_CRC32C + +BENCHMARK_DEFINE_F(CRC32CBenchmark, ArmLinux)(benchmark::State& state) { + if (!crc32c::CanUseArm64Linux()) { + state.SkipWithError("ARM CRC32C instructions not available or not enabled"); + return; + } + + uint32_t crc = 0; + for (auto _ : state) + crc = crc32c::ExtendArm64(crc, block_buffer_, block_size_); + state.SetBytesProcessed(state.iterations() * block_size_); +} +BENCHMARK_REGISTER_F(CRC32CBenchmark, ArmLinux) + ->RangeMultiplier(16) + ->Range(256, 16777216); // Block size. + +#endif // HAVE_ARM64_CRC32C + +#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__)) + +BENCHMARK_DEFINE_F(CRC32CBenchmark, Sse42)(benchmark::State& state) { + if (!crc32c::CanUseSse42()) { + state.SkipWithError("SSE4.2 instructions not available or not enabled"); + return; + } + + uint32_t crc = 0; + for (auto _ : state) + crc = crc32c::ExtendSse42(crc, block_buffer_, block_size_); + state.SetBytesProcessed(state.iterations() * block_size_); +} +BENCHMARK_REGISTER_F(CRC32CBenchmark, Sse42) + ->RangeMultiplier(16) + ->Range(256, 16777216); // Block size. + +#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__)) + +int main(int argc, char** argv) { +#if CRC32C_TESTS_BUILT_WITH_GLOG + google::InitGoogleLogging(argv[0]); + google::InstallFailureSignalHandler(); +#endif // CRC32C_TESTS_BUILT_WITH_GLOG + + benchmark::Initialize(&argc, argv); + benchmark::RunSpecifiedBenchmarks(); + return 0; +} diff --git a/src/crc32c/src/crc32c_capi_unittest.c b/src/crc32c/src/crc32c_capi_unittest.c new file mode 100644 index 0000000000..c8993a0959 --- /dev/null +++ b/src/crc32c/src/crc32c_capi_unittest.c @@ -0,0 +1,66 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+ +#include "crc32c/crc32c.h" + +#include <stddef.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +int main() { + /* From rfc3720 section B.4. */ + uint8_t buf[32]; + + memset(buf, 0, sizeof(buf)); + if ((uint32_t)0x8a9136aa != crc32c_value(buf, sizeof(buf))) { + printf("crc32c_value(zeros) test failed\n"); + return 1; + } + + memset(buf, 0xff, sizeof(buf)); + if ((uint32_t)0x62a8ab43 != crc32c_value(buf, sizeof(buf))) { + printf("crc32c_value(0xff) test failed\n"); + return 1; + } + + for (size_t i = 0; i < 32; ++i) + buf[i] = (uint8_t)i; + if ((uint32_t)0x46dd794e != crc32c_value(buf, sizeof(buf))) { + printf("crc32c_value(0..31) test failed\n"); + return 1; + } + + for (size_t i = 0; i < 32; ++i) + buf[i] = (uint8_t)(31 - i); + if ((uint32_t)0x113fdb5c != crc32c_value(buf, sizeof(buf))) { + printf("crc32c_value(31..0) test failed\n"); + return 1; + } + + uint8_t data[48] = { + 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }; + if ((uint32_t)0xd9963a56 != crc32c_value(data, sizeof(data))) { + printf("crc32c_value(31..0) test failed\n"); + return 1; + } + + const uint8_t* hello_space_world = (const uint8_t*)"hello world"; + const uint8_t* hello_space = (const uint8_t*)"hello "; + const uint8_t* world = (const uint8_t*)"world"; + + if (crc32c_value(hello_space_world, 11) != + crc32c_extend(crc32c_value(hello_space, 6), world, 5)) { + printf("crc32c_extend test failed\n"); + return 1; + } + + printf("All tests passed\n"); + return 0; +} diff --git a/src/crc32c/src/crc32c_config.h.in b/src/crc32c/src/crc32c_config.h.in new file mode 100644 index 0000000000..4034fa5644 --- /dev/null +++ b/src/crc32c/src/crc32c_config.h.in @@ -0,0 +1,36 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef CRC32C_CRC32C_CONFIG_H_ +#define CRC32C_CRC32C_CONFIG_H_ + +// Define to 1 if building for a big-endian platform. +#cmakedefine01 BYTE_ORDER_BIG_ENDIAN + +// Define to 1 if the compiler has the __builtin_prefetch intrinsic. +#cmakedefine01 HAVE_BUILTIN_PREFETCH + +// Define to 1 if targeting X86 and the compiler has the _mm_prefetch intrinsic. +#cmakedefine01 HAVE_MM_PREFETCH + +// Define to 1 if targeting X86 and the compiler has the _mm_crc32_u{8,32,64} +// intrinsics. +#cmakedefine01 HAVE_SSE42 + +// Define to 1 if targeting ARM and the compiler has the __crc32c{b,h,w,d} and +// the vmull_p64 intrinsics. +#cmakedefine01 HAVE_ARM64_CRC32C + +// Define to 1 if the system libraries have the getauxval function in the +// <sys/auxv.h> header. Should be true on Linux and Android API level 20+. +#cmakedefine01 HAVE_STRONG_GETAUXVAL + +// Define to 1 if the compiler supports defining getauxval as a weak symbol. +// Should be true for any compiler that supports __attribute__((weak)). +#cmakedefine01 HAVE_WEAK_GETAUXVAL + +// Define to 1 if CRC32C tests have been built with Google Logging. 
+#cmakedefine01 CRC32C_TESTS_BUILT_WITH_GLOG + +#endif // CRC32C_CRC32C_CONFIG_H_ diff --git a/src/crc32c/src/crc32c_extend_unittests.h b/src/crc32c/src/crc32c_extend_unittests.h new file mode 100644 index 0000000000..0732973737 --- /dev/null +++ b/src/crc32c/src/crc32c_extend_unittests.h @@ -0,0 +1,112 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef CRC32C_CRC32C_EXTEND_UNITTESTS_H_ +#define CRC32C_CRC32C_EXTEND_UNITTESTS_H_ + +#include <cstddef> +#include <cstdint> +#include <cstring> + +#include "gtest/gtest.h" + +// Common test cases for all implementations of CRC32C_Extend(). + +namespace crc32c { + +template<typename TestTraits> +class ExtendTest : public testing::Test {}; + +TYPED_TEST_SUITE_P(ExtendTest); + +TYPED_TEST_P(ExtendTest, StandardResults) { + // From rfc3720 section B.4. + uint8_t buf[32]; + + std::memset(buf, 0, sizeof(buf)); + EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa), + TypeParam::Extend(0, buf, sizeof(buf))); + + std::memset(buf, 0xff, sizeof(buf)); + EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43), + TypeParam::Extend(0, buf, sizeof(buf))); + + for (int i = 0; i < 32; ++i) + buf[i] = static_cast<uint8_t>(i); + EXPECT_EQ(static_cast<uint32_t>(0x46dd794e), + TypeParam::Extend(0, buf, sizeof(buf))); + + for (int i = 0; i < 32; ++i) + buf[i] = static_cast<uint8_t>(31 - i); + EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c), + TypeParam::Extend(0, buf, sizeof(buf))); + + uint8_t data[48] = { + 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }; + EXPECT_EQ(static_cast<uint32_t>(0xd9963a56), + TypeParam::Extend(0, data, sizeof(data))); +} + +TYPED_TEST_P(ExtendTest, HelloWorld) { + const uint8_t* hello_space_world = + reinterpret_cast<const uint8_t*>("hello world"); + const uint8_t* hello_space = reinterpret_cast<const uint8_t*>("hello "); + const uint8_t* world = reinterpret_cast<const uint8_t*>("world"); + + EXPECT_EQ(TypeParam::Extend(0, hello_space_world, 11), + TypeParam::Extend(TypeParam::Extend(0, hello_space, 6), world, 5)); +} + +TYPED_TEST_P(ExtendTest, BufferSlicing) { + uint8_t buffer[48] = { + 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }; + + for (size_t i = 0; i < 48; ++i) { + for (size_t j = i + 1; j <= 48; ++j) { + uint32_t crc = 0; + + if (i > 0) crc = TypeParam::Extend(crc, buffer, i); + crc = TypeParam::Extend(crc, buffer + i, j - i); + if (j < 48) crc = TypeParam::Extend(crc, buffer + j, 48 - j); + + EXPECT_EQ(static_cast<uint32_t>(0xd9963a56), crc); + } + } +} + +TYPED_TEST_P(ExtendTest, LargeBufferSlicing) { + uint8_t buffer[2048]; + for (size_t i = 0; i < 2048; i++) + buffer[i] = static_cast<uint8_t>(3 * i * i + 7 * i + 11); + + for (size_t i = 0; i < 2048; ++i) { + for (size_t j = i + 1; j <= 2048; ++j) { + uint32_t crc = 0; + + if (i > 0) crc = TypeParam::Extend(crc, buffer, i); + crc = TypeParam::Extend(crc, buffer + i, j - i); + if (j < 2048) crc = TypeParam::Extend(crc, buffer + j, 
2048 - j); + + EXPECT_EQ(static_cast<uint32_t>(0x36dcc753), crc); + } + } +} + +REGISTER_TYPED_TEST_SUITE_P(ExtendTest, + StandardResults, + HelloWorld, + BufferSlicing, + LargeBufferSlicing); + +} // namespace crc32c + +#endif // CRC32C_CRC32C_EXTEND_UNITTESTS_H_ diff --git a/src/crc32c/src/crc32c_internal.h b/src/crc32c/src/crc32c_internal.h new file mode 100644 index 0000000000..2bd23dea43 --- /dev/null +++ b/src/crc32c/src/crc32c_internal.h @@ -0,0 +1,23 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef CRC32C_CRC32C_INTERNAL_H_ +#define CRC32C_CRC32C_INTERNAL_H_ + +// Internal functions that may change between releases. + +#include <cstddef> +#include <cstdint> + +namespace crc32c { + +// Un-accelerated implementation that works on all CPUs. +uint32_t ExtendPortable(uint32_t crc, const uint8_t* data, size_t count); + +// CRCs are pre- and post- conditioned by xoring with all ones. +static constexpr const uint32_t kCRC32Xor = static_cast<uint32_t>(0xffffffffU); + +} // namespace crc32c + +#endif // CRC32C_CRC32C_INTERNAL_H_ diff --git a/src/crc32c/src/crc32c_portable.cc b/src/crc32c/src/crc32c_portable.cc new file mode 100644 index 0000000000..31ec6eac53 --- /dev/null +++ b/src/crc32c/src/crc32c_portable.cc @@ -0,0 +1,351 @@ +// Copyright 2008 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "./crc32c_internal.h" + +#include <cstddef> +#include <cstdint> + +#include "./crc32c_prefetch.h" +#include "./crc32c_read_le.h" +#include "./crc32c_round_up.h" + +namespace { + +const uint32_t kByteExtensionTable[256] = { + 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c, + 0x26a1e7e8, 0xd4ca64eb, 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, + 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, 0x105ec76f, 0xe235446c, + 0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384, + 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc, + 0xbc267848, 0x4e4dfb4b, 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, + 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, 0xaa64d611, 0x580f5512, + 0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa, + 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad, + 0x1642ae59, 0xe4292d5a, 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, + 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, 0x417b1dbc, 0xb3109ebf, + 0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957, + 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f, + 0xed03a29b, 0x1f682198, 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, + 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, 0xdbfc821c, 0x2997011f, + 0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7, + 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e, + 0x4767748a, 0xb50cf789, 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, + 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, 0x7198540d, 0x83f3d70e, + 0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6, + 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de, + 0xdde0eb2a, 0x2f8b6829, 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, + 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, 
0x082f63b7, 0xfa44e0b4, + 0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c, + 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b, + 0xb4091bff, 0x466298fc, 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, + 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, 0xa24bb5a6, 0x502036a5, + 0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d, + 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975, + 0x0e330a81, 0xfc588982, 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, + 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, 0x38cc2a06, 0xcaa7a905, + 0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed, + 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8, + 0xe52cc12c, 0x1747422f, 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, + 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, 0xd3d3e1ab, 0x21b862a8, + 0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540, + 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78, + 0x7fab5e8c, 0x8dc0dd8f, 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, + 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, 0x69e9f0d5, 0x9b8273d6, + 0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e, + 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69, + 0xd5cf889d, 0x27a40b9e, 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, + 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351}; + +const uint32_t kStrideExtensionTable0[256] = { + 0x00000000, 0x30d23865, 0x61a470ca, 0x517648af, 0xc348e194, 0xf39ad9f1, + 0xa2ec915e, 0x923ea93b, 0x837db5d9, 0xb3af8dbc, 0xe2d9c513, 0xd20bfd76, + 0x4035544d, 0x70e76c28, 0x21912487, 0x11431ce2, 0x03171d43, 0x33c52526, + 0x62b36d89, 0x526155ec, 0xc05ffcd7, 0xf08dc4b2, 0xa1fb8c1d, 0x9129b478, + 0x806aa89a, 0xb0b890ff, 0xe1ced850, 0xd11ce035, 0x4322490e, 0x73f0716b, + 0x228639c4, 0x125401a1, 0x062e3a86, 0x36fc02e3, 0x678a4a4c, 0x57587229, + 0xc566db12, 0xf5b4e377, 0xa4c2abd8, 0x941093bd, 0x85538f5f, 0xb581b73a, + 0xe4f7ff95, 0xd425c7f0, 0x461b6ecb, 0x76c956ae, 0x27bf1e01, 0x176d2664, + 0x053927c5, 0x35eb1fa0, 0x649d570f, 0x544f6f6a, 0xc671c651, 0xf6a3fe34, + 0xa7d5b69b, 0x97078efe, 0x8644921c, 0xb696aa79, 0xe7e0e2d6, 0xd732dab3, + 0x450c7388, 0x75de4bed, 0x24a80342, 0x147a3b27, 0x0c5c750c, 0x3c8e4d69, + 0x6df805c6, 0x5d2a3da3, 0xcf149498, 0xffc6acfd, 0xaeb0e452, 0x9e62dc37, + 0x8f21c0d5, 0xbff3f8b0, 0xee85b01f, 0xde57887a, 0x4c692141, 0x7cbb1924, + 0x2dcd518b, 0x1d1f69ee, 0x0f4b684f, 0x3f99502a, 0x6eef1885, 0x5e3d20e0, + 0xcc0389db, 0xfcd1b1be, 0xada7f911, 0x9d75c174, 0x8c36dd96, 0xbce4e5f3, + 0xed92ad5c, 0xdd409539, 0x4f7e3c02, 0x7fac0467, 0x2eda4cc8, 0x1e0874ad, + 0x0a724f8a, 0x3aa077ef, 0x6bd63f40, 0x5b040725, 0xc93aae1e, 0xf9e8967b, + 0xa89eded4, 0x984ce6b1, 0x890ffa53, 0xb9ddc236, 0xe8ab8a99, 0xd879b2fc, + 0x4a471bc7, 0x7a9523a2, 0x2be36b0d, 0x1b315368, 0x096552c9, 0x39b76aac, + 0x68c12203, 0x58131a66, 0xca2db35d, 0xfaff8b38, 0xab89c397, 0x9b5bfbf2, + 0x8a18e710, 0xbacadf75, 0xebbc97da, 0xdb6eafbf, 0x49500684, 0x79823ee1, + 0x28f4764e, 0x18264e2b, 0x18b8ea18, 0x286ad27d, 0x791c9ad2, 0x49cea2b7, + 0xdbf00b8c, 0xeb2233e9, 0xba547b46, 0x8a864323, 0x9bc55fc1, 0xab1767a4, + 0xfa612f0b, 0xcab3176e, 0x588dbe55, 0x685f8630, 0x3929ce9f, 0x09fbf6fa, + 0x1baff75b, 0x2b7dcf3e, 0x7a0b8791, 0x4ad9bff4, 0xd8e716cf, 0xe8352eaa, + 0xb9436605, 0x89915e60, 0x98d24282, 0xa8007ae7, 0xf9763248, 0xc9a40a2d, + 0x5b9aa316, 0x6b489b73, 0x3a3ed3dc, 0x0aecebb9, 0x1e96d09e, 0x2e44e8fb, + 0x7f32a054, 0x4fe09831, 
0xddde310a, 0xed0c096f, 0xbc7a41c0, 0x8ca879a5, + 0x9deb6547, 0xad395d22, 0xfc4f158d, 0xcc9d2de8, 0x5ea384d3, 0x6e71bcb6, + 0x3f07f419, 0x0fd5cc7c, 0x1d81cddd, 0x2d53f5b8, 0x7c25bd17, 0x4cf78572, + 0xdec92c49, 0xee1b142c, 0xbf6d5c83, 0x8fbf64e6, 0x9efc7804, 0xae2e4061, + 0xff5808ce, 0xcf8a30ab, 0x5db49990, 0x6d66a1f5, 0x3c10e95a, 0x0cc2d13f, + 0x14e49f14, 0x2436a771, 0x7540efde, 0x4592d7bb, 0xd7ac7e80, 0xe77e46e5, + 0xb6080e4a, 0x86da362f, 0x97992acd, 0xa74b12a8, 0xf63d5a07, 0xc6ef6262, + 0x54d1cb59, 0x6403f33c, 0x3575bb93, 0x05a783f6, 0x17f38257, 0x2721ba32, + 0x7657f29d, 0x4685caf8, 0xd4bb63c3, 0xe4695ba6, 0xb51f1309, 0x85cd2b6c, + 0x948e378e, 0xa45c0feb, 0xf52a4744, 0xc5f87f21, 0x57c6d61a, 0x6714ee7f, + 0x3662a6d0, 0x06b09eb5, 0x12caa592, 0x22189df7, 0x736ed558, 0x43bced3d, + 0xd1824406, 0xe1507c63, 0xb02634cc, 0x80f40ca9, 0x91b7104b, 0xa165282e, + 0xf0136081, 0xc0c158e4, 0x52fff1df, 0x622dc9ba, 0x335b8115, 0x0389b970, + 0x11ddb8d1, 0x210f80b4, 0x7079c81b, 0x40abf07e, 0xd2955945, 0xe2476120, + 0xb331298f, 0x83e311ea, 0x92a00d08, 0xa272356d, 0xf3047dc2, 0xc3d645a7, + 0x51e8ec9c, 0x613ad4f9, 0x304c9c56, 0x009ea433}; + +const uint32_t kStrideExtensionTable1[256] = { + 0x00000000, 0x54075546, 0xa80eaa8c, 0xfc09ffca, 0x55f123e9, 0x01f676af, + 0xfdff8965, 0xa9f8dc23, 0xabe247d2, 0xffe51294, 0x03eced5e, 0x57ebb818, + 0xfe13643b, 0xaa14317d, 0x561dceb7, 0x021a9bf1, 0x5228f955, 0x062fac13, + 0xfa2653d9, 0xae21069f, 0x07d9dabc, 0x53de8ffa, 0xafd77030, 0xfbd02576, + 0xf9cabe87, 0xadcdebc1, 0x51c4140b, 0x05c3414d, 0xac3b9d6e, 0xf83cc828, + 0x043537e2, 0x503262a4, 0xa451f2aa, 0xf056a7ec, 0x0c5f5826, 0x58580d60, + 0xf1a0d143, 0xa5a78405, 0x59ae7bcf, 0x0da92e89, 0x0fb3b578, 0x5bb4e03e, + 0xa7bd1ff4, 0xf3ba4ab2, 0x5a429691, 0x0e45c3d7, 0xf24c3c1d, 0xa64b695b, + 0xf6790bff, 0xa27e5eb9, 0x5e77a173, 0x0a70f435, 0xa3882816, 0xf78f7d50, + 0x0b86829a, 0x5f81d7dc, 0x5d9b4c2d, 0x099c196b, 0xf595e6a1, 0xa192b3e7, + 0x086a6fc4, 0x5c6d3a82, 0xa064c548, 0xf463900e, 0x4d4f93a5, 0x1948c6e3, + 0xe5413929, 0xb1466c6f, 0x18beb04c, 0x4cb9e50a, 0xb0b01ac0, 0xe4b74f86, + 0xe6add477, 0xb2aa8131, 0x4ea37efb, 0x1aa42bbd, 0xb35cf79e, 0xe75ba2d8, + 0x1b525d12, 0x4f550854, 0x1f676af0, 0x4b603fb6, 0xb769c07c, 0xe36e953a, + 0x4a964919, 0x1e911c5f, 0xe298e395, 0xb69fb6d3, 0xb4852d22, 0xe0827864, + 0x1c8b87ae, 0x488cd2e8, 0xe1740ecb, 0xb5735b8d, 0x497aa447, 0x1d7df101, + 0xe91e610f, 0xbd193449, 0x4110cb83, 0x15179ec5, 0xbcef42e6, 0xe8e817a0, + 0x14e1e86a, 0x40e6bd2c, 0x42fc26dd, 0x16fb739b, 0xeaf28c51, 0xbef5d917, + 0x170d0534, 0x430a5072, 0xbf03afb8, 0xeb04fafe, 0xbb36985a, 0xef31cd1c, + 0x133832d6, 0x473f6790, 0xeec7bbb3, 0xbac0eef5, 0x46c9113f, 0x12ce4479, + 0x10d4df88, 0x44d38ace, 0xb8da7504, 0xecdd2042, 0x4525fc61, 0x1122a927, + 0xed2b56ed, 0xb92c03ab, 0x9a9f274a, 0xce98720c, 0x32918dc6, 0x6696d880, + 0xcf6e04a3, 0x9b6951e5, 0x6760ae2f, 0x3367fb69, 0x317d6098, 0x657a35de, + 0x9973ca14, 0xcd749f52, 0x648c4371, 0x308b1637, 0xcc82e9fd, 0x9885bcbb, + 0xc8b7de1f, 0x9cb08b59, 0x60b97493, 0x34be21d5, 0x9d46fdf6, 0xc941a8b0, + 0x3548577a, 0x614f023c, 0x635599cd, 0x3752cc8b, 0xcb5b3341, 0x9f5c6607, + 0x36a4ba24, 0x62a3ef62, 0x9eaa10a8, 0xcaad45ee, 0x3eced5e0, 0x6ac980a6, + 0x96c07f6c, 0xc2c72a2a, 0x6b3ff609, 0x3f38a34f, 0xc3315c85, 0x973609c3, + 0x952c9232, 0xc12bc774, 0x3d2238be, 0x69256df8, 0xc0ddb1db, 0x94dae49d, + 0x68d31b57, 0x3cd44e11, 0x6ce62cb5, 0x38e179f3, 0xc4e88639, 0x90efd37f, + 0x39170f5c, 0x6d105a1a, 0x9119a5d0, 0xc51ef096, 0xc7046b67, 0x93033e21, + 0x6f0ac1eb, 0x3b0d94ad, 0x92f5488e, 0xc6f21dc8, 0x3afbe202, 0x6efcb744, + 
0xd7d0b4ef, 0x83d7e1a9, 0x7fde1e63, 0x2bd94b25, 0x82219706, 0xd626c240, + 0x2a2f3d8a, 0x7e2868cc, 0x7c32f33d, 0x2835a67b, 0xd43c59b1, 0x803b0cf7, + 0x29c3d0d4, 0x7dc48592, 0x81cd7a58, 0xd5ca2f1e, 0x85f84dba, 0xd1ff18fc, + 0x2df6e736, 0x79f1b270, 0xd0096e53, 0x840e3b15, 0x7807c4df, 0x2c009199, + 0x2e1a0a68, 0x7a1d5f2e, 0x8614a0e4, 0xd213f5a2, 0x7beb2981, 0x2fec7cc7, + 0xd3e5830d, 0x87e2d64b, 0x73814645, 0x27861303, 0xdb8fecc9, 0x8f88b98f, + 0x267065ac, 0x727730ea, 0x8e7ecf20, 0xda799a66, 0xd8630197, 0x8c6454d1, + 0x706dab1b, 0x246afe5d, 0x8d92227e, 0xd9957738, 0x259c88f2, 0x719bddb4, + 0x21a9bf10, 0x75aeea56, 0x89a7159c, 0xdda040da, 0x74589cf9, 0x205fc9bf, + 0xdc563675, 0x88516333, 0x8a4bf8c2, 0xde4cad84, 0x2245524e, 0x76420708, + 0xdfbadb2b, 0x8bbd8e6d, 0x77b471a7, 0x23b324e1}; + +const uint32_t kStrideExtensionTable2[256] = { + 0x00000000, 0x678efd01, 0xcf1dfa02, 0xa8930703, 0x9bd782f5, 0xfc597ff4, + 0x54ca78f7, 0x334485f6, 0x3243731b, 0x55cd8e1a, 0xfd5e8919, 0x9ad07418, + 0xa994f1ee, 0xce1a0cef, 0x66890bec, 0x0107f6ed, 0x6486e636, 0x03081b37, + 0xab9b1c34, 0xcc15e135, 0xff5164c3, 0x98df99c2, 0x304c9ec1, 0x57c263c0, + 0x56c5952d, 0x314b682c, 0x99d86f2f, 0xfe56922e, 0xcd1217d8, 0xaa9cead9, + 0x020fedda, 0x658110db, 0xc90dcc6c, 0xae83316d, 0x0610366e, 0x619ecb6f, + 0x52da4e99, 0x3554b398, 0x9dc7b49b, 0xfa49499a, 0xfb4ebf77, 0x9cc04276, + 0x34534575, 0x53ddb874, 0x60993d82, 0x0717c083, 0xaf84c780, 0xc80a3a81, + 0xad8b2a5a, 0xca05d75b, 0x6296d058, 0x05182d59, 0x365ca8af, 0x51d255ae, + 0xf94152ad, 0x9ecfafac, 0x9fc85941, 0xf846a440, 0x50d5a343, 0x375b5e42, + 0x041fdbb4, 0x639126b5, 0xcb0221b6, 0xac8cdcb7, 0x97f7ee29, 0xf0791328, + 0x58ea142b, 0x3f64e92a, 0x0c206cdc, 0x6bae91dd, 0xc33d96de, 0xa4b36bdf, + 0xa5b49d32, 0xc23a6033, 0x6aa96730, 0x0d279a31, 0x3e631fc7, 0x59ede2c6, + 0xf17ee5c5, 0x96f018c4, 0xf371081f, 0x94fff51e, 0x3c6cf21d, 0x5be20f1c, + 0x68a68aea, 0x0f2877eb, 0xa7bb70e8, 0xc0358de9, 0xc1327b04, 0xa6bc8605, + 0x0e2f8106, 0x69a17c07, 0x5ae5f9f1, 0x3d6b04f0, 0x95f803f3, 0xf276fef2, + 0x5efa2245, 0x3974df44, 0x91e7d847, 0xf6692546, 0xc52da0b0, 0xa2a35db1, + 0x0a305ab2, 0x6dbea7b3, 0x6cb9515e, 0x0b37ac5f, 0xa3a4ab5c, 0xc42a565d, + 0xf76ed3ab, 0x90e02eaa, 0x387329a9, 0x5ffdd4a8, 0x3a7cc473, 0x5df23972, + 0xf5613e71, 0x92efc370, 0xa1ab4686, 0xc625bb87, 0x6eb6bc84, 0x09384185, + 0x083fb768, 0x6fb14a69, 0xc7224d6a, 0xa0acb06b, 0x93e8359d, 0xf466c89c, + 0x5cf5cf9f, 0x3b7b329e, 0x2a03aaa3, 0x4d8d57a2, 0xe51e50a1, 0x8290ada0, + 0xb1d42856, 0xd65ad557, 0x7ec9d254, 0x19472f55, 0x1840d9b8, 0x7fce24b9, + 0xd75d23ba, 0xb0d3debb, 0x83975b4d, 0xe419a64c, 0x4c8aa14f, 0x2b045c4e, + 0x4e854c95, 0x290bb194, 0x8198b697, 0xe6164b96, 0xd552ce60, 0xb2dc3361, + 0x1a4f3462, 0x7dc1c963, 0x7cc63f8e, 0x1b48c28f, 0xb3dbc58c, 0xd455388d, + 0xe711bd7b, 0x809f407a, 0x280c4779, 0x4f82ba78, 0xe30e66cf, 0x84809bce, + 0x2c139ccd, 0x4b9d61cc, 0x78d9e43a, 0x1f57193b, 0xb7c41e38, 0xd04ae339, + 0xd14d15d4, 0xb6c3e8d5, 0x1e50efd6, 0x79de12d7, 0x4a9a9721, 0x2d146a20, + 0x85876d23, 0xe2099022, 0x878880f9, 0xe0067df8, 0x48957afb, 0x2f1b87fa, + 0x1c5f020c, 0x7bd1ff0d, 0xd342f80e, 0xb4cc050f, 0xb5cbf3e2, 0xd2450ee3, + 0x7ad609e0, 0x1d58f4e1, 0x2e1c7117, 0x49928c16, 0xe1018b15, 0x868f7614, + 0xbdf4448a, 0xda7ab98b, 0x72e9be88, 0x15674389, 0x2623c67f, 0x41ad3b7e, + 0xe93e3c7d, 0x8eb0c17c, 0x8fb73791, 0xe839ca90, 0x40aacd93, 0x27243092, + 0x1460b564, 0x73ee4865, 0xdb7d4f66, 0xbcf3b267, 0xd972a2bc, 0xbefc5fbd, + 0x166f58be, 0x71e1a5bf, 0x42a52049, 0x252bdd48, 0x8db8da4b, 0xea36274a, + 0xeb31d1a7, 0x8cbf2ca6, 0x242c2ba5, 0x43a2d6a4, 
0x70e65352, 0x1768ae53, + 0xbffba950, 0xd8755451, 0x74f988e6, 0x137775e7, 0xbbe472e4, 0xdc6a8fe5, + 0xef2e0a13, 0x88a0f712, 0x2033f011, 0x47bd0d10, 0x46bafbfd, 0x213406fc, + 0x89a701ff, 0xee29fcfe, 0xdd6d7908, 0xbae38409, 0x1270830a, 0x75fe7e0b, + 0x107f6ed0, 0x77f193d1, 0xdf6294d2, 0xb8ec69d3, 0x8ba8ec25, 0xec261124, + 0x44b51627, 0x233beb26, 0x223c1dcb, 0x45b2e0ca, 0xed21e7c9, 0x8aaf1ac8, + 0xb9eb9f3e, 0xde65623f, 0x76f6653c, 0x1178983d}; + +const uint32_t kStrideExtensionTable3[256] = { + 0x00000000, 0xf20c0dfe, 0xe1f46d0d, 0x13f860f3, 0xc604aceb, 0x3408a115, + 0x27f0c1e6, 0xd5fccc18, 0x89e52f27, 0x7be922d9, 0x6811422a, 0x9a1d4fd4, + 0x4fe183cc, 0xbded8e32, 0xae15eec1, 0x5c19e33f, 0x162628bf, 0xe42a2541, + 0xf7d245b2, 0x05de484c, 0xd0228454, 0x222e89aa, 0x31d6e959, 0xc3dae4a7, + 0x9fc30798, 0x6dcf0a66, 0x7e376a95, 0x8c3b676b, 0x59c7ab73, 0xabcba68d, + 0xb833c67e, 0x4a3fcb80, 0x2c4c517e, 0xde405c80, 0xcdb83c73, 0x3fb4318d, + 0xea48fd95, 0x1844f06b, 0x0bbc9098, 0xf9b09d66, 0xa5a97e59, 0x57a573a7, + 0x445d1354, 0xb6511eaa, 0x63add2b2, 0x91a1df4c, 0x8259bfbf, 0x7055b241, + 0x3a6a79c1, 0xc866743f, 0xdb9e14cc, 0x29921932, 0xfc6ed52a, 0x0e62d8d4, + 0x1d9ab827, 0xef96b5d9, 0xb38f56e6, 0x41835b18, 0x527b3beb, 0xa0773615, + 0x758bfa0d, 0x8787f7f3, 0x947f9700, 0x66739afe, 0x5898a2fc, 0xaa94af02, + 0xb96ccff1, 0x4b60c20f, 0x9e9c0e17, 0x6c9003e9, 0x7f68631a, 0x8d646ee4, + 0xd17d8ddb, 0x23718025, 0x3089e0d6, 0xc285ed28, 0x17792130, 0xe5752cce, + 0xf68d4c3d, 0x048141c3, 0x4ebe8a43, 0xbcb287bd, 0xaf4ae74e, 0x5d46eab0, + 0x88ba26a8, 0x7ab62b56, 0x694e4ba5, 0x9b42465b, 0xc75ba564, 0x3557a89a, + 0x26afc869, 0xd4a3c597, 0x015f098f, 0xf3530471, 0xe0ab6482, 0x12a7697c, + 0x74d4f382, 0x86d8fe7c, 0x95209e8f, 0x672c9371, 0xb2d05f69, 0x40dc5297, + 0x53243264, 0xa1283f9a, 0xfd31dca5, 0x0f3dd15b, 0x1cc5b1a8, 0xeec9bc56, + 0x3b35704e, 0xc9397db0, 0xdac11d43, 0x28cd10bd, 0x62f2db3d, 0x90fed6c3, + 0x8306b630, 0x710abbce, 0xa4f677d6, 0x56fa7a28, 0x45021adb, 0xb70e1725, + 0xeb17f41a, 0x191bf9e4, 0x0ae39917, 0xf8ef94e9, 0x2d1358f1, 0xdf1f550f, + 0xcce735fc, 0x3eeb3802, 0xb13145f8, 0x433d4806, 0x50c528f5, 0xa2c9250b, + 0x7735e913, 0x8539e4ed, 0x96c1841e, 0x64cd89e0, 0x38d46adf, 0xcad86721, + 0xd92007d2, 0x2b2c0a2c, 0xfed0c634, 0x0cdccbca, 0x1f24ab39, 0xed28a6c7, + 0xa7176d47, 0x551b60b9, 0x46e3004a, 0xb4ef0db4, 0x6113c1ac, 0x931fcc52, + 0x80e7aca1, 0x72eba15f, 0x2ef24260, 0xdcfe4f9e, 0xcf062f6d, 0x3d0a2293, + 0xe8f6ee8b, 0x1afae375, 0x09028386, 0xfb0e8e78, 0x9d7d1486, 0x6f711978, + 0x7c89798b, 0x8e857475, 0x5b79b86d, 0xa975b593, 0xba8dd560, 0x4881d89e, + 0x14983ba1, 0xe694365f, 0xf56c56ac, 0x07605b52, 0xd29c974a, 0x20909ab4, + 0x3368fa47, 0xc164f7b9, 0x8b5b3c39, 0x795731c7, 0x6aaf5134, 0x98a35cca, + 0x4d5f90d2, 0xbf539d2c, 0xacabfddf, 0x5ea7f021, 0x02be131e, 0xf0b21ee0, + 0xe34a7e13, 0x114673ed, 0xc4babff5, 0x36b6b20b, 0x254ed2f8, 0xd742df06, + 0xe9a9e704, 0x1ba5eafa, 0x085d8a09, 0xfa5187f7, 0x2fad4bef, 0xdda14611, + 0xce5926e2, 0x3c552b1c, 0x604cc823, 0x9240c5dd, 0x81b8a52e, 0x73b4a8d0, + 0xa64864c8, 0x54446936, 0x47bc09c5, 0xb5b0043b, 0xff8fcfbb, 0x0d83c245, + 0x1e7ba2b6, 0xec77af48, 0x398b6350, 0xcb876eae, 0xd87f0e5d, 0x2a7303a3, + 0x766ae09c, 0x8466ed62, 0x979e8d91, 0x6592806f, 0xb06e4c77, 0x42624189, + 0x519a217a, 0xa3962c84, 0xc5e5b67a, 0x37e9bb84, 0x2411db77, 0xd61dd689, + 0x03e11a91, 0xf1ed176f, 0xe215779c, 0x10197a62, 0x4c00995d, 0xbe0c94a3, + 0xadf4f450, 0x5ff8f9ae, 0x8a0435b6, 0x78083848, 0x6bf058bb, 0x99fc5545, + 0xd3c39ec5, 0x21cf933b, 0x3237f3c8, 0xc03bfe36, 0x15c7322e, 0xe7cb3fd0, + 0xf4335f23, 0x063f52dd, 
0x5a26b1e2, 0xa82abc1c, 0xbbd2dcef, 0x49ded111, + 0x9c221d09, 0x6e2e10f7, 0x7dd67004, 0x8fda7dfa}; + +constexpr const ptrdiff_t kPrefetchHorizon = 256; + +} // namespace + +namespace crc32c { + +uint32_t ExtendPortable(uint32_t crc, const uint8_t* data, size_t size) { + const uint8_t* p = data; + const uint8_t* e = p + size; + uint32_t l = crc ^ kCRC32Xor; + +// Process one byte at a time. +#define STEP1 \ + do { \ + int c = (l & 0xff) ^ *p++; \ + l = kByteExtensionTable[c] ^ (l >> 8); \ + } while (0) + +// Process one of the 4 strides of 4-byte data. +#define STEP4(s) \ + do { \ + crc##s = ReadUint32LE(p + s * 4) ^ kStrideExtensionTable3[crc##s & 0xff] ^ \ + kStrideExtensionTable2[(crc##s >> 8) & 0xff] ^ \ + kStrideExtensionTable1[(crc##s >> 16) & 0xff] ^ \ + kStrideExtensionTable0[crc##s >> 24]; \ + } while (0) + +// Process a 16-byte swath of 4 strides, each of which has 4 bytes of data. +#define STEP16 \ + do { \ + STEP4(0); \ + STEP4(1); \ + STEP4(2); \ + STEP4(3); \ + p += 16; \ + } while (0) + +// Process 4 bytes that were already loaded into a word. +#define STEP4W(w) \ + do { \ + w ^= l; \ + for (size_t i = 0; i < 4; ++i) { \ + w = (w >> 8) ^ kByteExtensionTable[w & 0xff]; \ + } \ + l = w; \ + } while (0) + + // Point x at first 4-byte aligned byte in the buffer. This might be past the + // end of the buffer. + const uint8_t* x = RoundUp<4>(p); + if (x <= e) { + // Process bytes p is 4-byte aligned. + while (p != x) { + STEP1; + } + } + + if ((e - p) >= 16) { + // Load a 16-byte swath into the stride partial results. + uint32_t crc0 = ReadUint32LE(p + 0 * 4) ^ l; + uint32_t crc1 = ReadUint32LE(p + 1 * 4); + uint32_t crc2 = ReadUint32LE(p + 2 * 4); + uint32_t crc3 = ReadUint32LE(p + 3 * 4); + p += 16; + + while ((e - p) > kPrefetchHorizon) { + RequestPrefetch(p + kPrefetchHorizon); + + // Process 64 bytes at a time. + STEP16; + STEP16; + STEP16; + STEP16; + } + + // Process one 16-byte swath at a time. + while ((e - p) >= 16) { + STEP16; + } + + // Advance one word at a time as far as possible. + while ((e - p) >= 4) { + STEP4(0); + uint32_t tmp = crc0; + crc0 = crc1; + crc1 = crc2; + crc2 = crc3; + crc3 = tmp; + p += 4; + } + + // Combine the 4 partial stride results. + l = 0; + STEP4W(crc0); + STEP4W(crc1); + STEP4W(crc2); + STEP4W(crc3); + } + + // Process the last few bytes. + while (p != e) { + STEP1; + } +#undef STEP4W +#undef STEP16 +#undef STEP4 +#undef STEP1 + return l ^ kCRC32Xor; +} + +} // namespace crc32c diff --git a/src/crc32c/src/crc32c_portable_unittest.cc b/src/crc32c/src/crc32c_portable_unittest.cc new file mode 100644 index 0000000000..5098e2c373 --- /dev/null +++ b/src/crc32c/src/crc32c_portable_unittest.cc @@ -0,0 +1,20 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "gtest/gtest.h" + +#include "./crc32c_extend_unittests.h" +#include "./crc32c_internal.h" + +namespace crc32c { + +struct PortableTestTraits { + static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) { + return ExtendPortable(crc, data, count); + } +}; + +INSTANTIATE_TYPED_TEST_SUITE_P(Portable, ExtendTest, PortableTestTraits); + +} // namespace crc32c diff --git a/src/crc32c/src/crc32c_prefetch.h b/src/crc32c/src/crc32c_prefetch.h new file mode 100644 index 0000000000..aec7d54e84 --- /dev/null +++ b/src/crc32c/src/crc32c_prefetch.h @@ -0,0 +1,46 @@ +// Copyright 2017 The CRC32C Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef CRC32C_CRC32C_PREFETCH_H_ +#define CRC32C_CRC32C_PREFETCH_H_ + +#include <cstddef> +#include <cstdint> + +#ifdef CRC32C_HAVE_CONFIG_H +#include "crc32c/crc32c_config.h" +#endif + +#if HAVE_MM_PREFETCH + +#if defined(_MSC_VER) +#include <intrin.h> +#else // !defined(_MSC_VER) +#include <xmmintrin.h> +#endif // defined(_MSC_VER) + +#endif // HAVE_MM_PREFETCH + +namespace crc32c { + +// Ask the hardware to prefetch the data at the given address into the L1 cache. +inline void RequestPrefetch(const uint8_t* address) { +#if HAVE_BUILTIN_PREFETCH + // Clang and GCC implement the __builtin_prefetch non-standard extension, + // which maps to the best instruction on the target architecture. + __builtin_prefetch(reinterpret_cast<const char*>(address), 0 /* Read only. */, + 0 /* No temporal locality. */); +#elif HAVE_MM_PREFETCH + // Visual Studio doesn't implement __builtin_prefetch, but exposes the + // PREFETCHNTA instruction via the _mm_prefetch intrinsic. + _mm_prefetch(reinterpret_cast<const char*>(address), _MM_HINT_NTA); +#else + // No prefetch support. Silence compiler warnings. + (void)address; +#endif // HAVE_BUILTIN_PREFETCH +} + +} // namespace crc32c + +#endif // CRC32C_CRC32C_ROUND_UP_H_ diff --git a/src/crc32c/src/crc32c_prefetch_unittest.cc b/src/crc32c/src/crc32c_prefetch_unittest.cc new file mode 100644 index 0000000000..b34ed2d5fe --- /dev/null +++ b/src/crc32c/src/crc32c_prefetch_unittest.cc @@ -0,0 +1,9 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "./crc32c_prefetch.h" + +// There is no easy way to test cache prefetching. We can only test that the +// crc32c_prefetch.h header compiles on its own, so it doesn't have any unstated +// dependencies. diff --git a/src/crc32c/src/crc32c_read_le.h b/src/crc32c/src/crc32c_read_le.h new file mode 100644 index 0000000000..3bd45fe3aa --- /dev/null +++ b/src/crc32c/src/crc32c_read_le.h @@ -0,0 +1,53 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef CRC32C_CRC32C_READ_LE_H_ +#define CRC32C_CRC32C_READ_LE_H_ + +#include <cstdint> +#include <cstring> + +#ifdef CRC32C_HAVE_CONFIG_H +#include "crc32c/crc32c_config.h" +#endif + +namespace crc32c { + +// Reads a little-endian 32-bit integer from a 32-bit-aligned buffer. +inline uint32_t ReadUint32LE(const uint8_t* buffer) { +#if BYTE_ORDER_BIG_ENDIAN + return ((static_cast<uint32_t>(static_cast<uint8_t>(buffer[0]))) | + (static_cast<uint32_t>(static_cast<uint8_t>(buffer[1])) << 8) | + (static_cast<uint32_t>(static_cast<uint8_t>(buffer[2])) << 16) | + (static_cast<uint32_t>(static_cast<uint8_t>(buffer[3])) << 24)); +#else // !BYTE_ORDER_BIG_ENDIAN + uint32_t result; + // This should be optimized to a single instruction. + std::memcpy(&result, buffer, sizeof(result)); + return result; +#endif // BYTE_ORDER_BIG_ENDIAN +} + +// Reads a little-endian 64-bit integer from a 64-bit-aligned buffer. 
+inline uint64_t ReadUint64LE(const uint8_t* buffer) { +#if BYTE_ORDER_BIG_ENDIAN + return ((static_cast<uint64_t>(static_cast<uint8_t>(buffer[0]))) | + (static_cast<uint64_t>(static_cast<uint8_t>(buffer[1])) << 8) | + (static_cast<uint64_t>(static_cast<uint8_t>(buffer[2])) << 16) | + (static_cast<uint64_t>(static_cast<uint8_t>(buffer[3])) << 24) | + (static_cast<uint64_t>(static_cast<uint8_t>(buffer[4])) << 32) | + (static_cast<uint64_t>(static_cast<uint8_t>(buffer[5])) << 40) | + (static_cast<uint64_t>(static_cast<uint8_t>(buffer[6])) << 48) | + (static_cast<uint64_t>(static_cast<uint8_t>(buffer[7])) << 56)); +#else // !BYTE_ORDER_BIG_ENDIAN + uint64_t result; + // This should be optimized to a single instruction. + std::memcpy(&result, buffer, sizeof(result)); + return result; +#endif // BYTE_ORDER_BIG_ENDIAN +} + +} // namespace crc32c + +#endif // CRC32C_CRC32C_READ_LE_H_ diff --git a/src/crc32c/src/crc32c_read_le_unittest.cc b/src/crc32c/src/crc32c_read_le_unittest.cc new file mode 100644 index 0000000000..2a30302adf --- /dev/null +++ b/src/crc32c/src/crc32c_read_le_unittest.cc @@ -0,0 +1,32 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "./crc32c_read_le.h" + +#include <cstddef> +#include <cstdint> + +#include "gtest/gtest.h" + +#include "./crc32c_round_up.h" + +namespace crc32c { + +TEST(Crc32CReadLETest, ReadUint32LE) { + // little-endian 0x12345678 + alignas(4) uint8_t bytes[] = {0x78, 0x56, 0x34, 0x12}; + + ASSERT_EQ(RoundUp<4>(bytes), bytes) << "Stack array is not aligned"; + EXPECT_EQ(static_cast<uint32_t>(0x12345678), ReadUint32LE(bytes)); +} + +TEST(Crc32CReadLETest, ReadUint64LE) { + // little-endian 0x123456789ABCDEF0 + alignas(8) uint8_t bytes[] = {0xF0, 0xDE, 0xBC, 0x9A, 0x78, 0x56, 0x34, 0x12}; + + ASSERT_EQ(RoundUp<8>(bytes), bytes) << "Stack array is not aligned"; + EXPECT_EQ(static_cast<uint64_t>(0x123456789ABCDEF0), ReadUint64LE(bytes)); +} + +} // namespace crc32c diff --git a/src/crc32c/src/crc32c_round_up.h b/src/crc32c/src/crc32c_round_up.h new file mode 100644 index 0000000000..d3b922beb9 --- /dev/null +++ b/src/crc32c/src/crc32c_round_up.h @@ -0,0 +1,34 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef CRC32C_CRC32C_ROUND_UP_H_ +#define CRC32C_CRC32C_ROUND_UP_H_ + +#include <cstddef> +#include <cstdint> + +namespace crc32c { + +// Returns the smallest number >= the given number that is evenly divided by N. +// +// N must be a power of two. +template <int N> +constexpr inline uintptr_t RoundUp(uintptr_t pointer) { + static_assert((N & (N - 1)) == 0, "N must be a power of two"); + return (pointer + (N - 1)) & ~(N - 1); +} + +// Returns the smallest address >= the given address that is aligned to N bytes. +// +// N must be a power of two.
+template <int N> +constexpr inline const uint8_t* RoundUp(const uint8_t* pointer) { + static_assert((N & (N - 1)) == 0, "N must be a power of two"); + return reinterpret_cast<uint8_t*>( + RoundUp<N>(reinterpret_cast<uintptr_t>(pointer))); +} + +} // namespace crc32c + +#endif // CRC32C_CRC32C_ROUND_UP_H_ diff --git a/src/crc32c/src/crc32c_round_up_unittest.cc b/src/crc32c/src/crc32c_round_up_unittest.cc new file mode 100644 index 0000000000..5ff657bb5c --- /dev/null +++ b/src/crc32c/src/crc32c_round_up_unittest.cc @@ -0,0 +1,84 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "./crc32c_round_up.h" + +#include <cstddef> +#include <cstdint> + +#include "gtest/gtest.h" + +namespace crc32c { + +TEST(CRC32CRoundUpTest, RoundUpUintptr) { + uintptr_t zero = 0; + + ASSERT_EQ(zero, RoundUp<1>(zero)); + ASSERT_EQ(1U, RoundUp<1>(1U)); + ASSERT_EQ(2U, RoundUp<1>(2U)); + ASSERT_EQ(3U, RoundUp<1>(3U)); + ASSERT_EQ(~static_cast<uintptr_t>(0), RoundUp<1>(~static_cast<uintptr_t>(0))); + ASSERT_EQ(~static_cast<uintptr_t>(1), RoundUp<1>(~static_cast<uintptr_t>(1))); + ASSERT_EQ(~static_cast<uintptr_t>(2), RoundUp<1>(~static_cast<uintptr_t>(2))); + ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<1>(~static_cast<uintptr_t>(3))); + + ASSERT_EQ(zero, RoundUp<2>(zero)); + ASSERT_EQ(2U, RoundUp<2>(1U)); + ASSERT_EQ(2U, RoundUp<2>(2U)); + ASSERT_EQ(4U, RoundUp<2>(3U)); + ASSERT_EQ(4U, RoundUp<2>(4U)); + ASSERT_EQ(6U, RoundUp<2>(5U)); + ASSERT_EQ(6U, RoundUp<2>(6U)); + ASSERT_EQ(8U, RoundUp<2>(7U)); + ASSERT_EQ(8U, RoundUp<2>(8U)); + ASSERT_EQ(~static_cast<uintptr_t>(1), RoundUp<2>(~static_cast<uintptr_t>(1))); + ASSERT_EQ(~static_cast<uintptr_t>(1), RoundUp<2>(~static_cast<uintptr_t>(2))); + ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<2>(~static_cast<uintptr_t>(3))); + ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<2>(~static_cast<uintptr_t>(4))); + + ASSERT_EQ(zero, RoundUp<4>(zero)); + ASSERT_EQ(4U, RoundUp<4>(1U)); + ASSERT_EQ(4U, RoundUp<4>(2U)); + ASSERT_EQ(4U, RoundUp<4>(3U)); + ASSERT_EQ(4U, RoundUp<4>(4U)); + ASSERT_EQ(8U, RoundUp<4>(5U)); + ASSERT_EQ(8U, RoundUp<4>(6U)); + ASSERT_EQ(8U, RoundUp<4>(7U)); + ASSERT_EQ(8U, RoundUp<4>(8U)); + ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(3))); + ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(4))); + ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(5))); + ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(6))); + ASSERT_EQ(~static_cast<uintptr_t>(7), RoundUp<4>(~static_cast<uintptr_t>(7))); + ASSERT_EQ(~static_cast<uintptr_t>(7), RoundUp<4>(~static_cast<uintptr_t>(8))); + ASSERT_EQ(~static_cast<uintptr_t>(7), RoundUp<4>(~static_cast<uintptr_t>(9))); +} + +TEST(CRC32CRoundUpTest, RoundUpPointer) { + uintptr_t zero = 0, three = 3, four = 4, seven = 7, eight = 8; + + const uint8_t* zero_ptr = reinterpret_cast<const uint8_t*>(zero); + const uint8_t* three_ptr = reinterpret_cast<const uint8_t*>(three); + const uint8_t* four_ptr = reinterpret_cast<const uint8_t*>(four); + const uint8_t* seven_ptr = reinterpret_cast<const uint8_t*>(seven); + const uint8_t* eight_ptr = reinterpret_cast<uint8_t*>(eight); + + ASSERT_EQ(zero_ptr, RoundUp<1>(zero_ptr)); + ASSERT_EQ(zero_ptr, RoundUp<4>(zero_ptr)); + ASSERT_EQ(zero_ptr, RoundUp<8>(zero_ptr)); + + ASSERT_EQ(three_ptr, RoundUp<1>(three_ptr)); + 
ASSERT_EQ(four_ptr, RoundUp<4>(three_ptr)); + ASSERT_EQ(eight_ptr, RoundUp<8>(three_ptr)); + + ASSERT_EQ(four_ptr, RoundUp<1>(four_ptr)); + ASSERT_EQ(four_ptr, RoundUp<4>(four_ptr)); + ASSERT_EQ(eight_ptr, RoundUp<8>(four_ptr)); + + ASSERT_EQ(seven_ptr, RoundUp<1>(seven_ptr)); + ASSERT_EQ(eight_ptr, RoundUp<4>(seven_ptr)); + ASSERT_EQ(eight_ptr, RoundUp<8>(four_ptr)); +} + +} // namespace crc32c diff --git a/src/crc32c/src/crc32c_sse42.cc b/src/crc32c/src/crc32c_sse42.cc new file mode 100644 index 0000000000..139520428e --- /dev/null +++ b/src/crc32c/src/crc32c_sse42.cc @@ -0,0 +1,258 @@ +// Copyright 2008 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "./crc32c_sse42.h" + +// In a separate source file to allow this accelerated CRC32C function to be +// compiled with the appropriate compiler flags to enable SSE4.2 instructions. + +// This implementation is loosely based on Intel Pub 323405 from April 2011, +// "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction". + +#include <cstddef> +#include <cstdint> + +#include "./crc32c_internal.h" +#include "./crc32c_prefetch.h" +#include "./crc32c_read_le.h" +#include "./crc32c_round_up.h" +#ifdef CRC32C_HAVE_CONFIG_H +#include "crc32c/crc32c_config.h" +#endif + +#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__)) + +#if defined(_MSC_VER) +#include <intrin.h> +#else // !defined(_MSC_VER) +#include <nmmintrin.h> +#endif // defined(_MSC_VER) + +namespace crc32c { + +namespace { + +constexpr const ptrdiff_t kGroups = 3; +constexpr const ptrdiff_t kBlock0Size = 16 * 1024 / kGroups / 64 * 64; +constexpr const ptrdiff_t kBlock1Size = 4 * 1024 / kGroups / 8 * 8; +constexpr const ptrdiff_t kBlock2Size = 1024 / kGroups / 8 * 8; + +const uint32_t kBlock0SkipTable[8][16] = { + {0x00000000, 0xff770459, 0xfb027e43, 0x04757a1a, 0xf3e88a77, 0x0c9f8e2e, + 0x08eaf434, 0xf79df06d, 0xe23d621f, 0x1d4a6646, 0x193f1c5c, 0xe6481805, + 0x11d5e868, 0xeea2ec31, 0xead7962b, 0x15a09272}, + {0x00000000, 0xc196b2cf, 0x86c1136f, 0x4757a1a0, 0x086e502f, 0xc9f8e2e0, + 0x8eaf4340, 0x4f39f18f, 0x10dca05e, 0xd14a1291, 0x961db331, 0x578b01fe, + 0x18b2f071, 0xd92442be, 0x9e73e31e, 0x5fe551d1}, + {0x00000000, 0x21b940bc, 0x43728178, 0x62cbc1c4, 0x86e502f0, 0xa75c424c, + 0xc5978388, 0xe42ec334, 0x08267311, 0x299f33ad, 0x4b54f269, 0x6aedb2d5, + 0x8ec371e1, 0xaf7a315d, 0xcdb1f099, 0xec08b025}, + {0x00000000, 0x104ce622, 0x2099cc44, 0x30d52a66, 0x41339888, 0x517f7eaa, + 0x61aa54cc, 0x71e6b2ee, 0x82673110, 0x922bd732, 0xa2fefd54, 0xb2b21b76, + 0xc354a998, 0xd3184fba, 0xe3cd65dc, 0xf38183fe}, + {0x00000000, 0x012214d1, 0x024429a2, 0x03663d73, 0x04885344, 0x05aa4795, + 0x06cc7ae6, 0x07ee6e37, 0x0910a688, 0x0832b259, 0x0b548f2a, 0x0a769bfb, + 0x0d98f5cc, 0x0cbae11d, 0x0fdcdc6e, 0x0efec8bf}, + {0x00000000, 0x12214d10, 0x24429a20, 0x3663d730, 0x48853440, 0x5aa47950, + 0x6cc7ae60, 0x7ee6e370, 0x910a6880, 0x832b2590, 0xb548f2a0, 0xa769bfb0, + 0xd98f5cc0, 0xcbae11d0, 0xfdcdc6e0, 0xefec8bf0}, + {0x00000000, 0x27f8a7f1, 0x4ff14fe2, 0x6809e813, 0x9fe29fc4, 0xb81a3835, + 0xd013d026, 0xf7eb77d7, 0x3a294979, 0x1dd1ee88, 0x75d8069b, 0x5220a16a, + 0xa5cbd6bd, 0x8233714c, 0xea3a995f, 0xcdc23eae}, + {0x00000000, 0x745292f2, 0xe8a525e4, 0x9cf7b716, 0xd4a63d39, 0xa0f4afcb, + 0x3c0318dd, 0x48518a2f, 0xaca00c83, 0xd8f29e71, 0x44052967, 0x3057bb95, + 0x780631ba, 0x0c54a348, 0x90a3145e, 0xe4f186ac}, +}; +const uint32_t 
kBlock1SkipTable[8][16] = { + {0x00000000, 0x79113270, 0xf22264e0, 0x8b335690, 0xe1a8bf31, 0x98b98d41, + 0x138adbd1, 0x6a9be9a1, 0xc6bd0893, 0xbfac3ae3, 0x349f6c73, 0x4d8e5e03, + 0x2715b7a2, 0x5e0485d2, 0xd537d342, 0xac26e132}, + {0x00000000, 0x889667d7, 0x14c0b95f, 0x9c56de88, 0x298172be, 0xa1171569, + 0x3d41cbe1, 0xb5d7ac36, 0x5302e57c, 0xdb9482ab, 0x47c25c23, 0xcf543bf4, + 0x7a8397c2, 0xf215f015, 0x6e432e9d, 0xe6d5494a}, + {0x00000000, 0xa605caf8, 0x49e7e301, 0xefe229f9, 0x93cfc602, 0x35ca0cfa, + 0xda282503, 0x7c2deffb, 0x2273faf5, 0x8476300d, 0x6b9419f4, 0xcd91d30c, + 0xb1bc3cf7, 0x17b9f60f, 0xf85bdff6, 0x5e5e150e}, + {0x00000000, 0x44e7f5ea, 0x89cfebd4, 0xcd281e3e, 0x1673a159, 0x529454b3, + 0x9fbc4a8d, 0xdb5bbf67, 0x2ce742b2, 0x6800b758, 0xa528a966, 0xe1cf5c8c, + 0x3a94e3eb, 0x7e731601, 0xb35b083f, 0xf7bcfdd5}, + {0x00000000, 0x59ce8564, 0xb39d0ac8, 0xea538fac, 0x62d66361, 0x3b18e605, + 0xd14b69a9, 0x8885eccd, 0xc5acc6c2, 0x9c6243a6, 0x7631cc0a, 0x2fff496e, + 0xa77aa5a3, 0xfeb420c7, 0x14e7af6b, 0x4d292a0f}, + {0x00000000, 0x8eb5fb75, 0x1887801b, 0x96327b6e, 0x310f0036, 0xbfbafb43, + 0x2988802d, 0xa73d7b58, 0x621e006c, 0xecabfb19, 0x7a998077, 0xf42c7b02, + 0x5311005a, 0xdda4fb2f, 0x4b968041, 0xc5237b34}, + {0x00000000, 0xc43c00d8, 0x8d947741, 0x49a87799, 0x1ec49873, 0xdaf898ab, + 0x9350ef32, 0x576cefea, 0x3d8930e6, 0xf9b5303e, 0xb01d47a7, 0x7421477f, + 0x234da895, 0xe771a84d, 0xaed9dfd4, 0x6ae5df0c}, + {0x00000000, 0x7b1261cc, 0xf624c398, 0x8d36a254, 0xe9a5f1c1, 0x92b7900d, + 0x1f813259, 0x64935395, 0xd6a79573, 0xadb5f4bf, 0x208356eb, 0x5b913727, + 0x3f0264b2, 0x4410057e, 0xc926a72a, 0xb234c6e6}, +}; +const uint32_t kBlock2SkipTable[8][16] = { + {0x00000000, 0x8f158014, 0x1bc776d9, 0x94d2f6cd, 0x378eedb2, 0xb89b6da6, + 0x2c499b6b, 0xa35c1b7f, 0x6f1ddb64, 0xe0085b70, 0x74daadbd, 0xfbcf2da9, + 0x589336d6, 0xd786b6c2, 0x4354400f, 0xcc41c01b}, + {0x00000000, 0xde3bb6c8, 0xb99b1b61, 0x67a0ada9, 0x76da4033, 0xa8e1f6fb, + 0xcf415b52, 0x117aed9a, 0xedb48066, 0x338f36ae, 0x542f9b07, 0x8a142dcf, + 0x9b6ec055, 0x4555769d, 0x22f5db34, 0xfcce6dfc}, + {0x00000000, 0xde85763d, 0xb8e69a8b, 0x6663ecb6, 0x742143e7, 0xaaa435da, + 0xccc7d96c, 0x1242af51, 0xe84287ce, 0x36c7f1f3, 0x50a41d45, 0x8e216b78, + 0x9c63c429, 0x42e6b214, 0x24855ea2, 0xfa00289f}, + {0x00000000, 0xd569796d, 0xaf3e842b, 0x7a57fd46, 0x5b917ea7, 0x8ef807ca, + 0xf4affa8c, 0x21c683e1, 0xb722fd4e, 0x624b8423, 0x181c7965, 0xcd750008, + 0xecb383e9, 0x39dafa84, 0x438d07c2, 0x96e47eaf}, + {0x00000000, 0x6ba98c6d, 0xd75318da, 0xbcfa94b7, 0xab4a4745, 0xc0e3cb28, + 0x7c195f9f, 0x17b0d3f2, 0x5378f87b, 0x38d17416, 0x842be0a1, 0xef826ccc, + 0xf832bf3e, 0x939b3353, 0x2f61a7e4, 0x44c82b89}, + {0x00000000, 0xa6f1f0f6, 0x480f971d, 0xeefe67eb, 0x901f2e3a, 0x36eedecc, + 0xd810b927, 0x7ee149d1, 0x25d22a85, 0x8323da73, 0x6dddbd98, 0xcb2c4d6e, + 0xb5cd04bf, 0x133cf449, 0xfdc293a2, 0x5b336354}, + {0x00000000, 0x4ba4550a, 0x9748aa14, 0xdcecff1e, 0x2b7d22d9, 0x60d977d3, + 0xbc3588cd, 0xf791ddc7, 0x56fa45b2, 0x1d5e10b8, 0xc1b2efa6, 0x8a16baac, + 0x7d87676b, 0x36233261, 0xeacfcd7f, 0xa16b9875}, + {0x00000000, 0xadf48b64, 0x5e056039, 0xf3f1eb5d, 0xbc0ac072, 0x11fe4b16, + 0xe20fa04b, 0x4ffb2b2f, 0x7df9f615, 0xd00d7d71, 0x23fc962c, 0x8e081d48, + 0xc1f33667, 0x6c07bd03, 0x9ff6565e, 0x3202dd3a}, +}; + +constexpr const ptrdiff_t kPrefetchHorizon = 256; + +} // namespace + +uint32_t ExtendSse42(uint32_t crc, const uint8_t* data, size_t size) { + const uint8_t* p = data; + const uint8_t* e = data + size; + uint32_t l = crc ^ kCRC32Xor; + +#define STEP1 \ + do { \ + l = 
_mm_crc32_u8(l, *p++); \ + } while (0) + +#define STEP4(crc) \ + do { \ + crc = _mm_crc32_u32(crc, ReadUint32LE(p)); \ + p += 4; \ + } while (0) + +#define STEP8(crc, data) \ + do { \ + crc = _mm_crc32_u64(crc, ReadUint64LE(data)); \ + data += 8; \ + } while (0) + +#define STEP8BY3(crc0, crc1, crc2, p0, p1, p2) \ + do { \ + STEP8(crc0, p0); \ + STEP8(crc1, p1); \ + STEP8(crc2, p2); \ + } while (0) + +#define STEP8X3(crc0, crc1, crc2, bs) \ + do { \ + crc0 = _mm_crc32_u64(crc0, ReadUint64LE(p)); \ + crc1 = _mm_crc32_u64(crc1, ReadUint64LE(p + bs)); \ + crc2 = _mm_crc32_u64(crc2, ReadUint64LE(p + 2 * bs)); \ + p += 8; \ + } while (0) + +#define SKIP_BLOCK(crc, tab) \ + do { \ + crc = tab[0][crc & 0xf] ^ tab[1][(crc >> 4) & 0xf] ^ \ + tab[2][(crc >> 8) & 0xf] ^ tab[3][(crc >> 12) & 0xf] ^ \ + tab[4][(crc >> 16) & 0xf] ^ tab[5][(crc >> 20) & 0xf] ^ \ + tab[6][(crc >> 24) & 0xf] ^ tab[7][(crc >> 28) & 0xf]; \ + } while (0) + + // Point x at first 8-byte aligned byte in the buffer. This might be past the + // end of the buffer. + const uint8_t* x = RoundUp<8>(p); + if (x <= e) { + // Process bytes p is 8-byte aligned. + while (p != x) { + STEP1; + } + } + + // Proccess the data in predetermined block sizes with tables for quickly + // combining the checksum. Experimentally it's better to use larger block + // sizes where possible so use a hierarchy of decreasing block sizes. + uint64_t l64 = l; + while ((e - p) >= kGroups * kBlock0Size) { + uint64_t l641 = 0; + uint64_t l642 = 0; + for (int i = 0; i < kBlock0Size; i += 8 * 8) { + // Prefetch ahead to hide latency. + RequestPrefetch(p + kPrefetchHorizon); + RequestPrefetch(p + kBlock0Size + kPrefetchHorizon); + RequestPrefetch(p + 2 * kBlock0Size + kPrefetchHorizon); + + // Process 64 bytes at a time. + STEP8X3(l64, l641, l642, kBlock0Size); + STEP8X3(l64, l641, l642, kBlock0Size); + STEP8X3(l64, l641, l642, kBlock0Size); + STEP8X3(l64, l641, l642, kBlock0Size); + STEP8X3(l64, l641, l642, kBlock0Size); + STEP8X3(l64, l641, l642, kBlock0Size); + STEP8X3(l64, l641, l642, kBlock0Size); + STEP8X3(l64, l641, l642, kBlock0Size); + } + + // Combine results. + SKIP_BLOCK(l64, kBlock0SkipTable); + l64 ^= l641; + SKIP_BLOCK(l64, kBlock0SkipTable); + l64 ^= l642; + p += (kGroups - 1) * kBlock0Size; + } + while ((e - p) >= kGroups * kBlock1Size) { + uint64_t l641 = 0; + uint64_t l642 = 0; + for (int i = 0; i < kBlock1Size; i += 8) { + STEP8X3(l64, l641, l642, kBlock1Size); + } + SKIP_BLOCK(l64, kBlock1SkipTable); + l64 ^= l641; + SKIP_BLOCK(l64, kBlock1SkipTable); + l64 ^= l642; + p += (kGroups - 1) * kBlock1Size; + } + while ((e - p) >= kGroups * kBlock2Size) { + uint64_t l641 = 0; + uint64_t l642 = 0; + for (int i = 0; i < kBlock2Size; i += 8) { + STEP8X3(l64, l641, l642, kBlock2Size); + } + SKIP_BLOCK(l64, kBlock2SkipTable); + l64 ^= l641; + SKIP_BLOCK(l64, kBlock2SkipTable); + l64 ^= l642; + p += (kGroups - 1) * kBlock2Size; + } + + // Process bytes 16 at a time + while ((e - p) >= 16) { + STEP8(l64, p); + STEP8(l64, p); + } + + l = static_cast<uint32_t>(l64); + // Process the last few bytes. 
+ while (p != e) { + STEP1; + } +#undef SKIP_BLOCK +#undef STEP8X3 +#undef STEP8BY3 +#undef STEP8 +#undef STEP4 +#undef STEP1 + + return l ^ kCRC32Xor; +} + +} // namespace crc32c + +#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__)) diff --git a/src/crc32c/src/crc32c_sse42.h b/src/crc32c/src/crc32c_sse42.h new file mode 100644 index 0000000000..95da926632 --- /dev/null +++ b/src/crc32c/src/crc32c_sse42.h @@ -0,0 +1,33 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef CRC32C_CRC32C_SSE42_H_ +#define CRC32C_CRC32C_SSE42_H_ + +// X86-specific code. + +#include <cstddef> +#include <cstdint> + +#ifdef CRC32C_HAVE_CONFIG_H +#include "crc32c/crc32c_config.h" +#endif + +// The hardware-accelerated implementation is only enabled for 64-bit builds, +// because a straightforward 32-bit implementation actually runs slower than the +// portable version. Most X86 machines are 64-bit nowadays, so it doesn't make +// much sense to spend time building an optimized hardware-accelerated +// implementation. +#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__)) + +namespace crc32c { + +// SSE4.2-accelerated implementation in crc32c_sse42.cc +uint32_t ExtendSse42(uint32_t crc, const uint8_t* data, size_t count); + +} // namespace crc32c + +#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__)) + +#endif // CRC32C_CRC32C_SSE42_H_ diff --git a/src/crc32c/src/crc32c_sse42_check.h b/src/crc32c/src/crc32c_sse42_check.h new file mode 100644 index 0000000000..e7528912a6 --- /dev/null +++ b/src/crc32c/src/crc32c_sse42_check.h @@ -0,0 +1,50 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef CRC32C_CRC32C_SSE42_CHECK_H_ +#define CRC32C_CRC32C_SSE42_CHECK_H_ + +// X86-specific code checking the availability of SSE4.2 instructions. + +#include <cstddef> +#include <cstdint> + +#ifdef CRC32C_HAVE_CONFIG_H +#include "crc32c/crc32c_config.h" +#endif + +#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__)) + +// If the compiler supports SSE4.2, it definitely supports X86. + +#if defined(_MSC_VER) +#include <intrin.h> + +namespace crc32c { + +inline bool CanUseSse42() { + int cpu_info[4]; + __cpuid(cpu_info, 1); + return (cpu_info[2] & (1 << 20)) != 0; +} + +} // namespace crc32c + +#else // !defined(_MSC_VER) +#include <cpuid.h> + +namespace crc32c { + +inline bool CanUseSse42() { + unsigned int eax, ebx, ecx, edx; + return __get_cpuid(1, &eax, &ebx, &ecx, &edx) && ((ecx & (1 << 20)) != 0); +} + +} // namespace crc32c + +#endif // defined(_MSC_VER) + +#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__)) + +#endif // CRC32C_CRC32C_SSE42_CHECK_H_ diff --git a/src/crc32c/src/crc32c_sse42_unittest.cc b/src/crc32c/src/crc32c_sse42_unittest.cc new file mode 100644 index 0000000000..c73ad8ddd1 --- /dev/null +++ b/src/crc32c/src/crc32c_sse42_unittest.cc @@ -0,0 +1,24 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+ +#include "gtest/gtest.h" + +#include "./crc32c_extend_unittests.h" +#include "./crc32c_sse42.h" + +namespace crc32c { + +#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__)) + +struct Sse42TestTraits { + static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) { + return ExtendSse42(crc, data, count); + } +}; + +INSTANTIATE_TYPED_TEST_SUITE_P(Sse42, ExtendTest, Sse42TestTraits); + +#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__)) + +} // namespace crc32c diff --git a/src/crc32c/src/crc32c_test_main.cc b/src/crc32c/src/crc32c_test_main.cc new file mode 100644 index 0000000000..275ee380c6 --- /dev/null +++ b/src/crc32c/src/crc32c_test_main.cc @@ -0,0 +1,22 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifdef CRC32C_HAVE_CONFIG_H +#include "crc32c/crc32c_config.h" +#endif + +#include "gtest/gtest.h" + +#if CRC32C_TESTS_BUILT_WITH_GLOG +#include "glog/logging.h" +#endif // CRC32C_TESTS_BUILT_WITH_GLOG + +int main(int argc, char** argv) { +#if CRC32C_TESTS_BUILT_WITH_GLOG + google::InitGoogleLogging(argv[0]); + google::InstallFailureSignalHandler(); +#endif // CRC32C_TESTS_BUILT_WITH_GLOG + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/src/crc32c/src/crc32c_unittest.cc b/src/crc32c/src/crc32c_unittest.cc new file mode 100644 index 0000000000..d6c6af680c --- /dev/null +++ b/src/crc32c/src/crc32c_unittest.cc @@ -0,0 +1,129 @@ +// Copyright 2017 The CRC32C Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "crc32c/crc32c.h" + +#include <cstddef> +#include <cstdint> +#include <cstring> + +#include "gtest/gtest.h" + +#include "./crc32c_extend_unittests.h" + +TEST(Crc32CTest, Crc32c) { + // From rfc3720 section B.4. 
+ uint8_t buf[32]; + + std::memset(buf, 0, sizeof(buf)); + EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa), + crc32c::Crc32c(buf, sizeof(buf))); + + std::memset(buf, 0xff, sizeof(buf)); + EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43), + crc32c::Crc32c(buf, sizeof(buf))); + + for (size_t i = 0; i < 32; ++i) + buf[i] = static_cast<uint8_t>(i); + EXPECT_EQ(static_cast<uint32_t>(0x46dd794e), + crc32c::Crc32c(buf, sizeof(buf))); + + for (size_t i = 0; i < 32; ++i) + buf[i] = static_cast<uint8_t>(31 - i); + EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c), + crc32c::Crc32c(buf, sizeof(buf))); + + uint8_t data[48] = { + 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }; + EXPECT_EQ(static_cast<uint32_t>(0xd9963a56), + crc32c::Crc32c(data, sizeof(data))); +} + +namespace crc32c { + +struct ApiTestTraits { + static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) { + return ::crc32c::Extend(crc, data, count); + } +}; + +INSTANTIATE_TYPED_TEST_SUITE_P(Api, ExtendTest, ApiTestTraits); + +} // namespace crc32c + +TEST(CRC32CTest, Crc32cCharPointer) { + char buf[32]; + + std::memset(buf, 0, sizeof(buf)); + EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa), + crc32c::Crc32c(buf, sizeof(buf))); + + std::memset(buf, 0xff, sizeof(buf)); + EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43), + crc32c::Crc32c(buf, sizeof(buf))); + + for (size_t i = 0; i < 32; ++i) + buf[i] = static_cast<char>(i); + EXPECT_EQ(static_cast<uint32_t>(0x46dd794e), + crc32c::Crc32c(buf, sizeof(buf))); + + for (size_t i = 0; i < 32; ++i) + buf[i] = static_cast<char>(31 - i); + EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c), + crc32c::Crc32c(buf, sizeof(buf))); +} + +TEST(CRC32CTest, Crc32cStdString) { + std::string buf; + buf.resize(32); + + for (size_t i = 0; i < 32; ++i) + buf[i] = static_cast<char>(0x00); + EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa), crc32c::Crc32c(buf)); + + for (size_t i = 0; i < 32; ++i) + buf[i] = '\xff'; + EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43), crc32c::Crc32c(buf)); + + for (size_t i = 0; i < 32; ++i) + buf[i] = static_cast<char>(i); + EXPECT_EQ(static_cast<uint32_t>(0x46dd794e), crc32c::Crc32c(buf)); + + for (size_t i = 0; i < 32; ++i) + buf[i] = static_cast<char>(31 - i); + EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c), crc32c::Crc32c(buf)); +} + +#if __cplusplus > 201402L +#if __has_include(<string_view>) + +TEST(CRC32CTest, Crc32cStdStringView) { + uint8_t buf[32]; + std::string_view view(reinterpret_cast<const char*>(buf), sizeof(buf)); + + std::memset(buf, 0, sizeof(buf)); + EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa), crc32c::Crc32c(view)); + + std::memset(buf, 0xff, sizeof(buf)); + EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43), crc32c::Crc32c(view)); + + for (size_t i = 0; i < 32; ++i) + buf[i] = static_cast<uint8_t>(i); + EXPECT_EQ(static_cast<uint32_t>(0x46dd794e), crc32c::Crc32c(view)); + + for (size_t i = 0; i < 32; ++i) + buf[i] = static_cast<uint8_t>(31 - i); + EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c), crc32c::Crc32c(view)); +} + +#endif // __has_include(<string_view>) +#endif // __cplusplus > 201402L + +#define TESTED_EXTEND Extend +#include "./crc32c_extend_unittests.h" +#undef TESTED_EXTEND diff --git a/src/flatfile.h b/src/flatfile.h index 374ceff411..d80682d383 100644 --- a/src/flatfile.h +++ b/src/flatfile.h @@ -20,7 +20,7 @@ struct FlatFilePos 
template <typename Stream, typename Operation> inline void SerializationOp(Stream& s, Operation ser_action) { - READWRITE(VARINT(nFile, VarIntMode::NONNEGATIVE_SIGNED)); + READWRITE(VARINT_MODE(nFile, VarIntMode::NONNEGATIVE_SIGNED)); READWRITE(VARINT(nPos)); } diff --git a/src/init.cpp b/src/init.cpp index 5e8d8c3d2d..24a1fd27db 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -436,7 +436,7 @@ void SetupServerArgs() gArgs.AddArg("-timeout=<n>", strprintf("Specify connection timeout in milliseconds (minimum: 1, default: %d)", DEFAULT_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); gArgs.AddArg("-peertimeout=<n>", strprintf("Specify p2p connection timeout in seconds. This option determines the amount of time a peer may be inactive before the connection to it is dropped. (minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION); gArgs.AddArg("-torcontrol=<ip>:<port>", strprintf("Tor control port to use if onion listening enabled (default: %s)", DEFAULT_TOR_CONTROL), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-torpassword=<pass>", "Tor control port password (default: empty)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + gArgs.AddArg("-torpassword=<pass>", "Tor control port password (default: empty)", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::CONNECTION); gArgs.AddArg("-asmap=<file>", "Specify asn mapping used for bucketing of the peers. Path should be relative to the -datadir path.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); #ifdef USE_UPNP #if USE_UPNP @@ -537,15 +537,15 @@ void SetupServerArgs() gArgs.AddArg("-rest", strprintf("Accept public REST requests (default: %u)", DEFAULT_REST_ENABLE), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); gArgs.AddArg("-rpcallowip=<ip>", "Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); - gArgs.AddArg("-rpcauth=<userpw>", "Username and HMAC-SHA-256 hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcauth. The client then connects normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of arguments. This option can be specified multiple times", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); - gArgs.AddArg("-rpcbind=<addr>[:port]", "Bind to given address to listen for JSON-RPC connections. Do not expose the RPC server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC); + gArgs.AddArg("-rpcauth=<userpw>", "Username and HMAC-SHA-256 hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcauth. The client then connects normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of arguments. 
This option can be specified multiple times", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC); + gArgs.AddArg("-rpcbind=<addr>[:port]", "Bind to given address to listen for JSON-RPC connections. Do not expose the RPC server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY | ArgsManager::SENSITIVE, OptionsCategory::RPC); gArgs.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); - gArgs.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); + gArgs.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC); gArgs.AddArg("-rpcport=<port>", strprintf("Listen for JSON-RPC connections on <port> (default: %u, testnet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC); gArgs.AddArg("-rpcserialversion", strprintf("Sets the serialization of raw transaction or block hex returned in non-verbose mode, non-segwit(0) or segwit(1) (default: %d)", DEFAULT_RPC_SERIALIZE_VERSION), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); gArgs.AddArg("-rpcservertimeout=<n>", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC); gArgs.AddArg("-rpcthreads=<n>", strprintf("Set the number of threads to service RPC calls (default: %d)", DEFAULT_HTTP_THREADS), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); - gArgs.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); + gArgs.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC); gArgs.AddArg("-rpcwhitelist=<whitelist>", "Set a whitelist to filter incoming RPC calls for a specific user. The field <whitelist> comes in the format: <USERNAME>:<rpc 1>,<rpc 2>,...,<rpc n>. If multiple whitelists are set for a given user, they are set-intersected. See -rpcwhitelistdefault documentation for information on default whitelist behavior.", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); gArgs.AddArg("-rpcwhitelistdefault", "Sets default behavior for rpc whitelisting. Unless rpcwhitelistdefault is set to 0, if any -rpcwhitelist is set, the rpc server acts as if all rpc users are subject to empty-unless-otherwise-specified whitelists. 
If rpcwhitelistdefault is set to 1 and no -rpcwhitelist is set, rpc server acts as if all rpc users are subject to empty whitelists.", ArgsManager::ALLOW_BOOL, OptionsCategory::RPC); gArgs.AddArg("-rpcworkqueue=<n>", strprintf("Set the depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC); @@ -1230,6 +1230,9 @@ bool AppInitMain(NodeContext& node) LogPrintf("Config file: %s (not found, skipping)\n", config_file_path.string()); } + // Log the config arguments to debug.log + gArgs.LogArgs(); + LogPrintf("Using at most %i automatic connections (%i file descriptors available)\n", nMaxConnections, nFD); // Warn about relative -datadir path. @@ -1835,8 +1838,8 @@ bool AppInitMain(NodeContext& node) InitError(strprintf(_("Could not find or parse specified asmap: '%s'").translated, asmap_path)); return false; } - node.connman->SetAsmap(asmap); const uint256 asmap_version = SerializeHash(asmap); + node.connman->SetAsmap(std::move(asmap)); LogPrintf("Using asmap version %s for IP bucketing.\n", asmap_version.ToString()); } else { LogPrintf("Using /16 prefix for IP bucketing.\n"); diff --git a/src/interfaces/wallet.cpp b/src/interfaces/wallet.cpp index 568ab43ac0..baea71d0bb 100644 --- a/src/interfaces/wallet.cpp +++ b/src/interfaces/wallet.cpp @@ -119,7 +119,7 @@ public: } bool getPubKey(const CScript& script, const CKeyID& address, CPubKey& pub_key) override { - const SigningProvider* provider = m_wallet->GetSigningProvider(script); + std::unique_ptr<SigningProvider> provider = m_wallet->GetSigningProvider(script); if (provider) { return provider->GetPubKey(address, pub_key); } @@ -127,7 +127,7 @@ public: } bool getPrivKey(const CScript& script, const CKeyID& address, CKey& key) override { - const SigningProvider* provider = m_wallet->GetSigningProvider(script); + std::unique_ptr<SigningProvider> provider = m_wallet->GetSigningProvider(script); if (provider) { return provider->GetKey(address, key); } @@ -180,7 +180,6 @@ public: } return result; } - void learnRelatedScripts(const CPubKey& key, OutputType type) override { m_wallet->GetLegacyScriptPubKeyMan()->LearnRelatedScripts(key, type); } bool addDestData(const CTxDestination& dest, const std::string& key, const std::string& value) override { LOCK(m_wallet->cs_wallet); diff --git a/src/interfaces/wallet.h b/src/interfaces/wallet.h index de53b16c0c..d4280e8091 100644 --- a/src/interfaces/wallet.h +++ b/src/interfaces/wallet.h @@ -108,10 +108,6 @@ public: //! Get wallet address list. virtual std::vector<WalletAddress> getAddresses() = 0; - //! Add scripts to key store so old so software versions opening the wallet - //! database can detect payments to newer address types. - virtual void learnRelatedScripts(const CPubKey& key, OutputType type) = 0; - //! Add dest data. virtual bool addDestData(const CTxDestination& dest, const std::string& key, const std::string& value) = 0; diff --git a/src/leveldb/.appveyor.yml b/src/leveldb/.appveyor.yml new file mode 100644 index 0000000000..c24b17e805 --- /dev/null +++ b/src/leveldb/.appveyor.yml @@ -0,0 +1,35 @@ +# Build matrix / environment variables are explained on: +# https://www.appveyor.com/docs/appveyor-yml/ +# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml + +version: "{build}" + +environment: + matrix: + # AppVeyor currently has no custom job name feature. 
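Stepping back to the init.cpp hunk above: the asmap hash is now computed before the vector is moved into connman, because a moved-from vector no longer holds the file contents. The toy sketch below (FakeConnman and FakeHash are illustrative stand-ins, not Bitcoin Core types) shows why that ordering matters:

```cpp
// Sketch: hash the data *before* std::move hands it off; afterwards the
// source vector is in a moved-from (typically empty) state.
#include <cstdio>
#include <utility>
#include <vector>

// Stand-in for SerializeHash; just reports how much data it saw.
static size_t FakeHash(const std::vector<bool>& v) { return v.size(); }

struct FakeConnman {
  std::vector<bool> asmap;
  void SetAsmap(std::vector<bool> m) { asmap = std::move(m); }
};

int main() {
  std::vector<bool> asmap(1024, true);
  FakeConnman connman;

  const size_t version = FakeHash(asmap);   // hash first, while the data is still here
  connman.SetAsmap(std::move(asmap));       // then transfer ownership

  // Hashing after the move would have covered a (likely empty) vector instead.
  std::printf("hashed %zu bits, connman now holds %zu bits\n",
              version, connman.asmap.size());
  return 0;
}
```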
+ # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs + - JOB: Visual Studio 2017 + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 + CMAKE_GENERATOR: Visual Studio 15 2017 + +platform: + - x86 + - x64 + +configuration: + - RelWithDebInfo + - Debug + +build_script: + - git submodule update --init --recursive + - mkdir build + - cd build + - if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64 + - cmake --version + - cmake .. -G "%CMAKE_GENERATOR%" + -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%" + - cmake --build . --config "%CONFIGURATION%" + - cd .. + +test_script: + - cd build && ctest --verbose --build-config "%CONFIGURATION%" && cd .. diff --git a/src/leveldb/.clang-format b/src/leveldb/.clang-format new file mode 100644 index 0000000000..f493f75382 --- /dev/null +++ b/src/leveldb/.clang-format @@ -0,0 +1,18 @@ +# Run manually to reformat a file: +# clang-format -i --style=file <file> +# find . -iname '*.cc' -o -iname '*.h' -o -iname '*.h.in' | xargs clang-format -i --style=file +BasedOnStyle: Google +DerivePointerAlignment: false + +# Public headers are in a different location in the internal Google repository. +# Order them so that when imported to the authoritative repository they will be +# in correct alphabetical order. +IncludeCategories: + - Regex: '^(<|"(benchmarks|db|helpers)/)' + Priority: 1 + - Regex: '^"(leveldb)/' + Priority: 2 + - Regex: '^(<|"(issues|port|table|third_party|util)/)' + Priority: 3 + - Regex: '.*' + Priority: 4 diff --git a/src/leveldb/.gitignore b/src/leveldb/.gitignore index 71d87a4eeb..c4b242534f 100644 --- a/src/leveldb/.gitignore +++ b/src/leveldb/.gitignore @@ -1,13 +1,8 @@ -build_config.mk -*.a -*.o -*.dylib* -*.so -*.so.* -*_test -db_bench -leveldbutil -Release -Debug -Benchmark -vs2010.* +# Editors. +*.sw* +.vscode +.DS_Store + +# Build directory. +build/ +out/ diff --git a/src/leveldb/.travis.yml b/src/leveldb/.travis.yml index f5bd74c454..42cbe64fd0 100644 --- a/src/leveldb/.travis.yml +++ b/src/leveldb/.travis.yml @@ -1,13 +1,82 @@ +# Build matrix / environment variables are explained on: +# http://about.travis-ci.org/docs/user/build-configuration/ +# This file can be validated on: http://lint.travis-ci.org/ + language: cpp +dist: bionic +osx_image: xcode10.3 + compiler: -- clang - gcc +- clang os: - linux - osx -sudo: false -before_install: -- echo $LANG -- echo $LC_ALL + +env: +- BUILD_TYPE=Debug +- BUILD_TYPE=RelWithDebInfo + +addons: + apt: + sources: + - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main' + key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key' + - sourceline: 'ppa:ubuntu-toolchain-r/test' + packages: + - clang-9 + - cmake + - gcc-9 + - g++-9 + - libgoogle-perftools-dev + - libkyotocabinet-dev + - libsnappy-dev + - libsqlite3-dev + - ninja-build + homebrew: + packages: + - cmake + - crc32c + - gcc@9 + - gperftools + - kyoto-cabinet + - llvm@9 + - ninja + - snappy + - sqlite3 + update: true + +install: +# The following Homebrew packages aren't linked by default, and need to be +# prepended to the path explicitly. +- if [ "$TRAVIS_OS_NAME" = "osx" ]; then + export PATH="$(brew --prefix llvm)/bin:$PATH"; + fi +# /usr/bin/gcc points to an older compiler on both Linux and macOS. +- if [ "$CXX" = "g++" ]; then export CXX="g++-9" CC="gcc-9"; fi +# /usr/bin/clang points to an older compiler on both Linux and macOS. +# +# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values +# below don't work on macOS. 
Fortunately, the path change above makes the +# default values (clang and clang++) resolve to the correct compiler on macOS. +- if [ "$TRAVIS_OS_NAME" = "linux" ]; then + if [ "$CXX" = "clang++" ]; then export CXX="clang++-9" CC="clang-9"; fi; + fi +- echo ${CC} +- echo ${CXX} +- ${CXX} --version +- cmake --version + +before_script: +- mkdir -p build && cd build +- cmake .. -G Ninja -DCMAKE_BUILD_TYPE=$BUILD_TYPE + -DCMAKE_INSTALL_PREFIX=$HOME/.local +- cmake --build . +- cd .. + script: -- make -j 4 check +- cd build && ctest --verbose && cd .. +- "if [ -f build/db_bench ] ; then build/db_bench ; fi" +- "if [ -f build/db_bench_sqlite3 ] ; then build/db_bench_sqlite3 ; fi" +- "if [ -f build/db_bench_tree_db ] ; then build/db_bench_tree_db ; fi" +- cd build && cmake --build . --target install diff --git a/src/leveldb/CMakeLists.txt b/src/leveldb/CMakeLists.txt new file mode 100644 index 0000000000..1cb46256c2 --- /dev/null +++ b/src/leveldb/CMakeLists.txt @@ -0,0 +1,465 @@ +# Copyright 2017 The LevelDB Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. See the AUTHORS file for names of contributors. + +cmake_minimum_required(VERSION 3.9) +# Keep the version below in sync with the one in db.h +project(leveldb VERSION 1.22.0 LANGUAGES C CXX) + +# This project can use C11, but will gracefully decay down to C89. +set(CMAKE_C_STANDARD 11) +set(CMAKE_C_STANDARD_REQUIRED OFF) +set(CMAKE_C_EXTENSIONS OFF) + +# This project requires C++11. +set(CMAKE_CXX_STANDARD 11) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS OFF) + +if (WIN32) + set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_WINDOWS) + # TODO(cmumford): Make UNICODE configurable for Windows. + add_definitions(-D_UNICODE -DUNICODE) +else (WIN32) + set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_POSIX) +endif (WIN32) + +option(LEVELDB_BUILD_TESTS "Build LevelDB's unit tests" ON) +option(LEVELDB_BUILD_BENCHMARKS "Build LevelDB's benchmarks" ON) +option(LEVELDB_INSTALL "Install LevelDB's header and library" ON) + +include(TestBigEndian) +test_big_endian(LEVELDB_IS_BIG_ENDIAN) + +include(CheckIncludeFile) +check_include_file("unistd.h" HAVE_UNISTD_H) + +include(CheckLibraryExists) +check_library_exists(crc32c crc32c_value "" HAVE_CRC32C) +check_library_exists(snappy snappy_compress "" HAVE_SNAPPY) +check_library_exists(tcmalloc malloc "" HAVE_TCMALLOC) + +include(CheckCXXSymbolExists) +# Using check_cxx_symbol_exists() instead of check_c_symbol_exists() because +# we're including the header from C++, and feature detection should use the same +# compiler language that the project will use later. Principles aside, some +# versions of do not expose fdatasync() in <unistd.h> in standard C mode +# (-std=c11), but do expose the function in standard C++ mode (-std=c++11). +check_cxx_symbol_exists(fdatasync "unistd.h" HAVE_FDATASYNC) +check_cxx_symbol_exists(F_FULLFSYNC "fcntl.h" HAVE_FULLFSYNC) +check_cxx_symbol_exists(O_CLOEXEC "fcntl.h" HAVE_O_CLOEXEC) + +if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + # Disable C++ exceptions. + string(REGEX REPLACE "/EH[a-z]+" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHs-c-") + add_definitions(-D_HAS_EXCEPTIONS=0) + + # Disable RTTI. + string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-") +else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + # Enable strict prototype warnings for C code in clang and gcc. 
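The comment above motivates using check_cxx_symbol_exists() for the fdatasync/F_FULLFSYNC/O_CLOEXEC probes. Conceptually, each probe just tries to compile and link a tiny C++ translation unit along the lines of the sketch below (a rough illustration, not CMake's literal generated source):

```cpp
// Rough sketch of the HAVE_FDATASYNC probe: if this builds as C++, then
// <unistd.h> declares fdatasync() under the language mode the project will
// actually use, even when strict C mode might hide it.
#include <unistd.h>

int main() {
  // Referencing the symbol is enough; the probe only needs it declared and
  // linkable, not to do useful work.
  (void)fdatasync(-1);
  return 0;
}
```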
+ if(NOT CMAKE_C_FLAGS MATCHES "-Wstrict-prototypes") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wstrict-prototypes") + endif(NOT CMAKE_C_FLAGS MATCHES "-Wstrict-prototypes") + + # Disable C++ exceptions. + string(REGEX REPLACE "-fexceptions" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions") + + # Disable RTTI. + string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") +endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + +# Test whether -Wthread-safety is available. See +# https://clang.llvm.org/docs/ThreadSafetyAnalysis.html +include(CheckCXXCompilerFlag) +check_cxx_compiler_flag(-Wthread-safety HAVE_CLANG_THREAD_SAFETY) + +include(CheckCXXSourceCompiles) + +# Test whether C++17 __has_include is available. +check_cxx_source_compiles(" +#if defined(__has_include) && __has_include(<string>) +#include <string> +#endif +int main() { std::string str; return 0; } +" HAVE_CXX17_HAS_INCLUDE) + +set(LEVELDB_PUBLIC_INCLUDE_DIR "include/leveldb") +set(LEVELDB_PORT_CONFIG_DIR "include/port") + +configure_file( + "port/port_config.h.in" + "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h" +) + +include_directories( + "${PROJECT_BINARY_DIR}/include" + "." +) + +if(BUILD_SHARED_LIBS) + # Only export LEVELDB_EXPORT symbols from the shared library. + add_compile_options(-fvisibility=hidden) +endif(BUILD_SHARED_LIBS) + +# Must be included before CMAKE_INSTALL_INCLUDEDIR is used. +include(GNUInstallDirs) + +add_library(leveldb "") +target_sources(leveldb + PRIVATE + "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h" + "db/builder.cc" + "db/builder.h" + "db/c.cc" + "db/db_impl.cc" + "db/db_impl.h" + "db/db_iter.cc" + "db/db_iter.h" + "db/dbformat.cc" + "db/dbformat.h" + "db/dumpfile.cc" + "db/filename.cc" + "db/filename.h" + "db/log_format.h" + "db/log_reader.cc" + "db/log_reader.h" + "db/log_writer.cc" + "db/log_writer.h" + "db/memtable.cc" + "db/memtable.h" + "db/repair.cc" + "db/skiplist.h" + "db/snapshot.h" + "db/table_cache.cc" + "db/table_cache.h" + "db/version_edit.cc" + "db/version_edit.h" + "db/version_set.cc" + "db/version_set.h" + "db/write_batch_internal.h" + "db/write_batch.cc" + "port/port_stdcxx.h" + "port/port.h" + "port/thread_annotations.h" + "table/block_builder.cc" + "table/block_builder.h" + "table/block.cc" + "table/block.h" + "table/filter_block.cc" + "table/filter_block.h" + "table/format.cc" + "table/format.h" + "table/iterator_wrapper.h" + "table/iterator.cc" + "table/merger.cc" + "table/merger.h" + "table/table_builder.cc" + "table/table.cc" + "table/two_level_iterator.cc" + "table/two_level_iterator.h" + "util/arena.cc" + "util/arena.h" + "util/bloom.cc" + "util/cache.cc" + "util/coding.cc" + "util/coding.h" + "util/comparator.cc" + "util/crc32c.cc" + "util/crc32c.h" + "util/env.cc" + "util/filter_policy.cc" + "util/hash.cc" + "util/hash.h" + "util/logging.cc" + "util/logging.h" + "util/mutexlock.h" + "util/no_destructor.h" + "util/options.cc" + "util/random.h" + "util/status.cc" + + # Only CMake 3.3+ supports PUBLIC sources in targets exported by "install". 
+ $<$<VERSION_GREATER:CMAKE_VERSION,3.2>:PUBLIC> + "${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h" +) + +if (WIN32) + target_sources(leveldb + PRIVATE + "util/env_windows.cc" + "util/windows_logger.h" + ) +else (WIN32) + target_sources(leveldb + PRIVATE + "util/env_posix.cc" + "util/posix_logger.h" + ) +endif (WIN32) + +# MemEnv is not part of the interface and could be pulled to a separate library. +target_sources(leveldb + PRIVATE + "helpers/memenv/memenv.cc" + "helpers/memenv/memenv.h" +) + +target_include_directories(leveldb + PUBLIC + $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include> + $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}> +) + +set_target_properties(leveldb + PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR}) + +target_compile_definitions(leveldb + PRIVATE + # Used by include/export.h when building shared libraries. + LEVELDB_COMPILE_LIBRARY + # Used by port/port.h. + ${LEVELDB_PLATFORM_NAME}=1 +) +if (NOT HAVE_CXX17_HAS_INCLUDE) + target_compile_definitions(leveldb + PRIVATE + LEVELDB_HAS_PORT_CONFIG_H=1 + ) +endif(NOT HAVE_CXX17_HAS_INCLUDE) + +if(BUILD_SHARED_LIBS) + target_compile_definitions(leveldb + PUBLIC + # Used by include/export.h. 
+ LEVELDB_SHARED_LIBRARY + ) +endif(BUILD_SHARED_LIBS) + +if(HAVE_CLANG_THREAD_SAFETY) + target_compile_options(leveldb + PUBLIC + -Werror -Wthread-safety) +endif(HAVE_CLANG_THREAD_SAFETY) + +if(HAVE_CRC32C) + target_link_libraries(leveldb crc32c) +endif(HAVE_CRC32C) +if(HAVE_SNAPPY) + target_link_libraries(leveldb snappy) +endif(HAVE_SNAPPY) +if(HAVE_TCMALLOC) + target_link_libraries(leveldb tcmalloc) +endif(HAVE_TCMALLOC) + +# Needed by port_stdcxx.h +find_package(Threads REQUIRED) +target_link_libraries(leveldb Threads::Threads) + +add_executable(leveldbutil + "db/leveldbutil.cc" +) +target_link_libraries(leveldbutil leveldb) + +if(LEVELDB_BUILD_TESTS) + enable_testing() + + function(leveldb_test test_file) + get_filename_component(test_target_name "${test_file}" NAME_WE) + + add_executable("${test_target_name}" "") + target_sources("${test_target_name}" + PRIVATE + "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h" + "util/testharness.cc" + "util/testharness.h" + "util/testutil.cc" + "util/testutil.h" + + "${test_file}" + ) + target_link_libraries("${test_target_name}" leveldb) + target_compile_definitions("${test_target_name}" + PRIVATE + ${LEVELDB_PLATFORM_NAME}=1 + ) + if (NOT HAVE_CXX17_HAS_INCLUDE) + target_compile_definitions("${test_target_name}" + PRIVATE + LEVELDB_HAS_PORT_CONFIG_H=1 + ) + endif(NOT HAVE_CXX17_HAS_INCLUDE) + + add_test(NAME "${test_target_name}" COMMAND "${test_target_name}") + endfunction(leveldb_test) + + leveldb_test("db/c_test.c") + leveldb_test("db/fault_injection_test.cc") + + leveldb_test("issues/issue178_test.cc") + leveldb_test("issues/issue200_test.cc") + leveldb_test("issues/issue320_test.cc") + + leveldb_test("util/env_test.cc") + leveldb_test("util/status_test.cc") + leveldb_test("util/no_destructor_test.cc") + + if(NOT BUILD_SHARED_LIBS) + leveldb_test("db/autocompact_test.cc") + leveldb_test("db/corruption_test.cc") + leveldb_test("db/db_test.cc") + leveldb_test("db/dbformat_test.cc") + leveldb_test("db/filename_test.cc") + leveldb_test("db/log_test.cc") + leveldb_test("db/recovery_test.cc") + leveldb_test("db/skiplist_test.cc") + leveldb_test("db/version_edit_test.cc") + leveldb_test("db/version_set_test.cc") + leveldb_test("db/write_batch_test.cc") + + leveldb_test("helpers/memenv/memenv_test.cc") + + leveldb_test("table/filter_block_test.cc") + leveldb_test("table/table_test.cc") + + leveldb_test("util/arena_test.cc") + leveldb_test("util/bloom_test.cc") + leveldb_test("util/cache_test.cc") + leveldb_test("util/coding_test.cc") + leveldb_test("util/crc32c_test.cc") + leveldb_test("util/hash_test.cc") + leveldb_test("util/logging_test.cc") + + # TODO(costan): This test also uses + # "util/env_{posix|windows}_test_helper.h" + if (WIN32) + leveldb_test("util/env_windows_test.cc") + else (WIN32) + leveldb_test("util/env_posix_test.cc") + endif (WIN32) + endif(NOT BUILD_SHARED_LIBS) +endif(LEVELDB_BUILD_TESTS) + +if(LEVELDB_BUILD_BENCHMARKS) + function(leveldb_benchmark bench_file) + get_filename_component(bench_target_name "${bench_file}" NAME_WE) + + add_executable("${bench_target_name}" "") + target_sources("${bench_target_name}" + PRIVATE + "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h" + "util/histogram.cc" + "util/histogram.h" + "util/testharness.cc" + "util/testharness.h" + "util/testutil.cc" + "util/testutil.h" + + "${bench_file}" + ) + target_link_libraries("${bench_target_name}" leveldb) + target_compile_definitions("${bench_target_name}" + PRIVATE + ${LEVELDB_PLATFORM_NAME}=1 + ) + if (NOT 
HAVE_CXX17_HAS_INCLUDE) + target_compile_definitions("${bench_target_name}" + PRIVATE + LEVELDB_HAS_PORT_CONFIG_H=1 + ) + endif(NOT HAVE_CXX17_HAS_INCLUDE) + endfunction(leveldb_benchmark) + + if(NOT BUILD_SHARED_LIBS) + leveldb_benchmark("benchmarks/db_bench.cc") + endif(NOT BUILD_SHARED_LIBS) + + check_library_exists(sqlite3 sqlite3_open "" HAVE_SQLITE3) + if(HAVE_SQLITE3) + leveldb_benchmark("benchmarks/db_bench_sqlite3.cc") + target_link_libraries(db_bench_sqlite3 sqlite3) + endif(HAVE_SQLITE3) + + # check_library_exists is insufficient here because the library names have + # different manglings when compiled with clang or gcc, at least when installed + # with Homebrew on Mac. + set(OLD_CMAKE_REQURED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES}) + list(APPEND CMAKE_REQUIRED_LIBRARIES kyotocabinet) + check_cxx_source_compiles(" +#include <kcpolydb.h> + +int main() { + kyotocabinet::TreeDB* db = new kyotocabinet::TreeDB(); + delete db; + return 0; +} + " HAVE_KYOTOCABINET) + set(CMAKE_REQUIRED_LIBRARIES ${OLD_CMAKE_REQURED_LIBRARIES}) + if(HAVE_KYOTOCABINET) + leveldb_benchmark("benchmarks/db_bench_tree_db.cc") + target_link_libraries(db_bench_tree_db kyotocabinet) + endif(HAVE_KYOTOCABINET) +endif(LEVELDB_BUILD_BENCHMARKS) + +if(LEVELDB_INSTALL) + install(TARGETS leveldb + EXPORT leveldbTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + ) + install( + FILES + "${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h" + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/leveldb + ) + + include(CMakePackageConfigHelpers) + write_basic_package_version_file( + "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake" + COMPATIBILITY SameMajorVersion + ) + install( + EXPORT leveldbTargets + NAMESPACE leveldb:: + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb" + ) + install( + FILES + "cmake/leveldbConfig.cmake" + "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake" + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb" + ) +endif(LEVELDB_INSTALL) diff --git a/src/leveldb/CONTRIBUTING.md b/src/leveldb/CONTRIBUTING.md index cd600ff46b..a74572a596 100644 --- a/src/leveldb/CONTRIBUTING.md +++ b/src/leveldb/CONTRIBUTING.md @@ -31,6 +31,6 @@ the CLA. ## Writing Code ## -If your contribution contains code, please make sure that it follows -[the style guide](http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml). +If your contribution contains code, please make sure that it follows +[the style guide](http://google.github.io/styleguide/cppguide.html). Otherwise we will have to ask you to make changes, and that's no fun for anyone. diff --git a/src/leveldb/Makefile b/src/leveldb/Makefile deleted file mode 100644 index f7cc7d736c..0000000000 --- a/src/leveldb/Makefile +++ /dev/null @@ -1,424 +0,0 @@ -# Copyright (c) 2011 The LevelDB Authors. All rights reserved. 
-# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. See the AUTHORS file for names of contributors. - -#----------------------------------------------- -# Uncomment exactly one of the lines labelled (A), (B), and (C) below -# to switch between compilation modes. - -# (A) Production use (optimized mode) -OPT ?= -O2 -DNDEBUG -# (B) Debug mode, w/ full line-level debugging symbols -# OPT ?= -g2 -# (C) Profiling mode: opt, but w/debugging symbols -# OPT ?= -O2 -g2 -DNDEBUG -#----------------------------------------------- - -# detect what platform we're building on -$(shell CC="$(CC)" CXX="$(CXX)" TARGET_OS="$(TARGET_OS)" \ - ./build_detect_platform build_config.mk ./) -# this file is generated by the previous line to set build flags and sources -include build_config.mk - -TESTS = \ - db/autocompact_test \ - db/c_test \ - db/corruption_test \ - db/db_test \ - db/dbformat_test \ - db/fault_injection_test \ - db/filename_test \ - db/log_test \ - db/recovery_test \ - db/skiplist_test \ - db/version_edit_test \ - db/version_set_test \ - db/write_batch_test \ - helpers/memenv/memenv_test \ - issues/issue178_test \ - issues/issue200_test \ - table/filter_block_test \ - table/table_test \ - util/arena_test \ - util/bloom_test \ - util/cache_test \ - util/coding_test \ - util/crc32c_test \ - util/env_posix_test \ - util/env_test \ - util/hash_test - -UTILS = \ - db/db_bench \ - db/leveldbutil - -# Put the object files in a subdirectory, but the application at the top of the object dir. -PROGNAMES := $(notdir $(TESTS) $(UTILS)) - -# On Linux may need libkyotocabinet-dev for dependency. -BENCHMARKS = \ - doc/bench/db_bench_sqlite3 \ - doc/bench/db_bench_tree_db - -CFLAGS += -I. -I./include $(PLATFORM_CCFLAGS) $(OPT) -CXXFLAGS += -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT) - -LDFLAGS += $(PLATFORM_LDFLAGS) -LIBS += $(PLATFORM_LIBS) - -SIMULATOR_OUTDIR=out-ios-x86 -DEVICE_OUTDIR=out-ios-arm - -ifeq ($(PLATFORM), IOS) -# Note: iOS should probably be using libtool, not ar. 
-AR=xcrun ar -SIMULATORSDK=$(shell xcrun -sdk iphonesimulator --show-sdk-path) -DEVICESDK=$(shell xcrun -sdk iphoneos --show-sdk-path) -DEVICE_CFLAGS = -isysroot "$(DEVICESDK)" -arch armv6 -arch armv7 -arch armv7s -arch arm64 -SIMULATOR_CFLAGS = -isysroot "$(SIMULATORSDK)" -arch i686 -arch x86_64 -STATIC_OUTDIR=out-ios-universal -else -STATIC_OUTDIR=out-static -SHARED_OUTDIR=out-shared -STATIC_PROGRAMS := $(addprefix $(STATIC_OUTDIR)/, $(PROGNAMES)) -SHARED_PROGRAMS := $(addprefix $(SHARED_OUTDIR)/, db_bench) -endif - -STATIC_LIBOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(SOURCES:.cc=.o)) -STATIC_MEMENVOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o)) - -DEVICE_LIBOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(SOURCES:.cc=.o)) -DEVICE_MEMENVOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o)) - -SIMULATOR_LIBOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(SOURCES:.cc=.o)) -SIMULATOR_MEMENVOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o)) - -SHARED_LIBOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(SOURCES:.cc=.o)) -SHARED_MEMENVOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o)) - -TESTUTIL := $(STATIC_OUTDIR)/util/testutil.o -TESTHARNESS := $(STATIC_OUTDIR)/util/testharness.o $(TESTUTIL) - -STATIC_TESTOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(TESTS))) -STATIC_UTILOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(UTILS))) -STATIC_ALLOBJS := $(STATIC_LIBOBJECTS) $(STATIC_MEMENVOBJECTS) $(STATIC_TESTOBJS) $(STATIC_UTILOBJS) $(TESTHARNESS) -DEVICE_ALLOBJS := $(DEVICE_LIBOBJECTS) $(DEVICE_MEMENVOBJECTS) -SIMULATOR_ALLOBJS := $(SIMULATOR_LIBOBJECTS) $(SIMULATOR_MEMENVOBJECTS) - -default: all - -# Should we build shared libraries? -ifneq ($(PLATFORM_SHARED_EXT),) - -# Many leveldb test apps use non-exported API's. Only build a subset for testing. -SHARED_ALLOBJS := $(SHARED_LIBOBJECTS) $(SHARED_MEMENVOBJECTS) $(TESTHARNESS) - -ifneq ($(PLATFORM_SHARED_VERSIONED),true) -SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT) -SHARED_LIB2 = $(SHARED_LIB1) -SHARED_LIB3 = $(SHARED_LIB1) -SHARED_LIBS = $(SHARED_LIB1) -SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a -else -# Update db.h if you change these. 
-SHARED_VERSION_MAJOR = 1 -SHARED_VERSION_MINOR = 20 -SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT) -SHARED_LIB2 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR) -SHARED_LIB3 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR).$(SHARED_VERSION_MINOR) -SHARED_LIBS = $(SHARED_OUTDIR)/$(SHARED_LIB1) $(SHARED_OUTDIR)/$(SHARED_LIB2) $(SHARED_OUTDIR)/$(SHARED_LIB3) -$(SHARED_OUTDIR)/$(SHARED_LIB1): $(SHARED_OUTDIR)/$(SHARED_LIB3) - ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB1) -$(SHARED_OUTDIR)/$(SHARED_LIB2): $(SHARED_OUTDIR)/$(SHARED_LIB3) - ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB2) -SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a -endif - -$(SHARED_OUTDIR)/$(SHARED_LIB3): $(SHARED_LIBOBJECTS) - $(CXX) $(LDFLAGS) $(PLATFORM_SHARED_LDFLAGS)$(SHARED_LIB2) $(SHARED_LIBOBJECTS) -o $(SHARED_OUTDIR)/$(SHARED_LIB3) $(LIBS) - -endif # PLATFORM_SHARED_EXT - -all: $(SHARED_LIBS) $(SHARED_PROGRAMS) $(STATIC_OUTDIR)/libleveldb.a $(STATIC_OUTDIR)/libmemenv.a $(STATIC_PROGRAMS) - -check: $(STATIC_PROGRAMS) - for t in $(notdir $(TESTS)); do echo "***** Running $$t"; $(STATIC_OUTDIR)/$$t || exit 1; done - -clean: - -rm -rf out-static out-shared out-ios-x86 out-ios-arm out-ios-universal - -rm -f build_config.mk - -rm -rf ios-x86 ios-arm - -$(STATIC_OUTDIR): - mkdir $@ - -$(STATIC_OUTDIR)/db: | $(STATIC_OUTDIR) - mkdir $@ - -$(STATIC_OUTDIR)/helpers/memenv: | $(STATIC_OUTDIR) - mkdir -p $@ - -$(STATIC_OUTDIR)/port: | $(STATIC_OUTDIR) - mkdir $@ - -$(STATIC_OUTDIR)/table: | $(STATIC_OUTDIR) - mkdir $@ - -$(STATIC_OUTDIR)/util: | $(STATIC_OUTDIR) - mkdir $@ - -.PHONY: STATIC_OBJDIRS -STATIC_OBJDIRS: \ - $(STATIC_OUTDIR)/db \ - $(STATIC_OUTDIR)/port \ - $(STATIC_OUTDIR)/table \ - $(STATIC_OUTDIR)/util \ - $(STATIC_OUTDIR)/helpers/memenv - -$(SHARED_OUTDIR): - mkdir $@ - -$(SHARED_OUTDIR)/db: | $(SHARED_OUTDIR) - mkdir $@ - -$(SHARED_OUTDIR)/helpers/memenv: | $(SHARED_OUTDIR) - mkdir -p $@ - -$(SHARED_OUTDIR)/port: | $(SHARED_OUTDIR) - mkdir $@ - -$(SHARED_OUTDIR)/table: | $(SHARED_OUTDIR) - mkdir $@ - -$(SHARED_OUTDIR)/util: | $(SHARED_OUTDIR) - mkdir $@ - -.PHONY: SHARED_OBJDIRS -SHARED_OBJDIRS: \ - $(SHARED_OUTDIR)/db \ - $(SHARED_OUTDIR)/port \ - $(SHARED_OUTDIR)/table \ - $(SHARED_OUTDIR)/util \ - $(SHARED_OUTDIR)/helpers/memenv - -$(DEVICE_OUTDIR): - mkdir $@ - -$(DEVICE_OUTDIR)/db: | $(DEVICE_OUTDIR) - mkdir $@ - -$(DEVICE_OUTDIR)/helpers/memenv: | $(DEVICE_OUTDIR) - mkdir -p $@ - -$(DEVICE_OUTDIR)/port: | $(DEVICE_OUTDIR) - mkdir $@ - -$(DEVICE_OUTDIR)/table: | $(DEVICE_OUTDIR) - mkdir $@ - -$(DEVICE_OUTDIR)/util: | $(DEVICE_OUTDIR) - mkdir $@ - -.PHONY: DEVICE_OBJDIRS -DEVICE_OBJDIRS: \ - $(DEVICE_OUTDIR)/db \ - $(DEVICE_OUTDIR)/port \ - $(DEVICE_OUTDIR)/table \ - $(DEVICE_OUTDIR)/util \ - $(DEVICE_OUTDIR)/helpers/memenv - -$(SIMULATOR_OUTDIR): - mkdir $@ - -$(SIMULATOR_OUTDIR)/db: | $(SIMULATOR_OUTDIR) - mkdir $@ - -$(SIMULATOR_OUTDIR)/helpers/memenv: | $(SIMULATOR_OUTDIR) - mkdir -p $@ - -$(SIMULATOR_OUTDIR)/port: | $(SIMULATOR_OUTDIR) - mkdir $@ - -$(SIMULATOR_OUTDIR)/table: | $(SIMULATOR_OUTDIR) - mkdir $@ - -$(SIMULATOR_OUTDIR)/util: | $(SIMULATOR_OUTDIR) - mkdir $@ - -.PHONY: SIMULATOR_OBJDIRS -SIMULATOR_OBJDIRS: \ - $(SIMULATOR_OUTDIR)/db \ - $(SIMULATOR_OUTDIR)/port \ - $(SIMULATOR_OUTDIR)/table \ - $(SIMULATOR_OUTDIR)/util \ - $(SIMULATOR_OUTDIR)/helpers/memenv - -$(STATIC_ALLOBJS): | STATIC_OBJDIRS -$(DEVICE_ALLOBJS): | DEVICE_OBJDIRS -$(SIMULATOR_ALLOBJS): | SIMULATOR_OBJDIRS -$(SHARED_ALLOBJS): | SHARED_OBJDIRS - -ifeq ($(PLATFORM), IOS) -$(DEVICE_OUTDIR)/libleveldb.a: $(DEVICE_LIBOBJECTS) - 
rm -f $@ - $(AR) -rs $@ $(DEVICE_LIBOBJECTS) - -$(SIMULATOR_OUTDIR)/libleveldb.a: $(SIMULATOR_LIBOBJECTS) - rm -f $@ - $(AR) -rs $@ $(SIMULATOR_LIBOBJECTS) - -$(DEVICE_OUTDIR)/libmemenv.a: $(DEVICE_MEMENVOBJECTS) - rm -f $@ - $(AR) -rs $@ $(DEVICE_MEMENVOBJECTS) - -$(SIMULATOR_OUTDIR)/libmemenv.a: $(SIMULATOR_MEMENVOBJECTS) - rm -f $@ - $(AR) -rs $@ $(SIMULATOR_MEMENVOBJECTS) - -# For iOS, create universal object libraries to be used on both the simulator and -# a device. -$(STATIC_OUTDIR)/libleveldb.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a - lipo -create $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a -output $@ - -$(STATIC_OUTDIR)/libmemenv.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a - lipo -create $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a -output $@ -else -$(STATIC_OUTDIR)/libleveldb.a:$(STATIC_LIBOBJECTS) - rm -f $@ - $(AR) -rs $@ $(STATIC_LIBOBJECTS) - -$(STATIC_OUTDIR)/libmemenv.a:$(STATIC_MEMENVOBJECTS) - rm -f $@ - $(AR) -rs $@ $(STATIC_MEMENVOBJECTS) -endif - -$(SHARED_MEMENVLIB):$(SHARED_MEMENVOBJECTS) - rm -f $@ - $(AR) -rs $@ $(SHARED_MEMENVOBJECTS) - -$(STATIC_OUTDIR)/db_bench:db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) - $(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/db_bench_sqlite3:doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) - $(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lsqlite3 $(LIBS) - -$(STATIC_OUTDIR)/db_bench_tree_db:doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) - $(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lkyotocabinet $(LIBS) - -$(STATIC_OUTDIR)/leveldbutil:db/leveldbutil.cc $(STATIC_LIBOBJECTS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) db/leveldbutil.cc $(STATIC_LIBOBJECTS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/arena_test:util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/autocompact_test:db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/bloom_test:util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/c_test:$(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/cache_test:util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/coding_test:util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/corruption_test:db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/crc32c_test:util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/db_test:db/db_test.cc $(STATIC_LIBOBJECTS) 
$(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/dbformat_test:db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/env_posix_test:util/env_posix_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_posix_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/env_test:util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/fault_injection_test:db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/filename_test:db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/filter_block_test:table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/hash_test:util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/issue178_test:issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/issue200_test:issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/log_test:db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/recovery_test:db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/table_test:table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/skiplist_test:db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/version_edit_test:db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/version_set_test:db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/write_batch_test:db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) - $(CXX) $(LDFLAGS) $(CXXFLAGS) db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS) - -$(STATIC_OUTDIR)/memenv_test:$(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS) - $(XCRUN) $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS) -o $@ $(LIBS) - 
-$(SHARED_OUTDIR)/db_bench:$(SHARED_OUTDIR)/db/db_bench.o $(SHARED_LIBS) $(TESTUTIL) - $(XCRUN) $(CXX) $(LDFLAGS) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(SHARED_OUTDIR)/db/db_bench.o $(TESTUTIL) $(SHARED_OUTDIR)/$(SHARED_LIB3) -o $@ $(LIBS) - -.PHONY: run-shared -run-shared: $(SHARED_OUTDIR)/db_bench - LD_LIBRARY_PATH=$(SHARED_OUTDIR) $(SHARED_OUTDIR)/db_bench - -$(SIMULATOR_OUTDIR)/%.o: %.cc - xcrun -sdk iphonesimulator $(CXX) $(CXXFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@ - -$(DEVICE_OUTDIR)/%.o: %.cc - xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) $(DEVICE_CFLAGS) -c $< -o $@ - -$(SIMULATOR_OUTDIR)/%.o: %.c - xcrun -sdk iphonesimulator $(CC) $(CFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@ - -$(DEVICE_OUTDIR)/%.o: %.c - xcrun -sdk iphoneos $(CC) $(CFLAGS) $(DEVICE_CFLAGS) -c $< -o $@ - -$(STATIC_OUTDIR)/%.o: %.cc - $(CXX) $(CXXFLAGS) -c $< -o $@ - -$(STATIC_OUTDIR)/%.o: %.c - $(CC) $(CFLAGS) -c $< -o $@ - -$(SHARED_OUTDIR)/%.o: %.cc - $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@ - -$(SHARED_OUTDIR)/%.o: %.c - $(CC) $(CFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@ - -$(STATIC_OUTDIR)/port/port_posix_sse.o: port/port_posix_sse.cc - $(CXX) $(CXXFLAGS) $(PLATFORM_SSEFLAGS) -c $< -o $@ - -$(SHARED_OUTDIR)/port/port_posix_sse.o: port/port_posix_sse.cc - $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(PLATFORM_SSEFLAGS) -c $< -o $@ diff --git a/src/leveldb/README.md b/src/leveldb/README.md index a010c50858..dadfd5693e 100644 --- a/src/leveldb/README.md +++ b/src/leveldb/README.md @@ -1,10 +1,12 @@ **LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.** [![Build Status](https://travis-ci.org/google/leveldb.svg?branch=master)](https://travis-ci.org/google/leveldb) +[![Build status](https://ci.appveyor.com/api/projects/status/g2j5j4rfkda6eyw5/branch/master?svg=true)](https://ci.appveyor.com/project/pwnall/leveldb) Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com) # Features + * Keys and values are arbitrary byte arrays. * Data is stored sorted by key. * Callers can provide a custom comparison function to override the sort order. @@ -16,15 +18,55 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com) * External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions. # Documentation - [LevelDB library documentation](https://github.com/google/leveldb/blob/master/doc/index.md) is online and bundled with the source code. + [LevelDB library documentation](https://github.com/google/leveldb/blob/master/doc/index.md) is online and bundled with the source code. # Limitations + * This is not a SQL database. It does not have a relational data model, it does not support SQL queries, and it has no support for indexes. * Only a single process (possibly multi-threaded) can access a particular database at a time. * There is no client-server support builtin to the library. An application that needs such support will have to wrap their own server around the library. +# Building + +This project supports [CMake](https://cmake.org/) out of the box. + +### Build for POSIX + +Quick start: + +```bash +mkdir -p build && cd build +cmake -DCMAKE_BUILD_TYPE=Release .. && cmake --build . +``` + +### Building for Windows + +First generate the Visual Studio 2017 project/solution files: + +```cmd +mkdir build +cd build +cmake -G "Visual Studio 15" .. +``` +The default default will build for x86. 
For 64-bit run: + +```cmd +cmake -G "Visual Studio 15 Win64" .. +``` + +To compile the Windows solution from the command-line: + +```cmd +devenv /build Debug leveldb.sln +``` + +or open leveldb.sln in Visual Studio and build from within. + +Please see the CMake documentation and `CMakeLists.txt` for more advanced usage. + # Contributing to the leveldb Project + The leveldb project welcomes contributions. leveldb's primary goal is to be a reliable and fast key/value store. Changes that are in line with the features/limitations outlined above, and meet the requirements below, @@ -32,10 +74,10 @@ will be considered. Contribution requirements: -1. **POSIX only**. We _generally_ will only accept changes that are both - compiled, and tested on a POSIX platform - usually Linux. Very small - changes will sometimes be accepted, but consider that more of an - exception than the rule. +1. **Tested platforms only**. We _generally_ will only accept changes for + platforms that are compiled and tested. This means POSIX (for Linux and + macOS) or Windows. Very small changes will sometimes be accepted, but + consider that more of an exception than the rule. 2. **Stable API**. We strive very hard to maintain a stable API. Changes that require changes for projects using leveldb _might_ be rejected without @@ -44,7 +86,16 @@ Contribution requirements: 3. **Tests**: All changes must be accompanied by a new (or changed) test, or a sufficient explanation as to why a new (or changed) test is not required. +4. **Consistent Style**: This project conforms to the + [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). + To ensure your changes are properly formatted please run: + + ``` + clang-format -i --style=file <file> + ``` + ## Submitting a Pull Request + Before any pull request will be accepted the author must first sign a Contributor License Agreement (CLA) at https://cla.developers.google.com/. @@ -138,37 +189,37 @@ uncompressed blocks in memory, the read performance improves again: See [doc/index.md](doc/index.md) for more explanation. See [doc/impl.md](doc/impl.md) for a brief overview of the implementation. -The public interface is in include/*.h. Callers should not include or +The public interface is in include/leveldb/*.h. Callers should not include or rely on the details of any other header files in this package. Those internal APIs may be changed without warning. Guide to header files: -* **include/db.h**: Main interface to the DB: Start here +* **include/leveldb/db.h**: Main interface to the DB: Start here. -* **include/options.h**: Control over the behavior of an entire database, +* **include/leveldb/options.h**: Control over the behavior of an entire database, and also control over the behavior of individual reads and writes. -* **include/comparator.h**: Abstraction for user-specified comparison function. +* **include/leveldb/comparator.h**: Abstraction for user-specified comparison function. If you want just bytewise comparison of keys, you can use the default comparator, but clients can write their own comparator implementations if they -want custom ordering (e.g. to handle different character encodings, etc.) +want custom ordering (e.g. to handle different character encodings, etc.). -* **include/iterator.h**: Interface for iterating over data. You can get +* **include/leveldb/iterator.h**: Interface for iterating over data. You can get an iterator from a DB object. 
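Following on from the iterator entry above, here is a minimal sketch of that pattern: open a database, write one key, and scan it with an iterator (the path and keys are only examples, and error handling is mostly elided):

```cpp
// Sketch: iterate over all key/value pairs obtained from a DB object.
#include <cstdio>
#include <string>

#include "leveldb/db.h"

int main() {
  leveldb::DB* db = nullptr;
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
  if (!status.ok()) {
    std::fprintf(stderr, "open failed: %s\n", status.ToString().c_str());
    return 1;
  }

  db->Put(leveldb::WriteOptions(), "key1", "value1");

  leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions());
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    std::printf("%s -> %s\n", it->key().ToString().c_str(),
                it->value().ToString().c_str());
  }
  delete it;  // release the iterator before the DB
  delete db;
  return 0;
}
```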
-* **include/write_batch.h**: Interface for atomically applying multiple +* **include/leveldb/write_batch.h**: Interface for atomically applying multiple updates to a database. -* **include/slice.h**: A simple module for maintaining a pointer and a +* **include/leveldb/slice.h**: A simple module for maintaining a pointer and a length into some other byte array. -* **include/status.h**: Status is returned from many of the public interfaces +* **include/leveldb/status.h**: Status is returned from many of the public interfaces and is used to report success and various kinds of errors. -* **include/env.h**: +* **include/leveldb/env.h**: Abstraction of the OS environment. A posix implementation of this interface is -in util/env_posix.cc +in util/env_posix.cc. -* **include/table.h, include/table_builder.h**: Lower-level modules that most -clients probably won't use directly +* **include/leveldb/table.h, include/leveldb/table_builder.h**: Lower-level modules that most +clients probably won't use directly. diff --git a/src/leveldb/WINDOWS.md b/src/leveldb/WINDOWS.md deleted file mode 100644 index 5b76c2448f..0000000000 --- a/src/leveldb/WINDOWS.md +++ /dev/null @@ -1,39 +0,0 @@ -# Building LevelDB On Windows - -## Prereqs - -Install the [Windows Software Development Kit version 7.1](http://www.microsoft.com/downloads/dlx/en-us/listdetailsview.aspx?FamilyID=6b6c21d2-2006-4afa-9702-529fa782d63b). - -Download and extract the [Snappy source distribution](http://snappy.googlecode.com/files/snappy-1.0.5.tar.gz) - -1. Open the "Windows SDK 7.1 Command Prompt" : - Start Menu -> "Microsoft Windows SDK v7.1" > "Windows SDK 7.1 Command Prompt" -2. Change the directory to the leveldb project - -## Building the Static lib - -* 32 bit Version - - setenv /x86 - msbuild.exe /p:Configuration=Release /p:Platform=Win32 /p:Snappy=..\snappy-1.0.5 - -* 64 bit Version - - setenv /x64 - msbuild.exe /p:Configuration=Release /p:Platform=x64 /p:Snappy=..\snappy-1.0.5 - - -## Building and Running the Benchmark app - -* 32 bit Version - - setenv /x86 - msbuild.exe /p:Configuration=Benchmark /p:Platform=Win32 /p:Snappy=..\snappy-1.0.5 - Benchmark\leveldb.exe - -* 64 bit Version - - setenv /x64 - msbuild.exe /p:Configuration=Benchmark /p:Platform=x64 /p:Snappy=..\snappy-1.0.5 - x64\Benchmark\leveldb.exe - diff --git a/src/leveldb/db/db_bench.cc b/src/leveldb/benchmarks/db_bench.cc index 3ad19a512b..3696023b70 100644 --- a/src/leveldb/db/db_bench.cc +++ b/src/leveldb/benchmarks/db_bench.cc @@ -2,14 +2,14 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
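The write_batch.h entry in the guide above describes atomic multi-key updates; a brief hedged sketch of how such a batch is typically applied (again with example keys and a placeholder path):

```cpp
// Sketch: apply a delete and a put atomically via WriteBatch.
#include <cstdio>

#include "leveldb/db.h"
#include "leveldb/write_batch.h"

int main() {
  leveldb::DB* db = nullptr;
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/batchdb", &db);
  if (!s.ok()) return 1;

  leveldb::WriteBatch batch;
  batch.Delete("stale-key");        // removed and ...
  batch.Put("fresh-key", "value");  // ... added in one atomic write
  s = db->Write(leveldb::WriteOptions(), &batch);
  std::printf("batch write %s\n", s.ok() ? "ok" : s.ToString().c_str());

  delete db;
  return 0;
}
```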
-#include <sys/types.h> #include <stdio.h> #include <stdlib.h> -#include "db/db_impl.h" -#include "db/version_set.h" +#include <sys/types.h> + #include "leveldb/cache.h" #include "leveldb/db.h" #include "leveldb/env.h" +#include "leveldb/filter_policy.h" #include "leveldb/write_batch.h" #include "port/port.h" #include "util/crc32c.h" @@ -35,7 +35,6 @@ // seekrandom -- N random seeks // open -- cost of opening a DB // crc32c -- repeated crc32c of 4K of data -// acquireload -- load N*1000 times // Meta operations: // compact -- Compact the entire DB // stats -- Print DB stats @@ -57,9 +56,7 @@ static const char* FLAGS_benchmarks = "fill100K," "crc32c," "snappycomp," - "snappyuncomp," - "acquireload," - ; + "snappyuncomp,"; // Number of key/values to place in database static int FLAGS_num = 1000000; @@ -112,12 +109,12 @@ static bool FLAGS_use_existing_db = false; static bool FLAGS_reuse_logs = false; // Use the db with the following name. -static const char* FLAGS_db = NULL; +static const char* FLAGS_db = nullptr; namespace leveldb { namespace { -leveldb::Env* g_env = NULL; +leveldb::Env* g_env = nullptr; // Helper for quickly generating random data. class RandomGenerator { @@ -158,7 +155,7 @@ static Slice TrimSpace(Slice s) { start++; } size_t limit = s.size(); - while (limit > start && isspace(s[limit-1])) { + while (limit > start && isspace(s[limit - 1])) { limit--; } return Slice(s.data() + start, limit - start); @@ -190,14 +187,12 @@ class Stats { void Start() { next_report_ = 100; - last_op_finish_ = start_; hist_.Clear(); done_ = 0; bytes_ = 0; seconds_ = 0; - start_ = g_env->NowMicros(); - finish_ = start_; message_.clear(); + start_ = finish_ = last_op_finish_ = g_env->NowMicros(); } void Merge(const Stats& other) { @@ -217,9 +212,7 @@ class Stats { seconds_ = (finish_ - start_) * 1e-6; } - void AddMessage(Slice msg) { - AppendWithSpace(&message_, msg); - } + void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); } void FinishedSingleOp() { if (FLAGS_histogram) { @@ -235,21 +228,26 @@ class Stats { done_++; if (done_ >= next_report_) { - if (next_report_ < 1000) next_report_ += 100; - else if (next_report_ < 5000) next_report_ += 500; - else if (next_report_ < 10000) next_report_ += 1000; - else if (next_report_ < 50000) next_report_ += 5000; - else if (next_report_ < 100000) next_report_ += 10000; - else if (next_report_ < 500000) next_report_ += 50000; - else next_report_ += 100000; + if (next_report_ < 1000) + next_report_ += 100; + else if (next_report_ < 5000) + next_report_ += 500; + else if (next_report_ < 10000) + next_report_ += 1000; + else if (next_report_ < 50000) + next_report_ += 5000; + else if (next_report_ < 100000) + next_report_ += 10000; + else if (next_report_ < 500000) + next_report_ += 50000; + else + next_report_ += 100000; fprintf(stderr, "... finished %d ops%30s\r", done_, ""); fflush(stderr); } } - void AddBytes(int64_t n) { - bytes_ += n; - } + void AddBytes(int64_t n) { bytes_ += n; } void Report(const Slice& name) { // Pretend at least one op was done in case we are running a benchmark @@ -268,11 +266,8 @@ class Stats { } AppendWithSpace(&extra, message_); - fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", - name.ToString().c_str(), - seconds_ * 1e6 / done_, - (extra.empty() ? "" : " "), - extra.c_str()); + fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(), + seconds_ * 1e6 / done_, (extra.empty() ? 
"" : " "), extra.c_str()); if (FLAGS_histogram) { fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str()); } @@ -283,8 +278,8 @@ class Stats { // State shared by all concurrent executions of the same benchmark. struct SharedState { port::Mutex mu; - port::CondVar cv; - int total; + port::CondVar cv GUARDED_BY(mu); + int total GUARDED_BY(mu); // Each thread goes through the following states: // (1) initializing @@ -292,24 +287,22 @@ struct SharedState { // (3) running // (4) done - int num_initialized; - int num_done; - bool start; + int num_initialized GUARDED_BY(mu); + int num_done GUARDED_BY(mu); + bool start GUARDED_BY(mu); - SharedState() : cv(&mu) { } + SharedState(int total) + : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) {} }; // Per-thread state for concurrent executions of the same benchmark. struct ThreadState { - int tid; // 0..n-1 when running in n threads - Random rand; // Has different seeds for different threads + int tid; // 0..n-1 when running in n threads + Random rand; // Has different seeds for different threads Stats stats; SharedState* shared; - ThreadState(int index) - : tid(index), - rand(1000 + index) { - } + ThreadState(int index) : tid(index), rand(1000 + index), shared(nullptr) {} }; } // namespace @@ -335,20 +328,20 @@ class Benchmark { static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5)); fprintf(stdout, "Entries: %d\n", num_); fprintf(stdout, "RawSize: %.1f MB (estimated)\n", - ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) - / 1048576.0)); + ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) / + 1048576.0)); fprintf(stdout, "FileSize: %.1f MB (estimated)\n", - (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) - / 1048576.0)); + (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) / + 1048576.0)); PrintWarnings(); fprintf(stdout, "------------------------------------------------\n"); } void PrintWarnings() { #if defined(__GNUC__) && !defined(__OPTIMIZE__) - fprintf(stdout, - "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n" - ); + fprintf( + stdout, + "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); #endif #ifndef NDEBUG fprintf(stdout, @@ -366,22 +359,22 @@ class Benchmark { } void PrintEnvironment() { - fprintf(stderr, "LevelDB: version %d.%d\n", - kMajorVersion, kMinorVersion); + fprintf(stderr, "LevelDB: version %d.%d\n", kMajorVersion, + kMinorVersion); #if defined(__linux) - time_t now = time(NULL); + time_t now = time(nullptr); fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); - if (cpuinfo != NULL) { + if (cpuinfo != nullptr) { char line[1000]; int num_cpus = 0; std::string cpu_type; std::string cache_size; - while (fgets(line, sizeof(line), cpuinfo) != NULL) { + while (fgets(line, sizeof(line), cpuinfo) != nullptr) { const char* sep = strchr(line, ':'); - if (sep == NULL) { + if (sep == nullptr) { continue; } Slice key = TrimSpace(Slice(line, sep - 1 - line)); @@ -402,16 +395,16 @@ class Benchmark { public: Benchmark() - : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : NULL), - filter_policy_(FLAGS_bloom_bits >= 0 - ? NewBloomFilterPolicy(FLAGS_bloom_bits) - : NULL), - db_(NULL), - num_(FLAGS_num), - value_size_(FLAGS_value_size), - entries_per_batch_(1), - reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), - heap_counter_(0) { + : cache_(FLAGS_cache_size >= 0 ? 
NewLRUCache(FLAGS_cache_size) : nullptr), + filter_policy_(FLAGS_bloom_bits >= 0 + ? NewBloomFilterPolicy(FLAGS_bloom_bits) + : nullptr), + db_(nullptr), + num_(FLAGS_num), + value_size_(FLAGS_value_size), + entries_per_batch_(1), + reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), + heap_counter_(0) { std::vector<std::string> files; g_env->GetChildren(FLAGS_db, &files); for (size_t i = 0; i < files.size(); i++) { @@ -435,12 +428,12 @@ class Benchmark { Open(); const char* benchmarks = FLAGS_benchmarks; - while (benchmarks != NULL) { + while (benchmarks != nullptr) { const char* sep = strchr(benchmarks, ','); Slice name; - if (sep == NULL) { + if (sep == nullptr) { name = benchmarks; - benchmarks = NULL; + benchmarks = nullptr; } else { name = Slice(benchmarks, sep - benchmarks); benchmarks = sep + 1; @@ -453,7 +446,7 @@ class Benchmark { entries_per_batch_ = 1; write_options_ = WriteOptions(); - void (Benchmark::*method)(ThreadState*) = NULL; + void (Benchmark::*method)(ThreadState*) = nullptr; bool fresh_db = false; int num_threads = FLAGS_threads; @@ -510,8 +503,6 @@ class Benchmark { method = &Benchmark::Compact; } else if (name == Slice("crc32c")) { method = &Benchmark::Crc32c; - } else if (name == Slice("acquireload")) { - method = &Benchmark::AcquireLoad; } else if (name == Slice("snappycomp")) { method = &Benchmark::SnappyCompress; } else if (name == Slice("snappyuncomp")) { @@ -523,7 +514,7 @@ class Benchmark { } else if (name == Slice("sstables")) { PrintStats("leveldb.sstables"); } else { - if (name != Slice()) { // No error message for empty name + if (!name.empty()) { // No error message for empty name fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str()); } } @@ -532,16 +523,16 @@ class Benchmark { if (FLAGS_use_existing_db) { fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n", name.ToString().c_str()); - method = NULL; + method = nullptr; } else { delete db_; - db_ = NULL; + db_ = nullptr; DestroyDB(FLAGS_db, Options()); Open(); } } - if (method != NULL) { + if (method != nullptr) { RunBenchmark(num_threads, name, method); } } @@ -585,11 +576,7 @@ class Benchmark { void RunBenchmark(int n, Slice name, void (Benchmark::*method)(ThreadState*)) { - SharedState shared; - shared.total = n; - shared.num_initialized = 0; - shared.num_done = 0; - shared.start = false; + SharedState shared(n); ThreadArg* arg = new ThreadArg[n]; for (int i = 0; i < n; i++) { @@ -643,22 +630,6 @@ class Benchmark { thread->stats.AddMessage(label); } - void AcquireLoad(ThreadState* thread) { - int dummy; - port::AtomicPointer ap(&dummy); - int count = 0; - void *ptr = NULL; - thread->stats.AddMessage("(each op is 1000 loads)"); - while (count < 100000) { - for (int i = 0; i < 1000; i++) { - ptr = ap.Acquire_Load(); - } - count++; - thread->stats.FinishedSingleOp(); - } - if (ptr == NULL) exit(1); // Disable unused variable warning. 
- } - void SnappyCompress(ThreadState* thread) { RandomGenerator gen; Slice input = gen.Generate(Options().block_size); @@ -692,8 +663,8 @@ class Benchmark { int64_t bytes = 0; char* uncompressed = new char[input.size()]; while (ok && bytes < 1024 * 1048576) { // Compress 1G - ok = port::Snappy_Uncompress(compressed.data(), compressed.size(), - uncompressed); + ok = port::Snappy_Uncompress(compressed.data(), compressed.size(), + uncompressed); bytes += input.size(); thread->stats.FinishedSingleOp(); } @@ -707,7 +678,7 @@ class Benchmark { } void Open() { - assert(db_ == NULL); + assert(db_ == nullptr); Options options; options.env = g_env; options.create_if_missing = !FLAGS_use_existing_db; @@ -733,13 +704,9 @@ class Benchmark { } } - void WriteSeq(ThreadState* thread) { - DoWrite(thread, true); - } + void WriteSeq(ThreadState* thread) { DoWrite(thread, true); } - void WriteRandom(ThreadState* thread) { - DoWrite(thread, false); - } + void WriteRandom(ThreadState* thread) { DoWrite(thread, false); } void DoWrite(ThreadState* thread, bool seq) { if (num_ != FLAGS_num) { @@ -755,7 +722,7 @@ class Benchmark { for (int i = 0; i < num_; i += entries_per_batch_) { batch.Clear(); for (int j = 0; j < entries_per_batch_; j++) { - const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num); + const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num); char key[100]; snprintf(key, sizeof(key), "%016d", k); batch.Put(key, gen.Generate(value_size_)); @@ -865,7 +832,7 @@ class Benchmark { for (int i = 0; i < num_; i += entries_per_batch_) { batch.Clear(); for (int j = 0; j < entries_per_batch_; j++) { - const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num); + const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num); char key[100]; snprintf(key, sizeof(key), "%016d", k); batch.Delete(key); @@ -879,13 +846,9 @@ class Benchmark { } } - void DeleteSeq(ThreadState* thread) { - DoDelete(thread, true); - } + void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); } - void DeleteRandom(ThreadState* thread) { - DoDelete(thread, false); - } + void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); } void ReadWhileWriting(ThreadState* thread) { if (thread->tid > 0) { @@ -917,9 +880,7 @@ class Benchmark { } } - void Compact(ThreadState* thread) { - db_->CompactRange(NULL, NULL); - } + void Compact(ThreadState* thread) { db_->CompactRange(nullptr, nullptr); } void PrintStats(const char* key) { std::string stats; @@ -1008,10 +969,10 @@ int main(int argc, char** argv) { leveldb::g_env = leveldb::Env::Default(); // Choose a location for the test database if none given with --db=<path> - if (FLAGS_db == NULL) { - leveldb::g_env->GetTestDirectory(&default_db_path); - default_db_path += "/dbbench"; - FLAGS_db = default_db_path.c_str(); + if (FLAGS_db == nullptr) { + leveldb::g_env->GetTestDirectory(&default_db_path); + default_db_path += "/dbbench"; + FLAGS_db = default_db_path.c_str(); } leveldb::Benchmark benchmark; diff --git a/src/leveldb/doc/bench/db_bench_sqlite3.cc b/src/leveldb/benchmarks/db_bench_sqlite3.cc index e63aaa8dcc..f183f4fcfd 100644 --- a/src/leveldb/doc/bench/db_bench_sqlite3.cc +++ b/src/leveldb/benchmarks/db_bench_sqlite3.cc @@ -2,9 +2,10 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
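Reviewer note on the SharedState hunk in db_bench.cc above: the new GUARDED_BY(mu) annotations are Clang thread-safety-analysis attributes, so a build with clang++ -Wthread-safety will flag any access to those members made without holding the mutex. A minimal self-contained sketch of how such annotations are typically wired up; the Mutex and SharedCounters names here are illustrative, not LevelDB's port types:

    #include <mutex>

    // Thread-safety attributes are a Clang extension; expand to nothing elsewhere.
    #if defined(__clang__)
    #define CAPABILITY(x) __attribute__((capability(x)))
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #define ACQUIRE(...)  __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)  __attribute__((release_capability(__VA_ARGS__)))
    #else
    #define CAPABILITY(x)
    #define GUARDED_BY(x)
    #define ACQUIRE(...)
    #define RELEASE(...)
    #endif

    // A tiny lockable wrapper the analysis can reason about.
    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE() { mu_.lock(); }
      void Unlock() RELEASE() { mu_.unlock(); }

     private:
      std::mutex mu_;
    };

    struct SharedCounters {
      Mutex mu;
      int num_done GUARDED_BY(mu) = 0;  // only touch while mu is held
    };

    void MarkDone(SharedCounters* s) {
      s->mu.Lock();
      s->num_done++;      // OK: mu is held
      s->mu.Unlock();
      // s->num_done++;   // would be rejected by clang++ -Wthread-safety
    }

    int main() {
      SharedCounters s;
      MarkDone(&s);
    }

On compilers other than Clang the macros expand to nothing, so the annotations act as zero-cost documentation of the locking discipline.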
+#include <sqlite3.h> #include <stdio.h> #include <stdlib.h> -#include <sqlite3.h> + #include "util/histogram.h" #include "util/random.h" #include "util/testutil.h" @@ -38,8 +39,7 @@ static const char* FLAGS_benchmarks = "fillrand100K," "fillseq100K," "readseq," - "readrand100K," - ; + "readrand100K,"; // Number of key/values to place in database static int FLAGS_num = 1000000; @@ -76,10 +76,9 @@ static bool FLAGS_transaction = true; static bool FLAGS_WAL_enabled = true; // Use the db with the following name. -static const char* FLAGS_db = NULL; +static const char* FLAGS_db = nullptr; -inline -static void ExecErrorCheck(int status, char *err_msg) { +inline static void ExecErrorCheck(int status, char* err_msg) { if (status != SQLITE_OK) { fprintf(stderr, "SQL error: %s\n", err_msg); sqlite3_free(err_msg); @@ -87,27 +86,25 @@ static void ExecErrorCheck(int status, char *err_msg) { } } -inline -static void StepErrorCheck(int status) { +inline static void StepErrorCheck(int status) { if (status != SQLITE_DONE) { fprintf(stderr, "SQL step error: status = %d\n", status); exit(1); } } -inline -static void ErrorCheck(int status) { +inline static void ErrorCheck(int status) { if (status != SQLITE_OK) { fprintf(stderr, "sqlite3 error: status = %d\n", status); exit(1); } } -inline -static void WalCheckpoint(sqlite3* db_) { +inline static void WalCheckpoint(sqlite3* db_) { // Flush all writes to disk if (FLAGS_WAL_enabled) { - sqlite3_wal_checkpoint_v2(db_, NULL, SQLITE_CHECKPOINT_FULL, NULL, NULL); + sqlite3_wal_checkpoint_v2(db_, nullptr, SQLITE_CHECKPOINT_FULL, nullptr, + nullptr); } } @@ -152,7 +149,7 @@ static Slice TrimSpace(Slice s) { start++; } int limit = s.size(); - while (limit > start && isspace(s[limit-1])) { + while (limit > start && isspace(s[limit - 1])) { limit--; } return Slice(s.data() + start, limit - start); @@ -176,7 +173,7 @@ class Benchmark { // State kept for progress messages int done_; - int next_report_; // When to report next + int next_report_; // When to report next void PrintHeader() { const int kKeySize = 16; @@ -185,17 +182,17 @@ class Benchmark { fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size); fprintf(stdout, "Entries: %d\n", num_); fprintf(stdout, "RawSize: %.1f MB (estimated)\n", - ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) - / 1048576.0)); + ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) / + 1048576.0)); PrintWarnings(); fprintf(stdout, "------------------------------------------------\n"); } void PrintWarnings() { #if defined(__GNUC__) && !defined(__OPTIMIZE__) - fprintf(stdout, - "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n" - ); + fprintf( + stdout, + "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); #endif #ifndef NDEBUG fprintf(stdout, @@ -207,18 +204,18 @@ class Benchmark { fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION); #if defined(__linux) - time_t now = time(NULL); + time_t now = time(nullptr); fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); - if (cpuinfo != NULL) { + if (cpuinfo != nullptr) { char line[1000]; int num_cpus = 0; std::string cpu_type; std::string cache_size; - while (fgets(line, sizeof(line), cpuinfo) != NULL) { + while (fgets(line, sizeof(line), cpuinfo) != nullptr) { const char* sep = strchr(line, ':'); - if (sep == NULL) { + if (sep == nullptr) { continue; } Slice key = TrimSpace(Slice(line, sep - 1 - line)); @@ -261,13 +258,20 @@ class Benchmark { done_++; if (done_ >= 
next_report_) { - if (next_report_ < 1000) next_report_ += 100; - else if (next_report_ < 5000) next_report_ += 500; - else if (next_report_ < 10000) next_report_ += 1000; - else if (next_report_ < 50000) next_report_ += 5000; - else if (next_report_ < 100000) next_report_ += 10000; - else if (next_report_ < 500000) next_report_ += 50000; - else next_report_ += 100000; + if (next_report_ < 1000) + next_report_ += 100; + else if (next_report_ < 5000) + next_report_ += 500; + else if (next_report_ < 10000) + next_report_ += 1000; + else if (next_report_ < 50000) + next_report_ += 5000; + else if (next_report_ < 100000) + next_report_ += 10000; + else if (next_report_ < 500000) + next_report_ += 50000; + else + next_report_ += 100000; fprintf(stderr, "... finished %d ops%30s\r", done_, ""); fflush(stderr); } @@ -285,16 +289,14 @@ class Benchmark { snprintf(rate, sizeof(rate), "%6.1f MB/s", (bytes_ / 1048576.0) / (finish - start_)); if (!message_.empty()) { - message_ = std::string(rate) + " " + message_; + message_ = std::string(rate) + " " + message_; } else { message_ = rate; } } - fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", - name.ToString().c_str(), - (finish - start_) * 1e6 / done_, - (message_.empty() ? "" : " "), + fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(), + (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "), message_.c_str()); if (FLAGS_histogram) { fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str()); @@ -303,22 +305,16 @@ class Benchmark { } public: - enum Order { - SEQUENTIAL, - RANDOM - }; - enum DBState { - FRESH, - EXISTING - }; + enum Order { SEQUENTIAL, RANDOM }; + enum DBState { FRESH, EXISTING }; Benchmark() - : db_(NULL), - db_num_(0), - num_(FLAGS_num), - reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), - bytes_(0), - rand_(301) { + : db_(nullptr), + db_num_(0), + num_(FLAGS_num), + reads_(FLAGS_reads < 0 ? 
FLAGS_num : FLAGS_reads), + bytes_(0), + rand_(301) { std::vector<std::string> files; std::string test_dir; Env::Default()->GetTestDirectory(&test_dir); @@ -345,12 +341,12 @@ class Benchmark { Open(); const char* benchmarks = FLAGS_benchmarks; - while (benchmarks != NULL) { + while (benchmarks != nullptr) { const char* sep = strchr(benchmarks, ','); Slice name; - if (sep == NULL) { + if (sep == nullptr) { name = benchmarks; - benchmarks = NULL; + benchmarks = nullptr; } else { name = Slice(benchmarks, sep - benchmarks); benchmarks = sep + 1; @@ -415,20 +411,18 @@ class Benchmark { } void Open() { - assert(db_ == NULL); + assert(db_ == nullptr); int status; char file_name[100]; - char* err_msg = NULL; + char* err_msg = nullptr; db_num_++; // Open database std::string tmp_dir; Env::Default()->GetTestDirectory(&tmp_dir); - snprintf(file_name, sizeof(file_name), - "%s/dbbench_sqlite3-%d.db", - tmp_dir.c_str(), - db_num_); + snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db", + tmp_dir.c_str(), db_num_); status = sqlite3_open(file_name, &db_); if (status) { fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_)); @@ -439,7 +433,7 @@ class Benchmark { char cache_size[100]; snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d", FLAGS_num_pages); - status = sqlite3_exec(db_, cache_size, NULL, NULL, &err_msg); + status = sqlite3_exec(db_, cache_size, nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); // FLAGS_page_size is defaulted to 1024 @@ -447,7 +441,7 @@ class Benchmark { char page_size[100]; snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d", FLAGS_page_size); - status = sqlite3_exec(db_, page_size, NULL, NULL, &err_msg); + status = sqlite3_exec(db_, page_size, nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); } @@ -457,26 +451,28 @@ class Benchmark { // LevelDB's default cache size is a combined 4 MB std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096"; - status = sqlite3_exec(db_, WAL_stmt.c_str(), NULL, NULL, &err_msg); + status = sqlite3_exec(db_, WAL_stmt.c_str(), nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); - status = sqlite3_exec(db_, WAL_checkpoint.c_str(), NULL, NULL, &err_msg); + status = + sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); } // Change locking mode to exclusive and create tables/index for database std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE"; std::string create_stmt = - "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))"; - std::string stmt_array[] = { locking_stmt, create_stmt }; + "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))"; + std::string stmt_array[] = {locking_stmt, create_stmt}; int stmt_array_length = sizeof(stmt_array) / sizeof(std::string); for (int i = 0; i < stmt_array_length; i++) { - status = sqlite3_exec(db_, stmt_array[i].c_str(), NULL, NULL, &err_msg); + status = + sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); } } - void Write(bool write_sync, Order order, DBState state, - int num_entries, int value_size, int entries_per_batch) { + void Write(bool write_sync, Order order, DBState state, int num_entries, + int value_size, int entries_per_batch) { // Create new database if state == FRESH if (state == FRESH) { if (FLAGS_use_existing_db) { @@ -484,7 +480,7 @@ class Benchmark { return; } sqlite3_close(db_); - db_ = NULL; + db_ = nullptr; Open(); Start(); } @@ -495,7 +491,7 @@ class Benchmark { message_ = msg; } - 
char* err_msg = NULL; + char* err_msg = nullptr; int status; sqlite3_stmt *replace_stmt, *begin_trans_stmt, *end_trans_stmt; @@ -504,20 +500,20 @@ class Benchmark { std::string end_trans_str = "END TRANSACTION;"; // Check for synchronous flag in options - std::string sync_stmt = (write_sync) ? "PRAGMA synchronous = FULL" : - "PRAGMA synchronous = OFF"; - status = sqlite3_exec(db_, sync_stmt.c_str(), NULL, NULL, &err_msg); + std::string sync_stmt = + (write_sync) ? "PRAGMA synchronous = FULL" : "PRAGMA synchronous = OFF"; + status = sqlite3_exec(db_, sync_stmt.c_str(), nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); // Preparing sqlite3 statements - status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1, - &replace_stmt, NULL); + status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1, &replace_stmt, + nullptr); ErrorCheck(status); status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1, - &begin_trans_stmt, NULL); + &begin_trans_stmt, nullptr); ErrorCheck(status); - status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, - &end_trans_stmt, NULL); + status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt, + nullptr); ErrorCheck(status); bool transaction = (entries_per_batch > 1); @@ -535,16 +531,16 @@ class Benchmark { const char* value = gen_.Generate(value_size).data(); // Create values for key-value pair - const int k = (order == SEQUENTIAL) ? i + j : - (rand_.Next() % num_entries); + const int k = + (order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries); char key[100]; snprintf(key, sizeof(key), "%016d", k); // Bind KV values into replace_stmt status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC); ErrorCheck(status); - status = sqlite3_bind_blob(replace_stmt, 2, value, - value_size, SQLITE_STATIC); + status = sqlite3_bind_blob(replace_stmt, 2, value, value_size, + SQLITE_STATIC); ErrorCheck(status); // Execute replace_stmt @@ -588,12 +584,12 @@ class Benchmark { // Preparing sqlite3 statements status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1, - &begin_trans_stmt, NULL); + &begin_trans_stmt, nullptr); ErrorCheck(status); - status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, - &end_trans_stmt, NULL); + status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt, + nullptr); ErrorCheck(status); - status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, NULL); + status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, nullptr); ErrorCheck(status); bool transaction = (entries_per_batch > 1); @@ -618,7 +614,8 @@ class Benchmark { ErrorCheck(status); // Execute read statement - while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {} + while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) { + } StepErrorCheck(status); // Reset SQLite statement for another use @@ -648,10 +645,10 @@ class Benchmark { void ReadSequential() { int status; - sqlite3_stmt *pStmt; + sqlite3_stmt* pStmt; std::string read_str = "SELECT * FROM test ORDER BY key"; - status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, NULL); + status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, nullptr); ErrorCheck(status); for (int i = 0; i < reads_ && SQLITE_ROW == sqlite3_step(pStmt); i++) { bytes_ += sqlite3_column_bytes(pStmt, 1) + sqlite3_column_bytes(pStmt, 2); @@ -661,7 +658,6 @@ class Benchmark { status = sqlite3_finalize(pStmt); ErrorCheck(status); } - }; } // namespace leveldb @@ -706,10 +702,10 @@ int main(int argc, char** argv) { } // Choose a location for the test database if 
none given with --db=<path> - if (FLAGS_db == NULL) { - leveldb::Env::Default()->GetTestDirectory(&default_db_path); - default_db_path += "/dbbench"; - FLAGS_db = default_db_path.c_str(); + if (FLAGS_db == nullptr) { + leveldb::Env::Default()->GetTestDirectory(&default_db_path); + default_db_path += "/dbbench"; + FLAGS_db = default_db_path.c_str(); } leveldb::Benchmark benchmark; diff --git a/src/leveldb/doc/bench/db_bench_tree_db.cc b/src/leveldb/benchmarks/db_bench_tree_db.cc index 4ca381f11f..b2f6646d89 100644 --- a/src/leveldb/doc/bench/db_bench_tree_db.cc +++ b/src/leveldb/benchmarks/db_bench_tree_db.cc @@ -2,9 +2,10 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#include <kcpolydb.h> #include <stdio.h> #include <stdlib.h> -#include <kcpolydb.h> + #include "util/histogram.h" #include "util/random.h" #include "util/testutil.h" @@ -34,8 +35,7 @@ static const char* FLAGS_benchmarks = "fillrand100K," "fillseq100K," "readseq100K," - "readrand100K," - ; + "readrand100K,"; // Number of key/values to place in database static int FLAGS_num = 1000000; @@ -69,11 +69,9 @@ static bool FLAGS_use_existing_db = false; static bool FLAGS_compression = true; // Use the db with the following name. -static const char* FLAGS_db = NULL; +static const char* FLAGS_db = nullptr; -inline -static void DBSynchronize(kyotocabinet::TreeDB* db_) -{ +inline static void DBSynchronize(kyotocabinet::TreeDB* db_) { // Synchronize will flush writes to disk if (!db_->synchronize()) { fprintf(stderr, "synchronize error: %s\n", db_->error().name()); @@ -121,7 +119,7 @@ static Slice TrimSpace(Slice s) { start++; } int limit = s.size(); - while (limit > start && isspace(s[limit-1])) { + while (limit > start && isspace(s[limit - 1])) { limit--; } return Slice(s.data() + start, limit - start); @@ -146,7 +144,7 @@ class Benchmark { // State kept for progress messages int done_; - int next_report_; // When to report next + int next_report_; // When to report next void PrintHeader() { const int kKeySize = 16; @@ -157,20 +155,20 @@ class Benchmark { static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5)); fprintf(stdout, "Entries: %d\n", num_); fprintf(stdout, "RawSize: %.1f MB (estimated)\n", - ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) - / 1048576.0)); + ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) / + 1048576.0)); fprintf(stdout, "FileSize: %.1f MB (estimated)\n", - (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) - / 1048576.0)); + (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) / + 1048576.0)); PrintWarnings(); fprintf(stdout, "------------------------------------------------\n"); } void PrintWarnings() { #if defined(__GNUC__) && !defined(__OPTIMIZE__) - fprintf(stdout, - "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n" - ); + fprintf( + stdout, + "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); #endif #ifndef NDEBUG fprintf(stdout, @@ -183,18 +181,18 @@ class Benchmark { kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV); #if defined(__linux) - time_t now = time(NULL); + time_t now = time(nullptr); fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); - if (cpuinfo != NULL) { + if (cpuinfo != nullptr) { char line[1000]; int num_cpus = 0; std::string cpu_type; std::string cache_size; - while (fgets(line, sizeof(line), cpuinfo) != 
NULL) { + while (fgets(line, sizeof(line), cpuinfo) != nullptr) { const char* sep = strchr(line, ':'); - if (sep == NULL) { + if (sep == nullptr) { continue; } Slice key = TrimSpace(Slice(line, sep - 1 - line)); @@ -237,13 +235,20 @@ class Benchmark { done_++; if (done_ >= next_report_) { - if (next_report_ < 1000) next_report_ += 100; - else if (next_report_ < 5000) next_report_ += 500; - else if (next_report_ < 10000) next_report_ += 1000; - else if (next_report_ < 50000) next_report_ += 5000; - else if (next_report_ < 100000) next_report_ += 10000; - else if (next_report_ < 500000) next_report_ += 50000; - else next_report_ += 100000; + if (next_report_ < 1000) + next_report_ += 100; + else if (next_report_ < 5000) + next_report_ += 500; + else if (next_report_ < 10000) + next_report_ += 1000; + else if (next_report_ < 50000) + next_report_ += 5000; + else if (next_report_ < 100000) + next_report_ += 10000; + else if (next_report_ < 500000) + next_report_ += 50000; + else + next_report_ += 100000; fprintf(stderr, "... finished %d ops%30s\r", done_, ""); fflush(stderr); } @@ -261,16 +266,14 @@ class Benchmark { snprintf(rate, sizeof(rate), "%6.1f MB/s", (bytes_ / 1048576.0) / (finish - start_)); if (!message_.empty()) { - message_ = std::string(rate) + " " + message_; + message_ = std::string(rate) + " " + message_; } else { message_ = rate; } } - fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", - name.ToString().c_str(), - (finish - start_) * 1e6 / done_, - (message_.empty() ? "" : " "), + fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(), + (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "), message_.c_str()); if (FLAGS_histogram) { fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str()); @@ -279,21 +282,15 @@ class Benchmark { } public: - enum Order { - SEQUENTIAL, - RANDOM - }; - enum DBState { - FRESH, - EXISTING - }; + enum Order { SEQUENTIAL, RANDOM }; + enum DBState { FRESH, EXISTING }; Benchmark() - : db_(NULL), - num_(FLAGS_num), - reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), - bytes_(0), - rand_(301) { + : db_(nullptr), + num_(FLAGS_num), + reads_(FLAGS_reads < 0 ? 
FLAGS_num : FLAGS_reads), + bytes_(0), + rand_(301) { std::vector<std::string> files; std::string test_dir; Env::Default()->GetTestDirectory(&test_dir); @@ -321,12 +318,12 @@ class Benchmark { Open(false); const char* benchmarks = FLAGS_benchmarks; - while (benchmarks != NULL) { + while (benchmarks != nullptr) { const char* sep = strchr(benchmarks, ','); Slice name; - if (sep == NULL) { + if (sep == nullptr) { name = benchmarks; - benchmarks = NULL; + benchmarks = nullptr; } else { name = Slice(benchmarks, sep - benchmarks); benchmarks = sep + 1; @@ -386,8 +383,8 @@ class Benchmark { } private: - void Open(bool sync) { - assert(db_ == NULL); + void Open(bool sync) { + assert(db_ == nullptr); // Initialize db_ db_ = new kyotocabinet::TreeDB(); @@ -395,16 +392,14 @@ class Benchmark { db_num_++; std::string test_dir; Env::Default()->GetTestDirectory(&test_dir); - snprintf(file_name, sizeof(file_name), - "%s/dbbench_polyDB-%d.kct", - test_dir.c_str(), - db_num_); + snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct", + test_dir.c_str(), db_num_); // Create tuning options and open the database - int open_options = kyotocabinet::PolyDB::OWRITER | - kyotocabinet::PolyDB::OCREATE; - int tune_options = kyotocabinet::TreeDB::TSMALL | - kyotocabinet::TreeDB::TLINEAR; + int open_options = + kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE; + int tune_options = + kyotocabinet::TreeDB::TSMALL | kyotocabinet::TreeDB::TLINEAR; if (FLAGS_compression) { tune_options |= kyotocabinet::TreeDB::TCOMPRESS; db_->tune_compressor(&comp_); @@ -412,7 +407,7 @@ class Benchmark { db_->tune_options(tune_options); db_->tune_page_cache(FLAGS_cache_size); db_->tune_page(FLAGS_page_size); - db_->tune_map(256LL<<20); + db_->tune_map(256LL << 20); if (sync) { open_options |= kyotocabinet::PolyDB::OAUTOSYNC; } @@ -421,8 +416,8 @@ class Benchmark { } } - void Write(bool sync, Order order, DBState state, - int num_entries, int value_size, int entries_per_batch) { + void Write(bool sync, Order order, DBState state, int num_entries, + int value_size, int entries_per_batch) { // Create new database if state == FRESH if (state == FRESH) { if (FLAGS_use_existing_db) { @@ -430,7 +425,7 @@ class Benchmark { return; } delete db_; - db_ = NULL; + db_ = nullptr; Open(sync); Start(); // Do not count time taken to destroy/open } @@ -442,8 +437,7 @@ class Benchmark { } // Write to database - for (int i = 0; i < num_entries; i++) - { + for (int i = 0; i < num_entries; i++) { const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries); char key[100]; snprintf(key, sizeof(key), "%016d", k); @@ -516,10 +510,10 @@ int main(int argc, char** argv) { } // Choose a location for the test database if none given with --db=<path> - if (FLAGS_db == NULL) { - leveldb::Env::Default()->GetTestDirectory(&default_db_path); - default_db_path += "/dbbench"; - FLAGS_db = default_db_path.c_str(); + if (FLAGS_db == nullptr) { + leveldb::Env::Default()->GetTestDirectory(&default_db_path); + default_db_path += "/dbbench"; + FLAGS_db = default_db_path.c_str(); } leveldb::Benchmark benchmark; diff --git a/src/leveldb/build_detect_platform b/src/leveldb/build_detect_platform deleted file mode 100755 index 4a94715900..0000000000 --- a/src/leveldb/build_detect_platform +++ /dev/null @@ -1,259 +0,0 @@ -#!/bin/sh -# -# Detects OS we're compiling on and outputs a file specified by the first -# argument, which in turn gets read while processing Makefile. 
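For context on the db_bench_tree_db.cc hunks above: the benchmark tunes and opens a Kyoto Cabinet TreeDB using the calls shown in the diff (tune_options, tune_page_cache, tune_page, tune_map, then open with OWRITER|OCREATE). A rough sketch of that open path, assuming Kyoto Cabinet is installed and linked with -lkyotocabinet; the path and sizes are placeholders, and the exact overloads should be checked against the installed headers:

    #include <kcpolydb.h>

    #include <cstdio>

    int main() {
      kyotocabinet::TreeDB db;

      // Tuning mirrors the benchmark: small/linear tree options, page cache,
      // page size, and a 256 MiB memory map (256LL << 20 bytes).
      db.tune_options(kyotocabinet::TreeDB::TSMALL | kyotocabinet::TreeDB::TLINEAR);
      db.tune_page_cache(4 << 20);   // placeholder cache size
      db.tune_page(1024);            // placeholder page size
      db.tune_map(256LL << 20);

      int open_options =
          kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE;
      if (!db.open("/tmp/dbbench_polyDB-example.kct", open_options)) {
        std::fprintf(stderr, "open error: %s\n", db.error().name());
        return 1;
      }
      if (!db.set("0000000000000001", "value")) {
        std::fprintf(stderr, "set error: %s\n", db.error().name());
      }
      db.close();
      return 0;
    }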
-# -# The output will set the following variables: -# CC C Compiler path -# CXX C++ Compiler path -# PLATFORM_LDFLAGS Linker flags -# PLATFORM_LIBS Libraries flags -# PLATFORM_SHARED_EXT Extension for shared libraries -# PLATFORM_SHARED_LDFLAGS Flags for building shared library -# This flag is embedded just before the name -# of the shared library without intervening spaces -# PLATFORM_SHARED_CFLAGS Flags for compiling objects for shared library -# PLATFORM_CCFLAGS C compiler flags -# PLATFORM_CXXFLAGS C++ compiler flags. Will contain: -# PLATFORM_SHARED_VERSIONED Set to 'true' if platform supports versioned -# shared libraries, empty otherwise. -# -# The PLATFORM_CCFLAGS and PLATFORM_CXXFLAGS might include the following: -# -# -DLEVELDB_ATOMIC_PRESENT if <atomic> is present -# -DLEVELDB_PLATFORM_POSIX for Posix-based platforms -# -DSNAPPY if the Snappy library is present -# - -OUTPUT=$1 -PREFIX=$2 -if test -z "$OUTPUT" || test -z "$PREFIX"; then - echo "usage: $0 <output-filename> <directory_prefix>" >&2 - exit 1 -fi - -# Delete existing output, if it exists -rm -f $OUTPUT -touch $OUTPUT - -if test -z "$CC"; then - CC=cc -fi - -if test -z "$CXX"; then - CXX=g++ -fi - -if test -z "$TMPDIR"; then - TMPDIR=/tmp -fi - -# Detect OS -if test -z "$TARGET_OS"; then - TARGET_OS=`uname -s` -fi - -COMMON_FLAGS= -CROSS_COMPILE= -PLATFORM_CCFLAGS= -PLATFORM_CXXFLAGS= -PLATFORM_LDFLAGS= -PLATFORM_LIBS= -PLATFORM_SHARED_EXT="so" -PLATFORM_SHARED_LDFLAGS="-shared -Wl,-soname -Wl," -PLATFORM_SHARED_CFLAGS="-fPIC" -PLATFORM_SHARED_VERSIONED=true -PLATFORM_SSEFLAGS= - -MEMCMP_FLAG= -if [ "$CXX" = "g++" ]; then - # Use libc's memcmp instead of GCC's memcmp. This results in ~40% - # performance improvement on readrandom under gcc 4.4.3 on Linux/x86. - MEMCMP_FLAG="-fno-builtin-memcmp" -fi - -case "$TARGET_OS" in - CYGWIN_*) - PLATFORM=OS_LINUX - COMMON_FLAGS="$MEMCMP_FLAG -lpthread -DOS_LINUX -DCYGWIN" - PLATFORM_LDFLAGS="-lpthread" - PORT_FILE=port/port_posix.cc - PORT_SSE_FILE=port/port_posix_sse.cc - ;; - Darwin) - PLATFORM=OS_MACOSX - COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX" - PLATFORM_SHARED_EXT=dylib - [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd` - PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name $INSTALL_PATH/" - PORT_FILE=port/port_posix.cc - PORT_SSE_FILE=port/port_posix_sse.cc - ;; - Linux) - PLATFORM=OS_LINUX - COMMON_FLAGS="$MEMCMP_FLAG -pthread -DOS_LINUX" - PLATFORM_LDFLAGS="-pthread" - PORT_FILE=port/port_posix.cc - PORT_SSE_FILE=port/port_posix_sse.cc - ;; - SunOS) - PLATFORM=OS_SOLARIS - COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_SOLARIS" - PLATFORM_LIBS="-lpthread -lrt" - PORT_FILE=port/port_posix.cc - PORT_SSE_FILE=port/port_posix_sse.cc - ;; - FreeBSD) - PLATFORM=OS_FREEBSD - COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_FREEBSD" - PLATFORM_LIBS="-lpthread" - PORT_FILE=port/port_posix.cc - PORT_SSE_FILE=port/port_posix_sse.cc - ;; - GNU/kFreeBSD) - PLATFORM=OS_KFREEBSD - COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_KFREEBSD" - PLATFORM_LIBS="-lpthread" - PORT_FILE=port/port_posix.cc - ;; - NetBSD) - PLATFORM=OS_NETBSD - COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_NETBSD" - PLATFORM_LIBS="-lpthread -lgcc_s" - PORT_FILE=port/port_posix.cc - PORT_SSE_FILE=port/port_posix_sse.cc - ;; - OpenBSD) - PLATFORM=OS_OPENBSD - COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_OPENBSD" - PLATFORM_LDFLAGS="-pthread" - PORT_FILE=port/port_posix.cc - PORT_SSE_FILE=port/port_posix_sse.cc - ;; - DragonFly) - PLATFORM=OS_DRAGONFLYBSD - COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_DRAGONFLYBSD" - 
PLATFORM_LIBS="-lpthread" - PORT_FILE=port/port_posix.cc - PORT_SSE_FILE=port/port_posix_sse.cc - ;; - OS_ANDROID_CROSSCOMPILE) - PLATFORM=OS_ANDROID - COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_ANDROID -DLEVELDB_PLATFORM_POSIX" - PLATFORM_LDFLAGS="" # All pthread features are in the Android C library - PORT_FILE=port/port_posix.cc - PORT_SSE_FILE=port/port_posix_sse.cc - CROSS_COMPILE=true - ;; - HP-UX) - PLATFORM=OS_HPUX - COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_HPUX" - PLATFORM_LDFLAGS="-pthread" - PORT_FILE=port/port_posix.cc - PORT_SSE_FILE=port/port_posix_sse.cc - # man ld: +h internal_name - PLATFORM_SHARED_LDFLAGS="-shared -Wl,+h -Wl," - ;; - IOS) - PLATFORM=IOS - COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX" - [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd` - PORT_FILE=port/port_posix.cc - PORT_SSE_FILE=port/port_posix_sse.cc - PLATFORM_SHARED_EXT= - PLATFORM_SHARED_LDFLAGS= - PLATFORM_SHARED_CFLAGS= - PLATFORM_SHARED_VERSIONED= - ;; - OS_WINDOWS_CROSSCOMPILE | NATIVE_WINDOWS) - PLATFORM=OS_WINDOWS - COMMON_FLAGS="-fno-builtin-memcmp -D_REENTRANT -DOS_WINDOWS -DLEVELDB_PLATFORM_WINDOWS -DWINVER=0x0500 -D__USE_MINGW_ANSI_STDIO=1" - PLATFORM_SOURCES="util/env_win.cc" - PLATFORM_LIBS="-lshlwapi" - PORT_FILE=port/port_win.cc - CROSS_COMPILE=true - ;; - *) - echo "Unknown platform!" >&2 - exit 1 -esac - -# We want to make a list of all cc files within util, db, table, and helpers -# except for the test and benchmark files. By default, find will output a list -# of all files matching either rule, so we need to append -print to make the -# prune take effect. -DIRS="$PREFIX/db $PREFIX/util $PREFIX/table" - -set -f # temporarily disable globbing so that our patterns aren't expanded -PRUNE_TEST="-name *test*.cc -prune" -PRUNE_BENCH="-name *_bench.cc -prune" -PRUNE_TOOL="-name leveldbutil.cc -prune" -PORTABLE_FILES=`find $DIRS $PRUNE_TEST -o $PRUNE_BENCH -o $PRUNE_TOOL -o -name '*.cc' -print | sort | sed "s,^$PREFIX/,," | tr "\n" " "` - -set +f # re-enable globbing - -# The sources consist of the portable files, plus the platform-specific port -# file. -echo "SOURCES=$PORTABLE_FILES $PORT_FILE $PORT_SSE_FILE" >> $OUTPUT -echo "MEMENV_SOURCES=helpers/memenv/memenv.cc" >> $OUTPUT - -if [ "$CROSS_COMPILE" = "true" ]; then - # Cross-compiling; do not try any compilation tests. - true -else - CXXOUTPUT="${TMPDIR}/leveldb_build_detect_platform-cxx.$$" - - # If -std=c++0x works, use <atomic> as fallback for when memory barriers - # are not available. - $CXX $CXXFLAGS -std=c++0x -x c++ - -o $CXXOUTPUT 2>/dev/null <<EOF - #include <atomic> - int main() {} -EOF - if [ "$?" = 0 ]; then - COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX -DLEVELDB_ATOMIC_PRESENT" - PLATFORM_CXXFLAGS="-std=c++0x" - else - COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX" - fi - - # Test whether tcmalloc is available - $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT -ltcmalloc 2>/dev/null <<EOF - int main() {} -EOF - if [ "$?" = 0 ]; then - PLATFORM_LIBS="$PLATFORM_LIBS -ltcmalloc" - fi - - rm -f $CXXOUTPUT 2>/dev/null - - # Test if gcc SSE 4.2 is supported - $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT -msse4.2 2>/dev/null <<EOF - int main() {} -EOF - if [ "$?" = 0 ]; then - PLATFORM_SSEFLAGS="-msse4.2" - fi - - rm -f $CXXOUTPUT 2>/dev/null -fi - -# Use the SSE 4.2 CRC32C intrinsics iff runtime checks indicate compiler supports them. 
-if [ -n "$PLATFORM_SSEFLAGS" ]; then - PLATFORM_SSEFLAGS="$PLATFORM_SSEFLAGS -DLEVELDB_PLATFORM_POSIX_SSE" -fi - -PLATFORM_CCFLAGS="$PLATFORM_CCFLAGS $COMMON_FLAGS" -PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS $COMMON_FLAGS" - -echo "CC=$CC" >> $OUTPUT -echo "CXX=$CXX" >> $OUTPUT -echo "PLATFORM=$PLATFORM" >> $OUTPUT -echo "PLATFORM_LDFLAGS=$PLATFORM_LDFLAGS" >> $OUTPUT -echo "PLATFORM_LIBS=$PLATFORM_LIBS" >> $OUTPUT -echo "PLATFORM_CCFLAGS=$PLATFORM_CCFLAGS" >> $OUTPUT -echo "PLATFORM_CXXFLAGS=$PLATFORM_CXXFLAGS" >> $OUTPUT -echo "PLATFORM_SSEFLAGS=$PLATFORM_SSEFLAGS" >> $OUTPUT -echo "PLATFORM_SHARED_CFLAGS=$PLATFORM_SHARED_CFLAGS" >> $OUTPUT -echo "PLATFORM_SHARED_EXT=$PLATFORM_SHARED_EXT" >> $OUTPUT -echo "PLATFORM_SHARED_LDFLAGS=$PLATFORM_SHARED_LDFLAGS" >> $OUTPUT -echo "PLATFORM_SHARED_VERSIONED=$PLATFORM_SHARED_VERSIONED" >> $OUTPUT diff --git a/src/leveldb/cmake/leveldbConfig.cmake b/src/leveldb/cmake/leveldbConfig.cmake new file mode 100644 index 0000000000..eea6e5c477 --- /dev/null +++ b/src/leveldb/cmake/leveldbConfig.cmake @@ -0,0 +1 @@ +include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake") diff --git a/src/leveldb/db/autocompact_test.cc b/src/leveldb/db/autocompact_test.cc index d20a2362c3..e6c97a05a6 100644 --- a/src/leveldb/db/autocompact_test.cc +++ b/src/leveldb/db/autocompact_test.cc @@ -2,9 +2,9 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. -#include "leveldb/db.h" #include "db/db_impl.h" #include "leveldb/cache.h" +#include "leveldb/db.h" #include "util/testharness.h" #include "util/testutil.h" @@ -12,11 +12,6 @@ namespace leveldb { class AutoCompactTest { public: - std::string dbname_; - Cache* tiny_cache_; - Options options_; - DB* db_; - AutoCompactTest() { dbname_ = test::TmpDir() + "/autocompact_test"; tiny_cache_ = NewLRUCache(100); @@ -47,6 +42,12 @@ class AutoCompactTest { } void DoReads(int n); + + private: + std::string dbname_; + Cache* tiny_cache_; + Options options_; + DB* db_; }; static const int kValueSize = 200 * 1024; @@ -81,17 +82,16 @@ void AutoCompactTest::DoReads(int n) { ASSERT_LT(read, 100) << "Taking too long to compact"; Iterator* iter = db_->NewIterator(ReadOptions()); for (iter->SeekToFirst(); - iter->Valid() && iter->key().ToString() < limit_key; - iter->Next()) { + iter->Valid() && iter->key().ToString() < limit_key; iter->Next()) { // Drop data } delete iter; // Wait a little bit to allow any triggered compactions to complete. Env::Default()->SleepForMicroseconds(1000000); uint64_t size = Size(Key(0), Key(n)); - fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", - read+1, size/1048576.0, Size(Key(n), Key(kCount))/1048576.0); - if (size <= initial_size/10) { + fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1, + size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0); + if (size <= initial_size / 10) { break; } } @@ -100,19 +100,13 @@ void AutoCompactTest::DoReads(int n) { // is pretty much unchanged. 
const int64_t final_other_size = Size(Key(n), Key(kCount)); ASSERT_LE(final_other_size, initial_other_size + 1048576); - ASSERT_GE(final_other_size, initial_other_size/5 - 1048576); + ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576); } -TEST(AutoCompactTest, ReadAll) { - DoReads(kCount); -} +TEST(AutoCompactTest, ReadAll) { DoReads(kCount); } -TEST(AutoCompactTest, ReadHalf) { - DoReads(kCount/2); -} +TEST(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); } } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/db/builder.cc b/src/leveldb/db/builder.cc index f419882197..9520ee4535 100644 --- a/src/leveldb/db/builder.cc +++ b/src/leveldb/db/builder.cc @@ -4,8 +4,8 @@ #include "db/builder.h" -#include "db/filename.h" #include "db/dbformat.h" +#include "db/filename.h" #include "db/table_cache.h" #include "db/version_edit.h" #include "leveldb/db.h" @@ -14,12 +14,8 @@ namespace leveldb { -Status BuildTable(const std::string& dbname, - Env* env, - const Options& options, - TableCache* table_cache, - Iterator* iter, - FileMetaData* meta) { +Status BuildTable(const std::string& dbname, Env* env, const Options& options, + TableCache* table_cache, Iterator* iter, FileMetaData* meta) { Status s; meta->file_size = 0; iter->SeekToFirst(); @@ -41,14 +37,10 @@ Status BuildTable(const std::string& dbname, } // Finish and check for builder errors + s = builder->Finish(); if (s.ok()) { - s = builder->Finish(); - if (s.ok()) { - meta->file_size = builder->FileSize(); - assert(meta->file_size > 0); - } - } else { - builder->Abandon(); + meta->file_size = builder->FileSize(); + assert(meta->file_size > 0); } delete builder; @@ -60,12 +52,11 @@ Status BuildTable(const std::string& dbname, s = file->Close(); } delete file; - file = NULL; + file = nullptr; if (s.ok()) { // Verify that the table is usable - Iterator* it = table_cache->NewIterator(ReadOptions(), - meta->number, + Iterator* it = table_cache->NewIterator(ReadOptions(), meta->number, meta->file_size); s = it->status(); delete it; diff --git a/src/leveldb/db/builder.h b/src/leveldb/db/builder.h index 62431fcf44..7bd0b8049b 100644 --- a/src/leveldb/db/builder.h +++ b/src/leveldb/db/builder.h @@ -22,12 +22,8 @@ class VersionEdit; // *meta will be filled with metadata about the generated table. // If no data is present in *iter, meta->file_size will be set to // zero, and no Table file will be produced. 
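BuildTable above streams an iterator's contents into an on-disk table through leveldb's TableBuilder, and the reworked hunk now calls Finish() first and checks the status afterwards. A hedged sketch of the same Add/Finish/Sync/Close sequence using only the public TableBuilder, Env, and WritableFile interfaces (this is not the internal BuildTable helper; the file name and keys are illustrative):

    #include <string>

    #include "leveldb/env.h"
    #include "leveldb/options.h"
    #include "leveldb/table_builder.h"

    // Writes a tiny, sorted set of key/value pairs into a standalone table file.
    leveldb::Status WriteExampleTable(const std::string& fname) {
      leveldb::Env* env = leveldb::Env::Default();
      leveldb::WritableFile* file = nullptr;
      leveldb::Status s = env->NewWritableFile(fname, &file);
      if (!s.ok()) return s;

      leveldb::Options options;
      leveldb::TableBuilder builder(options, file);
      builder.Add("apple", "1");   // keys must be added in sorted order
      builder.Add("banana", "2");
      s = builder.Finish();        // as in the patch: Finish first, then check
      if (s.ok()) {
        s = file->Sync();
      }
      if (s.ok()) {
        s = file->Close();
      }
      delete file;
      return s;
    }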
-extern Status BuildTable(const std::string& dbname, - Env* env, - const Options& options, - TableCache* table_cache, - Iterator* iter, - FileMetaData* meta); +Status BuildTable(const std::string& dbname, Env* env, const Options& options, + TableCache* table_cache, Iterator* iter, FileMetaData* meta); } // namespace leveldb diff --git a/src/leveldb/db/c.cc b/src/leveldb/db/c.cc index b23e3dcc9d..3a492f9ac5 100644 --- a/src/leveldb/db/c.cc +++ b/src/leveldb/db/c.cc @@ -4,10 +4,9 @@ #include "leveldb/c.h" -#include <stdlib.h> -#ifndef WIN32 -#include <unistd.h> -#endif +#include <cstdint> +#include <cstdlib> + #include "leveldb/cache.h" #include "leveldb/comparator.h" #include "leveldb/db.h" @@ -45,69 +44,72 @@ using leveldb::WriteOptions; extern "C" { -struct leveldb_t { DB* rep; }; -struct leveldb_iterator_t { Iterator* rep; }; -struct leveldb_writebatch_t { WriteBatch rep; }; -struct leveldb_snapshot_t { const Snapshot* rep; }; -struct leveldb_readoptions_t { ReadOptions rep; }; -struct leveldb_writeoptions_t { WriteOptions rep; }; -struct leveldb_options_t { Options rep; }; -struct leveldb_cache_t { Cache* rep; }; -struct leveldb_seqfile_t { SequentialFile* rep; }; -struct leveldb_randomfile_t { RandomAccessFile* rep; }; -struct leveldb_writablefile_t { WritableFile* rep; }; -struct leveldb_logger_t { Logger* rep; }; -struct leveldb_filelock_t { FileLock* rep; }; +struct leveldb_t { + DB* rep; +}; +struct leveldb_iterator_t { + Iterator* rep; +}; +struct leveldb_writebatch_t { + WriteBatch rep; +}; +struct leveldb_snapshot_t { + const Snapshot* rep; +}; +struct leveldb_readoptions_t { + ReadOptions rep; +}; +struct leveldb_writeoptions_t { + WriteOptions rep; +}; +struct leveldb_options_t { + Options rep; +}; +struct leveldb_cache_t { + Cache* rep; +}; +struct leveldb_seqfile_t { + SequentialFile* rep; +}; +struct leveldb_randomfile_t { + RandomAccessFile* rep; +}; +struct leveldb_writablefile_t { + WritableFile* rep; +}; +struct leveldb_logger_t { + Logger* rep; +}; +struct leveldb_filelock_t { + FileLock* rep; +}; struct leveldb_comparator_t : public Comparator { - void* state_; - void (*destructor_)(void*); - int (*compare_)( - void*, - const char* a, size_t alen, - const char* b, size_t blen); - const char* (*name_)(void*); + ~leveldb_comparator_t() override { (*destructor_)(state_); } - virtual ~leveldb_comparator_t() { - (*destructor_)(state_); - } - - virtual int Compare(const Slice& a, const Slice& b) const { + int Compare(const Slice& a, const Slice& b) const override { return (*compare_)(state_, a.data(), a.size(), b.data(), b.size()); } - virtual const char* Name() const { - return (*name_)(state_); - } + const char* Name() const override { return (*name_)(state_); } // No-ops since the C binding does not support key shortening methods. 
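leveldb_comparator_t above adapts C callbacks to leveldb's C++ Comparator interface, leaving the two key-shortening hooks as no-ops. For reference, a sketch of a custom comparator written directly against that interface; the class name and ordering are made up for the example:

    #include <string>

    #include "leveldb/comparator.h"
    #include "leveldb/options.h"
    #include "leveldb/slice.h"

    // Orders keys in descending byte-wise order by reversing the default compare.
    class ReverseBytewiseComparator : public leveldb::Comparator {
     public:
      int Compare(const leveldb::Slice& a,
                  const leveldb::Slice& b) const override {
        return b.compare(a);  // reverse of the default ordering
      }
      const char* Name() const override { return "example.ReverseBytewise"; }
      // Like the C binding, skip the optional key-shortening optimizations.
      void FindShortestSeparator(std::string*,
                                 const leveldb::Slice&) const override {}
      void FindShortSuccessor(std::string*) const override {}
    };

    // Usage sketch: the comparator must outlive the DB, and its Name() must not
    // change between opens of the same database.
    //   static ReverseBytewiseComparator cmp;
    //   leveldb::Options options;
    //   options.comparator = &cmp;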
- virtual void FindShortestSeparator(std::string*, const Slice&) const { } - virtual void FindShortSuccessor(std::string* key) const { } -}; + void FindShortestSeparator(std::string*, const Slice&) const override {} + void FindShortSuccessor(std::string* key) const override {} -struct leveldb_filterpolicy_t : public FilterPolicy { void* state_; void (*destructor_)(void*); + int (*compare_)(void*, const char* a, size_t alen, const char* b, + size_t blen); const char* (*name_)(void*); - char* (*create_)( - void*, - const char* const* key_array, const size_t* key_length_array, - int num_keys, - size_t* filter_length); - unsigned char (*key_match_)( - void*, - const char* key, size_t length, - const char* filter, size_t filter_length); - - virtual ~leveldb_filterpolicy_t() { - (*destructor_)(state_); - } +}; - virtual const char* Name() const { - return (*name_)(state_); - } +struct leveldb_filterpolicy_t : public FilterPolicy { + ~leveldb_filterpolicy_t() override { (*destructor_)(state_); } - virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const { + const char* Name() const override { return (*name_)(state_); } + + void CreateFilter(const Slice* keys, int n, std::string* dst) const override { std::vector<const char*> key_pointers(n); std::vector<size_t> key_sizes(n); for (int i = 0; i < n; i++) { @@ -120,10 +122,19 @@ struct leveldb_filterpolicy_t : public FilterPolicy { free(filter); } - virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const { - return (*key_match_)(state_, key.data(), key.size(), - filter.data(), filter.size()); + bool KeyMayMatch(const Slice& key, const Slice& filter) const override { + return (*key_match_)(state_, key.data(), key.size(), filter.data(), + filter.size()); } + + void* state_; + void (*destructor_)(void*); + const char* (*name_)(void*); + char* (*create_)(void*, const char* const* key_array, + const size_t* key_length_array, int num_keys, + size_t* filter_length); + uint8_t (*key_match_)(void*, const char* key, size_t length, + const char* filter, size_t filter_length); }; struct leveldb_env_t { @@ -132,10 +143,10 @@ struct leveldb_env_t { }; static bool SaveError(char** errptr, const Status& s) { - assert(errptr != NULL); + assert(errptr != nullptr); if (s.ok()) { return false; - } else if (*errptr == NULL) { + } else if (*errptr == nullptr) { *errptr = strdup(s.ToString().c_str()); } else { // TODO(sanjay): Merge with existing error? 
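leveldb_filterpolicy_t above is the matching adapter for FilterPolicy. The usual way to get a filter on either side of the binding is the built-in Bloom filter; a short sketch against the C++ API, with 10 bits per key as a commonly used setting and an illustrative path:

    #include "leveldb/db.h"
    #include "leveldb/filter_policy.h"
    #include "leveldb/options.h"

    int main() {
      const leveldb::FilterPolicy* filter = leveldb::NewBloomFilterPolicy(10);

      leveldb::Options options;
      options.create_if_missing = true;
      options.filter_policy = filter;  // cuts disk reads for negative Get()s

      leveldb::DB* db = nullptr;
      leveldb::Status s = leveldb::DB::Open(options, "/tmp/bloom_example_db", &db);
      if (s.ok()) {
        db->Put(leveldb::WriteOptions(), "key", "value");
        delete db;   // close the DB before releasing the filter policy
      }
      delete filter; // caller owns the policy; delete after the DB is closed
      return s.ok() ? 0 : 1;
    }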
@@ -151,13 +162,11 @@ static char* CopyString(const std::string& str) { return result; } -leveldb_t* leveldb_open( - const leveldb_options_t* options, - const char* name, - char** errptr) { +leveldb_t* leveldb_open(const leveldb_options_t* options, const char* name, + char** errptr) { DB* db; if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) { - return NULL; + return nullptr; } leveldb_t* result = new leveldb_t; result->rep = db; @@ -169,40 +178,27 @@ void leveldb_close(leveldb_t* db) { delete db; } -void leveldb_put( - leveldb_t* db, - const leveldb_writeoptions_t* options, - const char* key, size_t keylen, - const char* val, size_t vallen, - char** errptr) { +void leveldb_put(leveldb_t* db, const leveldb_writeoptions_t* options, + const char* key, size_t keylen, const char* val, size_t vallen, + char** errptr) { SaveError(errptr, db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen))); } -void leveldb_delete( - leveldb_t* db, - const leveldb_writeoptions_t* options, - const char* key, size_t keylen, - char** errptr) { +void leveldb_delete(leveldb_t* db, const leveldb_writeoptions_t* options, + const char* key, size_t keylen, char** errptr) { SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen))); } - -void leveldb_write( - leveldb_t* db, - const leveldb_writeoptions_t* options, - leveldb_writebatch_t* batch, - char** errptr) { +void leveldb_write(leveldb_t* db, const leveldb_writeoptions_t* options, + leveldb_writebatch_t* batch, char** errptr) { SaveError(errptr, db->rep->Write(options->rep, &batch->rep)); } -char* leveldb_get( - leveldb_t* db, - const leveldb_readoptions_t* options, - const char* key, size_t keylen, - size_t* vallen, - char** errptr) { - char* result = NULL; +char* leveldb_get(leveldb_t* db, const leveldb_readoptions_t* options, + const char* key, size_t keylen, size_t* vallen, + char** errptr) { + char* result = nullptr; std::string tmp; Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp); if (s.ok()) { @@ -218,45 +214,40 @@ char* leveldb_get( } leveldb_iterator_t* leveldb_create_iterator( - leveldb_t* db, - const leveldb_readoptions_t* options) { + leveldb_t* db, const leveldb_readoptions_t* options) { leveldb_iterator_t* result = new leveldb_iterator_t; result->rep = db->rep->NewIterator(options->rep); return result; } -const leveldb_snapshot_t* leveldb_create_snapshot( - leveldb_t* db) { +const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db) { leveldb_snapshot_t* result = new leveldb_snapshot_t; result->rep = db->rep->GetSnapshot(); return result; } -void leveldb_release_snapshot( - leveldb_t* db, - const leveldb_snapshot_t* snapshot) { +void leveldb_release_snapshot(leveldb_t* db, + const leveldb_snapshot_t* snapshot) { db->rep->ReleaseSnapshot(snapshot->rep); delete snapshot; } -char* leveldb_property_value( - leveldb_t* db, - const char* propname) { +char* leveldb_property_value(leveldb_t* db, const char* propname) { std::string tmp; if (db->rep->GetProperty(Slice(propname), &tmp)) { // We use strdup() since we expect human readable output. 
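The functions above (leveldb_open, leveldb_put, leveldb_get, and friends) are the core of the C binding: errors come back through a char** that is either left NULL or set to a malloc'd message, and leveldb_get returns a malloc'd, non-NUL-terminated buffer that the caller releases with leveldb_free. A compact usage sketch built only from calls that appear in this file; the database path is illustrative:

    #include <stdio.h>

    #include "leveldb/c.h"

    int main(void) {
      char* err = NULL;

      leveldb_options_t* options = leveldb_options_create();
      leveldb_options_set_create_if_missing(options, 1);
      leveldb_t* db = leveldb_open(options, "/tmp/c_api_example_db", &err);
      if (err != NULL) {
        fprintf(stderr, "open error: %s\n", err);
        leveldb_free(err);
        return 1;
      }

      leveldb_writeoptions_t* woptions = leveldb_writeoptions_create();
      leveldb_put(db, woptions, "key", 3, "value", 5, &err);
      if (err != NULL) {
        fprintf(stderr, "put error: %s\n", err);
        leveldb_free(err);
        err = NULL;
      }

      leveldb_readoptions_t* roptions = leveldb_readoptions_create();
      size_t vallen = 0;
      char* value = leveldb_get(db, roptions, "key", 3, &vallen, &err);
      if (value != NULL) {
        printf("read %d bytes: %.*s\n", (int)vallen, (int)vallen, value);
        leveldb_free(value);  /* the buffer was malloc'd inside the binding */
      }

      leveldb_writeoptions_destroy(woptions);
      leveldb_readoptions_destroy(roptions);
      leveldb_close(db);
      leveldb_options_destroy(options);
      return 0;
    }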
return strdup(tmp.c_str()); } else { - return NULL; + return nullptr; } } -void leveldb_approximate_sizes( - leveldb_t* db, - int num_ranges, - const char* const* range_start_key, const size_t* range_start_key_len, - const char* const* range_limit_key, const size_t* range_limit_key_len, - uint64_t* sizes) { +void leveldb_approximate_sizes(leveldb_t* db, int num_ranges, + const char* const* range_start_key, + const size_t* range_start_key_len, + const char* const* range_limit_key, + const size_t* range_limit_key_len, + uint64_t* sizes) { Range* ranges = new Range[num_ranges]; for (int i = 0; i < num_ranges; i++) { ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]); @@ -266,28 +257,23 @@ void leveldb_approximate_sizes( delete[] ranges; } -void leveldb_compact_range( - leveldb_t* db, - const char* start_key, size_t start_key_len, - const char* limit_key, size_t limit_key_len) { +void leveldb_compact_range(leveldb_t* db, const char* start_key, + size_t start_key_len, const char* limit_key, + size_t limit_key_len) { Slice a, b; db->rep->CompactRange( - // Pass NULL Slice if corresponding "const char*" is NULL - (start_key ? (a = Slice(start_key, start_key_len), &a) : NULL), - (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : NULL)); + // Pass null Slice if corresponding "const char*" is null + (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr), + (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr)); } -void leveldb_destroy_db( - const leveldb_options_t* options, - const char* name, - char** errptr) { +void leveldb_destroy_db(const leveldb_options_t* options, const char* name, + char** errptr) { SaveError(errptr, DestroyDB(name, options->rep)); } -void leveldb_repair_db( - const leveldb_options_t* options, - const char* name, - char** errptr) { +void leveldb_repair_db(const leveldb_options_t* options, const char* name, + char** errptr) { SaveError(errptr, RepairDB(name, options->rep)); } @@ -296,7 +282,7 @@ void leveldb_iter_destroy(leveldb_iterator_t* iter) { delete iter; } -unsigned char leveldb_iter_valid(const leveldb_iterator_t* iter) { +uint8_t leveldb_iter_valid(const leveldb_iterator_t* iter) { return iter->rep->Valid(); } @@ -312,13 +298,9 @@ void leveldb_iter_seek(leveldb_iterator_t* iter, const char* k, size_t klen) { iter->rep->Seek(Slice(k, klen)); } -void leveldb_iter_next(leveldb_iterator_t* iter) { - iter->rep->Next(); -} +void leveldb_iter_next(leveldb_iterator_t* iter) { iter->rep->Next(); } -void leveldb_iter_prev(leveldb_iterator_t* iter) { - iter->rep->Prev(); -} +void leveldb_iter_prev(leveldb_iterator_t* iter) { iter->rep->Prev(); } const char* leveldb_iter_key(const leveldb_iterator_t* iter, size_t* klen) { Slice s = iter->rep->key(); @@ -340,41 +322,34 @@ leveldb_writebatch_t* leveldb_writebatch_create() { return new leveldb_writebatch_t; } -void leveldb_writebatch_destroy(leveldb_writebatch_t* b) { - delete b; -} +void leveldb_writebatch_destroy(leveldb_writebatch_t* b) { delete b; } -void leveldb_writebatch_clear(leveldb_writebatch_t* b) { - b->rep.Clear(); -} +void leveldb_writebatch_clear(leveldb_writebatch_t* b) { b->rep.Clear(); } -void leveldb_writebatch_put( - leveldb_writebatch_t* b, - const char* key, size_t klen, - const char* val, size_t vlen) { +void leveldb_writebatch_put(leveldb_writebatch_t* b, const char* key, + size_t klen, const char* val, size_t vlen) { b->rep.Put(Slice(key, klen), Slice(val, vlen)); } -void leveldb_writebatch_delete( - leveldb_writebatch_t* b, - const char* key, size_t klen) { 
+void leveldb_writebatch_delete(leveldb_writebatch_t* b, const char* key, + size_t klen) { b->rep.Delete(Slice(key, klen)); } -void leveldb_writebatch_iterate( - leveldb_writebatch_t* b, - void* state, - void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen), - void (*deleted)(void*, const char* k, size_t klen)) { +void leveldb_writebatch_iterate(const leveldb_writebatch_t* b, void* state, + void (*put)(void*, const char* k, size_t klen, + const char* v, size_t vlen), + void (*deleted)(void*, const char* k, + size_t klen)) { class H : public WriteBatch::Handler { public: void* state_; void (*put_)(void*, const char* k, size_t klen, const char* v, size_t vlen); void (*deleted_)(void*, const char* k, size_t klen); - virtual void Put(const Slice& key, const Slice& value) { + void Put(const Slice& key, const Slice& value) override { (*put_)(state_, key.data(), key.size(), value.data(), value.size()); } - virtual void Delete(const Slice& key) { + void Delete(const Slice& key) override { (*deleted_)(state_, key.data(), key.size()); } }; @@ -385,47 +360,43 @@ void leveldb_writebatch_iterate( b->rep.Iterate(&handler); } -leveldb_options_t* leveldb_options_create() { - return new leveldb_options_t; +void leveldb_writebatch_append(leveldb_writebatch_t* destination, + const leveldb_writebatch_t* source) { + destination->rep.Append(source->rep); } -void leveldb_options_destroy(leveldb_options_t* options) { - delete options; -} +leveldb_options_t* leveldb_options_create() { return new leveldb_options_t; } + +void leveldb_options_destroy(leveldb_options_t* options) { delete options; } -void leveldb_options_set_comparator( - leveldb_options_t* opt, - leveldb_comparator_t* cmp) { +void leveldb_options_set_comparator(leveldb_options_t* opt, + leveldb_comparator_t* cmp) { opt->rep.comparator = cmp; } -void leveldb_options_set_filter_policy( - leveldb_options_t* opt, - leveldb_filterpolicy_t* policy) { +void leveldb_options_set_filter_policy(leveldb_options_t* opt, + leveldb_filterpolicy_t* policy) { opt->rep.filter_policy = policy; } -void leveldb_options_set_create_if_missing( - leveldb_options_t* opt, unsigned char v) { +void leveldb_options_set_create_if_missing(leveldb_options_t* opt, uint8_t v) { opt->rep.create_if_missing = v; } -void leveldb_options_set_error_if_exists( - leveldb_options_t* opt, unsigned char v) { +void leveldb_options_set_error_if_exists(leveldb_options_t* opt, uint8_t v) { opt->rep.error_if_exists = v; } -void leveldb_options_set_paranoid_checks( - leveldb_options_t* opt, unsigned char v) { +void leveldb_options_set_paranoid_checks(leveldb_options_t* opt, uint8_t v) { opt->rep.paranoid_checks = v; } void leveldb_options_set_env(leveldb_options_t* opt, leveldb_env_t* env) { - opt->rep.env = (env ? env->rep : NULL); + opt->rep.env = (env ? env->rep : nullptr); } void leveldb_options_set_info_log(leveldb_options_t* opt, leveldb_logger_t* l) { - opt->rep.info_log = (l ? l->rep : NULL); + opt->rep.info_log = (l ? 
l->rep : nullptr); } void leveldb_options_set_write_buffer_size(leveldb_options_t* opt, size_t s) { @@ -448,17 +419,18 @@ void leveldb_options_set_block_restart_interval(leveldb_options_t* opt, int n) { opt->rep.block_restart_interval = n; } +void leveldb_options_set_max_file_size(leveldb_options_t* opt, size_t s) { + opt->rep.max_file_size = s; +} + void leveldb_options_set_compression(leveldb_options_t* opt, int t) { opt->rep.compression = static_cast<CompressionType>(t); } leveldb_comparator_t* leveldb_comparator_create( - void* state, - void (*destructor)(void*), - int (*compare)( - void*, - const char* a, size_t alen, - const char* b, size_t blen), + void* state, void (*destructor)(void*), + int (*compare)(void*, const char* a, size_t alen, const char* b, + size_t blen), const char* (*name)(void*)) { leveldb_comparator_t* result = new leveldb_comparator_t; result->state_ = state; @@ -468,22 +440,15 @@ leveldb_comparator_t* leveldb_comparator_create( return result; } -void leveldb_comparator_destroy(leveldb_comparator_t* cmp) { - delete cmp; -} +void leveldb_comparator_destroy(leveldb_comparator_t* cmp) { delete cmp; } leveldb_filterpolicy_t* leveldb_filterpolicy_create( - void* state, - void (*destructor)(void*), - char* (*create_filter)( - void*, - const char* const* key_array, const size_t* key_length_array, - int num_keys, - size_t* filter_length), - unsigned char (*key_may_match)( - void*, - const char* key, size_t length, - const char* filter, size_t filter_length), + void* state, void (*destructor)(void*), + char* (*create_filter)(void*, const char* const* key_array, + const size_t* key_length_array, int num_keys, + size_t* filter_length), + uint8_t (*key_may_match)(void*, const char* key, size_t length, + const char* filter, size_t filter_length), const char* (*name)(void*)) { leveldb_filterpolicy_t* result = new leveldb_filterpolicy_t; result->state_ = state; @@ -503,7 +468,8 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) { // they delegate to a NewBloomFilterPolicy() instead of user // supplied C functions. 
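leveldb_options_set_max_file_size above is one of the new setters exposed by this update; it maps to Options::max_file_size, roughly the number of bytes written to a table file before leveldb switches to a new one. A short sketch of tuning options through the C binding before opening; the values are illustrative, not recommendations:

    #include "leveldb/c.h"

    /* Builds an options object tuned for an example bulk-load workload. */
    static leveldb_options_t* MakeExampleOptions(void) {
      leveldb_options_t* options = leveldb_options_create();
      leveldb_options_set_create_if_missing(options, 1);
      leveldb_options_set_write_buffer_size(options, 8 * 1024 * 1024);
      leveldb_options_set_max_file_size(options, 4 * 1024 * 1024);  /* new setter */
      leveldb_options_set_block_restart_interval(options, 16);
      leveldb_options_set_compression(options, leveldb_snappy_compression);
      return options;  /* pass to leveldb_open(); destroy after the DB is closed */
    }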
struct Wrapper : public leveldb_filterpolicy_t { - const FilterPolicy* rep_; + static void DoNothing(void*) {} + ~Wrapper() { delete rep_; } const char* Name() const { return rep_->Name(); } void CreateFilter(const Slice* keys, int n, std::string* dst) const { @@ -512,11 +478,12 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) { bool KeyMayMatch(const Slice& key, const Slice& filter) const { return rep_->KeyMayMatch(key, filter); } - static void DoNothing(void*) { } + + const FilterPolicy* rep_; }; Wrapper* wrapper = new Wrapper; wrapper->rep_ = NewBloomFilterPolicy(bits_per_key); - wrapper->state_ = NULL; + wrapper->state_ = nullptr; wrapper->destructor_ = &Wrapper::DoNothing; return wrapper; } @@ -525,37 +492,29 @@ leveldb_readoptions_t* leveldb_readoptions_create() { return new leveldb_readoptions_t; } -void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) { - delete opt; -} +void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) { delete opt; } -void leveldb_readoptions_set_verify_checksums( - leveldb_readoptions_t* opt, - unsigned char v) { +void leveldb_readoptions_set_verify_checksums(leveldb_readoptions_t* opt, + uint8_t v) { opt->rep.verify_checksums = v; } -void leveldb_readoptions_set_fill_cache( - leveldb_readoptions_t* opt, unsigned char v) { +void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t* opt, uint8_t v) { opt->rep.fill_cache = v; } -void leveldb_readoptions_set_snapshot( - leveldb_readoptions_t* opt, - const leveldb_snapshot_t* snap) { - opt->rep.snapshot = (snap ? snap->rep : NULL); +void leveldb_readoptions_set_snapshot(leveldb_readoptions_t* opt, + const leveldb_snapshot_t* snap) { + opt->rep.snapshot = (snap ? snap->rep : nullptr); } leveldb_writeoptions_t* leveldb_writeoptions_create() { return new leveldb_writeoptions_t; } -void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) { - delete opt; -} +void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) { delete opt; } -void leveldb_writeoptions_set_sync( - leveldb_writeoptions_t* opt, unsigned char v) { +void leveldb_writeoptions_set_sync(leveldb_writeoptions_t* opt, uint8_t v) { opt->rep.sync = v; } @@ -582,16 +541,22 @@ void leveldb_env_destroy(leveldb_env_t* env) { delete env; } -void leveldb_free(void* ptr) { - free(ptr); -} +char* leveldb_env_get_test_directory(leveldb_env_t* env) { + std::string result; + if (!env->rep->GetTestDirectory(&result).ok()) { + return nullptr; + } -int leveldb_major_version() { - return kMajorVersion; + char* buffer = static_cast<char*>(malloc(result.size() + 1)); + memcpy(buffer, result.data(), result.size()); + buffer[result.size()] = '\0'; + return buffer; } -int leveldb_minor_version() { - return kMinorVersion; -} +void leveldb_free(void* ptr) { free(ptr); } + +int leveldb_major_version() { return kMajorVersion; } + +int leveldb_minor_version() { return kMinorVersion; } } // end extern "C" diff --git a/src/leveldb/db/c_test.c b/src/leveldb/db/c_test.c index 7cd5ee0207..16c77eed6a 100644 --- a/src/leveldb/db/c_test.c +++ b/src/leveldb/db/c_test.c @@ -8,24 +8,14 @@ #include <stdio.h> #include <stdlib.h> #include <string.h> -#include <sys/types.h> -#include <unistd.h> const char* phase = ""; -static char dbname[200]; static void StartPhase(const char* name) { fprintf(stderr, "=== Test %s\n", name); phase = name; } -static const char* GetTempDir(void) { - const char* ret = getenv("TEST_TMPDIR"); - if (ret == NULL || ret[0] == '\0') - ret = "/tmp"; - return ret; -} - #define CheckNoError(err) \ if ((err) != NULL) 
{ \ fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, (err)); \ @@ -130,7 +120,7 @@ static const char* CmpName(void* arg) { } // Custom filter policy -static unsigned char fake_filter_result = 1; +static uint8_t fake_filter_result = 1; static void FilterDestroy(void* arg) { } static const char* FilterName(void* arg) { return "TestFilter"; @@ -145,10 +135,8 @@ static char* FilterCreate( memcpy(result, "fake", 4); return result; } -unsigned char FilterKeyMatch( - void* arg, - const char* key, size_t length, - const char* filter, size_t filter_length) { +uint8_t FilterKeyMatch(void* arg, const char* key, size_t length, + const char* filter, size_t filter_length) { CheckCondition(filter_length == 4); CheckCondition(memcmp(filter, "fake", 4) == 0); return fake_filter_result; @@ -162,21 +150,19 @@ int main(int argc, char** argv) { leveldb_options_t* options; leveldb_readoptions_t* roptions; leveldb_writeoptions_t* woptions; + char* dbname; char* err = NULL; int run = -1; CheckCondition(leveldb_major_version() >= 1); CheckCondition(leveldb_minor_version() >= 1); - snprintf(dbname, sizeof(dbname), - "%s/leveldb_c_test-%d", - GetTempDir(), - ((int) geteuid())); - StartPhase("create_objects"); cmp = leveldb_comparator_create(NULL, CmpDestroy, CmpCompare, CmpName); env = leveldb_create_default_env(); cache = leveldb_cache_create_lru(100000); + dbname = leveldb_env_get_test_directory(env); + CheckCondition(dbname != NULL); options = leveldb_options_create(); leveldb_options_set_comparator(options, cmp); @@ -189,6 +175,7 @@ int main(int argc, char** argv) { leveldb_options_set_max_open_files(options, 10); leveldb_options_set_block_size(options, 1024); leveldb_options_set_block_restart_interval(options, 8); + leveldb_options_set_max_file_size(options, 3 << 20); leveldb_options_set_compression(options, leveldb_no_compression); roptions = leveldb_readoptions_create(); @@ -239,12 +226,18 @@ int main(int argc, char** argv) { leveldb_writebatch_clear(wb); leveldb_writebatch_put(wb, "bar", 3, "b", 1); leveldb_writebatch_put(wb, "box", 3, "c", 1); - leveldb_writebatch_delete(wb, "bar", 3); + + leveldb_writebatch_t* wb2 = leveldb_writebatch_create(); + leveldb_writebatch_delete(wb2, "bar", 3); + leveldb_writebatch_append(wb, wb2); + leveldb_writebatch_destroy(wb2); + leveldb_write(db, woptions, wb, &err); CheckNoError(err); CheckGet(db, roptions, "foo", "hello"); CheckGet(db, roptions, "bar", NULL); CheckGet(db, roptions, "box", "c"); + int pos = 0; leveldb_writebatch_iterate(wb, &pos, CheckPut, CheckDel); CheckCondition(pos == 3); @@ -381,6 +374,7 @@ int main(int argc, char** argv) { leveldb_options_destroy(options); leveldb_readoptions_destroy(roptions); leveldb_writeoptions_destroy(woptions); + leveldb_free(dbname); leveldb_cache_destroy(cache); leveldb_comparator_destroy(cmp); leveldb_env_destroy(env); diff --git a/src/leveldb/db/corruption_test.cc b/src/leveldb/db/corruption_test.cc index 37a484d25f..42f5237c65 100644 --- a/src/leveldb/db/corruption_test.cc +++ b/src/leveldb/db/corruption_test.cc @@ -2,20 +2,16 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
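
The c_test.c hunks above exercise the new C-API entry points (leveldb_env_get_test_directory, leveldb_writebatch_append, leveldb_options_set_max_file_size). A trimmed sketch of that flow, with error handling reduced to asserts; it mirrors the test rather than adding anything new:

    #include <assert.h>
    #include <stdlib.h>
    #include "leveldb/c.h"

    int main(void) {
      char* err = NULL;

      leveldb_env_t* env = leveldb_create_default_env();
      char* dbname = leveldb_env_get_test_directory(env);  // malloc'd; release with leveldb_free
      assert(dbname != NULL);

      leveldb_options_t* options = leveldb_options_create();
      leveldb_options_set_create_if_missing(options, 1);
      leveldb_options_set_max_file_size(options, 3 << 20);  // setter added in the hunk above

      leveldb_t* db = leveldb_open(options, dbname, &err);
      assert(err == NULL);

      // Build two batches and splice the second into the first.
      leveldb_writebatch_t* wb = leveldb_writebatch_create();
      leveldb_writebatch_put(wb, "bar", 3, "b", 1);
      leveldb_writebatch_t* wb2 = leveldb_writebatch_create();
      leveldb_writebatch_delete(wb2, "bar", 3);
      leveldb_writebatch_append(wb, wb2);  // new in this API revision
      leveldb_writebatch_destroy(wb2);

      leveldb_writeoptions_t* woptions = leveldb_writeoptions_create();
      leveldb_write(db, woptions, wb, &err);
      assert(err == NULL);

      leveldb_writebatch_destroy(wb);
      leveldb_writeoptions_destroy(woptions);
      leveldb_close(db);
      leveldb_options_destroy(options);
      leveldb_free(dbname);
      leveldb_env_destroy(env);
      return 0;
    }
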
-#include "leveldb/db.h" - -#include <errno.h> -#include <fcntl.h> -#include <sys/stat.h> #include <sys/types.h> -#include "leveldb/cache.h" -#include "leveldb/env.h" -#include "leveldb/table.h" -#include "leveldb/write_batch.h" + #include "db/db_impl.h" #include "db/filename.h" #include "db/log_format.h" #include "db/version_set.h" +#include "leveldb/cache.h" +#include "leveldb/db.h" +#include "leveldb/table.h" +#include "leveldb/write_batch.h" #include "util/logging.h" #include "util/testharness.h" #include "util/testutil.h" @@ -26,44 +22,35 @@ static const int kValueSize = 1000; class CorruptionTest { public: - test::ErrorEnv env_; - std::string dbname_; - Cache* tiny_cache_; - Options options_; - DB* db_; - - CorruptionTest() { - tiny_cache_ = NewLRUCache(100); + CorruptionTest() + : db_(nullptr), + dbname_("/memenv/corruption_test"), + tiny_cache_(NewLRUCache(100)) { options_.env = &env_; options_.block_cache = tiny_cache_; - dbname_ = test::TmpDir() + "/corruption_test"; DestroyDB(dbname_, options_); - db_ = NULL; options_.create_if_missing = true; Reopen(); options_.create_if_missing = false; } ~CorruptionTest() { - delete db_; - DestroyDB(dbname_, Options()); - delete tiny_cache_; + delete db_; + delete tiny_cache_; } Status TryReopen() { delete db_; - db_ = NULL; + db_ = nullptr; return DB::Open(options_, dbname_, &db_); } - void Reopen() { - ASSERT_OK(TryReopen()); - } + void Reopen() { ASSERT_OK(TryReopen()); } void RepairDB() { delete db_; - db_ = NULL; + db_ = nullptr; ASSERT_OK(::leveldb::RepairDB(dbname_, options_)); } @@ -71,7 +58,7 @@ class CorruptionTest { std::string key_space, value_space; WriteBatch batch; for (int i = 0; i < n; i++) { - //if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n); + // if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n); Slice key = Key(i, &key_space); batch.Clear(); batch.Put(key, Value(i, &value_space)); @@ -100,8 +87,7 @@ class CorruptionTest { // Ignore boundary keys. 
continue; } - if (!ConsumeDecimalNumber(&in, &key) || - !in.empty() || + if (!ConsumeDecimalNumber(&in, &key) || !in.empty() || key < next_expected) { bad_keys++; continue; @@ -126,14 +112,13 @@ class CorruptionTest { void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) { // Pick file to corrupt std::vector<std::string> filenames; - ASSERT_OK(env_.GetChildren(dbname_, &filenames)); + ASSERT_OK(env_.target()->GetChildren(dbname_, &filenames)); uint64_t number; FileType type; std::string fname; int picked_number = -1; for (size_t i = 0; i < filenames.size(); i++) { - if (ParseFileName(filenames[i], &number, &type) && - type == filetype && + if (ParseFileName(filenames[i], &number, &type) && type == filetype && int(number) > picked_number) { // Pick latest file fname = dbname_ + "/" + filenames[i]; picked_number = number; @@ -141,35 +126,32 @@ class CorruptionTest { } ASSERT_TRUE(!fname.empty()) << filetype; - struct stat sbuf; - if (stat(fname.c_str(), &sbuf) != 0) { - const char* msg = strerror(errno); - ASSERT_TRUE(false) << fname << ": " << msg; - } + uint64_t file_size; + ASSERT_OK(env_.target()->GetFileSize(fname, &file_size)); if (offset < 0) { // Relative to end of file; make it absolute - if (-offset > sbuf.st_size) { + if (-offset > file_size) { offset = 0; } else { - offset = sbuf.st_size + offset; + offset = file_size + offset; } } - if (offset > sbuf.st_size) { - offset = sbuf.st_size; + if (offset > file_size) { + offset = file_size; } - if (offset + bytes_to_corrupt > sbuf.st_size) { - bytes_to_corrupt = sbuf.st_size - offset; + if (offset + bytes_to_corrupt > file_size) { + bytes_to_corrupt = file_size - offset; } // Do it std::string contents; - Status s = ReadFileToString(Env::Default(), fname, &contents); + Status s = ReadFileToString(env_.target(), fname, &contents); ASSERT_TRUE(s.ok()) << s.ToString(); for (int i = 0; i < bytes_to_corrupt; i++) { contents[i + offset] ^= 0x80; } - s = WriteStringToFile(Env::Default(), contents, fname); + s = WriteStringToFile(env_.target(), contents, fname); ASSERT_TRUE(s.ok()) << s.ToString(); } @@ -197,12 +179,20 @@ class CorruptionTest { Random r(k); return test::RandomString(&r, kValueSize, storage); } + + test::ErrorEnv env_; + Options options_; + DB* db_; + + private: + std::string dbname_; + Cache* tiny_cache_; }; TEST(CorruptionTest, Recovery) { Build(100); Check(100, 100); - Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record + Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record Corrupt(kLogFile, log::kBlockSize + 1000, 1); // Somewhere in second block Reopen(); @@ -237,8 +227,8 @@ TEST(CorruptionTest, TableFile) { Build(100); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); dbi->TEST_CompactMemTable(); - dbi->TEST_CompactRange(0, NULL, NULL); - dbi->TEST_CompactRange(1, NULL, NULL); + dbi->TEST_CompactRange(0, nullptr, nullptr); + dbi->TEST_CompactRange(1, nullptr, nullptr); Corrupt(kTableFile, 100, 1); Check(90, 99); @@ -251,8 +241,8 @@ TEST(CorruptionTest, TableFileRepair) { Build(100); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); dbi->TEST_CompactMemTable(); - dbi->TEST_CompactRange(0, NULL, NULL); - dbi->TEST_CompactRange(1, NULL, NULL); + dbi->TEST_CompactRange(0, nullptr, nullptr); + dbi->TEST_CompactRange(1, nullptr, nullptr); Corrupt(kTableFile, 100, 1); RepairDB(); @@ -302,7 +292,7 @@ TEST(CorruptionTest, CorruptedDescriptor) { ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello")); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); dbi->TEST_CompactMemTable(); - dbi->TEST_CompactRange(0, NULL, 
NULL); + dbi->TEST_CompactRange(0, nullptr, nullptr); Corrupt(kDescriptorFile, 0, 1000); Status s = TryReopen(); @@ -343,7 +333,7 @@ TEST(CorruptionTest, CompactionInputErrorParanoid) { Corrupt(kTableFile, 100, 1); env_.SleepForMicroseconds(100000); } - dbi->CompactRange(NULL, NULL); + dbi->CompactRange(nullptr, nullptr); // Write must fail because of corrupted table std::string tmp1, tmp2; @@ -369,6 +359,4 @@ TEST(CorruptionTest, UnrelatedKeys) { } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/db/db_impl.cc b/src/leveldb/db/db_impl.cc index 3bb58e560a..65e31724bc 100644 --- a/src/leveldb/db/db_impl.cc +++ b/src/leveldb/db/db_impl.cc @@ -4,12 +4,15 @@ #include "db/db_impl.h" +#include <stdint.h> +#include <stdio.h> + #include <algorithm> +#include <atomic> #include <set> #include <string> -#include <stdint.h> -#include <stdio.h> #include <vector> + #include "db/builder.h" #include "db/db_iter.h" #include "db/dbformat.h" @@ -39,16 +42,33 @@ const int kNumNonTableCacheFiles = 10; // Information kept for every waiting writer struct DBImpl::Writer { + explicit Writer(port::Mutex* mu) + : batch(nullptr), sync(false), done(false), cv(mu) {} + Status status; WriteBatch* batch; bool sync; bool done; port::CondVar cv; - - explicit Writer(port::Mutex* mu) : cv(mu) { } }; struct DBImpl::CompactionState { + // Files produced by compaction + struct Output { + uint64_t number; + uint64_t file_size; + InternalKey smallest, largest; + }; + + Output* current_output() { return &outputs[outputs.size() - 1]; } + + explicit CompactionState(Compaction* c) + : compaction(c), + smallest_snapshot(0), + outfile(nullptr), + builder(nullptr), + total_bytes(0) {} + Compaction* const compaction; // Sequence numbers < smallest_snapshot are not significant since we @@ -57,12 +77,6 @@ struct DBImpl::CompactionState { // we can drop all entries for the same key with sequence numbers < S. SequenceNumber smallest_snapshot; - // Files produced by compaction - struct Output { - uint64_t number; - uint64_t file_size; - InternalKey smallest, largest; - }; std::vector<Output> outputs; // State kept for output being generated @@ -70,19 +84,10 @@ struct DBImpl::CompactionState { TableBuilder* builder; uint64_t total_bytes; - - Output* current_output() { return &outputs[outputs.size()-1]; } - - explicit CompactionState(Compaction* c) - : compaction(c), - outfile(NULL), - builder(NULL), - total_bytes(0) { - } }; // Fix user-supplied options to be reasonable -template <class T,class V> +template <class T, class V> static void ClipToRange(T* ptr, V minvalue, V maxvalue) { if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue; if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue; @@ -93,27 +98,32 @@ Options SanitizeOptions(const std::string& dbname, const Options& src) { Options result = src; result.comparator = icmp; - result.filter_policy = (src.filter_policy != NULL) ? ipolicy : NULL; - ClipToRange(&result.max_open_files, 64 + kNumNonTableCacheFiles, 50000); - ClipToRange(&result.write_buffer_size, 64<<10, 1<<30); - ClipToRange(&result.max_file_size, 1<<20, 1<<30); - ClipToRange(&result.block_size, 1<<10, 4<<20); - if (result.info_log == NULL) { + result.filter_policy = (src.filter_policy != nullptr) ? 
ipolicy : nullptr; + ClipToRange(&result.max_open_files, 64 + kNumNonTableCacheFiles, 50000); + ClipToRange(&result.write_buffer_size, 64 << 10, 1 << 30); + ClipToRange(&result.max_file_size, 1 << 20, 1 << 30); + ClipToRange(&result.block_size, 1 << 10, 4 << 20); + if (result.info_log == nullptr) { // Open a log file in the same directory as the db src.env->CreateDir(dbname); // In case it does not exist src.env->RenameFile(InfoLogFileName(dbname), OldInfoLogFileName(dbname)); Status s = src.env->NewLogger(InfoLogFileName(dbname), &result.info_log); if (!s.ok()) { // No place suitable for logging - result.info_log = NULL; + result.info_log = nullptr; } } - if (result.block_cache == NULL) { + if (result.block_cache == nullptr) { result.block_cache = NewLRUCache(8 << 20); } return result; } +static int TableCacheSize(const Options& sanitized_options) { + // Reserve ten files or so for other uses and give the rest to TableCache. + return sanitized_options.max_open_files - kNumNonTableCacheFiles; +} + DBImpl::DBImpl(const Options& raw_options, const std::string& dbname) : env_(raw_options.env), internal_comparator_(raw_options.comparator), @@ -123,44 +133,39 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname) owns_info_log_(options_.info_log != raw_options.info_log), owns_cache_(options_.block_cache != raw_options.block_cache), dbname_(dbname), - db_lock_(NULL), - shutting_down_(NULL), - bg_cv_(&mutex_), - mem_(NULL), - imm_(NULL), - logfile_(NULL), + table_cache_(new TableCache(dbname_, options_, TableCacheSize(options_))), + db_lock_(nullptr), + shutting_down_(false), + background_work_finished_signal_(&mutex_), + mem_(nullptr), + imm_(nullptr), + has_imm_(false), + logfile_(nullptr), logfile_number_(0), - log_(NULL), + log_(nullptr), seed_(0), tmp_batch_(new WriteBatch), - bg_compaction_scheduled_(false), - manual_compaction_(NULL) { - has_imm_.Release_Store(NULL); - - // Reserve ten files or so for other uses and give the rest to TableCache. - const int table_cache_size = options_.max_open_files - kNumNonTableCacheFiles; - table_cache_ = new TableCache(dbname_, &options_, table_cache_size); - - versions_ = new VersionSet(dbname_, &options_, table_cache_, - &internal_comparator_); -} + background_compaction_scheduled_(false), + manual_compaction_(nullptr), + versions_(new VersionSet(dbname_, &options_, table_cache_, + &internal_comparator_)) {} DBImpl::~DBImpl() { - // Wait for background work to finish + // Wait for background work to finish. mutex_.Lock(); - shutting_down_.Release_Store(this); // Any non-NULL value is ok - while (bg_compaction_scheduled_) { - bg_cv_.Wait(); + shutting_down_.store(true, std::memory_order_release); + while (background_compaction_scheduled_) { + background_work_finished_signal_.Wait(); } mutex_.Unlock(); - if (db_lock_ != NULL) { + if (db_lock_ != nullptr) { env_->UnlockFile(db_lock_); } delete versions_; - if (mem_ != NULL) mem_->Unref(); - if (imm_ != NULL) imm_->Unref(); + if (mem_ != nullptr) mem_->Unref(); + if (imm_ != nullptr) imm_->Unref(); delete tmp_batch_; delete log_; delete logfile_; @@ -216,6 +221,8 @@ void DBImpl::MaybeIgnoreError(Status* s) const { } void DBImpl::DeleteObsoleteFiles() { + mutex_.AssertHeld(); + if (!bg_error_.ok()) { // After a background error, we don't know whether a new version may // or may not have been committed, so we cannot safely garbage collect. 
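
The switch from port::AtomicPointer to std::atomic<bool> in the destructor above boils down to a standard release/acquire flag paired with a condition variable. A generic sketch of that pattern using only the standard library; class and member names are illustrative, not DBImpl's:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    class Worker {
     public:
      // Background threads observe the flag with acquire ordering.
      bool ShuttingDown() const {
        return shutting_down_.load(std::memory_order_acquire);
      }

      // The owner publishes the flag, then waits for scheduled work to drain.
      void Shutdown() {
        std::unique_lock<std::mutex> lock(mu_);
        shutting_down_.store(true, std::memory_order_release);
        work_finished_.wait(lock, [this] { return !work_scheduled_; });
      }

      // Called by the background thread when its unit of work completes.
      void FinishWork() {
        std::lock_guard<std::mutex> lock(mu_);
        work_scheduled_ = false;
        work_finished_.notify_all();
      }

     private:
      std::atomic<bool> shutting_down_{false};
      std::mutex mu_;
      std::condition_variable work_finished_;
      bool work_scheduled_ = true;
    };
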
@@ -227,11 +234,12 @@ void DBImpl::DeleteObsoleteFiles() { versions_->AddLiveFiles(&live); std::vector<std::string> filenames; - env_->GetChildren(dbname_, &filenames); // Ignoring errors on purpose + env_->GetChildren(dbname_, &filenames); // Ignoring errors on purpose uint64_t number; FileType type; - for (size_t i = 0; i < filenames.size(); i++) { - if (ParseFileName(filenames[i], &number, &type)) { + std::vector<std::string> files_to_delete; + for (std::string& filename : filenames) { + if (ParseFileName(filename, &number, &type)) { bool keep = true; switch (type) { case kLogFile: @@ -259,26 +267,34 @@ void DBImpl::DeleteObsoleteFiles() { } if (!keep) { + files_to_delete.push_back(std::move(filename)); if (type == kTableFile) { table_cache_->Evict(number); } - Log(options_.info_log, "Delete type=%d #%lld\n", - int(type), + Log(options_.info_log, "Delete type=%d #%lld\n", static_cast<int>(type), static_cast<unsigned long long>(number)); - env_->DeleteFile(dbname_ + "/" + filenames[i]); } } } + + // While deleting all files unblock other threads. All files being deleted + // have unique names which will not collide with newly created files and + // are therefore safe to delete while allowing other threads to proceed. + mutex_.Unlock(); + for (const std::string& filename : files_to_delete) { + env_->DeleteFile(dbname_ + "/" + filename); + } + mutex_.Lock(); } -Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) { +Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) { mutex_.AssertHeld(); // Ignore error from CreateDir since the creation of the DB is // committed only when the descriptor is created, and this directory // may already exist from a previous failed creation attempt. env_->CreateDir(dbname_); - assert(db_lock_ == NULL); + assert(db_lock_ == nullptr); Status s = env_->LockFile(LockFileName(dbname_), &db_lock_); if (!s.ok()) { return s; @@ -296,8 +312,8 @@ Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) { } } else { if (options_.error_if_exists) { - return Status::InvalidArgument( - dbname_, "exists (error_if_exists is true)"); + return Status::InvalidArgument(dbname_, + "exists (error_if_exists is true)"); } } @@ -369,12 +385,12 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log, Env* env; Logger* info_log; const char* fname; - Status* status; // NULL if options_.paranoid_checks==false - virtual void Corruption(size_t bytes, const Status& s) { + Status* status; // null if options_.paranoid_checks==false + void Corruption(size_t bytes, const Status& s) override { Log(info_log, "%s%s: dropping %d bytes; %s", - (this->status == NULL ? "(ignoring error) " : ""), - fname, static_cast<int>(bytes), s.ToString().c_str()); - if (this->status != NULL && this->status->ok()) *this->status = s; + (this->status == nullptr ? "(ignoring error) " : ""), fname, + static_cast<int>(bytes), s.ToString().c_str()); + if (this->status != nullptr && this->status->ok()) *this->status = s; } }; @@ -394,32 +410,30 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log, reporter.env = env_; reporter.info_log = options_.info_log; reporter.fname = fname.c_str(); - reporter.status = (options_.paranoid_checks ? &status : NULL); + reporter.status = (options_.paranoid_checks ? &status : nullptr); // We intentionally make log::Reader do checksumming even if // paranoid_checks==false so that corruptions cause entire commits // to be skipped instead of propagating bad information (like overly // large sequence numbers). 
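
DeleteObsoleteFiles() now collects the doomed filenames under the mutex and performs the deletions after unlocking, which is safe because the names are unique and cannot collide with newly created files. A stripped-down sketch of that shape with generic standard-library types (not the DBImpl code):

    #include <cstdio>
    #include <mutex>
    #include <string>
    #include <vector>

    class FileReaper {
     public:
      void DeleteObsolete() {
        std::vector<std::string> files_to_delete;
        {
          std::lock_guard<std::mutex> lock(mu_);
          // Decide what to delete while the shared state is consistent.
          files_to_delete.swap(obsolete_);
        }
        // Slow filesystem work happens without blocking other threads.
        for (const std::string& name : files_to_delete) {
          std::remove(name.c_str());
        }
      }

     private:
      std::mutex mu_;
      std::vector<std::string> obsolete_;
    };
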
- log::Reader reader(file, &reporter, true/*checksum*/, - 0/*initial_offset*/); + log::Reader reader(file, &reporter, true /*checksum*/, 0 /*initial_offset*/); Log(options_.info_log, "Recovering log #%llu", - (unsigned long long) log_number); + (unsigned long long)log_number); // Read all the records and add to a memtable std::string scratch; Slice record; WriteBatch batch; int compactions = 0; - MemTable* mem = NULL; - while (reader.ReadRecord(&record, &scratch) && - status.ok()) { + MemTable* mem = nullptr; + while (reader.ReadRecord(&record, &scratch) && status.ok()) { if (record.size() < 12) { - reporter.Corruption( - record.size(), Status::Corruption("log record too small", fname)); + reporter.Corruption(record.size(), + Status::Corruption("log record too small", fname)); continue; } WriteBatchInternal::SetContents(&batch, record); - if (mem == NULL) { + if (mem == nullptr) { mem = new MemTable(internal_comparator_); mem->Ref(); } @@ -428,9 +442,8 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log, if (!status.ok()) { break; } - const SequenceNumber last_seq = - WriteBatchInternal::Sequence(&batch) + - WriteBatchInternal::Count(&batch) - 1; + const SequenceNumber last_seq = WriteBatchInternal::Sequence(&batch) + + WriteBatchInternal::Count(&batch) - 1; if (last_seq > *max_sequence) { *max_sequence = last_seq; } @@ -438,9 +451,9 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log, if (mem->ApproximateMemoryUsage() > options_.write_buffer_size) { compactions++; *save_manifest = true; - status = WriteLevel0Table(mem, edit, NULL); + status = WriteLevel0Table(mem, edit, nullptr); mem->Unref(); - mem = NULL; + mem = nullptr; if (!status.ok()) { // Reflect errors immediately so that conditions like full // file-systems cause the DB::Open() to fail. @@ -453,31 +466,31 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log, // See if we should keep reusing the last log file. if (status.ok() && options_.reuse_logs && last_log && compactions == 0) { - assert(logfile_ == NULL); - assert(log_ == NULL); - assert(mem_ == NULL); + assert(logfile_ == nullptr); + assert(log_ == nullptr); + assert(mem_ == nullptr); uint64_t lfile_size; if (env_->GetFileSize(fname, &lfile_size).ok() && env_->NewAppendableFile(fname, &logfile_).ok()) { Log(options_.info_log, "Reusing old log %s \n", fname.c_str()); log_ = new log::Writer(logfile_, lfile_size); logfile_number_ = log_number; - if (mem != NULL) { + if (mem != nullptr) { mem_ = mem; - mem = NULL; + mem = nullptr; } else { - // mem can be NULL if lognum exists but was empty. + // mem can be nullptr if lognum exists but was empty. mem_ = new MemTable(internal_comparator_); mem_->Ref(); } } } - if (mem != NULL) { + if (mem != nullptr) { // mem did not get reused; compact it. 
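
RecoverLogFile() above drives log::Reader with a Corruption reporter. A condensed sketch of that read loop, using the same internal headers the diff touches; db/log_reader.h is not part of the public API, so treat this as illustrative only:

    #include <cstdio>
    #include <string>
    #include "db/log_reader.h"
    #include "leveldb/env.h"
    #include "leveldb/slice.h"
    #include "leveldb/status.h"

    // Reporter that only logs; a paranoid build would also latch the status.
    struct LoggingReporter : public leveldb::log::Reader::Reporter {
      void Corruption(size_t bytes, const leveldb::Status& s) override {
        std::fprintf(stderr, "dropping %zu bytes: %s\n", bytes, s.ToString().c_str());
      }
    };

    leveldb::Status CountRecords(leveldb::Env* env, const std::string& fname, int* count) {
      leveldb::SequentialFile* file;
      leveldb::Status s = env->NewSequentialFile(fname, &file);
      if (!s.ok()) return s;

      LoggingReporter reporter;
      leveldb::log::Reader reader(file, &reporter, true /*checksum*/, 0 /*initial_offset*/);
      std::string scratch;
      leveldb::Slice record;
      *count = 0;
      while (reader.ReadRecord(&record, &scratch)) {
        ++*count;
      }
      delete file;
      return s;
    }
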
if (status.ok()) { *save_manifest = true; - status = WriteLevel0Table(mem, edit, NULL); + status = WriteLevel0Table(mem, edit, nullptr); } mem->Unref(); } @@ -494,7 +507,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit, pending_outputs_.insert(meta.number); Iterator* iter = mem->NewIterator(); Log(options_.info_log, "Level-0 table #%llu: started", - (unsigned long long) meta.number); + (unsigned long long)meta.number); Status s; { @@ -504,24 +517,22 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit, } Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s", - (unsigned long long) meta.number, - (unsigned long long) meta.file_size, + (unsigned long long)meta.number, (unsigned long long)meta.file_size, s.ToString().c_str()); delete iter; pending_outputs_.erase(meta.number); - // Note that if file_size is zero, the file has been deleted and // should not be added to the manifest. int level = 0; if (s.ok() && meta.file_size > 0) { const Slice min_user_key = meta.smallest.user_key(); const Slice max_user_key = meta.largest.user_key(); - if (base != NULL) { + if (base != nullptr) { level = base->PickLevelForMemTableOutput(min_user_key, max_user_key); } - edit->AddFile(level, meta.number, meta.file_size, - meta.smallest, meta.largest); + edit->AddFile(level, meta.number, meta.file_size, meta.smallest, + meta.largest); } CompactionStats stats; @@ -533,7 +544,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit, void DBImpl::CompactMemTable() { mutex_.AssertHeld(); - assert(imm_ != NULL); + assert(imm_ != nullptr); // Save the contents of the memtable as a new Table VersionEdit edit; @@ -542,7 +553,7 @@ void DBImpl::CompactMemTable() { Status s = WriteLevel0Table(imm_, &edit, base); base->Unref(); - if (s.ok() && shutting_down_.Acquire_Load()) { + if (s.ok() && shutting_down_.load(std::memory_order_acquire)) { s = Status::IOError("Deleting DB during memtable compaction"); } @@ -556,8 +567,8 @@ void DBImpl::CompactMemTable() { if (s.ok()) { // Commit to the new state imm_->Unref(); - imm_ = NULL; - has_imm_.Release_Store(NULL); + imm_ = nullptr; + has_imm_.store(false, std::memory_order_release); DeleteObsoleteFiles(); } else { RecordBackgroundError(s); @@ -575,13 +586,14 @@ void DBImpl::CompactRange(const Slice* begin, const Slice* end) { } } } - TEST_CompactMemTable(); // TODO(sanjay): Skip if memtable does not overlap + TEST_CompactMemTable(); // TODO(sanjay): Skip if memtable does not overlap for (int level = 0; level < max_level_with_files; level++) { TEST_CompactRange(level, begin, end); } } -void DBImpl::TEST_CompactRange(int level, const Slice* begin,const Slice* end) { +void DBImpl::TEST_CompactRange(int level, const Slice* begin, + const Slice* end) { assert(level >= 0); assert(level + 1 < config::kNumLevels); @@ -590,44 +602,45 @@ void DBImpl::TEST_CompactRange(int level, const Slice* begin,const Slice* end) { ManualCompaction manual; manual.level = level; manual.done = false; - if (begin == NULL) { - manual.begin = NULL; + if (begin == nullptr) { + manual.begin = nullptr; } else { begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek); manual.begin = &begin_storage; } - if (end == NULL) { - manual.end = NULL; + if (end == nullptr) { + manual.end = nullptr; } else { end_storage = InternalKey(*end, 0, static_cast<ValueType>(0)); manual.end = &end_storage; } MutexLock l(&mutex_); - while (!manual.done && !shutting_down_.Acquire_Load() && bg_error_.ok()) { - if (manual_compaction_ == NULL) { // Idle + while 
(!manual.done && !shutting_down_.load(std::memory_order_acquire) && + bg_error_.ok()) { + if (manual_compaction_ == nullptr) { // Idle manual_compaction_ = &manual; MaybeScheduleCompaction(); } else { // Running either my compaction or another compaction. - bg_cv_.Wait(); + background_work_finished_signal_.Wait(); } } if (manual_compaction_ == &manual) { // Cancel my manual compaction since we aborted early for some reason. - manual_compaction_ = NULL; + manual_compaction_ = nullptr; } } Status DBImpl::TEST_CompactMemTable() { - // NULL batch means just wait for earlier writes to be done - Status s = Write(WriteOptions(), NULL); + // nullptr batch means just wait for earlier writes to be done + Status s = Write(WriteOptions(), nullptr); if (s.ok()) { // Wait until the compaction completes MutexLock l(&mutex_); - while (imm_ != NULL && bg_error_.ok()) { - bg_cv_.Wait(); + while (imm_ != nullptr && bg_error_.ok()) { + background_work_finished_signal_.Wait(); } - if (imm_ != NULL) { + if (imm_ != nullptr) { s = bg_error_; } } @@ -638,24 +651,23 @@ void DBImpl::RecordBackgroundError(const Status& s) { mutex_.AssertHeld(); if (bg_error_.ok()) { bg_error_ = s; - bg_cv_.SignalAll(); + background_work_finished_signal_.SignalAll(); } } void DBImpl::MaybeScheduleCompaction() { mutex_.AssertHeld(); - if (bg_compaction_scheduled_) { + if (background_compaction_scheduled_) { // Already scheduled - } else if (shutting_down_.Acquire_Load()) { + } else if (shutting_down_.load(std::memory_order_acquire)) { // DB is being deleted; no more background compactions } else if (!bg_error_.ok()) { // Already got an error; no more changes - } else if (imm_ == NULL && - manual_compaction_ == NULL && + } else if (imm_ == nullptr && manual_compaction_ == nullptr && !versions_->NeedsCompaction()) { // No work to be done } else { - bg_compaction_scheduled_ = true; + background_compaction_scheduled_ = true; env_->Schedule(&DBImpl::BGWork, this); } } @@ -666,8 +678,8 @@ void DBImpl::BGWork(void* db) { void DBImpl::BackgroundCall() { MutexLock l(&mutex_); - assert(bg_compaction_scheduled_); - if (shutting_down_.Acquire_Load()) { + assert(background_compaction_scheduled_); + if (shutting_down_.load(std::memory_order_acquire)) { // No more background work when shutting down. } else if (!bg_error_.ok()) { // No more background work after a background error. @@ -675,36 +687,35 @@ void DBImpl::BackgroundCall() { BackgroundCompaction(); } - bg_compaction_scheduled_ = false; + background_compaction_scheduled_ = false; // Previous compaction may have produced too many files in a level, // so reschedule another compaction if needed. MaybeScheduleCompaction(); - bg_cv_.SignalAll(); + background_work_finished_signal_.SignalAll(); } void DBImpl::BackgroundCompaction() { mutex_.AssertHeld(); - if (imm_ != NULL) { + if (imm_ != nullptr) { CompactMemTable(); return; } Compaction* c; - bool is_manual = (manual_compaction_ != NULL); + bool is_manual = (manual_compaction_ != nullptr); InternalKey manual_end; if (is_manual) { ManualCompaction* m = manual_compaction_; c = versions_->CompactRange(m->level, m->begin, m->end); - m->done = (c == NULL); - if (c != NULL) { + m->done = (c == nullptr); + if (c != nullptr) { manual_end = c->input(0, c->num_input_files(0) - 1)->largest; } Log(options_.info_log, "Manual compaction at level-%d from %s .. %s; will stop at %s\n", - m->level, - (m->begin ? m->begin->DebugString().c_str() : "(begin)"), + m->level, (m->begin ? m->begin->DebugString().c_str() : "(begin)"), (m->end ? 
m->end->DebugString().c_str() : "(end)"), (m->done ? "(end)" : manual_end.DebugString().c_str())); } else { @@ -712,26 +723,24 @@ void DBImpl::BackgroundCompaction() { } Status status; - if (c == NULL) { + if (c == nullptr) { // Nothing to do } else if (!is_manual && c->IsTrivialMove()) { // Move file to next level assert(c->num_input_files(0) == 1); FileMetaData* f = c->input(0, 0); c->edit()->DeleteFile(c->level(), f->number); - c->edit()->AddFile(c->level() + 1, f->number, f->file_size, - f->smallest, f->largest); + c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest, + f->largest); status = versions_->LogAndApply(c->edit(), &mutex_); if (!status.ok()) { RecordBackgroundError(status); } VersionSet::LevelSummaryStorage tmp; Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n", - static_cast<unsigned long long>(f->number), - c->level() + 1, + static_cast<unsigned long long>(f->number), c->level() + 1, static_cast<unsigned long long>(f->file_size), - status.ToString().c_str(), - versions_->LevelSummary(&tmp)); + status.ToString().c_str(), versions_->LevelSummary(&tmp)); } else { CompactionState* compact = new CompactionState(c); status = DoCompactionWork(compact); @@ -746,11 +755,10 @@ void DBImpl::BackgroundCompaction() { if (status.ok()) { // Done - } else if (shutting_down_.Acquire_Load()) { + } else if (shutting_down_.load(std::memory_order_acquire)) { // Ignore compaction errors found during shutting down } else { - Log(options_.info_log, - "Compaction error: %s", status.ToString().c_str()); + Log(options_.info_log, "Compaction error: %s", status.ToString().c_str()); } if (is_manual) { @@ -764,18 +772,18 @@ void DBImpl::BackgroundCompaction() { m->tmp_storage = manual_end; m->begin = &m->tmp_storage; } - manual_compaction_ = NULL; + manual_compaction_ = nullptr; } } void DBImpl::CleanupCompaction(CompactionState* compact) { mutex_.AssertHeld(); - if (compact->builder != NULL) { + if (compact->builder != nullptr) { // May happen if we get a shutdown call in the middle of compaction compact->builder->Abandon(); delete compact->builder; } else { - assert(compact->outfile == NULL); + assert(compact->outfile == nullptr); } delete compact->outfile; for (size_t i = 0; i < compact->outputs.size(); i++) { @@ -786,8 +794,8 @@ void DBImpl::CleanupCompaction(CompactionState* compact) { } Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) { - assert(compact != NULL); - assert(compact->builder == NULL); + assert(compact != nullptr); + assert(compact->builder == nullptr); uint64_t file_number; { mutex_.Lock(); @@ -812,9 +820,9 @@ Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) { Status DBImpl::FinishCompactionOutputFile(CompactionState* compact, Iterator* input) { - assert(compact != NULL); - assert(compact->outfile != NULL); - assert(compact->builder != NULL); + assert(compact != nullptr); + assert(compact->outfile != nullptr); + assert(compact->builder != nullptr); const uint64_t output_number = compact->current_output()->number; assert(output_number != 0); @@ -831,7 +839,7 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact, compact->current_output()->file_size = current_bytes; compact->total_bytes += current_bytes; delete compact->builder; - compact->builder = NULL; + compact->builder = nullptr; // Finish and check for file errors if (s.ok()) { @@ -841,35 +849,29 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact, s = compact->outfile->Close(); } delete compact->outfile; - compact->outfile = 
NULL; + compact->outfile = nullptr; if (s.ok() && current_entries > 0) { // Verify that the table is usable - Iterator* iter = table_cache_->NewIterator(ReadOptions(), - output_number, - current_bytes); + Iterator* iter = + table_cache_->NewIterator(ReadOptions(), output_number, current_bytes); s = iter->status(); delete iter; if (s.ok()) { - Log(options_.info_log, - "Generated table #%llu@%d: %lld keys, %lld bytes", - (unsigned long long) output_number, - compact->compaction->level(), - (unsigned long long) current_entries, - (unsigned long long) current_bytes); + Log(options_.info_log, "Generated table #%llu@%d: %lld keys, %lld bytes", + (unsigned long long)output_number, compact->compaction->level(), + (unsigned long long)current_entries, + (unsigned long long)current_bytes); } } return s; } - Status DBImpl::InstallCompactionResults(CompactionState* compact) { mutex_.AssertHeld(); - Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes", - compact->compaction->num_input_files(0), - compact->compaction->level(), - compact->compaction->num_input_files(1), - compact->compaction->level() + 1, + Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes", + compact->compaction->num_input_files(0), compact->compaction->level(), + compact->compaction->num_input_files(1), compact->compaction->level() + 1, static_cast<long long>(compact->total_bytes)); // Add compaction outputs @@ -877,9 +879,8 @@ Status DBImpl::InstallCompactionResults(CompactionState* compact) { const int level = compact->compaction->level(); for (size_t i = 0; i < compact->outputs.size(); i++) { const CompactionState::Output& out = compact->outputs[i]; - compact->compaction->edit()->AddFile( - level + 1, - out.number, out.file_size, out.smallest, out.largest); + compact->compaction->edit()->AddFile(level + 1, out.number, out.file_size, + out.smallest, out.largest); } return versions_->LogAndApply(compact->compaction->edit(), &mutex_); } @@ -888,39 +889,40 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { const uint64_t start_micros = env_->NowMicros(); int64_t imm_micros = 0; // Micros spent doing imm_ compactions - Log(options_.info_log, "Compacting %d@%d + %d@%d files", - compact->compaction->num_input_files(0), - compact->compaction->level(), + Log(options_.info_log, "Compacting %d@%d + %d@%d files", + compact->compaction->num_input_files(0), compact->compaction->level(), compact->compaction->num_input_files(1), compact->compaction->level() + 1); assert(versions_->NumLevelFiles(compact->compaction->level()) > 0); - assert(compact->builder == NULL); - assert(compact->outfile == NULL); + assert(compact->builder == nullptr); + assert(compact->outfile == nullptr); if (snapshots_.empty()) { compact->smallest_snapshot = versions_->LastSequence(); } else { - compact->smallest_snapshot = snapshots_.oldest()->number_; + compact->smallest_snapshot = snapshots_.oldest()->sequence_number(); } + Iterator* input = versions_->MakeInputIterator(compact->compaction); + // Release mutex while we're actually doing the compaction work mutex_.Unlock(); - Iterator* input = versions_->MakeInputIterator(compact->compaction); input->SeekToFirst(); Status status; ParsedInternalKey ikey; std::string current_user_key; bool has_current_user_key = false; SequenceNumber last_sequence_for_key = kMaxSequenceNumber; - for (; input->Valid() && !shutting_down_.Acquire_Load(); ) { + while (input->Valid() && !shutting_down_.load(std::memory_order_acquire)) { // Prioritize immutable compaction work - if 
(has_imm_.NoBarrier_Load() != NULL) { + if (has_imm_.load(std::memory_order_relaxed)) { const uint64_t imm_start = env_->NowMicros(); mutex_.Lock(); - if (imm_ != NULL) { + if (imm_ != nullptr) { CompactMemTable(); - bg_cv_.SignalAll(); // Wakeup MakeRoomForWrite() if necessary + // Wake up MakeRoomForWrite() if necessary. + background_work_finished_signal_.SignalAll(); } mutex_.Unlock(); imm_micros += (env_->NowMicros() - imm_start); @@ -928,7 +930,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { Slice key = input->key(); if (compact->compaction->ShouldStopBefore(key) && - compact->builder != NULL) { + compact->builder != nullptr) { status = FinishCompactionOutputFile(compact, input); if (!status.ok()) { break; @@ -944,8 +946,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { last_sequence_for_key = kMaxSequenceNumber; } else { if (!has_current_user_key || - user_comparator()->Compare(ikey.user_key, - Slice(current_user_key)) != 0) { + user_comparator()->Compare(ikey.user_key, Slice(current_user_key)) != + 0) { // First occurrence of this user key current_user_key.assign(ikey.user_key.data(), ikey.user_key.size()); has_current_user_key = true; @@ -954,7 +956,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { if (last_sequence_for_key <= compact->smallest_snapshot) { // Hidden by an newer entry for same user key - drop = true; // (A) + drop = true; // (A) } else if (ikey.type == kTypeDeletion && ikey.sequence <= compact->smallest_snapshot && compact->compaction->IsBaseLevelForKey(ikey.user_key)) { @@ -982,7 +984,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { if (!drop) { // Open output file if necessary - if (compact->builder == NULL) { + if (compact->builder == nullptr) { status = OpenCompactionOutputFile(compact); if (!status.ok()) { break; @@ -1007,17 +1009,17 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { input->Next(); } - if (status.ok() && shutting_down_.Acquire_Load()) { + if (status.ok() && shutting_down_.load(std::memory_order_acquire)) { status = Status::IOError("Deleting DB during compaction"); } - if (status.ok() && compact->builder != NULL) { + if (status.ok() && compact->builder != nullptr) { status = FinishCompactionOutputFile(compact, input); } if (status.ok()) { status = input->status(); } delete input; - input = NULL; + input = nullptr; CompactionStats stats; stats.micros = env_->NowMicros() - start_micros - imm_micros; @@ -1040,34 +1042,37 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { RecordBackgroundError(status); } VersionSet::LevelSummaryStorage tmp; - Log(options_.info_log, - "compacted to: %s", versions_->LevelSummary(&tmp)); + Log(options_.info_log, "compacted to: %s", versions_->LevelSummary(&tmp)); return status; } namespace { + struct IterState { - port::Mutex* mu; - Version* version; - MemTable* mem; - MemTable* imm; + port::Mutex* const mu; + Version* const version GUARDED_BY(mu); + MemTable* const mem GUARDED_BY(mu); + MemTable* const imm GUARDED_BY(mu); + + IterState(port::Mutex* mutex, MemTable* mem, MemTable* imm, Version* version) + : mu(mutex), version(version), mem(mem), imm(imm) {} }; static void CleanupIteratorState(void* arg1, void* arg2) { IterState* state = reinterpret_cast<IterState*>(arg1); state->mu->Lock(); state->mem->Unref(); - if (state->imm != NULL) state->imm->Unref(); + if (state->imm != nullptr) state->imm->Unref(); state->version->Unref(); state->mu->Unlock(); delete state; } -} // namespace + +} // anonymous namespace Iterator* 
DBImpl::NewInternalIterator(const ReadOptions& options, SequenceNumber* latest_snapshot, uint32_t* seed) { - IterState* cleanup = new IterState; mutex_.Lock(); *latest_snapshot = versions_->LastSequence(); @@ -1075,7 +1080,7 @@ Iterator* DBImpl::NewInternalIterator(const ReadOptions& options, std::vector<Iterator*> list; list.push_back(mem_->NewIterator()); mem_->Ref(); - if (imm_ != NULL) { + if (imm_ != nullptr) { list.push_back(imm_->NewIterator()); imm_->Ref(); } @@ -1084,11 +1089,8 @@ Iterator* DBImpl::NewInternalIterator(const ReadOptions& options, NewMergingIterator(&internal_comparator_, &list[0], list.size()); versions_->current()->Ref(); - cleanup->mu = &mutex_; - cleanup->mem = mem_; - cleanup->imm = imm_; - cleanup->version = versions_->current(); - internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, NULL); + IterState* cleanup = new IterState(&mutex_, mem_, imm_, versions_->current()); + internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, nullptr); *seed = ++seed_; mutex_.Unlock(); @@ -1106,14 +1108,14 @@ int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() { return versions_->MaxNextLevelOverlappingBytes(); } -Status DBImpl::Get(const ReadOptions& options, - const Slice& key, +Status DBImpl::Get(const ReadOptions& options, const Slice& key, std::string* value) { Status s; MutexLock l(&mutex_); SequenceNumber snapshot; - if (options.snapshot != NULL) { - snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_; + if (options.snapshot != nullptr) { + snapshot = + static_cast<const SnapshotImpl*>(options.snapshot)->sequence_number(); } else { snapshot = versions_->LastSequence(); } @@ -1122,7 +1124,7 @@ Status DBImpl::Get(const ReadOptions& options, MemTable* imm = imm_; Version* current = versions_->current(); mem->Ref(); - if (imm != NULL) imm->Ref(); + if (imm != nullptr) imm->Ref(); current->Ref(); bool have_stat_update = false; @@ -1135,7 +1137,7 @@ Status DBImpl::Get(const ReadOptions& options, LookupKey lkey(key, snapshot); if (mem->Get(lkey, value, &s)) { // Done - } else if (imm != NULL && imm->Get(lkey, value, &s)) { + } else if (imm != nullptr && imm->Get(lkey, value, &s)) { // Done } else { s = current->Get(options, lkey, value, &stats); @@ -1148,7 +1150,7 @@ Status DBImpl::Get(const ReadOptions& options, MaybeScheduleCompaction(); } mem->Unref(); - if (imm != NULL) imm->Unref(); + if (imm != nullptr) imm->Unref(); current->Unref(); return s; } @@ -1157,12 +1159,12 @@ Iterator* DBImpl::NewIterator(const ReadOptions& options) { SequenceNumber latest_snapshot; uint32_t seed; Iterator* iter = NewInternalIterator(options, &latest_snapshot, &seed); - return NewDBIterator( - this, user_comparator(), iter, - (options.snapshot != NULL - ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_ - : latest_snapshot), - seed); + return NewDBIterator(this, user_comparator(), iter, + (options.snapshot != nullptr + ? 
static_cast<const SnapshotImpl*>(options.snapshot) + ->sequence_number() + : latest_snapshot), + seed); } void DBImpl::RecordReadSample(Slice key) { @@ -1177,9 +1179,9 @@ const Snapshot* DBImpl::GetSnapshot() { return snapshots_.New(versions_->LastSequence()); } -void DBImpl::ReleaseSnapshot(const Snapshot* s) { +void DBImpl::ReleaseSnapshot(const Snapshot* snapshot) { MutexLock l(&mutex_); - snapshots_.Delete(reinterpret_cast<const SnapshotImpl*>(s)); + snapshots_.Delete(static_cast<const SnapshotImpl*>(snapshot)); } // Convenience methods @@ -1191,9 +1193,9 @@ Status DBImpl::Delete(const WriteOptions& options, const Slice& key) { return DB::Delete(options, key); } -Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) { +Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) { Writer w(&mutex_); - w.batch = my_batch; + w.batch = updates; w.sync = options.sync; w.done = false; @@ -1207,13 +1209,13 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) { } // May temporarily unlock and wait. - Status status = MakeRoomForWrite(my_batch == NULL); + Status status = MakeRoomForWrite(updates == nullptr); uint64_t last_sequence = versions_->LastSequence(); Writer* last_writer = &w; - if (status.ok() && my_batch != NULL) { // NULL batch is for compactions - WriteBatch* updates = BuildBatchGroup(&last_writer); - WriteBatchInternal::SetSequence(updates, last_sequence + 1); - last_sequence += WriteBatchInternal::Count(updates); + if (status.ok() && updates != nullptr) { // nullptr batch is for compactions + WriteBatch* write_batch = BuildBatchGroup(&last_writer); + WriteBatchInternal::SetSequence(write_batch, last_sequence + 1); + last_sequence += WriteBatchInternal::Count(write_batch); // Add to log and apply to memtable. We can release the lock // during this phase since &w is currently responsible for logging @@ -1221,7 +1223,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) { // into mem_. { mutex_.Unlock(); - status = log_->AddRecord(WriteBatchInternal::Contents(updates)); + status = log_->AddRecord(WriteBatchInternal::Contents(write_batch)); bool sync_error = false; if (status.ok() && options.sync) { status = logfile_->Sync(); @@ -1230,7 +1232,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) { } } if (status.ok()) { - status = WriteBatchInternal::InsertInto(updates, mem_); + status = WriteBatchInternal::InsertInto(write_batch, mem_); } mutex_.Lock(); if (sync_error) { @@ -1240,7 +1242,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) { RecordBackgroundError(status); } } - if (updates == tmp_batch_) tmp_batch_->Clear(); + if (write_batch == tmp_batch_) tmp_batch_->Clear(); versions_->SetLastSequence(last_sequence); } @@ -1265,12 +1267,13 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) { } // REQUIRES: Writer list must be non-empty -// REQUIRES: First writer must have a non-NULL batch +// REQUIRES: First writer must have a non-null batch WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) { + mutex_.AssertHeld(); assert(!writers_.empty()); Writer* first = writers_.front(); WriteBatch* result = first->batch; - assert(result != NULL); + assert(result != nullptr); size_t size = WriteBatchInternal::ByteSize(first->batch); @@ -1278,8 +1281,8 @@ WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) { // original write is small, limit the growth so we do not slow // down the small write too much. 
size_t max_size = 1 << 20; - if (size <= (128<<10)) { - max_size = size + (128<<10); + if (size <= (128 << 10)) { + max_size = size + (128 << 10); } *last_writer = first; @@ -1292,7 +1295,7 @@ WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) { break; } - if (w->batch != NULL) { + if (w->batch != nullptr) { size += WriteBatchInternal::ByteSize(w->batch); if (size > max_size) { // Do not make batch too big @@ -1325,9 +1328,8 @@ Status DBImpl::MakeRoomForWrite(bool force) { // Yield previous error s = bg_error_; break; - } else if ( - allow_delay && - versions_->NumLevelFiles(0) >= config::kL0_SlowdownWritesTrigger) { + } else if (allow_delay && versions_->NumLevelFiles(0) >= + config::kL0_SlowdownWritesTrigger) { // We are getting close to hitting a hard limit on the number of // L0 files. Rather than delaying a single write by several // seconds when we hit the hard limit, start delaying each @@ -1342,20 +1344,20 @@ Status DBImpl::MakeRoomForWrite(bool force) { (mem_->ApproximateMemoryUsage() <= options_.write_buffer_size)) { // There is room in current memtable break; - } else if (imm_ != NULL) { + } else if (imm_ != nullptr) { // We have filled up the current memtable, but the previous // one is still being compacted, so we wait. Log(options_.info_log, "Current memtable full; waiting...\n"); - bg_cv_.Wait(); + background_work_finished_signal_.Wait(); } else if (versions_->NumLevelFiles(0) >= config::kL0_StopWritesTrigger) { // There are too many level-0 files. Log(options_.info_log, "Too many L0 files; waiting...\n"); - bg_cv_.Wait(); + background_work_finished_signal_.Wait(); } else { // Attempt to switch to a new memtable and trigger compaction of old assert(versions_->PrevLogNumber() == 0); uint64_t new_log_number = versions_->NewFileNumber(); - WritableFile* lfile = NULL; + WritableFile* lfile = nullptr; s = env_->NewWritableFile(LogFileName(dbname_, new_log_number), &lfile); if (!s.ok()) { // Avoid chewing through file number space in a tight loop. 
@@ -1368,10 +1370,10 @@ Status DBImpl::MakeRoomForWrite(bool force) { logfile_number_ = new_log_number; log_ = new log::Writer(lfile); imm_ = mem_; - has_imm_.Release_Store(imm_); + has_imm_.store(true, std::memory_order_release); mem_ = new MemTable(internal_comparator_); mem_->Ref(); - force = false; // Do not force another compaction if have room + force = false; // Do not force another compaction if have room MaybeScheduleCompaction(); } } @@ -1405,21 +1407,16 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) { snprintf(buf, sizeof(buf), " Compactions\n" "Level Files Size(MB) Time(sec) Read(MB) Write(MB)\n" - "--------------------------------------------------\n" - ); + "--------------------------------------------------\n"); value->append(buf); for (int level = 0; level < config::kNumLevels; level++) { int files = versions_->NumLevelFiles(level); if (stats_[level].micros > 0 || files > 0) { - snprintf( - buf, sizeof(buf), - "%3d %8d %8.0f %9.0f %8.0f %9.0f\n", - level, - files, - versions_->NumLevelBytes(level) / 1048576.0, - stats_[level].micros / 1e6, - stats_[level].bytes_read / 1048576.0, - stats_[level].bytes_written / 1048576.0); + snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n", level, + files, versions_->NumLevelBytes(level) / 1048576.0, + stats_[level].micros / 1e6, + stats_[level].bytes_read / 1048576.0, + stats_[level].bytes_written / 1048576.0); value->append(buf); } } @@ -1445,16 +1442,11 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) { return false; } -void DBImpl::GetApproximateSizes( - const Range* range, int n, - uint64_t* sizes) { +void DBImpl::GetApproximateSizes(const Range* range, int n, uint64_t* sizes) { // TODO(opt): better implementation - Version* v; - { - MutexLock l(&mutex_); - versions_->current()->Ref(); - v = versions_->current(); - } + MutexLock l(&mutex_); + Version* v = versions_->current(); + v->Ref(); for (int i = 0; i < n; i++) { // Convert user_key into a corresponding internal key. @@ -1465,10 +1457,7 @@ void DBImpl::GetApproximateSizes( sizes[i] = (limit >= start ? limit - start : 0); } - { - MutexLock l(&mutex_); - v->Unref(); - } + v->Unref(); } // Default implementations of convenience methods that subclasses of DB @@ -1485,11 +1474,10 @@ Status DB::Delete(const WriteOptions& opt, const Slice& key) { return Write(opt, &batch); } -DB::~DB() { } +DB::~DB() = default; -Status DB::Open(const Options& options, const std::string& dbname, - DB** dbptr) { - *dbptr = NULL; +Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) { + *dbptr = nullptr; DBImpl* impl = new DBImpl(options, dbname); impl->mutex_.Lock(); @@ -1497,7 +1485,7 @@ Status DB::Open(const Options& options, const std::string& dbname, // Recover handles create_if_missing, error_if_exists bool save_manifest = false; Status s = impl->Recover(&edit, &save_manifest); - if (s.ok() && impl->mem_ == NULL) { + if (s.ok() && impl->mem_ == nullptr) { // Create new log and a corresponding memtable. 
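
DB::Open() and DestroyDB(), reworked above, are the public entry points all of these internals serve. A minimal C++ usage sketch; the path and keys are placeholders, and "leveldb.stats" is the property string formatted by DBImpl::GetProperty() in the hunk above:

    #include <cassert>
    #include <iostream>
    #include <string>
    #include "leveldb/db.h"

    int main() {
      leveldb::Options options;
      options.create_if_missing = true;

      leveldb::DB* db = nullptr;
      leveldb::Status s = leveldb::DB::Open(options, "/tmp/leveldb_example", &db);
      assert(s.ok());

      s = db->Put(leveldb::WriteOptions(), "foo", "hello");
      assert(s.ok());

      std::string value;
      s = db->Get(leveldb::ReadOptions(), "foo", &value);
      assert(s.ok() && value == "hello");

      // Per-level compaction statistics, as assembled by GetProperty().
      std::string stats;
      if (db->GetProperty("leveldb.stats", &stats)) {
        std::cout << stats << std::endl;
      }

      delete db;
      leveldb::DestroyDB("/tmp/leveldb_example", leveldb::Options());
      return 0;
    }
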
uint64_t new_log_number = impl->versions_->NewFileNumber(); WritableFile* lfile; @@ -1523,7 +1511,7 @@ Status DB::Open(const Options& options, const std::string& dbname, } impl->mutex_.Unlock(); if (s.ok()) { - assert(impl->mem_ != NULL); + assert(impl->mem_ != nullptr); *dbptr = impl; } else { delete impl; @@ -1531,21 +1519,20 @@ Status DB::Open(const Options& options, const std::string& dbname, return s; } -Snapshot::~Snapshot() { -} +Snapshot::~Snapshot() = default; Status DestroyDB(const std::string& dbname, const Options& options) { Env* env = options.env; std::vector<std::string> filenames; - // Ignore error in case directory does not exist - env->GetChildren(dbname, &filenames); - if (filenames.empty()) { + Status result = env->GetChildren(dbname, &filenames); + if (!result.ok()) { + // Ignore error in case directory does not exist return Status::OK(); } FileLock* lock; const std::string lockname = LockFileName(dbname); - Status result = env->LockFile(lockname, &lock); + result = env->LockFile(lockname, &lock); if (result.ok()) { uint64_t number; FileType type; diff --git a/src/leveldb/db/db_impl.h b/src/leveldb/db/db_impl.h index 8ff323e728..685735c733 100644 --- a/src/leveldb/db/db_impl.h +++ b/src/leveldb/db/db_impl.h @@ -5,8 +5,11 @@ #ifndef STORAGE_LEVELDB_DB_DB_IMPL_H_ #define STORAGE_LEVELDB_DB_DB_IMPL_H_ +#include <atomic> #include <deque> #include <set> +#include <string> + #include "db/dbformat.h" #include "db/log_writer.h" #include "db/snapshot.h" @@ -26,21 +29,25 @@ class VersionSet; class DBImpl : public DB { public: DBImpl(const Options& options, const std::string& dbname); - virtual ~DBImpl(); + + DBImpl(const DBImpl&) = delete; + DBImpl& operator=(const DBImpl&) = delete; + + ~DBImpl() override; // Implementations of the DB interface - virtual Status Put(const WriteOptions&, const Slice& key, const Slice& value); - virtual Status Delete(const WriteOptions&, const Slice& key); - virtual Status Write(const WriteOptions& options, WriteBatch* updates); - virtual Status Get(const ReadOptions& options, - const Slice& key, - std::string* value); - virtual Iterator* NewIterator(const ReadOptions&); - virtual const Snapshot* GetSnapshot(); - virtual void ReleaseSnapshot(const Snapshot* snapshot); - virtual bool GetProperty(const Slice& property, std::string* value); - virtual void GetApproximateSizes(const Range* range, int n, uint64_t* sizes); - virtual void CompactRange(const Slice* begin, const Slice* end); + Status Put(const WriteOptions&, const Slice& key, + const Slice& value) override; + Status Delete(const WriteOptions&, const Slice& key) override; + Status Write(const WriteOptions& options, WriteBatch* updates) override; + Status Get(const ReadOptions& options, const Slice& key, + std::string* value) override; + Iterator* NewIterator(const ReadOptions&) override; + const Snapshot* GetSnapshot() override; + void ReleaseSnapshot(const Snapshot* snapshot) override; + bool GetProperty(const Slice& property, std::string* value) override; + void GetApproximateSizes(const Range* range, int n, uint64_t* sizes) override; + void CompactRange(const Slice* begin, const Slice* end) override; // Extra methods (for testing) that are not in the public DB interface @@ -69,6 +76,31 @@ class DBImpl : public DB { struct CompactionState; struct Writer; + // Information for a manual compaction + struct ManualCompaction { + int level; + bool done; + const InternalKey* begin; // null means beginning of key range + const InternalKey* end; // null means end of key range + InternalKey 
tmp_storage; // Used to keep track of compaction progress + }; + + // Per level compaction stats. stats_[level] stores the stats for + // compactions that produced data for the specified "level". + struct CompactionStats { + CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {} + + void Add(const CompactionStats& c) { + this->micros += c.micros; + this->bytes_read += c.bytes_read; + this->bytes_written += c.bytes_written; + } + + int64_t micros; + int64_t bytes_read; + int64_t bytes_written; + }; + Iterator* NewInternalIterator(const ReadOptions&, SequenceNumber* latest_snapshot, uint32_t* seed); @@ -84,7 +116,7 @@ class DBImpl : public DB { void MaybeIgnoreError(Status* s) const; // Delete any unneeded files and stale in-memory entries. - void DeleteObsoleteFiles(); + void DeleteObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Compact the in-memory write buffer to disk. Switches to a new // log-file/memtable and writes a new descriptor iff successful. @@ -100,14 +132,15 @@ class DBImpl : public DB { Status MakeRoomForWrite(bool force /* compact even if there is room? */) EXCLUSIVE_LOCKS_REQUIRED(mutex_); - WriteBatch* BuildBatchGroup(Writer** last_writer); + WriteBatch* BuildBatchGroup(Writer** last_writer) + EXCLUSIVE_LOCKS_REQUIRED(mutex_); void RecordBackgroundError(const Status& s); void MaybeScheduleCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_); static void BGWork(void* db); void BackgroundCall(); - void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_); void CleanupCompaction(CompactionState* compact) EXCLUSIVE_LOCKS_REQUIRED(mutex_); Status DoCompactionWork(CompactionState* compact) @@ -118,93 +151,66 @@ class DBImpl : public DB { Status InstallCompactionResults(CompactionState* compact) EXCLUSIVE_LOCKS_REQUIRED(mutex_); + const Comparator* user_comparator() const { + return internal_comparator_.user_comparator(); + } + // Constant after construction Env* const env_; const InternalKeyComparator internal_comparator_; const InternalFilterPolicy internal_filter_policy_; const Options options_; // options_.comparator == &internal_comparator_ - bool owns_info_log_; - bool owns_cache_; + const bool owns_info_log_; + const bool owns_cache_; const std::string dbname_; // table_cache_ provides its own synchronization - TableCache* table_cache_; + TableCache* const table_cache_; - // Lock over the persistent DB state. Non-NULL iff successfully acquired. + // Lock over the persistent DB state. Non-null iff successfully acquired. FileLock* db_lock_; // State below is protected by mutex_ port::Mutex mutex_; - port::AtomicPointer shutting_down_; - port::CondVar bg_cv_; // Signalled when background work finishes + std::atomic<bool> shutting_down_; + port::CondVar background_work_finished_signal_ GUARDED_BY(mutex_); MemTable* mem_; - MemTable* imm_; // Memtable being compacted - port::AtomicPointer has_imm_; // So bg thread can detect non-NULL imm_ + MemTable* imm_ GUARDED_BY(mutex_); // Memtable being compacted + std::atomic<bool> has_imm_; // So bg thread can detect non-null imm_ WritableFile* logfile_; - uint64_t logfile_number_; + uint64_t logfile_number_ GUARDED_BY(mutex_); log::Writer* log_; - uint32_t seed_; // For sampling. + uint32_t seed_ GUARDED_BY(mutex_); // For sampling. // Queue of writers. 
- std::deque<Writer*> writers_; - WriteBatch* tmp_batch_; + std::deque<Writer*> writers_ GUARDED_BY(mutex_); + WriteBatch* tmp_batch_ GUARDED_BY(mutex_); - SnapshotList snapshots_; + SnapshotList snapshots_ GUARDED_BY(mutex_); // Set of table files to protect from deletion because they are // part of ongoing compactions. - std::set<uint64_t> pending_outputs_; + std::set<uint64_t> pending_outputs_ GUARDED_BY(mutex_); // Has a background compaction been scheduled or is running? - bool bg_compaction_scheduled_; + bool background_compaction_scheduled_ GUARDED_BY(mutex_); - // Information for a manual compaction - struct ManualCompaction { - int level; - bool done; - const InternalKey* begin; // NULL means beginning of key range - const InternalKey* end; // NULL means end of key range - InternalKey tmp_storage; // Used to keep track of compaction progress - }; - ManualCompaction* manual_compaction_; + ManualCompaction* manual_compaction_ GUARDED_BY(mutex_); - VersionSet* versions_; + VersionSet* const versions_ GUARDED_BY(mutex_); // Have we encountered a background error in paranoid mode? - Status bg_error_; - - // Per level compaction stats. stats_[level] stores the stats for - // compactions that produced data for the specified "level". - struct CompactionStats { - int64_t micros; - int64_t bytes_read; - int64_t bytes_written; - - CompactionStats() : micros(0), bytes_read(0), bytes_written(0) { } + Status bg_error_ GUARDED_BY(mutex_); - void Add(const CompactionStats& c) { - this->micros += c.micros; - this->bytes_read += c.bytes_read; - this->bytes_written += c.bytes_written; - } - }; - CompactionStats stats_[config::kNumLevels]; - - // No copying allowed - DBImpl(const DBImpl&); - void operator=(const DBImpl&); - - const Comparator* user_comparator() const { - return internal_comparator_.user_comparator(); - } + CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_); }; // Sanitize db options. The caller should delete result.info_log if // it is not equal to src.info_log. -extern Options SanitizeOptions(const std::string& db, - const InternalKeyComparator* icmp, - const InternalFilterPolicy* ipolicy, - const Options& src); +Options SanitizeOptions(const std::string& db, + const InternalKeyComparator* icmp, + const InternalFilterPolicy* ipolicy, + const Options& src); } // namespace leveldb diff --git a/src/leveldb/db/db_iter.cc b/src/leveldb/db/db_iter.cc index 3b2035e9e3..98715a9502 100644 --- a/src/leveldb/db/db_iter.cc +++ b/src/leveldb/db/db_iter.cc @@ -4,9 +4,9 @@ #include "db/db_iter.h" -#include "db/filename.h" #include "db/db_impl.h" #include "db/dbformat.h" +#include "db/filename.h" #include "leveldb/env.h" #include "leveldb/iterator.h" #include "port/port.h" @@ -36,17 +36,14 @@ namespace { // combines multiple entries for the same userkey found in the DB // representation into a single entry while accounting for sequence // numbers, deletion markers, overwrites, etc. -class DBIter: public Iterator { +class DBIter : public Iterator { public: // Which direction is the iterator currently moving? // (1) When moving forward, the internal iterator is positioned at // the exact entry that yields this->key(), this->value() // (2) When moving backwards, the internal iterator is positioned // just before all entries whose user key == this->key(). 
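
The db_impl.h hunks just above attach clang thread-safety annotations (GUARDED_BY, EXCLUSIVE_LOCKS_REQUIRED) to members and methods protected by mutex_. The sketch below shows the same convention on a toy class; the macros here are simplified stand-ins for the real ones in port/thread_annotations.h, and the analysis only becomes active under clang with -Wthread-safety and an annotated mutex type such as leveldb's port::Mutex.

    #include <mutex>

    #if defined(__clang__)
    #define GUARDED_BY(m) __attribute__((guarded_by(m)))
    #define EXCLUSIVE_LOCKS_REQUIRED(m) __attribute__((exclusive_locks_required(m)))
    #else
    #define GUARDED_BY(m)
    #define EXCLUSIVE_LOCKS_REQUIRED(m)
    #endif

    class Counter {
     public:
      // Documents (and lets clang verify) that callers already hold mu_.
      void IncrementLocked() EXCLUSIVE_LOCKS_REQUIRED(mu_) { value_ += 1; }

      void Increment() {
        std::lock_guard<std::mutex> lock(mu_);
        IncrementLocked();
      }

      int Read() {
        std::lock_guard<std::mutex> lock(mu_);
        return value_;
      }

     private:
      std::mutex mu_;
      int value_ GUARDED_BY(mu_) = 0;  // may only be touched with mu_ held
    };

    int main() {
      Counter c;
      c.Increment();
      return c.Read() == 1 ? 0 : 1;
    }
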
- enum Direction { - kForward, - kReverse - }; + enum Direction { kForward, kReverse }; DBIter(DBImpl* db, const Comparator* cmp, Iterator* iter, SequenceNumber s, uint32_t seed) @@ -57,21 +54,22 @@ class DBIter: public Iterator { direction_(kForward), valid_(false), rnd_(seed), - bytes_counter_(RandomPeriod()) { - } - virtual ~DBIter() { - delete iter_; - } - virtual bool Valid() const { return valid_; } - virtual Slice key() const { + bytes_until_read_sampling_(RandomCompactionPeriod()) {} + + DBIter(const DBIter&) = delete; + DBIter& operator=(const DBIter&) = delete; + + ~DBIter() override { delete iter_; } + bool Valid() const override { return valid_; } + Slice key() const override { assert(valid_); return (direction_ == kForward) ? ExtractUserKey(iter_->key()) : saved_key_; } - virtual Slice value() const { + Slice value() const override { assert(valid_); return (direction_ == kForward) ? iter_->value() : saved_value_; } - virtual Status status() const { + Status status() const override { if (status_.ok()) { return iter_->status(); } else { @@ -79,11 +77,11 @@ class DBIter: public Iterator { } } - virtual void Next(); - virtual void Prev(); - virtual void Seek(const Slice& target); - virtual void SeekToFirst(); - virtual void SeekToLast(); + void Next() override; + void Prev() override; + void Seek(const Slice& target) override; + void SeekToFirst() override; + void SeekToLast() override; private: void FindNextUserEntry(bool skipping, std::string* skip); @@ -103,38 +101,35 @@ class DBIter: public Iterator { } } - // Pick next gap with average value of config::kReadBytesPeriod. - ssize_t RandomPeriod() { - return rnd_.Uniform(2*config::kReadBytesPeriod); + // Picks the number of bytes that can be read until a compaction is scheduled. + size_t RandomCompactionPeriod() { + return rnd_.Uniform(2 * config::kReadBytesPeriod); } DBImpl* db_; const Comparator* const user_comparator_; Iterator* const iter_; SequenceNumber const sequence_; - Status status_; - std::string saved_key_; // == current key when direction_==kReverse - std::string saved_value_; // == current raw value when direction_==kReverse + std::string saved_key_; // == current key when direction_==kReverse + std::string saved_value_; // == current raw value when direction_==kReverse Direction direction_; bool valid_; - Random rnd_; - ssize_t bytes_counter_; - - // No copying allowed - DBIter(const DBIter&); - void operator=(const DBIter&); + size_t bytes_until_read_sampling_; }; inline bool DBIter::ParseKey(ParsedInternalKey* ikey) { Slice k = iter_->key(); - ssize_t n = k.size() + iter_->value().size(); - bytes_counter_ -= n; - while (bytes_counter_ < 0) { - bytes_counter_ += RandomPeriod(); + + size_t bytes_read = k.size() + iter_->value().size(); + while (bytes_until_read_sampling_ < bytes_read) { + bytes_until_read_sampling_ += RandomCompactionPeriod(); db_->RecordReadSample(k); } + assert(bytes_until_read_sampling_ >= bytes_read); + bytes_until_read_sampling_ -= bytes_read; + if (!ParseInternalKey(k, ikey)) { status_ = Status::Corruption("corrupted internal key in DBIter"); return false; @@ -165,6 +160,15 @@ void DBIter::Next() { } else { // Store in saved_key_ the current key so we skip it below. SaveKey(ExtractUserKey(iter_->key()), &saved_key_); + + // iter_ is pointing to current key. We can now safely move to the next to + // avoid checking current key. 
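
The DBIter::ParseKey hunk above reworks read sampling: bytes_until_read_sampling_ counts down by the bytes scanned, and whenever it would go negative a sample is recorded and the counter is topped up by a random gap whose average is config::kReadBytesPeriod. Below is an illustrative stand-in for that countdown, not the patch's code; it uses <random> and an arbitrary period instead of leveldb's Random and config constant.

    #include <cstddef>
    #include <iostream>
    #include <random>

    class ReadSampler {
     public:
      explicit ReadSampler(size_t period) : period_(period), gen_(42) {
        bytes_until_sample_ = NextGap();
      }

      // Returns how many samples this read triggered (usually 0 or 1).
      int BytesRead(size_t n) {
        int samples = 0;
        while (bytes_until_sample_ < n) {
          bytes_until_sample_ += NextGap();
          ++samples;  // the real code calls db_->RecordReadSample(key) here
        }
        bytes_until_sample_ -= n;
        return samples;
      }

     private:
      // Gap drawn uniformly from [0, 2*period), so the average gap is period.
      size_t NextGap() {
        return std::uniform_int_distribution<size_t>(0, 2 * period_ - 1)(gen_);
      }

      size_t period_;
      size_t bytes_until_sample_;
      std::mt19937 gen_;
    };

    int main() {
      ReadSampler sampler(1 << 20);  // arbitrary period for the sketch
      int total = 0;
      for (int i = 0; i < 10000; ++i) total += sampler.BytesRead(1000);
      // ~10,000,000 bytes read / ~1 MiB average gap -> roughly 9-10 samples.
      std::cout << "samples: " << total << "\n";
      return 0;
    }
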
+ iter_->Next(); + if (!iter_->Valid()) { + valid_ = false; + saved_key_.clear(); + return; + } } FindNextUserEntry(true, &saved_key_); @@ -218,8 +222,8 @@ void DBIter::Prev() { ClearSavedValue(); return; } - if (user_comparator_->Compare(ExtractUserKey(iter_->key()), - saved_key_) < 0) { + if (user_comparator_->Compare(ExtractUserKey(iter_->key()), saved_key_) < + 0) { break; } } @@ -275,8 +279,8 @@ void DBIter::Seek(const Slice& target) { direction_ = kForward; ClearSavedValue(); saved_key_.clear(); - AppendInternalKey( - &saved_key_, ParsedInternalKey(target, sequence_, kValueTypeForSeek)); + AppendInternalKey(&saved_key_, + ParsedInternalKey(target, sequence_, kValueTypeForSeek)); iter_->Seek(saved_key_); if (iter_->Valid()) { FindNextUserEntry(false, &saved_key_ /* temporary storage */); @@ -305,12 +309,9 @@ void DBIter::SeekToLast() { } // anonymous namespace -Iterator* NewDBIterator( - DBImpl* db, - const Comparator* user_key_comparator, - Iterator* internal_iter, - SequenceNumber sequence, - uint32_t seed) { +Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator, + Iterator* internal_iter, SequenceNumber sequence, + uint32_t seed) { return new DBIter(db, user_key_comparator, internal_iter, sequence, seed); } diff --git a/src/leveldb/db/db_iter.h b/src/leveldb/db/db_iter.h index 04927e937b..fd93e912a0 100644 --- a/src/leveldb/db/db_iter.h +++ b/src/leveldb/db/db_iter.h @@ -6,8 +6,9 @@ #define STORAGE_LEVELDB_DB_DB_ITER_H_ #include <stdint.h> -#include "leveldb/db.h" + #include "db/dbformat.h" +#include "leveldb/db.h" namespace leveldb { @@ -16,12 +17,9 @@ class DBImpl; // Return a new iterator that converts internal keys (yielded by // "*internal_iter") that were live at the specified "sequence" number // into appropriate user keys. -extern Iterator* NewDBIterator( - DBImpl* db, - const Comparator* user_key_comparator, - Iterator* internal_iter, - SequenceNumber sequence, - uint32_t seed); +Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator, + Iterator* internal_iter, SequenceNumber sequence, + uint32_t seed); } // namespace leveldb diff --git a/src/leveldb/db/db_test.cc b/src/leveldb/db/db_test.cc index a0b08bc19c..beb1d3bdef 100644 --- a/src/leveldb/db/db_test.cc +++ b/src/leveldb/db/db_test.cc @@ -3,14 +3,20 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "leveldb/db.h" -#include "leveldb/filter_policy.h" + +#include <atomic> +#include <string> + #include "db/db_impl.h" #include "db/filename.h" #include "db/version_set.h" #include "db/write_batch_internal.h" #include "leveldb/cache.h" #include "leveldb/env.h" +#include "leveldb/filter_policy.h" #include "leveldb/table.h" +#include "port/port.h" +#include "port/thread_annotations.h" #include "util/hash.h" #include "util/logging.h" #include "util/mutexlock.h" @@ -25,83 +31,116 @@ static std::string RandomString(Random* rnd, int len) { return r; } +static std::string RandomKey(Random* rnd) { + int len = + (rnd->OneIn(3) ? 1 // Short sometimes to encourage collisions + : (rnd->OneIn(100) ? 
rnd->Skewed(10) : rnd->Uniform(10))); + return test::RandomKey(rnd, len); +} + namespace { class AtomicCounter { - private: - port::Mutex mu_; - int count_; public: - AtomicCounter() : count_(0) { } - void Increment() { - IncrementBy(1); - } - void IncrementBy(int count) { + AtomicCounter() : count_(0) {} + void Increment() { IncrementBy(1); } + void IncrementBy(int count) LOCKS_EXCLUDED(mu_) { MutexLock l(&mu_); count_ += count; } - int Read() { + int Read() LOCKS_EXCLUDED(mu_) { MutexLock l(&mu_); return count_; } - void Reset() { + void Reset() LOCKS_EXCLUDED(mu_) { MutexLock l(&mu_); count_ = 0; } + + private: + port::Mutex mu_; + int count_ GUARDED_BY(mu_); }; void DelayMilliseconds(int millis) { Env::Default()->SleepForMicroseconds(millis * 1000); } -} +} // namespace + +// Test Env to override default Env behavior for testing. +class TestEnv : public EnvWrapper { + public: + explicit TestEnv(Env* base) : EnvWrapper(base), ignore_dot_files_(false) {} + + void SetIgnoreDotFiles(bool ignored) { ignore_dot_files_ = ignored; } + + Status GetChildren(const std::string& dir, + std::vector<std::string>* result) override { + Status s = target()->GetChildren(dir, result); + if (!s.ok() || !ignore_dot_files_) { + return s; + } + + std::vector<std::string>::iterator it = result->begin(); + while (it != result->end()) { + if ((*it == ".") || (*it == "..")) { + it = result->erase(it); + } else { + ++it; + } + } + + return s; + } + + private: + bool ignore_dot_files_; +}; -// Special Env used to delay background operations +// Special Env used to delay background operations. class SpecialEnv : public EnvWrapper { public: - // sstable/log Sync() calls are blocked while this pointer is non-NULL. - port::AtomicPointer delay_data_sync_; + // sstable/log Sync() calls are blocked while this pointer is non-null. + std::atomic<bool> delay_data_sync_; // sstable/log Sync() calls return an error. - port::AtomicPointer data_sync_error_; + std::atomic<bool> data_sync_error_; - // Simulate no-space errors while this pointer is non-NULL. - port::AtomicPointer no_space_; + // Simulate no-space errors while this pointer is non-null. + std::atomic<bool> no_space_; - // Simulate non-writable file system while this pointer is non-NULL - port::AtomicPointer non_writable_; + // Simulate non-writable file system while this pointer is non-null. + std::atomic<bool> non_writable_; - // Force sync of manifest files to fail while this pointer is non-NULL - port::AtomicPointer manifest_sync_error_; + // Force sync of manifest files to fail while this pointer is non-null. + std::atomic<bool> manifest_sync_error_; - // Force write to manifest files to fail while this pointer is non-NULL - port::AtomicPointer manifest_write_error_; + // Force write to manifest files to fail while this pointer is non-null. 
+ std::atomic<bool> manifest_write_error_; bool count_random_reads_; AtomicCounter random_read_counter_; - explicit SpecialEnv(Env* base) : EnvWrapper(base) { - delay_data_sync_.Release_Store(NULL); - data_sync_error_.Release_Store(NULL); - no_space_.Release_Store(NULL); - non_writable_.Release_Store(NULL); - count_random_reads_ = false; - manifest_sync_error_.Release_Store(NULL); - manifest_write_error_.Release_Store(NULL); - } + explicit SpecialEnv(Env* base) + : EnvWrapper(base), + delay_data_sync_(false), + data_sync_error_(false), + no_space_(false), + non_writable_(false), + manifest_sync_error_(false), + manifest_write_error_(false), + count_random_reads_(false) {} Status NewWritableFile(const std::string& f, WritableFile** r) { class DataFile : public WritableFile { private: - SpecialEnv* env_; - WritableFile* base_; + SpecialEnv* const env_; + WritableFile* const base_; public: - DataFile(SpecialEnv* env, WritableFile* base) - : env_(env), - base_(base) { - } + DataFile(SpecialEnv* env, WritableFile* base) : env_(env), base_(base) {} ~DataFile() { delete base_; } Status Append(const Slice& data) { - if (env_->no_space_.Acquire_Load() != NULL) { + if (env_->no_space_.load(std::memory_order_acquire)) { // Drop writes on the floor return Status::OK(); } else { @@ -111,24 +150,26 @@ class SpecialEnv : public EnvWrapper { Status Close() { return base_->Close(); } Status Flush() { return base_->Flush(); } Status Sync() { - if (env_->data_sync_error_.Acquire_Load() != NULL) { + if (env_->data_sync_error_.load(std::memory_order_acquire)) { return Status::IOError("simulated data sync error"); } - while (env_->delay_data_sync_.Acquire_Load() != NULL) { + while (env_->delay_data_sync_.load(std::memory_order_acquire)) { DelayMilliseconds(100); } return base_->Sync(); } + std::string GetName() const override { return ""; } }; class ManifestFile : public WritableFile { private: SpecialEnv* env_; WritableFile* base_; + public: - ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { } + ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) {} ~ManifestFile() { delete base_; } Status Append(const Slice& data) { - if (env_->manifest_write_error_.Acquire_Load() != NULL) { + if (env_->manifest_write_error_.load(std::memory_order_acquire)) { return Status::IOError("simulated writer error"); } else { return base_->Append(data); @@ -137,24 +178,25 @@ class SpecialEnv : public EnvWrapper { Status Close() { return base_->Close(); } Status Flush() { return base_->Flush(); } Status Sync() { - if (env_->manifest_sync_error_.Acquire_Load() != NULL) { + if (env_->manifest_sync_error_.load(std::memory_order_acquire)) { return Status::IOError("simulated sync error"); } else { return base_->Sync(); } } + std::string GetName() const override { return ""; } }; - if (non_writable_.Acquire_Load() != NULL) { + if (non_writable_.load(std::memory_order_acquire)) { return Status::IOError("simulated write error"); } Status s = target()->NewWritableFile(f, r); if (s.ok()) { - if (strstr(f.c_str(), ".ldb") != NULL || - strstr(f.c_str(), ".log") != NULL) { + if (strstr(f.c_str(), ".ldb") != nullptr || + strstr(f.c_str(), ".log") != nullptr) { *r = new DataFile(this, *r); - } else if (strstr(f.c_str(), "MANIFEST") != NULL) { + } else if (strstr(f.c_str(), "MANIFEST") != nullptr) { *r = new ManifestFile(this, *r); } } @@ -166,16 +208,17 @@ class SpecialEnv : public EnvWrapper { private: RandomAccessFile* target_; AtomicCounter* counter_; + public: CountingFile(RandomAccessFile* target, 
AtomicCounter* counter) - : target_(target), counter_(counter) { - } - virtual ~CountingFile() { delete target_; } - virtual Status Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const { + : target_(target), counter_(counter) {} + ~CountingFile() override { delete target_; } + Status Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const override { counter_->Increment(); return target_->Read(offset, n, result, scratch); } + std::string GetName() const override { return ""; } }; Status s = target()->NewRandomAccessFile(f, r); @@ -187,19 +230,6 @@ class SpecialEnv : public EnvWrapper { }; class DBTest { - private: - const FilterPolicy* filter_policy_; - - // Sequence of option configurations to try - enum OptionConfig { - kDefault, - kReuse, - kFilter, - kUncompressed, - kEnd - }; - int option_config_; - public: std::string dbname_; SpecialEnv* env_; @@ -207,12 +237,11 @@ class DBTest { Options last_options_; - DBTest() : option_config_(kDefault), - env_(new SpecialEnv(Env::Default())) { + DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) { filter_policy_ = NewBloomFilterPolicy(10); dbname_ = test::TmpDir() + "/db_test"; DestroyDB(dbname_, Options()); - db_ = NULL; + db_ = nullptr; Reopen(); } @@ -255,31 +284,27 @@ class DBTest { return options; } - DBImpl* dbfull() { - return reinterpret_cast<DBImpl*>(db_); - } + DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); } - void Reopen(Options* options = NULL) { - ASSERT_OK(TryReopen(options)); - } + void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); } void Close() { delete db_; - db_ = NULL; + db_ = nullptr; } - void DestroyAndReopen(Options* options = NULL) { + void DestroyAndReopen(Options* options = nullptr) { delete db_; - db_ = NULL; + db_ = nullptr; DestroyDB(dbname_, Options()); ASSERT_OK(TryReopen(options)); } Status TryReopen(Options* options) { delete db_; - db_ = NULL; + db_ = nullptr; Options opts; - if (options != NULL) { + if (options != nullptr) { opts = *options; } else { opts = CurrentOptions(); @@ -294,11 +319,9 @@ class DBTest { return db_->Put(WriteOptions(), k, v); } - Status Delete(const std::string& k) { - return db_->Delete(WriteOptions(), k); - } + Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); } - std::string Get(const std::string& k, const Snapshot* snapshot = NULL) { + std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) { ReadOptions options; options.snapshot = snapshot; std::string result; @@ -382,10 +405,9 @@ class DBTest { int NumTableFilesAtLevel(int level) { std::string property; - ASSERT_TRUE( - db_->GetProperty("leveldb.num-files-at-level" + NumberToString(level), - &property)); - return atoi(property.c_str()); + ASSERT_TRUE(db_->GetProperty( + "leveldb.num-files-at-level" + NumberToString(level), &property)); + return std::stoi(property); } int TotalTableFiles() { @@ -431,11 +453,12 @@ class DBTest { } // Do n memtable compactions, each of which produces an sstable - // covering the range [small,large]. - void MakeTables(int n, const std::string& small, const std::string& large) { + // covering the range [small_key,large_key]. 
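
The TestEnv::GetChildren override shown earlier in this file's diff drops "." and ".." entries using the erase-returns-next-iterator idiom. A small standalone version of that idiom, with illustrative names, is sketched below.

    #include <iostream>
    #include <string>
    #include <vector>

    // Removes "." and ".." entries in place. erase() invalidates the erased
    // iterator but returns the iterator to the next element, so the loop only
    // advances manually when nothing was removed.
    void RemoveDotFiles(std::vector<std::string>* names) {
      for (auto it = names->begin(); it != names->end();) {
        if (*it == "." || *it == "..") {
          it = names->erase(it);
        } else {
          ++it;
        }
      }
    }

    int main() {
      std::vector<std::string> names = {".", "..", "MANIFEST-000001", "000003.log"};
      RemoveDotFiles(&names);
      for (const auto& n : names) std::cout << n << "\n";  // prints the two real files
      return 0;
    }
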
+ void MakeTables(int n, const std::string& small_key, + const std::string& large_key) { for (int i = 0; i < n; i++) { - Put(small, "begin"); - Put(large, "end"); + Put(small_key, "begin"); + Put(large_key, "end"); dbfull()->TEST_CompactMemTable(); } } @@ -448,9 +471,9 @@ class DBTest { void DumpFileCounts(const char* label) { fprintf(stderr, "---\n%s:\n", label); - fprintf(stderr, "maxoverlap: %lld\n", - static_cast<long long>( - dbfull()->TEST_MaxNextLevelOverlappingBytes())); + fprintf( + stderr, "maxoverlap: %lld\n", + static_cast<long long>(dbfull()->TEST_MaxNextLevelOverlappingBytes())); for (int level = 0; level < config::kNumLevels; level++) { int num = NumTableFilesAtLevel(level); if (num > 0) { @@ -506,15 +529,42 @@ class DBTest { } return files_renamed; } + + private: + // Sequence of option configurations to try + enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd }; + + const FilterPolicy* filter_policy_; + int option_config_; }; TEST(DBTest, Empty) { do { - ASSERT_TRUE(db_ != NULL); + ASSERT_TRUE(db_ != nullptr); ASSERT_EQ("NOT_FOUND", Get("foo")); } while (ChangeOptions()); } +TEST(DBTest, EmptyKey) { + do { + ASSERT_OK(Put("", "v1")); + ASSERT_EQ("v1", Get("")); + ASSERT_OK(Put("", "v2")); + ASSERT_EQ("v2", Get("")); + } while (ChangeOptions()); +} + +TEST(DBTest, EmptyValue) { + do { + ASSERT_OK(Put("key", "v1")); + ASSERT_EQ("v1", Get("key")); + ASSERT_OK(Put("key", "")); + ASSERT_EQ("", Get("key")); + ASSERT_OK(Put("key", "v2")); + ASSERT_EQ("v2", Get("key")); + } while (ChangeOptions()); +} + TEST(DBTest, ReadWrite) { do { ASSERT_OK(Put("foo", "v1")); @@ -547,11 +597,13 @@ TEST(DBTest, GetFromImmutableLayer) { ASSERT_OK(Put("foo", "v1")); ASSERT_EQ("v1", Get("foo")); - env_->delay_data_sync_.Release_Store(env_); // Block sync calls - Put("k1", std::string(100000, 'x')); // Fill memtable - Put("k2", std::string(100000, 'y')); // Trigger compaction + // Block sync calls. + env_->delay_data_sync_.store(true, std::memory_order_release); + Put("k1", std::string(100000, 'x')); // Fill memtable. + Put("k2", std::string(100000, 'y')); // Trigger compaction. ASSERT_EQ("v1", Get("foo")); - env_->delay_data_sync_.Release_Store(NULL); // Release sync calls + // Release sync calls. + env_->delay_data_sync_.store(false, std::memory_order_release); } while (ChangeOptions()); } @@ -568,9 +620,9 @@ TEST(DBTest, GetMemUsage) { ASSERT_OK(Put("foo", "v1")); std::string val; ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val)); - int mem_usage = atoi(val.c_str()); + int mem_usage = std::stoi(val); ASSERT_GT(mem_usage, 0); - ASSERT_LT(mem_usage, 5*1024*1024); + ASSERT_LT(mem_usage, 5 * 1024 * 1024); } while (ChangeOptions()); } @@ -592,6 +644,55 @@ TEST(DBTest, GetSnapshot) { } while (ChangeOptions()); } +TEST(DBTest, GetIdenticalSnapshots) { + do { + // Try with both a short key and a long key + for (int i = 0; i < 2; i++) { + std::string key = (i == 0) ? 
std::string("foo") : std::string(200, 'x'); + ASSERT_OK(Put(key, "v1")); + const Snapshot* s1 = db_->GetSnapshot(); + const Snapshot* s2 = db_->GetSnapshot(); + const Snapshot* s3 = db_->GetSnapshot(); + ASSERT_OK(Put(key, "v2")); + ASSERT_EQ("v2", Get(key)); + ASSERT_EQ("v1", Get(key, s1)); + ASSERT_EQ("v1", Get(key, s2)); + ASSERT_EQ("v1", Get(key, s3)); + db_->ReleaseSnapshot(s1); + dbfull()->TEST_CompactMemTable(); + ASSERT_EQ("v2", Get(key)); + ASSERT_EQ("v1", Get(key, s2)); + db_->ReleaseSnapshot(s2); + ASSERT_EQ("v1", Get(key, s3)); + db_->ReleaseSnapshot(s3); + } + } while (ChangeOptions()); +} + +TEST(DBTest, IterateOverEmptySnapshot) { + do { + const Snapshot* snapshot = db_->GetSnapshot(); + ReadOptions read_options; + read_options.snapshot = snapshot; + ASSERT_OK(Put("foo", "v1")); + ASSERT_OK(Put("foo", "v2")); + + Iterator* iterator1 = db_->NewIterator(read_options); + iterator1->SeekToFirst(); + ASSERT_TRUE(!iterator1->Valid()); + delete iterator1; + + dbfull()->TEST_CompactMemTable(); + + Iterator* iterator2 = db_->NewIterator(read_options); + iterator2->SeekToFirst(); + ASSERT_TRUE(!iterator2->Valid()); + delete iterator2; + + db_->ReleaseSnapshot(snapshot); + } while (ChangeOptions()); +} + TEST(DBTest, GetLevel0Ordering) { do { // Check that we process level-0 files in correct order. The code @@ -646,8 +747,7 @@ TEST(DBTest, GetEncountersEmptyLevel) { // Step 1: First place sstables in levels 0 and 2 int compaction_count = 0; - while (NumTableFilesAtLevel(0) == 0 || - NumTableFilesAtLevel(2) == 0) { + while (NumTableFilesAtLevel(0) == 0 || NumTableFilesAtLevel(2) == 0) { ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2"; compaction_count++; Put("a", "begin"); @@ -656,7 +756,7 @@ TEST(DBTest, GetEncountersEmptyLevel) { } // Step 2: clear level 1 if necessary. 
- dbfull()->TEST_CompactRange(1, NULL, NULL); + dbfull()->TEST_CompactRange(1, nullptr, nullptr); ASSERT_EQ(NumTableFilesAtLevel(0), 1); ASSERT_EQ(NumTableFilesAtLevel(1), 0); ASSERT_EQ(NumTableFilesAtLevel(2), 1); @@ -784,10 +884,10 @@ TEST(DBTest, IterMulti) { ASSERT_EQ(IterStatus(iter), "b->vb"); // Make sure iter stays at snapshot - ASSERT_OK(Put("a", "va2")); + ASSERT_OK(Put("a", "va2")); ASSERT_OK(Put("a2", "va3")); - ASSERT_OK(Put("b", "vb2")); - ASSERT_OK(Put("c", "vc2")); + ASSERT_OK(Put("b", "vb2")); + ASSERT_OK(Put("c", "vc2")); ASSERT_OK(Delete("b")); iter->SeekToFirst(); ASSERT_EQ(IterStatus(iter), "a->va"); @@ -978,7 +1078,7 @@ TEST(DBTest, RecoverWithLargeLog) { TEST(DBTest, CompactionsGenerateMultipleFiles) { Options options = CurrentOptions(); - options.write_buffer_size = 100000000; // Large write buffer + options.write_buffer_size = 100000000; // Large write buffer Reopen(&options); Random rnd(301); @@ -993,7 +1093,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) { // Reopening moves updates to level-0 Reopen(&options); - dbfull()->TEST_CompactRange(0, NULL, NULL); + dbfull()->TEST_CompactRange(0, nullptr, nullptr); ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_GT(NumTableFilesAtLevel(1), 1); @@ -1017,7 +1117,7 @@ TEST(DBTest, RepeatedWritesToSameKey) { for (int i = 0; i < 5 * kMaxFiles; i++) { Put("key", value); ASSERT_LE(TotalTableFiles(), kMaxFiles); - fprintf(stderr, "after %d: %d files\n", int(i+1), TotalTableFiles()); + fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles()); } } @@ -1044,29 +1144,28 @@ TEST(DBTest, SparseMerge) { } Put("C", "vc"); dbfull()->TEST_CompactMemTable(); - dbfull()->TEST_CompactRange(0, NULL, NULL); + dbfull()->TEST_CompactRange(0, nullptr, nullptr); // Make sparse update - Put("A", "va2"); + Put("A", "va2"); Put("B100", "bvalue2"); - Put("C", "vc2"); + Put("C", "vc2"); dbfull()->TEST_CompactMemTable(); // Compactions should not cause us to create a situation where // a file overlaps too much data at the next level. 
- ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576); - dbfull()->TEST_CompactRange(0, NULL, NULL); - ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576); - dbfull()->TEST_CompactRange(1, NULL, NULL); - ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576); + ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576); + dbfull()->TEST_CompactRange(0, nullptr, nullptr); + ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576); + dbfull()->TEST_CompactRange(1, nullptr, nullptr); + ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576); } static bool Between(uint64_t val, uint64_t low, uint64_t high) { bool result = (val >= low) && (val <= high); if (!result) { fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n", - (unsigned long long)(val), - (unsigned long long)(low), + (unsigned long long)(val), (unsigned long long)(low), (unsigned long long)(high)); } return result; @@ -1075,7 +1174,7 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) { TEST(DBTest, ApproximateSizes) { do { Options options = CurrentOptions(); - options.write_buffer_size = 100000000; // Large write buffer + options.write_buffer_size = 100000000; // Large write buffer options.compression = kNoCompression; DestroyAndReopen(); @@ -1110,12 +1209,13 @@ TEST(DBTest, ApproximateSizes) { for (int compact_start = 0; compact_start < N; compact_start += 10) { for (int i = 0; i < N; i += 10) { - ASSERT_TRUE(Between(Size("", Key(i)), S1*i, S2*i)); - ASSERT_TRUE(Between(Size("", Key(i)+".suffix"), S1*(i+1), S2*(i+1))); - ASSERT_TRUE(Between(Size(Key(i), Key(i+10)), S1*10, S2*10)); + ASSERT_TRUE(Between(Size("", Key(i)), S1 * i, S2 * i)); + ASSERT_TRUE(Between(Size("", Key(i) + ".suffix"), S1 * (i + 1), + S2 * (i + 1))); + ASSERT_TRUE(Between(Size(Key(i), Key(i + 10)), S1 * 10, S2 * 10)); } - ASSERT_TRUE(Between(Size("", Key(50)), S1*50, S2*50)); - ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), S1*50, S2*50)); + ASSERT_TRUE(Between(Size("", Key(50)), S1 * 50, S2 * 50)); + ASSERT_TRUE(Between(Size("", Key(50) + ".suffix"), S1 * 50, S2 * 50)); std::string cstart_str = Key(compact_start); std::string cend_str = Key(compact_start + 9); @@ -1168,7 +1268,7 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) { ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000)); - dbfull()->TEST_CompactRange(0, NULL, NULL); + dbfull()->TEST_CompactRange(0, nullptr, nullptr); } } while (ChangeOptions()); } @@ -1182,7 +1282,7 @@ TEST(DBTest, IteratorPinsRef) { // Write to force compactions Put("foo", "newvalue1"); for (int i = 0; i < 100; i++) { - ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values + ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values } Put("foo", "newvalue2"); @@ -1234,7 +1334,7 @@ TEST(DBTest, HiddenValuesAreRemoved) { Put("pastfoo", "v"); const Snapshot* snapshot = db_->GetSnapshot(); Put("foo", "tiny"); - Put("pastfoo2", "v2"); // Advance sequence number one more + Put("pastfoo2", "v2"); // Advance sequence number one more ASSERT_OK(dbfull()->TEST_CompactMemTable()); ASSERT_GT(NumTableFilesAtLevel(0), 0); @@ -1244,11 +1344,11 @@ TEST(DBTest, HiddenValuesAreRemoved) { db_->ReleaseSnapshot(snapshot); ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]"); Slice x("x"); - dbfull()->TEST_CompactRange(0, NULL, &x); + dbfull()->TEST_CompactRange(0, nullptr, &x); ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]"); ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_GE(NumTableFilesAtLevel(1), 
1); - dbfull()->TEST_CompactRange(1, NULL, &x); + dbfull()->TEST_CompactRange(1, nullptr, &x); ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]"); ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000)); @@ -1259,14 +1359,14 @@ TEST(DBTest, DeletionMarkers1) { Put("foo", "v1"); ASSERT_OK(dbfull()->TEST_CompactMemTable()); const int last = config::kMaxMemCompactLevel; - ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level + ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level // Place a table at level last-1 to prevent merging with preceding mutation Put("a", "begin"); Put("z", "end"); dbfull()->TEST_CompactMemTable(); ASSERT_EQ(NumTableFilesAtLevel(last), 1); - ASSERT_EQ(NumTableFilesAtLevel(last-1), 1); + ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1); Delete("foo"); Put("foo", "v2"); @@ -1274,11 +1374,11 @@ TEST(DBTest, DeletionMarkers1) { ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2 ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]"); Slice z("z"); - dbfull()->TEST_CompactRange(last-2, NULL, &z); + dbfull()->TEST_CompactRange(last - 2, nullptr, &z); // DEL eliminated, but v1 remains because we aren't compacting that level // (DEL can be eliminated because v2 hides v1). ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]"); - dbfull()->TEST_CompactRange(last-1, NULL, NULL); + dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr); // Merging last-1 w/ last, so we are the base level for "foo", so // DEL is removed. (as is v1). ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]"); @@ -1288,23 +1388,23 @@ TEST(DBTest, DeletionMarkers2) { Put("foo", "v1"); ASSERT_OK(dbfull()->TEST_CompactMemTable()); const int last = config::kMaxMemCompactLevel; - ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level + ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level // Place a table at level last-1 to prevent merging with preceding mutation Put("a", "begin"); Put("z", "end"); dbfull()->TEST_CompactMemTable(); ASSERT_EQ(NumTableFilesAtLevel(last), 1); - ASSERT_EQ(NumTableFilesAtLevel(last-1), 1); + ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1); Delete("foo"); ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]"); ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2 ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]"); - dbfull()->TEST_CompactRange(last-2, NULL, NULL); + dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr); // DEL kept: "last" file overlaps ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]"); - dbfull()->TEST_CompactRange(last-1, NULL, NULL); + dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr); // Merging last-1 w/ last, so we are the base level for "foo", so // DEL is removed. (as is v1). ASSERT_EQ(AllEntriesFor("foo"), "[ ]"); @@ -1314,7 +1414,8 @@ TEST(DBTest, OverlapInLevel0) { do { ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config"; - // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0. + // Fill levels 1 and 2 to disable the pushing of new memtables to levels > + // 0. 
ASSERT_OK(Put("100", "v100")); ASSERT_OK(Put("999", "v999")); dbfull()->TEST_CompactMemTable(); @@ -1337,8 +1438,8 @@ TEST(DBTest, OverlapInLevel0) { ASSERT_EQ("2,1,1", FilesPerLevel()); // Compact away the placeholder files we created initially - dbfull()->TEST_CompactRange(1, NULL, NULL); - dbfull()->TEST_CompactRange(2, NULL, NULL); + dbfull()->TEST_CompactRange(1, nullptr, nullptr); + dbfull()->TEST_CompactRange(2, nullptr, nullptr); ASSERT_EQ("2", FilesPerLevel()); // Do a memtable compaction. Before bug-fix, the compaction would @@ -1370,21 +1471,21 @@ TEST(DBTest, L0_CompactionBug_Issue44_a) { TEST(DBTest, L0_CompactionBug_Issue44_b) { Reopen(); - Put("",""); + Put("", ""); Reopen(); Delete("e"); - Put("",""); + Put("", ""); Reopen(); Put("c", "cv"); Reopen(); - Put("",""); + Put("", ""); Reopen(); - Put("",""); + Put("", ""); DelayMilliseconds(1000); // Wait for compaction to finish Reopen(); - Put("d","dv"); + Put("d", "dv"); Reopen(); - Put("",""); + Put("", ""); Reopen(); Delete("d"); Delete("b"); @@ -1394,17 +1495,26 @@ TEST(DBTest, L0_CompactionBug_Issue44_b) { ASSERT_EQ("(->)(c->cv)", Contents()); } +TEST(DBTest, Fflush_Issue474) { + static const int kNum = 100000; + Random rnd(test::RandomSeed()); + for (int i = 0; i < kNum; i++) { + fflush(nullptr); + ASSERT_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100))); + } +} + TEST(DBTest, ComparatorCheck) { class NewComparator : public Comparator { public: - virtual const char* Name() const { return "leveldb.NewComparator"; } - virtual int Compare(const Slice& a, const Slice& b) const { + const char* Name() const override { return "leveldb.NewComparator"; } + int Compare(const Slice& a, const Slice& b) const override { return BytewiseComparator()->Compare(a, b); } - virtual void FindShortestSeparator(std::string* s, const Slice& l) const { + void FindShortestSeparator(std::string* s, const Slice& l) const override { BytewiseComparator()->FindShortestSeparator(s, l); } - virtual void FindShortSuccessor(std::string* key) const { + void FindShortSuccessor(std::string* key) const override { BytewiseComparator()->FindShortSuccessor(key); } }; @@ -1420,21 +1530,22 @@ TEST(DBTest, ComparatorCheck) { TEST(DBTest, CustomComparator) { class NumberComparator : public Comparator { public: - virtual const char* Name() const { return "test.NumberComparator"; } - virtual int Compare(const Slice& a, const Slice& b) const { + const char* Name() const override { return "test.NumberComparator"; } + int Compare(const Slice& a, const Slice& b) const override { return ToNumber(a) - ToNumber(b); } - virtual void FindShortestSeparator(std::string* s, const Slice& l) const { - ToNumber(*s); // Check format - ToNumber(l); // Check format + void FindShortestSeparator(std::string* s, const Slice& l) const override { + ToNumber(*s); // Check format + ToNumber(l); // Check format } - virtual void FindShortSuccessor(std::string* key) const { - ToNumber(*key); // Check format + void FindShortSuccessor(std::string* key) const override { + ToNumber(*key); // Check format } + private: static int ToNumber(const Slice& x) { // Check that there are no extra characters. 
- ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size()-1] == ']') + ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']') << EscapeString(x); int val; char ignored; @@ -1447,7 +1558,7 @@ TEST(DBTest, CustomComparator) { Options new_options = CurrentOptions(); new_options.create_if_missing = true; new_options.comparator = &cmp; - new_options.filter_policy = NULL; // Cannot use bloom filters + new_options.filter_policy = nullptr; // Cannot use bloom filters new_options.write_buffer_size = 1000; // Compact more often DestroyAndReopen(&new_options); ASSERT_OK(Put("[10]", "ten")); @@ -1465,7 +1576,7 @@ TEST(DBTest, CustomComparator) { for (int run = 0; run < 2; run++) { for (int i = 0; i < 1000; i++) { char buf[100]; - snprintf(buf, sizeof(buf), "[%d]", i*10); + snprintf(buf, sizeof(buf), "[%d]", i * 10); ASSERT_OK(Put(buf, buf)); } Compact("[0]", "[1000000]"); @@ -1502,7 +1613,7 @@ TEST(DBTest, ManualCompaction) { // Compact all MakeTables(1, "a", "z"); ASSERT_EQ("0,1,2", FilesPerLevel()); - db_->CompactRange(NULL, NULL); + db_->CompactRange(nullptr, nullptr); ASSERT_EQ("0,0,1", FilesPerLevel()); } @@ -1511,42 +1622,94 @@ TEST(DBTest, DBOpen_Options) { DestroyDB(dbname, Options()); // Does not exist, and create_if_missing == false: error - DB* db = NULL; + DB* db = nullptr; Options opts; opts.create_if_missing = false; Status s = DB::Open(opts, dbname, &db); - ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != NULL); - ASSERT_TRUE(db == NULL); + ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr); + ASSERT_TRUE(db == nullptr); // Does not exist, and create_if_missing == true: OK opts.create_if_missing = true; s = DB::Open(opts, dbname, &db); ASSERT_OK(s); - ASSERT_TRUE(db != NULL); + ASSERT_TRUE(db != nullptr); delete db; - db = NULL; + db = nullptr; // Does exist, and error_if_exists == true: error opts.create_if_missing = false; opts.error_if_exists = true; s = DB::Open(opts, dbname, &db); - ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != NULL); - ASSERT_TRUE(db == NULL); + ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr); + ASSERT_TRUE(db == nullptr); // Does exist, and error_if_exists == false: OK opts.create_if_missing = true; opts.error_if_exists = false; s = DB::Open(opts, dbname, &db); ASSERT_OK(s); - ASSERT_TRUE(db != NULL); + ASSERT_TRUE(db != nullptr); + + delete db; + db = nullptr; +} + +TEST(DBTest, DestroyEmptyDir) { + std::string dbname = test::TmpDir() + "/db_empty_dir"; + TestEnv env(Env::Default()); + env.DeleteDir(dbname); + ASSERT_TRUE(!env.FileExists(dbname)); + + Options opts; + opts.env = &env; + + ASSERT_OK(env.CreateDir(dbname)); + ASSERT_TRUE(env.FileExists(dbname)); + std::vector<std::string> children; + ASSERT_OK(env.GetChildren(dbname, &children)); + // The stock Env's do not filter out '.' and '..' special files. + ASSERT_EQ(2, children.size()); + ASSERT_OK(DestroyDB(dbname, opts)); + ASSERT_TRUE(!env.FileExists(dbname)); + + // Should also be destroyed if Env is filtering out dot files. 
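
The new DestroyEmptyDir test above and the DestroyOpenDB test that follows pin down DestroyDB's behaviour: it removes a database directory, but fails while the database is open because the LOCK file is still held. A minimal usage sketch of the public API follows; the path is illustrative and error handling is reduced to asserts.

    #include <cassert>
    #include <iostream>
    #include <string>
    #include "leveldb/db.h"

    int main() {
      const std::string name = "/tmp/destroydb_demo";  // illustrative path
      leveldb::Options options;
      options.create_if_missing = true;

      leveldb::DB* db = nullptr;
      leveldb::Status s = leveldb::DB::Open(options, name, &db);
      assert(s.ok());

      // While the database is open its LOCK file is held, so DestroyDB fails.
      assert(!leveldb::DestroyDB(name, leveldb::Options()).ok());

      delete db;  // closing the DB releases the lock

      // Now the database directory can be removed.
      assert(leveldb::DestroyDB(name, leveldb::Options()).ok());
      std::cout << "destroyed\n";
      return 0;
    }
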
+ env.SetIgnoreDotFiles(true); + ASSERT_OK(env.CreateDir(dbname)); + ASSERT_TRUE(env.FileExists(dbname)); + ASSERT_OK(env.GetChildren(dbname, &children)); + ASSERT_EQ(0, children.size()); + ASSERT_OK(DestroyDB(dbname, opts)); + ASSERT_TRUE(!env.FileExists(dbname)); +} + +TEST(DBTest, DestroyOpenDB) { + std::string dbname = test::TmpDir() + "/open_db_dir"; + env_->DeleteDir(dbname); + ASSERT_TRUE(!env_->FileExists(dbname)); + + Options opts; + opts.create_if_missing = true; + DB* db = nullptr; + ASSERT_OK(DB::Open(opts, dbname, &db)); + ASSERT_TRUE(db != nullptr); + + // Must fail to destroy an open db. + ASSERT_TRUE(env_->FileExists(dbname)); + ASSERT_TRUE(!DestroyDB(dbname, Options()).ok()); + ASSERT_TRUE(env_->FileExists(dbname)); delete db; - db = NULL; + db = nullptr; + + // Should succeed destroying a closed db. + ASSERT_OK(DestroyDB(dbname, Options())); + ASSERT_TRUE(!env_->FileExists(dbname)); } TEST(DBTest, Locking) { - DB* db2 = NULL; + DB* db2 = nullptr; Status s = DB::Open(CurrentOptions(), dbname_, &db2); ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db"; } @@ -1561,13 +1724,14 @@ TEST(DBTest, NoSpace) { ASSERT_EQ("v1", Get("foo")); Compact("a", "z"); const int num_files = CountFiles(); - env_->no_space_.Release_Store(env_); // Force out-of-space errors + // Force out-of-space errors. + env_->no_space_.store(true, std::memory_order_release); for (int i = 0; i < 10; i++) { - for (int level = 0; level < config::kNumLevels-1; level++) { - dbfull()->TEST_CompactRange(level, NULL, NULL); + for (int level = 0; level < config::kNumLevels - 1; level++) { + dbfull()->TEST_CompactRange(level, nullptr, nullptr); } } - env_->no_space_.Release_Store(NULL); + env_->no_space_.store(false, std::memory_order_release); ASSERT_LT(CountFiles(), num_files + 3); } @@ -1577,7 +1741,8 @@ TEST(DBTest, NonWritableFileSystem) { options.env = env_; Reopen(&options); ASSERT_OK(Put("foo", "v1")); - env_->non_writable_.Release_Store(env_); // Force errors for new files + // Force errors for new files. + env_->non_writable_.store(true, std::memory_order_release); std::string big(100000, 'x'); int errors = 0; for (int i = 0; i < 20; i++) { @@ -1588,7 +1753,7 @@ TEST(DBTest, NonWritableFileSystem) { } } ASSERT_GT(errors, 0); - env_->non_writable_.Release_Store(NULL); + env_->non_writable_.store(false, std::memory_order_release); } TEST(DBTest, WriteSyncError) { @@ -1598,7 +1763,7 @@ TEST(DBTest, WriteSyncError) { Options options = CurrentOptions(); options.env = env_; Reopen(&options); - env_->data_sync_error_.Release_Store(env_); + env_->data_sync_error_.store(true, std::memory_order_release); // (b) Normal write should succeed WriteOptions w; @@ -1612,7 +1777,7 @@ TEST(DBTest, WriteSyncError) { ASSERT_EQ("NOT_FOUND", Get("k2")); // (d) make sync behave normally - env_->data_sync_error_.Release_Store(NULL); + env_->data_sync_error_.store(false, std::memory_order_release); // (e) Do a non-sync write; should fail w.sync = false; @@ -1632,9 +1797,8 @@ TEST(DBTest, ManifestWriteError) { // We iterate twice. In the second iteration, everything is the // same except the log record never makes it to the MANIFEST file. for (int iter = 0; iter < 2; iter++) { - port::AtomicPointer* error_type = (iter == 0) - ? &env_->manifest_sync_error_ - : &env_->manifest_write_error_; + std::atomic<bool>* error_type = (iter == 0) ? 
&env_->manifest_sync_error_ + : &env_->manifest_write_error_; // Insert foo=>bar mapping Options options = CurrentOptions(); @@ -1649,15 +1813,15 @@ TEST(DBTest, ManifestWriteError) { dbfull()->TEST_CompactMemTable(); ASSERT_EQ("bar", Get("foo")); const int last = config::kMaxMemCompactLevel; - ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo=>bar is now in last level + ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo=>bar is now in last level // Merging compaction (will fail) - error_type->Release_Store(env_); - dbfull()->TEST_CompactRange(last, NULL, NULL); // Should fail + error_type->store(true, std::memory_order_release); + dbfull()->TEST_CompactRange(last, nullptr, nullptr); // Should fail ASSERT_EQ("bar", Get("foo")); // Recovery: should not lose data - error_type->Release_Store(NULL); + error_type->store(false, std::memory_order_release); Reopen(&options); ASSERT_EQ("bar", Get("foo")); } @@ -1677,8 +1841,7 @@ TEST(DBTest, MissingSSTFile) { options.paranoid_checks = true; Status s = TryReopen(&options); ASSERT_TRUE(!s.ok()); - ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) - << s.ToString(); + ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString(); } TEST(DBTest, StillReadSST) { @@ -1728,7 +1891,7 @@ TEST(DBTest, BloomFilter) { dbfull()->TEST_CompactMemTable(); // Prevent auto compactions triggered by seeks - env_->delay_data_sync_.Release_Store(env_); + env_->delay_data_sync_.store(true, std::memory_order_release); // Lookup present keys. Should rarely read from small sstable. env_->random_read_counter_.Reset(); @@ -1738,7 +1901,7 @@ TEST(DBTest, BloomFilter) { int reads = env_->random_read_counter_.Read(); fprintf(stderr, "%d present => %d reads\n", N, reads); ASSERT_GE(reads, N); - ASSERT_LE(reads, N + 2*N/100); + ASSERT_LE(reads, N + 2 * N / 100); // Lookup present keys. Should rarely read from either sstable. env_->random_read_counter_.Reset(); @@ -1747,9 +1910,9 @@ TEST(DBTest, BloomFilter) { } reads = env_->random_read_counter_.Read(); fprintf(stderr, "%d missing => %d reads\n", N, reads); - ASSERT_LE(reads, 3*N/100); + ASSERT_LE(reads, 3 * N / 100); - env_->delay_data_sync_.Release_Store(NULL); + env_->delay_data_sync_.store(false, std::memory_order_release); Close(); delete options.block_cache; delete options.filter_policy; @@ -1764,9 +1927,9 @@ static const int kNumKeys = 1000; struct MTState { DBTest* test; - port::AtomicPointer stop; - port::AtomicPointer counter[kNumThreads]; - port::AtomicPointer thread_done[kNumThreads]; + std::atomic<bool> stop; + std::atomic<int> counter[kNumThreads]; + std::atomic<bool> thread_done[kNumThreads]; }; struct MTThread { @@ -1778,13 +1941,13 @@ static void MTThreadBody(void* arg) { MTThread* t = reinterpret_cast<MTThread*>(arg); int id = t->id; DB* db = t->state->test->db_; - uintptr_t counter = 0; + int counter = 0; fprintf(stderr, "... starting thread %d\n", id); Random rnd(1000 + id); std::string value; char valbuf[1500]; - while (t->state->stop.Acquire_Load() == NULL) { - t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter)); + while (!t->state->stop.load(std::memory_order_acquire)) { + t->state->counter[id].store(counter, std::memory_order_release); int key = rnd.Uniform(kNumKeys); char keybuf[20]; @@ -1793,8 +1956,8 @@ static void MTThreadBody(void* arg) { if (rnd.OneIn(2)) { // Write values of the form <key, my id, counter>. // We add some padding for force compactions. 
- snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", - key, id, static_cast<int>(counter)); + snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id, + static_cast<int>(counter)); ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf))); } else { // Read a value and verify that it matches the pattern written above. @@ -1809,14 +1972,13 @@ static void MTThreadBody(void* arg) { ASSERT_EQ(k, key); ASSERT_GE(w, 0); ASSERT_LT(w, kNumThreads); - ASSERT_LE(static_cast<uintptr_t>(c), reinterpret_cast<uintptr_t>( - t->state->counter[w].Acquire_Load())); + ASSERT_LE(c, t->state->counter[w].load(std::memory_order_acquire)); } } counter++; } - t->state->thread_done[id].Release_Store(t); - fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter)); + t->state->thread_done[id].store(true, std::memory_order_release); + fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter); } } // namespace @@ -1826,10 +1988,10 @@ TEST(DBTest, MultiThreaded) { // Initialize state MTState mt; mt.test = this; - mt.stop.Release_Store(0); + mt.stop.store(false, std::memory_order_release); for (int id = 0; id < kNumThreads; id++) { - mt.counter[id].Release_Store(0); - mt.thread_done[id].Release_Store(0); + mt.counter[id].store(false, std::memory_order_release); + mt.thread_done[id].store(false, std::memory_order_release); } // Start threads @@ -1844,9 +2006,9 @@ TEST(DBTest, MultiThreaded) { DelayMilliseconds(kTestSeconds * 1000); // Stop the threads and wait for them to finish - mt.stop.Release_Store(&mt); + mt.stop.store(true, std::memory_order_release); for (int id = 0; id < kNumThreads; id++) { - while (mt.thread_done[id].Acquire_Load() == NULL) { + while (!mt.thread_done[id].load(std::memory_order_acquire)) { DelayMilliseconds(100); } } @@ -1857,28 +2019,28 @@ namespace { typedef std::map<std::string, std::string> KVMap; } -class ModelDB: public DB { +class ModelDB : public DB { public: class ModelSnapshot : public Snapshot { public: KVMap map_; }; - explicit ModelDB(const Options& options): options_(options) { } - ~ModelDB() { } - virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) { + explicit ModelDB(const Options& options) : options_(options) {} + ~ModelDB() override = default; + Status Put(const WriteOptions& o, const Slice& k, const Slice& v) override { return DB::Put(o, k, v); } - virtual Status Delete(const WriteOptions& o, const Slice& key) { + Status Delete(const WriteOptions& o, const Slice& key) override { return DB::Delete(o, key); } - virtual Status Get(const ReadOptions& options, - const Slice& key, std::string* value) { - assert(false); // Not implemented + Status Get(const ReadOptions& options, const Slice& key, + std::string* value) override { + assert(false); // Not implemented return Status::NotFound(key); } - virtual Iterator* NewIterator(const ReadOptions& options) { - if (options.snapshot == NULL) { + Iterator* NewIterator(const ReadOptions& options) override { + if (options.snapshot == nullptr) { KVMap* saved = new KVMap; *saved = map_; return new ModelIter(saved, true); @@ -1888,68 +2050,65 @@ class ModelDB: public DB { return new ModelIter(snapshot_state, false); } } - virtual const Snapshot* GetSnapshot() { + const Snapshot* GetSnapshot() override { ModelSnapshot* snapshot = new ModelSnapshot; snapshot->map_ = map_; return snapshot; } - virtual void ReleaseSnapshot(const Snapshot* snapshot) { + void ReleaseSnapshot(const Snapshot* snapshot) override { delete reinterpret_cast<const ModelSnapshot*>(snapshot); } - virtual Status 
Write(const WriteOptions& options, WriteBatch* batch) { + Status Write(const WriteOptions& options, WriteBatch* batch) override { class Handler : public WriteBatch::Handler { public: KVMap* map_; - virtual void Put(const Slice& key, const Slice& value) { + void Put(const Slice& key, const Slice& value) override { (*map_)[key.ToString()] = value.ToString(); } - virtual void Delete(const Slice& key) { - map_->erase(key.ToString()); - } + void Delete(const Slice& key) override { map_->erase(key.ToString()); } }; Handler handler; handler.map_ = &map_; return batch->Iterate(&handler); } - virtual bool GetProperty(const Slice& property, std::string* value) { + bool GetProperty(const Slice& property, std::string* value) override { return false; } - virtual void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) { + void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) override { for (int i = 0; i < n; i++) { sizes[i] = 0; } } - virtual void CompactRange(const Slice* start, const Slice* end) { - } + void CompactRange(const Slice* start, const Slice* end) override {} private: - class ModelIter: public Iterator { + class ModelIter : public Iterator { public: ModelIter(const KVMap* map, bool owned) - : map_(map), owned_(owned), iter_(map_->end()) { - } - ~ModelIter() { + : map_(map), owned_(owned), iter_(map_->end()) {} + ~ModelIter() override { if (owned_) delete map_; } - virtual bool Valid() const { return iter_ != map_->end(); } - virtual void SeekToFirst() { iter_ = map_->begin(); } - virtual void SeekToLast() { + bool Valid() const override { return iter_ != map_->end(); } + void SeekToFirst() override { iter_ = map_->begin(); } + void SeekToLast() override { if (map_->empty()) { iter_ = map_->end(); } else { iter_ = map_->find(map_->rbegin()->first); } } - virtual void Seek(const Slice& k) { + void Seek(const Slice& k) override { iter_ = map_->lower_bound(k.ToString()); } - virtual void Next() { ++iter_; } - virtual void Prev() { --iter_; } - virtual Slice key() const { return iter_->first; } - virtual Slice value() const { return iter_->second; } - virtual Status status() const { return Status::OK(); } + void Next() override { ++iter_; } + void Prev() override { --iter_; } + Slice key() const override { return iter_->first; } + Slice value() const override { return iter_->second; } + Status status() const override { return Status::OK(); } + private: const KVMap* const map_; const bool owned_; // Do we own map_ @@ -1959,16 +2118,7 @@ class ModelDB: public DB { KVMap map_; }; -static std::string RandomKey(Random* rnd) { - int len = (rnd->OneIn(3) - ? 1 // Short sometimes to encourage collisions - : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10))); - return test::RandomKey(rnd, len); -} - -static bool CompareIterators(int step, - DB* model, - DB* db, +static bool CompareIterators(int step, DB* model, DB* db, const Snapshot* model_snap, const Snapshot* db_snap) { ReadOptions options; @@ -1979,12 +2129,10 @@ static bool CompareIterators(int step, bool ok = true; int count = 0; for (miter->SeekToFirst(), dbiter->SeekToFirst(); - ok && miter->Valid() && dbiter->Valid(); - miter->Next(), dbiter->Next()) { + ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) { count++; if (miter->key().compare(dbiter->key()) != 0) { - fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", - step, + fprintf(stderr, "step %d: Key mismatch: '%s' vs. 
'%s'\n", step, EscapeString(miter->key()).c_str(), EscapeString(dbiter->key()).c_str()); ok = false; @@ -1993,8 +2141,7 @@ static bool CompareIterators(int step, if (miter->value().compare(dbiter->value()) != 0) { fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n", - step, - EscapeString(miter->key()).c_str(), + step, EscapeString(miter->key()).c_str(), EscapeString(miter->value()).c_str(), EscapeString(miter->value()).c_str()); ok = false; @@ -2019,8 +2166,8 @@ TEST(DBTest, Randomized) { do { ModelDB model(CurrentOptions()); const int N = 10000; - const Snapshot* model_snap = NULL; - const Snapshot* db_snap = NULL; + const Snapshot* model_snap = nullptr; + const Snapshot* db_snap = nullptr; std::string k, v; for (int step = 0; step < N; step++) { if (step % 100 == 0) { @@ -2028,22 +2175,19 @@ TEST(DBTest, Randomized) { } // TODO(sanjay): Test Get() works int p = rnd.Uniform(100); - if (p < 45) { // Put + if (p < 45) { // Put k = RandomKey(&rnd); - v = RandomString(&rnd, - rnd.OneIn(20) - ? 100 + rnd.Uniform(100) - : rnd.Uniform(8)); + v = RandomString( + &rnd, rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8)); ASSERT_OK(model.Put(WriteOptions(), k, v)); ASSERT_OK(db_->Put(WriteOptions(), k, v)); - } else if (p < 90) { // Delete + } else if (p < 90) { // Delete k = RandomKey(&rnd); ASSERT_OK(model.Delete(WriteOptions(), k)); ASSERT_OK(db_->Delete(WriteOptions(), k)); - - } else { // Multi-element batch + } else { // Multi-element batch WriteBatch b; const int num = rnd.Uniform(8); for (int i = 0; i < num; i++) { @@ -2065,23 +2209,23 @@ TEST(DBTest, Randomized) { } if ((step % 100) == 0) { - ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL)); + ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr)); ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap)); // Save a snapshot from each DB this time that we'll use next // time we compare things, to make sure the current state is // preserved with the snapshot - if (model_snap != NULL) model.ReleaseSnapshot(model_snap); - if (db_snap != NULL) db_->ReleaseSnapshot(db_snap); + if (model_snap != nullptr) model.ReleaseSnapshot(model_snap); + if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap); Reopen(); - ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL)); + ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr)); model_snap = model.GetSnapshot(); db_snap = db_->GetSnapshot(); } } - if (model_snap != NULL) model.ReleaseSnapshot(model_snap); - if (db_snap != NULL) db_->ReleaseSnapshot(db_snap); + if (model_snap != nullptr) model.ReleaseSnapshot(model_snap); + if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap); } while (ChangeOptions()); } @@ -2095,15 +2239,15 @@ void BM_LogAndApply(int iters, int num_base_files) { std::string dbname = test::TmpDir() + "/leveldb_test_benchmark"; DestroyDB(dbname, Options()); - DB* db = NULL; + DB* db = nullptr; Options opts; opts.create_if_missing = true; Status s = DB::Open(opts, dbname, &db); ASSERT_OK(s); - ASSERT_TRUE(db != NULL); + ASSERT_TRUE(db != nullptr); delete db; - db = NULL; + db = nullptr; Env* env = Env::Default(); @@ -2112,14 +2256,14 @@ void BM_LogAndApply(int iters, int num_base_files) { InternalKeyComparator cmp(BytewiseComparator()); Options options; - VersionSet vset(dbname, &options, NULL, &cmp); + VersionSet vset(dbname, &options, nullptr, &cmp); bool save_manifest; ASSERT_OK(vset.Recover(&save_manifest)); VersionEdit vbase; uint64_t fnum = 1; for (int i = 0; i < num_base_files; i++) { - InternalKey 
start(MakeKey(2*fnum), 1, kTypeValue); - InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion); + InternalKey start(MakeKey(2 * fnum), 1, kTypeValue); + InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion); vbase.AddFile(2, fnum++, 1 /* file size */, start, limit); } ASSERT_OK(vset.LogAndApply(&vbase, &mu)); @@ -2129,8 +2273,8 @@ void BM_LogAndApply(int iters, int num_base_files) { for (int i = 0; i < iters; i++) { VersionEdit vedit; vedit.DeleteFile(2, fnum); - InternalKey start(MakeKey(2*fnum), 1, kTypeValue); - InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion); + InternalKey start(MakeKey(2 * fnum), 1, kTypeValue); + InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion); vedit.AddFile(2, fnum++, 1 /* file size */, start, limit); vset.LogAndApply(&vedit, &mu); } @@ -2139,8 +2283,8 @@ void BM_LogAndApply(int iters, int num_base_files) { char buf[16]; snprintf(buf, sizeof(buf), "%d", num_base_files); fprintf(stderr, - "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n", - buf, iters, us, ((float)us) / iters); + "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n", buf, + iters, us, ((float)us) / iters); } } // namespace leveldb diff --git a/src/leveldb/db/dbformat.cc b/src/leveldb/db/dbformat.cc index 20a7ca4462..459eddf5b1 100644 --- a/src/leveldb/db/dbformat.cc +++ b/src/leveldb/db/dbformat.cc @@ -2,8 +2,12 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. -#include <stdio.h> #include "db/dbformat.h" + +#include <stdio.h> + +#include <sstream> + #include "port/port.h" #include "util/coding.h" @@ -21,26 +25,20 @@ void AppendInternalKey(std::string* result, const ParsedInternalKey& key) { } std::string ParsedInternalKey::DebugString() const { - char buf[50]; - snprintf(buf, sizeof(buf), "' @ %llu : %d", - (unsigned long long) sequence, - int(type)); - std::string result = "'"; - result += EscapeString(user_key.ToString()); - result += buf; - return result; + std::ostringstream ss; + ss << '\'' << EscapeString(user_key.ToString()) << "' @ " << sequence << " : " + << static_cast<int>(type); + return ss.str(); } std::string InternalKey::DebugString() const { - std::string result; ParsedInternalKey parsed; if (ParseInternalKey(rep_, &parsed)) { - result = parsed.DebugString(); - } else { - result = "(bad)"; - result.append(EscapeString(rep_)); + return parsed.DebugString(); } - return result; + std::ostringstream ss; + ss << "(bad)" << EscapeString(rep_); + return ss.str(); } const char* InternalKeyComparator::Name() const { @@ -65,9 +63,8 @@ int InternalKeyComparator::Compare(const Slice& akey, const Slice& bkey) const { return r; } -void InternalKeyComparator::FindShortestSeparator( - std::string* start, - const Slice& limit) const { +void InternalKeyComparator::FindShortestSeparator(std::string* start, + const Slice& limit) const { // Attempt to shorten the user portion of the key Slice user_start = ExtractUserKey(*start); Slice user_limit = ExtractUserKey(limit); @@ -77,7 +74,8 @@ void InternalKeyComparator::FindShortestSeparator( user_comparator_->Compare(user_start, tmp) < 0) { // User key has become shorter physically, but larger logically. // Tack on the earliest possible number to the shortened user key. 
- PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek)); + PutFixed64(&tmp, + PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek)); assert(this->Compare(*start, tmp) < 0); assert(this->Compare(tmp, limit) < 0); start->swap(tmp); @@ -92,15 +90,14 @@ void InternalKeyComparator::FindShortSuccessor(std::string* key) const { user_comparator_->Compare(user_key, tmp) < 0) { // User key has become shorter physically, but larger logically. // Tack on the earliest possible number to the shortened user key. - PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek)); + PutFixed64(&tmp, + PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek)); assert(this->Compare(*key, tmp) < 0); key->swap(tmp); } } -const char* InternalFilterPolicy::Name() const { - return user_policy_->Name(); -} +const char* InternalFilterPolicy::Name() const { return user_policy_->Name(); } void InternalFilterPolicy::CreateFilter(const Slice* keys, int n, std::string* dst) const { diff --git a/src/leveldb/db/dbformat.h b/src/leveldb/db/dbformat.h index ea897b13c0..a1c30ed88c 100644 --- a/src/leveldb/db/dbformat.h +++ b/src/leveldb/db/dbformat.h @@ -5,7 +5,10 @@ #ifndef STORAGE_LEVELDB_DB_DBFORMAT_H_ #define STORAGE_LEVELDB_DB_DBFORMAT_H_ -#include <stdio.h> +#include <cstddef> +#include <cstdint> +#include <string> + #include "leveldb/comparator.h" #include "leveldb/db.h" #include "leveldb/filter_policy.h" @@ -48,10 +51,7 @@ class InternalKey; // Value types encoded as the last component of internal keys. // DO NOT CHANGE THESE ENUM VALUES: they are embedded in the on-disk // data structures. -enum ValueType { - kTypeDeletion = 0x0, - kTypeValue = 0x1 -}; +enum ValueType { kTypeDeletion = 0x0, kTypeValue = 0x1 }; // kValueTypeForSeek defines the ValueType that should be passed when // constructing a ParsedInternalKey object for seeking to a particular // sequence number (since we sort sequence numbers in decreasing order @@ -64,17 +64,16 @@ typedef uint64_t SequenceNumber; // We leave eight bits empty at the bottom so a type and sequence# // can be packed together into 64-bits. -static const SequenceNumber kMaxSequenceNumber = - ((0x1ull << 56) - 1); +static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1); struct ParsedInternalKey { Slice user_key; SequenceNumber sequence; ValueType type; - ParsedInternalKey() { } // Intentionally left uninitialized (for speed) + ParsedInternalKey() {} // Intentionally left uninitialized (for speed) ParsedInternalKey(const Slice& u, const SequenceNumber& seq, ValueType t) - : user_key(u), sequence(seq), type(t) { } + : user_key(u), sequence(seq), type(t) {} std::string DebugString() const; }; @@ -84,15 +83,13 @@ inline size_t InternalKeyEncodingLength(const ParsedInternalKey& key) { } // Append the serialization of "key" to *result. -extern void AppendInternalKey(std::string* result, - const ParsedInternalKey& key); +void AppendInternalKey(std::string* result, const ParsedInternalKey& key); // Attempt to parse an internal key from "internal_key". On success, // stores the parsed data in "*result", and returns true. // // On error, returns false, leaves "*result" in an undefined state. -extern bool ParseInternalKey(const Slice& internal_key, - ParsedInternalKey* result); +bool ParseInternalKey(const Slice& internal_key, ParsedInternalKey* result); // Returns the user key portion of an internal key. 
inline Slice ExtractUserKey(const Slice& internal_key) { @@ -100,27 +97,19 @@ inline Slice ExtractUserKey(const Slice& internal_key) { return Slice(internal_key.data(), internal_key.size() - 8); } -inline ValueType ExtractValueType(const Slice& internal_key) { - assert(internal_key.size() >= 8); - const size_t n = internal_key.size(); - uint64_t num = DecodeFixed64(internal_key.data() + n - 8); - unsigned char c = num & 0xff; - return static_cast<ValueType>(c); -} - // A comparator for internal keys that uses a specified comparator for // the user key portion and breaks ties by decreasing sequence number. class InternalKeyComparator : public Comparator { private: const Comparator* user_comparator_; + public: - explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) { } - virtual const char* Name() const; - virtual int Compare(const Slice& a, const Slice& b) const; - virtual void FindShortestSeparator( - std::string* start, - const Slice& limit) const; - virtual void FindShortSuccessor(std::string* key) const; + explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) {} + const char* Name() const override; + int Compare(const Slice& a, const Slice& b) const override; + void FindShortestSeparator(std::string* start, + const Slice& limit) const override; + void FindShortSuccessor(std::string* key) const override; const Comparator* user_comparator() const { return user_comparator_; } @@ -131,11 +120,12 @@ class InternalKeyComparator : public Comparator { class InternalFilterPolicy : public FilterPolicy { private: const FilterPolicy* const user_policy_; + public: - explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) { } - virtual const char* Name() const; - virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const; - virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const; + explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) {} + const char* Name() const override; + void CreateFilter(const Slice* keys, int n, std::string* dst) const override; + bool KeyMayMatch(const Slice& key, const Slice& filter) const override; }; // Modules in this directory should keep internal keys wrapped inside @@ -144,13 +134,18 @@ class InternalFilterPolicy : public FilterPolicy { class InternalKey { private: std::string rep_; + public: - InternalKey() { } // Leave rep_ as empty to indicate it is invalid + InternalKey() {} // Leave rep_ as empty to indicate it is invalid InternalKey(const Slice& user_key, SequenceNumber s, ValueType t) { AppendInternalKey(&rep_, ParsedInternalKey(user_key, s, t)); } - void DecodeFrom(const Slice& s) { rep_.assign(s.data(), s.size()); } + bool DecodeFrom(const Slice& s) { + rep_.assign(s.data(), s.size()); + return !rep_.empty(); + } + Slice Encode() const { assert(!rep_.empty()); return rep_; @@ -168,8 +163,8 @@ class InternalKey { std::string DebugString() const; }; -inline int InternalKeyComparator::Compare( - const InternalKey& a, const InternalKey& b) const { +inline int InternalKeyComparator::Compare(const InternalKey& a, + const InternalKey& b) const { return Compare(a.Encode(), b.Encode()); } @@ -178,11 +173,11 @@ inline bool ParseInternalKey(const Slice& internal_key, const size_t n = internal_key.size(); if (n < 8) return false; uint64_t num = DecodeFixed64(internal_key.data() + n - 8); - unsigned char c = num & 0xff; + uint8_t c = num & 0xff; result->sequence = num >> 8; result->type = static_cast<ValueType>(c); result->user_key = Slice(internal_key.data(), n - 8); - 
return (c <= static_cast<unsigned char>(kTypeValue)); + return (c <= static_cast<uint8_t>(kTypeValue)); } // A helper class useful for DBImpl::Get() @@ -192,6 +187,9 @@ class LookupKey { // the specified sequence number. LookupKey(const Slice& user_key, SequenceNumber sequence); + LookupKey(const LookupKey&) = delete; + LookupKey& operator=(const LookupKey&) = delete; + ~LookupKey(); // Return a key suitable for lookup in a MemTable. @@ -214,11 +212,7 @@ class LookupKey { const char* start_; const char* kstart_; const char* end_; - char space_[200]; // Avoid allocation for short keys - - // No copying allowed - LookupKey(const LookupKey&); - void operator=(const LookupKey&); + char space_[200]; // Avoid allocation for short keys }; inline LookupKey::~LookupKey() { diff --git a/src/leveldb/db/dbformat_test.cc b/src/leveldb/db/dbformat_test.cc index 5d82f5d313..1209369c31 100644 --- a/src/leveldb/db/dbformat_test.cc +++ b/src/leveldb/db/dbformat_test.cc @@ -8,8 +8,7 @@ namespace leveldb { -static std::string IKey(const std::string& user_key, - uint64_t seq, +static std::string IKey(const std::string& user_key, uint64_t seq, ValueType vt) { std::string encoded; AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt)); @@ -28,9 +27,7 @@ static std::string ShortSuccessor(const std::string& s) { return result; } -static void TestKey(const std::string& key, - uint64_t seq, - ValueType vt) { +static void TestKey(const std::string& key, uint64_t seq, ValueType vt) { std::string encoded = IKey(key, seq, vt); Slice in(encoded); @@ -44,16 +41,22 @@ static void TestKey(const std::string& key, ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded)); } -class FormatTest { }; +class FormatTest {}; TEST(FormatTest, InternalKey_EncodeDecode) { - const char* keys[] = { "", "k", "hello", "longggggggggggggggggggggg" }; - const uint64_t seq[] = { - 1, 2, 3, - (1ull << 8) - 1, 1ull << 8, (1ull << 8) + 1, - (1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1, - (1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1 - }; + const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"}; + const uint64_t seq[] = {1, + 2, + 3, + (1ull << 8) - 1, + 1ull << 8, + (1ull << 8) + 1, + (1ull << 16) - 1, + 1ull << 16, + (1ull << 16) + 1, + (1ull << 32) - 1, + 1ull << 32, + (1ull << 32) + 1}; for (int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) { for (int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) { TestKey(keys[k], seq[s], kTypeValue); @@ -62,40 +65,44 @@ TEST(FormatTest, InternalKey_EncodeDecode) { } } +TEST(FormatTest, InternalKey_DecodeFromEmpty) { + InternalKey internal_key; + + ASSERT_TRUE(!internal_key.DecodeFrom("")); +} + TEST(FormatTest, InternalKeyShortSeparator) { // When user keys are same ASSERT_EQ(IKey("foo", 100, kTypeValue), - Shorten(IKey("foo", 100, kTypeValue), - IKey("foo", 99, kTypeValue))); - ASSERT_EQ(IKey("foo", 100, kTypeValue), - Shorten(IKey("foo", 100, kTypeValue), - IKey("foo", 101, kTypeValue))); - ASSERT_EQ(IKey("foo", 100, kTypeValue), - Shorten(IKey("foo", 100, kTypeValue), - IKey("foo", 100, kTypeValue))); - ASSERT_EQ(IKey("foo", 100, kTypeValue), - Shorten(IKey("foo", 100, kTypeValue), - IKey("foo", 100, kTypeDeletion))); + Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 99, kTypeValue))); + ASSERT_EQ( + IKey("foo", 100, kTypeValue), + Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 101, kTypeValue))); + ASSERT_EQ( + IKey("foo", 100, kTypeValue), + Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeValue))); + ASSERT_EQ( + IKey("foo", 100, kTypeValue), + 
Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeDeletion))); // When user keys are misordered ASSERT_EQ(IKey("foo", 100, kTypeValue), - Shorten(IKey("foo", 100, kTypeValue), - IKey("bar", 99, kTypeValue))); + Shorten(IKey("foo", 100, kTypeValue), IKey("bar", 99, kTypeValue))); // When user keys are different, but correctly ordered - ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek), - Shorten(IKey("foo", 100, kTypeValue), - IKey("hello", 200, kTypeValue))); + ASSERT_EQ( + IKey("g", kMaxSequenceNumber, kValueTypeForSeek), + Shorten(IKey("foo", 100, kTypeValue), IKey("hello", 200, kTypeValue))); // When start user key is prefix of limit user key - ASSERT_EQ(IKey("foo", 100, kTypeValue), - Shorten(IKey("foo", 100, kTypeValue), - IKey("foobar", 200, kTypeValue))); + ASSERT_EQ( + IKey("foo", 100, kTypeValue), + Shorten(IKey("foo", 100, kTypeValue), IKey("foobar", 200, kTypeValue))); // When limit user key is prefix of start user key - ASSERT_EQ(IKey("foobar", 100, kTypeValue), - Shorten(IKey("foobar", 100, kTypeValue), - IKey("foo", 200, kTypeValue))); + ASSERT_EQ( + IKey("foobar", 100, kTypeValue), + Shorten(IKey("foobar", 100, kTypeValue), IKey("foo", 200, kTypeValue))); } TEST(FormatTest, InternalKeyShortestSuccessor) { @@ -105,8 +112,20 @@ TEST(FormatTest, InternalKeyShortestSuccessor) { ShortSuccessor(IKey("\xff\xff", 100, kTypeValue))); } -} // namespace leveldb +TEST(FormatTest, ParsedInternalKeyDebugString) { + ParsedInternalKey key("The \"key\" in 'single quotes'", 42, kTypeValue); + + ASSERT_EQ("'The \"key\" in 'single quotes'' @ 42 : 1", key.DebugString()); +} + +TEST(FormatTest, InternalKeyDebugString) { + InternalKey key("The \"key\" in 'single quotes'", 42, kTypeValue); + ASSERT_EQ("'The \"key\" in 'single quotes'' @ 42 : 1", key.DebugString()); -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); + InternalKey invalid_key; + ASSERT_EQ("(bad)", invalid_key.DebugString()); } + +} // namespace leveldb + +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/db/dumpfile.cc b/src/leveldb/db/dumpfile.cc index 61c47c2ff9..77d59003cf 100644 --- a/src/leveldb/db/dumpfile.cc +++ b/src/leveldb/db/dumpfile.cc @@ -2,7 +2,10 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#include "leveldb/dumpfile.h" + #include <stdio.h> + #include "db/dbformat.h" #include "db/filename.h" #include "db/log_reader.h" @@ -35,8 +38,7 @@ bool GuessType(const std::string& fname, FileType* type) { // Notified when log reader encounters corruption. class CorruptionReporter : public log::Reader::Reporter { public: - WritableFile* dst_; - virtual void Corruption(size_t bytes, const Status& status) { + void Corruption(size_t bytes, const Status& status) override { std::string r = "corruption: "; AppendNumberTo(&r, bytes); r += " bytes; "; @@ -44,6 +46,8 @@ class CorruptionReporter : public log::Reader::Reporter { r.push_back('\n'); dst_->Append(r); } + + WritableFile* dst_; }; // Print contents of a log file. (*func)() is called on every record. @@ -70,8 +74,7 @@ Status PrintLogContents(Env* env, const std::string& fname, // Called on every item found in a WriteBatch. 
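Note: the ModelDB handler near the top of this hunk set and the WriteBatchItemPrinter below both rely on the same WriteBatch::Handler callback interface, where Iterate() replays every recorded operation through the handler's Put/Delete overrides. A minimal standalone sketch of that pattern, assuming only the public leveldb headers (CountingHandler and CountOps are illustrative names, not part of this tree):

    #include "leveldb/slice.h"
    #include "leveldb/status.h"
    #include "leveldb/write_batch.h"

    // Counts the operations recorded in a batch by replaying it through a handler.
    class CountingHandler : public leveldb::WriteBatch::Handler {
     public:
      void Put(const leveldb::Slice& key, const leveldb::Slice& value) override {
        ++puts_;
      }
      void Delete(const leveldb::Slice& key) override { ++deletes_; }

      int puts_ = 0;
      int deletes_ = 0;
    };

    leveldb::Status CountOps(leveldb::WriteBatch* batch, int* puts, int* deletes) {
      CountingHandler handler;
      // Iterate() invokes Put()/Delete() once per record, in insertion order.
      leveldb::Status s = batch->Iterate(&handler);
      *puts = handler.puts_;
      *deletes = handler.deletes_;
      return s;
    }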
class WriteBatchItemPrinter : public WriteBatch::Handler { public: - WritableFile* dst_; - virtual void Put(const Slice& key, const Slice& value) { + void Put(const Slice& key, const Slice& value) override { std::string r = " put '"; AppendEscapedStringTo(&r, key); r += "' '"; @@ -79,14 +82,15 @@ class WriteBatchItemPrinter : public WriteBatch::Handler { r += "'\n"; dst_->Append(r); } - virtual void Delete(const Slice& key) { + void Delete(const Slice& key) override { std::string r = " del '"; AppendEscapedStringTo(&r, key); r += "'\n"; dst_->Append(r); } -}; + WritableFile* dst_; +}; // Called on every log record (each one of which is a WriteBatch) // found in a kLogFile. @@ -142,8 +146,8 @@ Status DumpDescriptor(Env* env, const std::string& fname, WritableFile* dst) { Status DumpTable(Env* env, const std::string& fname, WritableFile* dst) { uint64_t file_size; - RandomAccessFile* file = NULL; - Table* table = NULL; + RandomAccessFile* file = nullptr; + Table* table = nullptr; Status s = env->GetFileSize(fname, &file_size); if (s.ok()) { s = env->NewRandomAccessFile(fname, &file); @@ -213,9 +217,12 @@ Status DumpFile(Env* env, const std::string& fname, WritableFile* dst) { return Status::InvalidArgument(fname + ": unknown file type"); } switch (ftype) { - case kLogFile: return DumpLog(env, fname, dst); - case kDescriptorFile: return DumpDescriptor(env, fname, dst); - case kTableFile: return DumpTable(env, fname, dst); + case kLogFile: + return DumpLog(env, fname, dst); + case kDescriptorFile: + return DumpDescriptor(env, fname, dst); + case kTableFile: + return DumpTable(env, fname, dst); default: break; } diff --git a/src/leveldb/db/fault_injection_test.cc b/src/leveldb/db/fault_injection_test.cc index 875dfe81ee..bf705cb60f 100644 --- a/src/leveldb/db/fault_injection_test.cc +++ b/src/leveldb/db/fault_injection_test.cc @@ -6,18 +6,20 @@ // the last "sync". It then checks for data loss errors by purposely dropping // file data (or entire files) not protected by a "sync". 
-#include "leveldb/db.h" - #include <map> #include <set> + #include "db/db_impl.h" #include "db/filename.h" #include "db/log_format.h" #include "db/version_set.h" #include "leveldb/cache.h" +#include "leveldb/db.h" #include "leveldb/env.h" #include "leveldb/table.h" #include "leveldb/write_batch.h" +#include "port/port.h" +#include "port/thread_annotations.h" #include "util/logging.h" #include "util/mutexlock.h" #include "util/testharness.h" @@ -34,7 +36,7 @@ class FaultInjectionTestEnv; namespace { // Assume a filename, and not a directory name like "/foo/bar/" -static std::string GetDirName(const std::string filename) { +static std::string GetDirName(const std::string& filename) { size_t found = filename.find_last_of("/\\"); if (found == std::string::npos) { return ""; @@ -54,8 +56,7 @@ Status Truncate(const std::string& filename, uint64_t length) { SequentialFile* orig_file; Status s = env->NewSequentialFile(filename, &orig_file); - if (!s.ok()) - return s; + if (!s.ok()) return s; char* scratch = new char[length]; leveldb::Slice result; @@ -83,15 +84,15 @@ Status Truncate(const std::string& filename, uint64_t length) { struct FileState { std::string filename_; - ssize_t pos_; - ssize_t pos_at_last_sync_; - ssize_t pos_at_last_flush_; + int64_t pos_; + int64_t pos_at_last_sync_; + int64_t pos_at_last_flush_; FileState(const std::string& filename) : filename_(filename), pos_(-1), pos_at_last_sync_(-1), - pos_at_last_flush_(-1) { } + pos_at_last_flush_(-1) {} FileState() : pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {} @@ -106,14 +107,14 @@ struct FileState { // is written to or sync'ed. class TestWritableFile : public WritableFile { public: - TestWritableFile(const FileState& state, - WritableFile* f, + TestWritableFile(const FileState& state, WritableFile* f, FaultInjectionTestEnv* env); - virtual ~TestWritableFile(); - virtual Status Append(const Slice& data); - virtual Status Close(); - virtual Status Flush(); - virtual Status Sync(); + ~TestWritableFile() override; + Status Append(const Slice& data) override; + Status Close() override; + Status Flush() override; + Status Sync() override; + std::string GetName() const override { return ""; } private: FileState state_; @@ -126,14 +127,15 @@ class TestWritableFile : public WritableFile { class FaultInjectionTestEnv : public EnvWrapper { public: - FaultInjectionTestEnv() : EnvWrapper(Env::Default()), filesystem_active_(true) {} - virtual ~FaultInjectionTestEnv() { } - virtual Status NewWritableFile(const std::string& fname, - WritableFile** result); - virtual Status NewAppendableFile(const std::string& fname, - WritableFile** result); - virtual Status DeleteFile(const std::string& f); - virtual Status RenameFile(const std::string& s, const std::string& t); + FaultInjectionTestEnv() + : EnvWrapper(Env::Default()), filesystem_active_(true) {} + ~FaultInjectionTestEnv() override = default; + Status NewWritableFile(const std::string& fname, + WritableFile** result) override; + Status NewAppendableFile(const std::string& fname, + WritableFile** result) override; + Status DeleteFile(const std::string& f) override; + Status RenameFile(const std::string& s, const std::string& t) override; void WritableFileClosed(const FileState& state); Status DropUnsyncedFileData(); @@ -146,24 +148,26 @@ class FaultInjectionTestEnv : public EnvWrapper { // system reset. Setting to inactive will freeze our saved filesystem state so // that it will stop being recorded. It can then be reset back to the state at // the time of the reset. 
- bool IsFilesystemActive() const { return filesystem_active_; } - void SetFilesystemActive(bool active) { filesystem_active_ = active; } + bool IsFilesystemActive() LOCKS_EXCLUDED(mutex_) { + MutexLock l(&mutex_); + return filesystem_active_; + } + void SetFilesystemActive(bool active) LOCKS_EXCLUDED(mutex_) { + MutexLock l(&mutex_); + filesystem_active_ = active; + } private: port::Mutex mutex_; - std::map<std::string, FileState> db_file_state_; - std::set<std::string> new_files_since_last_dir_sync_; - bool filesystem_active_; // Record flushes, syncs, writes + std::map<std::string, FileState> db_file_state_ GUARDED_BY(mutex_); + std::set<std::string> new_files_since_last_dir_sync_ GUARDED_BY(mutex_); + bool filesystem_active_ GUARDED_BY(mutex_); // Record flushes, syncs, writes }; -TestWritableFile::TestWritableFile(const FileState& state, - WritableFile* f, +TestWritableFile::TestWritableFile(const FileState& state, WritableFile* f, FaultInjectionTestEnv* env) - : state_(state), - target_(f), - writable_file_opened_(true), - env_(env) { - assert(f != NULL); + : state_(state), target_(f), writable_file_opened_(true), env_(env) { + assert(f != nullptr); } TestWritableFile::~TestWritableFile() { @@ -265,10 +269,11 @@ Status FaultInjectionTestEnv::NewAppendableFile(const std::string& fname, Status FaultInjectionTestEnv::DropUnsyncedFileData() { Status s; MutexLock l(&mutex_); - for (std::map<std::string, FileState>::const_iterator it = - db_file_state_.begin(); - s.ok() && it != db_file_state_.end(); ++it) { - const FileState& state = it->second; + for (const auto& kvp : db_file_state_) { + if (!s.ok()) { + break; + } + const FileState& state = kvp.second; if (!state.IsFullySynced()) { s = state.DropUnsyncedData(); } @@ -328,7 +333,6 @@ void FaultInjectionTestEnv::ResetState() { // Since we are not destroying the database, the existing files // should keep their recorded synced/flushed state. Therefore // we do not reset db_file_state_ and new_files_since_last_dir_sync_. - MutexLock l(&mutex_); SetFilesystemActive(true); } @@ -338,12 +342,14 @@ Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() { std::set<std::string> new_files(new_files_since_last_dir_sync_.begin(), new_files_since_last_dir_sync_.end()); mutex_.Unlock(); - Status s; - std::set<std::string>::const_iterator it; - for (it = new_files.begin(); s.ok() && it != new_files.end(); ++it) { - s = DeleteFile(*it); + Status status; + for (const auto& new_file : new_files) { + Status delete_status = DeleteFile(new_file); + if (!delete_status.ok() && status.ok()) { + status = std::move(delete_status); + } } - return s; + return status; } void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) { @@ -352,7 +358,7 @@ void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) { } Status FileState::DropUnsyncedData() const { - ssize_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_; + int64_t sync_pos = pos_at_last_sync_ == -1 ? 
0 : pos_at_last_sync_; return Truncate(filename_, sync_pos); } @@ -370,7 +376,7 @@ class FaultInjectionTest { FaultInjectionTest() : env_(new FaultInjectionTestEnv), tiny_cache_(NewLRUCache(100)), - db_(NULL) { + db_(nullptr) { dbname_ = test::TmpDir() + "/fault_test"; DestroyDB(dbname_, Options()); // Destroy any db from earlier run options_.reuse_logs = true; @@ -387,9 +393,7 @@ class FaultInjectionTest { delete env_; } - void ReuseLogs(bool reuse) { - options_.reuse_logs = reuse; - } + void ReuseLogs(bool reuse) { options_.reuse_logs = reuse; } void Build(int start_idx, int num_vals) { std::string key_space, value_space; @@ -449,19 +453,18 @@ class FaultInjectionTest { Status OpenDB() { delete db_; - db_ = NULL; + db_ = nullptr; env_->ResetState(); return DB::Open(options_, dbname_, &db_); } void CloseDB() { delete db_; - db_ = NULL; + db_ = nullptr; } void DeleteAllData() { Iterator* iter = db_->NewIterator(ReadOptions()); - WriteOptions options; for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ASSERT_OK(db_->Delete(WriteOptions(), iter->key())); } @@ -485,23 +488,22 @@ class FaultInjectionTest { void PartialCompactTestPreFault(int num_pre_sync, int num_post_sync) { DeleteAllData(); Build(0, num_pre_sync); - db_->CompactRange(NULL, NULL); + db_->CompactRange(nullptr, nullptr); Build(num_pre_sync, num_post_sync); } void PartialCompactTestReopenWithFault(ResetMethod reset_method, - int num_pre_sync, - int num_post_sync) { + int num_pre_sync, int num_post_sync) { env_->SetFilesystemActive(false); CloseDB(); ResetDBState(reset_method); ASSERT_OK(OpenDB()); ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR)); - ASSERT_OK(Verify(num_pre_sync, num_post_sync, FaultInjectionTest::VAL_EXPECT_ERROR)); + ASSERT_OK(Verify(num_pre_sync, num_post_sync, + FaultInjectionTest::VAL_EXPECT_ERROR)); } - void NoWriteTestPreFault() { - } + void NoWriteTestPreFault() {} void NoWriteTestReopenWithFault(ResetMethod reset_method) { CloseDB(); @@ -517,8 +519,7 @@ class FaultInjectionTest { int num_post_sync = rnd.Uniform(kMaxNumValues); PartialCompactTestPreFault(num_pre_sync, num_post_sync); - PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA, - num_pre_sync, + PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA, num_pre_sync, num_post_sync); NoWriteTestPreFault(); @@ -528,8 +529,7 @@ class FaultInjectionTest { // No new files created so we expect all values since no files will be // dropped. PartialCompactTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES, - num_pre_sync + num_post_sync, - 0); + num_pre_sync + num_post_sync, 0); NoWriteTestPreFault(); NoWriteTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES); @@ -549,6 +549,4 @@ TEST(FaultInjectionTest, FaultTestWithLogReuse) { } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/db/filename.cc b/src/leveldb/db/filename.cc index da32946d99..85de45c507 100644 --- a/src/leveldb/db/filename.cc +++ b/src/leveldb/db/filename.cc @@ -2,9 +2,11 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#include "db/filename.h" + #include <ctype.h> #include <stdio.h> -#include "db/filename.h" + #include "db/dbformat.h" #include "leveldb/env.h" #include "util/logging.h" @@ -12,31 +14,30 @@ namespace leveldb { // A utility routine: write "data" to the named file and Sync() it. 
-extern Status WriteStringToFileSync(Env* env, const Slice& data, - const std::string& fname); +Status WriteStringToFileSync(Env* env, const Slice& data, + const std::string& fname); -static std::string MakeFileName(const std::string& name, uint64_t number, +static std::string MakeFileName(const std::string& dbname, uint64_t number, const char* suffix) { char buf[100]; snprintf(buf, sizeof(buf), "/%06llu.%s", - static_cast<unsigned long long>(number), - suffix); - return name + buf; + static_cast<unsigned long long>(number), suffix); + return dbname + buf; } -std::string LogFileName(const std::string& name, uint64_t number) { +std::string LogFileName(const std::string& dbname, uint64_t number) { assert(number > 0); - return MakeFileName(name, number, "log"); + return MakeFileName(dbname, number, "log"); } -std::string TableFileName(const std::string& name, uint64_t number) { +std::string TableFileName(const std::string& dbname, uint64_t number) { assert(number > 0); - return MakeFileName(name, number, "ldb"); + return MakeFileName(dbname, number, "ldb"); } -std::string SSTTableFileName(const std::string& name, uint64_t number) { +std::string SSTTableFileName(const std::string& dbname, uint64_t number) { assert(number > 0); - return MakeFileName(name, number, "sst"); + return MakeFileName(dbname, number, "sst"); } std::string DescriptorFileName(const std::string& dbname, uint64_t number) { @@ -51,9 +52,7 @@ std::string CurrentFileName(const std::string& dbname) { return dbname + "/CURRENT"; } -std::string LockFileName(const std::string& dbname) { - return dbname + "/LOCK"; -} +std::string LockFileName(const std::string& dbname) { return dbname + "/LOCK"; } std::string TempFileName(const std::string& dbname, uint64_t number) { assert(number > 0); @@ -69,7 +68,6 @@ std::string OldInfoLogFileName(const std::string& dbname) { return dbname + "/LOG.old"; } - // Owned filenames have the form: // dbname/CURRENT // dbname/LOCK @@ -77,10 +75,9 @@ std::string OldInfoLogFileName(const std::string& dbname) { // dbname/LOG.old // dbname/MANIFEST-[0-9]+ // dbname/[0-9]+.(log|sst|ldb) -bool ParseFileName(const std::string& fname, - uint64_t* number, +bool ParseFileName(const std::string& filename, uint64_t* number, FileType* type) { - Slice rest(fname); + Slice rest(filename); if (rest == "CURRENT") { *number = 0; *type = kCurrentFile; diff --git a/src/leveldb/db/filename.h b/src/leveldb/db/filename.h index 87a752605d..524e813c06 100644 --- a/src/leveldb/db/filename.h +++ b/src/leveldb/db/filename.h @@ -8,7 +8,9 @@ #define STORAGE_LEVELDB_DB_FILENAME_H_ #include <stdint.h> + #include <string> + #include "leveldb/slice.h" #include "leveldb/status.h" #include "port/port.h" @@ -30,55 +32,52 @@ enum FileType { // Return the name of the log file with the specified number // in the db named by "dbname". The result will be prefixed with // "dbname". -extern std::string LogFileName(const std::string& dbname, uint64_t number); +std::string LogFileName(const std::string& dbname, uint64_t number); // Return the name of the sstable with the specified number // in the db named by "dbname". The result will be prefixed with // "dbname". -extern std::string TableFileName(const std::string& dbname, uint64_t number); +std::string TableFileName(const std::string& dbname, uint64_t number); // Return the legacy file name for an sstable with the specified number // in the db named by "dbname". The result will be prefixed with // "dbname". 
-extern std::string SSTTableFileName(const std::string& dbname, uint64_t number); +std::string SSTTableFileName(const std::string& dbname, uint64_t number); // Return the name of the descriptor file for the db named by // "dbname" and the specified incarnation number. The result will be // prefixed with "dbname". -extern std::string DescriptorFileName(const std::string& dbname, - uint64_t number); +std::string DescriptorFileName(const std::string& dbname, uint64_t number); // Return the name of the current file. This file contains the name // of the current manifest file. The result will be prefixed with // "dbname". -extern std::string CurrentFileName(const std::string& dbname); +std::string CurrentFileName(const std::string& dbname); // Return the name of the lock file for the db named by // "dbname". The result will be prefixed with "dbname". -extern std::string LockFileName(const std::string& dbname); +std::string LockFileName(const std::string& dbname); // Return the name of a temporary file owned by the db named "dbname". // The result will be prefixed with "dbname". -extern std::string TempFileName(const std::string& dbname, uint64_t number); +std::string TempFileName(const std::string& dbname, uint64_t number); // Return the name of the info log file for "dbname". -extern std::string InfoLogFileName(const std::string& dbname); +std::string InfoLogFileName(const std::string& dbname); // Return the name of the old info log file for "dbname". -extern std::string OldInfoLogFileName(const std::string& dbname); +std::string OldInfoLogFileName(const std::string& dbname); // If filename is a leveldb file, store the type of the file in *type. // The number encoded in the filename is stored in *number. If the // filename was successfully parsed, returns true. Else return false. -extern bool ParseFileName(const std::string& filename, - uint64_t* number, - FileType* type); +bool ParseFileName(const std::string& filename, uint64_t* number, + FileType* type); // Make the CURRENT file point to the descriptor file with the // specified number. 
-extern Status SetCurrentFile(Env* env, const std::string& dbname, - uint64_t descriptor_number); - +Status SetCurrentFile(Env* env, const std::string& dbname, + uint64_t descriptor_number); } // namespace leveldb diff --git a/src/leveldb/db/filename_test.cc b/src/leveldb/db/filename_test.cc index a32556deaf..952f32008e 100644 --- a/src/leveldb/db/filename_test.cc +++ b/src/leveldb/db/filename_test.cc @@ -11,7 +11,7 @@ namespace leveldb { -class FileNameTest { }; +class FileNameTest {}; TEST(FileNameTest, Parse) { Slice db; @@ -24,17 +24,17 @@ TEST(FileNameTest, Parse) { uint64_t number; FileType type; } cases[] = { - { "100.log", 100, kLogFile }, - { "0.log", 0, kLogFile }, - { "0.sst", 0, kTableFile }, - { "0.ldb", 0, kTableFile }, - { "CURRENT", 0, kCurrentFile }, - { "LOCK", 0, kDBLockFile }, - { "MANIFEST-2", 2, kDescriptorFile }, - { "MANIFEST-7", 7, kDescriptorFile }, - { "LOG", 0, kInfoLogFile }, - { "LOG.old", 0, kInfoLogFile }, - { "18446744073709551615.log", 18446744073709551615ull, kLogFile }, + {"100.log", 100, kLogFile}, + {"0.log", 0, kLogFile}, + {"0.sst", 0, kTableFile}, + {"0.ldb", 0, kTableFile}, + {"CURRENT", 0, kCurrentFile}, + {"LOCK", 0, kDBLockFile}, + {"MANIFEST-2", 2, kDescriptorFile}, + {"MANIFEST-7", 7, kDescriptorFile}, + {"LOG", 0, kInfoLogFile}, + {"LOG.old", 0, kInfoLogFile}, + {"18446744073709551615.log", 18446744073709551615ull, kLogFile}, }; for (int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) { std::string f = cases[i].fname; @@ -44,30 +44,28 @@ TEST(FileNameTest, Parse) { } // Errors - static const char* errors[] = { - "", - "foo", - "foo-dx-100.log", - ".log", - "", - "manifest", - "CURREN", - "CURRENTX", - "MANIFES", - "MANIFEST", - "MANIFEST-", - "XMANIFEST-3", - "MANIFEST-3x", - "LOC", - "LOCKx", - "LO", - "LOGx", - "18446744073709551616.log", - "184467440737095516150.log", - "100", - "100.", - "100.lop" - }; + static const char* errors[] = {"", + "foo", + "foo-dx-100.log", + ".log", + "", + "manifest", + "CURREN", + "CURRENTX", + "MANIFES", + "MANIFEST", + "MANIFEST-", + "XMANIFEST-3", + "MANIFEST-3x", + "LOC", + "LOCKx", + "LO", + "LOGx", + "18446744073709551616.log", + "184467440737095516150.log", + "100", + "100.", + "100.lop"}; for (int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) { std::string f = errors[i]; ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f; @@ -114,10 +112,20 @@ TEST(FileNameTest, Construction) { ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); ASSERT_EQ(999, number); ASSERT_EQ(kTempFile, type); + + fname = InfoLogFileName("foo"); + ASSERT_EQ("foo/", std::string(fname.data(), 4)); + ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); + ASSERT_EQ(0, number); + ASSERT_EQ(kInfoLogFile, type); + + fname = OldInfoLogFileName("foo"); + ASSERT_EQ("foo/", std::string(fname.data(), 4)); + ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); + ASSERT_EQ(0, number); + ASSERT_EQ(kInfoLogFile, type); } } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/db/leveldbutil.cc b/src/leveldb/db/leveldbutil.cc index d06d64d640..9ed9667d37 100644 --- a/src/leveldb/db/leveldbutil.cc +++ b/src/leveldb/db/leveldbutil.cc @@ -3,6 +3,7 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. 
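Note: as the filename.cc/filename.h hunks above show, every numbered leveldb file is just dbname + "/%06llu.<suffix>" (log, ldb, or the legacy sst), alongside the fixed names CURRENT, LOCK, LOG, LOG.old and MANIFEST-<n>. A standalone sketch of that naming scheme, using only the standard library (MakeNumberedFileName is an illustrative stand-in for the file's MakeFileName helper):

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Mirrors the "/%06llu.%s" pattern: six-digit zero-padded number plus suffix,
    // e.g. MakeNumberedFileName("/tmp/db", 7, "ldb") == "/tmp/db/000007.ldb".
    std::string MakeNumberedFileName(const std::string& dbname, uint64_t number,
                                     const char* suffix) {
      char buf[100];
      std::snprintf(buf, sizeof(buf), "/%06llu.%s",
                    static_cast<unsigned long long>(number), suffix);
      return dbname + buf;
    }

ParseFileName() in the same file is the inverse: it recognizes either one of the fixed names or "<number>.(log|sst|ldb)", as exercised by the FileNameTest cases above.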
#include <stdio.h> + #include "leveldb/dumpfile.h" #include "leveldb/env.h" #include "leveldb/status.h" @@ -12,14 +13,14 @@ namespace { class StdoutPrinter : public WritableFile { public: - virtual Status Append(const Slice& data) { + Status Append(const Slice& data) override { fwrite(data.data(), 1, data.size(), stdout); return Status::OK(); } - virtual Status Close() { return Status::OK(); } - virtual Status Flush() { return Status::OK(); } - virtual Status Sync() { return Status::OK(); } - virtual std::string GetName() const { return "[stdout]"; } + Status Close() override { return Status::OK(); } + Status Flush() override { return Status::OK(); } + Status Sync() override { return Status::OK(); } + std::string GetName() const override { return "[stdout]"; } }; bool HandleDumpCommand(Env* env, char** files, int num) { @@ -39,11 +40,9 @@ bool HandleDumpCommand(Env* env, char** files, int num) { } // namespace leveldb static void Usage() { - fprintf( - stderr, - "Usage: leveldbutil command...\n" - " dump files... -- dump contents of specified files\n" - ); + fprintf(stderr, + "Usage: leveldbutil command...\n" + " dump files... -- dump contents of specified files\n"); } int main(int argc, char** argv) { @@ -55,7 +54,7 @@ int main(int argc, char** argv) { } else { std::string command = argv[1]; if (command == "dump") { - ok = leveldb::HandleDumpCommand(env, argv+2, argc-2); + ok = leveldb::HandleDumpCommand(env, argv + 2, argc - 2); } else { Usage(); ok = false; diff --git a/src/leveldb/db/log_reader.cc b/src/leveldb/db/log_reader.cc index 8b6ad136d7..1ccfb7b34a 100644 --- a/src/leveldb/db/log_reader.cc +++ b/src/leveldb/db/log_reader.cc @@ -5,6 +5,7 @@ #include "db/log_reader.h" #include <stdio.h> + #include "leveldb/env.h" #include "util/coding.h" #include "util/crc32c.h" @@ -12,8 +13,7 @@ namespace leveldb { namespace log { -Reader::Reporter::~Reporter() { -} +Reader::Reporter::~Reporter() = default; Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum, uint64_t initial_offset) @@ -26,20 +26,16 @@ Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum, last_record_offset_(0), end_of_buffer_offset_(0), initial_offset_(initial_offset), - resyncing_(initial_offset > 0) { -} + resyncing_(initial_offset > 0) {} -Reader::~Reader() { - delete[] backing_store_; -} +Reader::~Reader() { delete[] backing_store_; } bool Reader::SkipToInitialBlock() { - size_t offset_in_block = initial_offset_ % kBlockSize; + const size_t offset_in_block = initial_offset_ % kBlockSize; uint64_t block_start_location = initial_offset_ - offset_in_block; // Don't search a block if we'd be in the trailer if (offset_in_block > kBlockSize - 6) { - offset_in_block = 0; block_start_location += kBlockSize; } @@ -99,9 +95,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) { // it could emit an empty kFirstType record at the tail end // of a block followed by a kFullType or kFirstType record // at the beginning of the next block. - if (scratch->empty()) { - in_fragmented_record = false; - } else { + if (!scratch->empty()) { ReportCorruption(scratch->size(), "partial record without end(1)"); } } @@ -117,9 +111,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) { // it could emit an empty kFirstType record at the tail end // of a block followed by a kFullType or kFirstType record // at the beginning of the next block. 
- if (scratch->empty()) { - in_fragmented_record = false; - } else { + if (!scratch->empty()) { ReportCorruption(scratch->size(), "partial record without end(2)"); } } @@ -181,16 +173,14 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) { return false; } -uint64_t Reader::LastRecordOffset() { - return last_record_offset_; -} +uint64_t Reader::LastRecordOffset() { return last_record_offset_; } void Reader::ReportCorruption(uint64_t bytes, const char* reason) { ReportDrop(bytes, Status::Corruption(reason, file_->GetName())); } void Reader::ReportDrop(uint64_t bytes, const Status& reason) { - if (reporter_ != NULL && + if (reporter_ != nullptr && end_of_buffer_offset_ - buffer_.size() - bytes >= initial_offset_) { reporter_->Corruption(static_cast<size_t>(bytes), reason); } diff --git a/src/leveldb/db/log_reader.h b/src/leveldb/db/log_reader.h index 8389d61f8f..001da8948a 100644 --- a/src/leveldb/db/log_reader.h +++ b/src/leveldb/db/log_reader.h @@ -32,7 +32,7 @@ class Reader { // Create a reader that will return log records from "*file". // "*file" must remain live while this Reader is in use. // - // If "reporter" is non-NULL, it is notified whenever some data is + // If "reporter" is non-null, it is notified whenever some data is // dropped due to a detected corruption. "*reporter" must remain // live while this Reader is in use. // @@ -43,6 +43,9 @@ class Reader { Reader(SequentialFile* file, Reporter* reporter, bool checksum, uint64_t initial_offset); + Reader(const Reader&) = delete; + Reader& operator=(const Reader&) = delete; + ~Reader(); // Read the next record into *record. Returns true if read @@ -58,26 +61,6 @@ class Reader { uint64_t LastRecordOffset(); private: - SequentialFile* const file_; - Reporter* const reporter_; - bool const checksum_; - char* const backing_store_; - Slice buffer_; - bool eof_; // Last Read() indicated EOF by returning < kBlockSize - - // Offset of the last record returned by ReadRecord. - uint64_t last_record_offset_; - // Offset of the first location past the end of buffer_. - uint64_t end_of_buffer_offset_; - - // Offset at which to start looking for the first record to return - uint64_t const initial_offset_; - - // True if we are resynchronizing after a seek (initial_offset_ > 0). In - // particular, a run of kMiddleType and kLastType records can be silently - // skipped in this mode - bool resyncing_; - // Extend record types with the following special values enum { kEof = kMaxRecordType + 1, @@ -102,9 +85,25 @@ class Reader { void ReportCorruption(uint64_t bytes, const char* reason); void ReportDrop(uint64_t bytes, const Status& reason); - // No copying allowed - Reader(const Reader&); - void operator=(const Reader&); + SequentialFile* const file_; + Reporter* const reporter_; + bool const checksum_; + char* const backing_store_; + Slice buffer_; + bool eof_; // Last Read() indicated EOF by returning < kBlockSize + + // Offset of the last record returned by ReadRecord. + uint64_t last_record_offset_; + // Offset of the first location past the end of buffer_. + uint64_t end_of_buffer_offset_; + + // Offset at which to start looking for the first record to return + uint64_t const initial_offset_; + + // True if we are resynchronizing after a seek (initial_offset_ > 0). 
In + // particular, a run of kMiddleType and kLastType records can be silently + // skipped in this mode + bool resyncing_; }; } // namespace log diff --git a/src/leveldb/db/log_test.cc b/src/leveldb/db/log_test.cc index 48a5928657..41fc043068 100644 --- a/src/leveldb/db/log_test.cc +++ b/src/leveldb/db/log_test.cc @@ -37,87 +37,12 @@ static std::string RandomSkewedString(int i, Random* rnd) { } class LogTest { - private: - class StringDest : public WritableFile { - public: - std::string contents_; - - virtual Status Close() { return Status::OK(); } - virtual Status Flush() { return Status::OK(); } - virtual Status Sync() { return Status::OK(); } - virtual Status Append(const Slice& slice) { - contents_.append(slice.data(), slice.size()); - return Status::OK(); - } - }; - - class StringSource : public SequentialFile { - public: - Slice contents_; - bool force_error_; - bool returned_partial_; - StringSource() : force_error_(false), returned_partial_(false) { } - - virtual Status Read(size_t n, Slice* result, char* scratch) { - ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error"; - - if (force_error_) { - force_error_ = false; - returned_partial_ = true; - return Status::Corruption("read error"); - } - - if (contents_.size() < n) { - n = contents_.size(); - returned_partial_ = true; - } - *result = Slice(contents_.data(), n); - contents_.remove_prefix(n); - return Status::OK(); - } - - virtual Status Skip(uint64_t n) { - if (n > contents_.size()) { - contents_.clear(); - return Status::NotFound("in-memory file skipped past end"); - } - - contents_.remove_prefix(n); - - return Status::OK(); - } - }; - - class ReportCollector : public Reader::Reporter { - public: - size_t dropped_bytes_; - std::string message_; - - ReportCollector() : dropped_bytes_(0) { } - virtual void Corruption(size_t bytes, const Status& status) { - dropped_bytes_ += bytes; - message_.append(status.ToString()); - } - }; - - StringDest dest_; - StringSource source_; - ReportCollector report_; - bool reading_; - Writer* writer_; - Reader* reader_; - - // Record metadata for testing initial offset functionality - static size_t initial_offset_record_sizes_[]; - static uint64_t initial_offset_last_record_offsets_[]; - static int num_initial_offset_records_; - public: - LogTest() : reading_(false), - writer_(new Writer(&dest_)), - reader_(new Reader(&source_, &report_, true/*checksum*/, - 0/*initial_offset*/)) { - } + LogTest() + : reading_(false), + writer_(new Writer(&dest_)), + reader_(new Reader(&source_, &report_, true /*checksum*/, + 0 /*initial_offset*/)) {} ~LogTest() { delete writer_; @@ -134,9 +59,7 @@ class LogTest { writer_->AddRecord(Slice(msg)); } - size_t WrittenBytes() const { - return dest_.contents_.size(); - } + size_t WrittenBytes() const { return dest_.contents_.size(); } std::string Read() { if (!reading_) { @@ -166,22 +89,16 @@ class LogTest { void FixChecksum(int header_offset, int len) { // Compute crc of type/len/data - uint32_t crc = crc32c::Value(&dest_.contents_[header_offset+6], 1 + len); + uint32_t crc = crc32c::Value(&dest_.contents_[header_offset + 6], 1 + len); crc = crc32c::Mask(crc); EncodeFixed32(&dest_.contents_[header_offset], crc); } - void ForceError() { - source_.force_error_ = true; - } + void ForceError() { source_.force_error_ = true; } - size_t DroppedBytes() const { - return report_.dropped_bytes_; - } + size_t DroppedBytes() const { return report_.dropped_bytes_; } - std::string ReportMessage() const { - return report_.message_; - } + std::string ReportMessage() 
const { return report_.message_; } // Returns OK iff recorded error message contains "msg" std::string MatchError(const std::string& msg) const { @@ -202,14 +119,14 @@ class LogTest { void StartReadingAt(uint64_t initial_offset) { delete reader_; - reader_ = new Reader(&source_, &report_, true/*checksum*/, initial_offset); + reader_ = new Reader(&source_, &report_, true /*checksum*/, initial_offset); } void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) { WriteInitialOffsetLog(); reading_ = true; source_.contents_ = Slice(dest_.contents_); - Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/, + Reader* offset_reader = new Reader(&source_, &report_, true /*checksum*/, WrittenBytes() + offset_past_end); Slice record; std::string scratch; @@ -222,8 +139,8 @@ class LogTest { WriteInitialOffsetLog(); reading_ = true; source_.contents_ = Slice(dest_.contents_); - Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/, - initial_offset); + Reader* offset_reader = + new Reader(&source_, &report_, true /*checksum*/, initial_offset); // Read all records from expected_record_offset through the last one. ASSERT_LT(expected_record_offset, num_initial_offset_records_); @@ -240,36 +157,110 @@ class LogTest { } delete offset_reader; } + + private: + class StringDest : public WritableFile { + public: + Status Close() override { return Status::OK(); } + Status Flush() override { return Status::OK(); } + Status Sync() override { return Status::OK(); } + Status Append(const Slice& slice) override { + contents_.append(slice.data(), slice.size()); + return Status::OK(); + } + std::string GetName() const override { return ""; } + + std::string contents_; + }; + + class StringSource : public SequentialFile { + public: + StringSource() : force_error_(false), returned_partial_(false) {} + + Status Read(size_t n, Slice* result, char* scratch) override { + ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error"; + + if (force_error_) { + force_error_ = false; + returned_partial_ = true; + return Status::Corruption("read error"); + } + + if (contents_.size() < n) { + n = contents_.size(); + returned_partial_ = true; + } + *result = Slice(contents_.data(), n); + contents_.remove_prefix(n); + return Status::OK(); + } + + Status Skip(uint64_t n) override { + if (n > contents_.size()) { + contents_.clear(); + return Status::NotFound("in-memory file skipped past end"); + } + + contents_.remove_prefix(n); + + return Status::OK(); + } + std::string GetName() const { return ""; } + + Slice contents_; + bool force_error_; + bool returned_partial_; + }; + + class ReportCollector : public Reader::Reporter { + public: + ReportCollector() : dropped_bytes_(0) {} + void Corruption(size_t bytes, const Status& status) override { + dropped_bytes_ += bytes; + message_.append(status.ToString()); + } + + size_t dropped_bytes_; + std::string message_; + }; + + // Record metadata for testing initial offset functionality + static size_t initial_offset_record_sizes_[]; + static uint64_t initial_offset_last_record_offsets_[]; + static int num_initial_offset_records_; + + StringDest dest_; + StringSource source_; + ReportCollector report_; + bool reading_; + Writer* writer_; + Reader* reader_; +}; + +size_t LogTest::initial_offset_record_sizes_[] = { + 10000, // Two sizable records in first block + 10000, + 2 * log::kBlockSize - 1000, // Span three blocks + 1, + 13716, // Consume all but two bytes of block 3. + log::kBlockSize - kHeaderSize, // Consume the entirety of block 4. 
}; -size_t LogTest::initial_offset_record_sizes_[] = - {10000, // Two sizable records in first block - 10000, - 2 * log::kBlockSize - 1000, // Span three blocks - 1, - 13716, // Consume all but two bytes of block 3. - log::kBlockSize - kHeaderSize, // Consume the entirety of block 4. - }; - -uint64_t LogTest::initial_offset_last_record_offsets_[] = - {0, - kHeaderSize + 10000, - 2 * (kHeaderSize + 10000), - 2 * (kHeaderSize + 10000) + - (2 * log::kBlockSize - 1000) + 3 * kHeaderSize, - 2 * (kHeaderSize + 10000) + - (2 * log::kBlockSize - 1000) + 3 * kHeaderSize - + kHeaderSize + 1, - 3 * log::kBlockSize, - }; +uint64_t LogTest::initial_offset_last_record_offsets_[] = { + 0, + kHeaderSize + 10000, + 2 * (kHeaderSize + 10000), + 2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize, + 2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize + + kHeaderSize + 1, + 3 * log::kBlockSize, +}; // LogTest::initial_offset_last_record_offsets_ must be defined before this. int LogTest::num_initial_offset_records_ = - sizeof(LogTest::initial_offset_last_record_offsets_)/sizeof(uint64_t); + sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t); -TEST(LogTest, Empty) { - ASSERT_EQ("EOF", Read()); -} +TEST(LogTest, Empty) { ASSERT_EQ("EOF", Read()); } TEST(LogTest, ReadWrite) { Write("foo"); @@ -306,7 +297,7 @@ TEST(LogTest, Fragmentation) { TEST(LogTest, MarginalTrailer) { // Make a trailer that is exactly the same length as an empty record. - const int n = kBlockSize - 2*kHeaderSize; + const int n = kBlockSize - 2 * kHeaderSize; Write(BigString("foo", n)); ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes()); Write(""); @@ -319,7 +310,7 @@ TEST(LogTest, MarginalTrailer) { TEST(LogTest, MarginalTrailer2) { // Make a trailer that is exactly the same length as an empty record. - const int n = kBlockSize - 2*kHeaderSize; + const int n = kBlockSize - 2 * kHeaderSize; Write(BigString("foo", n)); ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes()); Write("bar"); @@ -331,7 +322,7 @@ TEST(LogTest, MarginalTrailer2) { } TEST(LogTest, ShortTrailer) { - const int n = kBlockSize - 2*kHeaderSize + 4; + const int n = kBlockSize - 2 * kHeaderSize + 4; Write(BigString("foo", n)); ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes()); Write(""); @@ -343,7 +334,7 @@ TEST(LogTest, ShortTrailer) { } TEST(LogTest, AlignedEof) { - const int n = kBlockSize - 2*kHeaderSize + 4; + const int n = kBlockSize - 2 * kHeaderSize + 4; Write(BigString("foo", n)); ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes()); ASSERT_EQ(BigString("foo", n), Read()); @@ -394,7 +385,7 @@ TEST(LogTest, BadRecordType) { TEST(LogTest, TruncatedTrailingRecordIsIgnored) { Write("foo"); - ShrinkSize(4); // Drop all payload as well as a header byte + ShrinkSize(4); // Drop all payload as well as a header byte ASSERT_EQ("EOF", Read()); // Truncated last record is ignored, not treated as an error. ASSERT_EQ(0, DroppedBytes()); @@ -492,7 +483,7 @@ TEST(LogTest, SkipIntoMultiRecord) { // If initial_offset points to a record after first(R1) but before first(R2) // incomplete fragment errors are not actual errors, and must be suppressed // until a new first or full record is encountered. 
- Write(BigString("foo", 3*kBlockSize)); + Write(BigString("foo", 3 * kBlockSize)); Write("correct"); StartReadingAt(kBlockSize); @@ -514,44 +505,30 @@ TEST(LogTest, ErrorJoinsRecords) { Write("correct"); // Wipe the middle block - for (int offset = kBlockSize; offset < 2*kBlockSize; offset++) { + for (int offset = kBlockSize; offset < 2 * kBlockSize; offset++) { SetByte(offset, 'x'); } ASSERT_EQ("correct", Read()); ASSERT_EQ("EOF", Read()); const size_t dropped = DroppedBytes(); - ASSERT_LE(dropped, 2*kBlockSize + 100); - ASSERT_GE(dropped, 2*kBlockSize); + ASSERT_LE(dropped, 2 * kBlockSize + 100); + ASSERT_GE(dropped, 2 * kBlockSize); } -TEST(LogTest, ReadStart) { - CheckInitialOffsetRecord(0, 0); -} +TEST(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); } -TEST(LogTest, ReadSecondOneOff) { - CheckInitialOffsetRecord(1, 1); -} +TEST(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); } -TEST(LogTest, ReadSecondTenThousand) { - CheckInitialOffsetRecord(10000, 1); -} +TEST(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); } -TEST(LogTest, ReadSecondStart) { - CheckInitialOffsetRecord(10007, 1); -} +TEST(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); } -TEST(LogTest, ReadThirdOneOff) { - CheckInitialOffsetRecord(10008, 2); -} +TEST(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); } -TEST(LogTest, ReadThirdStart) { - CheckInitialOffsetRecord(20014, 2); -} +TEST(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); } -TEST(LogTest, ReadFourthOneOff) { - CheckInitialOffsetRecord(20015, 3); -} +TEST(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); } TEST(LogTest, ReadFourthFirstBlockTrailer) { CheckInitialOffsetRecord(log::kBlockSize - 4, 3); @@ -575,17 +552,11 @@ TEST(LogTest, ReadInitialOffsetIntoBlockPadding) { CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5); } -TEST(LogTest, ReadEnd) { - CheckOffsetPastEndReturnsNoRecords(0); -} +TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); } -TEST(LogTest, ReadPastEnd) { - CheckOffsetPastEndReturnsNoRecords(5); -} +TEST(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); } } // namespace log } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/db/log_writer.cc b/src/leveldb/db/log_writer.cc index 74a03270da..bfb16fb486 100644 --- a/src/leveldb/db/log_writer.cc +++ b/src/leveldb/db/log_writer.cc @@ -5,6 +5,7 @@ #include "db/log_writer.h" #include <stdint.h> + #include "leveldb/env.h" #include "util/coding.h" #include "util/crc32c.h" @@ -19,9 +20,7 @@ static void InitTypeCrc(uint32_t* type_crc) { } } -Writer::Writer(WritableFile* dest) - : dest_(dest), - block_offset_(0) { +Writer::Writer(WritableFile* dest) : dest_(dest), block_offset_(0) { InitTypeCrc(type_crc_); } @@ -30,8 +29,7 @@ Writer::Writer(WritableFile* dest, uint64_t dest_length) InitTypeCrc(type_crc_); } -Writer::~Writer() { -} +Writer::~Writer() = default; Status Writer::AddRecord(const Slice& slice) { const char* ptr = slice.data(); @@ -49,7 +47,7 @@ Status Writer::AddRecord(const Slice& slice) { // Switch to a new block if (leftover > 0) { // Fill the trailer (literal below relies on kHeaderSize being 7) - assert(kHeaderSize == 7); + static_assert(kHeaderSize == 7, ""); dest_->Append(Slice("\x00\x00\x00\x00\x00\x00", leftover)); } block_offset_ = 0; @@ -81,30 +79,31 @@ Status Writer::AddRecord(const Slice& slice) { return s; } 
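Note: for context on the EmitPhysicalRecord() hunk that follows, each physical record begins with the 7-byte header that AddRecord() budgets for above (kHeaderSize), and a block's final bytes are zero-filled as a trailer when fewer than 7 remain. A sketch of just the header packing, assuming the masked crc32c of the type byte plus payload has already been computed elsewhere (PackRecordHeader is an illustrative name, not a function in this tree):

    #include <cstddef>
    #include <cstdint>

    // Header layout written by EmitPhysicalRecord():
    //   bytes 0..3  masked crc32c of (type byte + payload), fixed32 little-endian
    //   bytes 4..5  payload length, little-endian (a fragment is at most 0xffff bytes)
    //   byte  6     record type (full / first / middle / last)
    void PackRecordHeader(char buf[7], uint32_t masked_crc, size_t length,
                          uint8_t type) {
      // Caller must ensure length fits in two bytes, as the real code asserts.
      buf[0] = static_cast<char>(masked_crc & 0xff);
      buf[1] = static_cast<char>((masked_crc >> 8) & 0xff);
      buf[2] = static_cast<char>((masked_crc >> 16) & 0xff);
      buf[3] = static_cast<char>((masked_crc >> 24) & 0xff);
      buf[4] = static_cast<char>(length & 0xff);
      buf[5] = static_cast<char>(length >> 8);
      buf[6] = static_cast<char>(type);
    }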
-Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr, size_t n) { - assert(n <= 0xffff); // Must fit in two bytes - assert(block_offset_ + kHeaderSize + n <= kBlockSize); +Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr, + size_t length) { + assert(length <= 0xffff); // Must fit in two bytes + assert(block_offset_ + kHeaderSize + length <= kBlockSize); // Format the header char buf[kHeaderSize]; - buf[4] = static_cast<char>(n & 0xff); - buf[5] = static_cast<char>(n >> 8); + buf[4] = static_cast<char>(length & 0xff); + buf[5] = static_cast<char>(length >> 8); buf[6] = static_cast<char>(t); // Compute the crc of the record type and the payload. - uint32_t crc = crc32c::Extend(type_crc_[t], ptr, n); - crc = crc32c::Mask(crc); // Adjust for storage + uint32_t crc = crc32c::Extend(type_crc_[t], ptr, length); + crc = crc32c::Mask(crc); // Adjust for storage EncodeFixed32(buf, crc); // Write the header and the payload Status s = dest_->Append(Slice(buf, kHeaderSize)); if (s.ok()) { - s = dest_->Append(Slice(ptr, n)); + s = dest_->Append(Slice(ptr, length)); if (s.ok()) { s = dest_->Flush(); } } - block_offset_ += kHeaderSize + n; + block_offset_ += kHeaderSize + length; return s; } diff --git a/src/leveldb/db/log_writer.h b/src/leveldb/db/log_writer.h index 9e7cc4705b..c0a21147ee 100644 --- a/src/leveldb/db/log_writer.h +++ b/src/leveldb/db/log_writer.h @@ -6,6 +6,7 @@ #define STORAGE_LEVELDB_DB_LOG_WRITER_H_ #include <stdint.h> + #include "db/log_format.h" #include "leveldb/slice.h" #include "leveldb/status.h" @@ -28,24 +29,23 @@ class Writer { // "*dest" must remain live while this Writer is in use. Writer(WritableFile* dest, uint64_t dest_length); + Writer(const Writer&) = delete; + Writer& operator=(const Writer&) = delete; + ~Writer(); Status AddRecord(const Slice& slice); private: + Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length); + WritableFile* dest_; - int block_offset_; // Current offset in block + int block_offset_; // Current offset in block // crc32c values for all supported record types. These are // pre-computed to reduce the overhead of computing the crc of the // record type stored in the header. uint32_t type_crc_[kMaxRecordType + 1]; - - Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length); - - // No copying allowed - Writer(const Writer&); - void operator=(const Writer&); }; } // namespace log diff --git a/src/leveldb/db/memtable.cc b/src/leveldb/db/memtable.cc index 287afdbdcb..00931d4671 100644 --- a/src/leveldb/db/memtable.cc +++ b/src/leveldb/db/memtable.cc @@ -18,20 +18,15 @@ static Slice GetLengthPrefixedSlice(const char* data) { return Slice(p, len); } -MemTable::MemTable(const InternalKeyComparator& cmp) - : comparator_(cmp), - refs_(0), - table_(comparator_, &arena_) { -} +MemTable::MemTable(const InternalKeyComparator& comparator) + : comparator_(comparator), refs_(0), table_(comparator_, &arena_) {} -MemTable::~MemTable() { - assert(refs_ == 0); -} +MemTable::~MemTable() { assert(refs_ == 0); } size_t MemTable::ApproximateMemoryUsage() { return arena_.MemoryUsage(); } -int MemTable::KeyComparator::operator()(const char* aptr, const char* bptr) - const { +int MemTable::KeyComparator::operator()(const char* aptr, + const char* bptr) const { // Internal keys are encoded as length-prefixed strings. 
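As the comment above says, internal keys in the memtable are length-prefixed: each entry starts with a varint32 internal_key_size, followed by the user key and an 8-byte tag whose low byte is the value type (the patch's switch on "tag & 0xff"); the sequence number occupying the upper bits is the standard dbformat packing and is assumed here. A short decode sketch, restricted to one-byte varints for brevity (real code uses GetVarint32Ptr):

#include <cassert>
#include <cstdint>
#include <string>

// entry := varint32(internal_key_size) | user_key | 8-byte tag,
// where tag = (sequence << 8) | type (assumed packing; the type low byte
// matches the patch's "tag & 0xff").
struct ParsedKey {
  std::string user_key;
  uint64_t sequence;
  uint8_t type;
};

ParsedKey DecodeEntryKey(const char* entry) {
  uint32_t key_length = static_cast<uint8_t>(entry[0]);  // 1-byte varint only
  assert(key_length >= 8);
  const char* key_ptr = entry + 1;
  uint64_t tag = 0;  // DecodeFixed64-style little-endian read
  for (int i = 7; i >= 0; i--) {
    tag = (tag << 8) | static_cast<uint8_t>(key_ptr[key_length - 8 + i]);
  }
  return ParsedKey{std::string(key_ptr, key_length - 8), tag >> 8,
                   static_cast<uint8_t>(tag & 0xff)};
}

int main() {
  // Build "foo" with sequence 5, type 1 (kTypeValue) by hand.
  std::string entry;
  entry.push_back(3 + 8);  // internal_key_size = user key + 8-byte tag
  entry.append("foo");
  uint64_t tag = (5ull << 8) | 1;
  for (int i = 0; i < 8; i++) {
    entry.push_back(static_cast<char>((tag >> (8 * i)) & 0xff));
  }
  ParsedKey k = DecodeEntryKey(entry.data());
  assert(k.user_key == "foo" && k.sequence == 5 && k.type == 1);
  return 0;
}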
Slice a = GetLengthPrefixedSlice(aptr); Slice b = GetLengthPrefixedSlice(bptr); @@ -48,39 +43,37 @@ static const char* EncodeKey(std::string* scratch, const Slice& target) { return scratch->data(); } -class MemTableIterator: public Iterator { +class MemTableIterator : public Iterator { public: - explicit MemTableIterator(MemTable::Table* table) : iter_(table) { } - - virtual bool Valid() const { return iter_.Valid(); } - virtual void Seek(const Slice& k) { iter_.Seek(EncodeKey(&tmp_, k)); } - virtual void SeekToFirst() { iter_.SeekToFirst(); } - virtual void SeekToLast() { iter_.SeekToLast(); } - virtual void Next() { iter_.Next(); } - virtual void Prev() { iter_.Prev(); } - virtual Slice key() const { return GetLengthPrefixedSlice(iter_.key()); } - virtual Slice value() const { + explicit MemTableIterator(MemTable::Table* table) : iter_(table) {} + + MemTableIterator(const MemTableIterator&) = delete; + MemTableIterator& operator=(const MemTableIterator&) = delete; + + ~MemTableIterator() override = default; + + bool Valid() const override { return iter_.Valid(); } + void Seek(const Slice& k) override { iter_.Seek(EncodeKey(&tmp_, k)); } + void SeekToFirst() override { iter_.SeekToFirst(); } + void SeekToLast() override { iter_.SeekToLast(); } + void Next() override { iter_.Next(); } + void Prev() override { iter_.Prev(); } + Slice key() const override { return GetLengthPrefixedSlice(iter_.key()); } + Slice value() const override { Slice key_slice = GetLengthPrefixedSlice(iter_.key()); return GetLengthPrefixedSlice(key_slice.data() + key_slice.size()); } - virtual Status status() const { return Status::OK(); } + Status status() const override { return Status::OK(); } private: MemTable::Table::Iterator iter_; - std::string tmp_; // For passing to EncodeKey - - // No copying allowed - MemTableIterator(const MemTableIterator&); - void operator=(const MemTableIterator&); + std::string tmp_; // For passing to EncodeKey }; -Iterator* MemTable::NewIterator() { - return new MemTableIterator(&table_); -} +Iterator* MemTable::NewIterator() { return new MemTableIterator(&table_); } -void MemTable::Add(SequenceNumber s, ValueType type, - const Slice& key, +void MemTable::Add(SequenceNumber s, ValueType type, const Slice& key, const Slice& value) { // Format of an entry is concatenation of: // key_size : varint32 of internal_key.size() @@ -90,9 +83,9 @@ void MemTable::Add(SequenceNumber s, ValueType type, size_t key_size = key.size(); size_t val_size = value.size(); size_t internal_key_size = key_size + 8; - const size_t encoded_len = - VarintLength(internal_key_size) + internal_key_size + - VarintLength(val_size) + val_size; + const size_t encoded_len = VarintLength(internal_key_size) + + internal_key_size + VarintLength(val_size) + + val_size; char* buf = arena_.Allocate(encoded_len); char* p = EncodeVarint32(buf, internal_key_size); memcpy(p, key.data(), key_size); @@ -121,10 +114,9 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s) { // all entries with overly large sequence numbers. 
const char* entry = iter.key(); uint32_t key_length; - const char* key_ptr = GetVarint32Ptr(entry, entry+5, &key_length); + const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length); if (comparator_.comparator.user_comparator()->Compare( - Slice(key_ptr, key_length - 8), - key.user_key()) == 0) { + Slice(key_ptr, key_length - 8), key.user_key()) == 0) { // Correct user key const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8); switch (static_cast<ValueType>(tag & 0xff)) { diff --git a/src/leveldb/db/memtable.h b/src/leveldb/db/memtable.h index 9f41567cde..9d986b1070 100644 --- a/src/leveldb/db/memtable.h +++ b/src/leveldb/db/memtable.h @@ -6,15 +6,15 @@ #define STORAGE_LEVELDB_DB_MEMTABLE_H_ #include <string> -#include "leveldb/db.h" + #include "db/dbformat.h" #include "db/skiplist.h" +#include "leveldb/db.h" #include "util/arena.h" namespace leveldb { class InternalKeyComparator; -class Mutex; class MemTableIterator; class MemTable { @@ -23,6 +23,9 @@ class MemTable { // is zero and the caller must call Ref() at least once. explicit MemTable(const InternalKeyComparator& comparator); + MemTable(const MemTable&) = delete; + MemTable& operator=(const MemTable&) = delete; + // Increase reference count. void Ref() { ++refs_; } @@ -50,8 +53,7 @@ class MemTable { // Add an entry into memtable that maps key to value at the // specified sequence number and with the specified type. // Typically value will be empty if type==kTypeDeletion. - void Add(SequenceNumber seq, ValueType type, - const Slice& key, + void Add(SequenceNumber seq, ValueType type, const Slice& key, const Slice& value); // If memtable contains a value for key, store it in *value and return true. @@ -61,26 +63,23 @@ class MemTable { bool Get(const LookupKey& key, std::string* value, Status* s); private: - ~MemTable(); // Private since only Unref() should be used to delete it + friend class MemTableIterator; + friend class MemTableBackwardIterator; struct KeyComparator { const InternalKeyComparator comparator; - explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) { } + explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) {} int operator()(const char* a, const char* b) const; }; - friend class MemTableIterator; - friend class MemTableBackwardIterator; typedef SkipList<const char*, KeyComparator> Table; + ~MemTable(); // Private since only Unref() should be used to delete it + KeyComparator comparator_; int refs_; Arena arena_; Table table_; - - // No copying allowed - MemTable(const MemTable&); - void operator=(const MemTable&); }; } // namespace leveldb diff --git a/src/leveldb/db/recovery_test.cc b/src/leveldb/db/recovery_test.cc index 9596f4288a..547a9591ea 100644 --- a/src/leveldb/db/recovery_test.cc +++ b/src/leveldb/db/recovery_test.cc @@ -17,7 +17,7 @@ namespace leveldb { class RecoveryTest { public: - RecoveryTest() : env_(Env::Default()), db_(NULL) { + RecoveryTest() : env_(Env::Default()), db_(nullptr) { dbname_ = test::TmpDir() + "/recovery_test"; DestroyDB(dbname_, Options()); Open(); @@ -44,22 +44,26 @@ class RecoveryTest { void Close() { delete db_; - db_ = NULL; + db_ = nullptr; } - void Open(Options* options = NULL) { + Status OpenWithStatus(Options* options = nullptr) { Close(); Options opts; - if (options != NULL) { + if (options != nullptr) { opts = *options; } else { opts.reuse_logs = true; // TODO(sanjay): test both ways opts.create_if_missing = true; } - if (opts.env == NULL) { + if (opts.env == nullptr) { opts.env = env_; } - ASSERT_OK(DB::Open(opts, 
dbname_, &db_)); + return DB::Open(opts, dbname_, &db_); + } + + void Open(Options* options = nullptr) { + ASSERT_OK(OpenWithStatus(options)); ASSERT_EQ(1, NumLogs()); } @@ -67,7 +71,7 @@ class RecoveryTest { return db_->Put(WriteOptions(), k, v); } - std::string Get(const std::string& k, const Snapshot* snapshot = NULL) { + std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) { std::string result; Status s = db_->Get(ReadOptions(), k, &result); if (s.IsNotFound()) { @@ -82,17 +86,18 @@ class RecoveryTest { std::string current; ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), ¤t)); size_t len = current.size(); - if (len > 0 && current[len-1] == '\n') { + if (len > 0 && current[len - 1] == '\n') { current.resize(len - 1); } return dbname_ + "/" + current; } - std::string LogName(uint64_t number) { - return LogFileName(dbname_, number); - } + std::string LogName(uint64_t number) { return LogFileName(dbname_, number); } size_t DeleteLogFiles() { + // Linux allows unlinking open files, but Windows does not. + // Closing the db allows for file deletion. + Close(); std::vector<uint64_t> logs = GetFiles(kLogFile); for (size_t i = 0; i < logs.size(); i++) { ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]); @@ -100,9 +105,9 @@ class RecoveryTest { return logs.size(); } - uint64_t FirstLogFile() { - return GetFiles(kLogFile)[0]; - } + void DeleteManifestFile() { ASSERT_OK(env_->DeleteFile(ManifestFileName())); } + + uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; } std::vector<uint64_t> GetFiles(FileType t) { std::vector<std::string> filenames; @@ -118,13 +123,9 @@ class RecoveryTest { return result; } - int NumLogs() { - return GetFiles(kLogFile).size(); - } + int NumLogs() { return GetFiles(kLogFile).size(); } - int NumTables() { - return GetFiles(kTableFile).size(); - } + int NumTables() { return GetFiles(kTableFile).size(); } uint64_t FileSize(const std::string& fname) { uint64_t result; @@ -132,9 +133,7 @@ class RecoveryTest { return result; } - void CompactMemTable() { - dbfull()->TEST_CompactMemTable(); - } + void CompactMemTable() { dbfull()->TEST_CompactMemTable(); } // Directly construct a log file that sets key to val. void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) { @@ -186,7 +185,7 @@ TEST(RecoveryTest, LargeManifestCompacted) { uint64_t len = FileSize(old_manifest); WritableFile* file; ASSERT_OK(env()->NewAppendableFile(old_manifest, &file)); - std::string zeroes(3*1048576 - static_cast<size_t>(len), 0); + std::string zeroes(3 * 1048576 - static_cast<size_t>(len), 0); ASSERT_OK(file->Append(zeroes)); ASSERT_OK(file->Flush()); delete file; @@ -259,7 +258,7 @@ TEST(RecoveryTest, MultipleMemTables) { // Force creation of multiple memtables by reducing the write buffer size. Options opt; opt.reuse_logs = true; - opt.write_buffer_size = (kNum*100) / 2; + opt.write_buffer_size = (kNum * 100) / 2; Open(&opt); ASSERT_LE(2, NumTables()); ASSERT_EQ(1, NumLogs()); @@ -278,16 +277,16 @@ TEST(RecoveryTest, MultipleLogFiles) { // Make a bunch of uncompacted log files. uint64_t old_log = FirstLogFile(); - MakeLogFile(old_log+1, 1000, "hello", "world"); - MakeLogFile(old_log+2, 1001, "hi", "there"); - MakeLogFile(old_log+3, 1002, "foo", "bar2"); + MakeLogFile(old_log + 1, 1000, "hello", "world"); + MakeLogFile(old_log + 2, 1001, "hi", "there"); + MakeLogFile(old_log + 3, 1002, "foo", "bar2"); // Recover and check that all log files were processed. 
Open(); ASSERT_LE(1, NumTables()); ASSERT_EQ(1, NumLogs()); uint64_t new_log = FirstLogFile(); - ASSERT_LE(old_log+3, new_log); + ASSERT_LE(old_log + 3, new_log); ASSERT_EQ("bar2", Get("foo")); ASSERT_EQ("world", Get("hello")); ASSERT_EQ("there", Get("hi")); @@ -305,7 +304,7 @@ TEST(RecoveryTest, MultipleLogFiles) { // Check that introducing an older log file does not cause it to be re-read. Close(); - MakeLogFile(old_log+1, 2000, "hello", "stale write"); + MakeLogFile(old_log + 1, 2000, "hello", "stale write"); Open(); ASSERT_LE(1, NumTables()); ASSERT_EQ(1, NumLogs()); @@ -317,8 +316,15 @@ TEST(RecoveryTest, MultipleLogFiles) { ASSERT_EQ("there", Get("hi")); } -} // namespace leveldb +TEST(RecoveryTest, ManifestMissing) { + ASSERT_OK(Put("foo", "bar")); + Close(); + DeleteManifestFile(); -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); + Status status = OpenWithStatus(); + ASSERT_TRUE(status.IsCorruption()); } + +} // namespace leveldb + +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/db/repair.cc b/src/leveldb/db/repair.cc index 7281e3d345..04847c3bbf 100644 --- a/src/leveldb/db/repair.cc +++ b/src/leveldb/db/repair.cc @@ -54,7 +54,7 @@ class Repairer { owns_cache_(options_.block_cache != options.block_cache), next_file_number_(1) { // TableCache can be small since we expect each table to be opened once. - table_cache_ = new TableCache(dbname_, &options_, 10); + table_cache_ = new TableCache(dbname_, options_, 10); } ~Repairer() { @@ -84,9 +84,7 @@ class Repairer { "recovered %d files; %llu bytes. " "Some data may have been lost. " "****", - dbname_.c_str(), - static_cast<int>(tables_.size()), - bytes); + dbname_.c_str(), static_cast<int>(tables_.size()), bytes); } return status; } @@ -97,22 +95,6 @@ class Repairer { SequenceNumber max_sequence; }; - std::string const dbname_; - Env* const env_; - InternalKeyComparator const icmp_; - InternalFilterPolicy const ipolicy_; - Options const options_; - bool owns_info_log_; - bool owns_cache_; - TableCache* table_cache_; - VersionEdit edit_; - - std::vector<std::string> manifests_; - std::vector<uint64_t> table_numbers_; - std::vector<uint64_t> logs_; - std::vector<TableInfo> tables_; - uint64_t next_file_number_; - Status FindFiles() { std::vector<std::string> filenames; Status status = env_->GetChildren(dbname_, &filenames); @@ -152,8 +134,7 @@ class Repairer { Status status = ConvertLogToTable(logs_[i]); if (!status.ok()) { Log(options_.info_log, "Log #%llu: ignoring conversion error: %s", - (unsigned long long) logs_[i], - status.ToString().c_str()); + (unsigned long long)logs_[i], status.ToString().c_str()); } ArchiveFile(logname); } @@ -164,11 +145,10 @@ class Repairer { Env* env; Logger* info_log; uint64_t lognum; - virtual void Corruption(size_t bytes, const Status& s) { + void Corruption(size_t bytes, const Status& s) override { // We print error messages for corruption, but continue repairing. Log(info_log, "Log #%llu: dropping %d bytes; %s", - (unsigned long long) lognum, - static_cast<int>(bytes), + (unsigned long long)lognum, static_cast<int>(bytes), s.ToString().c_str()); } }; @@ -190,8 +170,8 @@ class Repairer { // corruptions cause entire commits to be skipped instead of // propagating bad information (like overly large sequence // numbers). 
- log::Reader reader(lfile, &reporter, false/*do not checksum*/, - 0/*initial_offset*/); + log::Reader reader(lfile, &reporter, false /*do not checksum*/, + 0 /*initial_offset*/); // Read all the records and add to a memtable std::string scratch; @@ -202,8 +182,8 @@ class Repairer { int counter = 0; while (reader.ReadRecord(&record, &scratch)) { if (record.size() < 12) { - reporter.Corruption( - record.size(), Status::Corruption("log record too small", logname)); + reporter.Corruption(record.size(), + Status::Corruption("log record too small", logname)); continue; } WriteBatchInternal::SetContents(&batch, record); @@ -212,8 +192,7 @@ class Repairer { counter += WriteBatchInternal::Count(&batch); } else { Log(options_.info_log, "Log #%llu: ignoring %s", - (unsigned long long) log, - status.ToString().c_str()); + (unsigned long long)log, status.ToString().c_str()); status = Status::OK(); // Keep going with rest of file } } @@ -227,16 +206,14 @@ class Repairer { status = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta); delete iter; mem->Unref(); - mem = NULL; + mem = nullptr; if (status.ok()) { if (meta.file_size > 0) { table_numbers_.push_back(meta.number); } } Log(options_.info_log, "Log #%llu: %d ops saved to Table #%llu %s", - (unsigned long long) log, - counter, - (unsigned long long) meta.number, + (unsigned long long)log, counter, (unsigned long long)meta.number, status.ToString().c_str()); return status; } @@ -272,8 +249,7 @@ class Repairer { ArchiveFile(TableFileName(dbname_, number)); ArchiveFile(SSTTableFileName(dbname_, number)); Log(options_.info_log, "Table #%llu: dropped: %s", - (unsigned long long) t.meta.number, - status.ToString().c_str()); + (unsigned long long)t.meta.number, status.ToString().c_str()); return; } @@ -287,8 +263,7 @@ class Repairer { Slice key = iter->key(); if (!ParseInternalKey(key, &parsed)) { Log(options_.info_log, "Table #%llu: unparsable key %s", - (unsigned long long) t.meta.number, - EscapeString(key).c_str()); + (unsigned long long)t.meta.number, EscapeString(key).c_str()); continue; } @@ -307,9 +282,7 @@ class Repairer { } delete iter; Log(options_.info_log, "Table #%llu: %d entries %s", - (unsigned long long) t.meta.number, - counter, - status.ToString().c_str()); + (unsigned long long)t.meta.number, counter, status.ToString().c_str()); if (status.ok()) { tables_.push_back(t); @@ -350,20 +323,20 @@ class Repairer { } } delete builder; - builder = NULL; + builder = nullptr; if (s.ok()) { s = file->Close(); } delete file; - file = NULL; + file = nullptr; if (counter > 0 && s.ok()) { std::string orig = TableFileName(dbname_, t.meta.number); s = env_->RenameFile(copy, orig); if (s.ok()) { Log(options_.info_log, "Table #%llu: %d entries repaired", - (unsigned long long) t.meta.number, counter); + (unsigned long long)t.meta.number, counter); tables_.push_back(t); } } @@ -395,11 +368,11 @@ class Repairer { for (size_t i = 0; i < tables_.size(); i++) { // TODO(opt): separate out into multiple levels const TableInfo& t = tables_[i]; - edit_.AddFile(0, t.meta.number, t.meta.file_size, - t.meta.smallest, t.meta.largest); + edit_.AddFile(0, t.meta.number, t.meta.file_size, t.meta.smallest, + t.meta.largest); } - //fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str()); + // fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str()); { log::Writer log(file); std::string record; @@ -410,7 +383,7 @@ class Repairer { status = file->Close(); } delete file; - file = NULL; + file = nullptr; if (!status.ok()) { 
env_->DeleteFile(tmp); @@ -438,18 +411,34 @@ class Repairer { // dir/lost/foo const char* slash = strrchr(fname.c_str(), '/'); std::string new_dir; - if (slash != NULL) { + if (slash != nullptr) { new_dir.assign(fname.data(), slash - fname.data()); } new_dir.append("/lost"); env_->CreateDir(new_dir); // Ignore error std::string new_file = new_dir; new_file.append("/"); - new_file.append((slash == NULL) ? fname.c_str() : slash + 1); + new_file.append((slash == nullptr) ? fname.c_str() : slash + 1); Status s = env_->RenameFile(fname, new_file); - Log(options_.info_log, "Archiving %s: %s\n", - fname.c_str(), s.ToString().c_str()); + Log(options_.info_log, "Archiving %s: %s\n", fname.c_str(), + s.ToString().c_str()); } + + const std::string dbname_; + Env* const env_; + InternalKeyComparator const icmp_; + InternalFilterPolicy const ipolicy_; + const Options options_; + bool owns_info_log_; + bool owns_cache_; + TableCache* table_cache_; + VersionEdit edit_; + + std::vector<std::string> manifests_; + std::vector<uint64_t> table_numbers_; + std::vector<uint64_t> logs_; + std::vector<TableInfo> tables_; + uint64_t next_file_number_; }; } // namespace diff --git a/src/leveldb/db/skiplist.h b/src/leveldb/db/skiplist.h index 8bd77764d8..a59b45b380 100644 --- a/src/leveldb/db/skiplist.h +++ b/src/leveldb/db/skiplist.h @@ -27,9 +27,10 @@ // // ... prev vs. next pointer ordering ... -#include <assert.h> -#include <stdlib.h> -#include "port/port.h" +#include <atomic> +#include <cassert> +#include <cstdlib> + #include "util/arena.h" #include "util/random.h" @@ -37,7 +38,7 @@ namespace leveldb { class Arena; -template<typename Key, class Comparator> +template <typename Key, class Comparator> class SkipList { private: struct Node; @@ -48,6 +49,9 @@ class SkipList { // must remain allocated for the lifetime of the skiplist object. explicit SkipList(Comparator cmp, Arena* arena); + SkipList(const SkipList&) = delete; + SkipList& operator=(const SkipList&) = delete; + // Insert key into the list. // REQUIRES: nothing that compares equal to key is currently in the list. void Insert(const Key& key); @@ -97,24 +101,10 @@ class SkipList { private: enum { kMaxHeight = 12 }; - // Immutable after construction - Comparator const compare_; - Arena* const arena_; // Arena used for allocations of nodes - - Node* const head_; - - // Modified only by Insert(). Read racily by readers, but stale - // values are ok. - port::AtomicPointer max_height_; // Height of the entire list - inline int GetMaxHeight() const { - return static_cast<int>( - reinterpret_cast<intptr_t>(max_height_.NoBarrier_Load())); + return max_height_.load(std::memory_order_relaxed); } - // Read/written only by Insert(). - Random rnd_; - Node* NewNode(const Key& key, int height); int RandomHeight(); bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); } @@ -123,9 +113,9 @@ class SkipList { bool KeyIsAfterNode(const Key& key, Node* n) const; // Return the earliest node that comes at or after key. - // Return NULL if there is no such node. + // Return nullptr if there is no such node. // - // If prev is non-NULL, fills prev[level] with pointer to previous + // If prev is non-null, fills prev[level] with pointer to previous // node at "level" for every level in [0..max_height_-1]. Node* FindGreaterOrEqual(const Key& key, Node** prev) const; @@ -137,15 +127,24 @@ class SkipList { // Return head_ if list is empty. 
Node* FindLast() const; - // No copying allowed - SkipList(const SkipList&); - void operator=(const SkipList&); + // Immutable after construction + Comparator const compare_; + Arena* const arena_; // Arena used for allocations of nodes + + Node* const head_; + + // Modified only by Insert(). Read racily by readers, but stale + // values are ok. + std::atomic<int> max_height_; // Height of the entire list + + // Read/written only by Insert(). + Random rnd_; }; // Implementation details follow -template<typename Key, class Comparator> -struct SkipList<Key,Comparator>::Node { - explicit Node(const Key& k) : key(k) { } +template <typename Key, class Comparator> +struct SkipList<Key, Comparator>::Node { + explicit Node(const Key& k) : key(k) {} Key const key; @@ -155,92 +154,92 @@ struct SkipList<Key,Comparator>::Node { assert(n >= 0); // Use an 'acquire load' so that we observe a fully initialized // version of the returned Node. - return reinterpret_cast<Node*>(next_[n].Acquire_Load()); + return next_[n].load(std::memory_order_acquire); } void SetNext(int n, Node* x) { assert(n >= 0); // Use a 'release store' so that anybody who reads through this // pointer observes a fully initialized version of the inserted node. - next_[n].Release_Store(x); + next_[n].store(x, std::memory_order_release); } // No-barrier variants that can be safely used in a few locations. Node* NoBarrier_Next(int n) { assert(n >= 0); - return reinterpret_cast<Node*>(next_[n].NoBarrier_Load()); + return next_[n].load(std::memory_order_relaxed); } void NoBarrier_SetNext(int n, Node* x) { assert(n >= 0); - next_[n].NoBarrier_Store(x); + next_[n].store(x, std::memory_order_relaxed); } private: // Array of length equal to the node height. next_[0] is lowest level link. - port::AtomicPointer next_[1]; + std::atomic<Node*> next_[1]; }; -template<typename Key, class Comparator> -typename SkipList<Key,Comparator>::Node* -SkipList<Key,Comparator>::NewNode(const Key& key, int height) { - char* mem = arena_->AllocateAligned( - sizeof(Node) + sizeof(port::AtomicPointer) * (height - 1)); - return new (mem) Node(key); +template <typename Key, class Comparator> +typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::NewNode( + const Key& key, int height) { + char* const node_memory = arena_->AllocateAligned( + sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1)); + return new (node_memory) Node(key); } -template<typename Key, class Comparator> -inline SkipList<Key,Comparator>::Iterator::Iterator(const SkipList* list) { +template <typename Key, class Comparator> +inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) { list_ = list; - node_ = NULL; + node_ = nullptr; } -template<typename Key, class Comparator> -inline bool SkipList<Key,Comparator>::Iterator::Valid() const { - return node_ != NULL; +template <typename Key, class Comparator> +inline bool SkipList<Key, Comparator>::Iterator::Valid() const { + return node_ != nullptr; } -template<typename Key, class Comparator> -inline const Key& SkipList<Key,Comparator>::Iterator::key() const { +template <typename Key, class Comparator> +inline const Key& SkipList<Key, Comparator>::Iterator::key() const { assert(Valid()); return node_->key; } -template<typename Key, class Comparator> -inline void SkipList<Key,Comparator>::Iterator::Next() { +template <typename Key, class Comparator> +inline void SkipList<Key, Comparator>::Iterator::Next() { assert(Valid()); node_ = node_->Next(0); } -template<typename Key, class Comparator> -inline void 
SkipList<Key,Comparator>::Iterator::Prev() { +template <typename Key, class Comparator> +inline void SkipList<Key, Comparator>::Iterator::Prev() { // Instead of using explicit "prev" links, we just search for the // last node that falls before key. assert(Valid()); node_ = list_->FindLessThan(node_->key); if (node_ == list_->head_) { - node_ = NULL; + node_ = nullptr; } } -template<typename Key, class Comparator> -inline void SkipList<Key,Comparator>::Iterator::Seek(const Key& target) { - node_ = list_->FindGreaterOrEqual(target, NULL); +template <typename Key, class Comparator> +inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) { + node_ = list_->FindGreaterOrEqual(target, nullptr); } -template<typename Key, class Comparator> -inline void SkipList<Key,Comparator>::Iterator::SeekToFirst() { +template <typename Key, class Comparator> +inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() { node_ = list_->head_->Next(0); } -template<typename Key, class Comparator> -inline void SkipList<Key,Comparator>::Iterator::SeekToLast() { +template <typename Key, class Comparator> +inline void SkipList<Key, Comparator>::Iterator::SeekToLast() { node_ = list_->FindLast(); if (node_ == list_->head_) { - node_ = NULL; + node_ = nullptr; } } -template<typename Key, class Comparator> -int SkipList<Key,Comparator>::RandomHeight() { +template <typename Key, class Comparator> +int SkipList<Key, Comparator>::RandomHeight() { // Increase height with probability 1 in kBranching static const unsigned int kBranching = 4; int height = 1; @@ -252,15 +251,16 @@ int SkipList<Key,Comparator>::RandomHeight() { return height; } -template<typename Key, class Comparator> -bool SkipList<Key,Comparator>::KeyIsAfterNode(const Key& key, Node* n) const { - // NULL n is considered infinite - return (n != NULL) && (compare_(n->key, key) < 0); +template <typename Key, class Comparator> +bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const { + // null n is considered infinite + return (n != nullptr) && (compare_(n->key, key) < 0); } -template<typename Key, class Comparator> -typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOrEqual(const Key& key, Node** prev) - const { +template <typename Key, class Comparator> +typename SkipList<Key, Comparator>::Node* +SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key, + Node** prev) const { Node* x = head_; int level = GetMaxHeight() - 1; while (true) { @@ -269,7 +269,7 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOr // Keep searching in this list x = next; } else { - if (prev != NULL) prev[level] = x; + if (prev != nullptr) prev[level] = x; if (level == 0) { return next; } else { @@ -280,15 +280,15 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOr } } -template<typename Key, class Comparator> -typename SkipList<Key,Comparator>::Node* -SkipList<Key,Comparator>::FindLessThan(const Key& key) const { +template <typename Key, class Comparator> +typename SkipList<Key, Comparator>::Node* +SkipList<Key, Comparator>::FindLessThan(const Key& key) const { Node* x = head_; int level = GetMaxHeight() - 1; while (true) { assert(x == head_ || compare_(x->key, key) < 0); Node* next = x->Next(level); - if (next == NULL || compare_(next->key, key) >= 0) { + if (next == nullptr || compare_(next->key, key) >= 0) { if (level == 0) { return x; } else { @@ -301,14 +301,14 @@ SkipList<Key,Comparator>::FindLessThan(const Key& key) const { } } 
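The SetNext/Next changes above swap port::AtomicPointer for std::atomic<Node*> while keeping the publication discipline the old comments describe: initialize the node first, then publish the pointer with a release store, so a reader whose acquire load observes the pointer also observes the fully initialized node. A minimal illustration of that pattern (not the skiplist itself):

#include <atomic>
#include <cassert>
#include <thread>

// Mirrors SkipList's SetNext/Next contract: release store on the link,
// acquire load on the reader side.
struct Node {
  int payload = 0;
};

std::atomic<Node*> head{nullptr};

void Writer() {
  Node* n = new Node;
  n->payload = 42;                           // initialize first...
  head.store(n, std::memory_order_release);  // ...then publish
}

void Reader() {
  Node* n;
  while ((n = head.load(std::memory_order_acquire)) == nullptr) {
    // spin until the node is published
  }
  // The acquire load synchronizes-with the release store, so the
  // initialization of payload is visible here.
  assert(n->payload == 42);
}

int main() {
  std::thread r(Reader);
  std::thread w(Writer);
  w.join();
  r.join();
  delete head.load(std::memory_order_relaxed);
  return 0;
}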
-template<typename Key, class Comparator> -typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindLast() +template <typename Key, class Comparator> +typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast() const { Node* x = head_; int level = GetMaxHeight() - 1; while (true) { Node* next = x->Next(level); - if (next == NULL) { + if (next == nullptr) { if (level == 0) { return x; } else { @@ -321,43 +321,41 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindLast() } } -template<typename Key, class Comparator> -SkipList<Key,Comparator>::SkipList(Comparator cmp, Arena* arena) +template <typename Key, class Comparator> +SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena) : compare_(cmp), arena_(arena), head_(NewNode(0 /* any key will do */, kMaxHeight)), - max_height_(reinterpret_cast<void*>(1)), + max_height_(1), rnd_(0xdeadbeef) { for (int i = 0; i < kMaxHeight; i++) { - head_->SetNext(i, NULL); + head_->SetNext(i, nullptr); } } -template<typename Key, class Comparator> -void SkipList<Key,Comparator>::Insert(const Key& key) { +template <typename Key, class Comparator> +void SkipList<Key, Comparator>::Insert(const Key& key) { // TODO(opt): We can use a barrier-free variant of FindGreaterOrEqual() // here since Insert() is externally synchronized. Node* prev[kMaxHeight]; Node* x = FindGreaterOrEqual(key, prev); // Our data structure does not allow duplicate insertion - assert(x == NULL || !Equal(key, x->key)); + assert(x == nullptr || !Equal(key, x->key)); int height = RandomHeight(); if (height > GetMaxHeight()) { for (int i = GetMaxHeight(); i < height; i++) { prev[i] = head_; } - //fprintf(stderr, "Change height from %d to %d\n", max_height_, height); - // It is ok to mutate max_height_ without any synchronization // with concurrent readers. A concurrent reader that observes // the new value of max_height_ will see either the old value of - // new level pointers from head_ (NULL), or a new value set in + // new level pointers from head_ (nullptr), or a new value set in // the loop below. In the former case the reader will - // immediately drop to the next level since NULL sorts after all + // immediately drop to the next level since nullptr sorts after all // keys. In the latter case the reader will use the new node. - max_height_.NoBarrier_Store(reinterpret_cast<void*>(height)); + max_height_.store(height, std::memory_order_relaxed); } x = NewNode(key, height); @@ -369,10 +367,10 @@ void SkipList<Key,Comparator>::Insert(const Key& key) { } } -template<typename Key, class Comparator> -bool SkipList<Key,Comparator>::Contains(const Key& key) const { - Node* x = FindGreaterOrEqual(key, NULL); - if (x != NULL && Equal(key, x->key)) { +template <typename Key, class Comparator> +bool SkipList<Key, Comparator>::Contains(const Key& key) const { + Node* x = FindGreaterOrEqual(key, nullptr); + if (x != nullptr && Equal(key, x->key)) { return true; } else { return false; diff --git a/src/leveldb/db/skiplist_test.cc b/src/leveldb/db/skiplist_test.cc index aee1461e1b..9fa2d96829 100644 --- a/src/leveldb/db/skiplist_test.cc +++ b/src/leveldb/db/skiplist_test.cc @@ -3,8 +3,13 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. 
#include "db/skiplist.h" + +#include <atomic> #include <set> + #include "leveldb/env.h" +#include "port/port.h" +#include "port/thread_annotations.h" #include "util/arena.h" #include "util/hash.h" #include "util/random.h" @@ -26,7 +31,7 @@ struct Comparator { } }; -class SkipTest { }; +class SkipTest {}; TEST(SkipTest, Empty) { Arena arena; @@ -112,8 +117,7 @@ TEST(SkipTest, InsertAndLookup) { // Compare against model iterator for (std::set<Key>::reverse_iterator model_iter = keys.rbegin(); - model_iter != keys.rend(); - ++model_iter) { + model_iter != keys.rend(); ++model_iter) { ASSERT_TRUE(iter.Valid()); ASSERT_EQ(*model_iter, iter.key()); iter.Prev(); @@ -126,7 +130,7 @@ TEST(SkipTest, InsertAndLookup) { // concurrent readers (with no synchronization other than when a // reader's iterator is created), the reader always observes all the // data that was present in the skip list when the iterator was -// constructor. Because insertions are happening concurrently, we may +// constructed. Because insertions are happening concurrently, we may // also observe new values that were inserted since the iterator was // constructed, but we should never miss any values that were present // at iterator construction time. @@ -155,12 +159,12 @@ class ConcurrentTest { static uint64_t hash(Key key) { return key & 0xff; } static uint64_t HashNumbers(uint64_t k, uint64_t g) { - uint64_t data[2] = { k, g }; + uint64_t data[2] = {k, g}; return Hash(reinterpret_cast<char*>(data), sizeof(data), 0); } static Key MakeKey(uint64_t k, uint64_t g) { - assert(sizeof(Key) == sizeof(uint64_t)); + static_assert(sizeof(Key) == sizeof(uint64_t), ""); assert(k <= K); // We sometimes pass K to seek to the end of the skiplist assert(g <= 0xffffffffu); return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff)); @@ -186,13 +190,11 @@ class ConcurrentTest { // Per-key generation struct State { - port::AtomicPointer generation[K]; - void Set(int k, intptr_t v) { - generation[k].Release_Store(reinterpret_cast<void*>(v)); - } - intptr_t Get(int k) { - return reinterpret_cast<intptr_t>(generation[k].Acquire_Load()); + std::atomic<int> generation[K]; + void Set(int k, int v) { + generation[k].store(v, std::memory_order_release); } + int Get(int k) { return generation[k].load(std::memory_order_acquire); } State() { for (int k = 0; k < K; k++) { @@ -211,7 +213,7 @@ class ConcurrentTest { SkipList<Key, Comparator> list_; public: - ConcurrentTest() : list_(Comparator(), &arena_) { } + ConcurrentTest() : list_(Comparator(), &arena_) {} // REQUIRES: External synchronization void WriteStep(Random* rnd) { @@ -250,11 +252,9 @@ class ConcurrentTest { // Note that generation 0 is never inserted, so it is ok if // <*,0,*> is missing. 
ASSERT_TRUE((gen(pos) == 0) || - (gen(pos) > static_cast<Key>(initial_state.Get(key(pos)))) - ) << "key: " << key(pos) - << "; gen: " << gen(pos) - << "; initgen: " - << initial_state.Get(key(pos)); + (gen(pos) > static_cast<Key>(initial_state.Get(key(pos))))) + << "key: " << key(pos) << "; gen: " << gen(pos) + << "; initgen: " << initial_state.Get(key(pos)); // Advance to next key in the valid key space if (key(pos) < key(current)) { @@ -298,21 +298,14 @@ class TestState { public: ConcurrentTest t_; int seed_; - port::AtomicPointer quit_flag_; + std::atomic<bool> quit_flag_; - enum ReaderState { - STARTING, - RUNNING, - DONE - }; + enum ReaderState { STARTING, RUNNING, DONE }; explicit TestState(int s) - : seed_(s), - quit_flag_(NULL), - state_(STARTING), - state_cv_(&mu_) {} + : seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {} - void Wait(ReaderState s) { + void Wait(ReaderState s) LOCKS_EXCLUDED(mu_) { mu_.Lock(); while (state_ != s) { state_cv_.Wait(); @@ -320,7 +313,7 @@ class TestState { mu_.Unlock(); } - void Change(ReaderState s) { + void Change(ReaderState s) LOCKS_EXCLUDED(mu_) { mu_.Lock(); state_ = s; state_cv_.Signal(); @@ -329,8 +322,8 @@ class TestState { private: port::Mutex mu_; - ReaderState state_; - port::CondVar state_cv_; + ReaderState state_ GUARDED_BY(mu_); + port::CondVar state_cv_ GUARDED_BY(mu_); }; static void ConcurrentReader(void* arg) { @@ -338,7 +331,7 @@ static void ConcurrentReader(void* arg) { Random rnd(state->seed_); int64_t reads = 0; state->Change(TestState::RUNNING); - while (!state->quit_flag_.Acquire_Load()) { + while (!state->quit_flag_.load(std::memory_order_acquire)) { state->t_.ReadStep(&rnd); ++reads; } @@ -360,7 +353,7 @@ static void RunConcurrent(int run) { for (int i = 0; i < kSize; i++) { state.t_.WriteStep(&rnd); } - state.quit_flag_.Release_Store(&state); // Any non-NULL arg will do + state.quit_flag_.store(true, std::memory_order_release); state.Wait(TestState::DONE); } } @@ -373,6 +366,4 @@ TEST(SkipTest, Concurrent5) { RunConcurrent(5); } } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/db/snapshot.h b/src/leveldb/db/snapshot.h index 6ed413c42d..9f1d66491d 100644 --- a/src/leveldb/db/snapshot.h +++ b/src/leveldb/db/snapshot.h @@ -16,50 +16,78 @@ class SnapshotList; // Each SnapshotImpl corresponds to a particular sequence number. class SnapshotImpl : public Snapshot { public: - SequenceNumber number_; // const after creation + SnapshotImpl(SequenceNumber sequence_number) + : sequence_number_(sequence_number) {} + + SequenceNumber sequence_number() const { return sequence_number_; } private: friend class SnapshotList; - // SnapshotImpl is kept in a doubly-linked circular list + // SnapshotImpl is kept in a doubly-linked circular list. The SnapshotList + // implementation operates on the next/previous fields direcly. 
SnapshotImpl* prev_; SnapshotImpl* next_; - SnapshotList* list_; // just for sanity checks + const SequenceNumber sequence_number_; + +#if !defined(NDEBUG) + SnapshotList* list_ = nullptr; +#endif // !defined(NDEBUG) }; class SnapshotList { public: - SnapshotList() { - list_.prev_ = &list_; - list_.next_ = &list_; + SnapshotList() : head_(0) { + head_.prev_ = &head_; + head_.next_ = &head_; + } + + bool empty() const { return head_.next_ == &head_; } + SnapshotImpl* oldest() const { + assert(!empty()); + return head_.next_; } + SnapshotImpl* newest() const { + assert(!empty()); + return head_.prev_; + } + + // Creates a SnapshotImpl and appends it to the end of the list. + SnapshotImpl* New(SequenceNumber sequence_number) { + assert(empty() || newest()->sequence_number_ <= sequence_number); + + SnapshotImpl* snapshot = new SnapshotImpl(sequence_number); - bool empty() const { return list_.next_ == &list_; } - SnapshotImpl* oldest() const { assert(!empty()); return list_.next_; } - SnapshotImpl* newest() const { assert(!empty()); return list_.prev_; } - - const SnapshotImpl* New(SequenceNumber seq) { - SnapshotImpl* s = new SnapshotImpl; - s->number_ = seq; - s->list_ = this; - s->next_ = &list_; - s->prev_ = list_.prev_; - s->prev_->next_ = s; - s->next_->prev_ = s; - return s; +#if !defined(NDEBUG) + snapshot->list_ = this; +#endif // !defined(NDEBUG) + snapshot->next_ = &head_; + snapshot->prev_ = head_.prev_; + snapshot->prev_->next_ = snapshot; + snapshot->next_->prev_ = snapshot; + return snapshot; } - void Delete(const SnapshotImpl* s) { - assert(s->list_ == this); - s->prev_->next_ = s->next_; - s->next_->prev_ = s->prev_; - delete s; + // Removes a SnapshotImpl from this list. + // + // The snapshot must have been created by calling New() on this list. + // + // The snapshot pointer should not be const, because its memory is + // deallocated. However, that would force us to change DB::ReleaseSnapshot(), + // which is in the API, and currently takes a const Snapshot. 
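The reworked SnapshotList keeps SnapshotImpl objects in a circular doubly-linked list with a dummy head, so empty() is a single pointer comparison and New()/Delete() need no empty-list special cases. A small standalone sketch of that sentinel pattern, using hypothetical names and plain sequence numbers:

#include <cassert>
#include <cstdint>

// Circular doubly-linked list with a dummy head node, the structure
// SnapshotList uses. Values here are sequence numbers only.
struct Item {
  explicit Item(uint64_t s) : seq(s), prev(this), next(this) {}
  uint64_t seq;
  Item* prev;
  Item* next;
};

struct List {
  Item head{0};  // sentinel; head.next == &head means empty
  bool empty() const { return head.next == &head; }
  Item* Append(uint64_t seq) {  // like SnapshotList::New
    Item* item = new Item(seq);
    item->next = &head;
    item->prev = head.prev;
    item->prev->next = item;
    item->next->prev = item;
    return item;
  }
  void Remove(Item* item) {  // like SnapshotList::Delete
    item->prev->next = item->next;
    item->next->prev = item->prev;
    delete item;
  }
};

int main() {
  List list;
  assert(list.empty());
  Item* a = list.Append(10);
  Item* b = list.Append(20);
  assert(list.head.next == a && list.head.prev == b);  // oldest / newest
  list.Remove(a);
  list.Remove(b);
  assert(list.empty());
  return 0;
}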
+ void Delete(const SnapshotImpl* snapshot) { +#if !defined(NDEBUG) + assert(snapshot->list_ == this); +#endif // !defined(NDEBUG) + snapshot->prev_->next_ = snapshot->next_; + snapshot->next_->prev_ = snapshot->prev_; + delete snapshot; } private: // Dummy head of doubly-linked list of snapshots - SnapshotImpl list_; + SnapshotImpl head_; }; } // namespace leveldb diff --git a/src/leveldb/db/table_cache.cc b/src/leveldb/db/table_cache.cc index e3d82cd3ea..73f05fd7b1 100644 --- a/src/leveldb/db/table_cache.cc +++ b/src/leveldb/db/table_cache.cc @@ -29,18 +29,14 @@ static void UnrefEntry(void* arg1, void* arg2) { cache->Release(h); } -TableCache::TableCache(const std::string& dbname, - const Options* options, +TableCache::TableCache(const std::string& dbname, const Options& options, int entries) - : env_(options->env), + : env_(options.env), dbname_(dbname), options_(options), - cache_(NewLRUCache(entries)) { -} + cache_(NewLRUCache(entries)) {} -TableCache::~TableCache() { - delete cache_; -} +TableCache::~TableCache() { delete cache_; } Status TableCache::FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle** handle) { @@ -49,10 +45,10 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size, EncodeFixed64(buf, file_number); Slice key(buf, sizeof(buf)); *handle = cache_->Lookup(key); - if (*handle == NULL) { + if (*handle == nullptr) { std::string fname = TableFileName(dbname_, file_number); - RandomAccessFile* file = NULL; - Table* table = NULL; + RandomAccessFile* file = nullptr; + Table* table = nullptr; s = env_->NewRandomAccessFile(fname, &file); if (!s.ok()) { std::string old_fname = SSTTableFileName(dbname_, file_number); @@ -61,11 +57,11 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size, } } if (s.ok()) { - s = Table::Open(*options_, file, file_size, &table); + s = Table::Open(options_, file, file_size, &table); } if (!s.ok()) { - assert(table == NULL); + assert(table == nullptr); delete file; // We do not cache error results so that if the error is transient, // or somebody repairs the file, we recover automatically. 
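TableCache::FindTable above is a find-or-open-and-insert lookup keyed by the fixed64-encoded file number, and failed opens are deliberately left out of the cache so a transient error can succeed on retry. A generic sketch of that shape, with std::map standing in for leveldb's reference-counted LRU Cache and a placeholder Table type:

#include <cstdint>
#include <iostream>
#include <map>
#include <memory>

// Look up by file number, open and insert on a miss, and do not cache
// failures so the next call can recover.
struct Table {
  explicit Table(uint64_t n) : file_number(n) {}
  uint64_t file_number;
};

class TinyTableCache {
 public:
  // Returns nullptr on (simulated) open failure.
  Table* Find(uint64_t file_number) {
    auto it = cache_.find(file_number);
    if (it != cache_.end()) return it->second.get();    // cache hit
    if (file_number == 0) return nullptr;               // simulated open error
    auto table = std::make_unique<Table>(file_number);  // "open" the table
    Table* raw = table.get();
    cache_.emplace(file_number, std::move(table));      // insert on success only
    return raw;
  }

 private:
  std::map<uint64_t, std::unique_ptr<Table>> cache_;
};

int main() {
  TinyTableCache cache;
  Table* t1 = cache.Find(7);
  Table* t2 = cache.Find(7);
  std::cout << (t1 == t2) << "\n";                  // 1: second lookup hits the cache
  std::cout << (cache.Find(0) == nullptr) << "\n";  // 1: errors are not cached
  return 0;
}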
@@ -80,14 +76,13 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size, } Iterator* TableCache::NewIterator(const ReadOptions& options, - uint64_t file_number, - uint64_t file_size, + uint64_t file_number, uint64_t file_size, Table** tableptr) { - if (tableptr != NULL) { - *tableptr = NULL; + if (tableptr != nullptr) { + *tableptr = nullptr; } - Cache::Handle* handle = NULL; + Cache::Handle* handle = nullptr; Status s = FindTable(file_number, file_size, &handle); if (!s.ok()) { return NewErrorIterator(s); @@ -96,23 +91,21 @@ Iterator* TableCache::NewIterator(const ReadOptions& options, Table* table = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table; Iterator* result = table->NewIterator(options); result->RegisterCleanup(&UnrefEntry, cache_, handle); - if (tableptr != NULL) { + if (tableptr != nullptr) { *tableptr = table; } return result; } -Status TableCache::Get(const ReadOptions& options, - uint64_t file_number, - uint64_t file_size, - const Slice& k, - void* arg, - void (*saver)(void*, const Slice&, const Slice&)) { - Cache::Handle* handle = NULL; +Status TableCache::Get(const ReadOptions& options, uint64_t file_number, + uint64_t file_size, const Slice& k, void* arg, + void (*handle_result)(void*, const Slice&, + const Slice&)) { + Cache::Handle* handle = nullptr; Status s = FindTable(file_number, file_size, &handle); if (s.ok()) { Table* t = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table; - s = t->InternalGet(options, k, arg, saver); + s = t->InternalGet(options, k, arg, handle_result); cache_->Release(handle); } return s; diff --git a/src/leveldb/db/table_cache.h b/src/leveldb/db/table_cache.h index 8cf4aaf12d..93069c8844 100644 --- a/src/leveldb/db/table_cache.h +++ b/src/leveldb/db/table_cache.h @@ -7,8 +7,10 @@ #ifndef STORAGE_LEVELDB_DB_TABLE_CACHE_H_ #define STORAGE_LEVELDB_DB_TABLE_CACHE_H_ -#include <string> #include <stdint.h> + +#include <string> + #include "db/dbformat.h" #include "leveldb/cache.h" #include "leveldb/table.h" @@ -20,40 +22,35 @@ class Env; class TableCache { public: - TableCache(const std::string& dbname, const Options* options, int entries); + TableCache(const std::string& dbname, const Options& options, int entries); ~TableCache(); // Return an iterator for the specified file number (the corresponding // file length must be exactly "file_size" bytes). If "tableptr" is - // non-NULL, also sets "*tableptr" to point to the Table object - // underlying the returned iterator, or NULL if no Table object underlies - // the returned iterator. The returned "*tableptr" object is owned by - // the cache and should not be deleted, and is valid for as long as the + // non-null, also sets "*tableptr" to point to the Table object + // underlying the returned iterator, or to nullptr if no Table object + // underlies the returned iterator. The returned "*tableptr" object is owned + // by the cache and should not be deleted, and is valid for as long as the // returned iterator is live. - Iterator* NewIterator(const ReadOptions& options, - uint64_t file_number, - uint64_t file_size, - Table** tableptr = NULL); + Iterator* NewIterator(const ReadOptions& options, uint64_t file_number, + uint64_t file_size, Table** tableptr = nullptr); // If a seek to internal key "k" in specified file finds an entry, // call (*handle_result)(arg, found_key, found_value). 
- Status Get(const ReadOptions& options, - uint64_t file_number, - uint64_t file_size, - const Slice& k, - void* arg, + Status Get(const ReadOptions& options, uint64_t file_number, + uint64_t file_size, const Slice& k, void* arg, void (*handle_result)(void*, const Slice&, const Slice&)); // Evict any entry for the specified file number void Evict(uint64_t file_number); private: + Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**); + Env* const env_; const std::string dbname_; - const Options* options_; + const Options& options_; Cache* cache_; - - Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**); }; } // namespace leveldb diff --git a/src/leveldb/db/version_edit.cc b/src/leveldb/db/version_edit.cc index f10a2d58b2..cd770ef12d 100644 --- a/src/leveldb/db/version_edit.cc +++ b/src/leveldb/db/version_edit.cc @@ -12,15 +12,15 @@ namespace leveldb { // Tag numbers for serialized VersionEdit. These numbers are written to // disk and should not be changed. enum Tag { - kComparator = 1, - kLogNumber = 2, - kNextFileNumber = 3, - kLastSequence = 4, - kCompactPointer = 5, - kDeletedFile = 6, - kNewFile = 7, + kComparator = 1, + kLogNumber = 2, + kNextFileNumber = 3, + kLastSequence = 4, + kCompactPointer = 5, + kDeletedFile = 6, + kNewFile = 7, // 8 was used for large value refs - kPrevLogNumber = 9 + kPrevLogNumber = 9 }; void VersionEdit::Clear() { @@ -66,12 +66,10 @@ void VersionEdit::EncodeTo(std::string* dst) const { PutLengthPrefixedSlice(dst, compact_pointers_[i].second.Encode()); } - for (DeletedFileSet::const_iterator iter = deleted_files_.begin(); - iter != deleted_files_.end(); - ++iter) { + for (const auto& deleted_file_kvp : deleted_files_) { PutVarint32(dst, kDeletedFile); - PutVarint32(dst, iter->first); // level - PutVarint64(dst, iter->second); // file number + PutVarint32(dst, deleted_file_kvp.first); // level + PutVarint64(dst, deleted_file_kvp.second); // file number } for (size_t i = 0; i < new_files_.size(); i++) { @@ -88,8 +86,7 @@ void VersionEdit::EncodeTo(std::string* dst) const { static bool GetInternalKey(Slice* input, InternalKey* dst) { Slice str; if (GetLengthPrefixedSlice(input, &str)) { - dst->DecodeFrom(str); - return true; + return dst->DecodeFrom(str); } else { return false; } @@ -97,8 +94,7 @@ static bool GetInternalKey(Slice* input, InternalKey* dst) { static bool GetLevel(Slice* input, int* level) { uint32_t v; - if (GetVarint32(input, &v) && - v < config::kNumLevels) { + if (GetVarint32(input, &v) && v < config::kNumLevels) { *level = v; return true; } else { @@ -109,7 +105,7 @@ static bool GetLevel(Slice* input, int* level) { Status VersionEdit::DecodeFrom(const Slice& src) { Clear(); Slice input = src; - const char* msg = NULL; + const char* msg = nullptr; uint32_t tag; // Temporary storage for parsing @@ -119,7 +115,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) { Slice str; InternalKey key; - while (msg == NULL && GetVarint32(&input, &tag)) { + while (msg == nullptr && GetVarint32(&input, &tag)) { switch (tag) { case kComparator: if (GetLengthPrefixedSlice(&input, &str)) { @@ -163,8 +159,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) { break; case kCompactPointer: - if (GetLevel(&input, &level) && - GetInternalKey(&input, &key)) { + if (GetLevel(&input, &level) && GetInternalKey(&input, &key)) { compact_pointers_.push_back(std::make_pair(level, key)); } else { msg = "compaction pointer"; @@ -172,8 +167,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) { break; case kDeletedFile: - if 
(GetLevel(&input, &level) && - GetVarint64(&input, &number)) { + if (GetLevel(&input, &level) && GetVarint64(&input, &number)) { deleted_files_.insert(std::make_pair(level, number)); } else { msg = "deleted file"; @@ -181,8 +175,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) { break; case kNewFile: - if (GetLevel(&input, &level) && - GetVarint64(&input, &f.number) && + if (GetLevel(&input, &level) && GetVarint64(&input, &f.number) && GetVarint64(&input, &f.file_size) && GetInternalKey(&input, &f.smallest) && GetInternalKey(&input, &f.largest)) { @@ -198,12 +191,12 @@ Status VersionEdit::DecodeFrom(const Slice& src) { } } - if (msg == NULL && !input.empty()) { + if (msg == nullptr && !input.empty()) { msg = "invalid tag"; } Status result; - if (msg != NULL) { + if (msg != nullptr) { result = Status::Corruption("VersionEdit", msg); } return result; @@ -238,13 +231,11 @@ std::string VersionEdit::DebugString() const { r.append(" "); r.append(compact_pointers_[i].second.DebugString()); } - for (DeletedFileSet::const_iterator iter = deleted_files_.begin(); - iter != deleted_files_.end(); - ++iter) { + for (const auto& deleted_files_kvp : deleted_files_) { r.append("\n DeleteFile: "); - AppendNumberTo(&r, iter->first); + AppendNumberTo(&r, deleted_files_kvp.first); r.append(" "); - AppendNumberTo(&r, iter->second); + AppendNumberTo(&r, deleted_files_kvp.second); } for (size_t i = 0; i < new_files_.size(); i++) { const FileMetaData& f = new_files_[i].second; diff --git a/src/leveldb/db/version_edit.h b/src/leveldb/db/version_edit.h index eaef77b327..0de4531773 100644 --- a/src/leveldb/db/version_edit.h +++ b/src/leveldb/db/version_edit.h @@ -8,6 +8,7 @@ #include <set> #include <utility> #include <vector> + #include "db/dbformat.h" namespace leveldb { @@ -15,20 +16,20 @@ namespace leveldb { class VersionSet; struct FileMetaData { + FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {} + int refs; - int allowed_seeks; // Seeks allowed until compaction + int allowed_seeks; // Seeks allowed until compaction uint64_t number; - uint64_t file_size; // File size in bytes - InternalKey smallest; // Smallest internal key served by table - InternalKey largest; // Largest internal key served by table - - FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) { } + uint64_t file_size; // File size in bytes + InternalKey smallest; // Smallest internal key served by table + InternalKey largest; // Largest internal key served by table }; class VersionEdit { public: VersionEdit() { Clear(); } - ~VersionEdit() { } + ~VersionEdit() = default; void Clear(); @@ -59,10 +60,8 @@ class VersionEdit { // Add the specified file at the specified number. 
// REQUIRES: This version has not been saved (see VersionSet::SaveTo) // REQUIRES: "smallest" and "largest" are smallest and largest keys in file - void AddFile(int level, uint64_t file, - uint64_t file_size, - const InternalKey& smallest, - const InternalKey& largest) { + void AddFile(int level, uint64_t file, uint64_t file_size, + const InternalKey& smallest, const InternalKey& largest) { FileMetaData f; f.number = file; f.file_size = file_size; @@ -84,7 +83,7 @@ class VersionEdit { private: friend class VersionSet; - typedef std::set< std::pair<int, uint64_t> > DeletedFileSet; + typedef std::set<std::pair<int, uint64_t>> DeletedFileSet; std::string comparator_; uint64_t log_number_; @@ -97,9 +96,9 @@ class VersionEdit { bool has_next_file_number_; bool has_last_sequence_; - std::vector< std::pair<int, InternalKey> > compact_pointers_; + std::vector<std::pair<int, InternalKey>> compact_pointers_; DeletedFileSet deleted_files_; - std::vector< std::pair<int, FileMetaData> > new_files_; + std::vector<std::pair<int, FileMetaData>> new_files_; }; } // namespace leveldb diff --git a/src/leveldb/db/version_edit_test.cc b/src/leveldb/db/version_edit_test.cc index 280310b49d..0b7cda8854 100644 --- a/src/leveldb/db/version_edit_test.cc +++ b/src/leveldb/db/version_edit_test.cc @@ -17,7 +17,7 @@ static void TestEncodeDecode(const VersionEdit& edit) { ASSERT_EQ(encoded, encoded2); } -class VersionEditTest { }; +class VersionEditTest {}; TEST(VersionEditTest, EncodeDecode) { static const uint64_t kBig = 1ull << 50; @@ -41,6 +41,4 @@ TEST(VersionEditTest, EncodeDecode) { } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/db/version_set.cc b/src/leveldb/db/version_set.cc index 2cb6d80ed3..cd07346ea8 100644 --- a/src/leveldb/db/version_set.cc +++ b/src/leveldb/db/version_set.cc @@ -4,8 +4,10 @@ #include "db/version_set.h" -#include <algorithm> #include <stdio.h> + +#include <algorithm> + #include "db/filename.h" #include "db/log_reader.h" #include "db/log_writer.h" @@ -84,8 +86,7 @@ Version::~Version() { } int FindFile(const InternalKeyComparator& icmp, - const std::vector<FileMetaData*>& files, - const Slice& key) { + const std::vector<FileMetaData*>& files, const Slice& key) { uint32_t left = 0; uint32_t right = files.size(); while (left < right) { @@ -104,26 +105,25 @@ int FindFile(const InternalKeyComparator& icmp, return right; } -static bool AfterFile(const Comparator* ucmp, - const Slice* user_key, const FileMetaData* f) { - // NULL user_key occurs before all keys and is therefore never after *f - return (user_key != NULL && +static bool AfterFile(const Comparator* ucmp, const Slice* user_key, + const FileMetaData* f) { + // null user_key occurs before all keys and is therefore never after *f + return (user_key != nullptr && ucmp->Compare(*user_key, f->largest.user_key()) > 0); } -static bool BeforeFile(const Comparator* ucmp, - const Slice* user_key, const FileMetaData* f) { - // NULL user_key occurs after all keys and is therefore never before *f - return (user_key != NULL && +static bool BeforeFile(const Comparator* ucmp, const Slice* user_key, + const FileMetaData* f) { + // null user_key occurs after all keys and is therefore never before *f + return (user_key != nullptr && ucmp->Compare(*user_key, f->smallest.user_key()) < 0); } -bool SomeFileOverlapsRange( - const InternalKeyComparator& icmp, - bool disjoint_sorted_files, - const 
std::vector<FileMetaData*>& files, - const Slice* smallest_user_key, - const Slice* largest_user_key) { +bool SomeFileOverlapsRange(const InternalKeyComparator& icmp, + bool disjoint_sorted_files, + const std::vector<FileMetaData*>& files, + const Slice* smallest_user_key, + const Slice* largest_user_key) { const Comparator* ucmp = icmp.user_comparator(); if (!disjoint_sorted_files) { // Need to check against all files @@ -141,10 +141,11 @@ bool SomeFileOverlapsRange( // Binary search over file list uint32_t index = 0; - if (smallest_user_key != NULL) { + if (smallest_user_key != nullptr) { // Find the earliest possible internal key for smallest_user_key - InternalKey small(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek); - index = FindFile(icmp, files, small.Encode()); + InternalKey small_key(*smallest_user_key, kMaxSequenceNumber, + kValueTypeForSeek); + index = FindFile(icmp, files, small_key.Encode()); } if (index >= files.size()) { @@ -164,25 +165,21 @@ class Version::LevelFileNumIterator : public Iterator { public: LevelFileNumIterator(const InternalKeyComparator& icmp, const std::vector<FileMetaData*>* flist) - : icmp_(icmp), - flist_(flist), - index_(flist->size()) { // Marks as invalid - } - virtual bool Valid() const { - return index_ < flist_->size(); + : icmp_(icmp), flist_(flist), index_(flist->size()) { // Marks as invalid } - virtual void Seek(const Slice& target) { + bool Valid() const override { return index_ < flist_->size(); } + void Seek(const Slice& target) override { index_ = FindFile(icmp_, *flist_, target); } - virtual void SeekToFirst() { index_ = 0; } - virtual void SeekToLast() { + void SeekToFirst() override { index_ = 0; } + void SeekToLast() override { index_ = flist_->empty() ? 0 : flist_->size() - 1; } - virtual void Next() { + void Next() override { assert(Valid()); index_++; } - virtual void Prev() { + void Prev() override { assert(Valid()); if (index_ == 0) { index_ = flist_->size(); // Marks as invalid @@ -190,17 +187,18 @@ class Version::LevelFileNumIterator : public Iterator { index_--; } } - Slice key() const { + Slice key() const override { assert(Valid()); return (*flist_)[index_]->largest.Encode(); } - Slice value() const { + Slice value() const override { assert(Valid()); EncodeFixed64(value_buf_, (*flist_)[index_]->number); - EncodeFixed64(value_buf_+8, (*flist_)[index_]->file_size); + EncodeFixed64(value_buf_ + 8, (*flist_)[index_]->file_size); return Slice(value_buf_, sizeof(value_buf_)); } - virtual Status status() const { return Status::OK(); } + Status status() const override { return Status::OK(); } + private: const InternalKeyComparator icmp_; const std::vector<FileMetaData*>* const flist_; @@ -210,16 +208,14 @@ class Version::LevelFileNumIterator : public Iterator { mutable char value_buf_[16]; }; -static Iterator* GetFileIterator(void* arg, - const ReadOptions& options, +static Iterator* GetFileIterator(void* arg, const ReadOptions& options, const Slice& file_value) { TableCache* cache = reinterpret_cast<TableCache*>(arg); if (file_value.size() != 16) { return NewErrorIterator( Status::Corruption("FileReader invoked with unexpected value")); } else { - return cache->NewIterator(options, - DecodeFixed64(file_value.data()), + return cache->NewIterator(options, DecodeFixed64(file_value.data()), DecodeFixed64(file_value.data() + 8)); } } @@ -227,17 +223,16 @@ static Iterator* GetFileIterator(void* arg, Iterator* Version::NewConcatenatingIterator(const ReadOptions& options, int level) const { return NewTwoLevelIterator( - new 
LevelFileNumIterator(vset_->icmp_, &files_[level]), - &GetFileIterator, vset_->table_cache_, options); + new LevelFileNumIterator(vset_->icmp_, &files_[level]), &GetFileIterator, + vset_->table_cache_, options); } void Version::AddIterators(const ReadOptions& options, std::vector<Iterator*>* iters) { // Merge all level zero files together since they may overlap for (size_t i = 0; i < files_[0].size(); i++) { - iters->push_back( - vset_->table_cache_->NewIterator( - options, files_[0][i]->number, files_[0][i]->file_size)); + iters->push_back(vset_->table_cache_->NewIterator( + options, files_[0][i]->number, files_[0][i]->file_size)); } // For levels > 0, we can use a concatenating iterator that sequentially @@ -264,7 +259,7 @@ struct Saver { Slice user_key; std::string* value; }; -} +} // namespace static void SaveValue(void* arg, const Slice& ikey, const Slice& v) { Saver* s = reinterpret_cast<Saver*>(arg); ParsedInternalKey parsed_key; @@ -284,10 +279,8 @@ static bool NewestFirst(FileMetaData* a, FileMetaData* b) { return a->number > b->number; } -void Version::ForEachOverlapping(Slice user_key, Slice internal_key, - void* arg, +void Version::ForEachOverlapping(Slice user_key, Slice internal_key, void* arg, bool (*func)(void*, int, FileMetaData*)) { - // TODO(sanjay): Change Version::Get() to use this function. const Comparator* ucmp = vset_->icmp_.user_comparator(); // Search level-0 in order from newest to oldest. @@ -329,110 +322,89 @@ void Version::ForEachOverlapping(Slice user_key, Slice internal_key, } } -Status Version::Get(const ReadOptions& options, - const LookupKey& k, - std::string* value, - GetStats* stats) { - Slice ikey = k.internal_key(); - Slice user_key = k.user_key(); - const Comparator* ucmp = vset_->icmp_.user_comparator(); - Status s; - - stats->seek_file = NULL; +Status Version::Get(const ReadOptions& options, const LookupKey& k, + std::string* value, GetStats* stats) { + stats->seek_file = nullptr; stats->seek_file_level = -1; - FileMetaData* last_file_read = NULL; - int last_file_read_level = -1; - // We can search level-by-level since entries never hop across - // levels. Therefore we are guaranteed that if we find data - // in an smaller level, later levels are irrelevant. - std::vector<FileMetaData*> tmp; - FileMetaData* tmp2; - for (int level = 0; level < config::kNumLevels; level++) { - size_t num_files = files_[level].size(); - if (num_files == 0) continue; + struct State { + Saver saver; + GetStats* stats; + const ReadOptions* options; + Slice ikey; + FileMetaData* last_file_read; + int last_file_read_level; - // Get the list of files to search in this level - FileMetaData* const* files = &files_[level][0]; - if (level == 0) { - // Level-0 files may overlap each other. Find all files that - // overlap user_key and process them in order from newest to oldest. - tmp.reserve(num_files); - for (uint32_t i = 0; i < num_files; i++) { - FileMetaData* f = files[i]; - if (ucmp->Compare(user_key, f->smallest.user_key()) >= 0 && - ucmp->Compare(user_key, f->largest.user_key()) <= 0) { - tmp.push_back(f); - } - } - if (tmp.empty()) continue; + VersionSet* vset; + Status s; + bool found; - std::sort(tmp.begin(), tmp.end(), NewestFirst); - files = &tmp[0]; - num_files = tmp.size(); - } else { - // Binary search to find earliest index whose largest key >= ikey. 
- uint32_t index = FindFile(vset_->icmp_, files_[level], ikey); - if (index >= num_files) { - files = NULL; - num_files = 0; - } else { - tmp2 = files[index]; - if (ucmp->Compare(user_key, tmp2->smallest.user_key()) < 0) { - // All of "tmp2" is past any data for user_key - files = NULL; - num_files = 0; - } else { - files = &tmp2; - num_files = 1; - } - } - } + static bool Match(void* arg, int level, FileMetaData* f) { + State* state = reinterpret_cast<State*>(arg); - for (uint32_t i = 0; i < num_files; ++i) { - if (last_file_read != NULL && stats->seek_file == NULL) { + if (state->stats->seek_file == nullptr && + state->last_file_read != nullptr) { // We have had more than one seek for this read. Charge the 1st file. - stats->seek_file = last_file_read; - stats->seek_file_level = last_file_read_level; + state->stats->seek_file = state->last_file_read; + state->stats->seek_file_level = state->last_file_read_level; } - FileMetaData* f = files[i]; - last_file_read = f; - last_file_read_level = level; - - Saver saver; - saver.state = kNotFound; - saver.ucmp = ucmp; - saver.user_key = user_key; - saver.value = value; - s = vset_->table_cache_->Get(options, f->number, f->file_size, - ikey, &saver, SaveValue); - if (!s.ok()) { - return s; + state->last_file_read = f; + state->last_file_read_level = level; + + state->s = state->vset->table_cache_->Get(*state->options, f->number, + f->file_size, state->ikey, + &state->saver, SaveValue); + if (!state->s.ok()) { + state->found = true; + return false; } - switch (saver.state) { + switch (state->saver.state) { case kNotFound: - break; // Keep searching in other files + return true; // Keep searching in other files case kFound: - return s; + state->found = true; + return false; case kDeleted: - s = Status::NotFound(Slice()); // Use empty error message for speed - return s; + return false; case kCorrupt: - s = Status::Corruption("corrupted key for ", user_key); - return s; + state->s = + Status::Corruption("corrupted key for ", state->saver.user_key); + state->found = true; + return false; } + + // Not reached. Added to avoid false compilation warnings of + // "control reaches end of non-void function". + return false; } - } + }; + + State state; + state.found = false; + state.stats = stats; + state.last_file_read = nullptr; + state.last_file_read_level = -1; - return Status::NotFound(Slice()); // Use an empty error message for speed + state.options = &options; + state.ikey = k.internal_key(); + state.vset = vset_; + + state.saver.state = kNotFound; + state.saver.ucmp = vset_->icmp_.user_comparator(); + state.saver.user_key = k.user_key(); + state.saver.value = value; + + ForEachOverlapping(state.saver.user_key, state.ikey, &state, &State::Match); + + return state.found ? 
state.s : Status::NotFound(Slice()); } bool Version::UpdateStats(const GetStats& stats) { FileMetaData* f = stats.seek_file; - if (f != NULL) { + if (f != nullptr) { f->allowed_seeks--; - if (f->allowed_seeks <= 0 && file_to_compact_ == NULL) { + if (f->allowed_seeks <= 0 && file_to_compact_ == nullptr) { file_to_compact_ = f; file_to_compact_level_ = stats.seek_file_level; return true; @@ -479,9 +451,7 @@ bool Version::RecordReadSample(Slice internal_key) { return false; } -void Version::Ref() { - ++refs_; -} +void Version::Ref() { ++refs_; } void Version::Unref() { assert(this != &vset_->dummy_versions_); @@ -492,16 +462,14 @@ void Version::Unref() { } } -bool Version::OverlapInLevel(int level, - const Slice* smallest_user_key, +bool Version::OverlapInLevel(int level, const Slice* smallest_user_key, const Slice* largest_user_key) { return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level], smallest_user_key, largest_user_key); } -int Version::PickLevelForMemTableOutput( - const Slice& smallest_user_key, - const Slice& largest_user_key) { +int Version::PickLevelForMemTableOutput(const Slice& smallest_user_key, + const Slice& largest_user_key) { int level = 0; if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) { // Push to next level if there is no overlap in next level, @@ -528,40 +496,39 @@ int Version::PickLevelForMemTableOutput( } // Store in "*inputs" all files in "level" that overlap [begin,end] -void Version::GetOverlappingInputs( - int level, - const InternalKey* begin, - const InternalKey* end, - std::vector<FileMetaData*>* inputs) { +void Version::GetOverlappingInputs(int level, const InternalKey* begin, + const InternalKey* end, + std::vector<FileMetaData*>* inputs) { assert(level >= 0); assert(level < config::kNumLevels); inputs->clear(); Slice user_begin, user_end; - if (begin != NULL) { + if (begin != nullptr) { user_begin = begin->user_key(); } - if (end != NULL) { + if (end != nullptr) { user_end = end->user_key(); } const Comparator* user_cmp = vset_->icmp_.user_comparator(); - for (size_t i = 0; i < files_[level].size(); ) { + for (size_t i = 0; i < files_[level].size();) { FileMetaData* f = files_[level][i++]; const Slice file_start = f->smallest.user_key(); const Slice file_limit = f->largest.user_key(); - if (begin != NULL && user_cmp->Compare(file_limit, user_begin) < 0) { + if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) { // "f" is completely before specified range; skip it - } else if (end != NULL && user_cmp->Compare(file_start, user_end) > 0) { + } else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) { // "f" is completely after specified range; skip it } else { inputs->push_back(f); if (level == 0) { // Level-0 files may overlap each other. So check if the newly // added file has expanded the range. If so, restart search. 
- if (begin != NULL && user_cmp->Compare(file_start, user_begin) < 0) { + if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) { user_begin = file_start; inputs->clear(); i = 0; - } else if (end != NULL && user_cmp->Compare(file_limit, user_end) > 0) { + } else if (end != nullptr && + user_cmp->Compare(file_limit, user_end) > 0) { user_end = file_limit; inputs->clear(); i = 0; @@ -629,9 +596,7 @@ class VersionSet::Builder { public: // Initialize a builder with the files from *base and other info from *vset - Builder(VersionSet* vset, Version* base) - : vset_(vset), - base_(base) { + Builder(VersionSet* vset, Version* base) : vset_(vset), base_(base) { base_->Ref(); BySmallestKey cmp; cmp.internal_comparator = &vset_->icmp_; @@ -645,8 +610,8 @@ class VersionSet::Builder { const FileSet* added = levels_[level].added_files; std::vector<FileMetaData*> to_unref; to_unref.reserve(added->size()); - for (FileSet::const_iterator it = added->begin(); - it != added->end(); ++it) { + for (FileSet::const_iterator it = added->begin(); it != added->end(); + ++it) { to_unref.push_back(*it); } delete added; @@ -671,12 +636,9 @@ class VersionSet::Builder { } // Delete files - const VersionEdit::DeletedFileSet& del = edit->deleted_files_; - for (VersionEdit::DeletedFileSet::const_iterator iter = del.begin(); - iter != del.end(); - ++iter) { - const int level = iter->first; - const uint64_t number = iter->second; + for (const auto& deleted_file_set_kvp : edit->deleted_files_) { + const int level = deleted_file_set_kvp.first; + const uint64_t number = deleted_file_set_kvp.second; levels_[level].deleted_files.insert(number); } @@ -699,7 +661,7 @@ class VersionSet::Builder { // same as the compaction of 40KB of data. We are a little // conservative and allow approximately one seek for every 16KB // of data before triggering a compaction. 
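To make the seek-allowance arithmetic in the comment above concrete, here is a minimal standalone sketch (illustrative only, not part of the patch) of the rule applied by the replacement line just below: one allowed seek per 16KB of file data, clamped to a floor of 100.

#include <cstdint>
#include <cstdio>

// Illustrative sketch of the allowed_seeks rule described above:
// one seek per 16KB of table data, never fewer than 100.
static int AllowedSeeksFor(uint64_t file_size) {
  int allowed = static_cast<int>(file_size / 16384U);
  return allowed < 100 ? 100 : allowed;
}

int main() {
  // A 2MB table: 2 * 1048576 / 16384 = 128 allowed seeks.
  // A 64KB table computes 4 and is clamped to the floor of 100.
  std::printf("%d\n", AllowedSeeksFor(2u * 1048576u));  // 128
  std::printf("%d\n", AllowedSeeksFor(64u * 1024u));    // 100
  return 0;
}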
- f->allowed_seeks = (f->file_size / 16384); + f->allowed_seeks = static_cast<int>((f->file_size / 16384U)); if (f->allowed_seeks < 100) f->allowed_seeks = 100; levels_[level].deleted_files.erase(f->number); @@ -717,20 +679,17 @@ class VersionSet::Builder { const std::vector<FileMetaData*>& base_files = base_->files_[level]; std::vector<FileMetaData*>::const_iterator base_iter = base_files.begin(); std::vector<FileMetaData*>::const_iterator base_end = base_files.end(); - const FileSet* added = levels_[level].added_files; - v->files_[level].reserve(base_files.size() + added->size()); - for (FileSet::const_iterator added_iter = added->begin(); - added_iter != added->end(); - ++added_iter) { + const FileSet* added_files = levels_[level].added_files; + v->files_[level].reserve(base_files.size() + added_files->size()); + for (const auto& added_file : *added_files) { // Add all smaller files listed in base_ - for (std::vector<FileMetaData*>::const_iterator bpos - = std::upper_bound(base_iter, base_end, *added_iter, cmp); - base_iter != bpos; - ++base_iter) { + for (std::vector<FileMetaData*>::const_iterator bpos = + std::upper_bound(base_iter, base_end, added_file, cmp); + base_iter != bpos; ++base_iter) { MaybeAddFile(v, level, *base_iter); } - MaybeAddFile(v, level, *added_iter); + MaybeAddFile(v, level, added_file); } // Add remaining base files @@ -742,7 +701,7 @@ class VersionSet::Builder { // Make sure there is no overlap in levels > 0 if (level > 0) { for (uint32_t i = 1; i < v->files_[level].size(); i++) { - const InternalKey& prev_end = v->files_[level][i-1]->largest; + const InternalKey& prev_end = v->files_[level][i - 1]->largest; const InternalKey& this_begin = v->files_[level][i]->smallest; if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) { fprintf(stderr, "overlapping ranges in same level %s vs. %s\n", @@ -763,7 +722,7 @@ class VersionSet::Builder { std::vector<FileMetaData*>* files = &v->files_[level]; if (level > 0 && !files->empty()) { // Must not overlap - assert(vset_->icmp_.Compare((*files)[files->size()-1]->largest, + assert(vset_->icmp_.Compare((*files)[files->size() - 1]->largest, f->smallest) < 0); } f->refs++; @@ -772,8 +731,7 @@ class VersionSet::Builder { } }; -VersionSet::VersionSet(const std::string& dbname, - const Options* options, +VersionSet::VersionSet(const std::string& dbname, const Options* options, TableCache* table_cache, const InternalKeyComparator* cmp) : env_(options->env), @@ -786,10 +744,10 @@ VersionSet::VersionSet(const std::string& dbname, last_sequence_(0), log_number_(0), prev_log_number_(0), - descriptor_file_(NULL), - descriptor_log_(NULL), + descriptor_file_(nullptr), + descriptor_log_(nullptr), dummy_versions_(this), - current_(NULL) { + current_(nullptr) { AppendVersion(new Version(this)); } @@ -804,7 +762,7 @@ void VersionSet::AppendVersion(Version* v) { // Make "v" current assert(v->refs_ == 0); assert(v != current_); - if (current_ != NULL) { + if (current_ != nullptr) { current_->Unref(); } current_ = v; @@ -844,10 +802,10 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) { // a temporary file that contains a snapshot of the current version. std::string new_manifest_file; Status s; - if (descriptor_log_ == NULL) { + if (descriptor_log_ == nullptr) { // No reason to unlock *mu here since we only hit this path in the // first call to LogAndApply (when opening the database). 
- assert(descriptor_file_ == NULL); + assert(descriptor_file_ == nullptr); new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_); edit->SetNextFile(next_file_number_); s = env_->NewWritableFile(new_manifest_file, &descriptor_file_); @@ -893,8 +851,8 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) { if (!new_manifest_file.empty()) { delete descriptor_log_; delete descriptor_file_; - descriptor_log_ = NULL; - descriptor_file_ = NULL; + descriptor_log_ = nullptr; + descriptor_file_ = nullptr; env_->DeleteFile(new_manifest_file); } } @@ -902,10 +860,10 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) { return s; } -Status VersionSet::Recover(bool *save_manifest) { +Status VersionSet::Recover(bool* save_manifest) { struct LogReporter : public log::Reader::Reporter { Status* status; - virtual void Corruption(size_t bytes, const Status& s) { + void Corruption(size_t bytes, const Status& s) override { if (this->status->ok()) *this->status = s; } }; @@ -916,7 +874,7 @@ Status VersionSet::Recover(bool *save_manifest) { if (!s.ok()) { return s; } - if (current.empty() || current[current.size()-1] != '\n') { + if (current.empty() || current[current.size() - 1] != '\n') { return Status::Corruption("CURRENT file does not end with newline"); } current.resize(current.size() - 1); @@ -925,6 +883,10 @@ Status VersionSet::Recover(bool *save_manifest) { SequentialFile* file; s = env_->NewSequentialFile(dscname, &file); if (!s.ok()) { + if (s.IsNotFound()) { + return Status::Corruption("CURRENT points to a non-existent file", + s.ToString()); + } return s; } @@ -941,7 +903,8 @@ Status VersionSet::Recover(bool *save_manifest) { { LogReporter reporter; reporter.status = &s; - log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/); + log::Reader reader(file, &reporter, true /*checksum*/, + 0 /*initial_offset*/); Slice record; std::string scratch; while (reader.ReadRecord(&record, &scratch) && s.ok()) { @@ -982,7 +945,7 @@ Status VersionSet::Recover(bool *save_manifest) { } } delete file; - file = NULL; + file = nullptr; if (s.ok()) { if (!have_next_file) { @@ -1040,12 +1003,12 @@ bool VersionSet::ReuseManifest(const std::string& dscname, return false; } - assert(descriptor_file_ == NULL); - assert(descriptor_log_ == NULL); + assert(descriptor_file_ == nullptr); + assert(descriptor_log_ == nullptr); Status r = env_->NewAppendableFile(dscname, &descriptor_file_); if (!r.ok()) { Log(options_->info_log, "Reuse MANIFEST: %s\n", r.ToString().c_str()); - assert(descriptor_file_ == NULL); + assert(descriptor_file_ == nullptr); return false; } @@ -1066,7 +1029,7 @@ void VersionSet::Finalize(Version* v) { int best_level = -1; double best_score = -1; - for (int level = 0; level < config::kNumLevels-1; level++) { + for (int level = 0; level < config::kNumLevels - 1; level++) { double score; if (level == 0) { // We treat level-0 specially by bounding the number of files @@ -1081,7 +1044,7 @@ void VersionSet::Finalize(Version* v) { // setting, or very high compression ratios, or lots of // overwrites/deletions). score = v->files_[level].size() / - static_cast<double>(config::kL0_CompactionTrigger); + static_cast<double>(config::kL0_CompactionTrigger); } else { // Compute the ratio of current size to size limit. 
const uint64_t level_bytes = TotalFileSize(v->files_[level]); @@ -1137,16 +1100,12 @@ int VersionSet::NumLevelFiles(int level) const { const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const { // Update code if kNumLevels changes - assert(config::kNumLevels == 7); + static_assert(config::kNumLevels == 7, ""); snprintf(scratch->buffer, sizeof(scratch->buffer), - "files[ %d %d %d %d %d %d %d ]", - int(current_->files_[0].size()), - int(current_->files_[1].size()), - int(current_->files_[2].size()), - int(current_->files_[3].size()), - int(current_->files_[4].size()), - int(current_->files_[5].size()), - int(current_->files_[6].size())); + "files[ %d %d %d %d %d %d %d ]", int(current_->files_[0].size()), + int(current_->files_[1].size()), int(current_->files_[2].size()), + int(current_->files_[3].size()), int(current_->files_[4].size()), + int(current_->files_[5].size()), int(current_->files_[6].size())); return scratch->buffer; } @@ -1172,7 +1131,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) { Table* tableptr; Iterator* iter = table_cache_->NewIterator( ReadOptions(), files[i]->number, files[i]->file_size, &tableptr); - if (tableptr != NULL) { + if (tableptr != nullptr) { result += tableptr->ApproximateOffsetOf(ikey.Encode()); } delete iter; @@ -1183,8 +1142,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) { } void VersionSet::AddLiveFiles(std::set<uint64_t>* live) { - for (Version* v = dummy_versions_.next_; - v != &dummy_versions_; + for (Version* v = dummy_versions_.next_; v != &dummy_versions_; v = v->next_) { for (int level = 0; level < config::kNumLevels; level++) { const std::vector<FileMetaData*>& files = v->files_[level]; @@ -1207,7 +1165,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() { for (int level = 1; level < config::kNumLevels - 1; level++) { for (size_t i = 0; i < current_->files_[level].size(); i++) { const FileMetaData* f = current_->files_[level][i]; - current_->GetOverlappingInputs(level+1, &f->smallest, &f->largest, + current_->GetOverlappingInputs(level + 1, &f->smallest, &f->largest, &overlaps); const int64_t sum = TotalFileSize(overlaps); if (sum > result) { @@ -1222,8 +1180,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() { // *smallest, *largest. 
// REQUIRES: inputs is not empty void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs, - InternalKey* smallest, - InternalKey* largest) { + InternalKey* smallest, InternalKey* largest) { assert(!inputs.empty()); smallest->Clear(); largest->Clear(); @@ -1248,8 +1205,7 @@ void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs, // REQUIRES: inputs is not empty void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1, const std::vector<FileMetaData*>& inputs2, - InternalKey* smallest, - InternalKey* largest) { + InternalKey* smallest, InternalKey* largest) { std::vector<FileMetaData*> all = inputs1; all.insert(all.end(), inputs2.begin(), inputs2.end()); GetRange(all, smallest, largest); @@ -1271,8 +1227,8 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) { if (c->level() + which == 0) { const std::vector<FileMetaData*>& files = c->inputs_[which]; for (size_t i = 0; i < files.size(); i++) { - list[num++] = table_cache_->NewIterator( - options, files[i]->number, files[i]->file_size); + list[num++] = table_cache_->NewIterator(options, files[i]->number, + files[i]->file_size); } } else { // Create concatenating iterator for the files from this level @@ -1295,11 +1251,11 @@ Compaction* VersionSet::PickCompaction() { // We prefer compactions triggered by too much data in a level over // the compactions triggered by seeks. const bool size_compaction = (current_->compaction_score_ >= 1); - const bool seek_compaction = (current_->file_to_compact_ != NULL); + const bool seek_compaction = (current_->file_to_compact_ != nullptr); if (size_compaction) { level = current_->compaction_level_; assert(level >= 0); - assert(level+1 < config::kNumLevels); + assert(level + 1 < config::kNumLevels); c = new Compaction(options_, level); // Pick the first file that comes after compact_pointer_[level] @@ -1320,7 +1276,7 @@ Compaction* VersionSet::PickCompaction() { c = new Compaction(options_, level); c->inputs_[0].push_back(current_->file_to_compact_); } else { - return NULL; + return nullptr; } c->input_version_ = current_; @@ -1342,12 +1298,94 @@ Compaction* VersionSet::PickCompaction() { return c; } +// Finds the largest key in a vector of files. Returns true if files it not +// empty. 
+bool FindLargestKey(const InternalKeyComparator& icmp, + const std::vector<FileMetaData*>& files, + InternalKey* largest_key) { + if (files.empty()) { + return false; + } + *largest_key = files[0]->largest; + for (size_t i = 1; i < files.size(); ++i) { + FileMetaData* f = files[i]; + if (icmp.Compare(f->largest, *largest_key) > 0) { + *largest_key = f->largest; + } + } + return true; +} + +// Finds minimum file b2=(l2, u2) in level file for which l2 > u1 and +// user_key(l2) = user_key(u1) +FileMetaData* FindSmallestBoundaryFile( + const InternalKeyComparator& icmp, + const std::vector<FileMetaData*>& level_files, + const InternalKey& largest_key) { + const Comparator* user_cmp = icmp.user_comparator(); + FileMetaData* smallest_boundary_file = nullptr; + for (size_t i = 0; i < level_files.size(); ++i) { + FileMetaData* f = level_files[i]; + if (icmp.Compare(f->smallest, largest_key) > 0 && + user_cmp->Compare(f->smallest.user_key(), largest_key.user_key()) == + 0) { + if (smallest_boundary_file == nullptr || + icmp.Compare(f->smallest, smallest_boundary_file->smallest) < 0) { + smallest_boundary_file = f; + } + } + } + return smallest_boundary_file; +} + +// Extracts the largest file b1 from |compaction_files| and then searches for a +// b2 in |level_files| for which user_key(u1) = user_key(l2). If it finds such a +// file b2 (known as a boundary file) it adds it to |compaction_files| and then +// searches again using this new upper bound. +// +// If there are two blocks, b1=(l1, u1) and b2=(l2, u2) and +// user_key(u1) = user_key(l2), and if we compact b1 but not b2 then a +// subsequent get operation will yield an incorrect result because it will +// return the record from b2 in level i rather than from b1 because it searches +// level by level for records matching the supplied user key. +// +// parameters: +// in level_files: List of files to search for boundary files. +// in/out compaction_files: List of files to extend by adding boundary files. +void AddBoundaryInputs(const InternalKeyComparator& icmp, + const std::vector<FileMetaData*>& level_files, + std::vector<FileMetaData*>* compaction_files) { + InternalKey largest_key; + + // Quick return if compaction_files is empty. + if (!FindLargestKey(icmp, *compaction_files, &largest_key)) { + return; + } + + bool continue_searching = true; + while (continue_searching) { + FileMetaData* smallest_boundary_file = + FindSmallestBoundaryFile(icmp, level_files, largest_key); + + // If a boundary file was found advance largest_key, otherwise we're done. 
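To see why this boundary check matters, here is a minimal standalone sketch (not part of the patch, and using a simplified stand-in for leveldb's internal-key ordering: ascending user key, then descending sequence number). It mirrors the TestOneBoundaryFiles data added to version_set_test.cc further down; the AddBoundaryInputs loop resumes immediately after it.

#include <cstdint>
#include <cstdio>
#include <string>
#include <utility>

// Simplified stand-in for InternalKey ordering (assumption, for
// illustration only): ascending user key, then descending sequence.
using Key = std::pair<std::string, uint64_t>;  // (user_key, sequence)

static bool InternalLess(const Key& a, const Key& b) {
  if (a.first != b.first) return a.first < b.first;
  return a.second > b.second;  // newer (higher sequence) sorts first
}

int main() {
  Key f1_largest{"100", 2};   // largest key of the file already chosen
  Key f2_smallest{"100", 1};  // smallest key of a candidate level file
  // f2 is a boundary file: its smallest key is greater than f1's largest
  // key in internal-key order yet shares the same user key, so compacting
  // f1 alone would leave the older "100"@1 record behind in this level.
  bool boundary = InternalLess(f1_largest, f2_smallest) &&
                  f1_largest.first == f2_smallest.first;
  std::printf("f2 is a boundary file: %s\n", boundary ? "yes" : "no");
  return 0;
}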
+ if (smallest_boundary_file != NULL) { + compaction_files->push_back(smallest_boundary_file); + largest_key = smallest_boundary_file->largest; + } else { + continue_searching = false; + } + } +} + void VersionSet::SetupOtherInputs(Compaction* c) { const int level = c->level(); InternalKey smallest, largest; + + AddBoundaryInputs(icmp_, current_->files_[level], &c->inputs_[0]); GetRange(c->inputs_[0], &smallest, &largest); - current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1]); + current_->GetOverlappingInputs(level + 1, &smallest, &largest, + &c->inputs_[1]); // Get entire range covered by compaction InternalKey all_start, all_limit; @@ -1358,6 +1396,7 @@ void VersionSet::SetupOtherInputs(Compaction* c) { if (!c->inputs_[1].empty()) { std::vector<FileMetaData*> expanded0; current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0); + AddBoundaryInputs(icmp_, current_->files_[level], &expanded0); const int64_t inputs0_size = TotalFileSize(c->inputs_[0]); const int64_t inputs1_size = TotalFileSize(c->inputs_[1]); const int64_t expanded0_size = TotalFileSize(expanded0); @@ -1367,18 +1406,14 @@ void VersionSet::SetupOtherInputs(Compaction* c) { InternalKey new_start, new_limit; GetRange(expanded0, &new_start, &new_limit); std::vector<FileMetaData*> expanded1; - current_->GetOverlappingInputs(level+1, &new_start, &new_limit, + current_->GetOverlappingInputs(level + 1, &new_start, &new_limit, &expanded1); if (expanded1.size() == c->inputs_[1].size()) { Log(options_->info_log, "Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n", - level, - int(c->inputs_[0].size()), - int(c->inputs_[1].size()), - long(inputs0_size), long(inputs1_size), - int(expanded0.size()), - int(expanded1.size()), - long(expanded0_size), long(inputs1_size)); + level, int(c->inputs_[0].size()), int(c->inputs_[1].size()), + long(inputs0_size), long(inputs1_size), int(expanded0.size()), + int(expanded1.size()), long(expanded0_size), long(inputs1_size)); smallest = new_start; largest = new_limit; c->inputs_[0] = expanded0; @@ -1395,13 +1430,6 @@ void VersionSet::SetupOtherInputs(Compaction* c) { &c->grandparents_); } - if (false) { - Log(options_->info_log, "Compacting %d '%s' .. '%s'", - level, - smallest.DebugString().c_str(), - largest.DebugString().c_str()); - } - // Update the place where we will do the next compaction for this level. // We update this immediately instead of waiting for the VersionEdit // to be applied so that if the compaction fails, we will try a different @@ -1410,14 +1438,12 @@ void VersionSet::SetupOtherInputs(Compaction* c) { c->edit_.SetCompactPointer(level, largest); } -Compaction* VersionSet::CompactRange( - int level, - const InternalKey* begin, - const InternalKey* end) { +Compaction* VersionSet::CompactRange(int level, const InternalKey* begin, + const InternalKey* end) { std::vector<FileMetaData*> inputs; current_->GetOverlappingInputs(level, begin, end, &inputs); if (inputs.empty()) { - return NULL; + return nullptr; } // Avoid compacting too much in one shot in case the range is large. 
@@ -1448,7 +1474,7 @@ Compaction* VersionSet::CompactRange( Compaction::Compaction(const Options* options, int level) : level_(level), max_output_file_size_(MaxFileSizeForLevel(options, level)), - input_version_(NULL), + input_version_(nullptr), grandparent_index_(0), seen_key_(false), overlapped_bytes_(0) { @@ -1458,7 +1484,7 @@ Compaction::Compaction(const Options* options, int level) } Compaction::~Compaction() { - if (input_version_ != NULL) { + if (input_version_ != nullptr) { input_version_->Unref(); } } @@ -1486,7 +1512,7 @@ bool Compaction::IsBaseLevelForKey(const Slice& user_key) { const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator(); for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) { const std::vector<FileMetaData*>& files = input_version_->files_[lvl]; - for (; level_ptrs_[lvl] < files.size(); ) { + while (level_ptrs_[lvl] < files.size()) { FileMetaData* f = files[level_ptrs_[lvl]]; if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) { // We've advanced far enough @@ -1507,8 +1533,9 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) { // Scan to find earliest grandparent file that contains key. const InternalKeyComparator* icmp = &vset->icmp_; while (grandparent_index_ < grandparents_.size() && - icmp->Compare(internal_key, - grandparents_[grandparent_index_]->largest.Encode()) > 0) { + icmp->Compare(internal_key, + grandparents_[grandparent_index_]->largest.Encode()) > + 0) { if (seen_key_) { overlapped_bytes_ += grandparents_[grandparent_index_]->file_size; } @@ -1526,9 +1553,9 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) { } void Compaction::ReleaseInputs() { - if (input_version_ != NULL) { + if (input_version_ != nullptr) { input_version_->Unref(); - input_version_ = NULL; + input_version_ = nullptr; } } diff --git a/src/leveldb/db/version_set.h b/src/leveldb/db/version_set.h index 7935a965a7..69f3d70133 100644 --- a/src/leveldb/db/version_set.h +++ b/src/leveldb/db/version_set.h @@ -18,6 +18,7 @@ #include <map> #include <set> #include <vector> + #include "db/dbformat.h" #include "db/version_edit.h" #include "port/port.h" @@ -25,7 +26,9 @@ namespace leveldb { -namespace log { class Writer; } +namespace log { +class Writer; +} class Compaction; class Iterator; @@ -39,30 +42,23 @@ class WritableFile; // Return the smallest index i such that files[i]->largest >= key. // Return files.size() if there is no such file. // REQUIRES: "files" contains a sorted list of non-overlapping files. -extern int FindFile(const InternalKeyComparator& icmp, - const std::vector<FileMetaData*>& files, - const Slice& key); +int FindFile(const InternalKeyComparator& icmp, + const std::vector<FileMetaData*>& files, const Slice& key); // Returns true iff some file in "files" overlaps the user key range // [*smallest,*largest]. -// smallest==NULL represents a key smaller than all keys in the DB. -// largest==NULL represents a key largest than all keys in the DB. +// smallest==nullptr represents a key smaller than all keys in the DB. +// largest==nullptr represents a key largest than all keys in the DB. // REQUIRES: If disjoint_sorted_files, files[] contains disjoint ranges // in sorted order. 
-extern bool SomeFileOverlapsRange( - const InternalKeyComparator& icmp, - bool disjoint_sorted_files, - const std::vector<FileMetaData*>& files, - const Slice* smallest_user_key, - const Slice* largest_user_key); +bool SomeFileOverlapsRange(const InternalKeyComparator& icmp, + bool disjoint_sorted_files, + const std::vector<FileMetaData*>& files, + const Slice* smallest_user_key, + const Slice* largest_user_key); class Version { public: - // Append to *iters a sequence of iterators that will - // yield the contents of this Version when merged together. - // REQUIRES: This version has been saved (see VersionSet::SaveTo) - void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters); - // Lookup the value for key. If found, store it in *val and // return OK. Else return a non-OK status. Fills *stats. // REQUIRES: lock is not held @@ -70,6 +66,12 @@ class Version { FileMetaData* seek_file; int seek_file_level; }; + + // Append to *iters a sequence of iterators that will + // yield the contents of this Version when merged together. + // REQUIRES: This version has been saved (see VersionSet::SaveTo) + void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters); + Status Get(const ReadOptions&, const LookupKey& key, std::string* val, GetStats* stats); @@ -91,16 +93,15 @@ class Version { void GetOverlappingInputs( int level, - const InternalKey* begin, // NULL means before all keys - const InternalKey* end, // NULL means after all keys + const InternalKey* begin, // nullptr means before all keys + const InternalKey* end, // nullptr means after all keys std::vector<FileMetaData*>* inputs); // Returns true iff some file in the specified level overlaps // some part of [*smallest_user_key,*largest_user_key]. - // smallest_user_key==NULL represents a key smaller than all keys in the DB. - // largest_user_key==NULL represents a key largest than all keys in the DB. - bool OverlapInLevel(int level, - const Slice* smallest_user_key, + // smallest_user_key==nullptr represents a key smaller than all the DB's keys. + // largest_user_key==nullptr represents a key largest than all the DB's keys. + bool OverlapInLevel(int level, const Slice* smallest_user_key, const Slice* largest_user_key); // Return the level at which we should place a new memtable compaction @@ -118,6 +119,22 @@ class Version { friend class VersionSet; class LevelFileNumIterator; + + explicit Version(VersionSet* vset) + : vset_(vset), + next_(this), + prev_(this), + refs_(0), + file_to_compact_(nullptr), + file_to_compact_level_(-1), + compaction_score_(-1), + compaction_level_(-1) {} + + Version(const Version&) = delete; + Version& operator=(const Version&) = delete; + + ~Version(); + Iterator* NewConcatenatingIterator(const ReadOptions&, int level) const; // Call func(arg, level, f) for every file that overlaps user_key in @@ -125,14 +142,13 @@ class Version { // false, makes no more calls. // // REQUIRES: user portion of internal_key == user_key. 
- void ForEachOverlapping(Slice user_key, Slice internal_key, - void* arg, + void ForEachOverlapping(Slice user_key, Slice internal_key, void* arg, bool (*func)(void*, int, FileMetaData*)); - VersionSet* vset_; // VersionSet to which this Version belongs - Version* next_; // Next version in linked list - Version* prev_; // Previous version in linked list - int refs_; // Number of live refs to this version + VersionSet* vset_; // VersionSet to which this Version belongs + Version* next_; // Next version in linked list + Version* prev_; // Previous version in linked list + int refs_; // Number of live refs to this version // List of files per level std::vector<FileMetaData*> files_[config::kNumLevels]; @@ -146,28 +162,15 @@ class Version { // are initialized by Finalize(). double compaction_score_; int compaction_level_; - - explicit Version(VersionSet* vset) - : vset_(vset), next_(this), prev_(this), refs_(0), - file_to_compact_(NULL), - file_to_compact_level_(-1), - compaction_score_(-1), - compaction_level_(-1) { - } - - ~Version(); - - // No copying allowed - Version(const Version&); - void operator=(const Version&); }; class VersionSet { public: - VersionSet(const std::string& dbname, - const Options* options, - TableCache* table_cache, - const InternalKeyComparator*); + VersionSet(const std::string& dbname, const Options* options, + TableCache* table_cache, const InternalKeyComparator*); + VersionSet(const VersionSet&) = delete; + VersionSet& operator=(const VersionSet&) = delete; + ~VersionSet(); // Apply *edit to the current version to form a new descriptor that @@ -179,7 +182,7 @@ class VersionSet { EXCLUSIVE_LOCKS_REQUIRED(mu); // Recover the last saved descriptor from persistent storage. - Status Recover(bool *save_manifest); + Status Recover(bool* save_manifest); // Return the current version. Version* current() const { return current_; } @@ -225,19 +228,17 @@ class VersionSet { uint64_t PrevLogNumber() const { return prev_log_number_; } // Pick level and inputs for a new compaction. - // Returns NULL if there is no compaction to be done. + // Returns nullptr if there is no compaction to be done. // Otherwise returns a pointer to a heap-allocated object that // describes the compaction. Caller should delete the result. Compaction* PickCompaction(); // Return a compaction object for compacting the range [begin,end] in - // the specified level. Returns NULL if there is nothing in that + // the specified level. Returns nullptr if there is nothing in that // level that overlaps the specified range. Caller should delete // the result. - Compaction* CompactRange( - int level, - const InternalKey* begin, - const InternalKey* end); + Compaction* CompactRange(int level, const InternalKey* begin, + const InternalKey* end); // Return the maximum overlapping data (in bytes) at next level for any // file at a level >= 1. @@ -250,7 +251,7 @@ class VersionSet { // Returns true iff some level needs a compaction. bool NeedsCompaction() const { Version* v = current_; - return (v->compaction_score_ >= 1) || (v->file_to_compact_ != NULL); + return (v->compaction_score_ >= 1) || (v->file_to_compact_ != nullptr); } // Add all files listed in any live version to *live. 
@@ -278,14 +279,12 @@ class VersionSet { void Finalize(Version* v); - void GetRange(const std::vector<FileMetaData*>& inputs, - InternalKey* smallest, + void GetRange(const std::vector<FileMetaData*>& inputs, InternalKey* smallest, InternalKey* largest); void GetRange2(const std::vector<FileMetaData*>& inputs1, const std::vector<FileMetaData*>& inputs2, - InternalKey* smallest, - InternalKey* largest); + InternalKey* smallest, InternalKey* largest); void SetupOtherInputs(Compaction* c); @@ -314,10 +313,6 @@ class VersionSet { // Per-level key at which the next compaction at that level should start. // Either an empty string, or a valid InternalKey. std::string compact_pointer_[config::kNumLevels]; - - // No copying allowed - VersionSet(const VersionSet&); - void operator=(const VersionSet&); }; // A Compaction encapsulates information about a compaction. @@ -374,7 +369,7 @@ class Compaction { VersionEdit edit_; // Each compaction reads inputs from "level_" and "level_+1" - std::vector<FileMetaData*> inputs_[2]; // The two sets of inputs + std::vector<FileMetaData*> inputs_[2]; // The two sets of inputs // State used to check for number of overlapping grandparent files // (parent == level_ + 1, grandparent == level_ + 2) diff --git a/src/leveldb/db/version_set_test.cc b/src/leveldb/db/version_set_test.cc index 501e34d133..c1056a1e7d 100644 --- a/src/leveldb/db/version_set_test.cc +++ b/src/leveldb/db/version_set_test.cc @@ -11,10 +11,7 @@ namespace leveldb { class FindFileTest { public: - std::vector<FileMetaData*> files_; - bool disjoint_sorted_files_; - - FindFileTest() : disjoint_sorted_files_(true) { } + FindFileTest() : disjoint_sorted_files_(true) {} ~FindFileTest() { for (int i = 0; i < files_.size(); i++) { @@ -40,20 +37,25 @@ class FindFileTest { bool Overlaps(const char* smallest, const char* largest) { InternalKeyComparator cmp(BytewiseComparator()); - Slice s(smallest != NULL ? smallest : ""); - Slice l(largest != NULL ? largest : ""); + Slice s(smallest != nullptr ? smallest : ""); + Slice l(largest != nullptr ? largest : ""); return SomeFileOverlapsRange(cmp, disjoint_sorted_files_, files_, - (smallest != NULL ? &s : NULL), - (largest != NULL ? &l : NULL)); + (smallest != nullptr ? &s : nullptr), + (largest != nullptr ? &l : nullptr)); } + + bool disjoint_sorted_files_; + + private: + std::vector<FileMetaData*> files_; }; TEST(FindFileTest, Empty) { ASSERT_EQ(0, Find("foo")); - ASSERT_TRUE(! Overlaps("a", "z")); - ASSERT_TRUE(! Overlaps(NULL, "z")); - ASSERT_TRUE(! Overlaps("a", NULL)); - ASSERT_TRUE(! Overlaps(NULL, NULL)); + ASSERT_TRUE(!Overlaps("a", "z")); + ASSERT_TRUE(!Overlaps(nullptr, "z")); + ASSERT_TRUE(!Overlaps("a", nullptr)); + ASSERT_TRUE(!Overlaps(nullptr, nullptr)); } TEST(FindFileTest, Single) { @@ -65,8 +67,8 @@ TEST(FindFileTest, Single) { ASSERT_EQ(1, Find("q1")); ASSERT_EQ(1, Find("z")); - ASSERT_TRUE(! Overlaps("a", "b")); - ASSERT_TRUE(! Overlaps("z1", "z2")); + ASSERT_TRUE(!Overlaps("a", "b")); + ASSERT_TRUE(!Overlaps("z1", "z2")); ASSERT_TRUE(Overlaps("a", "p")); ASSERT_TRUE(Overlaps("a", "q")); ASSERT_TRUE(Overlaps("a", "z")); @@ -78,15 +80,14 @@ TEST(FindFileTest, Single) { ASSERT_TRUE(Overlaps("q", "q")); ASSERT_TRUE(Overlaps("q", "q1")); - ASSERT_TRUE(! Overlaps(NULL, "j")); - ASSERT_TRUE(! 
Overlaps("r", NULL)); - ASSERT_TRUE(Overlaps(NULL, "p")); - ASSERT_TRUE(Overlaps(NULL, "p1")); - ASSERT_TRUE(Overlaps("q", NULL)); - ASSERT_TRUE(Overlaps(NULL, NULL)); + ASSERT_TRUE(!Overlaps(nullptr, "j")); + ASSERT_TRUE(!Overlaps("r", nullptr)); + ASSERT_TRUE(Overlaps(nullptr, "p")); + ASSERT_TRUE(Overlaps(nullptr, "p1")); + ASSERT_TRUE(Overlaps("q", nullptr)); + ASSERT_TRUE(Overlaps(nullptr, nullptr)); } - TEST(FindFileTest, Multiple) { Add("150", "200"); Add("200", "250"); @@ -110,10 +111,10 @@ TEST(FindFileTest, Multiple) { ASSERT_EQ(3, Find("450")); ASSERT_EQ(4, Find("451")); - ASSERT_TRUE(! Overlaps("100", "149")); - ASSERT_TRUE(! Overlaps("251", "299")); - ASSERT_TRUE(! Overlaps("451", "500")); - ASSERT_TRUE(! Overlaps("351", "399")); + ASSERT_TRUE(!Overlaps("100", "149")); + ASSERT_TRUE(!Overlaps("251", "299")); + ASSERT_TRUE(!Overlaps("451", "500")); + ASSERT_TRUE(!Overlaps("351", "399")); ASSERT_TRUE(Overlaps("100", "150")); ASSERT_TRUE(Overlaps("100", "200")); @@ -130,25 +131,25 @@ TEST(FindFileTest, MultipleNullBoundaries) { Add("200", "250"); Add("300", "350"); Add("400", "450"); - ASSERT_TRUE(! Overlaps(NULL, "149")); - ASSERT_TRUE(! Overlaps("451", NULL)); - ASSERT_TRUE(Overlaps(NULL, NULL)); - ASSERT_TRUE(Overlaps(NULL, "150")); - ASSERT_TRUE(Overlaps(NULL, "199")); - ASSERT_TRUE(Overlaps(NULL, "200")); - ASSERT_TRUE(Overlaps(NULL, "201")); - ASSERT_TRUE(Overlaps(NULL, "400")); - ASSERT_TRUE(Overlaps(NULL, "800")); - ASSERT_TRUE(Overlaps("100", NULL)); - ASSERT_TRUE(Overlaps("200", NULL)); - ASSERT_TRUE(Overlaps("449", NULL)); - ASSERT_TRUE(Overlaps("450", NULL)); + ASSERT_TRUE(!Overlaps(nullptr, "149")); + ASSERT_TRUE(!Overlaps("451", nullptr)); + ASSERT_TRUE(Overlaps(nullptr, nullptr)); + ASSERT_TRUE(Overlaps(nullptr, "150")); + ASSERT_TRUE(Overlaps(nullptr, "199")); + ASSERT_TRUE(Overlaps(nullptr, "200")); + ASSERT_TRUE(Overlaps(nullptr, "201")); + ASSERT_TRUE(Overlaps(nullptr, "400")); + ASSERT_TRUE(Overlaps(nullptr, "800")); + ASSERT_TRUE(Overlaps("100", nullptr)); + ASSERT_TRUE(Overlaps("200", nullptr)); + ASSERT_TRUE(Overlaps("449", nullptr)); + ASSERT_TRUE(Overlaps("450", nullptr)); } TEST(FindFileTest, OverlapSequenceChecks) { Add("200", "200", 5000, 3000); - ASSERT_TRUE(! Overlaps("199", "199")); - ASSERT_TRUE(! Overlaps("201", "300")); + ASSERT_TRUE(!Overlaps("199", "199")); + ASSERT_TRUE(!Overlaps("201", "300")); ASSERT_TRUE(Overlaps("200", "200")); ASSERT_TRUE(Overlaps("190", "200")); ASSERT_TRUE(Overlaps("200", "210")); @@ -158,8 +159,8 @@ TEST(FindFileTest, OverlappingFiles) { Add("150", "600"); Add("400", "500"); disjoint_sorted_files_ = false; - ASSERT_TRUE(! Overlaps("100", "149")); - ASSERT_TRUE(! 
Overlaps("601", "700")); + ASSERT_TRUE(!Overlaps("100", "149")); + ASSERT_TRUE(!Overlaps("601", "700")); ASSERT_TRUE(Overlaps("100", "150")); ASSERT_TRUE(Overlaps("100", "200")); ASSERT_TRUE(Overlaps("100", "300")); @@ -172,8 +173,160 @@ TEST(FindFileTest, OverlappingFiles) { ASSERT_TRUE(Overlaps("600", "700")); } -} // namespace leveldb +void AddBoundaryInputs(const InternalKeyComparator& icmp, + const std::vector<FileMetaData*>& level_files, + std::vector<FileMetaData*>* compaction_files); + +class AddBoundaryInputsTest { + public: + std::vector<FileMetaData*> level_files_; + std::vector<FileMetaData*> compaction_files_; + std::vector<FileMetaData*> all_files_; + InternalKeyComparator icmp_; + + AddBoundaryInputsTest() : icmp_(BytewiseComparator()) {} + + ~AddBoundaryInputsTest() { + for (size_t i = 0; i < all_files_.size(); ++i) { + delete all_files_[i]; + } + all_files_.clear(); + } + + FileMetaData* CreateFileMetaData(uint64_t number, InternalKey smallest, + InternalKey largest) { + FileMetaData* f = new FileMetaData(); + f->number = number; + f->smallest = smallest; + f->largest = largest; + all_files_.push_back(f); + return f; + } +}; + +TEST(AddBoundaryInputsTest, TestEmptyFileSets) { + AddBoundaryInputs(icmp_, level_files_, &compaction_files_); + ASSERT_TRUE(compaction_files_.empty()); + ASSERT_TRUE(level_files_.empty()); +} + +TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) { + FileMetaData* f1 = + CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), + InternalKey(InternalKey("100", 1, kTypeValue))); + compaction_files_.push_back(f1); + + AddBoundaryInputs(icmp_, level_files_, &compaction_files_); + ASSERT_EQ(1, compaction_files_.size()); + ASSERT_EQ(f1, compaction_files_[0]); + ASSERT_TRUE(level_files_.empty()); +} + +TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) { + FileMetaData* f1 = + CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), + InternalKey(InternalKey("100", 1, kTypeValue))); + level_files_.push_back(f1); + + AddBoundaryInputs(icmp_, level_files_, &compaction_files_); + ASSERT_TRUE(compaction_files_.empty()); + ASSERT_EQ(1, level_files_.size()); + ASSERT_EQ(f1, level_files_[0]); +} + +TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) { + FileMetaData* f1 = + CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), + InternalKey(InternalKey("100", 1, kTypeValue))); + FileMetaData* f2 = + CreateFileMetaData(1, InternalKey("200", 2, kTypeValue), + InternalKey(InternalKey("200", 1, kTypeValue))); + FileMetaData* f3 = + CreateFileMetaData(1, InternalKey("300", 2, kTypeValue), + InternalKey(InternalKey("300", 1, kTypeValue))); + + level_files_.push_back(f3); + level_files_.push_back(f2); + level_files_.push_back(f1); + compaction_files_.push_back(f2); + compaction_files_.push_back(f3); + + AddBoundaryInputs(icmp_, level_files_, &compaction_files_); + ASSERT_EQ(2, compaction_files_.size()); +} + +TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) { + FileMetaData* f1 = + CreateFileMetaData(1, InternalKey("100", 3, kTypeValue), + InternalKey(InternalKey("100", 2, kTypeValue))); + FileMetaData* f2 = + CreateFileMetaData(1, InternalKey("100", 1, kTypeValue), + InternalKey(InternalKey("200", 3, kTypeValue))); + FileMetaData* f3 = + CreateFileMetaData(1, InternalKey("300", 2, kTypeValue), + InternalKey(InternalKey("300", 1, kTypeValue))); + + level_files_.push_back(f3); + level_files_.push_back(f2); + level_files_.push_back(f1); + compaction_files_.push_back(f1); + + AddBoundaryInputs(icmp_, level_files_, &compaction_files_); + ASSERT_EQ(2, 
compaction_files_.size()); + ASSERT_EQ(f1, compaction_files_[0]); + ASSERT_EQ(f2, compaction_files_[1]); +} + +TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) { + FileMetaData* f1 = + CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), + InternalKey(InternalKey("100", 5, kTypeValue))); + FileMetaData* f2 = + CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), + InternalKey(InternalKey("300", 1, kTypeValue))); + FileMetaData* f3 = + CreateFileMetaData(1, InternalKey("100", 4, kTypeValue), + InternalKey(InternalKey("100", 3, kTypeValue))); + + level_files_.push_back(f2); + level_files_.push_back(f3); + level_files_.push_back(f1); + compaction_files_.push_back(f1); -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); + AddBoundaryInputs(icmp_, level_files_, &compaction_files_); + ASSERT_EQ(3, compaction_files_.size()); + ASSERT_EQ(f1, compaction_files_[0]); + ASSERT_EQ(f3, compaction_files_[1]); + ASSERT_EQ(f2, compaction_files_[2]); } + +TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) { + FileMetaData* f1 = + CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), + InternalKey(InternalKey("100", 5, kTypeValue))); + FileMetaData* f2 = + CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), + InternalKey(InternalKey("100", 5, kTypeValue))); + FileMetaData* f3 = + CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), + InternalKey(InternalKey("300", 1, kTypeValue))); + FileMetaData* f4 = + CreateFileMetaData(1, InternalKey("100", 4, kTypeValue), + InternalKey(InternalKey("100", 3, kTypeValue))); + + level_files_.push_back(f2); + level_files_.push_back(f3); + level_files_.push_back(f4); + + compaction_files_.push_back(f1); + + AddBoundaryInputs(icmp_, level_files_, &compaction_files_); + ASSERT_EQ(3, compaction_files_.size()); + ASSERT_EQ(f1, compaction_files_[0]); + ASSERT_EQ(f4, compaction_files_[1]); + ASSERT_EQ(f3, compaction_files_[2]); +} + +} // namespace leveldb + +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/db/write_batch.cc b/src/leveldb/db/write_batch.cc index 33f4a4257e..b54313c35e 100644 --- a/src/leveldb/db/write_batch.cc +++ b/src/leveldb/db/write_batch.cc @@ -15,10 +15,10 @@ #include "leveldb/write_batch.h" -#include "leveldb/db.h" #include "db/dbformat.h" #include "db/memtable.h" #include "db/write_batch_internal.h" +#include "leveldb/db.h" #include "util/coding.h" namespace leveldb { @@ -26,19 +26,19 @@ namespace leveldb { // WriteBatch header has an 8-byte sequence number followed by a 4-byte count. 
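A brief usage sketch (not part of the patch) of the WriteBatch::ApproximateSize() accessor introduced in the hunks just below: an empty batch reports only this 12-byte header, and each Put or Delete grows the estimate by its encoded record size.

#include <cstdio>

#include "leveldb/slice.h"
#include "leveldb/write_batch.h"

int main() {
  leveldb::WriteBatch batch;
  std::printf("empty:     %zu\n", batch.ApproximateSize());  // 12-byte header only
  batch.Put(leveldb::Slice("foo"), leveldb::Slice("bar"));   // header + encoded Put record
  std::printf("after put: %zu\n", batch.ApproximateSize());
  batch.Delete(leveldb::Slice("box"));                       // grows further
  std::printf("after del: %zu\n", batch.ApproximateSize());
  return 0;
}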
static const size_t kHeader = 12; -WriteBatch::WriteBatch() { - Clear(); -} +WriteBatch::WriteBatch() { Clear(); } -WriteBatch::~WriteBatch() { } +WriteBatch::~WriteBatch() = default; -WriteBatch::Handler::~Handler() { } +WriteBatch::Handler::~Handler() = default; void WriteBatch::Clear() { rep_.clear(); rep_.resize(kHeader); } +size_t WriteBatch::ApproximateSize() const { return rep_.size(); } + Status WriteBatch::Iterate(Handler* handler) const { Slice input(rep_); if (input.size() < kHeader) { @@ -108,25 +108,28 @@ void WriteBatch::Delete(const Slice& key) { PutLengthPrefixedSlice(&rep_, key); } +void WriteBatch::Append(const WriteBatch& source) { + WriteBatchInternal::Append(this, &source); +} + namespace { class MemTableInserter : public WriteBatch::Handler { public: SequenceNumber sequence_; MemTable* mem_; - virtual void Put(const Slice& key, const Slice& value) { + void Put(const Slice& key, const Slice& value) override { mem_->Add(sequence_, kTypeValue, key, value); sequence_++; } - virtual void Delete(const Slice& key) { + void Delete(const Slice& key) override { mem_->Add(sequence_, kTypeDeletion, key, Slice()); sequence_++; } }; } // namespace -Status WriteBatchInternal::InsertInto(const WriteBatch* b, - MemTable* memtable) { +Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) { MemTableInserter inserter; inserter.sequence_ = WriteBatchInternal::Sequence(b); inserter.mem_ = memtable; diff --git a/src/leveldb/db/write_batch_internal.h b/src/leveldb/db/write_batch_internal.h index 9448ef7b21..fce86e3f1f 100644 --- a/src/leveldb/db/write_batch_internal.h +++ b/src/leveldb/db/write_batch_internal.h @@ -29,13 +29,9 @@ class WriteBatchInternal { // this batch. static void SetSequence(WriteBatch* batch, SequenceNumber seq); - static Slice Contents(const WriteBatch* batch) { - return Slice(batch->rep_); - } + static Slice Contents(const WriteBatch* batch) { return Slice(batch->rep_); } - static size_t ByteSize(const WriteBatch* batch) { - return batch->rep_.size(); - } + static size_t ByteSize(const WriteBatch* batch) { return batch->rep_.size(); } static void SetContents(WriteBatch* batch, const Slice& contents); @@ -46,5 +42,4 @@ class WriteBatchInternal { } // namespace leveldb - #endif // STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_ diff --git a/src/leveldb/db/write_batch_test.cc b/src/leveldb/db/write_batch_test.cc index 9064e3d85e..c32317fb5e 100644 --- a/src/leveldb/db/write_batch_test.cc +++ b/src/leveldb/db/write_batch_test.cc @@ -52,7 +52,7 @@ static std::string PrintContents(WriteBatch* b) { return state; } -class WriteBatchTest { }; +class WriteBatchTest {}; TEST(WriteBatchTest, Empty) { WriteBatch batch; @@ -68,10 +68,11 @@ TEST(WriteBatchTest, Multiple) { WriteBatchInternal::SetSequence(&batch, 100); ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch)); ASSERT_EQ(3, WriteBatchInternal::Count(&batch)); - ASSERT_EQ("Put(baz, boo)@102" - "Delete(box)@101" - "Put(foo, bar)@100", - PrintContents(&batch)); + ASSERT_EQ( + "Put(baz, boo)@102" + "Delete(box)@101" + "Put(foo, bar)@100", + PrintContents(&batch)); } TEST(WriteBatchTest, Corruption) { @@ -81,40 +82,56 @@ TEST(WriteBatchTest, Corruption) { WriteBatchInternal::SetSequence(&batch, 200); Slice contents = WriteBatchInternal::Contents(&batch); WriteBatchInternal::SetContents(&batch, - Slice(contents.data(),contents.size()-1)); - ASSERT_EQ("Put(foo, bar)@200" - "ParseError()", - PrintContents(&batch)); + Slice(contents.data(), contents.size() - 1)); + ASSERT_EQ( + "Put(foo, bar)@200" + 
"ParseError()", + PrintContents(&batch)); } TEST(WriteBatchTest, Append) { WriteBatch b1, b2; WriteBatchInternal::SetSequence(&b1, 200); WriteBatchInternal::SetSequence(&b2, 300); - WriteBatchInternal::Append(&b1, &b2); - ASSERT_EQ("", - PrintContents(&b1)); + b1.Append(b2); + ASSERT_EQ("", PrintContents(&b1)); b2.Put("a", "va"); - WriteBatchInternal::Append(&b1, &b2); - ASSERT_EQ("Put(a, va)@200", - PrintContents(&b1)); + b1.Append(b2); + ASSERT_EQ("Put(a, va)@200", PrintContents(&b1)); b2.Clear(); b2.Put("b", "vb"); - WriteBatchInternal::Append(&b1, &b2); - ASSERT_EQ("Put(a, va)@200" - "Put(b, vb)@201", - PrintContents(&b1)); + b1.Append(b2); + ASSERT_EQ( + "Put(a, va)@200" + "Put(b, vb)@201", + PrintContents(&b1)); b2.Delete("foo"); - WriteBatchInternal::Append(&b1, &b2); - ASSERT_EQ("Put(a, va)@200" - "Put(b, vb)@202" - "Put(b, vb)@201" - "Delete(foo)@203", - PrintContents(&b1)); + b1.Append(b2); + ASSERT_EQ( + "Put(a, va)@200" + "Put(b, vb)@202" + "Put(b, vb)@201" + "Delete(foo)@203", + PrintContents(&b1)); } -} // namespace leveldb +TEST(WriteBatchTest, ApproximateSize) { + WriteBatch batch; + size_t empty_size = batch.ApproximateSize(); + + batch.Put(Slice("foo"), Slice("bar")); + size_t one_key_size = batch.ApproximateSize(); + ASSERT_LT(empty_size, one_key_size); + + batch.Put(Slice("baz"), Slice("boo")); + size_t two_keys_size = batch.ApproximateSize(); + ASSERT_LT(one_key_size, two_keys_size); -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); + batch.Delete(Slice("box")); + size_t post_delete_size = batch.ApproximateSize(); + ASSERT_LT(two_keys_size, post_delete_size); } + +} // namespace leveldb + +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/doc/benchmark.html b/src/leveldb/doc/benchmark.html index c4639772c1..f3fd77144c 100644 --- a/src/leveldb/doc/benchmark.html +++ b/src/leveldb/doc/benchmark.html @@ -90,9 +90,9 @@ div.bsql { <h4>Benchmark Source Code</h4> <p>We wrote benchmark tools for SQLite and Kyoto TreeDB based on LevelDB's <span class="code">db_bench</span>. The code for each of the benchmarks resides here:</p> <ul> - <li> <b>LevelDB:</b> <a href="http://code.google.com/p/leveldb/source/browse/trunk/db/db_bench.cc">db/db_bench.cc</a>.</li> - <li> <b>SQLite:</b> <a href="http://code.google.com/p/leveldb/source/browse/#svn%2Ftrunk%2Fdoc%2Fbench%2Fdb_bench_sqlite3.cc">doc/bench/db_bench_sqlite3.cc</a>.</li> - <li> <b>Kyoto TreeDB:</b> <a href="http://code.google.com/p/leveldb/source/browse/#svn%2Ftrunk%2Fdoc%2Fbench%2Fdb_bench_tree_db.cc">doc/bench/db_bench_tree_db.cc</a>.</li> + <li> <b>LevelDB:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench.cc">benchmarks/db_bench.cc</a>.</li> + <li> <b>SQLite:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench_sqlite3.cc">benchmarks/db_bench_sqlite3.cc</a>.</li> + <li> <b>Kyoto TreeDB:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench_tree_db.cc">benchmarks/db_bench_tree_db.cc</a>.</li> </ul> <h4>Custom Build Specifications</h4> diff --git a/src/leveldb/doc/impl.md b/src/leveldb/doc/impl.md index 4b13f2a6ba..cacabb96fc 100644 --- a/src/leveldb/doc/impl.md +++ b/src/leveldb/doc/impl.md @@ -64,13 +64,15 @@ Other files used for miscellaneous purposes may also be present (LOCK, *.dbtmp). 
## Level 0 -When the log file grows above a certain size (1MB by default): -Create a brand new memtable and log file and direct future updates here +When the log file grows above a certain size (4MB by default): +Create a brand new memtable and log file and direct future updates here. + In the background: -Write the contents of the previous memtable to an sstable -Discard the memtable -Delete the old log file and the old memtable -Add the new sstable to the young (level-0) level. + +1. Write the contents of the previous memtable to an sstable. +2. Discard the memtable. +3. Delete the old log file and the old memtable. +4. Add the new sstable to the young (level-0) level. ## Compactions diff --git a/src/leveldb/doc/index.md b/src/leveldb/doc/index.md index be8569692b..3d9a25805b 100644 --- a/src/leveldb/doc/index.md +++ b/src/leveldb/doc/index.md @@ -307,7 +307,7 @@ version numbers found in the keys to decide how to interpret them. ## Performance Performance can be tuned by changing the default values of the types defined in -`include/leveldb/options.h`. +`include/options.h`. ### Block size @@ -338,19 +338,19 @@ options.compression = leveldb::kNoCompression; ### Cache The contents of the database are stored in a set of files in the filesystem and -each file stores a sequence of compressed blocks. If options.cache is non-NULL, -it is used to cache frequently used uncompressed block contents. +each file stores a sequence of compressed blocks. If options.block_cache is +non-NULL, it is used to cache frequently used uncompressed block contents. ```c++ #include "leveldb/cache.h" leveldb::Options options; -options.cache = leveldb::NewLRUCache(100 * 1048576); // 100MB cache +options.block_cache = leveldb::NewLRUCache(100 * 1048576); // 100MB cache leveldb::DB* db; leveldb::DB::Open(options, name, &db); ... use the db ... delete db -delete options.cache; +delete options.block_cache; ``` Note that the cache holds uncompressed data, and therefore it should be sized diff --git a/src/leveldb/helpers/memenv/memenv.cc b/src/leveldb/helpers/memenv/memenv.cc index 68c0614a59..47e4481f7c 100644 --- a/src/leveldb/helpers/memenv/memenv.cc +++ b/src/leveldb/helpers/memenv/memenv.cc @@ -4,14 +4,18 @@ #include "helpers/memenv/memenv.h" +#include <string.h> + +#include <limits> +#include <map> +#include <string> +#include <vector> + #include "leveldb/env.h" #include "leveldb/status.h" #include "port/port.h" +#include "port/thread_annotations.h" #include "util/mutexlock.h" -#include <map> -#include <string.h> -#include <string> -#include <vector> namespace leveldb { @@ -23,6 +27,10 @@ class FileState { // and the caller must call Ref() at least once. FileState() : refs_(0), size_(0) {} + // No copying allowed. + FileState(const FileState&) = delete; + FileState& operator=(const FileState&) = delete; + // Increase the reference count. 
void Ref() { MutexLock lock(&refs_mutex_); @@ -47,9 +55,22 @@ class FileState { } } - uint64_t Size() const { return size_; } + uint64_t Size() const { + MutexLock lock(&blocks_mutex_); + return size_; + } + + void Truncate() { + MutexLock lock(&blocks_mutex_); + for (char*& block : blocks_) { + delete[] block; + } + blocks_.clear(); + size_ = 0; + } Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const { + MutexLock lock(&blocks_mutex_); if (offset > size_) { return Status::IOError("Offset greater than file size."); } @@ -62,16 +83,9 @@ class FileState { return Status::OK(); } - assert(offset / kBlockSize <= SIZE_MAX); + assert(offset / kBlockSize <= std::numeric_limits<size_t>::max()); size_t block = static_cast<size_t>(offset / kBlockSize); size_t block_offset = offset % kBlockSize; - - if (n <= kBlockSize - block_offset) { - // The requested bytes are all in the first block. - *result = Slice(blocks_[block] + block_offset, n); - return Status::OK(); - } - size_t bytes_to_copy = n; char* dst = scratch; @@ -96,6 +110,7 @@ class FileState { const char* src = data.data(); size_t src_len = data.size(); + MutexLock lock(&blocks_mutex_); while (src_len > 0) { size_t avail; size_t offset = size_ % kBlockSize; @@ -122,28 +137,17 @@ class FileState { } private: - // Private since only Unref() should be used to delete it. - ~FileState() { - for (std::vector<char*>::iterator i = blocks_.begin(); i != blocks_.end(); - ++i) { - delete [] *i; - } - } + enum { kBlockSize = 8 * 1024 }; - // No copying allowed. - FileState(const FileState&); - void operator=(const FileState&); + // Private since only Unref() should be used to delete it. + ~FileState() { Truncate(); } port::Mutex refs_mutex_; - int refs_; // Protected by refs_mutex_; + int refs_ GUARDED_BY(refs_mutex_); - // The following fields are not protected by any mutex. They are only mutable - // while the file is being written, and concurrent access is not allowed - // to writable files. 
- std::vector<char*> blocks_; - uint64_t size_; - - enum { kBlockSize = 8 * 1024 }; + mutable port::Mutex blocks_mutex_; + std::vector<char*> blocks_ GUARDED_BY(blocks_mutex_); + uint64_t size_ GUARDED_BY(blocks_mutex_); }; class SequentialFileImpl : public SequentialFile { @@ -152,11 +156,9 @@ class SequentialFileImpl : public SequentialFile { file_->Ref(); } - ~SequentialFileImpl() { - file_->Unref(); - } + ~SequentialFileImpl() override { file_->Unref(); } - virtual Status Read(size_t n, Slice* result, char* scratch) { + Status Read(size_t n, Slice* result, char* scratch) override { Status s = file_->Read(pos_, n, result, scratch); if (s.ok()) { pos_ += result->size(); @@ -164,7 +166,7 @@ class SequentialFileImpl : public SequentialFile { return s; } - virtual Status Skip(uint64_t n) { + Status Skip(uint64_t n) override { if (pos_ > file_->Size()) { return Status::IOError("pos_ > file_->Size()"); } @@ -176,7 +178,7 @@ class SequentialFileImpl : public SequentialFile { return Status::OK(); } - virtual std::string GetName() const { return "[memenv]"; } + virtual std::string GetName() const override { return "[memenv]"; } private: FileState* file_; uint64_t pos_; @@ -184,68 +186,58 @@ class SequentialFileImpl : public SequentialFile { class RandomAccessFileImpl : public RandomAccessFile { public: - explicit RandomAccessFileImpl(FileState* file) : file_(file) { - file_->Ref(); - } + explicit RandomAccessFileImpl(FileState* file) : file_(file) { file_->Ref(); } - ~RandomAccessFileImpl() { - file_->Unref(); - } + ~RandomAccessFileImpl() override { file_->Unref(); } - virtual Status Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const { + Status Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const override { return file_->Read(offset, n, result, scratch); } - virtual std::string GetName() const { return "[memenv]"; } + virtual std::string GetName() const override { return "[memenv]"; } private: FileState* file_; }; class WritableFileImpl : public WritableFile { public: - WritableFileImpl(FileState* file) : file_(file) { - file_->Ref(); - } + WritableFileImpl(FileState* file) : file_(file) { file_->Ref(); } - ~WritableFileImpl() { - file_->Unref(); - } + ~WritableFileImpl() override { file_->Unref(); } - virtual Status Append(const Slice& data) { - return file_->Append(data); - } + Status Append(const Slice& data) override { return file_->Append(data); } - virtual Status Close() { return Status::OK(); } - virtual Status Flush() { return Status::OK(); } - virtual Status Sync() { return Status::OK(); } + Status Close() override { return Status::OK(); } + Status Flush() override { return Status::OK(); } + Status Sync() override { return Status::OK(); } - virtual std::string GetName() const { return "[memenv]"; } + virtual std::string GetName() const override { return "[memenv]"; } private: FileState* file_; }; class NoOpLogger : public Logger { public: - virtual void Logv(const char* format, va_list ap) { } + void Logv(const char* format, va_list ap) override {} }; class InMemoryEnv : public EnvWrapper { public: - explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) { } + explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) {} - virtual ~InMemoryEnv() { - for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){ - i->second->Unref(); + ~InMemoryEnv() override { + for (const auto& kvp : file_map_) { + kvp.second->Unref(); } } // Partial implementation of the Env interface. 
- virtual Status NewSequentialFile(const std::string& fname, - SequentialFile** result) { + Status NewSequentialFile(const std::string& fname, + SequentialFile** result) override { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { - *result = NULL; + *result = nullptr; return Status::IOError(fname, "File not found"); } @@ -253,11 +245,11 @@ class InMemoryEnv : public EnvWrapper { return Status::OK(); } - virtual Status NewRandomAccessFile(const std::string& fname, - RandomAccessFile** result) { + Status NewRandomAccessFile(const std::string& fname, + RandomAccessFile** result) override { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { - *result = NULL; + *result = nullptr; return Status::IOError(fname, "File not found"); } @@ -265,27 +257,32 @@ class InMemoryEnv : public EnvWrapper { return Status::OK(); } - virtual Status NewWritableFile(const std::string& fname, - WritableFile** result) { + Status NewWritableFile(const std::string& fname, + WritableFile** result) override { MutexLock lock(&mutex_); - if (file_map_.find(fname) != file_map_.end()) { - DeleteFileInternal(fname); - } + FileSystem::iterator it = file_map_.find(fname); - FileState* file = new FileState(); - file->Ref(); - file_map_[fname] = file; + FileState* file; + if (it == file_map_.end()) { + // File is not currently open. + file = new FileState(); + file->Ref(); + file_map_[fname] = file; + } else { + file = it->second; + file->Truncate(); + } *result = new WritableFileImpl(file); return Status::OK(); } - virtual Status NewAppendableFile(const std::string& fname, - WritableFile** result) { + Status NewAppendableFile(const std::string& fname, + WritableFile** result) override { MutexLock lock(&mutex_); FileState** sptr = &file_map_[fname]; FileState* file = *sptr; - if (file == NULL) { + if (file == nullptr) { file = new FileState(); file->Ref(); } @@ -293,18 +290,18 @@ class InMemoryEnv : public EnvWrapper { return Status::OK(); } - virtual bool FileExists(const std::string& fname) { + bool FileExists(const std::string& fname) override { MutexLock lock(&mutex_); return file_map_.find(fname) != file_map_.end(); } - virtual Status GetChildren(const std::string& dir, - std::vector<std::string>* result) { + Status GetChildren(const std::string& dir, + std::vector<std::string>* result) override { MutexLock lock(&mutex_); result->clear(); - for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){ - const std::string& filename = i->first; + for (const auto& kvp : file_map_) { + const std::string& filename = kvp.first; if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' && Slice(filename).starts_with(Slice(dir))) { @@ -315,7 +312,8 @@ class InMemoryEnv : public EnvWrapper { return Status::OK(); } - void DeleteFileInternal(const std::string& fname) { + void DeleteFileInternal(const std::string& fname) + EXCLUSIVE_LOCKS_REQUIRED(mutex_) { if (file_map_.find(fname) == file_map_.end()) { return; } @@ -324,7 +322,7 @@ class InMemoryEnv : public EnvWrapper { file_map_.erase(fname); } - virtual Status DeleteFile(const std::string& fname) { + Status DeleteFile(const std::string& fname) override { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { return Status::IOError(fname, "File not found"); @@ -334,15 +332,11 @@ class InMemoryEnv : public EnvWrapper { return Status::OK(); } - virtual Status CreateDir(const std::string& dirname) { - return Status::OK(); - } + Status CreateDir(const std::string& dirname) override { return 
Status::OK(); } - virtual Status DeleteDir(const std::string& dirname) { - return Status::OK(); - } + Status DeleteDir(const std::string& dirname) override { return Status::OK(); } - virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) { + Status GetFileSize(const std::string& fname, uint64_t* file_size) override { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { return Status::IOError(fname, "File not found"); @@ -352,8 +346,8 @@ class InMemoryEnv : public EnvWrapper { return Status::OK(); } - virtual Status RenameFile(const std::string& src, - const std::string& target) { + Status RenameFile(const std::string& src, + const std::string& target) override { MutexLock lock(&mutex_); if (file_map_.find(src) == file_map_.end()) { return Status::IOError(src, "File not found"); @@ -365,22 +359,22 @@ class InMemoryEnv : public EnvWrapper { return Status::OK(); } - virtual Status LockFile(const std::string& fname, FileLock** lock) { + Status LockFile(const std::string& fname, FileLock** lock) override { *lock = new FileLock; return Status::OK(); } - virtual Status UnlockFile(FileLock* lock) { + Status UnlockFile(FileLock* lock) override { delete lock; return Status::OK(); } - virtual Status GetTestDirectory(std::string* path) { + Status GetTestDirectory(std::string* path) override { *path = "/test"; return Status::OK(); } - virtual Status NewLogger(const std::string& fname, Logger** result) { + Status NewLogger(const std::string& fname, Logger** result) override { *result = new NoOpLogger; return Status::OK(); } @@ -388,14 +382,13 @@ class InMemoryEnv : public EnvWrapper { private: // Map from filenames to FileState objects, representing a simple file system. typedef std::map<std::string, FileState*> FileSystem; + port::Mutex mutex_; - FileSystem file_map_; // Protected by mutex_. + FileSystem file_map_ GUARDED_BY(mutex_); }; } // namespace -Env* NewMemEnv(Env* base_env) { - return new InMemoryEnv(base_env); -} +Env* NewMemEnv(Env* base_env) { return new InMemoryEnv(base_env); } } // namespace leveldb diff --git a/src/leveldb/helpers/memenv/memenv.h b/src/leveldb/helpers/memenv/memenv.h index 03b88de761..3d929e4c4e 100644 --- a/src/leveldb/helpers/memenv/memenv.h +++ b/src/leveldb/helpers/memenv/memenv.h @@ -5,6 +5,8 @@ #ifndef STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_ #define STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_ +#include "leveldb/export.h" + namespace leveldb { class Env; @@ -13,7 +15,7 @@ class Env; // all non-file-storage tasks to base_env. The caller must delete the result // when it is no longer needed. // *base_env must remain live while the result is in use. 
-Env* NewMemEnv(Env* base_env); +LEVELDB_EXPORT Env* NewMemEnv(Env* base_env); } // namespace leveldb diff --git a/src/leveldb/helpers/memenv/memenv_test.cc b/src/leveldb/helpers/memenv/memenv_test.cc index 5cff77613f..94ad06be68 100644 --- a/src/leveldb/helpers/memenv/memenv_test.cc +++ b/src/leveldb/helpers/memenv/memenv_test.cc @@ -4,25 +4,22 @@ #include "helpers/memenv/memenv.h" +#include <string> +#include <vector> + #include "db/db_impl.h" #include "leveldb/db.h" #include "leveldb/env.h" #include "util/testharness.h" -#include <string> -#include <vector> namespace leveldb { class MemEnvTest { public: - Env* env_; + MemEnvTest() : env_(NewMemEnv(Env::Default())) {} + ~MemEnvTest() { delete env_; } - MemEnvTest() - : env_(NewMemEnv(Env::Default())) { - } - ~MemEnvTest() { - delete env_; - } + Env* env_; }; TEST(MemEnvTest, Basics) { @@ -109,25 +106,25 @@ TEST(MemEnvTest, ReadWrite) { // Read sequentially. ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file)); - ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello". + ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello". ASSERT_EQ(0, result.compare("hello")); ASSERT_OK(seq_file->Skip(1)); - ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world". + ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world". ASSERT_EQ(0, result.compare("world")); - ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF. + ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF. ASSERT_EQ(0, result.size()); - ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file. + ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file. ASSERT_OK(seq_file->Read(1000, &result, scratch)); ASSERT_EQ(0, result.size()); delete seq_file; // Random reads. ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file)); - ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world". + ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world". ASSERT_EQ(0, result.compare("world")); - ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello". + ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello". ASSERT_EQ(0, result.compare("hello")); - ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d". + ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d". ASSERT_EQ(0, result.compare("d")); // Too high offset. @@ -176,7 +173,7 @@ TEST(MemEnvTest, LargeWrite) { SequentialFile* seq_file; Slice result; ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file)); - ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo". + ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo". ASSERT_EQ(0, result.compare("foo")); size_t read = 0; @@ -188,7 +185,30 @@ TEST(MemEnvTest, LargeWrite) { } ASSERT_TRUE(write_data == read_data); delete seq_file; - delete [] scratch; + delete[] scratch; +} + +TEST(MemEnvTest, OverwriteOpenFile) { + const char kWrite1Data[] = "Write #1 data"; + const size_t kFileDataLen = sizeof(kWrite1Data) - 1; + const std::string kTestFileName = test::TmpDir() + "/leveldb-TestFile.dat"; + + ASSERT_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName)); + + RandomAccessFile* rand_file; + ASSERT_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file)); + + const char kWrite2Data[] = "Write #2 data"; + ASSERT_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName)); + + // Verify that overwriting an open file will result in the new file data + // being read from files opened before the write. 
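NewMemEnv() above builds an Env that keeps every file in memory, which is the pattern the DBTest case in memenv_test.cc relies on; a rough sketch of that pattern (the database path and key are placeholders):

```c++
#include "helpers/memenv/memenv.h"
#include "leveldb/db.h"
#include "leveldb/env.h"

int main() {
  // All file I/O is redirected to memory; nothing touches the real filesystem.
  leveldb::Env* env = leveldb::NewMemEnv(leveldb::Env::Default());

  leveldb::Options options;
  options.create_if_missing = true;
  options.env = env;

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/dir/db", &db);
  if (s.ok()) {
    db->Put(leveldb::WriteOptions(), "k", "v");
    delete db;  // close the database before tearing down its Env
  }
  delete env;  // the caller owns the Env returned by NewMemEnv()
  return 0;
}
```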
+ Slice result; + char scratch[kFileDataLen]; + ASSERT_OK(rand_file->Read(0, kFileDataLen, &result, scratch)); + ASSERT_EQ(0, result.compare(kWrite2Data)); + + delete rand_file; } TEST(MemEnvTest, DBTest) { @@ -236,6 +256,4 @@ TEST(MemEnvTest, DBTest) { } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/include/leveldb/c.h b/src/leveldb/include/leveldb/c.h index 1048fe3b86..02c79ba72e 100644 --- a/src/leveldb/include/leveldb/c.h +++ b/src/leveldb/include/leveldb/c.h @@ -32,7 +32,7 @@ On failure, leveldb frees the old value of *errptr and set *errptr to a malloc()ed error message. - (4) Bools have the type unsigned char (0 == false; rest == true) + (4) Bools have the type uint8_t (0 == false; rest == true) (5) All of the pointer arguments must be non-NULL. */ @@ -48,225 +48,205 @@ extern "C" { #include <stddef.h> #include <stdint.h> +#include "leveldb/export.h" + /* Exported types */ -typedef struct leveldb_t leveldb_t; -typedef struct leveldb_cache_t leveldb_cache_t; -typedef struct leveldb_comparator_t leveldb_comparator_t; -typedef struct leveldb_env_t leveldb_env_t; -typedef struct leveldb_filelock_t leveldb_filelock_t; -typedef struct leveldb_filterpolicy_t leveldb_filterpolicy_t; -typedef struct leveldb_iterator_t leveldb_iterator_t; -typedef struct leveldb_logger_t leveldb_logger_t; -typedef struct leveldb_options_t leveldb_options_t; -typedef struct leveldb_randomfile_t leveldb_randomfile_t; -typedef struct leveldb_readoptions_t leveldb_readoptions_t; -typedef struct leveldb_seqfile_t leveldb_seqfile_t; -typedef struct leveldb_snapshot_t leveldb_snapshot_t; -typedef struct leveldb_writablefile_t leveldb_writablefile_t; -typedef struct leveldb_writebatch_t leveldb_writebatch_t; -typedef struct leveldb_writeoptions_t leveldb_writeoptions_t; +typedef struct leveldb_t leveldb_t; +typedef struct leveldb_cache_t leveldb_cache_t; +typedef struct leveldb_comparator_t leveldb_comparator_t; +typedef struct leveldb_env_t leveldb_env_t; +typedef struct leveldb_filelock_t leveldb_filelock_t; +typedef struct leveldb_filterpolicy_t leveldb_filterpolicy_t; +typedef struct leveldb_iterator_t leveldb_iterator_t; +typedef struct leveldb_logger_t leveldb_logger_t; +typedef struct leveldb_options_t leveldb_options_t; +typedef struct leveldb_randomfile_t leveldb_randomfile_t; +typedef struct leveldb_readoptions_t leveldb_readoptions_t; +typedef struct leveldb_seqfile_t leveldb_seqfile_t; +typedef struct leveldb_snapshot_t leveldb_snapshot_t; +typedef struct leveldb_writablefile_t leveldb_writablefile_t; +typedef struct leveldb_writebatch_t leveldb_writebatch_t; +typedef struct leveldb_writeoptions_t leveldb_writeoptions_t; /* DB operations */ -extern leveldb_t* leveldb_open( - const leveldb_options_t* options, - const char* name, - char** errptr); +LEVELDB_EXPORT leveldb_t* leveldb_open(const leveldb_options_t* options, + const char* name, char** errptr); -extern void leveldb_close(leveldb_t* db); +LEVELDB_EXPORT void leveldb_close(leveldb_t* db); -extern void leveldb_put( - leveldb_t* db, - const leveldb_writeoptions_t* options, - const char* key, size_t keylen, - const char* val, size_t vallen, - char** errptr); +LEVELDB_EXPORT void leveldb_put(leveldb_t* db, + const leveldb_writeoptions_t* options, + const char* key, size_t keylen, const char* val, + size_t vallen, char** errptr); -extern void leveldb_delete( - leveldb_t* db, - const 
leveldb_writeoptions_t* options, - const char* key, size_t keylen, - char** errptr); +LEVELDB_EXPORT void leveldb_delete(leveldb_t* db, + const leveldb_writeoptions_t* options, + const char* key, size_t keylen, + char** errptr); -extern void leveldb_write( - leveldb_t* db, - const leveldb_writeoptions_t* options, - leveldb_writebatch_t* batch, - char** errptr); +LEVELDB_EXPORT void leveldb_write(leveldb_t* db, + const leveldb_writeoptions_t* options, + leveldb_writebatch_t* batch, char** errptr); /* Returns NULL if not found. A malloc()ed array otherwise. Stores the length of the array in *vallen. */ -extern char* leveldb_get( - leveldb_t* db, - const leveldb_readoptions_t* options, - const char* key, size_t keylen, - size_t* vallen, - char** errptr); +LEVELDB_EXPORT char* leveldb_get(leveldb_t* db, + const leveldb_readoptions_t* options, + const char* key, size_t keylen, size_t* vallen, + char** errptr); -extern leveldb_iterator_t* leveldb_create_iterator( - leveldb_t* db, - const leveldb_readoptions_t* options); +LEVELDB_EXPORT leveldb_iterator_t* leveldb_create_iterator( + leveldb_t* db, const leveldb_readoptions_t* options); -extern const leveldb_snapshot_t* leveldb_create_snapshot( - leveldb_t* db); +LEVELDB_EXPORT const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db); -extern void leveldb_release_snapshot( - leveldb_t* db, - const leveldb_snapshot_t* snapshot); +LEVELDB_EXPORT void leveldb_release_snapshot( + leveldb_t* db, const leveldb_snapshot_t* snapshot); /* Returns NULL if property name is unknown. Else returns a pointer to a malloc()-ed null-terminated value. */ -extern char* leveldb_property_value( - leveldb_t* db, - const char* propname); - -extern void leveldb_approximate_sizes( - leveldb_t* db, - int num_ranges, - const char* const* range_start_key, const size_t* range_start_key_len, - const char* const* range_limit_key, const size_t* range_limit_key_len, - uint64_t* sizes); - -extern void leveldb_compact_range( - leveldb_t* db, - const char* start_key, size_t start_key_len, - const char* limit_key, size_t limit_key_len); +LEVELDB_EXPORT char* leveldb_property_value(leveldb_t* db, + const char* propname); + +LEVELDB_EXPORT void leveldb_approximate_sizes( + leveldb_t* db, int num_ranges, const char* const* range_start_key, + const size_t* range_start_key_len, const char* const* range_limit_key, + const size_t* range_limit_key_len, uint64_t* sizes); + +LEVELDB_EXPORT void leveldb_compact_range(leveldb_t* db, const char* start_key, + size_t start_key_len, + const char* limit_key, + size_t limit_key_len); /* Management operations */ -extern void leveldb_destroy_db( - const leveldb_options_t* options, - const char* name, - char** errptr); +LEVELDB_EXPORT void leveldb_destroy_db(const leveldb_options_t* options, + const char* name, char** errptr); -extern void leveldb_repair_db( - const leveldb_options_t* options, - const char* name, - char** errptr); +LEVELDB_EXPORT void leveldb_repair_db(const leveldb_options_t* options, + const char* name, char** errptr); /* Iterator */ -extern void leveldb_iter_destroy(leveldb_iterator_t*); -extern unsigned char leveldb_iter_valid(const leveldb_iterator_t*); -extern void leveldb_iter_seek_to_first(leveldb_iterator_t*); -extern void leveldb_iter_seek_to_last(leveldb_iterator_t*); -extern void leveldb_iter_seek(leveldb_iterator_t*, const char* k, size_t klen); -extern void leveldb_iter_next(leveldb_iterator_t*); -extern void leveldb_iter_prev(leveldb_iterator_t*); -extern const char* leveldb_iter_key(const leveldb_iterator_t*, 
size_t* klen); -extern const char* leveldb_iter_value(const leveldb_iterator_t*, size_t* vlen); -extern void leveldb_iter_get_error(const leveldb_iterator_t*, char** errptr); +LEVELDB_EXPORT void leveldb_iter_destroy(leveldb_iterator_t*); +LEVELDB_EXPORT uint8_t leveldb_iter_valid(const leveldb_iterator_t*); +LEVELDB_EXPORT void leveldb_iter_seek_to_first(leveldb_iterator_t*); +LEVELDB_EXPORT void leveldb_iter_seek_to_last(leveldb_iterator_t*); +LEVELDB_EXPORT void leveldb_iter_seek(leveldb_iterator_t*, const char* k, + size_t klen); +LEVELDB_EXPORT void leveldb_iter_next(leveldb_iterator_t*); +LEVELDB_EXPORT void leveldb_iter_prev(leveldb_iterator_t*); +LEVELDB_EXPORT const char* leveldb_iter_key(const leveldb_iterator_t*, + size_t* klen); +LEVELDB_EXPORT const char* leveldb_iter_value(const leveldb_iterator_t*, + size_t* vlen); +LEVELDB_EXPORT void leveldb_iter_get_error(const leveldb_iterator_t*, + char** errptr); /* Write batch */ -extern leveldb_writebatch_t* leveldb_writebatch_create(); -extern void leveldb_writebatch_destroy(leveldb_writebatch_t*); -extern void leveldb_writebatch_clear(leveldb_writebatch_t*); -extern void leveldb_writebatch_put( - leveldb_writebatch_t*, - const char* key, size_t klen, - const char* val, size_t vlen); -extern void leveldb_writebatch_delete( - leveldb_writebatch_t*, - const char* key, size_t klen); -extern void leveldb_writebatch_iterate( - leveldb_writebatch_t*, - void* state, +LEVELDB_EXPORT leveldb_writebatch_t* leveldb_writebatch_create(void); +LEVELDB_EXPORT void leveldb_writebatch_destroy(leveldb_writebatch_t*); +LEVELDB_EXPORT void leveldb_writebatch_clear(leveldb_writebatch_t*); +LEVELDB_EXPORT void leveldb_writebatch_put(leveldb_writebatch_t*, + const char* key, size_t klen, + const char* val, size_t vlen); +LEVELDB_EXPORT void leveldb_writebatch_delete(leveldb_writebatch_t*, + const char* key, size_t klen); +LEVELDB_EXPORT void leveldb_writebatch_iterate( + const leveldb_writebatch_t*, void* state, void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen), void (*deleted)(void*, const char* k, size_t klen)); +LEVELDB_EXPORT void leveldb_writebatch_append( + leveldb_writebatch_t* destination, const leveldb_writebatch_t* source); /* Options */ -extern leveldb_options_t* leveldb_options_create(); -extern void leveldb_options_destroy(leveldb_options_t*); -extern void leveldb_options_set_comparator( - leveldb_options_t*, - leveldb_comparator_t*); -extern void leveldb_options_set_filter_policy( - leveldb_options_t*, - leveldb_filterpolicy_t*); -extern void leveldb_options_set_create_if_missing( - leveldb_options_t*, unsigned char); -extern void leveldb_options_set_error_if_exists( - leveldb_options_t*, unsigned char); -extern void leveldb_options_set_paranoid_checks( - leveldb_options_t*, unsigned char); -extern void leveldb_options_set_env(leveldb_options_t*, leveldb_env_t*); -extern void leveldb_options_set_info_log(leveldb_options_t*, leveldb_logger_t*); -extern void leveldb_options_set_write_buffer_size(leveldb_options_t*, size_t); -extern void leveldb_options_set_max_open_files(leveldb_options_t*, int); -extern void leveldb_options_set_cache(leveldb_options_t*, leveldb_cache_t*); -extern void leveldb_options_set_block_size(leveldb_options_t*, size_t); -extern void leveldb_options_set_block_restart_interval(leveldb_options_t*, int); - -enum { - leveldb_no_compression = 0, - leveldb_snappy_compression = 1 -}; -extern void leveldb_options_set_compression(leveldb_options_t*, int); +LEVELDB_EXPORT leveldb_options_t* 
leveldb_options_create(void); +LEVELDB_EXPORT void leveldb_options_destroy(leveldb_options_t*); +LEVELDB_EXPORT void leveldb_options_set_comparator(leveldb_options_t*, + leveldb_comparator_t*); +LEVELDB_EXPORT void leveldb_options_set_filter_policy(leveldb_options_t*, + leveldb_filterpolicy_t*); +LEVELDB_EXPORT void leveldb_options_set_create_if_missing(leveldb_options_t*, + uint8_t); +LEVELDB_EXPORT void leveldb_options_set_error_if_exists(leveldb_options_t*, + uint8_t); +LEVELDB_EXPORT void leveldb_options_set_paranoid_checks(leveldb_options_t*, + uint8_t); +LEVELDB_EXPORT void leveldb_options_set_env(leveldb_options_t*, leveldb_env_t*); +LEVELDB_EXPORT void leveldb_options_set_info_log(leveldb_options_t*, + leveldb_logger_t*); +LEVELDB_EXPORT void leveldb_options_set_write_buffer_size(leveldb_options_t*, + size_t); +LEVELDB_EXPORT void leveldb_options_set_max_open_files(leveldb_options_t*, int); +LEVELDB_EXPORT void leveldb_options_set_cache(leveldb_options_t*, + leveldb_cache_t*); +LEVELDB_EXPORT void leveldb_options_set_block_size(leveldb_options_t*, size_t); +LEVELDB_EXPORT void leveldb_options_set_block_restart_interval( + leveldb_options_t*, int); +LEVELDB_EXPORT void leveldb_options_set_max_file_size(leveldb_options_t*, + size_t); + +enum { leveldb_no_compression = 0, leveldb_snappy_compression = 1 }; +LEVELDB_EXPORT void leveldb_options_set_compression(leveldb_options_t*, int); /* Comparator */ -extern leveldb_comparator_t* leveldb_comparator_create( - void* state, - void (*destructor)(void*), - int (*compare)( - void*, - const char* a, size_t alen, - const char* b, size_t blen), +LEVELDB_EXPORT leveldb_comparator_t* leveldb_comparator_create( + void* state, void (*destructor)(void*), + int (*compare)(void*, const char* a, size_t alen, const char* b, + size_t blen), const char* (*name)(void*)); -extern void leveldb_comparator_destroy(leveldb_comparator_t*); +LEVELDB_EXPORT void leveldb_comparator_destroy(leveldb_comparator_t*); /* Filter policy */ -extern leveldb_filterpolicy_t* leveldb_filterpolicy_create( - void* state, - void (*destructor)(void*), - char* (*create_filter)( - void*, - const char* const* key_array, const size_t* key_length_array, - int num_keys, - size_t* filter_length), - unsigned char (*key_may_match)( - void*, - const char* key, size_t length, - const char* filter, size_t filter_length), +LEVELDB_EXPORT leveldb_filterpolicy_t* leveldb_filterpolicy_create( + void* state, void (*destructor)(void*), + char* (*create_filter)(void*, const char* const* key_array, + const size_t* key_length_array, int num_keys, + size_t* filter_length), + uint8_t (*key_may_match)(void*, const char* key, size_t length, + const char* filter, size_t filter_length), const char* (*name)(void*)); -extern void leveldb_filterpolicy_destroy(leveldb_filterpolicy_t*); +LEVELDB_EXPORT void leveldb_filterpolicy_destroy(leveldb_filterpolicy_t*); -extern leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom( +LEVELDB_EXPORT leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom( int bits_per_key); /* Read options */ -extern leveldb_readoptions_t* leveldb_readoptions_create(); -extern void leveldb_readoptions_destroy(leveldb_readoptions_t*); -extern void leveldb_readoptions_set_verify_checksums( - leveldb_readoptions_t*, - unsigned char); -extern void leveldb_readoptions_set_fill_cache( - leveldb_readoptions_t*, unsigned char); -extern void leveldb_readoptions_set_snapshot( - leveldb_readoptions_t*, - const leveldb_snapshot_t*); +LEVELDB_EXPORT leveldb_readoptions_t* 
leveldb_readoptions_create(void); +LEVELDB_EXPORT void leveldb_readoptions_destroy(leveldb_readoptions_t*); +LEVELDB_EXPORT void leveldb_readoptions_set_verify_checksums( + leveldb_readoptions_t*, uint8_t); +LEVELDB_EXPORT void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t*, + uint8_t); +LEVELDB_EXPORT void leveldb_readoptions_set_snapshot(leveldb_readoptions_t*, + const leveldb_snapshot_t*); /* Write options */ -extern leveldb_writeoptions_t* leveldb_writeoptions_create(); -extern void leveldb_writeoptions_destroy(leveldb_writeoptions_t*); -extern void leveldb_writeoptions_set_sync( - leveldb_writeoptions_t*, unsigned char); +LEVELDB_EXPORT leveldb_writeoptions_t* leveldb_writeoptions_create(void); +LEVELDB_EXPORT void leveldb_writeoptions_destroy(leveldb_writeoptions_t*); +LEVELDB_EXPORT void leveldb_writeoptions_set_sync(leveldb_writeoptions_t*, + uint8_t); /* Cache */ -extern leveldb_cache_t* leveldb_cache_create_lru(size_t capacity); -extern void leveldb_cache_destroy(leveldb_cache_t* cache); +LEVELDB_EXPORT leveldb_cache_t* leveldb_cache_create_lru(size_t capacity); +LEVELDB_EXPORT void leveldb_cache_destroy(leveldb_cache_t* cache); /* Env */ -extern leveldb_env_t* leveldb_create_default_env(); -extern void leveldb_env_destroy(leveldb_env_t*); +LEVELDB_EXPORT leveldb_env_t* leveldb_create_default_env(void); +LEVELDB_EXPORT void leveldb_env_destroy(leveldb_env_t*); + +/* If not NULL, the returned buffer must be released using leveldb_free(). */ +LEVELDB_EXPORT char* leveldb_env_get_test_directory(leveldb_env_t*); /* Utility */ @@ -275,16 +255,16 @@ extern void leveldb_env_destroy(leveldb_env_t*); in this file. Note that in certain cases (typically on Windows), you may need to call this routine instead of free(ptr) to dispose of malloc()-ed memory returned by this library. */ -extern void leveldb_free(void* ptr); +LEVELDB_EXPORT void leveldb_free(void* ptr); /* Return the major version number for this release. */ -extern int leveldb_major_version(); +LEVELDB_EXPORT int leveldb_major_version(void); /* Return the minor version number for this release. */ -extern int leveldb_minor_version(); +LEVELDB_EXPORT int leveldb_minor_version(void); #ifdef __cplusplus -} /* end extern "C" */ +} /* end extern "C" */ #endif -#endif /* STORAGE_LEVELDB_INCLUDE_C_H_ */ +#endif /* STORAGE_LEVELDB_INCLUDE_C_H_ */ diff --git a/src/leveldb/include/leveldb/cache.h b/src/leveldb/include/leveldb/cache.h index 6819d5bc49..7d1a221193 100644 --- a/src/leveldb/include/leveldb/cache.h +++ b/src/leveldb/include/leveldb/cache.h @@ -19,26 +19,31 @@ #define STORAGE_LEVELDB_INCLUDE_CACHE_H_ #include <stdint.h> + +#include "leveldb/export.h" #include "leveldb/slice.h" namespace leveldb { -class Cache; +class LEVELDB_EXPORT Cache; // Create a new cache with a fixed size capacity. This implementation // of Cache uses a least-recently-used eviction policy. -extern Cache* NewLRUCache(size_t capacity); +LEVELDB_EXPORT Cache* NewLRUCache(size_t capacity); -class Cache { +class LEVELDB_EXPORT Cache { public: - Cache() { } + Cache() = default; + + Cache(const Cache&) = delete; + Cache& operator=(const Cache&) = delete; // Destroys all existing entries by calling the "deleter" // function that was passed to the constructor. virtual ~Cache(); // Opaque handle to an entry stored in the cache. - struct Handle { }; + struct Handle {}; // Insert a mapping from key->value into the cache and assign it // the specified charge against the total cache capacity. 
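The re-annotated C binding above keeps the same call shapes, only the export macro and bool type change; a small round trip through it as a sketch (the database path is a placeholder, and the snippet compiles as C++ even though the API is plain C):

```c++
#include <stdio.h>

#include "leveldb/c.h"

int main() {
  char* err = nullptr;
  leveldb_options_t* options = leveldb_options_create();
  leveldb_options_set_create_if_missing(options, 1);

  leveldb_t* db = leveldb_open(options, "/tmp/c_api_demo", &err);
  if (err != nullptr) {
    fprintf(stderr, "open failed: %s\n", err);
    leveldb_free(err);  // error strings are malloc()ed by the library
    leveldb_options_destroy(options);
    return 1;
  }

  leveldb_writeoptions_t* wopts = leveldb_writeoptions_create();
  leveldb_put(db, wopts, "key", 3, "value", 5, &err);

  leveldb_readoptions_t* ropts = leveldb_readoptions_create();
  size_t vallen = 0;
  char* val = leveldb_get(db, ropts, "key", 3, &vallen, &err);
  if (val != nullptr) {
    printf("%.*s\n", (int)vallen, val);
    leveldb_free(val);  // returned buffers must go back through leveldb_free()
  }
  leveldb_free(err);  // safe on null; frees any pending error string

  leveldb_readoptions_destroy(ropts);
  leveldb_writeoptions_destroy(wopts);
  leveldb_close(db);
  leveldb_options_destroy(options);
  return 0;
}
```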
@@ -52,7 +57,7 @@ class Cache { virtual Handle* Insert(const Slice& key, void* value, size_t charge, void (*deleter)(const Slice& key, void* value)) = 0; - // If the cache has no mapping for "key", returns NULL. + // If the cache has no mapping for "key", returns nullptr. // // Else return a handle that corresponds to the mapping. The caller // must call this->Release(handle) when the returned mapping is no @@ -99,10 +104,6 @@ class Cache { struct Rep; Rep* rep_; - - // No copying allowed - Cache(const Cache&); - void operator=(const Cache&); }; } // namespace leveldb diff --git a/src/leveldb/include/leveldb/comparator.h b/src/leveldb/include/leveldb/comparator.h index 556b984c76..a85b51ebd8 100644 --- a/src/leveldb/include/leveldb/comparator.h +++ b/src/leveldb/include/leveldb/comparator.h @@ -7,6 +7,8 @@ #include <string> +#include "leveldb/export.h" + namespace leveldb { class Slice; @@ -15,7 +17,7 @@ class Slice; // used as keys in an sstable or a database. A Comparator implementation // must be thread-safe since leveldb may invoke its methods concurrently // from multiple threads. -class Comparator { +class LEVELDB_EXPORT Comparator { public: virtual ~Comparator(); @@ -43,9 +45,8 @@ class Comparator { // If *start < limit, changes *start to a short string in [start,limit). // Simple comparator implementations may return with *start unchanged, // i.e., an implementation of this method that does nothing is correct. - virtual void FindShortestSeparator( - std::string* start, - const Slice& limit) const = 0; + virtual void FindShortestSeparator(std::string* start, + const Slice& limit) const = 0; // Changes *key to a short string >= *key. // Simple comparator implementations may return with *key unchanged, @@ -56,7 +57,7 @@ class Comparator { // Return a builtin comparator that uses lexicographic byte-wise // ordering. The result remains the property of this module and // must not be deleted. -extern const Comparator* BytewiseComparator(); +LEVELDB_EXPORT const Comparator* BytewiseComparator(); } // namespace leveldb diff --git a/src/leveldb/include/leveldb/db.h b/src/leveldb/include/leveldb/db.h index bfab10a0b7..b73014a221 100644 --- a/src/leveldb/include/leveldb/db.h +++ b/src/leveldb/include/leveldb/db.h @@ -7,14 +7,16 @@ #include <stdint.h> #include <stdio.h> + +#include "leveldb/export.h" #include "leveldb/iterator.h" #include "leveldb/options.h" namespace leveldb { -// Update Makefile if you change these +// Update CMakeLists.txt if you change these static const int kMajorVersion = 1; -static const int kMinorVersion = 20; +static const int kMinorVersion = 22; struct Options; struct ReadOptions; @@ -24,42 +26,44 @@ class WriteBatch; // Abstract handle to particular state of a DB. // A Snapshot is an immutable object and can therefore be safely // accessed from multiple threads without any external synchronization. -class Snapshot { +class LEVELDB_EXPORT Snapshot { protected: virtual ~Snapshot(); }; // A range of keys -struct Range { - Slice start; // Included in the range - Slice limit; // Not included in the range +struct LEVELDB_EXPORT Range { + Range() = default; + Range(const Slice& s, const Slice& l) : start(s), limit(l) {} - Range() { } - Range(const Slice& s, const Slice& l) : start(s), limit(l) { } + Slice start; // Included in the range + Slice limit; // Not included in the range }; // A DB is a persistent ordered map from keys to values. // A DB is safe for concurrent access from multiple threads without // any external synchronization. 
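Comparator above is the extension point for custom key ordering; a bare-bones sketch that simply reverses the byte-wise order (the class name is hypothetical, and the two FindShort* methods are left as no-ops, which the header explicitly allows):

```c++
#include <string>

#include "leveldb/comparator.h"
#include "leveldb/slice.h"

// Hypothetical comparator that sorts keys in reverse byte-wise order.
class ReverseBytewiseComparator : public leveldb::Comparator {
 public:
  int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const override {
    return -a.compare(b);  // flip the builtin ordering
  }
  // The name is persisted with the database, so a new ordering needs a new name.
  const char* Name() const override { return "example.ReverseBytewise"; }

  // Doing nothing here is a correct (if unoptimized) implementation.
  void FindShortestSeparator(std::string*, const leveldb::Slice&) const override {}
  void FindShortSuccessor(std::string*) const override {}
};
```

A comparator like this is plugged in through Options::comparator when the database is first created.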
-class DB { +class LEVELDB_EXPORT DB { public: // Open the database with the specified "name". // Stores a pointer to a heap-allocated database in *dbptr and returns // OK on success. - // Stores NULL in *dbptr and returns a non-OK status on error. + // Stores nullptr in *dbptr and returns a non-OK status on error. // Caller should delete *dbptr when it is no longer needed. - static Status Open(const Options& options, - const std::string& name, + static Status Open(const Options& options, const std::string& name, DB** dbptr); - DB() { } + DB() = default; + + DB(const DB&) = delete; + DB& operator=(const DB&) = delete; + virtual ~DB(); // Set the database entry for "key" to "value". Returns OK on success, // and a non-OK status on error. // Note: consider setting options.sync = true. - virtual Status Put(const WriteOptions& options, - const Slice& key, + virtual Status Put(const WriteOptions& options, const Slice& key, const Slice& value) = 0; // Remove the database entry (if any) for "key". Returns OK on @@ -80,8 +84,8 @@ class DB { // a status for which Status::IsNotFound() returns true. // // May return some other Status on an error. - virtual Status Get(const ReadOptions& options, - const Slice& key, std::string* value) = 0; + virtual Status Get(const ReadOptions& options, const Slice& key, + std::string* value) = 0; // Return a heap-allocated iterator over the contents of the database. // The result of NewIterator() is initially invalid (caller must @@ -136,27 +140,27 @@ class DB { // needed to access the data. This operation should typically only // be invoked by users who understand the underlying implementation. // - // begin==NULL is treated as a key before all keys in the database. - // end==NULL is treated as a key after all keys in the database. + // begin==nullptr is treated as a key before all keys in the database. + // end==nullptr is treated as a key after all keys in the database. // Therefore the following call will compact the entire database: - // db->CompactRange(NULL, NULL); + // db->CompactRange(nullptr, nullptr); virtual void CompactRange(const Slice* begin, const Slice* end) = 0; - - private: - // No copying allowed - DB(const DB&); - void operator=(const DB&); }; // Destroy the contents of the specified database. // Be very careful using this method. -Status DestroyDB(const std::string& name, const Options& options); +// +// Note: For backwards compatibility, if DestroyDB is unable to list the +// database files, Status::OK() will still be returned masking this failure. +LEVELDB_EXPORT Status DestroyDB(const std::string& name, + const Options& options); // If a DB cannot be opened, you may attempt to call this method to // resurrect as much of the contents of the database as possible. // Some data may be lost, so be careful when calling this function // on a database that contains important information. 
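The DB interface above reports missing keys through Status rather than exceptions; a compact sketch of the basic operations (the path and keys are placeholders):

```c++
#include <string>

#include "leveldb/db.h"

void BasicOps() {
  leveldb::Options options;
  options.create_if_missing = true;

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/testdb", &db);
  if (!s.ok()) return;

  s = db->Put(leveldb::WriteOptions(), "name", "leveldb");

  std::string value;
  s = db->Get(leveldb::ReadOptions(), "name", &value);
  if (s.IsNotFound()) {
    // A missing key is a distinct, non-error status.
  }

  s = db->Delete(leveldb::WriteOptions(), "name");

  // nullptr bounds compact the whole key range, as documented above.
  db->CompactRange(nullptr, nullptr);
  delete db;
}
```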
-Status RepairDB(const std::string& dbname, const Options& options); +LEVELDB_EXPORT Status RepairDB(const std::string& dbname, + const Options& options); } // namespace leveldb diff --git a/src/leveldb/include/leveldb/dumpfile.h b/src/leveldb/include/leveldb/dumpfile.h index 3f97fda16b..a58bc6b36c 100644 --- a/src/leveldb/include/leveldb/dumpfile.h +++ b/src/leveldb/include/leveldb/dumpfile.h @@ -6,7 +6,9 @@ #define STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_ #include <string> + #include "leveldb/env.h" +#include "leveldb/export.h" #include "leveldb/status.h" namespace leveldb { @@ -18,7 +20,8 @@ namespace leveldb { // // Returns a non-OK result if fname does not name a leveldb storage // file, or if the file cannot be read. -Status DumpFile(Env* env, const std::string& fname, WritableFile* dst); +LEVELDB_EXPORT Status DumpFile(Env* env, const std::string& fname, + WritableFile* dst); } // namespace leveldb diff --git a/src/leveldb/include/leveldb/env.h b/src/leveldb/include/leveldb/env.h index 275d441eae..96c21b3966 100644 --- a/src/leveldb/include/leveldb/env.h +++ b/src/leveldb/include/leveldb/env.h @@ -13,12 +13,36 @@ #ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_ #define STORAGE_LEVELDB_INCLUDE_ENV_H_ -#include <string> -#include <vector> #include <stdarg.h> #include <stdint.h> + +#include <string> +#include <vector> + +#include "leveldb/export.h" #include "leveldb/status.h" +#if defined(_WIN32) +// The leveldb::Env class below contains a DeleteFile method. +// At the same time, <windows.h>, a fairly popular header +// file for Windows applications, defines a DeleteFile macro. +// +// Without any intervention on our part, the result of this +// unfortunate coincidence is that the name of the +// leveldb::Env::DeleteFile method seen by the compiler depends on +// whether <windows.h> was included before or after the LevelDB +// headers. +// +// To avoid headaches, we undefined DeleteFile (if defined) and +// redefine it at the bottom of this file. This way <windows.h> +// can be included before this file (or not at all) and the +// exported method will always be leveldb::Env::DeleteFile. +#if defined(DeleteFile) +#undef DeleteFile +#define LEVELDB_DELETEFILE_UNDEFINED +#endif // defined(DeleteFile) +#endif // defined(_WIN32) + namespace leveldb { class FileLock; @@ -28,9 +52,13 @@ class SequentialFile; class Slice; class WritableFile; -class Env { +class LEVELDB_EXPORT Env { public: - Env() { } + Env() = default; + + Env(const Env&) = delete; + Env& operator=(const Env&) = delete; + virtual ~Env(); // Return a default environment suitable for the current operating @@ -40,20 +68,22 @@ class Env { // The result of Default() belongs to leveldb and must never be deleted. static Env* Default(); - // Create a brand new sequentially-readable file with the specified name. + // Create an object that sequentially reads the file with the specified name. // On success, stores a pointer to the new file in *result and returns OK. - // On failure stores NULL in *result and returns non-OK. If the file does - // not exist, returns a non-OK status. + // On failure stores nullptr in *result and returns non-OK. If the file does + // not exist, returns a non-OK status. Implementations should return a + // NotFound status when the file does not exist. // // The returned file will only be accessed by one thread at a time. 
virtual Status NewSequentialFile(const std::string& fname, SequentialFile** result) = 0; - // Create a brand new random access read-only file with the + // Create an object supporting random-access reads from the file with the // specified name. On success, stores a pointer to the new file in - // *result and returns OK. On failure stores NULL in *result and + // *result and returns OK. On failure stores nullptr in *result and // returns non-OK. If the file does not exist, returns a non-OK - // status. + // status. Implementations should return a NotFound status when the file does + // not exist. // // The returned file may be concurrently accessed by multiple threads. virtual Status NewRandomAccessFile(const std::string& fname, @@ -62,7 +92,7 @@ class Env { // Create an object that writes to a new file with the specified // name. Deletes any existing file with the same name and creates a // new file. On success, stores a pointer to the new file in - // *result and returns OK. On failure stores NULL in *result and + // *result and returns OK. On failure stores nullptr in *result and // returns non-OK. // // The returned file will only be accessed by one thread at a time. @@ -72,7 +102,7 @@ class Env { // Create an object that either appends to an existing file, or // writes to a new file (if the file does not exist to begin with). // On success, stores a pointer to the new file in *result and - // returns OK. On failure stores NULL in *result and returns + // returns OK. On failure stores nullptr in *result and returns // non-OK. // // The returned file will only be accessed by one thread at a time. @@ -110,7 +140,7 @@ class Env { const std::string& target) = 0; // Lock the specified file. Used to prevent concurrent access to - // the same db by multiple processes. On failure, stores NULL in + // the same db by multiple processes. On failure, stores nullptr in // *lock and returns non-OK. // // On success, stores a pointer to the object that represents the @@ -136,16 +166,14 @@ class Env { // added to the same Env may run concurrently in different threads. // I.e., the caller may not assume that background work items are // serialized. - virtual void Schedule( - void (*function)(void* arg), - void* arg) = 0; + virtual void Schedule(void (*function)(void* arg), void* arg) = 0; // Start a new thread, invoking "function(arg)" within the new thread. // When "function(arg)" returns, the thread will be destroyed. virtual void StartThread(void (*function)(void* arg), void* arg) = 0; // *path is set to a temporary directory that can be used for testing. It may - // or many not have just been created. The directory may or may not differ + // or may not have just been created. The directory may or may not differ // between runs of the same process, but subsequent calls will return the // same directory. virtual Status GetTestDirectory(std::string* path) = 0; @@ -159,17 +187,16 @@ class Env { // Sleep/delay the thread for the prescribed number of micro-seconds. virtual void SleepForMicroseconds(int micros) = 0; - - private: - // No copying allowed - Env(const Env&); - void operator=(const Env&); }; // A file abstraction for reading sequentially through a file -class SequentialFile { +class LEVELDB_EXPORT SequentialFile { public: - SequentialFile() { } + SequentialFile() = default; + + SequentialFile(const SequentialFile&) = delete; + SequentialFile& operator=(const SequentialFile&) = delete; + virtual ~SequentialFile(); // Read up to "n" bytes from the file. 
"scratch[0..n-1]" may be @@ -193,17 +220,16 @@ class SequentialFile { // Get a name for the file, only for error reporting virtual std::string GetName() const = 0; - - private: - // No copying allowed - SequentialFile(const SequentialFile&); - void operator=(const SequentialFile&); }; // A file abstraction for randomly reading the contents of a file. -class RandomAccessFile { +class LEVELDB_EXPORT RandomAccessFile { public: - RandomAccessFile() { } + RandomAccessFile() = default; + + RandomAccessFile(const RandomAccessFile&) = delete; + RandomAccessFile& operator=(const RandomAccessFile&) = delete; + virtual ~RandomAccessFile(); // Read up to "n" bytes from the file starting at "offset". @@ -220,19 +246,18 @@ class RandomAccessFile { // Get a name for the file, only for error reporting virtual std::string GetName() const = 0; - - private: - // No copying allowed - RandomAccessFile(const RandomAccessFile&); - void operator=(const RandomAccessFile&); }; // A file abstraction for sequential writing. The implementation // must provide buffering since callers may append small fragments // at a time to the file. -class WritableFile { +class LEVELDB_EXPORT WritableFile { public: - WritableFile() { } + WritableFile() = default; + + WritableFile(const WritableFile&) = delete; + WritableFile& operator=(const WritableFile&) = delete; + virtual ~WritableFile(); virtual Status Append(const Slice& data) = 0; @@ -242,119 +267,130 @@ class WritableFile { // Get a name for the file, only for error reporting virtual std::string GetName() const = 0; - - private: - // No copying allowed - WritableFile(const WritableFile&); - void operator=(const WritableFile&); }; // An interface for writing log messages. -class Logger { +class LEVELDB_EXPORT Logger { public: - Logger() { } + Logger() = default; + + Logger(const Logger&) = delete; + Logger& operator=(const Logger&) = delete; + virtual ~Logger(); // Write an entry to the log file with the specified format. virtual void Logv(const char* format, va_list ap) = 0; - - private: - // No copying allowed - Logger(const Logger&); - void operator=(const Logger&); }; - // Identifies a locked file. -class FileLock { +class LEVELDB_EXPORT FileLock { public: - FileLock() { } + FileLock() = default; + + FileLock(const FileLock&) = delete; + FileLock& operator=(const FileLock&) = delete; + virtual ~FileLock(); - private: - // No copying allowed - FileLock(const FileLock&); - void operator=(const FileLock&); }; -// Log the specified data to *info_log if info_log is non-NULL. -extern void Log(Logger* info_log, const char* format, ...) -# if defined(__GNUC__) || defined(__clang__) - __attribute__((__format__ (__printf__, 2, 3))) -# endif +// Log the specified data to *info_log if info_log is non-null. +void Log(Logger* info_log, const char* format, ...) +#if defined(__GNUC__) || defined(__clang__) + __attribute__((__format__(__printf__, 2, 3))) +#endif ; // A utility routine: write "data" to the named file. -extern Status WriteStringToFile(Env* env, const Slice& data, - const std::string& fname); +LEVELDB_EXPORT Status WriteStringToFile(Env* env, const Slice& data, + const std::string& fname); // A utility routine: read contents of named file into *data -extern Status ReadFileToString(Env* env, const std::string& fname, - std::string* data); +LEVELDB_EXPORT Status ReadFileToString(Env* env, const std::string& fname, + std::string* data); // An implementation of Env that forwards all calls to another Env. 
// May be useful to clients who wish to override just part of the // functionality of another Env. -class EnvWrapper : public Env { +class LEVELDB_EXPORT EnvWrapper : public Env { public: - // Initialize an EnvWrapper that delegates all calls to *t - explicit EnvWrapper(Env* t) : target_(t) { } + // Initialize an EnvWrapper that delegates all calls to *t. + explicit EnvWrapper(Env* t) : target_(t) {} virtual ~EnvWrapper(); - // Return the target to which this Env forwards all calls + // Return the target to which this Env forwards all calls. Env* target() const { return target_; } - // The following text is boilerplate that forwards all methods to target() - Status NewSequentialFile(const std::string& f, SequentialFile** r) { + // The following text is boilerplate that forwards all methods to target(). + Status NewSequentialFile(const std::string& f, SequentialFile** r) override { return target_->NewSequentialFile(f, r); } - Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) { + Status NewRandomAccessFile(const std::string& f, + RandomAccessFile** r) override { return target_->NewRandomAccessFile(f, r); } - Status NewWritableFile(const std::string& f, WritableFile** r) { + Status NewWritableFile(const std::string& f, WritableFile** r) override { return target_->NewWritableFile(f, r); } - Status NewAppendableFile(const std::string& f, WritableFile** r) { + Status NewAppendableFile(const std::string& f, WritableFile** r) override { return target_->NewAppendableFile(f, r); } - bool FileExists(const std::string& f) { return target_->FileExists(f); } - Status GetChildren(const std::string& dir, std::vector<std::string>* r) { + bool FileExists(const std::string& f) override { + return target_->FileExists(f); + } + Status GetChildren(const std::string& dir, + std::vector<std::string>* r) override { return target_->GetChildren(dir, r); } - Status DeleteFile(const std::string& f) { return target_->DeleteFile(f); } - Status CreateDir(const std::string& d) { return target_->CreateDir(d); } - Status DeleteDir(const std::string& d) { return target_->DeleteDir(d); } - Status GetFileSize(const std::string& f, uint64_t* s) { + Status DeleteFile(const std::string& f) override { + return target_->DeleteFile(f); + } + Status CreateDir(const std::string& d) override { + return target_->CreateDir(d); + } + Status DeleteDir(const std::string& d) override { + return target_->DeleteDir(d); + } + Status GetFileSize(const std::string& f, uint64_t* s) override { return target_->GetFileSize(f, s); } - Status RenameFile(const std::string& s, const std::string& t) { + Status RenameFile(const std::string& s, const std::string& t) override { return target_->RenameFile(s, t); } - Status LockFile(const std::string& f, FileLock** l) { + Status LockFile(const std::string& f, FileLock** l) override { return target_->LockFile(f, l); } - Status UnlockFile(FileLock* l) { return target_->UnlockFile(l); } - void Schedule(void (*f)(void*), void* a) { + Status UnlockFile(FileLock* l) override { return target_->UnlockFile(l); } + void Schedule(void (*f)(void*), void* a) override { return target_->Schedule(f, a); } - void StartThread(void (*f)(void*), void* a) { + void StartThread(void (*f)(void*), void* a) override { return target_->StartThread(f, a); } - virtual Status GetTestDirectory(std::string* path) { + Status GetTestDirectory(std::string* path) override { return target_->GetTestDirectory(path); } - virtual Status NewLogger(const std::string& fname, Logger** result) { + Status NewLogger(const std::string& 
fname, Logger** result) override { return target_->NewLogger(fname, result); } - uint64_t NowMicros() { - return target_->NowMicros(); - } - void SleepForMicroseconds(int micros) { + uint64_t NowMicros() override { return target_->NowMicros(); } + void SleepForMicroseconds(int micros) override { target_->SleepForMicroseconds(micros); } + private: Env* target_; }; } // namespace leveldb +// Redefine DeleteFile if necessary. +#if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED) +#if defined(UNICODE) +#define DeleteFile DeleteFileW +#else +#define DeleteFile DeleteFileA +#endif // defined(UNICODE) +#endif // defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED) + #endif // STORAGE_LEVELDB_INCLUDE_ENV_H_ diff --git a/src/leveldb/include/leveldb/export.h b/src/leveldb/include/leveldb/export.h new file mode 100644 index 0000000000..6ba9b183da --- /dev/null +++ b/src/leveldb/include/leveldb/export.h @@ -0,0 +1,33 @@ +// Copyright (c) 2017 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef STORAGE_LEVELDB_INCLUDE_EXPORT_H_ +#define STORAGE_LEVELDB_INCLUDE_EXPORT_H_ + +#if !defined(LEVELDB_EXPORT) + +#if defined(LEVELDB_SHARED_LIBRARY) +#if defined(_WIN32) + +#if defined(LEVELDB_COMPILE_LIBRARY) +#define LEVELDB_EXPORT __declspec(dllexport) +#else +#define LEVELDB_EXPORT __declspec(dllimport) +#endif // defined(LEVELDB_COMPILE_LIBRARY) + +#else // defined(_WIN32) +#if defined(LEVELDB_COMPILE_LIBRARY) +#define LEVELDB_EXPORT __attribute__((visibility("default"))) +#else +#define LEVELDB_EXPORT +#endif +#endif // defined(_WIN32) + +#else // defined(LEVELDB_SHARED_LIBRARY) +#define LEVELDB_EXPORT +#endif + +#endif // !defined(LEVELDB_EXPORT) + +#endif // STORAGE_LEVELDB_INCLUDE_EXPORT_H_ diff --git a/src/leveldb/include/leveldb/filter_policy.h b/src/leveldb/include/leveldb/filter_policy.h index 1fba08001f..49c8eda776 100644 --- a/src/leveldb/include/leveldb/filter_policy.h +++ b/src/leveldb/include/leveldb/filter_policy.h @@ -18,11 +18,13 @@ #include <string> +#include "leveldb/export.h" + namespace leveldb { class Slice; -class FilterPolicy { +class LEVELDB_EXPORT FilterPolicy { public: virtual ~FilterPolicy(); @@ -38,8 +40,8 @@ class FilterPolicy { // // Warning: do not change the initial contents of *dst. Instead, // append the newly constructed filter to *dst. - virtual void CreateFilter(const Slice* keys, int n, std::string* dst) - const = 0; + virtual void CreateFilter(const Slice* keys, int n, + std::string* dst) const = 0; // "filter" contains the data appended by a preceding call to // CreateFilter() on this class. This method must return true if @@ -63,8 +65,8 @@ class FilterPolicy { // ignores trailing spaces, it would be incorrect to use a // FilterPolicy (like NewBloomFilterPolicy) that does not ignore // trailing spaces in keys. 
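EnvWrapper above exists so a client can override a single Env method and forward everything else to a wrapped target; a hypothetical wrapper that instruments file creation, passed to leveldb through Options::env like any other Env:

```c++
#include <atomic>
#include <string>

#include "leveldb/env.h"

// Hypothetical Env that forwards every call except NewWritableFile,
// which it counts before delegating to the wrapped Env.
class CountingEnv : public leveldb::EnvWrapper {
 public:
  explicit CountingEnv(leveldb::Env* base) : EnvWrapper(base) {}

  leveldb::Status NewWritableFile(const std::string& fname,
                                  leveldb::WritableFile** result) override {
    writable_files_created_.fetch_add(1);
    return target()->NewWritableFile(fname, result);
  }

  int files_created() const { return writable_files_created_.load(); }

 private:
  std::atomic<int> writable_files_created_{0};
};
```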
-extern const FilterPolicy* NewBloomFilterPolicy(int bits_per_key); +LEVELDB_EXPORT const FilterPolicy* NewBloomFilterPolicy(int bits_per_key); -} +} // namespace leveldb #endif // STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_ diff --git a/src/leveldb/include/leveldb/iterator.h b/src/leveldb/include/leveldb/iterator.h index da631ed9d8..bb9a5df8f5 100644 --- a/src/leveldb/include/leveldb/iterator.h +++ b/src/leveldb/include/leveldb/iterator.h @@ -15,14 +15,19 @@ #ifndef STORAGE_LEVELDB_INCLUDE_ITERATOR_H_ #define STORAGE_LEVELDB_INCLUDE_ITERATOR_H_ +#include "leveldb/export.h" #include "leveldb/slice.h" #include "leveldb/status.h" namespace leveldb { -class Iterator { +class LEVELDB_EXPORT Iterator { public: Iterator(); + + Iterator(const Iterator&) = delete; + Iterator& operator=(const Iterator&) = delete; + virtual ~Iterator(); // An iterator is either positioned at a key/value pair, or @@ -72,28 +77,35 @@ class Iterator { // // Note that unlike all of the preceding methods, this method is // not abstract and therefore clients should not override it. - typedef void (*CleanupFunction)(void* arg1, void* arg2); + using CleanupFunction = void (*)(void* arg1, void* arg2); void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2); private: - struct Cleanup { + // Cleanup functions are stored in a single-linked list. + // The list's head node is inlined in the iterator. + struct CleanupNode { + // True if the node is not used. Only head nodes might be unused. + bool IsEmpty() const { return function == nullptr; } + // Invokes the cleanup function. + void Run() { + assert(function != nullptr); + (*function)(arg1, arg2); + } + + // The head node is used if the function pointer is not null. CleanupFunction function; void* arg1; void* arg2; - Cleanup* next; + CleanupNode* next; }; - Cleanup cleanup_; - - // No copying allowed - Iterator(const Iterator&); - void operator=(const Iterator&); + CleanupNode cleanup_head_; }; // Return an empty iterator (yields nothing). -extern Iterator* NewEmptyIterator(); +LEVELDB_EXPORT Iterator* NewEmptyIterator(); // Return an empty iterator with the specified status. -extern Iterator* NewErrorIterator(const Status& status); +LEVELDB_EXPORT Iterator* NewErrorIterator(const Status& status); } // namespace leveldb diff --git a/src/leveldb/include/leveldb/options.h b/src/leveldb/include/leveldb/options.h index 976e38122a..b7487726bc 100644 --- a/src/leveldb/include/leveldb/options.h +++ b/src/leveldb/include/leveldb/options.h @@ -7,6 +7,8 @@ #include <stddef.h> +#include "leveldb/export.h" + namespace leveldb { class Cache; @@ -23,12 +25,15 @@ class Snapshot; enum CompressionType { // NOTE: do not change the values of existing entries, as these are // part of the persistent format on disk. - kNoCompression = 0x0, + kNoCompression = 0x0, kSnappyCompression = 0x1 }; // Options to control the behavior of a database (passed to DB::Open) -struct Options { +struct LEVELDB_EXPORT Options { + // Create an Options object with default values for all fields. + Options(); + // ------------------- // Parameters that affect behavior @@ -41,20 +46,17 @@ struct Options { const Comparator* comparator; // If true, the database will be created if it is missing. - // Default: false - bool create_if_missing; + bool create_if_missing = false; // If true, an error is raised if the database already exists. 
- // Default: false - bool error_if_exists; + bool error_if_exists = false; // If true, the implementation will do aggressive checking of the // data it is processing and will stop early if it detects any // errors. This may have unforeseen ramifications: for example, a // corruption of one DB entry may cause a large number of entries to // become unreadable or for the entire DB to become unopenable. - // Default: false - bool paranoid_checks; + bool paranoid_checks = false; // Use the specified object to interact with the environment, // e.g. to read/write files, schedule background work, etc. @@ -62,10 +64,9 @@ struct Options { Env* env; // Any internal progress/error information generated by the db will - // be written to info_log if it is non-NULL, or to a file stored - // in the same directory as the DB contents if info_log is NULL. - // Default: NULL - Logger* info_log; + // be written to info_log if it is non-null, or to a file stored + // in the same directory as the DB contents if info_log is null. + Logger* info_log = nullptr; // ------------------- // Parameters that affect performance @@ -78,39 +79,30 @@ struct Options { // so you may wish to adjust this parameter to control memory usage. // Also, a larger write buffer will result in a longer recovery time // the next time the database is opened. - // - // Default: 4MB - size_t write_buffer_size; + size_t write_buffer_size = 4 * 1024 * 1024; // Number of open files that can be used by the DB. You may need to // increase this if your database has a large working set (budget // one open file per 2MB of working set). - // - // Default: 1000 - int max_open_files; + int max_open_files = 1000; // Control over blocks (user data is stored in a set of blocks, and // a block is the unit of reading from disk). - // If non-NULL, use the specified cache for blocks. - // If NULL, leveldb will automatically create and use an 8MB internal cache. - // Default: NULL - Cache* block_cache; + // If non-null, use the specified cache for blocks. + // If null, leveldb will automatically create and use an 8MB internal cache. + Cache* block_cache = nullptr; // Approximate size of user data packed per block. Note that the // block size specified here corresponds to uncompressed data. The // actual size of the unit read from disk may be smaller if // compression is enabled. This parameter can be changed dynamically. - // - // Default: 4K - size_t block_size; + size_t block_size = 4 * 1024; // Number of keys between restart points for delta encoding of keys. // This parameter can be changed dynamically. Most clients should // leave this parameter alone. - // - // Default: 16 - int block_restart_interval; + int block_restart_interval = 16; // Leveldb will write up to this amount of bytes to a file before // switching to a new one. @@ -120,9 +112,7 @@ struct Options { // compactions and hence longer latency/performance hiccups. // Another reason to increase this parameter might be when you are // initially populating a large database. - // - // Default: 2MB - size_t max_file_size; + size_t max_file_size = 2 * 1024 * 1024; // Compress blocks using the specified compression algorithm. This // parameter can be changed dynamically. @@ -138,53 +128,43 @@ struct Options { // worth switching to kNoCompression. Even if the input data is // incompressible, the kSnappyCompression implementation will // efficiently detect that and will switch to uncompressed mode. 
- CompressionType compression; + CompressionType compression = kSnappyCompression; // EXPERIMENTAL: If true, append to existing MANIFEST and log files // when a database is opened. This can significantly speed up open. // // Default: currently false, but may become true later. - bool reuse_logs; + bool reuse_logs = false; - // If non-NULL, use the specified filter policy to reduce disk reads. + // If non-null, use the specified filter policy to reduce disk reads. // Many applications will benefit from passing the result of // NewBloomFilterPolicy() here. - // - // Default: NULL - const FilterPolicy* filter_policy; - - // Create an Options object with default values for all fields. - Options(); + const FilterPolicy* filter_policy = nullptr; }; // Options that control read operations -struct ReadOptions { +struct LEVELDB_EXPORT ReadOptions { + ReadOptions() = default; + // If true, all data read from underlying storage will be // verified against corresponding checksums. - // Default: false - bool verify_checksums; + bool verify_checksums = false; // Should the data read for this iteration be cached in memory? // Callers may wish to set this field to false for bulk scans. - // Default: true - bool fill_cache; + bool fill_cache = true; - // If "snapshot" is non-NULL, read as of the supplied snapshot + // If "snapshot" is non-null, read as of the supplied snapshot // (which must belong to the DB that is being read and which must - // not have been released). If "snapshot" is NULL, use an implicit + // not have been released). If "snapshot" is null, use an implicit // snapshot of the state at the beginning of this read operation. - // Default: NULL - const Snapshot* snapshot; - - ReadOptions() - : verify_checksums(false), - fill_cache(true), - snapshot(NULL) { - } + const Snapshot* snapshot = nullptr; }; // Options that control write operations -struct WriteOptions { +struct LEVELDB_EXPORT WriteOptions { + WriteOptions() = default; + // If true, the write will be flushed from the operating system // buffer cache (by calling WritableFile::Sync()) before the write // is considered complete. If this flag is true, writes will be @@ -199,13 +179,7 @@ struct WriteOptions { // crash semantics as the "write()" system call. A DB write // with sync==true has similar crash semantics to a "write()" // system call followed by "fsync()". - // - // Default: false - bool sync; - - WriteOptions() - : sync(false) { - } + bool sync = false; }; } // namespace leveldb diff --git a/src/leveldb/include/leveldb/slice.h b/src/leveldb/include/leveldb/slice.h index bc367986f7..2df417dc31 100644 --- a/src/leveldb/include/leveldb/slice.h +++ b/src/leveldb/include/leveldb/slice.h @@ -18,23 +18,30 @@ #include <assert.h> #include <stddef.h> #include <string.h> + #include <string> +#include "leveldb/export.h" + namespace leveldb { -class Slice { +class LEVELDB_EXPORT Slice { public: // Create an empty slice. - Slice() : data_(""), size_(0) { } + Slice() : data_(""), size_(0) {} // Create a slice that refers to d[0,n-1]. - Slice(const char* d, size_t n) : data_(d), size_(n) { } + Slice(const char* d, size_t n) : data_(d), size_(n) {} // Create a slice that refers to the contents of "s" - Slice(const std::string& s) : data_(s.data()), size_(s.size()) { } + Slice(const std::string& s) : data_(s.data()), size_(s.size()) {} // Create a slice that refers to s[0,strlen(s)-1] - Slice(const char* s) : data_(s), size_(strlen(s)) { } + Slice(const char* s) : data_(s), size_(strlen(s)) {} + + // Intentionally copyable. 
+ Slice(const Slice&) = default; + Slice& operator=(const Slice&) = default; // Return a pointer to the beginning of the referenced data const char* data() const { return data_; } @@ -53,7 +60,10 @@ class Slice { } // Change this slice to refer to an empty array - void clear() { data_ = ""; size_ = 0; } + void clear() { + data_ = ""; + size_ = 0; + } // Drop the first "n" bytes from this slice. void remove_prefix(size_t n) { @@ -73,15 +83,12 @@ class Slice { // Return true iff "x" is a prefix of "*this" bool starts_with(const Slice& x) const { - return ((size_ >= x.size_) && - (memcmp(data_, x.data_, x.size_) == 0)); + return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0)); } private: const char* data_; size_t size_; - - // Intentionally copyable }; inline bool operator==(const Slice& x, const Slice& y) { @@ -89,21 +96,20 @@ inline bool operator==(const Slice& x, const Slice& y) { (memcmp(x.data(), y.data(), x.size()) == 0)); } -inline bool operator!=(const Slice& x, const Slice& y) { - return !(x == y); -} +inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); } inline int Slice::compare(const Slice& b) const { const size_t min_len = (size_ < b.size_) ? size_ : b.size_; int r = memcmp(data_, b.data_, min_len); if (r == 0) { - if (size_ < b.size_) r = -1; - else if (size_ > b.size_) r = +1; + if (size_ < b.size_) + r = -1; + else if (size_ > b.size_) + r = +1; } return r; } } // namespace leveldb - #endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_ diff --git a/src/leveldb/include/leveldb/status.h b/src/leveldb/include/leveldb/status.h index d9575f9753..e3273144e4 100644 --- a/src/leveldb/include/leveldb/status.h +++ b/src/leveldb/include/leveldb/status.h @@ -13,20 +13,25 @@ #ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_ #define STORAGE_LEVELDB_INCLUDE_STATUS_H_ +#include <algorithm> #include <string> + +#include "leveldb/export.h" #include "leveldb/slice.h" namespace leveldb { -class Status { +class LEVELDB_EXPORT Status { public: // Create a success status. - Status() : state_(NULL) { } + Status() noexcept : state_(nullptr) {} ~Status() { delete[] state_; } - // Copy the specified status. - Status(const Status& s); - void operator=(const Status& s); + Status(const Status& rhs); + Status& operator=(const Status& rhs); + + Status(Status&& rhs) noexcept : state_(rhs.state_) { rhs.state_ = nullptr; } + Status& operator=(Status&& rhs) noexcept; // Return a success status. static Status OK() { return Status(); } @@ -49,7 +54,7 @@ class Status { } // Returns true iff the status indicates success. - bool ok() const { return (state_ == NULL); } + bool ok() const { return (state_ == nullptr); } // Returns true iff the status indicates a NotFound error. bool IsNotFound() const { return code() == kNotFound; } @@ -71,13 +76,6 @@ class Status { std::string ToString() const; private: - // OK status has a NULL state_. Otherwise, state_ is a new[] array - // of the following form: - // state_[0..3] == length of message - // state_[4] == code - // state_[5..] == message - const char* state_; - enum Code { kOk = 0, kNotFound = 1, @@ -88,23 +86,35 @@ class Status { }; Code code() const { - return (state_ == NULL) ? kOk : static_cast<Code>(state_[4]); + return (state_ == nullptr) ? kOk : static_cast<Code>(state_[4]); } Status(Code code, const Slice& msg, const Slice& msg2); static const char* CopyState(const char* s); + + // OK status has a null state_. 
Otherwise, state_ is a new[] array + // of the following form: + // state_[0..3] == length of message + // state_[4] == code + // state_[5..] == message + const char* state_; }; -inline Status::Status(const Status& s) { - state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); +inline Status::Status(const Status& rhs) { + state_ = (rhs.state_ == nullptr) ? nullptr : CopyState(rhs.state_); } -inline void Status::operator=(const Status& s) { - // The following condition catches both aliasing (when this == &s), - // and the common case where both s and *this are ok. - if (state_ != s.state_) { +inline Status& Status::operator=(const Status& rhs) { + // The following condition catches both aliasing (when this == &rhs), + // and the common case where both rhs and *this are ok. + if (state_ != rhs.state_) { delete[] state_; - state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); + state_ = (rhs.state_ == nullptr) ? nullptr : CopyState(rhs.state_); } + return *this; +} +inline Status& Status::operator=(Status&& rhs) noexcept { + std::swap(state_, rhs.state_); + return *this; } } // namespace leveldb diff --git a/src/leveldb/include/leveldb/table.h b/src/leveldb/include/leveldb/table.h index a9746c3f5e..25c6013116 100644 --- a/src/leveldb/include/leveldb/table.h +++ b/src/leveldb/include/leveldb/table.h @@ -6,6 +6,8 @@ #define STORAGE_LEVELDB_INCLUDE_TABLE_H_ #include <stdint.h> + +#include "leveldb/export.h" #include "leveldb/iterator.h" namespace leveldb { @@ -21,7 +23,7 @@ class TableCache; // A Table is a sorted map from strings to strings. Tables are // immutable and persistent. A Table may be safely accessed from // multiple threads without external synchronization. -class Table { +class LEVELDB_EXPORT Table { public: // Attempt to open the table that is stored in bytes [0..file_size) // of "file", and read the metadata entries necessary to allow @@ -30,15 +32,16 @@ class Table { // If successful, returns ok and sets "*table" to the newly opened // table. The client should delete "*table" when no longer needed. // If there was an error while initializing the table, sets "*table" - // to NULL and returns a non-ok status. Does not take ownership of + // to nullptr and returns a non-ok status. Does not take ownership of // "*source", but the client must ensure that "source" remains live // for the duration of the returned table's lifetime. // // *file must remain live while this Table is in use. - static Status Open(const Options& options, - RandomAccessFile* file, - uint64_t file_size, - Table** table); + static Status Open(const Options& options, RandomAccessFile* file, + uint64_t file_size, Table** table); + + Table(const Table&) = delete; + Table& operator=(const Table&) = delete; ~Table(); @@ -56,28 +59,24 @@ class Table { uint64_t ApproximateOffsetOf(const Slice& key) const; private: + friend class TableCache; struct Rep; - Rep* rep_; - explicit Table(Rep* rep) { rep_ = rep; } static Iterator* BlockReader(void*, const ReadOptions&, const Slice&); + explicit Table(Rep* rep) : rep_(rep) {} + // Calls (*handle_result)(arg, ...) with the entry found after a call // to Seek(key). May not make such a call if filter policy says // that key is not present. 
- friend class TableCache; - Status InternalGet( - const ReadOptions&, const Slice& key, - void* arg, - void (*handle_result)(void* arg, const Slice& k, const Slice& v)); - + Status InternalGet(const ReadOptions&, const Slice& key, void* arg, + void (*handle_result)(void* arg, const Slice& k, + const Slice& v)); void ReadMeta(const Footer& footer); void ReadFilter(const Slice& filter_handle_value); - // No copying allowed - Table(const Table&); - void operator=(const Table&); + Rep* const rep_; }; } // namespace leveldb diff --git a/src/leveldb/include/leveldb/table_builder.h b/src/leveldb/include/leveldb/table_builder.h index 5fd1dc71f1..7d8896bb89 100644 --- a/src/leveldb/include/leveldb/table_builder.h +++ b/src/leveldb/include/leveldb/table_builder.h @@ -14,6 +14,8 @@ #define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ #include <stdint.h> + +#include "leveldb/export.h" #include "leveldb/options.h" #include "leveldb/status.h" @@ -23,13 +25,16 @@ class BlockBuilder; class BlockHandle; class WritableFile; -class TableBuilder { +class LEVELDB_EXPORT TableBuilder { public: // Create a builder that will store the contents of the table it is // building in *file. Does not close the file. It is up to the // caller to close the file after calling Finish(). TableBuilder(const Options& options, WritableFile* file); + TableBuilder(const TableBuilder&) = delete; + TableBuilder& operator=(const TableBuilder&) = delete; + // REQUIRES: Either Finish() or Abandon() has been called. ~TableBuilder(); @@ -81,10 +86,6 @@ class TableBuilder { struct Rep; Rep* rep_; - - // No copying allowed - TableBuilder(const TableBuilder&); - void operator=(const TableBuilder&); }; } // namespace leveldb diff --git a/src/leveldb/include/leveldb/write_batch.h b/src/leveldb/include/leveldb/write_batch.h index ee9aab68e0..94d4115fed 100644 --- a/src/leveldb/include/leveldb/write_batch.h +++ b/src/leveldb/include/leveldb/write_batch.h @@ -22,15 +22,29 @@ #define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ #include <string> + +#include "leveldb/export.h" #include "leveldb/status.h" namespace leveldb { class Slice; -class WriteBatch { +class LEVELDB_EXPORT WriteBatch { public: + class LEVELDB_EXPORT Handler { + public: + virtual ~Handler(); + virtual void Put(const Slice& key, const Slice& value) = 0; + virtual void Delete(const Slice& key) = 0; + }; + WriteBatch(); + + // Intentionally copyable. + WriteBatch(const WriteBatch&) = default; + WriteBatch& operator=(const WriteBatch&) = default; + ~WriteBatch(); // Store the mapping "key->value" in the database. @@ -42,21 +56,26 @@ class WriteBatch { // Clear all updates buffered in this batch. void Clear(); + // The size of the database changes caused by this batch. + // + // This number is tied to implementation details, and may change across + // releases. It is intended for LevelDB usage metrics. + size_t ApproximateSize() const; + + // Copies the operations in "source" to this batch. + // + // This runs in O(source size) time. However, the constant factor is better + // than calling Iterate() over the source batch with a Handler that replicates + // the operations into this batch. + void Append(const WriteBatch& source); + // Support for iterating over the contents of a batch. 
- class Handler { - public: - virtual ~Handler(); - virtual void Put(const Slice& key, const Slice& value) = 0; - virtual void Delete(const Slice& key) = 0; - }; Status Iterate(Handler* handler) const; private: friend class WriteBatchInternal; std::string rep_; // See comment in write_batch.cc for the format of rep_ - - // Intentionally copyable }; } // namespace leveldb diff --git a/src/leveldb/issues/issue178_test.cc b/src/leveldb/issues/issue178_test.cc index 1b1cf8bb28..d50ffeb9d4 100644 --- a/src/leveldb/issues/issue178_test.cc +++ b/src/leveldb/issues/issue178_test.cc @@ -3,9 +3,9 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. // Test for issue 178: a manual compaction causes deleted data to reappear. +#include <cstdlib> #include <iostream> #include <sstream> -#include <cstdlib> #include "leveldb/db.h" #include "leveldb/write_batch.h" @@ -21,11 +21,9 @@ std::string Key1(int i) { return buf; } -std::string Key2(int i) { - return Key1(i) + "_xxx"; -} +std::string Key2(int i) { return Key1(i) + "_xxx"; } -class Issue178 { }; +class Issue178 {}; TEST(Issue178, Test) { // Get rid of any state from an old run. @@ -87,6 +85,4 @@ TEST(Issue178, Test) { } // anonymous namespace -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/issues/issue200_test.cc b/src/leveldb/issues/issue200_test.cc index 1cec79f443..877b2afc47 100644 --- a/src/leveldb/issues/issue200_test.cc +++ b/src/leveldb/issues/issue200_test.cc @@ -11,14 +11,14 @@ namespace leveldb { -class Issue200 { }; +class Issue200 {}; TEST(Issue200, Test) { // Get rid of any state from an old run. std::string dbpath = test::TmpDir() + "/leveldb_issue200_test"; DestroyDB(dbpath, Options()); - DB *db; + DB* db; Options options; options.create_if_missing = true; ASSERT_OK(DB::Open(options, dbpath, &db)); @@ -31,7 +31,7 @@ TEST(Issue200, Test) { ASSERT_OK(db->Put(write_options, "5", "f")); ReadOptions read_options; - Iterator *iter = db->NewIterator(read_options); + Iterator* iter = db->NewIterator(read_options); // Add an element that should not be reflected in the iterator. ASSERT_OK(db->Put(write_options, "25", "cd")); @@ -54,6 +54,4 @@ TEST(Issue200, Test) { } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/issues/issue320_test.cc b/src/leveldb/issues/issue320_test.cc new file mode 100644 index 0000000000..c5fcbfc6e7 --- /dev/null +++ b/src/leveldb/issues/issue320_test.cc @@ -0,0 +1,128 @@ +// Copyright (c) 2019 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include <cstdint> +#include <cstdlib> +#include <iostream> +#include <memory> +#include <string> +#include <vector> + +#include "leveldb/db.h" +#include "leveldb/write_batch.h" +#include "util/testharness.h" + +namespace leveldb { + +namespace { + +// Creates a random number in the range of [0, max). 
+int GenerateRandomNumber(int max) { return std::rand() % max; } + +std::string CreateRandomString(int32_t index) { + static const size_t len = 1024; + char bytes[len]; + size_t i = 0; + while (i < 8) { + bytes[i] = 'a' + ((index >> (4 * i)) & 0xf); + ++i; + } + while (i < sizeof(bytes)) { + bytes[i] = 'a' + GenerateRandomNumber(26); + ++i; + } + return std::string(bytes, sizeof(bytes)); +} + +} // namespace + +class Issue320 {}; + +TEST(Issue320, Test) { + std::srand(0); + + bool delete_before_put = false; + bool keep_snapshots = true; + + std::vector<std::unique_ptr<std::pair<std::string, std::string>>> test_map( + 10000); + std::vector<Snapshot const*> snapshots(100, nullptr); + + DB* db; + Options options; + options.create_if_missing = true; + + std::string dbpath = test::TmpDir() + "/leveldb_issue320_test"; + ASSERT_OK(DB::Open(options, dbpath, &db)); + + uint32_t target_size = 10000; + uint32_t num_items = 0; + uint32_t count = 0; + std::string key; + std::string value, old_value; + + WriteOptions writeOptions; + ReadOptions readOptions; + while (count < 200000) { + if ((++count % 1000) == 0) { + std::cout << "count: " << count << std::endl; + } + + int index = GenerateRandomNumber(test_map.size()); + WriteBatch batch; + + if (test_map[index] == nullptr) { + num_items++; + test_map[index].reset(new std::pair<std::string, std::string>( + CreateRandomString(index), CreateRandomString(index))); + batch.Put(test_map[index]->first, test_map[index]->second); + } else { + ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value)); + if (old_value != test_map[index]->second) { + std::cout << "ERROR incorrect value returned by Get" << std::endl; + std::cout << " count=" << count << std::endl; + std::cout << " old value=" << old_value << std::endl; + std::cout << " test_map[index]->second=" << test_map[index]->second + << std::endl; + std::cout << " test_map[index]->first=" << test_map[index]->first + << std::endl; + std::cout << " index=" << index << std::endl; + ASSERT_EQ(old_value, test_map[index]->second); + } + + if (num_items >= target_size && GenerateRandomNumber(100) > 30) { + batch.Delete(test_map[index]->first); + test_map[index] = nullptr; + --num_items; + } else { + test_map[index]->second = CreateRandomString(index); + if (delete_before_put) batch.Delete(test_map[index]->first); + batch.Put(test_map[index]->first, test_map[index]->second); + } + } + + ASSERT_OK(db->Write(writeOptions, &batch)); + + if (keep_snapshots && GenerateRandomNumber(10) == 0) { + int i = GenerateRandomNumber(snapshots.size()); + if (snapshots[i] != nullptr) { + db->ReleaseSnapshot(snapshots[i]); + } + snapshots[i] = db->GetSnapshot(); + } + } + + for (Snapshot const* snapshot : snapshots) { + if (snapshot) { + db->ReleaseSnapshot(snapshot); + } + } + + delete db; + DestroyDB(dbpath, options); +} + +} // namespace leveldb + +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/port/README b/src/leveldb/port/README.md index 422563e25c..8b171532e1 100644 --- a/src/leveldb/port/README +++ b/src/leveldb/port/README.md @@ -5,6 +5,6 @@ Code in the rest of the package includes "port.h" from this directory. "port.h" in turn includes a platform specific "port_<platform>.h" file that provides the platform specific implementation. -See port_posix.h for an example of what must be provided in a platform +See port_stdcxx.h for an example of what must be provided in a platform specific header file. 
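The reworked leveldb/write_batch.h above makes Handler part of the public interface and adds ApproximateSize() and Append(). A minimal usage sketch of that interface, assuming a C++11 program linked against this LevelDB; the DumpHandler class and the key/value strings are illustrative only, not part of the library:

#include <cassert>
#include <iostream>

#include "leveldb/slice.h"
#include "leveldb/status.h"
#include "leveldb/write_batch.h"

// Handler that prints every operation recorded in a batch.
class DumpHandler : public leveldb::WriteBatch::Handler {
 public:
  void Put(const leveldb::Slice& key, const leveldb::Slice& value) override {
    std::cout << "PUT " << key.ToString() << " -> " << value.ToString() << "\n";
  }
  void Delete(const leveldb::Slice& key) override {
    std::cout << "DEL " << key.ToString() << "\n";
  }
};

int main() {
  leveldb::WriteBatch a;
  leveldb::WriteBatch b;
  a.Put("k1", "v1");
  b.Put("k2", "v2");
  b.Delete("k1");

  // Append() copies b's operations into a, in time linear in b's size.
  a.Append(b);

  // ApproximateSize() is the encoded size of the batch; the exact value is an
  // implementation detail, so only a relative comparison is made here.
  assert(a.ApproximateSize() >= b.ApproximateSize());

  // Iterate() replays the batch (two Puts and one Delete) through the handler.
  DumpHandler handler;
  leveldb::Status s = a.Iterate(&handler);
  return s.ok() ? 0 : 1;
}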
diff --git a/src/leveldb/port/atomic_pointer.h b/src/leveldb/port/atomic_pointer.h deleted file mode 100644 index d79a02230d..0000000000 --- a/src/leveldb/port/atomic_pointer.h +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -// AtomicPointer provides storage for a lock-free pointer. -// Platform-dependent implementation of AtomicPointer: -// - If the platform provides a cheap barrier, we use it with raw pointers -// - If <atomic> is present (on newer versions of gcc, it is), we use -// a <atomic>-based AtomicPointer. However we prefer the memory -// barrier based version, because at least on a gcc 4.4 32-bit build -// on linux, we have encountered a buggy <atomic> implementation. -// Also, some <atomic> implementations are much slower than a memory-barrier -// based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for -// a barrier based acquire-load). -// This code is based on atomicops-internals-* in Google's perftools: -// http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase - -#ifndef PORT_ATOMIC_POINTER_H_ -#define PORT_ATOMIC_POINTER_H_ - -#include <stdint.h> -#ifdef LEVELDB_ATOMIC_PRESENT -#include <atomic> -#endif -#ifdef OS_WIN -#include <windows.h> -#endif -#ifdef OS_MACOSX -#include <libkern/OSAtomic.h> -#endif - -#if defined(_M_X64) || defined(__x86_64__) -#define ARCH_CPU_X86_FAMILY 1 -#elif defined(_M_IX86) || defined(__i386__) || defined(__i386) -#define ARCH_CPU_X86_FAMILY 1 -#elif defined(__ARMEL__) -#define ARCH_CPU_ARM_FAMILY 1 -#elif defined(__aarch64__) -#define ARCH_CPU_ARM64_FAMILY 1 -#elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__) -#define ARCH_CPU_PPC_FAMILY 1 -#elif defined(__mips__) -#define ARCH_CPU_MIPS_FAMILY 1 -#endif - -namespace leveldb { -namespace port { - -// AtomicPointer based on <cstdatomic> if available -#if defined(LEVELDB_ATOMIC_PRESENT) -class AtomicPointer { - private: - std::atomic<void*> rep_; - public: - AtomicPointer() { } - explicit AtomicPointer(void* v) : rep_(v) { } - inline void* Acquire_Load() const { - return rep_.load(std::memory_order_acquire); - } - inline void Release_Store(void* v) { - rep_.store(v, std::memory_order_release); - } - inline void* NoBarrier_Load() const { - return rep_.load(std::memory_order_relaxed); - } - inline void NoBarrier_Store(void* v) { - rep_.store(v, std::memory_order_relaxed); - } -}; - -#else - -// Define MemoryBarrier() if available -// Windows on x86 -#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY) -// windows.h already provides a MemoryBarrier(void) macro -// http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx -#define LEVELDB_HAVE_MEMORY_BARRIER - -// Mac OS -#elif defined(OS_MACOSX) -inline void MemoryBarrier() { - OSMemoryBarrier(); -} -#define LEVELDB_HAVE_MEMORY_BARRIER - -// Gcc on x86 -#elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__) -inline void MemoryBarrier() { - // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on - // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering. 
- __asm__ __volatile__("" : : : "memory"); -} -#define LEVELDB_HAVE_MEMORY_BARRIER - -// Sun Studio -#elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC) -inline void MemoryBarrier() { - // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on - // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering. - asm volatile("" : : : "memory"); -} -#define LEVELDB_HAVE_MEMORY_BARRIER - -// ARM Linux -#elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__) -typedef void (*LinuxKernelMemoryBarrierFunc)(void); -// The Linux ARM kernel provides a highly optimized device-specific memory -// barrier function at a fixed memory address that is mapped in every -// user-level process. -// -// This beats using CPU-specific instructions which are, on single-core -// devices, un-necessary and very costly (e.g. ARMv7-A "dmb" takes more -// than 180ns on a Cortex-A8 like the one on a Nexus One). Benchmarking -// shows that the extra function call cost is completely negligible on -// multi-core devices. -// -inline void MemoryBarrier() { - (*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)(); -} -#define LEVELDB_HAVE_MEMORY_BARRIER - -// ARM64 -#elif defined(ARCH_CPU_ARM64_FAMILY) -inline void MemoryBarrier() { - asm volatile("dmb sy" : : : "memory"); -} -#define LEVELDB_HAVE_MEMORY_BARRIER - -// PPC -#elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__) -inline void MemoryBarrier() { - // TODO for some powerpc expert: is there a cheaper suitable variant? - // Perhaps by having separate barriers for acquire and release ops. - asm volatile("sync" : : : "memory"); -} -#define LEVELDB_HAVE_MEMORY_BARRIER - -// MIPS -#elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__) -inline void MemoryBarrier() { - __asm__ __volatile__("sync" : : : "memory"); -} -#define LEVELDB_HAVE_MEMORY_BARRIER - -#endif - -// AtomicPointer built using platform-specific MemoryBarrier() -#if defined(LEVELDB_HAVE_MEMORY_BARRIER) -class AtomicPointer { - private: - void* rep_; - public: - AtomicPointer() { } - explicit AtomicPointer(void* p) : rep_(p) {} - inline void* NoBarrier_Load() const { return rep_; } - inline void NoBarrier_Store(void* v) { rep_ = v; } - inline void* Acquire_Load() const { - void* result = rep_; - MemoryBarrier(); - return result; - } - inline void Release_Store(void* v) { - MemoryBarrier(); - rep_ = v; - } -}; - -// Atomic pointer based on sparc memory barriers -#elif defined(__sparcv9) && defined(__GNUC__) -class AtomicPointer { - private: - void* rep_; - public: - AtomicPointer() { } - explicit AtomicPointer(void* v) : rep_(v) { } - inline void* Acquire_Load() const { - void* val; - __asm__ __volatile__ ( - "ldx [%[rep_]], %[val] \n\t" - "membar #LoadLoad|#LoadStore \n\t" - : [val] "=r" (val) - : [rep_] "r" (&rep_) - : "memory"); - return val; - } - inline void Release_Store(void* v) { - __asm__ __volatile__ ( - "membar #LoadStore|#StoreStore \n\t" - "stx %[v], [%[rep_]] \n\t" - : - : [rep_] "r" (&rep_), [v] "r" (v) - : "memory"); - } - inline void* NoBarrier_Load() const { return rep_; } - inline void NoBarrier_Store(void* v) { rep_ = v; } -}; - -// Atomic pointer based on ia64 acq/rel -#elif defined(__ia64) && defined(__GNUC__) -class AtomicPointer { - private: - void* rep_; - public: - AtomicPointer() { } - explicit AtomicPointer(void* v) : rep_(v) { } - inline void* Acquire_Load() const { - void* val ; - __asm__ __volatile__ ( - "ld8.acq %[val] = [%[rep_]] \n\t" - : [val] "=r" (val) - : [rep_] "r" (&rep_) - : "memory" - ); - return val; - } - inline void Release_Store(void* v) { 
- __asm__ __volatile__ ( - "st8.rel [%[rep_]] = %[v] \n\t" - : - : [rep_] "r" (&rep_), [v] "r" (v) - : "memory" - ); - } - inline void* NoBarrier_Load() const { return rep_; } - inline void NoBarrier_Store(void* v) { rep_ = v; } -}; - -// We have neither MemoryBarrier(), nor <atomic> -#else -#error Please implement AtomicPointer for this platform. - -#endif -#endif - -#undef LEVELDB_HAVE_MEMORY_BARRIER -#undef ARCH_CPU_X86_FAMILY -#undef ARCH_CPU_ARM_FAMILY -#undef ARCH_CPU_ARM64_FAMILY -#undef ARCH_CPU_PPC_FAMILY - -} // namespace port -} // namespace leveldb - -#endif // PORT_ATOMIC_POINTER_H_ diff --git a/src/leveldb/port/port.h b/src/leveldb/port/port.h index 4baafa8e22..4b247f74f9 100644 --- a/src/leveldb/port/port.h +++ b/src/leveldb/port/port.h @@ -10,12 +10,10 @@ // Include the appropriate platform specific file below. If you are // porting to a new platform, see "port_example.h" for documentation // of what the new port_<platform>.h file must provide. -#if defined(LEVELDB_PLATFORM_POSIX) -# include "port/port_posix.h" +#if defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_WINDOWS) +#include "port/port_stdcxx.h" #elif defined(LEVELDB_PLATFORM_CHROMIUM) -# include "port/port_chromium.h" -#elif defined(LEVELDB_PLATFORM_WINDOWS) -# include "port/port_win.h" +#include "port/port_chromium.h" #endif #endif // STORAGE_LEVELDB_PORT_PORT_H_ diff --git a/src/leveldb/port/port_config.h.in b/src/leveldb/port/port_config.h.in new file mode 100644 index 0000000000..21273153a3 --- /dev/null +++ b/src/leveldb/port/port_config.h.in @@ -0,0 +1,39 @@ +// Copyright 2017 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef STORAGE_LEVELDB_PORT_PORT_CONFIG_H_ +#define STORAGE_LEVELDB_PORT_PORT_CONFIG_H_ + +// Define to 1 if you have a definition for fdatasync() in <unistd.h>. +#if !defined(HAVE_FDATASYNC) +#cmakedefine01 HAVE_FDATASYNC +#endif // !defined(HAVE_FDATASYNC) + +// Define to 1 if you have a definition for F_FULLFSYNC in <fcntl.h>. +#if !defined(HAVE_FULLFSYNC) +#cmakedefine01 HAVE_FULLFSYNC +#endif // !defined(HAVE_FULLFSYNC) + +// Define to 1 if you have a definition for O_CLOEXEC in <fcntl.h>. +#if !defined(HAVE_O_CLOEXEC) +#cmakedefine01 HAVE_O_CLOEXEC +#endif // !defined(HAVE_O_CLOEXEC) + +// Define to 1 if you have Google CRC32C. +#if !defined(HAVE_CRC32C) +#cmakedefine01 HAVE_CRC32C +#endif // !defined(HAVE_CRC32C) + +// Define to 1 if you have Google Snappy. +#if !defined(HAVE_SNAPPY) +#cmakedefine01 HAVE_SNAPPY +#endif // !defined(HAVE_SNAPPY) + +// Define to 1 if your processor stores words with the most significant byte +// first (like Motorola and SPARC, unlike Intel and VAX). +#if !defined(LEVELDB_IS_BIG_ENDIAN) +#cmakedefine01 LEVELDB_IS_BIG_ENDIAN +#endif // !defined(LEVELDB_IS_BIG_ENDIAN) + +#endif // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
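The new port_config.h.in above is a CMake template: configure_file() rewrites each #cmakedefine01 NAME line as a plain #define NAME 0 or #define NAME 1, so consumers can test the feature with #if rather than #ifdef. A sketch of what the generated port/port_config.h might look like on a host where fdatasync(), O_CLOEXEC and the crc32c library were detected but F_FULLFSYNC and Snappy were not; the values shown are illustrative, not taken from any particular build:

// port/port_config.h, as produced by configure_file() (illustrative values).
#ifndef STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
#define STORAGE_LEVELDB_PORT_PORT_CONFIG_H_

#if !defined(HAVE_FDATASYNC)
#define HAVE_FDATASYNC 1
#endif  // !defined(HAVE_FDATASYNC)

#if !defined(HAVE_FULLFSYNC)
#define HAVE_FULLFSYNC 0
#endif  // !defined(HAVE_FULLFSYNC)

#if !defined(HAVE_O_CLOEXEC)
#define HAVE_O_CLOEXEC 1
#endif  // !defined(HAVE_O_CLOEXEC)

#if !defined(HAVE_CRC32C)
#define HAVE_CRC32C 1
#endif  // !defined(HAVE_CRC32C)

#if !defined(HAVE_SNAPPY)
#define HAVE_SNAPPY 0
#endif  // !defined(HAVE_SNAPPY)

#if !defined(LEVELDB_IS_BIG_ENDIAN)
#define LEVELDB_IS_BIG_ENDIAN 0
#endif  // !defined(LEVELDB_IS_BIG_ENDIAN)

#endif  // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_

port_stdcxx.h, shown further below, then keys its HAVE_SNAPPY and HAVE_CRC32C code paths off these 0/1 values.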
\ No newline at end of file diff --git a/src/leveldb/port/port_example.h b/src/leveldb/port/port_example.h index 5b1d027de5..1a8fca24b3 100644 --- a/src/leveldb/port/port_example.h +++ b/src/leveldb/port/port_example.h @@ -10,6 +10,8 @@ #ifndef STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_ #define STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_ +#include "port/thread_annotations.h" + namespace leveldb { namespace port { @@ -23,23 +25,23 @@ static const bool kLittleEndian = true /* or some other expression */; // ------------------ Threading ------------------- // A Mutex represents an exclusive lock. -class Mutex { +class LOCKABLE Mutex { public: Mutex(); ~Mutex(); // Lock the mutex. Waits until other lockers have exited. // Will deadlock if the mutex is already locked by this thread. - void Lock(); + void Lock() EXCLUSIVE_LOCK_FUNCTION(); // Unlock the mutex. // REQUIRES: This mutex was locked by this thread. - void Unlock(); + void Unlock() UNLOCK_FUNCTION(); // Optionally crash if this thread does not hold this mutex. // The implementation must be fast, especially if NDEBUG is // defined. The implementation is allowed to skip all checks. - void AssertHeld(); + void AssertHeld() ASSERT_EXCLUSIVE_LOCK(); }; class CondVar { @@ -60,57 +62,18 @@ class CondVar { void SignallAll(); }; -// Thread-safe initialization. -// Used as follows: -// static port::OnceType init_control = LEVELDB_ONCE_INIT; -// static void Initializer() { ... do something ...; } -// ... -// port::InitOnce(&init_control, &Initializer); -typedef intptr_t OnceType; -#define LEVELDB_ONCE_INIT 0 -extern void InitOnce(port::OnceType*, void (*initializer)()); - -// A type that holds a pointer that can be read or written atomically -// (i.e., without word-tearing.) -class AtomicPointer { - private: - intptr_t rep_; - public: - // Initialize to arbitrary value - AtomicPointer(); - - // Initialize to hold v - explicit AtomicPointer(void* v) : rep_(v) { } - - // Read and return the stored pointer with the guarantee that no - // later memory access (read or write) by this thread can be - // reordered ahead of this read. - void* Acquire_Load() const; - - // Set v as the stored pointer with the guarantee that no earlier - // memory access (read or write) by this thread can be reordered - // after this store. - void Release_Store(void* v); - - // Read the stored pointer with no ordering guarantees. - void* NoBarrier_Load() const; - - // Set va as the stored pointer with no ordering guarantees. - void NoBarrier_Store(void* v); -}; - // ------------------ Compression ------------------- // Store the snappy compression of "input[0,input_length-1]" in *output. // Returns false if snappy is not supported by this port. -extern bool Snappy_Compress(const char* input, size_t input_length, - std::string* output); +bool Snappy_Compress(const char* input, size_t input_length, + std::string* output); // If input[0,input_length-1] looks like a valid snappy compressed // buffer, store the size of the uncompressed data in *result and // return true. Else return false. -extern bool Snappy_GetUncompressedLength(const char* input, size_t length, - size_t* result); +bool Snappy_GetUncompressedLength(const char* input, size_t length, + size_t* result); // Attempt to snappy uncompress input[0,input_length-1] into *output. 
// Returns true if successful, false if the input is invalid lightweight @@ -119,19 +82,15 @@ extern bool Snappy_GetUncompressedLength(const char* input, size_t length, // REQUIRES: at least the first "n" bytes of output[] must be writable // where "n" is the result of a successful call to // Snappy_GetUncompressedLength. -extern bool Snappy_Uncompress(const char* input_data, size_t input_length, - char* output); +bool Snappy_Uncompress(const char* input_data, size_t input_length, + char* output); // ------------------ Miscellaneous ------------------- // If heap profiling is not supported, returns false. // Else repeatedly calls (*func)(arg, data, n) and then returns true. // The concatenation of all "data[0,n-1]" fragments is the heap profile. -extern bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg); - -// Determine whether a working accelerated crc32 implementation exists -// Returns true if AcceleratedCRC32C is safe to call -bool HasAcceleratedCRC32C(); +bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg); // Extend the CRC to include the first n bytes of buf. // diff --git a/src/leveldb/port/port_posix.cc b/src/leveldb/port/port_posix.cc deleted file mode 100644 index ec39e92195..0000000000 --- a/src/leveldb/port/port_posix.cc +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -#include "port/port_posix.h" - -#include <cstdlib> -#include <stdio.h> -#include <string.h> - -#if (defined(__x86_64__) || defined(__i386__)) && defined(__GNUC__) -#include <cpuid.h> -#endif - -namespace leveldb { -namespace port { - -static void PthreadCall(const char* label, int result) { - if (result != 0) { - fprintf(stderr, "pthread %s: %s\n", label, strerror(result)); - abort(); - } -} - -Mutex::Mutex() { PthreadCall("init mutex", pthread_mutex_init(&mu_, NULL)); } - -Mutex::~Mutex() { PthreadCall("destroy mutex", pthread_mutex_destroy(&mu_)); } - -void Mutex::Lock() { PthreadCall("lock", pthread_mutex_lock(&mu_)); } - -void Mutex::Unlock() { PthreadCall("unlock", pthread_mutex_unlock(&mu_)); } - -CondVar::CondVar(Mutex* mu) - : mu_(mu) { - PthreadCall("init cv", pthread_cond_init(&cv_, NULL)); -} - -CondVar::~CondVar() { PthreadCall("destroy cv", pthread_cond_destroy(&cv_)); } - -void CondVar::Wait() { - PthreadCall("wait", pthread_cond_wait(&cv_, &mu_->mu_)); -} - -void CondVar::Signal() { - PthreadCall("signal", pthread_cond_signal(&cv_)); -} - -void CondVar::SignalAll() { - PthreadCall("broadcast", pthread_cond_broadcast(&cv_)); -} - -void InitOnce(OnceType* once, void (*initializer)()) { - PthreadCall("once", pthread_once(once, initializer)); -} - -bool HasAcceleratedCRC32C() { -#if (defined(__x86_64__) || defined(__i386__)) && defined(__GNUC__) - unsigned int eax, ebx, ecx, edx; - __get_cpuid(1, &eax, &ebx, &ecx, &edx); - return (ecx & (1 << 20)) != 0; -#else - return false; -#endif -} - -} // namespace port -} // namespace leveldb diff --git a/src/leveldb/port/port_posix.h b/src/leveldb/port/port_posix.h deleted file mode 100644 index d85fa5d63f..0000000000 --- a/src/leveldb/port/port_posix.h +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. 
-// -// See port_example.h for documentation for the following types/functions. - -#ifndef STORAGE_LEVELDB_PORT_PORT_POSIX_H_ -#define STORAGE_LEVELDB_PORT_PORT_POSIX_H_ - -#undef PLATFORM_IS_LITTLE_ENDIAN -#if defined(OS_MACOSX) - #include <machine/endian.h> - #if defined(__DARWIN_LITTLE_ENDIAN) && defined(__DARWIN_BYTE_ORDER) - #define PLATFORM_IS_LITTLE_ENDIAN \ - (__DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN) - #endif -#elif defined(OS_SOLARIS) - #include <sys/isa_defs.h> - #ifdef _LITTLE_ENDIAN - #define PLATFORM_IS_LITTLE_ENDIAN true - #else - #define PLATFORM_IS_LITTLE_ENDIAN false - #endif -#elif defined(OS_FREEBSD) || defined(OS_OPENBSD) ||\ - defined(OS_NETBSD) || defined(OS_DRAGONFLYBSD) - #include <sys/types.h> - #include <sys/endian.h> - #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN) -#elif defined(OS_HPUX) - #define PLATFORM_IS_LITTLE_ENDIAN false -#elif defined(OS_ANDROID) - // Due to a bug in the NDK x86 <sys/endian.h> definition, - // _BYTE_ORDER must be used instead of __BYTE_ORDER on Android. - // See http://code.google.com/p/android/issues/detail?id=39824 - #include <endian.h> - #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN) -#else - #include <endian.h> -#endif - -#include <pthread.h> -#ifdef SNAPPY -#include <snappy.h> -#endif -#include <stdint.h> -#include <string> -#include "port/atomic_pointer.h" - -#ifndef PLATFORM_IS_LITTLE_ENDIAN -#define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN) -#endif - -#if defined(OS_MACOSX) || defined(OS_SOLARIS) || defined(OS_FREEBSD) ||\ - defined(OS_NETBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) ||\ - defined(OS_ANDROID) || defined(OS_HPUX) || defined(CYGWIN) -// Use fread/fwrite/fflush on platforms without _unlocked variants -#define fread_unlocked fread -#define fwrite_unlocked fwrite -#define fflush_unlocked fflush -#endif - -#if defined(OS_FREEBSD) ||\ - defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) -// Use fsync() on platforms without fdatasync() -#define fdatasync fsync -#endif - -#if defined(OS_MACOSX) -#define fdatasync(fd) fcntl(fd, F_FULLFSYNC, 0) -#endif - -#if defined(OS_ANDROID) && __ANDROID_API__ < 9 -// fdatasync() was only introduced in API level 9 on Android. Use fsync() -// when targetting older platforms. 
-#define fdatasync fsync -#endif - -namespace leveldb { -namespace port { - -static const bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN; -#undef PLATFORM_IS_LITTLE_ENDIAN - -class CondVar; - -class Mutex { - public: - Mutex(); - ~Mutex(); - - void Lock(); - void Unlock(); - void AssertHeld() { } - - private: - friend class CondVar; - pthread_mutex_t mu_; - - // No copying - Mutex(const Mutex&); - void operator=(const Mutex&); -}; - -class CondVar { - public: - explicit CondVar(Mutex* mu); - ~CondVar(); - void Wait(); - void Signal(); - void SignalAll(); - private: - pthread_cond_t cv_; - Mutex* mu_; -}; - -typedef pthread_once_t OnceType; -#define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT -extern void InitOnce(OnceType* once, void (*initializer)()); - -inline bool Snappy_Compress(const char* input, size_t length, - ::std::string* output) { -#ifdef SNAPPY - output->resize(snappy::MaxCompressedLength(length)); - size_t outlen; - snappy::RawCompress(input, length, &(*output)[0], &outlen); - output->resize(outlen); - return true; -#endif - - return false; -} - -inline bool Snappy_GetUncompressedLength(const char* input, size_t length, - size_t* result) { -#ifdef SNAPPY - return snappy::GetUncompressedLength(input, length, result); -#else - return false; -#endif -} - -inline bool Snappy_Uncompress(const char* input, size_t length, - char* output) { -#ifdef SNAPPY - return snappy::RawUncompress(input, length, output); -#else - return false; -#endif -} - -inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) { - return false; -} - -bool HasAcceleratedCRC32C(); -uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size); - -} // namespace port -} // namespace leveldb - -#endif // STORAGE_LEVELDB_PORT_PORT_POSIX_H_ diff --git a/src/leveldb/port/port_posix_sse.cc b/src/leveldb/port/port_posix_sse.cc deleted file mode 100644 index 2d49c21dd8..0000000000 --- a/src/leveldb/port/port_posix_sse.cc +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2016 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// A portable implementation of crc32c, optimized to handle -// four bytes at a time. -// -// In a separate source file to allow this accelerated CRC32C function to be -// compiled with the appropriate compiler flags to enable x86 SSE 4.2 -// instructions. - -#include <stdint.h> -#include <string.h> -#include "port/port.h" - -#if defined(LEVELDB_PLATFORM_POSIX_SSE) - -#if defined(_MSC_VER) -#include <intrin.h> -#elif defined(__GNUC__) && defined(__SSE4_2__) -#include <nmmintrin.h> -#endif - -#endif // defined(LEVELDB_PLATFORM_POSIX_SSE) - -namespace leveldb { -namespace port { - -#if defined(LEVELDB_PLATFORM_POSIX_SSE) - -// Used to fetch a naturally-aligned 32-bit word in little endian byte-order -static inline uint32_t LE_LOAD32(const uint8_t *p) { - // SSE is x86 only, so ensured that |p| is always little-endian. - uint32_t word; - memcpy(&word, p, sizeof(word)); - return word; -} - -#if defined(_M_X64) || defined(__x86_64__) // LE_LOAD64 is only used on x64. 
- -// Used to fetch a naturally-aligned 64-bit word in little endian byte-order -static inline uint64_t LE_LOAD64(const uint8_t *p) { - uint64_t dword; - memcpy(&dword, p, sizeof(dword)); - return dword; -} - -#endif // defined(_M_X64) || defined(__x86_64__) - -#endif // defined(LEVELDB_PLATFORM_POSIX_SSE) - -// For further improvements see Intel publication at: -// http://download.intel.com/design/intarch/papers/323405.pdf -uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) { -#if !defined(LEVELDB_PLATFORM_POSIX_SSE) - return 0; -#else - - const uint8_t *p = reinterpret_cast<const uint8_t *>(buf); - const uint8_t *e = p + size; - uint32_t l = crc ^ 0xffffffffu; - -#define STEP1 do { \ - l = _mm_crc32_u8(l, *p++); \ -} while (0) -#define STEP4 do { \ - l = _mm_crc32_u32(l, LE_LOAD32(p)); \ - p += 4; \ -} while (0) -#define STEP8 do { \ - l = _mm_crc32_u64(l, LE_LOAD64(p)); \ - p += 8; \ -} while (0) - - if (size > 16) { - // Process unaligned bytes - for (unsigned int i = reinterpret_cast<uintptr_t>(p) % 8; i; --i) { - STEP1; - } - - // _mm_crc32_u64 is only available on x64. -#if defined(_M_X64) || defined(__x86_64__) - // Process 8 bytes at a time - while ((e-p) >= 8) { - STEP8; - } - // Process 4 bytes at a time - if ((e-p) >= 4) { - STEP4; - } -#else // !(defined(_M_X64) || defined(__x86_64__)) - // Process 4 bytes at a time - while ((e-p) >= 4) { - STEP4; - } -#endif // defined(_M_X64) || defined(__x86_64__) - } - // Process the last few bytes - while (p != e) { - STEP1; - } -#undef STEP8 -#undef STEP4 -#undef STEP1 - return l ^ 0xffffffffu; -#endif // defined(LEVELDB_PLATFORM_POSIX_SSE) -} - -} // namespace port -} // namespace leveldb diff --git a/src/leveldb/port/port_stdcxx.h b/src/leveldb/port/port_stdcxx.h new file mode 100644 index 0000000000..e9cb0e53af --- /dev/null +++ b/src/leveldb/port/port_stdcxx.h @@ -0,0 +1,153 @@ +// Copyright (c) 2018 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef STORAGE_LEVELDB_PORT_PORT_STDCXX_H_ +#define STORAGE_LEVELDB_PORT_PORT_STDCXX_H_ + +// port/port_config.h availability is automatically detected via __has_include +// in newer compilers. If LEVELDB_HAS_PORT_CONFIG_H is defined, it overrides the +// configuration detection. +#if defined(LEVELDB_HAS_PORT_CONFIG_H) + +#if LEVELDB_HAS_PORT_CONFIG_H +#include "port/port_config.h" +#endif // LEVELDB_HAS_PORT_CONFIG_H + +#elif defined(__has_include) + +#if __has_include("port/port_config.h") +#include "port/port_config.h" +#endif // __has_include("port/port_config.h") + +#endif // defined(LEVELDB_HAS_PORT_CONFIG_H) + +#if HAVE_CRC32C +#include <crc32c/crc32c.h> +#endif // HAVE_CRC32C +#if HAVE_SNAPPY +#include <snappy.h> +#endif // HAVE_SNAPPY + +#include <cassert> +#include <condition_variable> // NOLINT +#include <cstddef> +#include <cstdint> +#include <mutex> // NOLINT +#include <string> + +#include "port/thread_annotations.h" + +namespace leveldb { +namespace port { + +static const bool kLittleEndian = !LEVELDB_IS_BIG_ENDIAN; + +class CondVar; + +// Thinly wraps std::mutex. 
+class LOCKABLE Mutex { + public: + Mutex() = default; + ~Mutex() = default; + + Mutex(const Mutex&) = delete; + Mutex& operator=(const Mutex&) = delete; + + void Lock() EXCLUSIVE_LOCK_FUNCTION() { mu_.lock(); } + void Unlock() UNLOCK_FUNCTION() { mu_.unlock(); } + void AssertHeld() ASSERT_EXCLUSIVE_LOCK() {} + + private: + friend class CondVar; + std::mutex mu_; +}; + +// Thinly wraps std::condition_variable. +class CondVar { + public: + explicit CondVar(Mutex* mu) : mu_(mu) { assert(mu != nullptr); } + ~CondVar() = default; + + CondVar(const CondVar&) = delete; + CondVar& operator=(const CondVar&) = delete; + + void Wait() { + std::unique_lock<std::mutex> lock(mu_->mu_, std::adopt_lock); + cv_.wait(lock); + lock.release(); + } + void Signal() { cv_.notify_one(); } + void SignalAll() { cv_.notify_all(); } + + private: + std::condition_variable cv_; + Mutex* const mu_; +}; + +inline bool Snappy_Compress(const char* input, size_t length, + std::string* output) { +#if HAVE_SNAPPY + output->resize(snappy::MaxCompressedLength(length)); + size_t outlen; + snappy::RawCompress(input, length, &(*output)[0], &outlen); + output->resize(outlen); + return true; +#else + // Silence compiler warnings about unused arguments. + (void)input; + (void)length; + (void)output; +#endif // HAVE_SNAPPY + + return false; +} + +inline bool Snappy_GetUncompressedLength(const char* input, size_t length, + size_t* result) { +#if HAVE_SNAPPY + return snappy::GetUncompressedLength(input, length, result); +#else + // Silence compiler warnings about unused arguments. + (void)input; + (void)length; + (void)result; + return false; +#endif // HAVE_SNAPPY +} + +inline bool Snappy_Uncompress(const char* input, size_t length, char* output) { +#if HAVE_SNAPPY + return snappy::RawUncompress(input, length, output); +#else + // Silence compiler warnings about unused arguments. + (void)input; + (void)length; + (void)output; + return false; +#endif // HAVE_SNAPPY +} + +inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) { + // Silence compiler warnings about unused arguments. + (void)func; + (void)arg; + return false; +} + +inline uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) { +#if HAVE_CRC32C + return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t*>(buf), size); +#else + // Silence compiler warnings about unused arguments. + (void)crc; + (void)buf; + (void)size; + return 0; +#endif // HAVE_CRC32C +} + +} // namespace port +} // namespace leveldb + +#endif // STORAGE_LEVELDB_PORT_PORT_STDCXX_H_ diff --git a/src/leveldb/port/port_win.cc b/src/leveldb/port/port_win.cc deleted file mode 100644 index 1be9e8d5b0..0000000000 --- a/src/leveldb/port/port_win.cc +++ /dev/null @@ -1,158 +0,0 @@ -// LevelDB Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// See port_example.h for documentation for the following types/functions. - -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the University of California, Berkeley nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - -#include "port/port_win.h" - -#include <windows.h> -#include <cassert> -#include <intrin.h> - -namespace leveldb { -namespace port { - -Mutex::Mutex() : - cs_(NULL) { - assert(!cs_); - cs_ = static_cast<void *>(new CRITICAL_SECTION()); - ::InitializeCriticalSection(static_cast<CRITICAL_SECTION *>(cs_)); - assert(cs_); -} - -Mutex::~Mutex() { - assert(cs_); - ::DeleteCriticalSection(static_cast<CRITICAL_SECTION *>(cs_)); - delete static_cast<CRITICAL_SECTION *>(cs_); - cs_ = NULL; - assert(!cs_); -} - -void Mutex::Lock() { - assert(cs_); - ::EnterCriticalSection(static_cast<CRITICAL_SECTION *>(cs_)); -} - -void Mutex::Unlock() { - assert(cs_); - ::LeaveCriticalSection(static_cast<CRITICAL_SECTION *>(cs_)); -} - -void Mutex::AssertHeld() { - assert(cs_); - assert(1); -} - -CondVar::CondVar(Mutex* mu) : - waiting_(0), - mu_(mu), - sem1_(::CreateSemaphore(NULL, 0, 10000, NULL)), - sem2_(::CreateSemaphore(NULL, 0, 10000, NULL)) { - assert(mu_); -} - -CondVar::~CondVar() { - ::CloseHandle(sem1_); - ::CloseHandle(sem2_); -} - -void CondVar::Wait() { - mu_->AssertHeld(); - - wait_mtx_.Lock(); - ++waiting_; - wait_mtx_.Unlock(); - - mu_->Unlock(); - - // initiate handshake - ::WaitForSingleObject(sem1_, INFINITE); - ::ReleaseSemaphore(sem2_, 1, NULL); - mu_->Lock(); -} - -void CondVar::Signal() { - wait_mtx_.Lock(); - if (waiting_ > 0) { - --waiting_; - - // finalize handshake - ::ReleaseSemaphore(sem1_, 1, NULL); - ::WaitForSingleObject(sem2_, INFINITE); - } - wait_mtx_.Unlock(); -} - -void CondVar::SignalAll() { - wait_mtx_.Lock(); - ::ReleaseSemaphore(sem1_, waiting_, NULL); - while(waiting_ > 0) { - --waiting_; - ::WaitForSingleObject(sem2_, INFINITE); - } - wait_mtx_.Unlock(); -} - -AtomicPointer::AtomicPointer(void* v) { - Release_Store(v); -} - -void InitOnce(OnceType* once, void (*initializer)()) { - once->InitOnce(initializer); -} - -void* AtomicPointer::Acquire_Load() const { - void * p = NULL; - InterlockedExchangePointer(&p, rep_); - return p; -} - -void AtomicPointer::Release_Store(void* v) { - InterlockedExchangePointer(&rep_, v); -} - -void* AtomicPointer::NoBarrier_Load() const { - return rep_; -} - -void AtomicPointer::NoBarrier_Store(void* v) { - rep_ = v; -} - -bool HasAcceleratedCRC32C() { -#if defined(__x86_64__) || defined(__i386__) - int cpu_info[4]; - __cpuid(cpu_info, 
1); - return (cpu_info[2] & (1 << 20)) != 0; -#else - return false; -#endif -} - -} -} diff --git a/src/leveldb/port/port_win.h b/src/leveldb/port/port_win.h deleted file mode 100644 index 989c15cd91..0000000000 --- a/src/leveldb/port/port_win.h +++ /dev/null @@ -1,184 +0,0 @@ -// LevelDB Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. -// -// See port_example.h for documentation for the following types/functions. - -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the University of California, Berkeley nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - -#ifndef STORAGE_LEVELDB_PORT_PORT_WIN_H_ -#define STORAGE_LEVELDB_PORT_PORT_WIN_H_ - -#ifdef _MSC_VER -#if !(_MSC_VER >= 1900) -#define snprintf _snprintf -#endif -#define close _close -#define fread_unlocked _fread_nolock -#ifdef _WIN64 -#define ssize_t int64_t -#else -#define ssize_t int32_t -#endif -#endif - -#include <string> -#include <stdint.h> -#ifdef SNAPPY -#include <snappy.h> -#endif - -namespace leveldb { -namespace port { - -// Windows is little endian (for now :p) -static const bool kLittleEndian = true; - -class CondVar; - -class Mutex { - public: - Mutex(); - ~Mutex(); - - void Lock(); - void Unlock(); - void AssertHeld(); - - private: - friend class CondVar; - // critical sections are more efficient than mutexes - // but they are not recursive and can only be used to synchronize threads within the same process - // we use opaque void * to avoid including windows.h in port_win.h - void * cs_; - - // No copying - Mutex(const Mutex&); - void operator=(const Mutex&); -}; - -// the Win32 API offers a dependable condition variable mechanism, but only starting with -// Windows 2008 and Vista -// no matter what we will implement our own condition variable with a semaphore -// implementation as described in a paper written by Andrew D. 
Birrell in 2003 -class CondVar { - public: - explicit CondVar(Mutex* mu); - ~CondVar(); - void Wait(); - void Signal(); - void SignalAll(); - private: - Mutex* mu_; - - Mutex wait_mtx_; - long waiting_; - - void * sem1_; - void * sem2_; - - -}; - -class OnceType { -public: -// OnceType() : init_(false) {} - OnceType(const OnceType &once) : init_(once.init_) {} - OnceType(bool f) : init_(f) {} - void InitOnce(void (*initializer)()) { - mutex_.Lock(); - if (!init_) { - init_ = true; - initializer(); - } - mutex_.Unlock(); - } - -private: - bool init_; - Mutex mutex_; -}; - -#define LEVELDB_ONCE_INIT false -extern void InitOnce(port::OnceType*, void (*initializer)()); - -// Storage for a lock-free pointer -class AtomicPointer { - private: - void * rep_; - public: - AtomicPointer() : rep_(NULL) { } - explicit AtomicPointer(void* v); - void* Acquire_Load() const; - - void Release_Store(void* v); - - void* NoBarrier_Load() const; - - void NoBarrier_Store(void* v); -}; - -inline bool Snappy_Compress(const char* input, size_t length, - ::std::string* output) { -#ifdef SNAPPY - output->resize(snappy::MaxCompressedLength(length)); - size_t outlen; - snappy::RawCompress(input, length, &(*output)[0], &outlen); - output->resize(outlen); - return true; -#endif - - return false; -} - -inline bool Snappy_GetUncompressedLength(const char* input, size_t length, - size_t* result) { -#ifdef SNAPPY - return snappy::GetUncompressedLength(input, length, result); -#else - return false; -#endif -} - -inline bool Snappy_Uncompress(const char* input, size_t length, - char* output) { -#ifdef SNAPPY - return snappy::RawUncompress(input, length, output); -#else - return false; -#endif -} - -inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) { - return false; -} - -bool HasAcceleratedCRC32C(); -uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size); - -} -} - -#endif // STORAGE_LEVELDB_PORT_PORT_WIN_H_ diff --git a/src/leveldb/port/thread_annotations.h b/src/leveldb/port/thread_annotations.h index 9470ef587c..1547df908f 100644 --- a/src/leveldb/port/thread_annotations.h +++ b/src/leveldb/port/thread_annotations.h @@ -5,56 +5,104 @@ #ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_ #define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_ -// Some environments provide custom macros to aid in static thread-safety -// analysis. Provide empty definitions of such macros unless they are already -// defined. +// Use Clang's thread safety analysis annotations when available. In other +// environments, the macros receive empty definitions. +// Usage documentation: https://clang.llvm.org/docs/ThreadSafetyAnalysis.html + +#if !defined(THREAD_ANNOTATION_ATTRIBUTE__) + +#if defined(__clang__) + +#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) +#else +#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op +#endif + +#endif // !defined(THREAD_ANNOTATION_ATTRIBUTE__) + +#ifndef GUARDED_BY +#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) +#endif + +#ifndef PT_GUARDED_BY +#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) +#endif + +#ifndef ACQUIRED_AFTER +#define ACQUIRED_AFTER(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) +#endif + +#ifndef ACQUIRED_BEFORE +#define ACQUIRED_BEFORE(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) +#endif #ifndef EXCLUSIVE_LOCKS_REQUIRED -#define EXCLUSIVE_LOCKS_REQUIRED(...) +#define EXCLUSIVE_LOCKS_REQUIRED(...) 
\ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__)) #endif #ifndef SHARED_LOCKS_REQUIRED -#define SHARED_LOCKS_REQUIRED(...) +#define SHARED_LOCKS_REQUIRED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__)) #endif #ifndef LOCKS_EXCLUDED -#define LOCKS_EXCLUDED(...) +#define LOCKS_EXCLUDED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) #endif #ifndef LOCK_RETURNED -#define LOCK_RETURNED(x) +#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) #endif #ifndef LOCKABLE -#define LOCKABLE +#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable) #endif #ifndef SCOPED_LOCKABLE -#define SCOPED_LOCKABLE +#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) #endif #ifndef EXCLUSIVE_LOCK_FUNCTION -#define EXCLUSIVE_LOCK_FUNCTION(...) +#define EXCLUSIVE_LOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__)) #endif #ifndef SHARED_LOCK_FUNCTION -#define SHARED_LOCK_FUNCTION(...) +#define SHARED_LOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__)) #endif #ifndef EXCLUSIVE_TRYLOCK_FUNCTION -#define EXCLUSIVE_TRYLOCK_FUNCTION(...) +#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__)) #endif #ifndef SHARED_TRYLOCK_FUNCTION -#define SHARED_TRYLOCK_FUNCTION(...) +#define SHARED_TRYLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__)) #endif #ifndef UNLOCK_FUNCTION -#define UNLOCK_FUNCTION(...) +#define UNLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__)) #endif #ifndef NO_THREAD_SAFETY_ANALYSIS -#define NO_THREAD_SAFETY_ANALYSIS +#define NO_THREAD_SAFETY_ANALYSIS \ + THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) +#endif + +#ifndef ASSERT_EXCLUSIVE_LOCK +#define ASSERT_EXCLUSIVE_LOCK(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__)) +#endif + +#ifndef ASSERT_SHARED_LOCK +#define ASSERT_SHARED_LOCK(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__)) #endif #endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_ diff --git a/src/leveldb/port/win/stdint.h b/src/leveldb/port/win/stdint.h deleted file mode 100644 index 39edd0db13..0000000000 --- a/src/leveldb/port/win/stdint.h +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -// MSVC didn't ship with this file until the 2010 version. - -#ifndef STORAGE_LEVELDB_PORT_WIN_STDINT_H_ -#define STORAGE_LEVELDB_PORT_WIN_STDINT_H_ - -#if !defined(_MSC_VER) -#error This file should only be included when compiling with MSVC. -#endif - -// Define C99 equivalent types. 
-typedef signed char int8_t; -typedef signed short int16_t; -typedef signed int int32_t; -typedef signed long long int64_t; -typedef unsigned char uint8_t; -typedef unsigned short uint16_t; -typedef unsigned int uint32_t; -typedef unsigned long long uint64_t; - -#endif // STORAGE_LEVELDB_PORT_WIN_STDINT_H_ diff --git a/src/leveldb/table/block.cc b/src/leveldb/table/block.cc index 43e402c9c0..2fe89eaa45 100644 --- a/src/leveldb/table/block.cc +++ b/src/leveldb/table/block.cc @@ -6,8 +6,10 @@ #include "table/block.h" -#include <vector> #include <algorithm> +#include <cstdint> +#include <vector> + #include "leveldb/comparator.h" #include "table/format.h" #include "util/coding.h" @@ -27,7 +29,7 @@ Block::Block(const BlockContents& contents) if (size_ < sizeof(uint32_t)) { size_ = 0; // Error marker } else { - size_t max_restarts_allowed = (size_-sizeof(uint32_t)) / sizeof(uint32_t); + size_t max_restarts_allowed = (size_ - sizeof(uint32_t)) / sizeof(uint32_t); if (NumRestarts() > max_restarts_allowed) { // The size is too small for NumRestarts() size_ = 0; @@ -48,27 +50,26 @@ Block::~Block() { // and the length of the value in "*shared", "*non_shared", and // "*value_length", respectively. Will not dereference past "limit". // -// If any errors are detected, returns NULL. Otherwise, returns a +// If any errors are detected, returns nullptr. Otherwise, returns a // pointer to the key delta (just past the three decoded values). static inline const char* DecodeEntry(const char* p, const char* limit, - uint32_t* shared, - uint32_t* non_shared, + uint32_t* shared, uint32_t* non_shared, uint32_t* value_length) { - if (limit - p < 3) return NULL; - *shared = reinterpret_cast<const unsigned char*>(p)[0]; - *non_shared = reinterpret_cast<const unsigned char*>(p)[1]; - *value_length = reinterpret_cast<const unsigned char*>(p)[2]; + if (limit - p < 3) return nullptr; + *shared = reinterpret_cast<const uint8_t*>(p)[0]; + *non_shared = reinterpret_cast<const uint8_t*>(p)[1]; + *value_length = reinterpret_cast<const uint8_t*>(p)[2]; if ((*shared | *non_shared | *value_length) < 128) { // Fast path: all three values are encoded in one byte each p += 3; } else { - if ((p = GetVarint32Ptr(p, limit, shared)) == NULL) return NULL; - if ((p = GetVarint32Ptr(p, limit, non_shared)) == NULL) return NULL; - if ((p = GetVarint32Ptr(p, limit, value_length)) == NULL) return NULL; + if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr; + if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr; + if ((p = GetVarint32Ptr(p, limit, value_length)) == nullptr) return nullptr; } if (static_cast<uint32_t>(limit - p) < (*non_shared + *value_length)) { - return NULL; + return nullptr; } return p; } @@ -76,9 +77,9 @@ static inline const char* DecodeEntry(const char* p, const char* limit, class Block::Iter : public Iterator { private: const Comparator* const comparator_; - const char* const data_; // underlying block contents - uint32_t const restarts_; // Offset of restart array (list of fixed32) - uint32_t const num_restarts_; // Number of uint32_t entries in restart array + const char* const data_; // underlying block contents + uint32_t const restarts_; // Offset of restart array (list of fixed32) + uint32_t const num_restarts_; // Number of uint32_t entries in restart array // current_ is offset in data_ of current entry. 
>= restarts_ if !Valid uint32_t current_; @@ -112,9 +113,7 @@ class Block::Iter : public Iterator { } public: - Iter(const Comparator* comparator, - const char* data, - uint32_t restarts, + Iter(const Comparator* comparator, const char* data, uint32_t restarts, uint32_t num_restarts) : comparator_(comparator), data_(data), @@ -125,23 +124,23 @@ class Block::Iter : public Iterator { assert(num_restarts_ > 0); } - virtual bool Valid() const { return current_ < restarts_; } - virtual Status status() const { return status_; } - virtual Slice key() const { + bool Valid() const override { return current_ < restarts_; } + Status status() const override { return status_; } + Slice key() const override { assert(Valid()); return key_; } - virtual Slice value() const { + Slice value() const override { assert(Valid()); return value_; } - virtual void Next() { + void Next() override { assert(Valid()); ParseNextKey(); } - virtual void Prev() { + void Prev() override { assert(Valid()); // Scan backwards to a restart point before current_ @@ -162,7 +161,7 @@ class Block::Iter : public Iterator { } while (ParseNextKey() && NextEntryOffset() < original); } - virtual void Seek(const Slice& target) { + void Seek(const Slice& target) override { // Binary search in restart array to find the last restart point // with a key < target uint32_t left = 0; @@ -171,10 +170,10 @@ class Block::Iter : public Iterator { uint32_t mid = (left + right + 1) / 2; uint32_t region_offset = GetRestartPoint(mid); uint32_t shared, non_shared, value_length; - const char* key_ptr = DecodeEntry(data_ + region_offset, - data_ + restarts_, - &shared, &non_shared, &value_length); - if (key_ptr == NULL || (shared != 0)) { + const char* key_ptr = + DecodeEntry(data_ + region_offset, data_ + restarts_, &shared, + &non_shared, &value_length); + if (key_ptr == nullptr || (shared != 0)) { CorruptionError(); return; } @@ -202,12 +201,12 @@ class Block::Iter : public Iterator { } } - virtual void SeekToFirst() { + void SeekToFirst() override { SeekToRestartPoint(0); ParseNextKey(); } - virtual void SeekToLast() { + void SeekToLast() override { SeekToRestartPoint(num_restarts_ - 1); while (ParseNextKey() && NextEntryOffset() < restarts_) { // Keep skipping @@ -237,7 +236,7 @@ class Block::Iter : public Iterator { // Decode next entry uint32_t shared, non_shared, value_length; p = DecodeEntry(p, limit, &shared, &non_shared, &value_length); - if (p == NULL || key_.size() < shared) { + if (p == nullptr || key_.size() < shared) { CorruptionError(); return false; } else { @@ -253,7 +252,7 @@ class Block::Iter : public Iterator { } }; -Iterator* Block::NewIterator(const Comparator* cmp) { +Iterator* Block::NewIterator(const Comparator* comparator) { if (size_ < sizeof(uint32_t)) { return NewErrorIterator(Status::Corruption("bad block contents")); } @@ -261,7 +260,7 @@ Iterator* Block::NewIterator(const Comparator* cmp) { if (num_restarts == 0) { return NewEmptyIterator(); } else { - return new Iter(cmp, data_, restart_offset_, num_restarts); + return new Iter(comparator, data_, restart_offset_, num_restarts); } } diff --git a/src/leveldb/table/block.h b/src/leveldb/table/block.h index 2493eb9f9f..c8f1f7b436 100644 --- a/src/leveldb/table/block.h +++ b/src/leveldb/table/block.h @@ -7,6 +7,7 @@ #include <stddef.h> #include <stdint.h> + #include "leveldb/iterator.h" namespace leveldb { @@ -19,24 +20,23 @@ class Block { // Initialize the block with the specified contents. 
explicit Block(const BlockContents& contents); + Block(const Block&) = delete; + Block& operator=(const Block&) = delete; + ~Block(); size_t size() const { return size_; } Iterator* NewIterator(const Comparator* comparator); private: + class Iter; + uint32_t NumRestarts() const; const char* data_; size_t size_; - uint32_t restart_offset_; // Offset in data_ of restart array - bool owned_; // Block owns data_[] - - // No copying allowed - Block(const Block&); - void operator=(const Block&); - - class Iter; + uint32_t restart_offset_; // Offset in data_ of restart array + bool owned_; // Block owns data_[] }; } // namespace leveldb diff --git a/src/leveldb/table/block_builder.cc b/src/leveldb/table/block_builder.cc index db660cd07c..919cff5c93 100644 --- a/src/leveldb/table/block_builder.cc +++ b/src/leveldb/table/block_builder.cc @@ -28,36 +28,35 @@ #include "table/block_builder.h" -#include <algorithm> #include <assert.h> + +#include <algorithm> + #include "leveldb/comparator.h" -#include "leveldb/table_builder.h" +#include "leveldb/options.h" #include "util/coding.h" namespace leveldb { BlockBuilder::BlockBuilder(const Options* options) - : options_(options), - restarts_(), - counter_(0), - finished_(false) { + : options_(options), restarts_(), counter_(0), finished_(false) { assert(options->block_restart_interval >= 1); - restarts_.push_back(0); // First restart point is at offset 0 + restarts_.push_back(0); // First restart point is at offset 0 } void BlockBuilder::Reset() { buffer_.clear(); restarts_.clear(); - restarts_.push_back(0); // First restart point is at offset 0 + restarts_.push_back(0); // First restart point is at offset 0 counter_ = 0; finished_ = false; last_key_.clear(); } size_t BlockBuilder::CurrentSizeEstimate() const { - return (buffer_.size() + // Raw data buffer - restarts_.size() * sizeof(uint32_t) + // Restart array - sizeof(uint32_t)); // Restart array length + return (buffer_.size() + // Raw data buffer + restarts_.size() * sizeof(uint32_t) + // Restart array + sizeof(uint32_t)); // Restart array length } Slice BlockBuilder::Finish() { @@ -74,7 +73,7 @@ void BlockBuilder::Add(const Slice& key, const Slice& value) { Slice last_key_piece(last_key_); assert(!finished_); assert(counter_ <= options_->block_restart_interval); - assert(buffer_.empty() // No values yet? + assert(buffer_.empty() // No values yet? || options_->comparator->Compare(key, last_key_piece) > 0); size_t shared = 0; if (counter_ < options_->block_restart_interval) { diff --git a/src/leveldb/table/block_builder.h b/src/leveldb/table/block_builder.h index 4fbcb33972..f91f5e6d47 100644 --- a/src/leveldb/table/block_builder.h +++ b/src/leveldb/table/block_builder.h @@ -5,9 +5,10 @@ #ifndef STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_ #define STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_ +#include <stdint.h> + #include <vector> -#include <stdint.h> #include "leveldb/slice.h" namespace leveldb { @@ -18,6 +19,9 @@ class BlockBuilder { public: explicit BlockBuilder(const Options* options); + BlockBuilder(const BlockBuilder&) = delete; + BlockBuilder& operator=(const BlockBuilder&) = delete; + // Reset the contents as if the BlockBuilder was just constructed. 
void Reset(); @@ -35,21 +39,15 @@ class BlockBuilder { size_t CurrentSizeEstimate() const; // Return true iff no entries have been added since the last Reset() - bool empty() const { - return buffer_.empty(); - } + bool empty() const { return buffer_.empty(); } private: - const Options* options_; - std::string buffer_; // Destination buffer - std::vector<uint32_t> restarts_; // Restart points - int counter_; // Number of entries emitted since restart - bool finished_; // Has Finish() been called? - std::string last_key_; - - // No copying allowed - BlockBuilder(const BlockBuilder&); - void operator=(const BlockBuilder&); + const Options* options_; + std::string buffer_; // Destination buffer + std::vector<uint32_t> restarts_; // Restart points + int counter_; // Number of entries emitted since restart + bool finished_; // Has Finish() been called? + std::string last_key_; }; } // namespace leveldb diff --git a/src/leveldb/table/filter_block.cc b/src/leveldb/table/filter_block.cc index 1ed5134170..09ec0094bd 100644 --- a/src/leveldb/table/filter_block.cc +++ b/src/leveldb/table/filter_block.cc @@ -16,8 +16,7 @@ static const size_t kFilterBaseLg = 11; static const size_t kFilterBase = 1 << kFilterBaseLg; FilterBlockBuilder::FilterBlockBuilder(const FilterPolicy* policy) - : policy_(policy) { -} + : policy_(policy) {} void FilterBlockBuilder::StartBlock(uint64_t block_offset) { uint64_t filter_index = (block_offset / kFilterBase); @@ -62,7 +61,7 @@ void FilterBlockBuilder::GenerateFilter() { tmp_keys_.resize(num_keys); for (size_t i = 0; i < num_keys; i++) { const char* base = keys_.data() + start_[i]; - size_t length = start_[i+1] - start_[i]; + size_t length = start_[i + 1] - start_[i]; tmp_keys_[i] = Slice(base, length); } @@ -77,14 +76,10 @@ void FilterBlockBuilder::GenerateFilter() { FilterBlockReader::FilterBlockReader(const FilterPolicy* policy, const Slice& contents) - : policy_(policy), - data_(NULL), - offset_(NULL), - num_(0), - base_lg_(0) { + : policy_(policy), data_(nullptr), offset_(nullptr), num_(0), base_lg_(0) { size_t n = contents.size(); if (n < 5) return; // 1 byte for base_lg_ and 4 for start of offset array - base_lg_ = contents[n-1]; + base_lg_ = contents[n - 1]; uint32_t last_word = DecodeFixed32(contents.data() + n - 5); if (last_word > n - 5) return; data_ = contents.data(); @@ -95,8 +90,8 @@ FilterBlockReader::FilterBlockReader(const FilterPolicy* policy, bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) { uint64_t index = block_offset >> base_lg_; if (index < num_) { - uint32_t start = DecodeFixed32(offset_ + index*4); - uint32_t limit = DecodeFixed32(offset_ + index*4 + 4); + uint32_t start = DecodeFixed32(offset_ + index * 4); + uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4); if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) { Slice filter = Slice(data_ + start, limit - start); return policy_->KeyMayMatch(key, filter); @@ -108,4 +103,4 @@ bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) { return true; // Errors are treated as potential matches } -} +} // namespace leveldb diff --git a/src/leveldb/table/filter_block.h b/src/leveldb/table/filter_block.h index c67d010bd1..73b5399249 100644 --- a/src/leveldb/table/filter_block.h +++ b/src/leveldb/table/filter_block.h @@ -11,8 +11,10 @@ #include <stddef.h> #include <stdint.h> + #include <string> #include <vector> + #include "leveldb/slice.h" #include "util/hash.h" @@ -30,6 +32,9 @@ class FilterBlockBuilder { public: explicit 
FilterBlockBuilder(const FilterPolicy*); + FilterBlockBuilder(const FilterBlockBuilder&) = delete; + FilterBlockBuilder& operator=(const FilterBlockBuilder&) = delete; + void StartBlock(uint64_t block_offset); void AddKey(const Slice& key); Slice Finish(); @@ -38,20 +43,16 @@ class FilterBlockBuilder { void GenerateFilter(); const FilterPolicy* policy_; - std::string keys_; // Flattened key contents - std::vector<size_t> start_; // Starting index in keys_ of each key - std::string result_; // Filter data computed so far - std::vector<Slice> tmp_keys_; // policy_->CreateFilter() argument + std::string keys_; // Flattened key contents + std::vector<size_t> start_; // Starting index in keys_ of each key + std::string result_; // Filter data computed so far + std::vector<Slice> tmp_keys_; // policy_->CreateFilter() argument std::vector<uint32_t> filter_offsets_; - - // No copying allowed - FilterBlockBuilder(const FilterBlockBuilder&); - void operator=(const FilterBlockBuilder&); }; class FilterBlockReader { public: - // REQUIRES: "contents" and *policy must stay live while *this is live. + // REQUIRES: "contents" and *policy must stay live while *this is live. FilterBlockReader(const FilterPolicy* policy, const Slice& contents); bool KeyMayMatch(uint64_t block_offset, const Slice& key); @@ -63,6 +64,6 @@ class FilterBlockReader { size_t base_lg_; // Encoding parameter (see kFilterBaseLg in .cc file) }; -} +} // namespace leveldb #endif // STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_ diff --git a/src/leveldb/table/filter_block_test.cc b/src/leveldb/table/filter_block_test.cc index 8c4a4741f2..8b33bbdd18 100644 --- a/src/leveldb/table/filter_block_test.cc +++ b/src/leveldb/table/filter_block_test.cc @@ -16,18 +16,16 @@ namespace leveldb { // For testing: emit an array with one hash value per key class TestHashFilter : public FilterPolicy { public: - virtual const char* Name() const { - return "TestHashFilter"; - } + const char* Name() const override { return "TestHashFilter"; } - virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const { + void CreateFilter(const Slice* keys, int n, std::string* dst) const override { for (int i = 0; i < n; i++) { uint32_t h = Hash(keys[i].data(), keys[i].size(), 1); PutFixed32(dst, h); } } - virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const { + bool KeyMayMatch(const Slice& key, const Slice& filter) const override { uint32_t h = Hash(key.data(), key.size(), 1); for (size_t i = 0; i + 4 <= filter.size(); i += 4) { if (h == DecodeFixed32(filter.data() + i)) { @@ -69,8 +67,8 @@ TEST(FilterBlockTest, SingleChunk) { ASSERT_TRUE(reader.KeyMayMatch(100, "box")); ASSERT_TRUE(reader.KeyMayMatch(100, "hello")); ASSERT_TRUE(reader.KeyMayMatch(100, "foo")); - ASSERT_TRUE(! reader.KeyMayMatch(100, "missing")); - ASSERT_TRUE(! reader.KeyMayMatch(100, "other")); + ASSERT_TRUE(!reader.KeyMayMatch(100, "missing")); + ASSERT_TRUE(!reader.KeyMayMatch(100, "other")); } TEST(FilterBlockTest, MultiChunk) { @@ -99,30 +97,28 @@ TEST(FilterBlockTest, MultiChunk) { // Check first filter ASSERT_TRUE(reader.KeyMayMatch(0, "foo")); ASSERT_TRUE(reader.KeyMayMatch(2000, "bar")); - ASSERT_TRUE(! reader.KeyMayMatch(0, "box")); - ASSERT_TRUE(! reader.KeyMayMatch(0, "hello")); + ASSERT_TRUE(!reader.KeyMayMatch(0, "box")); + ASSERT_TRUE(!reader.KeyMayMatch(0, "hello")); // Check second filter ASSERT_TRUE(reader.KeyMayMatch(3100, "box")); - ASSERT_TRUE(! reader.KeyMayMatch(3100, "foo")); - ASSERT_TRUE(! reader.KeyMayMatch(3100, "bar")); - ASSERT_TRUE(! 
reader.KeyMayMatch(3100, "hello")); + ASSERT_TRUE(!reader.KeyMayMatch(3100, "foo")); + ASSERT_TRUE(!reader.KeyMayMatch(3100, "bar")); + ASSERT_TRUE(!reader.KeyMayMatch(3100, "hello")); // Check third filter (empty) - ASSERT_TRUE(! reader.KeyMayMatch(4100, "foo")); - ASSERT_TRUE(! reader.KeyMayMatch(4100, "bar")); - ASSERT_TRUE(! reader.KeyMayMatch(4100, "box")); - ASSERT_TRUE(! reader.KeyMayMatch(4100, "hello")); + ASSERT_TRUE(!reader.KeyMayMatch(4100, "foo")); + ASSERT_TRUE(!reader.KeyMayMatch(4100, "bar")); + ASSERT_TRUE(!reader.KeyMayMatch(4100, "box")); + ASSERT_TRUE(!reader.KeyMayMatch(4100, "hello")); // Check last filter ASSERT_TRUE(reader.KeyMayMatch(9000, "box")); ASSERT_TRUE(reader.KeyMayMatch(9000, "hello")); - ASSERT_TRUE(! reader.KeyMayMatch(9000, "foo")); - ASSERT_TRUE(! reader.KeyMayMatch(9000, "bar")); + ASSERT_TRUE(!reader.KeyMayMatch(9000, "foo")); + ASSERT_TRUE(!reader.KeyMayMatch(9000, "bar")); } } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/table/format.cc b/src/leveldb/table/format.cc index 285e1c0de3..a3d67de2e4 100644 --- a/src/leveldb/table/format.cc +++ b/src/leveldb/table/format.cc @@ -21,8 +21,7 @@ void BlockHandle::EncodeTo(std::string* dst) const { } Status BlockHandle::DecodeFrom(Slice* input) { - if (GetVarint64(input, &offset_) && - GetVarint64(input, &size_)) { + if (GetVarint64(input, &offset_) && GetVarint64(input, &size_)) { return Status::OK(); } else { return Status::Corruption("bad block handle"); @@ -62,10 +61,8 @@ Status Footer::DecodeFrom(Slice* input) { return result; } -Status ReadBlock(RandomAccessFile* file, - const ReadOptions& options, - const BlockHandle& handle, - BlockContents* result) { +Status ReadBlock(RandomAccessFile* file, const ReadOptions& options, + const BlockHandle& handle, BlockContents* result) { result->data = Slice(); result->cachable = false; result->heap_allocated = false; @@ -86,7 +83,7 @@ Status ReadBlock(RandomAccessFile* file, } // Check the crc of the type and the block contents - const char* data = contents.data(); // Pointer to where Read put the data + const char* data = contents.data(); // Pointer to where Read put the data if (options.verify_checksums) { const uint32_t crc = crc32c::Unmask(DecodeFixed32(data + n + 1)); const uint32_t actual = crc32c::Value(data, n + 1); diff --git a/src/leveldb/table/format.h b/src/leveldb/table/format.h index 6c0b80c017..e49dfdc047 100644 --- a/src/leveldb/table/format.h +++ b/src/leveldb/table/format.h @@ -5,8 +5,10 @@ #ifndef STORAGE_LEVELDB_TABLE_FORMAT_H_ #define STORAGE_LEVELDB_TABLE_FORMAT_H_ -#include <string> #include <stdint.h> + +#include <string> + #include "leveldb/slice.h" #include "leveldb/status.h" #include "leveldb/table_builder.h" @@ -21,6 +23,9 @@ struct ReadOptions; // block or a meta block. class BlockHandle { public: + // Maximum encoding length of a BlockHandle + enum { kMaxEncodedLength = 10 + 10 }; + BlockHandle(); // The offset of the block in the file. @@ -34,9 +39,6 @@ class BlockHandle { void EncodeTo(std::string* dst) const; Status DecodeFrom(Slice* input); - // Maximum encoding length of a BlockHandle - enum { kMaxEncodedLength = 10 + 10 }; - private: uint64_t offset_; uint64_t size_; @@ -46,30 +48,24 @@ class BlockHandle { // end of every table file. class Footer { public: - Footer() { } + // Encoded length of a Footer. 
Note that the serialization of a + // Footer will always occupy exactly this many bytes. It consists + // of two block handles and a magic number. + enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 }; + + Footer() = default; // The block handle for the metaindex block of the table const BlockHandle& metaindex_handle() const { return metaindex_handle_; } void set_metaindex_handle(const BlockHandle& h) { metaindex_handle_ = h; } // The block handle for the index block of the table - const BlockHandle& index_handle() const { - return index_handle_; - } - void set_index_handle(const BlockHandle& h) { - index_handle_ = h; - } + const BlockHandle& index_handle() const { return index_handle_; } + void set_index_handle(const BlockHandle& h) { index_handle_ = h; } void EncodeTo(std::string* dst) const; Status DecodeFrom(Slice* input); - // Encoded length of a Footer. Note that the serialization of a - // Footer will always occupy exactly this many bytes. It consists - // of two block handles and a magic number. - enum { - kEncodedLength = 2*BlockHandle::kMaxEncodedLength + 8 - }; - private: BlockHandle metaindex_handle_; BlockHandle index_handle_; @@ -91,17 +87,13 @@ struct BlockContents { // Read the block identified by "handle" from "file". On failure // return non-OK. On success fill *result and return OK. -extern Status ReadBlock(RandomAccessFile* file, - const ReadOptions& options, - const BlockHandle& handle, - BlockContents* result); +Status ReadBlock(RandomAccessFile* file, const ReadOptions& options, + const BlockHandle& handle, BlockContents* result); // Implementation details follow. Clients should ignore, inline BlockHandle::BlockHandle() - : offset_(~static_cast<uint64_t>(0)), - size_(~static_cast<uint64_t>(0)) { -} + : offset_(~static_cast<uint64_t>(0)), size_(~static_cast<uint64_t>(0)) {} } // namespace leveldb diff --git a/src/leveldb/table/iterator.cc b/src/leveldb/table/iterator.cc index 3d1c87fdec..dfef083d4d 100644 --- a/src/leveldb/table/iterator.cc +++ b/src/leveldb/table/iterator.cc @@ -7,58 +7,67 @@ namespace leveldb { Iterator::Iterator() { - cleanup_.function = NULL; - cleanup_.next = NULL; + cleanup_head_.function = nullptr; + cleanup_head_.next = nullptr; } Iterator::~Iterator() { - if (cleanup_.function != NULL) { - (*cleanup_.function)(cleanup_.arg1, cleanup_.arg2); - for (Cleanup* c = cleanup_.next; c != NULL; ) { - (*c->function)(c->arg1, c->arg2); - Cleanup* next = c->next; - delete c; - c = next; + if (!cleanup_head_.IsEmpty()) { + cleanup_head_.Run(); + for (CleanupNode* node = cleanup_head_.next; node != nullptr;) { + node->Run(); + CleanupNode* next_node = node->next; + delete node; + node = next_node; } } } void Iterator::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) { - assert(func != NULL); - Cleanup* c; - if (cleanup_.function == NULL) { - c = &cleanup_; + assert(func != nullptr); + CleanupNode* node; + if (cleanup_head_.IsEmpty()) { + node = &cleanup_head_; } else { - c = new Cleanup; - c->next = cleanup_.next; - cleanup_.next = c; + node = new CleanupNode(); + node->next = cleanup_head_.next; + cleanup_head_.next = node; } - c->function = func; - c->arg1 = arg1; - c->arg2 = arg2; + node->function = func; + node->arg1 = arg1; + node->arg2 = arg2; } namespace { + class EmptyIterator : public Iterator { public: - EmptyIterator(const Status& s) : status_(s) { } - virtual bool Valid() const { return false; } - virtual void Seek(const Slice& target) { } - virtual void SeekToFirst() { } - virtual void SeekToLast() { } - virtual void 
Next() { assert(false); } - virtual void Prev() { assert(false); } - Slice key() const { assert(false); return Slice(); } - Slice value() const { assert(false); return Slice(); } - virtual Status status() const { return status_; } + EmptyIterator(const Status& s) : status_(s) {} + ~EmptyIterator() override = default; + + bool Valid() const override { return false; } + void Seek(const Slice& target) override {} + void SeekToFirst() override {} + void SeekToLast() override {} + void Next() override { assert(false); } + void Prev() override { assert(false); } + Slice key() const override { + assert(false); + return Slice(); + } + Slice value() const override { + assert(false); + return Slice(); + } + Status status() const override { return status_; } + private: Status status_; }; -} // namespace -Iterator* NewEmptyIterator() { - return new EmptyIterator(Status::OK()); -} +} // anonymous namespace + +Iterator* NewEmptyIterator() { return new EmptyIterator(Status::OK()); } Iterator* NewErrorIterator(const Status& status) { return new EmptyIterator(status); diff --git a/src/leveldb/table/iterator_wrapper.h b/src/leveldb/table/iterator_wrapper.h index f410c3fabe..c230572529 100644 --- a/src/leveldb/table/iterator_wrapper.h +++ b/src/leveldb/table/iterator_wrapper.h @@ -16,10 +16,8 @@ namespace leveldb { // cache locality. class IteratorWrapper { public: - IteratorWrapper(): iter_(NULL), valid_(false) { } - explicit IteratorWrapper(Iterator* iter): iter_(NULL) { - Set(iter); - } + IteratorWrapper() : iter_(nullptr), valid_(false) {} + explicit IteratorWrapper(Iterator* iter) : iter_(nullptr) { Set(iter); } ~IteratorWrapper() { delete iter_; } Iterator* iter() const { return iter_; } @@ -28,25 +26,53 @@ class IteratorWrapper { void Set(Iterator* iter) { delete iter_; iter_ = iter; - if (iter_ == NULL) { + if (iter_ == nullptr) { valid_ = false; } else { Update(); } } - // Iterator interface methods - bool Valid() const { return valid_; } - Slice key() const { assert(Valid()); return key_; } - Slice value() const { assert(Valid()); return iter_->value(); } - // Methods below require iter() != NULL - Status status() const { assert(iter_); return iter_->status(); } - void Next() { assert(iter_); iter_->Next(); Update(); } - void Prev() { assert(iter_); iter_->Prev(); Update(); } - void Seek(const Slice& k) { assert(iter_); iter_->Seek(k); Update(); } - void SeekToFirst() { assert(iter_); iter_->SeekToFirst(); Update(); } - void SeekToLast() { assert(iter_); iter_->SeekToLast(); Update(); } + bool Valid() const { return valid_; } + Slice key() const { + assert(Valid()); + return key_; + } + Slice value() const { + assert(Valid()); + return iter_->value(); + } + // Methods below require iter() != nullptr + Status status() const { + assert(iter_); + return iter_->status(); + } + void Next() { + assert(iter_); + iter_->Next(); + Update(); + } + void Prev() { + assert(iter_); + iter_->Prev(); + Update(); + } + void Seek(const Slice& k) { + assert(iter_); + iter_->Seek(k); + Update(); + } + void SeekToFirst() { + assert(iter_); + iter_->SeekToFirst(); + Update(); + } + void SeekToLast() { + assert(iter_); + iter_->SeekToLast(); + Update(); + } private: void Update() { diff --git a/src/leveldb/table/merger.cc b/src/leveldb/table/merger.cc index 2dde4dc21f..76441b1cc2 100644 --- a/src/leveldb/table/merger.cc +++ b/src/leveldb/table/merger.cc @@ -17,22 +17,18 @@ class MergingIterator : public Iterator { : comparator_(comparator), children_(new IteratorWrapper[n]), n_(n), - current_(NULL), + current_(nullptr), 
direction_(kForward) { for (int i = 0; i < n; i++) { children_[i].Set(children[i]); } } - virtual ~MergingIterator() { - delete[] children_; - } + ~MergingIterator() override { delete[] children_; } - virtual bool Valid() const { - return (current_ != NULL); - } + bool Valid() const override { return (current_ != nullptr); } - virtual void SeekToFirst() { + void SeekToFirst() override { for (int i = 0; i < n_; i++) { children_[i].SeekToFirst(); } @@ -40,7 +36,7 @@ class MergingIterator : public Iterator { direction_ = kForward; } - virtual void SeekToLast() { + void SeekToLast() override { for (int i = 0; i < n_; i++) { children_[i].SeekToLast(); } @@ -48,7 +44,7 @@ class MergingIterator : public Iterator { direction_ = kReverse; } - virtual void Seek(const Slice& target) { + void Seek(const Slice& target) override { for (int i = 0; i < n_; i++) { children_[i].Seek(target); } @@ -56,7 +52,7 @@ class MergingIterator : public Iterator { direction_ = kForward; } - virtual void Next() { + void Next() override { assert(Valid()); // Ensure that all children are positioned after key(). @@ -82,7 +78,7 @@ class MergingIterator : public Iterator { FindSmallest(); } - virtual void Prev() { + void Prev() override { assert(Valid()); // Ensure that all children are positioned before key(). @@ -111,17 +107,17 @@ class MergingIterator : public Iterator { FindLargest(); } - virtual Slice key() const { + Slice key() const override { assert(Valid()); return current_->key(); } - virtual Slice value() const { + Slice value() const override { assert(Valid()); return current_->value(); } - virtual Status status() const { + Status status() const override { Status status; for (int i = 0; i < n_; i++) { status = children_[i].status(); @@ -133,6 +129,9 @@ class MergingIterator : public Iterator { } private: + // Which direction is the iterator moving? + enum Direction { kForward, kReverse }; + void FindSmallest(); void FindLargest(); @@ -143,21 +142,15 @@ class MergingIterator : public Iterator { IteratorWrapper* children_; int n_; IteratorWrapper* current_; - - // Which direction is the iterator moving? 
- enum Direction { - kForward, - kReverse - }; Direction direction_; }; void MergingIterator::FindSmallest() { - IteratorWrapper* smallest = NULL; + IteratorWrapper* smallest = nullptr; for (int i = 0; i < n_; i++) { IteratorWrapper* child = &children_[i]; if (child->Valid()) { - if (smallest == NULL) { + if (smallest == nullptr) { smallest = child; } else if (comparator_->Compare(child->key(), smallest->key()) < 0) { smallest = child; @@ -168,11 +161,11 @@ void MergingIterator::FindSmallest() { } void MergingIterator::FindLargest() { - IteratorWrapper* largest = NULL; - for (int i = n_-1; i >= 0; i--) { + IteratorWrapper* largest = nullptr; + for (int i = n_ - 1; i >= 0; i--) { IteratorWrapper* child = &children_[i]; if (child->Valid()) { - if (largest == NULL) { + if (largest == nullptr) { largest = child; } else if (comparator_->Compare(child->key(), largest->key()) > 0) { largest = child; @@ -183,14 +176,15 @@ void MergingIterator::FindLargest() { } } // namespace -Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n) { +Iterator* NewMergingIterator(const Comparator* comparator, Iterator** children, + int n) { assert(n >= 0); if (n == 0) { return NewEmptyIterator(); } else if (n == 1) { - return list[0]; + return children[0]; } else { - return new MergingIterator(cmp, list, n); + return new MergingIterator(comparator, children, n); } } diff --git a/src/leveldb/table/merger.h b/src/leveldb/table/merger.h index 91ddd80faa..41cedc5254 100644 --- a/src/leveldb/table/merger.h +++ b/src/leveldb/table/merger.h @@ -18,8 +18,8 @@ class Iterator; // key is present in K child iterators, it will be yielded K times. // // REQUIRES: n >= 0 -extern Iterator* NewMergingIterator( - const Comparator* comparator, Iterator** children, int n); +Iterator* NewMergingIterator(const Comparator* comparator, Iterator** children, + int n); } // namespace leveldb diff --git a/src/leveldb/table/table.cc b/src/leveldb/table/table.cc index decf8082cc..b07bc88c7e 100644 --- a/src/leveldb/table/table.cc +++ b/src/leveldb/table/table.cc @@ -20,7 +20,7 @@ namespace leveldb { struct Table::Rep { ~Rep() { delete filter; - delete [] filter_data; + delete[] filter_data; delete index_block; } @@ -35,11 +35,9 @@ struct Table::Rep { Block* index_block; }; -Status Table::Open(const Options& options, - RandomAccessFile* file, - uint64_t size, - Table** table) { - *table = NULL; +Status Table::Open(const Options& options, RandomAccessFile* file, + uint64_t size, Table** table) { + *table = nullptr; if (size < Footer::kEncodedLength) { return Status::Corruption("file is too short to be an sstable"); } @@ -55,41 +53,36 @@ Status Table::Open(const Options& options, if (!s.ok()) return s; // Read the index block - BlockContents contents; - Block* index_block = NULL; + BlockContents index_block_contents; if (s.ok()) { ReadOptions opt; if (options.paranoid_checks) { opt.verify_checksums = true; } - s = ReadBlock(file, opt, footer.index_handle(), &contents); - if (s.ok()) { - index_block = new Block(contents); - } + s = ReadBlock(file, opt, footer.index_handle(), &index_block_contents); } if (s.ok()) { // We've successfully read the footer and the index block: we're // ready to serve requests. + Block* index_block = new Block(index_block_contents); Rep* rep = new Table::Rep; rep->options = options; rep->file = file; rep->metaindex_handle = footer.metaindex_handle(); rep->index_block = index_block; rep->cache_id = (options.block_cache ? 
options.block_cache->NewId() : 0); - rep->filter_data = NULL; - rep->filter = NULL; + rep->filter_data = nullptr; + rep->filter = nullptr; *table = new Table(rep); (*table)->ReadMeta(footer); - } else { - delete index_block; } return s; } void Table::ReadMeta(const Footer& footer) { - if (rep_->options.filter_policy == NULL) { + if (rep_->options.filter_policy == nullptr) { return; // Do not need any metadata } @@ -135,14 +128,12 @@ void Table::ReadFilter(const Slice& filter_handle_value) { return; } if (block.heap_allocated) { - rep_->filter_data = block.data.data(); // Will need to delete later + rep_->filter_data = block.data.data(); // Will need to delete later } rep_->filter = new FilterBlockReader(rep_->options.filter_policy, block.data); } -Table::~Table() { - delete rep_; -} +Table::~Table() { delete rep_; } static void DeleteBlock(void* arg, void* ignored) { delete reinterpret_cast<Block*>(arg); @@ -161,13 +152,12 @@ static void ReleaseBlock(void* arg, void* h) { // Convert an index iterator value (i.e., an encoded BlockHandle) // into an iterator over the contents of the corresponding block. -Iterator* Table::BlockReader(void* arg, - const ReadOptions& options, +Iterator* Table::BlockReader(void* arg, const ReadOptions& options, const Slice& index_value) { Table* table = reinterpret_cast<Table*>(arg); Cache* block_cache = table->rep_->options.block_cache; - Block* block = NULL; - Cache::Handle* cache_handle = NULL; + Block* block = nullptr; + Cache::Handle* cache_handle = nullptr; BlockHandle handle; Slice input = index_value; @@ -177,21 +167,21 @@ Iterator* Table::BlockReader(void* arg, if (s.ok()) { BlockContents contents; - if (block_cache != NULL) { + if (block_cache != nullptr) { char cache_key_buffer[16]; EncodeFixed64(cache_key_buffer, table->rep_->cache_id); - EncodeFixed64(cache_key_buffer+8, handle.offset()); + EncodeFixed64(cache_key_buffer + 8, handle.offset()); Slice key(cache_key_buffer, sizeof(cache_key_buffer)); cache_handle = block_cache->Lookup(key); - if (cache_handle != NULL) { + if (cache_handle != nullptr) { block = reinterpret_cast<Block*>(block_cache->Value(cache_handle)); } else { s = ReadBlock(table->rep_->file, options, handle, &contents); if (s.ok()) { block = new Block(contents); if (contents.cachable && options.fill_cache) { - cache_handle = block_cache->Insert( - key, block, block->size(), &DeleteCachedBlock); + cache_handle = block_cache->Insert(key, block, block->size(), + &DeleteCachedBlock); } } } @@ -204,10 +194,10 @@ Iterator* Table::BlockReader(void* arg, } Iterator* iter; - if (block != NULL) { + if (block != nullptr) { iter = block->NewIterator(table->rep_->options.comparator); - if (cache_handle == NULL) { - iter->RegisterCleanup(&DeleteBlock, block, NULL); + if (cache_handle == nullptr) { + iter->RegisterCleanup(&DeleteBlock, block, nullptr); } else { iter->RegisterCleanup(&ReleaseBlock, block_cache, cache_handle); } @@ -223,9 +213,9 @@ Iterator* Table::NewIterator(const ReadOptions& options) const { &Table::BlockReader, const_cast<Table*>(this), options); } -Status Table::InternalGet(const ReadOptions& options, const Slice& k, - void* arg, - void (*saver)(void*, const Slice&, const Slice&)) { +Status Table::InternalGet(const ReadOptions& options, const Slice& k, void* arg, + void (*handle_result)(void*, const Slice&, + const Slice&)) { Status s; Iterator* iiter = rep_->index_block->NewIterator(rep_->options.comparator); iiter->Seek(k); @@ -233,15 +223,14 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k, Slice 
handle_value = iiter->value(); FilterBlockReader* filter = rep_->filter; BlockHandle handle; - if (filter != NULL && - handle.DecodeFrom(&handle_value).ok() && + if (filter != nullptr && handle.DecodeFrom(&handle_value).ok() && !filter->KeyMayMatch(handle.offset(), k)) { // Not found } else { Iterator* block_iter = BlockReader(this, options, iiter->value()); block_iter->Seek(k); if (block_iter->Valid()) { - (*saver)(arg, block_iter->key(), block_iter->value()); + (*handle_result)(arg, block_iter->key(), block_iter->value()); } s = block_iter->status(); delete block_iter; @@ -254,7 +243,6 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k, return s; } - uint64_t Table::ApproximateOffsetOf(const Slice& key) const { Iterator* index_iter = rep_->index_block->NewIterator(rep_->options.comparator); diff --git a/src/leveldb/table/table_builder.cc b/src/leveldb/table/table_builder.cc index 62002c84f2..278febf94f 100644 --- a/src/leveldb/table/table_builder.cc +++ b/src/leveldb/table/table_builder.cc @@ -5,6 +5,7 @@ #include "leveldb/table_builder.h" #include <assert.h> + #include "leveldb/comparator.h" #include "leveldb/env.h" #include "leveldb/filter_policy.h" @@ -18,6 +19,22 @@ namespace leveldb { struct TableBuilder::Rep { + Rep(const Options& opt, WritableFile* f) + : options(opt), + index_block_options(opt), + file(f), + offset(0), + data_block(&options), + index_block(&index_block_options), + num_entries(0), + closed(false), + filter_block(opt.filter_policy == nullptr + ? nullptr + : new FilterBlockBuilder(opt.filter_policy)), + pending_index_entry(false) { + index_block_options.block_restart_interval = 1; + } + Options options; Options index_block_options; WritableFile* file; @@ -27,7 +44,7 @@ struct TableBuilder::Rep { BlockBuilder index_block; std::string last_key; int64_t num_entries; - bool closed; // Either Finish() or Abandon() has been called. + bool closed; // Either Finish() or Abandon() has been called. FilterBlockBuilder* filter_block; // We do not emit the index entry for a block until we have seen the @@ -43,26 +60,11 @@ struct TableBuilder::Rep { BlockHandle pending_handle; // Handle to add to index block std::string compressed_output; - - Rep(const Options& opt, WritableFile* f) - : options(opt), - index_block_options(opt), - file(f), - offset(0), - data_block(&options), - index_block(&index_block_options), - num_entries(0), - closed(false), - filter_block(opt.filter_policy == NULL ? 
NULL - : new FilterBlockBuilder(opt.filter_policy)), - pending_index_entry(false) { - index_block_options.block_restart_interval = 1; - } }; TableBuilder::TableBuilder(const Options& options, WritableFile* file) : rep_(new Rep(options, file)) { - if (rep_->filter_block != NULL) { + if (rep_->filter_block != nullptr) { rep_->filter_block->StartBlock(0); } } @@ -106,7 +108,7 @@ void TableBuilder::Add(const Slice& key, const Slice& value) { r->pending_index_entry = false; } - if (r->filter_block != NULL) { + if (r->filter_block != nullptr) { r->filter_block->AddKey(key); } @@ -131,7 +133,7 @@ void TableBuilder::Flush() { r->pending_index_entry = true; r->status = r->file->Flush(); } - if (r->filter_block != NULL) { + if (r->filter_block != nullptr) { r->filter_block->StartBlock(r->offset); } } @@ -173,8 +175,7 @@ void TableBuilder::WriteBlock(BlockBuilder* block, BlockHandle* handle) { } void TableBuilder::WriteRawBlock(const Slice& block_contents, - CompressionType type, - BlockHandle* handle) { + CompressionType type, BlockHandle* handle) { Rep* r = rep_; handle->set_offset(r->offset); handle->set_size(block_contents.size()); @@ -184,7 +185,7 @@ void TableBuilder::WriteRawBlock(const Slice& block_contents, trailer[0] = type; uint32_t crc = crc32c::Value(block_contents.data(), block_contents.size()); crc = crc32c::Extend(crc, trailer, 1); // Extend crc to cover block type - EncodeFixed32(trailer+1, crc32c::Mask(crc)); + EncodeFixed32(trailer + 1, crc32c::Mask(crc)); r->status = r->file->Append(Slice(trailer, kBlockTrailerSize)); if (r->status.ok()) { r->offset += block_contents.size() + kBlockTrailerSize; @@ -192,9 +193,7 @@ void TableBuilder::WriteRawBlock(const Slice& block_contents, } } -Status TableBuilder::status() const { - return rep_->status; -} +Status TableBuilder::status() const { return rep_->status; } Status TableBuilder::Finish() { Rep* r = rep_; @@ -205,7 +204,7 @@ Status TableBuilder::Finish() { BlockHandle filter_block_handle, metaindex_block_handle, index_block_handle; // Write filter block - if (ok() && r->filter_block != NULL) { + if (ok() && r->filter_block != nullptr) { WriteRawBlock(r->filter_block->Finish(), kNoCompression, &filter_block_handle); } @@ -213,7 +212,7 @@ Status TableBuilder::Finish() { // Write metaindex block if (ok()) { BlockBuilder meta_index_block(&r->options); - if (r->filter_block != NULL) { + if (r->filter_block != nullptr) { // Add mapping from "filter.Name" to location of filter data std::string key = "filter."; key.append(r->options.filter_policy->Name()); @@ -259,12 +258,8 @@ void TableBuilder::Abandon() { r->closed = true; } -uint64_t TableBuilder::NumEntries() const { - return rep_->num_entries; -} +uint64_t TableBuilder::NumEntries() const { return rep_->num_entries; } -uint64_t TableBuilder::FileSize() const { - return rep_->offset; -} +uint64_t TableBuilder::FileSize() const { return rep_->offset; } } // namespace leveldb diff --git a/src/leveldb/table/table_test.cc b/src/leveldb/table/table_test.cc index abf6e246ff..17aaea2f9e 100644 --- a/src/leveldb/table/table_test.cc +++ b/src/leveldb/table/table_test.cc @@ -6,6 +6,7 @@ #include <map> #include <string> + #include "db/dbformat.h" #include "db/memtable.h" #include "db/write_batch_internal.h" @@ -27,8 +28,8 @@ namespace leveldb { static std::string Reverse(const Slice& key) { std::string str(key.ToString()); std::string rev(""); - for (std::string::reverse_iterator rit = str.rbegin(); - rit != str.rend(); ++rit) { + for (std::string::reverse_iterator rit = str.rbegin(); rit != 
str.rend(); + ++rit) { rev.push_back(*rit); } return rev; @@ -37,24 +38,23 @@ static std::string Reverse(const Slice& key) { namespace { class ReverseKeyComparator : public Comparator { public: - virtual const char* Name() const { + const char* Name() const override { return "leveldb.ReverseBytewiseComparator"; } - virtual int Compare(const Slice& a, const Slice& b) const { + int Compare(const Slice& a, const Slice& b) const override { return BytewiseComparator()->Compare(Reverse(a), Reverse(b)); } - virtual void FindShortestSeparator( - std::string* start, - const Slice& limit) const { + void FindShortestSeparator(std::string* start, + const Slice& limit) const override { std::string s = Reverse(*start); std::string l = Reverse(limit); BytewiseComparator()->FindShortestSeparator(&s, l); *start = Reverse(s); } - virtual void FindShortSuccessor(std::string* key) const { + void FindShortSuccessor(std::string* key) const override { std::string s = Reverse(*key); BytewiseComparator()->FindShortSuccessor(&s); *key = Reverse(s); @@ -79,47 +79,46 @@ namespace { struct STLLessThan { const Comparator* cmp; - STLLessThan() : cmp(BytewiseComparator()) { } - STLLessThan(const Comparator* c) : cmp(c) { } + STLLessThan() : cmp(BytewiseComparator()) {} + STLLessThan(const Comparator* c) : cmp(c) {} bool operator()(const std::string& a, const std::string& b) const { return cmp->Compare(Slice(a), Slice(b)) < 0; } }; } // namespace -class StringSink: public WritableFile { +class StringSink : public WritableFile { public: - ~StringSink() { } + ~StringSink() override = default; const std::string& contents() const { return contents_; } - virtual Status Close() { return Status::OK(); } - virtual Status Flush() { return Status::OK(); } - virtual Status Sync() { return Status::OK(); } + Status Close() override { return Status::OK(); } + Status Flush() override { return Status::OK(); } + Status Sync() override { return Status::OK(); } - virtual Status Append(const Slice& data) { + Status Append(const Slice& data) override { contents_.append(data.data(), data.size()); return Status::OK(); } + std::string GetName() const override { return ""; } private: std::string contents_; }; - -class StringSource: public RandomAccessFile { +class StringSource : public RandomAccessFile { public: StringSource(const Slice& contents) - : contents_(contents.data(), contents.size()) { - } + : contents_(contents.data(), contents.size()) {} - virtual ~StringSource() { } + ~StringSource() override = default; uint64_t Size() const { return contents_.size(); } - virtual Status Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const { - if (offset > contents_.size()) { + Status Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const override { + if (offset >= contents_.size()) { return Status::InvalidArgument("invalid Read offset"); } if (offset + n > contents_.size()) { @@ -130,6 +129,7 @@ class StringSource: public RandomAccessFile { return Status::OK(); } + std::string GetName() const { return ""; } private: std::string contents_; }; @@ -140,8 +140,8 @@ typedef std::map<std::string, std::string, STLLessThan> KVMap; // BlockBuilder/TableBuilder and Block/Table. 
class Constructor { public: - explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) { } - virtual ~Constructor() { } + explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) {} + virtual ~Constructor() = default; void Add(const std::string& key, const Slice& value) { data_[key] = value.ToString(); @@ -150,15 +150,12 @@ class Constructor { // Finish constructing the data structure with all the keys that have // been added so far. Returns the keys in sorted order in "*keys" // and stores the key/value pairs in "*kvmap" - void Finish(const Options& options, - std::vector<std::string>* keys, + void Finish(const Options& options, std::vector<std::string>* keys, KVMap* kvmap) { *kvmap = data_; keys->clear(); - for (KVMap::const_iterator it = data_.begin(); - it != data_.end(); - ++it) { - keys->push_back(it->first); + for (const auto& kvp : data_) { + keys->push_back(kvp.first); } data_.clear(); Status s = FinishImpl(options, *kvmap); @@ -170,32 +167,26 @@ class Constructor { virtual Iterator* NewIterator() const = 0; - virtual const KVMap& data() { return data_; } + const KVMap& data() const { return data_; } - virtual DB* db() const { return NULL; } // Overridden in DBConstructor + virtual DB* db() const { return nullptr; } // Overridden in DBConstructor private: KVMap data_; }; -class BlockConstructor: public Constructor { +class BlockConstructor : public Constructor { public: explicit BlockConstructor(const Comparator* cmp) - : Constructor(cmp), - comparator_(cmp), - block_(NULL) { } - ~BlockConstructor() { + : Constructor(cmp), comparator_(cmp), block_(nullptr) {} + ~BlockConstructor() override { delete block_; } + Status FinishImpl(const Options& options, const KVMap& data) override { delete block_; - } - virtual Status FinishImpl(const Options& options, const KVMap& data) { - delete block_; - block_ = NULL; + block_ = nullptr; BlockBuilder builder(&options); - for (KVMap::const_iterator it = data.begin(); - it != data.end(); - ++it) { - builder.Add(it->first, it->second); + for (const auto& kvp : data) { + builder.Add(kvp.first, kvp.second); } // Open the block data_ = builder.Finish().ToString(); @@ -206,36 +197,30 @@ class BlockConstructor: public Constructor { block_ = new Block(contents); return Status::OK(); } - virtual Iterator* NewIterator() const { + Iterator* NewIterator() const override { return block_->NewIterator(comparator_); } private: - const Comparator* comparator_; + const Comparator* const comparator_; std::string data_; Block* block_; BlockConstructor(); }; -class TableConstructor: public Constructor { +class TableConstructor : public Constructor { public: TableConstructor(const Comparator* cmp) - : Constructor(cmp), - source_(NULL), table_(NULL) { - } - ~TableConstructor() { - Reset(); - } - virtual Status FinishImpl(const Options& options, const KVMap& data) { + : Constructor(cmp), source_(nullptr), table_(nullptr) {} + ~TableConstructor() override { Reset(); } + Status FinishImpl(const Options& options, const KVMap& data) override { Reset(); StringSink sink; TableBuilder builder(options, &sink); - for (KVMap::const_iterator it = data.begin(); - it != data.end(); - ++it) { - builder.Add(it->first, it->second); + for (const auto& kvp : data) { + builder.Add(kvp.first, kvp.second); ASSERT_TRUE(builder.status().ok()); } Status s = builder.Finish(); @@ -250,7 +235,7 @@ class TableConstructor: public Constructor { return Table::Open(table_options, source_, sink.contents().size(), &table_); } - virtual Iterator* NewIterator() const { + 
Iterator* NewIterator() const override { return table_->NewIterator(ReadOptions()); } @@ -262,8 +247,8 @@ class TableConstructor: public Constructor { void Reset() { delete table_; delete source_; - table_ = NULL; - source_ = NULL; + table_ = nullptr; + source_ = nullptr; } StringSource* source_; @@ -273,23 +258,28 @@ class TableConstructor: public Constructor { }; // A helper class that converts internal format keys into user keys -class KeyConvertingIterator: public Iterator { +class KeyConvertingIterator : public Iterator { public: - explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) { } - virtual ~KeyConvertingIterator() { delete iter_; } - virtual bool Valid() const { return iter_->Valid(); } - virtual void Seek(const Slice& target) { + explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) {} + + KeyConvertingIterator(const KeyConvertingIterator&) = delete; + KeyConvertingIterator& operator=(const KeyConvertingIterator&) = delete; + + ~KeyConvertingIterator() override { delete iter_; } + + bool Valid() const override { return iter_->Valid(); } + void Seek(const Slice& target) override { ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue); std::string encoded; AppendInternalKey(&encoded, ikey); iter_->Seek(encoded); } - virtual void SeekToFirst() { iter_->SeekToFirst(); } - virtual void SeekToLast() { iter_->SeekToLast(); } - virtual void Next() { iter_->Next(); } - virtual void Prev() { iter_->Prev(); } + void SeekToFirst() override { iter_->SeekToFirst(); } + void SeekToLast() override { iter_->SeekToLast(); } + void Next() override { iter_->Next(); } + void Prev() override { iter_->Prev(); } - virtual Slice key() const { + Slice key() const override { assert(Valid()); ParsedInternalKey key; if (!ParseInternalKey(iter_->key(), &key)) { @@ -299,82 +289,68 @@ class KeyConvertingIterator: public Iterator { return key.user_key; } - virtual Slice value() const { return iter_->value(); } - virtual Status status() const { + Slice value() const override { return iter_->value(); } + Status status() const override { return status_.ok() ? 
iter_->status() : status_; } private: mutable Status status_; Iterator* iter_; - - // No copying allowed - KeyConvertingIterator(const KeyConvertingIterator&); - void operator=(const KeyConvertingIterator&); }; -class MemTableConstructor: public Constructor { +class MemTableConstructor : public Constructor { public: explicit MemTableConstructor(const Comparator* cmp) - : Constructor(cmp), - internal_comparator_(cmp) { + : Constructor(cmp), internal_comparator_(cmp) { memtable_ = new MemTable(internal_comparator_); memtable_->Ref(); } - ~MemTableConstructor() { - memtable_->Unref(); - } - virtual Status FinishImpl(const Options& options, const KVMap& data) { + ~MemTableConstructor() override { memtable_->Unref(); } + Status FinishImpl(const Options& options, const KVMap& data) override { memtable_->Unref(); memtable_ = new MemTable(internal_comparator_); memtable_->Ref(); int seq = 1; - for (KVMap::const_iterator it = data.begin(); - it != data.end(); - ++it) { - memtable_->Add(seq, kTypeValue, it->first, it->second); + for (const auto& kvp : data) { + memtable_->Add(seq, kTypeValue, kvp.first, kvp.second); seq++; } return Status::OK(); } - virtual Iterator* NewIterator() const { + Iterator* NewIterator() const override { return new KeyConvertingIterator(memtable_->NewIterator()); } private: - InternalKeyComparator internal_comparator_; + const InternalKeyComparator internal_comparator_; MemTable* memtable_; }; -class DBConstructor: public Constructor { +class DBConstructor : public Constructor { public: explicit DBConstructor(const Comparator* cmp) - : Constructor(cmp), - comparator_(cmp) { - db_ = NULL; + : Constructor(cmp), comparator_(cmp) { + db_ = nullptr; NewDB(); } - ~DBConstructor() { - delete db_; - } - virtual Status FinishImpl(const Options& options, const KVMap& data) { + ~DBConstructor() override { delete db_; } + Status FinishImpl(const Options& options, const KVMap& data) override { delete db_; - db_ = NULL; + db_ = nullptr; NewDB(); - for (KVMap::const_iterator it = data.begin(); - it != data.end(); - ++it) { + for (const auto& kvp : data) { WriteBatch batch; - batch.Put(it->first, it->second); + batch.Put(kvp.first, kvp.second); ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok()); } return Status::OK(); } - virtual Iterator* NewIterator() const { + Iterator* NewIterator() const override { return db_->NewIterator(ReadOptions()); } - virtual DB* db() const { return db_; } + DB* db() const override { return db_; } private: void NewDB() { @@ -392,16 +368,11 @@ class DBConstructor: public Constructor { ASSERT_TRUE(status.ok()) << status.ToString(); } - const Comparator* comparator_; + const Comparator* const comparator_; DB* db_; }; -enum TestType { - TABLE_TEST, - BLOCK_TEST, - MEMTABLE_TEST, - DB_TEST -}; +enum TestType { TABLE_TEST, BLOCK_TEST, MEMTABLE_TEST, DB_TEST }; struct TestArgs { TestType type; @@ -410,37 +381,37 @@ struct TestArgs { }; static const TestArgs kTestArgList[] = { - { TABLE_TEST, false, 16 }, - { TABLE_TEST, false, 1 }, - { TABLE_TEST, false, 1024 }, - { TABLE_TEST, true, 16 }, - { TABLE_TEST, true, 1 }, - { TABLE_TEST, true, 1024 }, - - { BLOCK_TEST, false, 16 }, - { BLOCK_TEST, false, 1 }, - { BLOCK_TEST, false, 1024 }, - { BLOCK_TEST, true, 16 }, - { BLOCK_TEST, true, 1 }, - { BLOCK_TEST, true, 1024 }, - - // Restart interval does not matter for memtables - { MEMTABLE_TEST, false, 16 }, - { MEMTABLE_TEST, true, 16 }, - - // Do not bother with restart interval variations for DB - { DB_TEST, false, 16 }, - { DB_TEST, true, 16 }, + {TABLE_TEST, false, 
16}, + {TABLE_TEST, false, 1}, + {TABLE_TEST, false, 1024}, + {TABLE_TEST, true, 16}, + {TABLE_TEST, true, 1}, + {TABLE_TEST, true, 1024}, + + {BLOCK_TEST, false, 16}, + {BLOCK_TEST, false, 1}, + {BLOCK_TEST, false, 1024}, + {BLOCK_TEST, true, 16}, + {BLOCK_TEST, true, 1}, + {BLOCK_TEST, true, 1024}, + + // Restart interval does not matter for memtables + {MEMTABLE_TEST, false, 16}, + {MEMTABLE_TEST, true, 16}, + + // Do not bother with restart interval variations for DB + {DB_TEST, false, 16}, + {DB_TEST, true, 16}, }; static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]); class Harness { public: - Harness() : constructor_(NULL) { } + Harness() : constructor_(nullptr) {} void Init(const TestArgs& args) { delete constructor_; - constructor_ = NULL; + constructor_ = nullptr; options_ = Options(); options_.block_restart_interval = args.restart_interval; @@ -466,9 +437,7 @@ class Harness { } } - ~Harness() { - delete constructor_; - } + ~Harness() { delete constructor_; } void Add(const std::string& key, const std::string& value) { constructor_->Add(key, value); @@ -490,8 +459,7 @@ class Harness { ASSERT_TRUE(!iter->Valid()); iter->SeekToFirst(); for (KVMap::const_iterator model_iter = data.begin(); - model_iter != data.end(); - ++model_iter) { + model_iter != data.end(); ++model_iter) { ASSERT_EQ(ToString(data, model_iter), ToString(iter)); iter->Next(); } @@ -505,8 +473,7 @@ class Harness { ASSERT_TRUE(!iter->Valid()); iter->SeekToLast(); for (KVMap::const_reverse_iterator model_iter = data.rbegin(); - model_iter != data.rend(); - ++model_iter) { + model_iter != data.rend(); ++model_iter) { ASSERT_EQ(ToString(data, model_iter), ToString(iter)); iter->Prev(); } @@ -514,8 +481,7 @@ class Harness { delete iter; } - void TestRandomAccess(Random* rnd, - const std::vector<std::string>& keys, + void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys, const KVMap& data) { static const bool kVerbose = false; Iterator* iter = constructor_->NewIterator(); @@ -546,8 +512,8 @@ class Harness { case 2: { std::string key = PickRandomKey(rnd, keys); model_iter = data.lower_bound(key); - if (kVerbose) fprintf(stderr, "Seek '%s'\n", - EscapeString(key).c_str()); + if (kVerbose) + fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str()); iter->Seek(Slice(key)); ASSERT_EQ(ToString(data, model_iter), ToString(iter)); break; @@ -558,7 +524,7 @@ class Harness { if (kVerbose) fprintf(stderr, "Prev\n"); iter->Prev(); if (model_iter == data.begin()) { - model_iter = data.end(); // Wrap around to invalid value + model_iter = data.end(); // Wrap around to invalid value } else { --model_iter; } @@ -621,8 +587,8 @@ class Harness { break; case 1: { // Attempt to return something smaller than an existing key - if (result.size() > 0 && result[result.size()-1] > '\0') { - result[result.size()-1]--; + if (!result.empty() && result[result.size() - 1] > '\0') { + result[result.size() - 1]--; } break; } @@ -636,7 +602,7 @@ class Harness { } } - // Returns NULL if not running against a DB + // Returns nullptr if not running against a DB DB* db() const { return constructor_->db(); } private: @@ -720,8 +686,8 @@ TEST(Harness, Randomized) { for (int num_entries = 0; num_entries < 2000; num_entries += (num_entries < 50 ? 
1 : 200)) { if ((num_entries % 10) == 0) { - fprintf(stderr, "case %d of %d: num_entries = %d\n", - (i + 1), int(kNumTestArgs), num_entries); + fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1), + int(kNumTestArgs), num_entries); } for (int e = 0; e < num_entries; e++) { std::string v; @@ -735,7 +701,7 @@ TEST(Harness, Randomized) { TEST(Harness, RandomizedLongDB) { Random rnd(test::RandomSeed()); - TestArgs args = { DB_TEST, false, 16 }; + TestArgs args = {DB_TEST, false, 16}; Init(args); int num_entries = 100000; for (int e = 0; e < num_entries; e++) { @@ -757,7 +723,7 @@ TEST(Harness, RandomizedLongDB) { ASSERT_GT(files, 0); } -class MemTableTest { }; +class MemTableTest {}; TEST(MemTableTest, Simple) { InternalKeyComparator cmp(BytewiseComparator()); @@ -774,8 +740,7 @@ TEST(MemTableTest, Simple) { Iterator* iter = memtable->NewIterator(); iter->SeekToFirst(); while (iter->Valid()) { - fprintf(stderr, "key: '%s' -> '%s'\n", - iter->key().ToString().c_str(), + fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(), iter->value().ToString().c_str()); iter->Next(); } @@ -788,14 +753,13 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) { bool result = (val >= low) && (val <= high); if (!result) { fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n", - (unsigned long long)(val), - (unsigned long long)(low), + (unsigned long long)(val), (unsigned long long)(low), (unsigned long long)(high)); } return result; } -class TableTest { }; +class TableTest {}; TEST(TableTest, ApproximateOffsetOfPlain) { TableConstructor c(BytewiseComparator()); @@ -813,18 +777,17 @@ TEST(TableTest, ApproximateOffsetOfPlain) { options.compression = kNoCompression; c.Finish(options, &keys, &kvmap); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0)); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0)); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0)); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0)); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0)); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000)); ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000)); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000)); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000)); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000)); - ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000)); - + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000)); } static bool SnappyCompressionSupported() { @@ -855,7 +818,7 @@ TEST(TableTest, ApproximateOffsetOfCompressed) { // Expected upper and lower bounds of space used by compressible strings. static const int kSlop = 1000; // Compressor effectiveness varies. 
- const int expected = 2500; // 10000 * compression ratio (0.25) + const int expected = 2500; // 10000 * compression ratio (0.25) const int min_z = expected - kSlop; const int max_z = expected + kSlop; @@ -871,6 +834,4 @@ TEST(TableTest, ApproximateOffsetOfCompressed) { } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/table/two_level_iterator.cc b/src/leveldb/table/two_level_iterator.cc index 7822ebab9c..144790dd97 100644 --- a/src/leveldb/table/two_level_iterator.cc +++ b/src/leveldb/table/two_level_iterator.cc @@ -15,38 +15,33 @@ namespace { typedef Iterator* (*BlockFunction)(void*, const ReadOptions&, const Slice&); -class TwoLevelIterator: public Iterator { +class TwoLevelIterator : public Iterator { public: - TwoLevelIterator( - Iterator* index_iter, - BlockFunction block_function, - void* arg, - const ReadOptions& options); - - virtual ~TwoLevelIterator(); - - virtual void Seek(const Slice& target); - virtual void SeekToFirst(); - virtual void SeekToLast(); - virtual void Next(); - virtual void Prev(); - - virtual bool Valid() const { - return data_iter_.Valid(); - } - virtual Slice key() const { + TwoLevelIterator(Iterator* index_iter, BlockFunction block_function, + void* arg, const ReadOptions& options); + + ~TwoLevelIterator() override; + + void Seek(const Slice& target) override; + void SeekToFirst() override; + void SeekToLast() override; + void Next() override; + void Prev() override; + + bool Valid() const override { return data_iter_.Valid(); } + Slice key() const override { assert(Valid()); return data_iter_.key(); } - virtual Slice value() const { + Slice value() const override { assert(Valid()); return data_iter_.value(); } - virtual Status status() const { + Status status() const override { // It'd be nice if status() returned a const Status& instead of a Status if (!index_iter_.status().ok()) { return index_iter_.status(); - } else if (data_iter_.iter() != NULL && !data_iter_.status().ok()) { + } else if (data_iter_.iter() != nullptr && !data_iter_.status().ok()) { return data_iter_.status(); } else { return status_; @@ -67,45 +62,41 @@ class TwoLevelIterator: public Iterator { const ReadOptions options_; Status status_; IteratorWrapper index_iter_; - IteratorWrapper data_iter_; // May be NULL - // If data_iter_ is non-NULL, then "data_block_handle_" holds the + IteratorWrapper data_iter_; // May be nullptr + // If data_iter_ is non-null, then "data_block_handle_" holds the // "index_value" passed to block_function_ to create the data_iter_. 
std::string data_block_handle_; }; -TwoLevelIterator::TwoLevelIterator( - Iterator* index_iter, - BlockFunction block_function, - void* arg, - const ReadOptions& options) +TwoLevelIterator::TwoLevelIterator(Iterator* index_iter, + BlockFunction block_function, void* arg, + const ReadOptions& options) : block_function_(block_function), arg_(arg), options_(options), index_iter_(index_iter), - data_iter_(NULL) { -} + data_iter_(nullptr) {} -TwoLevelIterator::~TwoLevelIterator() { -} +TwoLevelIterator::~TwoLevelIterator() = default; void TwoLevelIterator::Seek(const Slice& target) { index_iter_.Seek(target); InitDataBlock(); - if (data_iter_.iter() != NULL) data_iter_.Seek(target); + if (data_iter_.iter() != nullptr) data_iter_.Seek(target); SkipEmptyDataBlocksForward(); } void TwoLevelIterator::SeekToFirst() { index_iter_.SeekToFirst(); InitDataBlock(); - if (data_iter_.iter() != NULL) data_iter_.SeekToFirst(); + if (data_iter_.iter() != nullptr) data_iter_.SeekToFirst(); SkipEmptyDataBlocksForward(); } void TwoLevelIterator::SeekToLast() { index_iter_.SeekToLast(); InitDataBlock(); - if (data_iter_.iter() != NULL) data_iter_.SeekToLast(); + if (data_iter_.iter() != nullptr) data_iter_.SeekToLast(); SkipEmptyDataBlocksBackward(); } @@ -121,44 +112,44 @@ void TwoLevelIterator::Prev() { SkipEmptyDataBlocksBackward(); } - void TwoLevelIterator::SkipEmptyDataBlocksForward() { - while (data_iter_.iter() == NULL || !data_iter_.Valid()) { + while (data_iter_.iter() == nullptr || !data_iter_.Valid()) { // Move to next block if (!index_iter_.Valid()) { - SetDataIterator(NULL); + SetDataIterator(nullptr); return; } index_iter_.Next(); InitDataBlock(); - if (data_iter_.iter() != NULL) data_iter_.SeekToFirst(); + if (data_iter_.iter() != nullptr) data_iter_.SeekToFirst(); } } void TwoLevelIterator::SkipEmptyDataBlocksBackward() { - while (data_iter_.iter() == NULL || !data_iter_.Valid()) { + while (data_iter_.iter() == nullptr || !data_iter_.Valid()) { // Move to next block if (!index_iter_.Valid()) { - SetDataIterator(NULL); + SetDataIterator(nullptr); return; } index_iter_.Prev(); InitDataBlock(); - if (data_iter_.iter() != NULL) data_iter_.SeekToLast(); + if (data_iter_.iter() != nullptr) data_iter_.SeekToLast(); } } void TwoLevelIterator::SetDataIterator(Iterator* data_iter) { - if (data_iter_.iter() != NULL) SaveError(data_iter_.status()); + if (data_iter_.iter() != nullptr) SaveError(data_iter_.status()); data_iter_.Set(data_iter); } void TwoLevelIterator::InitDataBlock() { if (!index_iter_.Valid()) { - SetDataIterator(NULL); + SetDataIterator(nullptr); } else { Slice handle = index_iter_.value(); - if (data_iter_.iter() != NULL && handle.compare(data_block_handle_) == 0) { + if (data_iter_.iter() != nullptr && + handle.compare(data_block_handle_) == 0) { // data_iter_ is already constructed with this iterator, so // no need to change anything } else { @@ -171,11 +162,9 @@ void TwoLevelIterator::InitDataBlock() { } // namespace -Iterator* NewTwoLevelIterator( - Iterator* index_iter, - BlockFunction block_function, - void* arg, - const ReadOptions& options) { +Iterator* NewTwoLevelIterator(Iterator* index_iter, + BlockFunction block_function, void* arg, + const ReadOptions& options) { return new TwoLevelIterator(index_iter, block_function, arg, options); } diff --git a/src/leveldb/table/two_level_iterator.h b/src/leveldb/table/two_level_iterator.h index 629ca34525..81ffe809ac 100644 --- a/src/leveldb/table/two_level_iterator.h +++ b/src/leveldb/table/two_level_iterator.h @@ -20,14 +20,11 @@ struct 
ReadOptions; // // Uses a supplied function to convert an index_iter value into // an iterator over the contents of the corresponding block. -extern Iterator* NewTwoLevelIterator( +Iterator* NewTwoLevelIterator( Iterator* index_iter, - Iterator* (*block_function)( - void* arg, - const ReadOptions& options, - const Slice& index_value), - void* arg, - const ReadOptions& options); + Iterator* (*block_function)(void* arg, const ReadOptions& options, + const Slice& index_value), + void* arg, const ReadOptions& options); } // namespace leveldb diff --git a/src/leveldb/util/arena.cc b/src/leveldb/util/arena.cc index 74078213ee..46e3b2eb8f 100644 --- a/src/leveldb/util/arena.cc +++ b/src/leveldb/util/arena.cc @@ -3,16 +3,13 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "util/arena.h" -#include <assert.h> namespace leveldb { static const int kBlockSize = 4096; -Arena::Arena() : memory_usage_(0) { - alloc_ptr_ = NULL; // First allocation will allocate a block - alloc_bytes_remaining_ = 0; -} +Arena::Arena() + : alloc_ptr_(nullptr), alloc_bytes_remaining_(0), memory_usage_(0) {} Arena::~Arena() { for (size_t i = 0; i < blocks_.size(); i++) { @@ -40,8 +37,9 @@ char* Arena::AllocateFallback(size_t bytes) { char* Arena::AllocateAligned(size_t bytes) { const int align = (sizeof(void*) > 8) ? sizeof(void*) : 8; - assert((align & (align-1)) == 0); // Pointer size should be a power of 2 - size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align-1); + static_assert((align & (align - 1)) == 0, + "Pointer size should be a power of 2"); + size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align - 1); size_t slop = (current_mod == 0 ? 0 : align - current_mod); size_t needed = bytes + slop; char* result; @@ -53,15 +51,15 @@ char* Arena::AllocateAligned(size_t bytes) { // AllocateFallback always returned aligned memory result = AllocateFallback(bytes); } - assert((reinterpret_cast<uintptr_t>(result) & (align-1)) == 0); + assert((reinterpret_cast<uintptr_t>(result) & (align - 1)) == 0); return result; } char* Arena::AllocateNewBlock(size_t block_bytes) { char* result = new char[block_bytes]; blocks_.push_back(result); - memory_usage_.NoBarrier_Store( - reinterpret_cast<void*>(MemoryUsage() + block_bytes + sizeof(char*))); + memory_usage_.fetch_add(block_bytes + sizeof(char*), + std::memory_order_relaxed); return result; } diff --git a/src/leveldb/util/arena.h b/src/leveldb/util/arena.h index 48bab33741..68fc55d4dd 100644 --- a/src/leveldb/util/arena.h +++ b/src/leveldb/util/arena.h @@ -5,29 +5,33 @@ #ifndef STORAGE_LEVELDB_UTIL_ARENA_H_ #define STORAGE_LEVELDB_UTIL_ARENA_H_ +#include <atomic> +#include <cassert> +#include <cstddef> +#include <cstdint> #include <vector> -#include <assert.h> -#include <stddef.h> -#include <stdint.h> -#include "port/port.h" namespace leveldb { class Arena { public: Arena(); + + Arena(const Arena&) = delete; + Arena& operator=(const Arena&) = delete; + ~Arena(); // Return a pointer to a newly allocated memory block of "bytes" bytes. char* Allocate(size_t bytes); - // Allocate memory with the normal alignment guarantees provided by malloc + // Allocate memory with the normal alignment guarantees provided by malloc. char* AllocateAligned(size_t bytes); // Returns an estimate of the total memory usage of data allocated // by the arena. 
size_t MemoryUsage() const { - return reinterpret_cast<uintptr_t>(memory_usage_.NoBarrier_Load()); + return memory_usage_.load(std::memory_order_relaxed); } private: @@ -42,11 +46,10 @@ class Arena { std::vector<char*> blocks_; // Total memory usage of the arena. - port::AtomicPointer memory_usage_; - - // No copying allowed - Arena(const Arena&); - void operator=(const Arena&); + // + // TODO(costan): This member is accessed via atomics, but the others are + // accessed without any locking. Is this OK? + std::atomic<size_t> memory_usage_; }; inline char* Arena::Allocate(size_t bytes) { diff --git a/src/leveldb/util/arena_test.cc b/src/leveldb/util/arena_test.cc index 58e870ec44..e917228f42 100644 --- a/src/leveldb/util/arena_test.cc +++ b/src/leveldb/util/arena_test.cc @@ -9,14 +9,12 @@ namespace leveldb { -class ArenaTest { }; +class ArenaTest {}; -TEST(ArenaTest, Empty) { - Arena arena; -} +TEST(ArenaTest, Empty) { Arena arena; } TEST(ArenaTest, Simple) { - std::vector<std::pair<size_t, char*> > allocated; + std::vector<std::pair<size_t, char*>> allocated; Arena arena; const int N = 100000; size_t bytes = 0; @@ -26,8 +24,9 @@ TEST(ArenaTest, Simple) { if (i % (N / 10) == 0) { s = i; } else { - s = rnd.OneIn(4000) ? rnd.Uniform(6000) : - (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20)); + s = rnd.OneIn(4000) + ? rnd.Uniform(6000) + : (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20)); } if (s == 0) { // Our arena disallows size 0 allocations. @@ -47,7 +46,7 @@ TEST(ArenaTest, Simple) { bytes += s; allocated.push_back(std::make_pair(s, r)); ASSERT_GE(arena.MemoryUsage(), bytes); - if (i > N/10) { + if (i > N / 10) { ASSERT_LE(arena.MemoryUsage(), bytes * 1.10); } } @@ -63,6 +62,4 @@ TEST(ArenaTest, Simple) { } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/util/bloom.cc b/src/leveldb/util/bloom.cc index bf3e4ca6e9..87547a7e62 100644 --- a/src/leveldb/util/bloom.cc +++ b/src/leveldb/util/bloom.cc @@ -15,24 +15,17 @@ static uint32_t BloomHash(const Slice& key) { } class BloomFilterPolicy : public FilterPolicy { - private: - size_t bits_per_key_; - size_t k_; - public: - explicit BloomFilterPolicy(int bits_per_key) - : bits_per_key_(bits_per_key) { + explicit BloomFilterPolicy(int bits_per_key) : bits_per_key_(bits_per_key) { // We intentionally round down to reduce probing cost a little bit k_ = static_cast<size_t>(bits_per_key * 0.69); // 0.69 =~ ln(2) if (k_ < 1) k_ = 1; if (k_ > 30) k_ = 30; } - virtual const char* Name() const { - return "leveldb.BuiltinBloomFilter2"; - } + const char* Name() const override { return "leveldb.BuiltinBloomFilter2"; } - virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const { + void CreateFilter(const Slice* keys, int n, std::string* dst) const override { // Compute bloom filter size (in both bits and bytes) size_t bits = n * bits_per_key_; @@ -54,13 +47,13 @@ class BloomFilterPolicy : public FilterPolicy { const uint32_t delta = (h >> 17) | (h << 15); // Rotate right 17 bits for (size_t j = 0; j < k_; j++) { const uint32_t bitpos = h % bits; - array[bitpos/8] |= (1 << (bitpos % 8)); + array[bitpos / 8] |= (1 << (bitpos % 8)); h += delta; } } } - virtual bool KeyMayMatch(const Slice& key, const Slice& bloom_filter) const { + bool KeyMayMatch(const Slice& key, const Slice& bloom_filter) const override { const size_t len = bloom_filter.size(); if (len < 2) return false; @@ -69,7 +62,7 
@@ class BloomFilterPolicy : public FilterPolicy { // Use the encoded k so that we can read filters generated by // bloom filters created using different parameters. - const size_t k = array[len-1]; + const size_t k = array[len - 1]; if (k > 30) { // Reserved for potentially new encodings for short bloom filters. // Consider it a match. @@ -80,13 +73,17 @@ class BloomFilterPolicy : public FilterPolicy { const uint32_t delta = (h >> 17) | (h << 15); // Rotate right 17 bits for (size_t j = 0; j < k; j++) { const uint32_t bitpos = h % bits; - if ((array[bitpos/8] & (1 << (bitpos % 8))) == 0) return false; + if ((array[bitpos / 8] & (1 << (bitpos % 8))) == 0) return false; h += delta; } return true; } + + private: + size_t bits_per_key_; + size_t k_; }; -} +} // namespace const FilterPolicy* NewBloomFilterPolicy(int bits_per_key) { return new BloomFilterPolicy(bits_per_key); diff --git a/src/leveldb/util/bloom_test.cc b/src/leveldb/util/bloom_test.cc index 1b87a2be3f..436daa9e99 100644 --- a/src/leveldb/util/bloom_test.cc +++ b/src/leveldb/util/bloom_test.cc @@ -19,26 +19,17 @@ static Slice Key(int i, char* buffer) { } class BloomTest { - private: - const FilterPolicy* policy_; - std::string filter_; - std::vector<std::string> keys_; - public: - BloomTest() : policy_(NewBloomFilterPolicy(10)) { } + BloomTest() : policy_(NewBloomFilterPolicy(10)) {} - ~BloomTest() { - delete policy_; - } + ~BloomTest() { delete policy_; } void Reset() { keys_.clear(); filter_.clear(); } - void Add(const Slice& s) { - keys_.push_back(s.ToString()); - } + void Add(const Slice& s) { keys_.push_back(s.ToString()); } void Build() { std::vector<Slice> key_slices; @@ -52,16 +43,14 @@ class BloomTest { if (kVerbose >= 2) DumpFilter(); } - size_t FilterSize() const { - return filter_.size(); - } + size_t FilterSize() const { return filter_.size(); } void DumpFilter() { fprintf(stderr, "F("); - for (size_t i = 0; i+1 < filter_.size(); i++) { + for (size_t i = 0; i + 1 < filter_.size(); i++) { const unsigned int c = static_cast<unsigned int>(filter_[i]); for (int j = 0; j < 8; j++) { - fprintf(stderr, "%c", (c & (1 <<j)) ? '1' : '.'); + fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.'); } } fprintf(stderr, ")\n"); @@ -84,11 +73,16 @@ class BloomTest { } return result / 10000.0; } + + private: + const FilterPolicy* policy_; + std::string filter_; + std::vector<std::string> keys_; }; TEST(BloomTest, EmptyFilter) { - ASSERT_TRUE(! Matches("hello")); - ASSERT_TRUE(! Matches("world")); + ASSERT_TRUE(!Matches("hello")); + ASSERT_TRUE(!Matches("world")); } TEST(BloomTest, Small) { @@ -96,8 +90,8 @@ TEST(BloomTest, Small) { Add("world"); ASSERT_TRUE(Matches("hello")); ASSERT_TRUE(Matches("world")); - ASSERT_TRUE(! Matches("x")); - ASSERT_TRUE(! 
Matches("foo")); + ASSERT_TRUE(!Matches("x")); + ASSERT_TRUE(!Matches("foo")); } static int NextLength(int length) { @@ -140,23 +134,23 @@ TEST(BloomTest, VaryingLengths) { double rate = FalsePositiveRate(); if (kVerbose >= 1) { fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n", - rate*100.0, length, static_cast<int>(FilterSize())); + rate * 100.0, length, static_cast<int>(FilterSize())); } - ASSERT_LE(rate, 0.02); // Must not be over 2% - if (rate > 0.0125) mediocre_filters++; // Allowed, but not too often - else good_filters++; + ASSERT_LE(rate, 0.02); // Must not be over 2% + if (rate > 0.0125) + mediocre_filters++; // Allowed, but not too often + else + good_filters++; } if (kVerbose >= 1) { - fprintf(stderr, "Filters: %d good, %d mediocre\n", - good_filters, mediocre_filters); + fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters, + mediocre_filters); } - ASSERT_LE(mediocre_filters, good_filters/5); + ASSERT_LE(mediocre_filters, good_filters / 5); } // Different bits-per-byte } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/util/cache.cc b/src/leveldb/util/cache.cc index ce46886171..12de306cad 100644 --- a/src/leveldb/util/cache.cc +++ b/src/leveldb/util/cache.cc @@ -8,13 +8,13 @@ #include "leveldb/cache.h" #include "port/port.h" +#include "port/thread_annotations.h" #include "util/hash.h" #include "util/mutexlock.h" namespace leveldb { -Cache::~Cache() { -} +Cache::~Cache() {} namespace { @@ -45,21 +45,19 @@ struct LRUHandle { LRUHandle* next_hash; LRUHandle* next; LRUHandle* prev; - size_t charge; // TODO(opt): Only allow uint32_t? + size_t charge; // TODO(opt): Only allow uint32_t? size_t key_length; - bool in_cache; // Whether entry is in the cache. - uint32_t refs; // References, including cache reference, if present. - uint32_t hash; // Hash of key(); used for fast sharding and comparisons - char key_data[1]; // Beginning of key + bool in_cache; // Whether entry is in the cache. + uint32_t refs; // References, including cache reference, if present. + uint32_t hash; // Hash of key(); used for fast sharding and comparisons + char key_data[1]; // Beginning of key Slice key() const { - // For cheaper lookups, we allow a temporary Handle object - // to store a pointer to a key in "value". - if (next == this) { - return *(reinterpret_cast<Slice*>(value)); - } else { - return Slice(key_data, key_length); - } + // next_ is only equal to this if the LRU handle is the list head of an + // empty list. List heads never have meaningful keys. + assert(next != this); + + return Slice(key_data, key_length); } }; @@ -70,7 +68,7 @@ struct LRUHandle { // 4.4.3's builtin hashtable. class HandleTable { public: - HandleTable() : length_(0), elems_(0), list_(NULL) { Resize(); } + HandleTable() : length_(0), elems_(0), list_(nullptr) { Resize(); } ~HandleTable() { delete[] list_; } LRUHandle* Lookup(const Slice& key, uint32_t hash) { @@ -80,9 +78,9 @@ class HandleTable { LRUHandle* Insert(LRUHandle* h) { LRUHandle** ptr = FindPointer(h->key(), h->hash); LRUHandle* old = *ptr; - h->next_hash = (old == NULL ? NULL : old->next_hash); + h->next_hash = (old == nullptr ? 
nullptr : old->next_hash); *ptr = h; - if (old == NULL) { + if (old == nullptr) { ++elems_; if (elems_ > length_) { // Since each cache entry is fairly large, we aim for a small @@ -96,7 +94,7 @@ class HandleTable { LRUHandle* Remove(const Slice& key, uint32_t hash) { LRUHandle** ptr = FindPointer(key, hash); LRUHandle* result = *ptr; - if (result != NULL) { + if (result != nullptr) { *ptr = result->next_hash; --elems_; } @@ -115,8 +113,7 @@ class HandleTable { // pointer to the trailing slot in the corresponding linked list. LRUHandle** FindPointer(const Slice& key, uint32_t hash) { LRUHandle** ptr = &list_[hash & (length_ - 1)]; - while (*ptr != NULL && - ((*ptr)->hash != hash || key != (*ptr)->key())) { + while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) { ptr = &(*ptr)->next_hash; } return ptr; @@ -132,7 +129,7 @@ class HandleTable { uint32_t count = 0; for (uint32_t i = 0; i < length_; i++) { LRUHandle* h = list_[i]; - while (h != NULL) { + while (h != nullptr) { LRUHandle* next = h->next_hash; uint32_t hash = h->hash; LRUHandle** ptr = &new_list[hash & (new_length - 1)]; @@ -159,8 +156,8 @@ class LRUCache { void SetCapacity(size_t capacity) { capacity_ = capacity; } // Like Cache methods, but with an extra "hash" parameter. - Cache::Handle* Insert(const Slice& key, uint32_t hash, - void* value, size_t charge, + Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value, + size_t charge, void (*deleter)(const Slice& key, void* value)); Cache::Handle* Lookup(const Slice& key, uint32_t hash); void Release(Cache::Handle* handle); @@ -173,32 +170,31 @@ class LRUCache { private: void LRU_Remove(LRUHandle* e); - void LRU_Append(LRUHandle*list, LRUHandle* e); + void LRU_Append(LRUHandle* list, LRUHandle* e); void Ref(LRUHandle* e); void Unref(LRUHandle* e); - bool FinishErase(LRUHandle* e); + bool FinishErase(LRUHandle* e) EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Initialized before use. size_t capacity_; // mutex_ protects the following state. mutable port::Mutex mutex_; - size_t usage_; + size_t usage_ GUARDED_BY(mutex_); // Dummy head of LRU list. // lru.prev is newest entry, lru.next is oldest entry. // Entries have refs==1 and in_cache==true. - LRUHandle lru_; + LRUHandle lru_ GUARDED_BY(mutex_); // Dummy head of in-use list. // Entries are in use by clients, and have refs >= 2 and in_cache==true. - LRUHandle in_use_; + LRUHandle in_use_ GUARDED_BY(mutex_); - HandleTable table_; + HandleTable table_ GUARDED_BY(mutex_); }; -LRUCache::LRUCache() - : usage_(0) { +LRUCache::LRUCache() : capacity_(0), usage_(0) { // Make empty circular linked lists. lru_.next = &lru_; lru_.prev = &lru_; @@ -208,7 +204,7 @@ LRUCache::LRUCache() LRUCache::~LRUCache() { assert(in_use_.next == &in_use_); // Error if caller has an unreleased handle - for (LRUHandle* e = lru_.next; e != &lru_; ) { + for (LRUHandle* e = lru_.next; e != &lru_;) { LRUHandle* next = e->next; assert(e->in_cache); e->in_cache = false; @@ -229,11 +225,12 @@ void LRUCache::Ref(LRUHandle* e) { void LRUCache::Unref(LRUHandle* e) { assert(e->refs > 0); e->refs--; - if (e->refs == 0) { // Deallocate. + if (e->refs == 0) { // Deallocate. assert(!e->in_cache); (*e->deleter)(e->key(), e->value); free(e); - } else if (e->in_cache && e->refs == 1) { // No longer in use; move to lru_ list. + } else if (e->in_cache && e->refs == 1) { + // No longer in use; move to lru_ list. 
LRU_Remove(e); LRU_Append(&lru_, e); } @@ -255,7 +252,7 @@ void LRUCache::LRU_Append(LRUHandle* list, LRUHandle* e) { Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) { MutexLock l(&mutex_); LRUHandle* e = table_.Lookup(key, hash); - if (e != NULL) { + if (e != nullptr) { Ref(e); } return reinterpret_cast<Cache::Handle*>(e); @@ -266,13 +263,14 @@ void LRUCache::Release(Cache::Handle* handle) { Unref(reinterpret_cast<LRUHandle*>(handle)); } -Cache::Handle* LRUCache::Insert( - const Slice& key, uint32_t hash, void* value, size_t charge, - void (*deleter)(const Slice& key, void* value)) { +Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value, + size_t charge, + void (*deleter)(const Slice& key, + void* value)) { MutexLock l(&mutex_); - LRUHandle* e = reinterpret_cast<LRUHandle*>( - malloc(sizeof(LRUHandle)-1 + key.size())); + LRUHandle* e = + reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size())); e->value = value; e->deleter = deleter; e->charge = charge; @@ -288,8 +286,10 @@ Cache::Handle* LRUCache::Insert( LRU_Append(&in_use_, e); usage_ += charge; FinishErase(table_.Insert(e)); - } // else don't cache. (Tests use capacity_==0 to turn off caching.) - + } else { // don't cache. (capacity_==0 is supported and turns off caching.) + // next is read by key() in an assert, so it must be initialized + e->next = nullptr; + } while (usage_ > capacity_ && lru_.next != &lru_) { LRUHandle* old = lru_.next; assert(old->refs == 1); @@ -302,17 +302,17 @@ Cache::Handle* LRUCache::Insert( return reinterpret_cast<Cache::Handle*>(e); } -// If e != NULL, finish removing *e from the cache; it has already been removed -// from the hash table. Return whether e != NULL. Requires mutex_ held. +// If e != nullptr, finish removing *e from the cache; it has already been +// removed from the hash table. Return whether e != nullptr. 
bool LRUCache::FinishErase(LRUHandle* e) { - if (e != NULL) { + if (e != nullptr) { assert(e->in_cache); LRU_Remove(e); e->in_cache = false; usage_ -= e->charge; Unref(e); } - return e != NULL; + return e != nullptr; } void LRUCache::Erase(const Slice& key, uint32_t hash) { @@ -345,49 +345,46 @@ class ShardedLRUCache : public Cache { return Hash(s.data(), s.size(), 0); } - static uint32_t Shard(uint32_t hash) { - return hash >> (32 - kNumShardBits); - } + static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); } public: - explicit ShardedLRUCache(size_t capacity) - : last_id_(0) { + explicit ShardedLRUCache(size_t capacity) : last_id_(0) { const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards; for (int s = 0; s < kNumShards; s++) { shard_[s].SetCapacity(per_shard); } } - virtual ~ShardedLRUCache() { } - virtual Handle* Insert(const Slice& key, void* value, size_t charge, - void (*deleter)(const Slice& key, void* value)) { + ~ShardedLRUCache() override {} + Handle* Insert(const Slice& key, void* value, size_t charge, + void (*deleter)(const Slice& key, void* value)) override { const uint32_t hash = HashSlice(key); return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter); } - virtual Handle* Lookup(const Slice& key) { + Handle* Lookup(const Slice& key) override { const uint32_t hash = HashSlice(key); return shard_[Shard(hash)].Lookup(key, hash); } - virtual void Release(Handle* handle) { + void Release(Handle* handle) override { LRUHandle* h = reinterpret_cast<LRUHandle*>(handle); shard_[Shard(h->hash)].Release(handle); } - virtual void Erase(const Slice& key) { + void Erase(const Slice& key) override { const uint32_t hash = HashSlice(key); shard_[Shard(hash)].Erase(key, hash); } - virtual void* Value(Handle* handle) { + void* Value(Handle* handle) override { return reinterpret_cast<LRUHandle*>(handle)->value; } - virtual uint64_t NewId() { + uint64_t NewId() override { MutexLock l(&id_mutex_); return ++(last_id_); } - virtual void Prune() { + void Prune() override { for (int s = 0; s < kNumShards; s++) { shard_[s].Prune(); } } - virtual size_t TotalCharge() const { + size_t TotalCharge() const override { size_t total = 0; for (int s = 0; s < kNumShards; s++) { total += shard_[s].TotalCharge(); @@ -398,8 +395,6 @@ class ShardedLRUCache : public Cache { } // end anonymous namespace -Cache* NewLRUCache(size_t capacity) { - return new ShardedLRUCache(capacity); -} +Cache* NewLRUCache(size_t capacity) { return new ShardedLRUCache(capacity); } } // namespace leveldb diff --git a/src/leveldb/util/cache_test.cc b/src/leveldb/util/cache_test.cc index 468f7a6425..974334b9f8 100644 --- a/src/leveldb/util/cache_test.cc +++ b/src/leveldb/util/cache_test.cc @@ -25,8 +25,6 @@ static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); } class CacheTest { public: - static CacheTest* current_; - static void Deleter(const Slice& key, void* v) { current_->deleted_keys_.push_back(DecodeKey(key)); current_->deleted_values_.push_back(DecodeValue(v)); @@ -37,18 +35,14 @@ class CacheTest { std::vector<int> deleted_values_; Cache* cache_; - CacheTest() : cache_(NewLRUCache(kCacheSize)) { - current_ = this; - } + CacheTest() : cache_(NewLRUCache(kCacheSize)) { current_ = this; } - ~CacheTest() { - delete cache_; - } + ~CacheTest() { delete cache_; } int Lookup(int key) { Cache::Handle* handle = cache_->Lookup(EncodeKey(key)); - const int r = (handle == NULL) ? 
-1 : DecodeValue(cache_->Value(handle)); - if (handle != NULL) { + const int r = (handle == nullptr) ? -1 : DecodeValue(cache_->Value(handle)); + if (handle != nullptr) { cache_->Release(handle); } return r; @@ -64,9 +58,9 @@ class CacheTest { &CacheTest::Deleter); } - void Erase(int key) { - cache_->Erase(EncodeKey(key)); - } + void Erase(int key) { cache_->Erase(EncodeKey(key)); } + + static CacheTest* current_; }; CacheTest* CacheTest::current_; @@ -75,18 +69,18 @@ TEST(CacheTest, HitAndMiss) { Insert(100, 101); ASSERT_EQ(101, Lookup(100)); - ASSERT_EQ(-1, Lookup(200)); - ASSERT_EQ(-1, Lookup(300)); + ASSERT_EQ(-1, Lookup(200)); + ASSERT_EQ(-1, Lookup(300)); Insert(200, 201); ASSERT_EQ(101, Lookup(100)); ASSERT_EQ(201, Lookup(200)); - ASSERT_EQ(-1, Lookup(300)); + ASSERT_EQ(-1, Lookup(300)); Insert(100, 102); ASSERT_EQ(102, Lookup(100)); ASSERT_EQ(201, Lookup(200)); - ASSERT_EQ(-1, Lookup(300)); + ASSERT_EQ(-1, Lookup(300)); ASSERT_EQ(1, deleted_keys_.size()); ASSERT_EQ(100, deleted_keys_[0]); @@ -100,14 +94,14 @@ TEST(CacheTest, Erase) { Insert(100, 101); Insert(200, 201); Erase(100); - ASSERT_EQ(-1, Lookup(100)); + ASSERT_EQ(-1, Lookup(100)); ASSERT_EQ(201, Lookup(200)); ASSERT_EQ(1, deleted_keys_.size()); ASSERT_EQ(100, deleted_keys_[0]); ASSERT_EQ(101, deleted_values_[0]); Erase(100); - ASSERT_EQ(-1, Lookup(100)); + ASSERT_EQ(-1, Lookup(100)); ASSERT_EQ(201, Lookup(200)); ASSERT_EQ(1, deleted_keys_.size()); } @@ -146,8 +140,8 @@ TEST(CacheTest, EvictionPolicy) { // Frequently used entry must be kept around, // as must things that are still in use. for (int i = 0; i < kCacheSize + 100; i++) { - Insert(1000+i, 2000+i); - ASSERT_EQ(2000+i, Lookup(1000+i)); + Insert(1000 + i, 2000 + i); + ASSERT_EQ(2000 + i, Lookup(1000 + i)); ASSERT_EQ(101, Lookup(100)); } ASSERT_EQ(101, Lookup(100)); @@ -160,12 +154,12 @@ TEST(CacheTest, UseExceedsCacheSize) { // Overfill the cache, keeping handles on all inserted entries. std::vector<Cache::Handle*> h; for (int i = 0; i < kCacheSize + 100; i++) { - h.push_back(InsertAndReturnHandle(1000+i, 2000+i)); + h.push_back(InsertAndReturnHandle(1000 + i, 2000 + i)); } // Check that all the entries can be found in the cache. for (int i = 0; i < h.size(); i++) { - ASSERT_EQ(2000+i, Lookup(1000+i)); + ASSERT_EQ(2000 + i, Lookup(1000 + i)); } for (int i = 0; i < h.size(); i++) { @@ -181,9 +175,9 @@ TEST(CacheTest, HeavyEntries) { const int kHeavy = 10; int added = 0; int index = 0; - while (added < 2*kCacheSize) { + while (added < 2 * kCacheSize) { const int weight = (index & 1) ? 
kLight : kHeavy; - Insert(index, 1000+index, weight); + Insert(index, 1000 + index, weight); added += weight; index++; } @@ -194,10 +188,10 @@ TEST(CacheTest, HeavyEntries) { int r = Lookup(i); if (r >= 0) { cached_weight += weight; - ASSERT_EQ(1000+i, r); + ASSERT_EQ(1000 + i, r); } } - ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10); + ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10); } TEST(CacheTest, NewId) { @@ -219,8 +213,14 @@ TEST(CacheTest, Prune) { ASSERT_EQ(-1, Lookup(2)); } -} // namespace leveldb +TEST(CacheTest, ZeroSizeCache) { + delete cache_; + cache_ = NewLRUCache(0); -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); + Insert(1, 100); + ASSERT_EQ(-1, Lookup(1)); } + +} // namespace leveldb + +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/util/coding.cc b/src/leveldb/util/coding.cc index 21e3186d5d..df3fa10f0d 100644 --- a/src/leveldb/util/coding.cc +++ b/src/leveldb/util/coding.cc @@ -6,32 +6,6 @@ namespace leveldb { -void EncodeFixed32(char* buf, uint32_t value) { - if (port::kLittleEndian) { - memcpy(buf, &value, sizeof(value)); - } else { - buf[0] = value & 0xff; - buf[1] = (value >> 8) & 0xff; - buf[2] = (value >> 16) & 0xff; - buf[3] = (value >> 24) & 0xff; - } -} - -void EncodeFixed64(char* buf, uint64_t value) { - if (port::kLittleEndian) { - memcpy(buf, &value, sizeof(value)); - } else { - buf[0] = value & 0xff; - buf[1] = (value >> 8) & 0xff; - buf[2] = (value >> 16) & 0xff; - buf[3] = (value >> 24) & 0xff; - buf[4] = (value >> 32) & 0xff; - buf[5] = (value >> 40) & 0xff; - buf[6] = (value >> 48) & 0xff; - buf[7] = (value >> 56) & 0xff; - } -} - void PutFixed32(std::string* dst, uint32_t value) { char buf[sizeof(value)]; EncodeFixed32(buf, value); @@ -46,28 +20,28 @@ void PutFixed64(std::string* dst, uint64_t value) { char* EncodeVarint32(char* dst, uint32_t v) { // Operate on characters as unsigneds - unsigned char* ptr = reinterpret_cast<unsigned char*>(dst); + uint8_t* ptr = reinterpret_cast<uint8_t*>(dst); static const int B = 128; - if (v < (1<<7)) { + if (v < (1 << 7)) { *(ptr++) = v; - } else if (v < (1<<14)) { + } else if (v < (1 << 14)) { *(ptr++) = v | B; - *(ptr++) = v>>7; - } else if (v < (1<<21)) { + *(ptr++) = v >> 7; + } else if (v < (1 << 21)) { *(ptr++) = v | B; - *(ptr++) = (v>>7) | B; - *(ptr++) = v>>14; - } else if (v < (1<<28)) { + *(ptr++) = (v >> 7) | B; + *(ptr++) = v >> 14; + } else if (v < (1 << 28)) { *(ptr++) = v | B; - *(ptr++) = (v>>7) | B; - *(ptr++) = (v>>14) | B; - *(ptr++) = v>>21; + *(ptr++) = (v >> 7) | B; + *(ptr++) = (v >> 14) | B; + *(ptr++) = v >> 21; } else { *(ptr++) = v | B; - *(ptr++) = (v>>7) | B; - *(ptr++) = (v>>14) | B; - *(ptr++) = (v>>21) | B; - *(ptr++) = v>>28; + *(ptr++) = (v >> 7) | B; + *(ptr++) = (v >> 14) | B; + *(ptr++) = (v >> 21) | B; + *(ptr++) = v >> 28; } return reinterpret_cast<char*>(ptr); } @@ -80,12 +54,12 @@ void PutVarint32(std::string* dst, uint32_t v) { char* EncodeVarint64(char* dst, uint64_t v) { static const int B = 128; - unsigned char* ptr = reinterpret_cast<unsigned char*>(dst); + uint8_t* ptr = reinterpret_cast<uint8_t*>(dst); while (v >= B) { - *(ptr++) = (v & (B-1)) | B; + *(ptr++) = v | B; v >>= 7; } - *(ptr++) = static_cast<unsigned char>(v); + *(ptr++) = static_cast<uint8_t>(v); return reinterpret_cast<char*>(ptr); } @@ -109,12 +83,11 @@ int VarintLength(uint64_t v) { return len; } -const char* GetVarint32PtrFallback(const char* p, - const char* limit, +const char* GetVarint32PtrFallback(const 
char* p, const char* limit, uint32_t* value) { uint32_t result = 0; for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) { - uint32_t byte = *(reinterpret_cast<const unsigned char*>(p)); + uint32_t byte = *(reinterpret_cast<const uint8_t*>(p)); p++; if (byte & 128) { // More bytes are present @@ -125,14 +98,14 @@ const char* GetVarint32PtrFallback(const char* p, return reinterpret_cast<const char*>(p); } } - return NULL; + return nullptr; } bool GetVarint32(Slice* input, uint32_t* value) { const char* p = input->data(); const char* limit = p + input->size(); const char* q = GetVarint32Ptr(p, limit, value); - if (q == NULL) { + if (q == nullptr) { return false; } else { *input = Slice(q, limit - q); @@ -143,7 +116,7 @@ bool GetVarint32(Slice* input, uint32_t* value) { const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) { uint64_t result = 0; for (uint32_t shift = 0; shift <= 63 && p < limit; shift += 7) { - uint64_t byte = *(reinterpret_cast<const unsigned char*>(p)); + uint64_t byte = *(reinterpret_cast<const uint8_t*>(p)); p++; if (byte & 128) { // More bytes are present @@ -154,14 +127,14 @@ const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) { return reinterpret_cast<const char*>(p); } } - return NULL; + return nullptr; } bool GetVarint64(Slice* input, uint64_t* value) { const char* p = input->data(); const char* limit = p + input->size(); const char* q = GetVarint64Ptr(p, limit, value); - if (q == NULL) { + if (q == nullptr) { return false; } else { *input = Slice(q, limit - q); @@ -173,16 +146,15 @@ const char* GetLengthPrefixedSlice(const char* p, const char* limit, Slice* result) { uint32_t len; p = GetVarint32Ptr(p, limit, &len); - if (p == NULL) return NULL; - if (p + len > limit) return NULL; + if (p == nullptr) return nullptr; + if (p + len > limit) return nullptr; *result = Slice(p, len); return p + len; } bool GetLengthPrefixedSlice(Slice* input, Slice* result) { uint32_t len; - if (GetVarint32(input, &len) && - input->size() >= len) { + if (GetVarint32(input, &len) && input->size() >= len) { *result = Slice(input->data(), len); input->remove_prefix(len); return true; diff --git a/src/leveldb/util/coding.h b/src/leveldb/util/coding.h index 3993c4a755..1983ae7173 100644 --- a/src/leveldb/util/coding.h +++ b/src/leveldb/util/coding.h @@ -10,87 +10,147 @@ #ifndef STORAGE_LEVELDB_UTIL_CODING_H_ #define STORAGE_LEVELDB_UTIL_CODING_H_ -#include <stdint.h> -#include <string.h> +#include <cstdint> +#include <cstring> #include <string> + #include "leveldb/slice.h" #include "port/port.h" namespace leveldb { // Standard Put... routines append to a string -extern void PutFixed32(std::string* dst, uint32_t value); -extern void PutFixed64(std::string* dst, uint64_t value); -extern void PutVarint32(std::string* dst, uint32_t value); -extern void PutVarint64(std::string* dst, uint64_t value); -extern void PutLengthPrefixedSlice(std::string* dst, const Slice& value); +void PutFixed32(std::string* dst, uint32_t value); +void PutFixed64(std::string* dst, uint64_t value); +void PutVarint32(std::string* dst, uint32_t value); +void PutVarint64(std::string* dst, uint64_t value); +void PutLengthPrefixedSlice(std::string* dst, const Slice& value); // Standard Get... routines parse a value from the beginning of a Slice // and advance the slice past the parsed value. 
-extern bool GetVarint32(Slice* input, uint32_t* value); -extern bool GetVarint64(Slice* input, uint64_t* value); -extern bool GetLengthPrefixedSlice(Slice* input, Slice* result); +bool GetVarint32(Slice* input, uint32_t* value); +bool GetVarint64(Slice* input, uint64_t* value); +bool GetLengthPrefixedSlice(Slice* input, Slice* result); // Pointer-based variants of GetVarint... These either store a value // in *v and return a pointer just past the parsed value, or return -// NULL on error. These routines only look at bytes in the range +// nullptr on error. These routines only look at bytes in the range // [p..limit-1] -extern const char* GetVarint32Ptr(const char* p,const char* limit, uint32_t* v); -extern const char* GetVarint64Ptr(const char* p,const char* limit, uint64_t* v); +const char* GetVarint32Ptr(const char* p, const char* limit, uint32_t* v); +const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* v); // Returns the length of the varint32 or varint64 encoding of "v" -extern int VarintLength(uint64_t v); +int VarintLength(uint64_t v); // Lower-level versions of Put... that write directly into a character buffer +// and return a pointer just past the last byte written. // REQUIRES: dst has enough space for the value being written -extern void EncodeFixed32(char* dst, uint32_t value); -extern void EncodeFixed64(char* dst, uint64_t value); +char* EncodeVarint32(char* dst, uint32_t value); +char* EncodeVarint64(char* dst, uint64_t value); + +// TODO(costan): Remove port::kLittleEndian and the fast paths based on +// std::memcpy when clang learns to optimize the generic code, as +// described in https://bugs.llvm.org/show_bug.cgi?id=41761 +// +// The platform-independent code in DecodeFixed{32,64}() gets optimized to mov +// on x86 and ldr on ARM64, by both clang and gcc. However, only gcc optimizes +// the platform-independent code in EncodeFixed{32,64}() to mov / str. // Lower-level versions of Put... that write directly into a character buffer -// and return a pointer just past the last byte written. // REQUIRES: dst has enough space for the value being written -extern char* EncodeVarint32(char* dst, uint32_t value); -extern char* EncodeVarint64(char* dst, uint64_t value); + +inline void EncodeFixed32(char* dst, uint32_t value) { + uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst); + + if (port::kLittleEndian) { + // Fast path for little-endian CPUs. All major compilers optimize this to a + // single mov (x86_64) / str (ARM) instruction. + std::memcpy(buffer, &value, sizeof(uint32_t)); + return; + } + + // Platform-independent code. + // Currently, only gcc optimizes this to a single mov / str instruction. + buffer[0] = static_cast<uint8_t>(value); + buffer[1] = static_cast<uint8_t>(value >> 8); + buffer[2] = static_cast<uint8_t>(value >> 16); + buffer[3] = static_cast<uint8_t>(value >> 24); +} + +inline void EncodeFixed64(char* dst, uint64_t value) { + uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst); + + if (port::kLittleEndian) { + // Fast path for little-endian CPUs. All major compilers optimize this to a + // single mov (x86_64) / str (ARM) instruction. + std::memcpy(buffer, &value, sizeof(uint64_t)); + return; + } + + // Platform-independent code. + // Currently, only gcc optimizes this to a single mov / str instruction. 
+ buffer[0] = static_cast<uint8_t>(value); + buffer[1] = static_cast<uint8_t>(value >> 8); + buffer[2] = static_cast<uint8_t>(value >> 16); + buffer[3] = static_cast<uint8_t>(value >> 24); + buffer[4] = static_cast<uint8_t>(value >> 32); + buffer[5] = static_cast<uint8_t>(value >> 40); + buffer[6] = static_cast<uint8_t>(value >> 48); + buffer[7] = static_cast<uint8_t>(value >> 56); +} // Lower-level versions of Get... that read directly from a character buffer // without any bounds checking. inline uint32_t DecodeFixed32(const char* ptr) { + const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr); + if (port::kLittleEndian) { - // Load the raw bytes + // Fast path for little-endian CPUs. All major compilers optimize this to a + // single mov (x86_64) / ldr (ARM) instruction. uint32_t result; - memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load + std::memcpy(&result, buffer, sizeof(uint32_t)); return result; - } else { - return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0]))) - | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8) - | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16) - | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24)); } + + // Platform-independent code. + // Clang and gcc optimize this to a single mov / ldr instruction. + return (static_cast<uint32_t>(buffer[0])) | + (static_cast<uint32_t>(buffer[1]) << 8) | + (static_cast<uint32_t>(buffer[2]) << 16) | + (static_cast<uint32_t>(buffer[3]) << 24); } inline uint64_t DecodeFixed64(const char* ptr) { + const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr); + if (port::kLittleEndian) { - // Load the raw bytes + // Fast path for little-endian CPUs. All major compilers optimize this to a + // single mov (x86_64) / ldr (ARM) instruction. uint64_t result; - memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load + std::memcpy(&result, buffer, sizeof(uint64_t)); return result; - } else { - uint64_t lo = DecodeFixed32(ptr); - uint64_t hi = DecodeFixed32(ptr + 4); - return (hi << 32) | lo; } + + // Platform-independent code. + // Clang and gcc optimize this to a single mov / ldr instruction. + return (static_cast<uint64_t>(buffer[0])) | + (static_cast<uint64_t>(buffer[1]) << 8) | + (static_cast<uint64_t>(buffer[2]) << 16) | + (static_cast<uint64_t>(buffer[3]) << 24) | + (static_cast<uint64_t>(buffer[4]) << 32) | + (static_cast<uint64_t>(buffer[5]) << 40) | + (static_cast<uint64_t>(buffer[6]) << 48) | + (static_cast<uint64_t>(buffer[7]) << 56); } // Internal routine for use by fallback path of GetVarint32Ptr -extern const char* GetVarint32PtrFallback(const char* p, - const char* limit, - uint32_t* value); -inline const char* GetVarint32Ptr(const char* p, - const char* limit, +const char* GetVarint32PtrFallback(const char* p, const char* limit, + uint32_t* value); +inline const char* GetVarint32Ptr(const char* p, const char* limit, uint32_t* value) { if (p < limit) { - uint32_t result = *(reinterpret_cast<const unsigned char*>(p)); + uint32_t result = *(reinterpret_cast<const uint8_t*>(p)); if ((result & 128) == 0) { *value = result; return p + 1; diff --git a/src/leveldb/util/coding_test.cc b/src/leveldb/util/coding_test.cc index 521541ea61..0d2a0c51f6 100644 --- a/src/leveldb/util/coding_test.cc +++ b/src/leveldb/util/coding_test.cc @@ -2,13 +2,14 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
-#include "util/coding.h" +#include <vector> +#include "util/coding.h" #include "util/testharness.h" namespace leveldb { -class Coding { }; +class Coding {}; TEST(Coding, Fixed32) { std::string s; @@ -38,15 +39,15 @@ TEST(Coding, Fixed64) { uint64_t v = static_cast<uint64_t>(1) << power; uint64_t actual; actual = DecodeFixed64(p); - ASSERT_EQ(v-1, actual); + ASSERT_EQ(v - 1, actual); p += sizeof(uint64_t); actual = DecodeFixed64(p); - ASSERT_EQ(v+0, actual); + ASSERT_EQ(v + 0, actual); p += sizeof(uint64_t); actual = DecodeFixed64(p); - ASSERT_EQ(v+1, actual); + ASSERT_EQ(v + 1, actual); p += sizeof(uint64_t); } } @@ -88,7 +89,7 @@ TEST(Coding, Varint32) { uint32_t actual; const char* start = p; p = GetVarint32Ptr(p, limit, &actual); - ASSERT_TRUE(p != NULL); + ASSERT_TRUE(p != nullptr); ASSERT_EQ(expected, actual); ASSERT_EQ(VarintLength(actual), p - start); } @@ -107,8 +108,8 @@ TEST(Coding, Varint64) { // Test values near powers of two const uint64_t power = 1ull << k; values.push_back(power); - values.push_back(power-1); - values.push_back(power+1); + values.push_back(power - 1); + values.push_back(power + 1); } std::string s; @@ -123,19 +124,18 @@ TEST(Coding, Varint64) { uint64_t actual; const char* start = p; p = GetVarint64Ptr(p, limit, &actual); - ASSERT_TRUE(p != NULL); + ASSERT_TRUE(p != nullptr); ASSERT_EQ(values[i], actual); ASSERT_EQ(VarintLength(actual), p - start); } ASSERT_EQ(p, limit); - } TEST(Coding, Varint32Overflow) { uint32_t result; std::string input("\x81\x82\x83\x84\x85\x11"); - ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result) - == NULL); + ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), + &result) == nullptr); } TEST(Coding, Varint32Truncation) { @@ -144,17 +144,18 @@ TEST(Coding, Varint32Truncation) { PutVarint32(&s, large_value); uint32_t result; for (size_t len = 0; len < s.size() - 1; len++) { - ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == NULL); + ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr); } - ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != NULL); + ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != + nullptr); ASSERT_EQ(large_value, result); } TEST(Coding, Varint64Overflow) { uint64_t result; std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11"); - ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result) - == NULL); + ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), + &result) == nullptr); } TEST(Coding, Varint64Truncation) { @@ -163,9 +164,10 @@ TEST(Coding, Varint64Truncation) { PutVarint64(&s, large_value); uint64_t result; for (size_t len = 0; len < s.size() - 1; len++) { - ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == NULL); + ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr); } - ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != NULL); + ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != + nullptr); ASSERT_EQ(large_value, result); } @@ -191,6 +193,4 @@ TEST(Coding, Strings) { } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/util/comparator.cc b/src/leveldb/util/comparator.cc index 4b7b5724ef..c5766e9462 100644 --- a/src/leveldb/util/comparator.cc +++ b/src/leveldb/util/comparator.cc @@ -2,33 +2,34 @@ // Use of this source code 
is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. -#include <algorithm> -#include <stdint.h> #include "leveldb/comparator.h" + +#include <algorithm> +#include <cstdint> +#include <string> +#include <type_traits> + #include "leveldb/slice.h" -#include "port/port.h" #include "util/logging.h" +#include "util/no_destructor.h" namespace leveldb { -Comparator::~Comparator() { } +Comparator::~Comparator() = default; namespace { class BytewiseComparatorImpl : public Comparator { public: - BytewiseComparatorImpl() { } + BytewiseComparatorImpl() = default; - virtual const char* Name() const { - return "leveldb.BytewiseComparator"; - } + const char* Name() const override { return "leveldb.BytewiseComparator"; } - virtual int Compare(const Slice& a, const Slice& b) const { + int Compare(const Slice& a, const Slice& b) const override { return a.compare(b); } - virtual void FindShortestSeparator( - std::string* start, - const Slice& limit) const { + void FindShortestSeparator(std::string* start, + const Slice& limit) const override { // Find length of common prefix size_t min_length = std::min(start->size(), limit.size()); size_t diff_index = 0; @@ -50,14 +51,14 @@ class BytewiseComparatorImpl : public Comparator { } } - virtual void FindShortSuccessor(std::string* key) const { + void FindShortSuccessor(std::string* key) const override { // Find first character that can be incremented size_t n = key->size(); for (size_t i = 0; i < n; i++) { const uint8_t byte = (*key)[i]; if (byte != static_cast<uint8_t>(0xff)) { (*key)[i] = byte + 1; - key->resize(i+1); + key->resize(i + 1); return; } } @@ -66,16 +67,9 @@ class BytewiseComparatorImpl : public Comparator { }; } // namespace -static port::OnceType once = LEVELDB_ONCE_INIT; -static const Comparator* bytewise; - -static void InitModule() { - bytewise = new BytewiseComparatorImpl; -} - const Comparator* BytewiseComparator() { - port::InitOnce(&once, InitModule); - return bytewise; + static NoDestructor<BytewiseComparatorImpl> singleton; + return singleton.get(); } } // namespace leveldb diff --git a/src/leveldb/util/crc32c.cc b/src/leveldb/util/crc32c.cc index b3f40eeeed..c2e61f7dba 100644 --- a/src/leveldb/util/crc32c.cc +++ b/src/leveldb/util/crc32c.cc @@ -2,11 +2,11 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // -// A portable implementation of crc32c, optimized to handle -// four bytes at a time. +// A portable implementation of crc32c. 
#include "util/crc32c.h" +#include <stddef.h> #include <stdint.h> #include "port/port.h" @@ -15,283 +15,256 @@ namespace leveldb { namespace crc32c { -static const uint32_t table0_[256] = { - 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, - 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb, - 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, - 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, - 0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b, - 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384, - 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, - 0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b, - 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, - 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, - 0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5, - 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa, - 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, - 0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a, - 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, - 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, - 0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48, - 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957, - 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, - 0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198, - 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, - 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, - 0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8, - 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7, - 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, - 0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789, - 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, - 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, - 0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9, - 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6, - 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, - 0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829, - 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, - 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, - 0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043, - 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c, - 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, - 0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc, - 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, - 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, - 0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652, - 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d, - 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, - 0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982, - 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, - 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, - 0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2, - 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed, - 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, - 0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f, - 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, - 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, - 0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f, - 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540, - 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, - 0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f, - 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, - 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, - 0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321, - 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e, - 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, - 0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e, - 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, - 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351 -}; -static const uint32_t table1_[256] = { - 0x00000000, 0x13a29877, 0x274530ee, 0x34e7a899, - 0x4e8a61dc, 0x5d28f9ab, 0x69cf5132, 0x7a6dc945, - 0x9d14c3b8, 
0x8eb65bcf, 0xba51f356, 0xa9f36b21, - 0xd39ea264, 0xc03c3a13, 0xf4db928a, 0xe7790afd, - 0x3fc5f181, 0x2c6769f6, 0x1880c16f, 0x0b225918, - 0x714f905d, 0x62ed082a, 0x560aa0b3, 0x45a838c4, - 0xa2d13239, 0xb173aa4e, 0x859402d7, 0x96369aa0, - 0xec5b53e5, 0xfff9cb92, 0xcb1e630b, 0xd8bcfb7c, - 0x7f8be302, 0x6c297b75, 0x58ced3ec, 0x4b6c4b9b, - 0x310182de, 0x22a31aa9, 0x1644b230, 0x05e62a47, - 0xe29f20ba, 0xf13db8cd, 0xc5da1054, 0xd6788823, - 0xac154166, 0xbfb7d911, 0x8b507188, 0x98f2e9ff, - 0x404e1283, 0x53ec8af4, 0x670b226d, 0x74a9ba1a, - 0x0ec4735f, 0x1d66eb28, 0x298143b1, 0x3a23dbc6, - 0xdd5ad13b, 0xcef8494c, 0xfa1fe1d5, 0xe9bd79a2, - 0x93d0b0e7, 0x80722890, 0xb4958009, 0xa737187e, - 0xff17c604, 0xecb55e73, 0xd852f6ea, 0xcbf06e9d, - 0xb19da7d8, 0xa23f3faf, 0x96d89736, 0x857a0f41, - 0x620305bc, 0x71a19dcb, 0x45463552, 0x56e4ad25, - 0x2c896460, 0x3f2bfc17, 0x0bcc548e, 0x186eccf9, - 0xc0d23785, 0xd370aff2, 0xe797076b, 0xf4359f1c, - 0x8e585659, 0x9dface2e, 0xa91d66b7, 0xbabffec0, - 0x5dc6f43d, 0x4e646c4a, 0x7a83c4d3, 0x69215ca4, - 0x134c95e1, 0x00ee0d96, 0x3409a50f, 0x27ab3d78, - 0x809c2506, 0x933ebd71, 0xa7d915e8, 0xb47b8d9f, - 0xce1644da, 0xddb4dcad, 0xe9537434, 0xfaf1ec43, - 0x1d88e6be, 0x0e2a7ec9, 0x3acdd650, 0x296f4e27, - 0x53028762, 0x40a01f15, 0x7447b78c, 0x67e52ffb, - 0xbf59d487, 0xacfb4cf0, 0x981ce469, 0x8bbe7c1e, - 0xf1d3b55b, 0xe2712d2c, 0xd69685b5, 0xc5341dc2, - 0x224d173f, 0x31ef8f48, 0x050827d1, 0x16aabfa6, - 0x6cc776e3, 0x7f65ee94, 0x4b82460d, 0x5820de7a, - 0xfbc3faf9, 0xe861628e, 0xdc86ca17, 0xcf245260, - 0xb5499b25, 0xa6eb0352, 0x920cabcb, 0x81ae33bc, - 0x66d73941, 0x7575a136, 0x419209af, 0x523091d8, - 0x285d589d, 0x3bffc0ea, 0x0f186873, 0x1cbaf004, - 0xc4060b78, 0xd7a4930f, 0xe3433b96, 0xf0e1a3e1, - 0x8a8c6aa4, 0x992ef2d3, 0xadc95a4a, 0xbe6bc23d, - 0x5912c8c0, 0x4ab050b7, 0x7e57f82e, 0x6df56059, - 0x1798a91c, 0x043a316b, 0x30dd99f2, 0x237f0185, - 0x844819fb, 0x97ea818c, 0xa30d2915, 0xb0afb162, - 0xcac27827, 0xd960e050, 0xed8748c9, 0xfe25d0be, - 0x195cda43, 0x0afe4234, 0x3e19eaad, 0x2dbb72da, - 0x57d6bb9f, 0x447423e8, 0x70938b71, 0x63311306, - 0xbb8de87a, 0xa82f700d, 0x9cc8d894, 0x8f6a40e3, - 0xf50789a6, 0xe6a511d1, 0xd242b948, 0xc1e0213f, - 0x26992bc2, 0x353bb3b5, 0x01dc1b2c, 0x127e835b, - 0x68134a1e, 0x7bb1d269, 0x4f567af0, 0x5cf4e287, - 0x04d43cfd, 0x1776a48a, 0x23910c13, 0x30339464, - 0x4a5e5d21, 0x59fcc556, 0x6d1b6dcf, 0x7eb9f5b8, - 0x99c0ff45, 0x8a626732, 0xbe85cfab, 0xad2757dc, - 0xd74a9e99, 0xc4e806ee, 0xf00fae77, 0xe3ad3600, - 0x3b11cd7c, 0x28b3550b, 0x1c54fd92, 0x0ff665e5, - 0x759baca0, 0x663934d7, 0x52de9c4e, 0x417c0439, - 0xa6050ec4, 0xb5a796b3, 0x81403e2a, 0x92e2a65d, - 0xe88f6f18, 0xfb2df76f, 0xcfca5ff6, 0xdc68c781, - 0x7b5fdfff, 0x68fd4788, 0x5c1aef11, 0x4fb87766, - 0x35d5be23, 0x26772654, 0x12908ecd, 0x013216ba, - 0xe64b1c47, 0xf5e98430, 0xc10e2ca9, 0xd2acb4de, - 0xa8c17d9b, 0xbb63e5ec, 0x8f844d75, 0x9c26d502, - 0x449a2e7e, 0x5738b609, 0x63df1e90, 0x707d86e7, - 0x0a104fa2, 0x19b2d7d5, 0x2d557f4c, 0x3ef7e73b, - 0xd98eedc6, 0xca2c75b1, 0xfecbdd28, 0xed69455f, - 0x97048c1a, 0x84a6146d, 0xb041bcf4, 0xa3e32483 -}; -static const uint32_t table2_[256] = { - 0x00000000, 0xa541927e, 0x4f6f520d, 0xea2ec073, - 0x9edea41a, 0x3b9f3664, 0xd1b1f617, 0x74f06469, - 0x38513ec5, 0x9d10acbb, 0x773e6cc8, 0xd27ffeb6, - 0xa68f9adf, 0x03ce08a1, 0xe9e0c8d2, 0x4ca15aac, - 0x70a27d8a, 0xd5e3eff4, 0x3fcd2f87, 0x9a8cbdf9, - 0xee7cd990, 0x4b3d4bee, 0xa1138b9d, 0x045219e3, - 0x48f3434f, 0xedb2d131, 0x079c1142, 0xa2dd833c, - 0xd62de755, 0x736c752b, 0x9942b558, 0x3c032726, - 0xe144fb14, 0x4405696a, 
0xae2ba919, 0x0b6a3b67, - 0x7f9a5f0e, 0xdadbcd70, 0x30f50d03, 0x95b49f7d, - 0xd915c5d1, 0x7c5457af, 0x967a97dc, 0x333b05a2, - 0x47cb61cb, 0xe28af3b5, 0x08a433c6, 0xade5a1b8, - 0x91e6869e, 0x34a714e0, 0xde89d493, 0x7bc846ed, - 0x0f382284, 0xaa79b0fa, 0x40577089, 0xe516e2f7, - 0xa9b7b85b, 0x0cf62a25, 0xe6d8ea56, 0x43997828, - 0x37691c41, 0x92288e3f, 0x78064e4c, 0xdd47dc32, - 0xc76580d9, 0x622412a7, 0x880ad2d4, 0x2d4b40aa, - 0x59bb24c3, 0xfcfab6bd, 0x16d476ce, 0xb395e4b0, - 0xff34be1c, 0x5a752c62, 0xb05bec11, 0x151a7e6f, - 0x61ea1a06, 0xc4ab8878, 0x2e85480b, 0x8bc4da75, - 0xb7c7fd53, 0x12866f2d, 0xf8a8af5e, 0x5de93d20, - 0x29195949, 0x8c58cb37, 0x66760b44, 0xc337993a, - 0x8f96c396, 0x2ad751e8, 0xc0f9919b, 0x65b803e5, - 0x1148678c, 0xb409f5f2, 0x5e273581, 0xfb66a7ff, - 0x26217bcd, 0x8360e9b3, 0x694e29c0, 0xcc0fbbbe, - 0xb8ffdfd7, 0x1dbe4da9, 0xf7908dda, 0x52d11fa4, - 0x1e704508, 0xbb31d776, 0x511f1705, 0xf45e857b, - 0x80aee112, 0x25ef736c, 0xcfc1b31f, 0x6a802161, - 0x56830647, 0xf3c29439, 0x19ec544a, 0xbcadc634, - 0xc85da25d, 0x6d1c3023, 0x8732f050, 0x2273622e, - 0x6ed23882, 0xcb93aafc, 0x21bd6a8f, 0x84fcf8f1, - 0xf00c9c98, 0x554d0ee6, 0xbf63ce95, 0x1a225ceb, - 0x8b277743, 0x2e66e53d, 0xc448254e, 0x6109b730, - 0x15f9d359, 0xb0b84127, 0x5a968154, 0xffd7132a, - 0xb3764986, 0x1637dbf8, 0xfc191b8b, 0x595889f5, - 0x2da8ed9c, 0x88e97fe2, 0x62c7bf91, 0xc7862def, - 0xfb850ac9, 0x5ec498b7, 0xb4ea58c4, 0x11abcaba, - 0x655baed3, 0xc01a3cad, 0x2a34fcde, 0x8f756ea0, - 0xc3d4340c, 0x6695a672, 0x8cbb6601, 0x29faf47f, - 0x5d0a9016, 0xf84b0268, 0x1265c21b, 0xb7245065, - 0x6a638c57, 0xcf221e29, 0x250cde5a, 0x804d4c24, - 0xf4bd284d, 0x51fcba33, 0xbbd27a40, 0x1e93e83e, - 0x5232b292, 0xf77320ec, 0x1d5de09f, 0xb81c72e1, - 0xccec1688, 0x69ad84f6, 0x83834485, 0x26c2d6fb, - 0x1ac1f1dd, 0xbf8063a3, 0x55aea3d0, 0xf0ef31ae, - 0x841f55c7, 0x215ec7b9, 0xcb7007ca, 0x6e3195b4, - 0x2290cf18, 0x87d15d66, 0x6dff9d15, 0xc8be0f6b, - 0xbc4e6b02, 0x190ff97c, 0xf321390f, 0x5660ab71, - 0x4c42f79a, 0xe90365e4, 0x032da597, 0xa66c37e9, - 0xd29c5380, 0x77ddc1fe, 0x9df3018d, 0x38b293f3, - 0x7413c95f, 0xd1525b21, 0x3b7c9b52, 0x9e3d092c, - 0xeacd6d45, 0x4f8cff3b, 0xa5a23f48, 0x00e3ad36, - 0x3ce08a10, 0x99a1186e, 0x738fd81d, 0xd6ce4a63, - 0xa23e2e0a, 0x077fbc74, 0xed517c07, 0x4810ee79, - 0x04b1b4d5, 0xa1f026ab, 0x4bdee6d8, 0xee9f74a6, - 0x9a6f10cf, 0x3f2e82b1, 0xd50042c2, 0x7041d0bc, - 0xad060c8e, 0x08479ef0, 0xe2695e83, 0x4728ccfd, - 0x33d8a894, 0x96993aea, 0x7cb7fa99, 0xd9f668e7, - 0x9557324b, 0x3016a035, 0xda386046, 0x7f79f238, - 0x0b899651, 0xaec8042f, 0x44e6c45c, 0xe1a75622, - 0xdda47104, 0x78e5e37a, 0x92cb2309, 0x378ab177, - 0x437ad51e, 0xe63b4760, 0x0c158713, 0xa954156d, - 0xe5f54fc1, 0x40b4ddbf, 0xaa9a1dcc, 0x0fdb8fb2, - 0x7b2bebdb, 0xde6a79a5, 0x3444b9d6, 0x91052ba8 -}; -static const uint32_t table3_[256] = { - 0x00000000, 0xdd45aab8, 0xbf672381, 0x62228939, - 0x7b2231f3, 0xa6679b4b, 0xc4451272, 0x1900b8ca, - 0xf64463e6, 0x2b01c95e, 0x49234067, 0x9466eadf, - 0x8d665215, 0x5023f8ad, 0x32017194, 0xef44db2c, - 0xe964b13d, 0x34211b85, 0x560392bc, 0x8b463804, - 0x924680ce, 0x4f032a76, 0x2d21a34f, 0xf06409f7, - 0x1f20d2db, 0xc2657863, 0xa047f15a, 0x7d025be2, - 0x6402e328, 0xb9474990, 0xdb65c0a9, 0x06206a11, - 0xd725148b, 0x0a60be33, 0x6842370a, 0xb5079db2, - 0xac072578, 0x71428fc0, 0x136006f9, 0xce25ac41, - 0x2161776d, 0xfc24ddd5, 0x9e0654ec, 0x4343fe54, - 0x5a43469e, 0x8706ec26, 0xe524651f, 0x3861cfa7, - 0x3e41a5b6, 0xe3040f0e, 0x81268637, 0x5c632c8f, - 0x45639445, 0x98263efd, 0xfa04b7c4, 0x27411d7c, - 0xc805c650, 0x15406ce8, 0x7762e5d1, 
0xaa274f69, - 0xb327f7a3, 0x6e625d1b, 0x0c40d422, 0xd1057e9a, - 0xaba65fe7, 0x76e3f55f, 0x14c17c66, 0xc984d6de, - 0xd0846e14, 0x0dc1c4ac, 0x6fe34d95, 0xb2a6e72d, - 0x5de23c01, 0x80a796b9, 0xe2851f80, 0x3fc0b538, - 0x26c00df2, 0xfb85a74a, 0x99a72e73, 0x44e284cb, - 0x42c2eeda, 0x9f874462, 0xfda5cd5b, 0x20e067e3, - 0x39e0df29, 0xe4a57591, 0x8687fca8, 0x5bc25610, - 0xb4868d3c, 0x69c32784, 0x0be1aebd, 0xd6a40405, - 0xcfa4bccf, 0x12e11677, 0x70c39f4e, 0xad8635f6, - 0x7c834b6c, 0xa1c6e1d4, 0xc3e468ed, 0x1ea1c255, - 0x07a17a9f, 0xdae4d027, 0xb8c6591e, 0x6583f3a6, - 0x8ac7288a, 0x57828232, 0x35a00b0b, 0xe8e5a1b3, - 0xf1e51979, 0x2ca0b3c1, 0x4e823af8, 0x93c79040, - 0x95e7fa51, 0x48a250e9, 0x2a80d9d0, 0xf7c57368, - 0xeec5cba2, 0x3380611a, 0x51a2e823, 0x8ce7429b, - 0x63a399b7, 0xbee6330f, 0xdcc4ba36, 0x0181108e, - 0x1881a844, 0xc5c402fc, 0xa7e68bc5, 0x7aa3217d, - 0x52a0c93f, 0x8fe56387, 0xedc7eabe, 0x30824006, - 0x2982f8cc, 0xf4c75274, 0x96e5db4d, 0x4ba071f5, - 0xa4e4aad9, 0x79a10061, 0x1b838958, 0xc6c623e0, - 0xdfc69b2a, 0x02833192, 0x60a1b8ab, 0xbde41213, - 0xbbc47802, 0x6681d2ba, 0x04a35b83, 0xd9e6f13b, - 0xc0e649f1, 0x1da3e349, 0x7f816a70, 0xa2c4c0c8, - 0x4d801be4, 0x90c5b15c, 0xf2e73865, 0x2fa292dd, - 0x36a22a17, 0xebe780af, 0x89c50996, 0x5480a32e, - 0x8585ddb4, 0x58c0770c, 0x3ae2fe35, 0xe7a7548d, - 0xfea7ec47, 0x23e246ff, 0x41c0cfc6, 0x9c85657e, - 0x73c1be52, 0xae8414ea, 0xcca69dd3, 0x11e3376b, - 0x08e38fa1, 0xd5a62519, 0xb784ac20, 0x6ac10698, - 0x6ce16c89, 0xb1a4c631, 0xd3864f08, 0x0ec3e5b0, - 0x17c35d7a, 0xca86f7c2, 0xa8a47efb, 0x75e1d443, - 0x9aa50f6f, 0x47e0a5d7, 0x25c22cee, 0xf8878656, - 0xe1873e9c, 0x3cc29424, 0x5ee01d1d, 0x83a5b7a5, - 0xf90696d8, 0x24433c60, 0x4661b559, 0x9b241fe1, - 0x8224a72b, 0x5f610d93, 0x3d4384aa, 0xe0062e12, - 0x0f42f53e, 0xd2075f86, 0xb025d6bf, 0x6d607c07, - 0x7460c4cd, 0xa9256e75, 0xcb07e74c, 0x16424df4, - 0x106227e5, 0xcd278d5d, 0xaf050464, 0x7240aedc, - 0x6b401616, 0xb605bcae, 0xd4273597, 0x09629f2f, - 0xe6264403, 0x3b63eebb, 0x59416782, 0x8404cd3a, - 0x9d0475f0, 0x4041df48, 0x22635671, 0xff26fcc9, - 0x2e238253, 0xf36628eb, 0x9144a1d2, 0x4c010b6a, - 0x5501b3a0, 0x88441918, 0xea669021, 0x37233a99, - 0xd867e1b5, 0x05224b0d, 0x6700c234, 0xba45688c, - 0xa345d046, 0x7e007afe, 0x1c22f3c7, 0xc167597f, - 0xc747336e, 0x1a0299d6, 0x782010ef, 0xa565ba57, - 0xbc65029d, 0x6120a825, 0x0302211c, 0xde478ba4, - 0x31035088, 0xec46fa30, 0x8e647309, 0x5321d9b1, - 0x4a21617b, 0x9764cbc3, 0xf54642fa, 0x2803e842 -}; +namespace { -// Used to fetch a naturally-aligned 32-bit word in little endian byte-order -static inline uint32_t LE_LOAD32(const uint8_t *p) { - return DecodeFixed32(reinterpret_cast<const char*>(p)); +const uint32_t kByteExtensionTable[256] = { + 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c, + 0x26a1e7e8, 0xd4ca64eb, 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, + 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, 0x105ec76f, 0xe235446c, + 0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384, + 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc, + 0xbc267848, 0x4e4dfb4b, 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, + 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, 0xaa64d611, 0x580f5512, + 0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa, + 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad, + 0x1642ae59, 0xe4292d5a, 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, + 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, 0x417b1dbc, 0xb3109ebf, + 0xa0406d4b, 0x522bee48, 
0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957, + 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f, + 0xed03a29b, 0x1f682198, 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, + 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, 0xdbfc821c, 0x2997011f, + 0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7, + 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e, + 0x4767748a, 0xb50cf789, 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, + 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, 0x7198540d, 0x83f3d70e, + 0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6, + 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de, + 0xdde0eb2a, 0x2f8b6829, 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, + 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, 0x082f63b7, 0xfa44e0b4, + 0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c, + 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b, + 0xb4091bff, 0x466298fc, 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, + 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, 0xa24bb5a6, 0x502036a5, + 0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d, + 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975, + 0x0e330a81, 0xfc588982, 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, + 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, 0x38cc2a06, 0xcaa7a905, + 0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed, + 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8, + 0xe52cc12c, 0x1747422f, 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, + 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, 0xd3d3e1ab, 0x21b862a8, + 0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540, + 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78, + 0x7fab5e8c, 0x8dc0dd8f, 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, + 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, 0x69e9f0d5, 0x9b8273d6, + 0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e, + 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69, + 0xd5cf889d, 0x27a40b9e, 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, + 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351}; + +const uint32_t kStrideExtensionTable0[256] = { + 0x00000000, 0x30d23865, 0x61a470ca, 0x517648af, 0xc348e194, 0xf39ad9f1, + 0xa2ec915e, 0x923ea93b, 0x837db5d9, 0xb3af8dbc, 0xe2d9c513, 0xd20bfd76, + 0x4035544d, 0x70e76c28, 0x21912487, 0x11431ce2, 0x03171d43, 0x33c52526, + 0x62b36d89, 0x526155ec, 0xc05ffcd7, 0xf08dc4b2, 0xa1fb8c1d, 0x9129b478, + 0x806aa89a, 0xb0b890ff, 0xe1ced850, 0xd11ce035, 0x4322490e, 0x73f0716b, + 0x228639c4, 0x125401a1, 0x062e3a86, 0x36fc02e3, 0x678a4a4c, 0x57587229, + 0xc566db12, 0xf5b4e377, 0xa4c2abd8, 0x941093bd, 0x85538f5f, 0xb581b73a, + 0xe4f7ff95, 0xd425c7f0, 0x461b6ecb, 0x76c956ae, 0x27bf1e01, 0x176d2664, + 0x053927c5, 0x35eb1fa0, 0x649d570f, 0x544f6f6a, 0xc671c651, 0xf6a3fe34, + 0xa7d5b69b, 0x97078efe, 0x8644921c, 0xb696aa79, 0xe7e0e2d6, 0xd732dab3, + 0x450c7388, 0x75de4bed, 0x24a80342, 0x147a3b27, 0x0c5c750c, 0x3c8e4d69, + 0x6df805c6, 0x5d2a3da3, 0xcf149498, 0xffc6acfd, 0xaeb0e452, 0x9e62dc37, + 0x8f21c0d5, 0xbff3f8b0, 0xee85b01f, 0xde57887a, 0x4c692141, 0x7cbb1924, + 0x2dcd518b, 0x1d1f69ee, 0x0f4b684f, 0x3f99502a, 0x6eef1885, 0x5e3d20e0, + 0xcc0389db, 0xfcd1b1be, 0xada7f911, 0x9d75c174, 0x8c36dd96, 0xbce4e5f3, + 0xed92ad5c, 0xdd409539, 0x4f7e3c02, 0x7fac0467, 0x2eda4cc8, 0x1e0874ad, + 
0x0a724f8a, 0x3aa077ef, 0x6bd63f40, 0x5b040725, 0xc93aae1e, 0xf9e8967b, + 0xa89eded4, 0x984ce6b1, 0x890ffa53, 0xb9ddc236, 0xe8ab8a99, 0xd879b2fc, + 0x4a471bc7, 0x7a9523a2, 0x2be36b0d, 0x1b315368, 0x096552c9, 0x39b76aac, + 0x68c12203, 0x58131a66, 0xca2db35d, 0xfaff8b38, 0xab89c397, 0x9b5bfbf2, + 0x8a18e710, 0xbacadf75, 0xebbc97da, 0xdb6eafbf, 0x49500684, 0x79823ee1, + 0x28f4764e, 0x18264e2b, 0x18b8ea18, 0x286ad27d, 0x791c9ad2, 0x49cea2b7, + 0xdbf00b8c, 0xeb2233e9, 0xba547b46, 0x8a864323, 0x9bc55fc1, 0xab1767a4, + 0xfa612f0b, 0xcab3176e, 0x588dbe55, 0x685f8630, 0x3929ce9f, 0x09fbf6fa, + 0x1baff75b, 0x2b7dcf3e, 0x7a0b8791, 0x4ad9bff4, 0xd8e716cf, 0xe8352eaa, + 0xb9436605, 0x89915e60, 0x98d24282, 0xa8007ae7, 0xf9763248, 0xc9a40a2d, + 0x5b9aa316, 0x6b489b73, 0x3a3ed3dc, 0x0aecebb9, 0x1e96d09e, 0x2e44e8fb, + 0x7f32a054, 0x4fe09831, 0xddde310a, 0xed0c096f, 0xbc7a41c0, 0x8ca879a5, + 0x9deb6547, 0xad395d22, 0xfc4f158d, 0xcc9d2de8, 0x5ea384d3, 0x6e71bcb6, + 0x3f07f419, 0x0fd5cc7c, 0x1d81cddd, 0x2d53f5b8, 0x7c25bd17, 0x4cf78572, + 0xdec92c49, 0xee1b142c, 0xbf6d5c83, 0x8fbf64e6, 0x9efc7804, 0xae2e4061, + 0xff5808ce, 0xcf8a30ab, 0x5db49990, 0x6d66a1f5, 0x3c10e95a, 0x0cc2d13f, + 0x14e49f14, 0x2436a771, 0x7540efde, 0x4592d7bb, 0xd7ac7e80, 0xe77e46e5, + 0xb6080e4a, 0x86da362f, 0x97992acd, 0xa74b12a8, 0xf63d5a07, 0xc6ef6262, + 0x54d1cb59, 0x6403f33c, 0x3575bb93, 0x05a783f6, 0x17f38257, 0x2721ba32, + 0x7657f29d, 0x4685caf8, 0xd4bb63c3, 0xe4695ba6, 0xb51f1309, 0x85cd2b6c, + 0x948e378e, 0xa45c0feb, 0xf52a4744, 0xc5f87f21, 0x57c6d61a, 0x6714ee7f, + 0x3662a6d0, 0x06b09eb5, 0x12caa592, 0x22189df7, 0x736ed558, 0x43bced3d, + 0xd1824406, 0xe1507c63, 0xb02634cc, 0x80f40ca9, 0x91b7104b, 0xa165282e, + 0xf0136081, 0xc0c158e4, 0x52fff1df, 0x622dc9ba, 0x335b8115, 0x0389b970, + 0x11ddb8d1, 0x210f80b4, 0x7079c81b, 0x40abf07e, 0xd2955945, 0xe2476120, + 0xb331298f, 0x83e311ea, 0x92a00d08, 0xa272356d, 0xf3047dc2, 0xc3d645a7, + 0x51e8ec9c, 0x613ad4f9, 0x304c9c56, 0x009ea433}; + +const uint32_t kStrideExtensionTable1[256] = { + 0x00000000, 0x54075546, 0xa80eaa8c, 0xfc09ffca, 0x55f123e9, 0x01f676af, + 0xfdff8965, 0xa9f8dc23, 0xabe247d2, 0xffe51294, 0x03eced5e, 0x57ebb818, + 0xfe13643b, 0xaa14317d, 0x561dceb7, 0x021a9bf1, 0x5228f955, 0x062fac13, + 0xfa2653d9, 0xae21069f, 0x07d9dabc, 0x53de8ffa, 0xafd77030, 0xfbd02576, + 0xf9cabe87, 0xadcdebc1, 0x51c4140b, 0x05c3414d, 0xac3b9d6e, 0xf83cc828, + 0x043537e2, 0x503262a4, 0xa451f2aa, 0xf056a7ec, 0x0c5f5826, 0x58580d60, + 0xf1a0d143, 0xa5a78405, 0x59ae7bcf, 0x0da92e89, 0x0fb3b578, 0x5bb4e03e, + 0xa7bd1ff4, 0xf3ba4ab2, 0x5a429691, 0x0e45c3d7, 0xf24c3c1d, 0xa64b695b, + 0xf6790bff, 0xa27e5eb9, 0x5e77a173, 0x0a70f435, 0xa3882816, 0xf78f7d50, + 0x0b86829a, 0x5f81d7dc, 0x5d9b4c2d, 0x099c196b, 0xf595e6a1, 0xa192b3e7, + 0x086a6fc4, 0x5c6d3a82, 0xa064c548, 0xf463900e, 0x4d4f93a5, 0x1948c6e3, + 0xe5413929, 0xb1466c6f, 0x18beb04c, 0x4cb9e50a, 0xb0b01ac0, 0xe4b74f86, + 0xe6add477, 0xb2aa8131, 0x4ea37efb, 0x1aa42bbd, 0xb35cf79e, 0xe75ba2d8, + 0x1b525d12, 0x4f550854, 0x1f676af0, 0x4b603fb6, 0xb769c07c, 0xe36e953a, + 0x4a964919, 0x1e911c5f, 0xe298e395, 0xb69fb6d3, 0xb4852d22, 0xe0827864, + 0x1c8b87ae, 0x488cd2e8, 0xe1740ecb, 0xb5735b8d, 0x497aa447, 0x1d7df101, + 0xe91e610f, 0xbd193449, 0x4110cb83, 0x15179ec5, 0xbcef42e6, 0xe8e817a0, + 0x14e1e86a, 0x40e6bd2c, 0x42fc26dd, 0x16fb739b, 0xeaf28c51, 0xbef5d917, + 0x170d0534, 0x430a5072, 0xbf03afb8, 0xeb04fafe, 0xbb36985a, 0xef31cd1c, + 0x133832d6, 0x473f6790, 0xeec7bbb3, 0xbac0eef5, 0x46c9113f, 0x12ce4479, + 0x10d4df88, 0x44d38ace, 0xb8da7504, 0xecdd2042, 
0x4525fc61, 0x1122a927, + 0xed2b56ed, 0xb92c03ab, 0x9a9f274a, 0xce98720c, 0x32918dc6, 0x6696d880, + 0xcf6e04a3, 0x9b6951e5, 0x6760ae2f, 0x3367fb69, 0x317d6098, 0x657a35de, + 0x9973ca14, 0xcd749f52, 0x648c4371, 0x308b1637, 0xcc82e9fd, 0x9885bcbb, + 0xc8b7de1f, 0x9cb08b59, 0x60b97493, 0x34be21d5, 0x9d46fdf6, 0xc941a8b0, + 0x3548577a, 0x614f023c, 0x635599cd, 0x3752cc8b, 0xcb5b3341, 0x9f5c6607, + 0x36a4ba24, 0x62a3ef62, 0x9eaa10a8, 0xcaad45ee, 0x3eced5e0, 0x6ac980a6, + 0x96c07f6c, 0xc2c72a2a, 0x6b3ff609, 0x3f38a34f, 0xc3315c85, 0x973609c3, + 0x952c9232, 0xc12bc774, 0x3d2238be, 0x69256df8, 0xc0ddb1db, 0x94dae49d, + 0x68d31b57, 0x3cd44e11, 0x6ce62cb5, 0x38e179f3, 0xc4e88639, 0x90efd37f, + 0x39170f5c, 0x6d105a1a, 0x9119a5d0, 0xc51ef096, 0xc7046b67, 0x93033e21, + 0x6f0ac1eb, 0x3b0d94ad, 0x92f5488e, 0xc6f21dc8, 0x3afbe202, 0x6efcb744, + 0xd7d0b4ef, 0x83d7e1a9, 0x7fde1e63, 0x2bd94b25, 0x82219706, 0xd626c240, + 0x2a2f3d8a, 0x7e2868cc, 0x7c32f33d, 0x2835a67b, 0xd43c59b1, 0x803b0cf7, + 0x29c3d0d4, 0x7dc48592, 0x81cd7a58, 0xd5ca2f1e, 0x85f84dba, 0xd1ff18fc, + 0x2df6e736, 0x79f1b270, 0xd0096e53, 0x840e3b15, 0x7807c4df, 0x2c009199, + 0x2e1a0a68, 0x7a1d5f2e, 0x8614a0e4, 0xd213f5a2, 0x7beb2981, 0x2fec7cc7, + 0xd3e5830d, 0x87e2d64b, 0x73814645, 0x27861303, 0xdb8fecc9, 0x8f88b98f, + 0x267065ac, 0x727730ea, 0x8e7ecf20, 0xda799a66, 0xd8630197, 0x8c6454d1, + 0x706dab1b, 0x246afe5d, 0x8d92227e, 0xd9957738, 0x259c88f2, 0x719bddb4, + 0x21a9bf10, 0x75aeea56, 0x89a7159c, 0xdda040da, 0x74589cf9, 0x205fc9bf, + 0xdc563675, 0x88516333, 0x8a4bf8c2, 0xde4cad84, 0x2245524e, 0x76420708, + 0xdfbadb2b, 0x8bbd8e6d, 0x77b471a7, 0x23b324e1}; + +const uint32_t kStrideExtensionTable2[256] = { + 0x00000000, 0x678efd01, 0xcf1dfa02, 0xa8930703, 0x9bd782f5, 0xfc597ff4, + 0x54ca78f7, 0x334485f6, 0x3243731b, 0x55cd8e1a, 0xfd5e8919, 0x9ad07418, + 0xa994f1ee, 0xce1a0cef, 0x66890bec, 0x0107f6ed, 0x6486e636, 0x03081b37, + 0xab9b1c34, 0xcc15e135, 0xff5164c3, 0x98df99c2, 0x304c9ec1, 0x57c263c0, + 0x56c5952d, 0x314b682c, 0x99d86f2f, 0xfe56922e, 0xcd1217d8, 0xaa9cead9, + 0x020fedda, 0x658110db, 0xc90dcc6c, 0xae83316d, 0x0610366e, 0x619ecb6f, + 0x52da4e99, 0x3554b398, 0x9dc7b49b, 0xfa49499a, 0xfb4ebf77, 0x9cc04276, + 0x34534575, 0x53ddb874, 0x60993d82, 0x0717c083, 0xaf84c780, 0xc80a3a81, + 0xad8b2a5a, 0xca05d75b, 0x6296d058, 0x05182d59, 0x365ca8af, 0x51d255ae, + 0xf94152ad, 0x9ecfafac, 0x9fc85941, 0xf846a440, 0x50d5a343, 0x375b5e42, + 0x041fdbb4, 0x639126b5, 0xcb0221b6, 0xac8cdcb7, 0x97f7ee29, 0xf0791328, + 0x58ea142b, 0x3f64e92a, 0x0c206cdc, 0x6bae91dd, 0xc33d96de, 0xa4b36bdf, + 0xa5b49d32, 0xc23a6033, 0x6aa96730, 0x0d279a31, 0x3e631fc7, 0x59ede2c6, + 0xf17ee5c5, 0x96f018c4, 0xf371081f, 0x94fff51e, 0x3c6cf21d, 0x5be20f1c, + 0x68a68aea, 0x0f2877eb, 0xa7bb70e8, 0xc0358de9, 0xc1327b04, 0xa6bc8605, + 0x0e2f8106, 0x69a17c07, 0x5ae5f9f1, 0x3d6b04f0, 0x95f803f3, 0xf276fef2, + 0x5efa2245, 0x3974df44, 0x91e7d847, 0xf6692546, 0xc52da0b0, 0xa2a35db1, + 0x0a305ab2, 0x6dbea7b3, 0x6cb9515e, 0x0b37ac5f, 0xa3a4ab5c, 0xc42a565d, + 0xf76ed3ab, 0x90e02eaa, 0x387329a9, 0x5ffdd4a8, 0x3a7cc473, 0x5df23972, + 0xf5613e71, 0x92efc370, 0xa1ab4686, 0xc625bb87, 0x6eb6bc84, 0x09384185, + 0x083fb768, 0x6fb14a69, 0xc7224d6a, 0xa0acb06b, 0x93e8359d, 0xf466c89c, + 0x5cf5cf9f, 0x3b7b329e, 0x2a03aaa3, 0x4d8d57a2, 0xe51e50a1, 0x8290ada0, + 0xb1d42856, 0xd65ad557, 0x7ec9d254, 0x19472f55, 0x1840d9b8, 0x7fce24b9, + 0xd75d23ba, 0xb0d3debb, 0x83975b4d, 0xe419a64c, 0x4c8aa14f, 0x2b045c4e, + 0x4e854c95, 0x290bb194, 0x8198b697, 0xe6164b96, 0xd552ce60, 0xb2dc3361, + 0x1a4f3462, 0x7dc1c963, 
0x7cc63f8e, 0x1b48c28f, 0xb3dbc58c, 0xd455388d, + 0xe711bd7b, 0x809f407a, 0x280c4779, 0x4f82ba78, 0xe30e66cf, 0x84809bce, + 0x2c139ccd, 0x4b9d61cc, 0x78d9e43a, 0x1f57193b, 0xb7c41e38, 0xd04ae339, + 0xd14d15d4, 0xb6c3e8d5, 0x1e50efd6, 0x79de12d7, 0x4a9a9721, 0x2d146a20, + 0x85876d23, 0xe2099022, 0x878880f9, 0xe0067df8, 0x48957afb, 0x2f1b87fa, + 0x1c5f020c, 0x7bd1ff0d, 0xd342f80e, 0xb4cc050f, 0xb5cbf3e2, 0xd2450ee3, + 0x7ad609e0, 0x1d58f4e1, 0x2e1c7117, 0x49928c16, 0xe1018b15, 0x868f7614, + 0xbdf4448a, 0xda7ab98b, 0x72e9be88, 0x15674389, 0x2623c67f, 0x41ad3b7e, + 0xe93e3c7d, 0x8eb0c17c, 0x8fb73791, 0xe839ca90, 0x40aacd93, 0x27243092, + 0x1460b564, 0x73ee4865, 0xdb7d4f66, 0xbcf3b267, 0xd972a2bc, 0xbefc5fbd, + 0x166f58be, 0x71e1a5bf, 0x42a52049, 0x252bdd48, 0x8db8da4b, 0xea36274a, + 0xeb31d1a7, 0x8cbf2ca6, 0x242c2ba5, 0x43a2d6a4, 0x70e65352, 0x1768ae53, + 0xbffba950, 0xd8755451, 0x74f988e6, 0x137775e7, 0xbbe472e4, 0xdc6a8fe5, + 0xef2e0a13, 0x88a0f712, 0x2033f011, 0x47bd0d10, 0x46bafbfd, 0x213406fc, + 0x89a701ff, 0xee29fcfe, 0xdd6d7908, 0xbae38409, 0x1270830a, 0x75fe7e0b, + 0x107f6ed0, 0x77f193d1, 0xdf6294d2, 0xb8ec69d3, 0x8ba8ec25, 0xec261124, + 0x44b51627, 0x233beb26, 0x223c1dcb, 0x45b2e0ca, 0xed21e7c9, 0x8aaf1ac8, + 0xb9eb9f3e, 0xde65623f, 0x76f6653c, 0x1178983d}; + +const uint32_t kStrideExtensionTable3[256] = { + 0x00000000, 0xf20c0dfe, 0xe1f46d0d, 0x13f860f3, 0xc604aceb, 0x3408a115, + 0x27f0c1e6, 0xd5fccc18, 0x89e52f27, 0x7be922d9, 0x6811422a, 0x9a1d4fd4, + 0x4fe183cc, 0xbded8e32, 0xae15eec1, 0x5c19e33f, 0x162628bf, 0xe42a2541, + 0xf7d245b2, 0x05de484c, 0xd0228454, 0x222e89aa, 0x31d6e959, 0xc3dae4a7, + 0x9fc30798, 0x6dcf0a66, 0x7e376a95, 0x8c3b676b, 0x59c7ab73, 0xabcba68d, + 0xb833c67e, 0x4a3fcb80, 0x2c4c517e, 0xde405c80, 0xcdb83c73, 0x3fb4318d, + 0xea48fd95, 0x1844f06b, 0x0bbc9098, 0xf9b09d66, 0xa5a97e59, 0x57a573a7, + 0x445d1354, 0xb6511eaa, 0x63add2b2, 0x91a1df4c, 0x8259bfbf, 0x7055b241, + 0x3a6a79c1, 0xc866743f, 0xdb9e14cc, 0x29921932, 0xfc6ed52a, 0x0e62d8d4, + 0x1d9ab827, 0xef96b5d9, 0xb38f56e6, 0x41835b18, 0x527b3beb, 0xa0773615, + 0x758bfa0d, 0x8787f7f3, 0x947f9700, 0x66739afe, 0x5898a2fc, 0xaa94af02, + 0xb96ccff1, 0x4b60c20f, 0x9e9c0e17, 0x6c9003e9, 0x7f68631a, 0x8d646ee4, + 0xd17d8ddb, 0x23718025, 0x3089e0d6, 0xc285ed28, 0x17792130, 0xe5752cce, + 0xf68d4c3d, 0x048141c3, 0x4ebe8a43, 0xbcb287bd, 0xaf4ae74e, 0x5d46eab0, + 0x88ba26a8, 0x7ab62b56, 0x694e4ba5, 0x9b42465b, 0xc75ba564, 0x3557a89a, + 0x26afc869, 0xd4a3c597, 0x015f098f, 0xf3530471, 0xe0ab6482, 0x12a7697c, + 0x74d4f382, 0x86d8fe7c, 0x95209e8f, 0x672c9371, 0xb2d05f69, 0x40dc5297, + 0x53243264, 0xa1283f9a, 0xfd31dca5, 0x0f3dd15b, 0x1cc5b1a8, 0xeec9bc56, + 0x3b35704e, 0xc9397db0, 0xdac11d43, 0x28cd10bd, 0x62f2db3d, 0x90fed6c3, + 0x8306b630, 0x710abbce, 0xa4f677d6, 0x56fa7a28, 0x45021adb, 0xb70e1725, + 0xeb17f41a, 0x191bf9e4, 0x0ae39917, 0xf8ef94e9, 0x2d1358f1, 0xdf1f550f, + 0xcce735fc, 0x3eeb3802, 0xb13145f8, 0x433d4806, 0x50c528f5, 0xa2c9250b, + 0x7735e913, 0x8539e4ed, 0x96c1841e, 0x64cd89e0, 0x38d46adf, 0xcad86721, + 0xd92007d2, 0x2b2c0a2c, 0xfed0c634, 0x0cdccbca, 0x1f24ab39, 0xed28a6c7, + 0xa7176d47, 0x551b60b9, 0x46e3004a, 0xb4ef0db4, 0x6113c1ac, 0x931fcc52, + 0x80e7aca1, 0x72eba15f, 0x2ef24260, 0xdcfe4f9e, 0xcf062f6d, 0x3d0a2293, + 0xe8f6ee8b, 0x1afae375, 0x09028386, 0xfb0e8e78, 0x9d7d1486, 0x6f711978, + 0x7c89798b, 0x8e857475, 0x5b79b86d, 0xa975b593, 0xba8dd560, 0x4881d89e, + 0x14983ba1, 0xe694365f, 0xf56c56ac, 0x07605b52, 0xd29c974a, 0x20909ab4, + 0x3368fa47, 0xc164f7b9, 0x8b5b3c39, 0x795731c7, 0x6aaf5134, 0x98a35cca, + 
0x4d5f90d2, 0xbf539d2c, 0xacabfddf, 0x5ea7f021, 0x02be131e, 0xf0b21ee0, + 0xe34a7e13, 0x114673ed, 0xc4babff5, 0x36b6b20b, 0x254ed2f8, 0xd742df06, + 0xe9a9e704, 0x1ba5eafa, 0x085d8a09, 0xfa5187f7, 0x2fad4bef, 0xdda14611, + 0xce5926e2, 0x3c552b1c, 0x604cc823, 0x9240c5dd, 0x81b8a52e, 0x73b4a8d0, + 0xa64864c8, 0x54446936, 0x47bc09c5, 0xb5b0043b, 0xff8fcfbb, 0x0d83c245, + 0x1e7ba2b6, 0xec77af48, 0x398b6350, 0xcb876eae, 0xd87f0e5d, 0x2a7303a3, + 0x766ae09c, 0x8466ed62, 0x979e8d91, 0x6592806f, 0xb06e4c77, 0x42624189, + 0x519a217a, 0xa3962c84, 0xc5e5b67a, 0x37e9bb84, 0x2411db77, 0xd61dd689, + 0x03e11a91, 0xf1ed176f, 0xe215779c, 0x10197a62, 0x4c00995d, 0xbe0c94a3, + 0xadf4f450, 0x5ff8f9ae, 0x8a0435b6, 0x78083848, 0x6bf058bb, 0x99fc5545, + 0xd3c39ec5, 0x21cf933b, 0x3237f3c8, 0xc03bfe36, 0x15c7322e, 0xe7cb3fd0, + 0xf4335f23, 0x063f52dd, 0x5a26b1e2, 0xa82abc1c, 0xbbd2dcef, 0x49ded111, + 0x9c221d09, 0x6e2e10f7, 0x7dd67004, 0x8fda7dfa}; + +// CRCs are pre- and post- conditioned by xoring with all ones. +static constexpr const uint32_t kCRC32Xor = static_cast<uint32_t>(0xffffffffU); + +// Reads a little-endian 32-bit integer from a 32-bit-aligned buffer. +inline uint32_t ReadUint32LE(const uint8_t* buffer) { + return DecodeFixed32(reinterpret_cast<const char*>(buffer)); } +// Returns the smallest address >= the given address that is aligned to N bytes. +// +// N must be a power of two. +template <int N> +constexpr inline const uint8_t* RoundUp(const uint8_t* pointer) { + return reinterpret_cast<uint8_t*>( + (reinterpret_cast<uintptr_t>(pointer) + (N - 1)) & + ~static_cast<uintptr_t>(N - 1)); +} + +} // namespace + // Determine if the CPU running this program can accelerate the CRC32C // calculation. static bool CanAccelerateCRC32C() { - if (!port::HasAcceleratedCRC32C()) - return false; - - // Double-check that the accelerated implementation functions correctly. // port::AcceleretedCRC32C returns zero when unable to accelerate. static const char kTestCRCBuffer[] = "TestCRCBuffer"; static const char kBufSize = sizeof(kTestCRCBuffer) - 1; @@ -300,54 +273,107 @@ static bool CanAccelerateCRC32C() { return port::AcceleratedCRC32C(0, kTestCRCBuffer, kBufSize) == kTestCRCValue; } -uint32_t Extend(uint32_t crc, const char* buf, size_t size) { +uint32_t Extend(uint32_t crc, const char* data, size_t n) { static bool accelerate = CanAccelerateCRC32C(); if (accelerate) { - return port::AcceleratedCRC32C(crc, buf, size); + return port::AcceleratedCRC32C(crc, data, n); } - const uint8_t *p = reinterpret_cast<const uint8_t *>(buf); - const uint8_t *e = p + size; - uint32_t l = crc ^ 0xffffffffu; + const uint8_t* p = reinterpret_cast<const uint8_t*>(data); + const uint8_t* e = p + n; + uint32_t l = crc ^ kCRC32Xor; + +// Process one byte at a time. +#define STEP1 \ + do { \ + int c = (l & 0xff) ^ *p++; \ + l = kByteExtensionTable[c] ^ (l >> 8); \ + } while (0) -#define STEP1 do { \ - int c = (l & 0xff) ^ *p++; \ - l = table0_[c] ^ (l >> 8); \ -} while (0) -#define STEP4 do { \ - uint32_t c = l ^ LE_LOAD32(p); \ - p += 4; \ - l = table3_[c & 0xff] ^ \ - table2_[(c >> 8) & 0xff] ^ \ - table1_[(c >> 16) & 0xff] ^ \ - table0_[c >> 24]; \ -} while (0) +// Process one of the 4 strides of 4-byte data. +#define STEP4(s) \ + do { \ + crc##s = ReadUint32LE(p + s * 4) ^ kStrideExtensionTable3[crc##s & 0xff] ^ \ + kStrideExtensionTable2[(crc##s >> 8) & 0xff] ^ \ + kStrideExtensionTable1[(crc##s >> 16) & 0xff] ^ \ + kStrideExtensionTable0[crc##s >> 24]; \ + } while (0) - // Point x at first 4-byte aligned byte in string. 
This might be - // just past the end of the string. - const uintptr_t pval = reinterpret_cast<uintptr_t>(p); - const uint8_t* x = reinterpret_cast<const uint8_t*>(((pval + 3) >> 2) << 2); +// Process a 16-byte swath of 4 strides, each of which has 4 bytes of data. +#define STEP16 \ + do { \ + STEP4(0); \ + STEP4(1); \ + STEP4(2); \ + STEP4(3); \ + p += 16; \ + } while (0) + +// Process 4 bytes that were already loaded into a word. +#define STEP4W(w) \ + do { \ + w ^= l; \ + for (size_t i = 0; i < 4; ++i) { \ + w = (w >> 8) ^ kByteExtensionTable[w & 0xff]; \ + } \ + l = w; \ + } while (0) + + // Point x at first 4-byte aligned byte in the buffer. This might be past the + // end of the buffer. + const uint8_t* x = RoundUp<4>(p); if (x <= e) { - // Process bytes until finished or p is 4-byte aligned + // Process bytes p is 4-byte aligned. while (p != x) { STEP1; } } - // Process bytes 16 at a time - while ((e-p) >= 16) { - STEP4; STEP4; STEP4; STEP4; - } - // Process bytes 4 at a time - while ((e-p) >= 4) { - STEP4; + + if ((e - p) >= 16) { + // Load a 16-byte swath into the stride partial results. + uint32_t crc0 = ReadUint32LE(p + 0 * 4) ^ l; + uint32_t crc1 = ReadUint32LE(p + 1 * 4); + uint32_t crc2 = ReadUint32LE(p + 2 * 4); + uint32_t crc3 = ReadUint32LE(p + 3 * 4); + p += 16; + + // It is possible to get better speeds (at least on x86) by interleaving + // prefetching 256 bytes ahead with processing 64 bytes at a time. See the + // portable implementation in https://github.com/google/crc32c/. + + // Process one 16-byte swath at a time. + while ((e - p) >= 16) { + STEP16; + } + + // Advance one word at a time as far as possible. + while ((e - p) >= 4) { + STEP4(0); + uint32_t tmp = crc0; + crc0 = crc1; + crc1 = crc2; + crc2 = crc3; + crc3 = tmp; + p += 4; + } + + // Combine the 4 partial stride results. + l = 0; + STEP4W(crc0); + STEP4W(crc1); + STEP4W(crc2); + STEP4W(crc3); } - // Process the last few bytes + + // Process the last few bytes. while (p != e) { STEP1; } +#undef STEP4W +#undef STEP16 #undef STEP4 #undef STEP1 - return l ^ 0xffffffffu; + return l ^ kCRC32Xor; } } // namespace crc32c diff --git a/src/leveldb/util/crc32c.h b/src/leveldb/util/crc32c.h index 1d7e5c075d..98fabb0d2f 100644 --- a/src/leveldb/util/crc32c.h +++ b/src/leveldb/util/crc32c.h @@ -14,12 +14,10 @@ namespace crc32c { // Return the crc32c of concat(A, data[0,n-1]) where init_crc is the // crc32c of some string A. Extend() is often used to maintain the // crc32c of a stream of data. -extern uint32_t Extend(uint32_t init_crc, const char* data, size_t n); +uint32_t Extend(uint32_t init_crc, const char* data, size_t n); // Return the crc32c of data[0,n-1] -inline uint32_t Value(const char* data, size_t n) { - return Extend(0, data, n); -} +inline uint32_t Value(const char* data, size_t n) { return Extend(0, data, n); } static const uint32_t kMaskDelta = 0xa282ead8ul; diff --git a/src/leveldb/util/crc32c_test.cc b/src/leveldb/util/crc32c_test.cc index 4b957ee120..18a8494824 100644 --- a/src/leveldb/util/crc32c_test.cc +++ b/src/leveldb/util/crc32c_test.cc @@ -8,7 +8,7 @@ namespace leveldb { namespace crc32c { -class CRC { }; +class CRC {}; TEST(CRC, StandardResults) { // From rfc3720 section B.4. 
@@ -30,30 +30,19 @@ TEST(CRC, StandardResults) { } ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf))); - unsigned char data[48] = { - 0x01, 0xc0, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x04, 0x00, - 0x00, 0x00, 0x00, 0x14, - 0x00, 0x00, 0x00, 0x18, - 0x28, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, + uint8_t data[48] = { + 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; ASSERT_EQ(0xd9963a56, Value(reinterpret_cast<char*>(data), sizeof(data))); } -TEST(CRC, Values) { - ASSERT_NE(Value("a", 1), Value("foo", 3)); -} +TEST(CRC, Values) { ASSERT_NE(Value("a", 1), Value("foo", 3)); } TEST(CRC, Extend) { - ASSERT_EQ(Value("hello world", 11), - Extend(Value("hello ", 6), "world", 5)); + ASSERT_EQ(Value("hello world", 11), Extend(Value("hello ", 6), "world", 5)); } TEST(CRC, Mask) { @@ -67,6 +56,4 @@ TEST(CRC, Mask) { } // namespace crc32c } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/util/env.cc b/src/leveldb/util/env.cc index c58a0821ef..d2f0aef326 100644 --- a/src/leveldb/util/env.cc +++ b/src/leveldb/util/env.cc @@ -6,30 +6,24 @@ namespace leveldb { -Env::~Env() { -} +Env::~Env() = default; Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) { return Status::NotSupported("NewAppendableFile", fname); } -SequentialFile::~SequentialFile() { -} +SequentialFile::~SequentialFile() = default; -RandomAccessFile::~RandomAccessFile() { -} +RandomAccessFile::~RandomAccessFile() = default; -WritableFile::~WritableFile() { -} +WritableFile::~WritableFile() = default; -Logger::~Logger() { -} +Logger::~Logger() = default; -FileLock::~FileLock() { -} +FileLock::~FileLock() = default; void Log(Logger* info_log, const char* format, ...) { - if (info_log != NULL) { + if (info_log != nullptr) { va_list ap; va_start(ap, format); info_log->Logv(format, ap); @@ -38,8 +32,7 @@ void Log(Logger* info_log, const char* format, ...) { } static Status DoWriteStringToFile(Env* env, const Slice& data, - const std::string& fname, - bool should_sync) { + const std::string& fname, bool should_sync) { WritableFile* file; Status s = env->NewWritableFile(fname, &file); if (!s.ok()) { @@ -94,7 +87,6 @@ Status ReadFileToString(Env* env, const std::string& fname, std::string* data) { return s; } -EnvWrapper::~EnvWrapper() { -} +EnvWrapper::~EnvWrapper() {} } // namespace leveldb diff --git a/src/leveldb/util/env_posix.cc b/src/leveldb/util/env_posix.cc index f77918313e..9f5863a0f3 100644 --- a/src/leveldb/util/env_posix.cc +++ b/src/leveldb/util/env_posix.cc @@ -1,706 +1,906 @@ // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
-#if !defined(LEVELDB_PLATFORM_WINDOWS) #include <dirent.h> -#include <errno.h> #include <fcntl.h> #include <pthread.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> #include <sys/mman.h> #include <sys/resource.h> #include <sys/stat.h> #include <sys/time.h> #include <sys/types.h> -#include <time.h> #include <unistd.h> -#include <deque> + +#include <atomic> +#include <cerrno> +#include <cstddef> +#include <cstdint> +#include <cstdio> +#include <cstdlib> +#include <cstring> #include <limits> +#include <queue> #include <set> +#include <string> +#include <thread> +#include <type_traits> +#include <utility> + #include "leveldb/env.h" #include "leveldb/slice.h" +#include "leveldb/status.h" #include "port/port.h" -#include "util/logging.h" -#include "util/mutexlock.h" -#include "util/posix_logger.h" +#include "port/thread_annotations.h" #include "util/env_posix_test_helper.h" +#include "util/posix_logger.h" namespace leveldb { namespace { -static int open_read_only_file_limit = -1; -static int mmap_limit = -1; +// Set by EnvPosixTestHelper::SetReadOnlyMMapLimit() and MaxOpenFiles(). +int g_open_read_only_file_limit = -1; + +// Up to 4096 mmap regions for 64-bit binaries; none for 32-bit. +constexpr const int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 4096 : 0; + +// Can be set using EnvPosixTestHelper::SetReadOnlyMMapLimit(). +int g_mmap_limit = kDefaultMmapLimit; -static Status IOError(const std::string& context, int err_number) { - return Status::IOError(context, strerror(err_number)); +// Common flags defined for all posix open operations +#if defined(HAVE_O_CLOEXEC) +constexpr const int kOpenBaseFlags = O_CLOEXEC; +#else +constexpr const int kOpenBaseFlags = 0; +#endif // defined(HAVE_O_CLOEXEC) + +constexpr const size_t kWritableFileBufferSize = 65536; + +Status PosixError(const std::string& context, int error_number) { + if (error_number == ENOENT) { + return Status::NotFound(context, std::strerror(error_number)); + } else { + return Status::IOError(context, std::strerror(error_number)); + } } // Helper class to limit resource usage to avoid exhaustion. // Currently used to limit read-only file descriptors and mmap file usage -// so that we do not end up running out of file descriptors, virtual memory, -// or running into kernel performance problems for very large databases. +// so that we do not run out of file descriptors or virtual memory, or run into +// kernel performance problems for very large databases. class Limiter { public: - // Limit maximum number of resources to |n|. - Limiter(intptr_t n) { - SetAllowed(n); - } + // Limit maximum number of resources to |max_acquires|. + Limiter(int max_acquires) : acquires_allowed_(max_acquires) {} + + Limiter(const Limiter&) = delete; + Limiter operator=(const Limiter&) = delete; // If another resource is available, acquire it and return true. // Else return false. bool Acquire() { - if (GetAllowed() <= 0) { - return false; - } - MutexLock l(&mu_); - intptr_t x = GetAllowed(); - if (x <= 0) { - return false; - } else { - SetAllowed(x - 1); - return true; - } + int old_acquires_allowed = + acquires_allowed_.fetch_sub(1, std::memory_order_relaxed); + + if (old_acquires_allowed > 0) return true; + + acquires_allowed_.fetch_add(1, std::memory_order_relaxed); + return false; } // Release a resource acquired by a previous call to Acquire() that returned // true. 
- void Release() { - MutexLock l(&mu_); - SetAllowed(GetAllowed() + 1); - } + void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); } private: - port::Mutex mu_; - port::AtomicPointer allowed_; - - intptr_t GetAllowed() const { - return reinterpret_cast<intptr_t>(allowed_.Acquire_Load()); - } - - // REQUIRES: mu_ must be held - void SetAllowed(intptr_t v) { - allowed_.Release_Store(reinterpret_cast<void*>(v)); - } - - Limiter(const Limiter&); - void operator=(const Limiter&); + // The number of available resources. + // + // This is a counter and is not tied to the invariants of any other class, so + // it can be operated on safely using std::memory_order_relaxed. + std::atomic<int> acquires_allowed_; }; -class PosixSequentialFile: public SequentialFile { - private: - std::string filename_; - FILE* file_; - +// Implements sequential read access in a file using read(). +// +// Instances of this class are thread-friendly but not thread-safe, as required +// by the SequentialFile API. +class PosixSequentialFile final : public SequentialFile { public: - PosixSequentialFile(const std::string& fname, FILE* f) - : filename_(fname), file_(f) { } - virtual ~PosixSequentialFile() { fclose(file_); } - - virtual Status Read(size_t n, Slice* result, char* scratch) { - Status s; - size_t r = fread_unlocked(scratch, 1, n, file_); - *result = Slice(scratch, r); - if (r < n) { - if (feof(file_)) { - // We leave status as ok if we hit the end of the file - } else { - // A partial read with an error: return a non-ok status - s = IOError(filename_, errno); + PosixSequentialFile(std::string filename, int fd) + : fd_(fd), filename_(filename) {} + ~PosixSequentialFile() override { close(fd_); } + + Status Read(size_t n, Slice* result, char* scratch) override { + Status status; + while (true) { + ::ssize_t read_size = ::read(fd_, scratch, n); + if (read_size < 0) { // Read error. + if (errno == EINTR) { + continue; // Retry + } + status = PosixError(filename_, errno); + break; } + *result = Slice(scratch, read_size); + break; } - return s; + return status; } - virtual Status Skip(uint64_t n) { - if (fseek(file_, n, SEEK_CUR)) { - return IOError(filename_, errno); + Status Skip(uint64_t n) override { + if (::lseek(fd_, n, SEEK_CUR) == static_cast<off_t>(-1)) { + return PosixError(filename_, errno); } return Status::OK(); } - virtual std::string GetName() const { return filename_; } -}; + virtual std::string GetName() const override { return filename_; } -// pread() based random-access -class PosixRandomAccessFile: public RandomAccessFile { private: - std::string filename_; - bool temporary_fd_; // If true, fd_ is -1 and we open on every read. - int fd_; - Limiter* limiter_; + const int fd_; + const std::string filename_; +}; +// Implements random read access in a file using pread(). +// +// Instances of this class are thread-safe, as required by the RandomAccessFile +// API. Instances are immutable and Read() only calls thread-safe library +// functions. +class PosixRandomAccessFile final : public RandomAccessFile { public: - PosixRandomAccessFile(const std::string& fname, int fd, Limiter* limiter) - : filename_(fname), fd_(fd), limiter_(limiter) { - temporary_fd_ = !limiter->Acquire(); - if (temporary_fd_) { - // Open file on every access. - close(fd_); - fd_ = -1; + // The new instance takes ownership of |fd|. |fd_limiter| must outlive this + // instance, and will be used to determine if . 
+ PosixRandomAccessFile(std::string filename, int fd, Limiter* fd_limiter) + : has_permanent_fd_(fd_limiter->Acquire()), + fd_(has_permanent_fd_ ? fd : -1), + fd_limiter_(fd_limiter), + filename_(std::move(filename)) { + if (!has_permanent_fd_) { + assert(fd_ == -1); + ::close(fd); // The file will be opened on every read. } } - virtual ~PosixRandomAccessFile() { - if (!temporary_fd_) { - close(fd_); - limiter_->Release(); + ~PosixRandomAccessFile() override { + if (has_permanent_fd_) { + assert(fd_ != -1); + ::close(fd_); + fd_limiter_->Release(); } } - virtual Status Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const { + Status Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const override { int fd = fd_; - if (temporary_fd_) { - fd = open(filename_.c_str(), O_RDONLY); + if (!has_permanent_fd_) { + fd = ::open(filename_.c_str(), O_RDONLY | kOpenBaseFlags); if (fd < 0) { - return IOError(filename_, errno); + return PosixError(filename_, errno); } } - Status s; - ssize_t r = pread(fd, scratch, n, static_cast<off_t>(offset)); - *result = Slice(scratch, (r < 0) ? 0 : r); - if (r < 0) { - // An error: return a non-ok status - s = IOError(filename_, errno); + assert(fd != -1); + + Status status; + ssize_t read_size = ::pread(fd, scratch, n, static_cast<off_t>(offset)); + *result = Slice(scratch, (read_size < 0) ? 0 : read_size); + if (read_size < 0) { + // An error: return a non-ok status. + status = PosixError(filename_, errno); } - if (temporary_fd_) { + if (!has_permanent_fd_) { // Close the temporary file descriptor opened earlier. - close(fd); + assert(fd != fd_); + ::close(fd); } - return s; + return status; } - virtual std::string GetName() const { return filename_; } -}; + virtual std::string GetName() const override { return filename_; } -// mmap() based random-access -class PosixMmapReadableFile: public RandomAccessFile { private: - std::string filename_; - void* mmapped_region_; - size_t length_; - Limiter* limiter_; + const bool has_permanent_fd_; // If false, the file is opened on every read. + const int fd_; // -1 if has_permanent_fd_ is false. + Limiter* const fd_limiter_; + const std::string filename_; +}; +// Implements random read access in a file using mmap(). +// +// Instances of this class are thread-safe, as required by the RandomAccessFile +// API. Instances are immutable and Read() only calls thread-safe library +// functions. +class PosixMmapReadableFile final : public RandomAccessFile { public: - // base[0,length-1] contains the mmapped contents of the file. - PosixMmapReadableFile(const std::string& fname, void* base, size_t length, - Limiter* limiter) - : filename_(fname), mmapped_region_(base), length_(length), - limiter_(limiter) { - } - - virtual ~PosixMmapReadableFile() { - munmap(mmapped_region_, length_); - limiter_->Release(); - } - - virtual Status Read(uint64_t offset, size_t n, Slice* result, - char* scratch) const { - Status s; + // mmap_base[0, length-1] points to the memory-mapped contents of the file. It + // must be the result of a successful call to mmap(). This instances takes + // over the ownership of the region. + // + // |mmap_limiter| must outlive this instance. The caller must have already + // aquired the right to use one mmap region, which will be released when this + // instance is destroyed. 
+ PosixMmapReadableFile(std::string filename, char* mmap_base, size_t length, + Limiter* mmap_limiter) + : mmap_base_(mmap_base), + length_(length), + mmap_limiter_(mmap_limiter), + filename_(std::move(filename)) {} + + ~PosixMmapReadableFile() override { + ::munmap(static_cast<void*>(mmap_base_), length_); + mmap_limiter_->Release(); + } + + Status Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const override { if (offset + n > length_) { *result = Slice(); - s = IOError(filename_, EINVAL); - } else { - *result = Slice(reinterpret_cast<char*>(mmapped_region_) + offset, n); + return PosixError(filename_, EINVAL); } - return s; + + *result = Slice(mmap_base_ + offset, n); + return Status::OK(); } - virtual std::string GetName() const { return filename_; } -}; + virtual std::string GetName() const override { return filename_; } -class PosixWritableFile : public WritableFile { private: - std::string filename_; - FILE* file_; + char* const mmap_base_; + const size_t length_; + Limiter* const mmap_limiter_; + const std::string filename_; +}; +class PosixWritableFile final : public WritableFile { public: - PosixWritableFile(const std::string& fname, FILE* f) - : filename_(fname), file_(f) { } - - ~PosixWritableFile() { - if (file_ != NULL) { + PosixWritableFile(std::string filename, int fd) + : pos_(0), + fd_(fd), + is_manifest_(IsManifest(filename)), + filename_(std::move(filename)), + dirname_(Dirname(filename_)) {} + + ~PosixWritableFile() override { + if (fd_ >= 0) { // Ignoring any potential errors - fclose(file_); + Close(); } } - virtual Status Append(const Slice& data) { - size_t r = fwrite_unlocked(data.data(), 1, data.size(), file_); - if (r != data.size()) { - return IOError(filename_, errno); + Status Append(const Slice& data) override { + size_t write_size = data.size(); + const char* write_data = data.data(); + + // Fit as much as possible into buffer. + size_t copy_size = std::min(write_size, kWritableFileBufferSize - pos_); + std::memcpy(buf_ + pos_, write_data, copy_size); + write_data += copy_size; + write_size -= copy_size; + pos_ += copy_size; + if (write_size == 0) { + return Status::OK(); } - return Status::OK(); + + // Can't fit in buffer, so need to do at least one write. + Status status = FlushBuffer(); + if (!status.ok()) { + return status; + } + + // Small writes go to buffer, large writes are written directly. + if (write_size < kWritableFileBufferSize) { + std::memcpy(buf_, write_data, write_size); + pos_ = write_size; + return Status::OK(); + } + return WriteUnbuffered(write_data, write_size); } - virtual Status Close() { - Status result; - if (fclose(file_) != 0) { - result = IOError(filename_, errno); + Status Close() override { + Status status = FlushBuffer(); + const int close_result = ::close(fd_); + if (close_result < 0 && status.ok()) { + status = PosixError(filename_, errno); } - file_ = NULL; - return result; + fd_ = -1; + return status; } - virtual Status Flush() { - if (fflush_unlocked(file_) != 0) { - return IOError(filename_, errno); + Status Flush() override { return FlushBuffer(); } + + Status Sync() override { + // Ensure new files referred to by the manifest are in the filesystem. + // + // This needs to happen before the manifest file is flushed to disk, to + // avoid crashing in a state where the manifest refers to files that are not + // yet on disk. 
+ Status status = SyncDirIfManifest(); + if (!status.ok()) { + return status; + } + + status = FlushBuffer(); + if (!status.ok()) { + return status; + } + + return SyncFd(fd_, filename_, false); + } + + private: + Status FlushBuffer() { + Status status = WriteUnbuffered(buf_, pos_); + pos_ = 0; + return status; + } + + Status WriteUnbuffered(const char* data, size_t size) { + while (size > 0) { + ssize_t write_result = ::write(fd_, data, size); + if (write_result < 0) { + if (errno == EINTR) { + continue; // Retry + } + return PosixError(filename_, errno); + } + data += write_result; + size -= write_result; } return Status::OK(); } Status SyncDirIfManifest() { - const char* f = filename_.c_str(); - const char* sep = strrchr(f, '/'); - Slice basename; - std::string dir; - if (sep == NULL) { - dir = "."; - basename = f; + Status status; + if (!is_manifest_) { + return status; + } + + int fd = ::open(dirname_.c_str(), O_RDONLY | kOpenBaseFlags); + if (fd < 0) { + status = PosixError(dirname_, errno); } else { - dir = std::string(f, sep - f); - basename = sep + 1; + status = SyncFd(fd, dirname_, true); + ::close(fd); + } + return status; + } + + // Ensures that all the caches associated with the given file descriptor's + // data are flushed all the way to durable media, and can withstand power + // failures. + // + // The path argument is only used to populate the description string in the + // returned Status if an error occurs. + static Status SyncFd(int fd, const std::string& fd_path, bool syncing_dir) { +#if HAVE_FULLFSYNC + // On macOS and iOS, fsync() doesn't guarantee durability past power + // failures. fcntl(F_FULLFSYNC) is required for that purpose. Some + // filesystems don't support fcntl(F_FULLFSYNC), and require a fallback to + // fsync(). + if (::fcntl(fd, F_FULLFSYNC) == 0) { + return Status::OK(); } - Status s; - if (basename.starts_with("MANIFEST")) { - int fd = open(dir.c_str(), O_RDONLY); - if (fd < 0) { - s = IOError(dir, errno); - } else { - if (fsync(fd) < 0 && errno != EINVAL) { - s = IOError(dir, errno); - } - close(fd); - } +#endif // HAVE_FULLFSYNC + +#if HAVE_FDATASYNC + bool sync_success = ::fdatasync(fd) == 0; +#else + bool sync_success = ::fsync(fd) == 0; +#endif // HAVE_FDATASYNC + + if (sync_success) { + return Status::OK(); } - return s; + // Do not crash if filesystem can't fsync directories + // (see https://github.com/bitcoin/bitcoin/pull/10000) + if (syncing_dir && errno == EINVAL) { + return Status::OK(); + } + return PosixError(fd_path, errno); } - virtual Status Sync() { - // Ensure new files referred to by the manifest are in the filesystem. - Status s = SyncDirIfManifest(); - if (!s.ok()) { - return s; + // Returns the directory name in a path pointing to a file. + // + // Returns "." if the path does not contain any directory separator. + static std::string Dirname(const std::string& filename) { + std::string::size_type separator_pos = filename.rfind('/'); + if (separator_pos == std::string::npos) { + return std::string("."); } - if (fflush_unlocked(file_) != 0 || - fdatasync(fileno(file_)) != 0) { - s = Status::IOError(filename_, strerror(errno)); + // The filename component should not contain a path separator. If it does, + // the splitting was done incorrectly. + assert(filename.find('/', separator_pos + 1) == std::string::npos); + + return filename.substr(0, separator_pos); + } + + // Extracts the file name from a path pointing to a file. 
+ // + // The returned Slice points to |filename|'s data buffer, so it is only valid + // while |filename| is alive and unchanged. + static Slice Basename(const std::string& filename) { + std::string::size_type separator_pos = filename.rfind('/'); + if (separator_pos == std::string::npos) { + return Slice(filename); } - return s; + // The filename component should not contain a path separator. If it does, + // the splitting was done incorrectly. + assert(filename.find('/', separator_pos + 1) == std::string::npos); + + return Slice(filename.data() + separator_pos + 1, + filename.length() - separator_pos - 1); } - virtual std::string GetName() const { return filename_; } + // True if the given file is a manifest file. + static bool IsManifest(const std::string& filename) { + return Basename(filename).starts_with("MANIFEST"); + } + + virtual std::string GetName() const override { return filename_; } + + // buf_[0, pos_ - 1] contains data to be written to fd_. + char buf_[kWritableFileBufferSize]; + size_t pos_; + int fd_; + + const bool is_manifest_; // True if the file's name starts with MANIFEST. + const std::string filename_; + const std::string dirname_; // The directory of filename_. }; -static int LockOrUnlock(int fd, bool lock) { +int LockOrUnlock(int fd, bool lock) { errno = 0; - struct flock f; - memset(&f, 0, sizeof(f)); - f.l_type = (lock ? F_WRLCK : F_UNLCK); - f.l_whence = SEEK_SET; - f.l_start = 0; - f.l_len = 0; // Lock/unlock entire file - return fcntl(fd, F_SETLK, &f); + struct ::flock file_lock_info; + std::memset(&file_lock_info, 0, sizeof(file_lock_info)); + file_lock_info.l_type = (lock ? F_WRLCK : F_UNLCK); + file_lock_info.l_whence = SEEK_SET; + file_lock_info.l_start = 0; + file_lock_info.l_len = 0; // Lock/unlock entire file. + return ::fcntl(fd, F_SETLK, &file_lock_info); } +// Instances are thread-safe because they are immutable. class PosixFileLock : public FileLock { public: - int fd_; - std::string name_; + PosixFileLock(int fd, std::string filename) + : fd_(fd), filename_(std::move(filename)) {} + + int fd() const { return fd_; } + const std::string& filename() const { return filename_; } + + private: + const int fd_; + const std::string filename_; }; -// Set of locked files. We keep a separate set instead of just -// relying on fcntrl(F_SETLK) since fcntl(F_SETLK) does not provide -// any protection against multiple uses from the same process. +// Tracks the files locked by PosixEnv::LockFile(). +// +// We maintain a separate set instead of relying on fcntl(F_SETLK) because +// fcntl(F_SETLK) does not provide any protection against multiple uses from the +// same process. +// +// Instances are thread-safe because all member data is guarded by a mutex. 
class PosixLockTable { - private: - port::Mutex mu_; - std::set<std::string> locked_files_; public: - bool Insert(const std::string& fname) { - MutexLock l(&mu_); - return locked_files_.insert(fname).second; - } - void Remove(const std::string& fname) { - MutexLock l(&mu_); + bool Insert(const std::string& fname) LOCKS_EXCLUDED(mu_) { + mu_.Lock(); + bool succeeded = locked_files_.insert(fname).second; + mu_.Unlock(); + return succeeded; + } + void Remove(const std::string& fname) LOCKS_EXCLUDED(mu_) { + mu_.Lock(); locked_files_.erase(fname); + mu_.Unlock(); } + + private: + port::Mutex mu_; + std::set<std::string> locked_files_ GUARDED_BY(mu_); }; class PosixEnv : public Env { public: PosixEnv(); - virtual ~PosixEnv() { - char msg[] = "Destroying Env::Default()\n"; - fwrite(msg, 1, sizeof(msg), stderr); - abort(); - } - - virtual Status NewSequentialFile(const std::string& fname, - SequentialFile** result) { - FILE* f = fopen(fname.c_str(), "r"); - if (f == NULL) { - *result = NULL; - return IOError(fname, errno); - } else { - *result = new PosixSequentialFile(fname, f); - return Status::OK(); + ~PosixEnv() override { + static const char msg[] = + "PosixEnv singleton destroyed. Unsupported behavior!\n"; + std::fwrite(msg, 1, sizeof(msg), stderr); + std::abort(); + } + + Status NewSequentialFile(const std::string& filename, + SequentialFile** result) override { + int fd = ::open(filename.c_str(), O_RDONLY | kOpenBaseFlags); + if (fd < 0) { + *result = nullptr; + return PosixError(filename, errno); } + + *result = new PosixSequentialFile(filename, fd); + return Status::OK(); } - virtual Status NewRandomAccessFile(const std::string& fname, - RandomAccessFile** result) { - *result = NULL; - Status s; - int fd = open(fname.c_str(), O_RDONLY); + Status NewRandomAccessFile(const std::string& filename, + RandomAccessFile** result) override { + *result = nullptr; + int fd = ::open(filename.c_str(), O_RDONLY | kOpenBaseFlags); if (fd < 0) { - s = IOError(fname, errno); - } else if (mmap_limit_.Acquire()) { - uint64_t size; - s = GetFileSize(fname, &size); - if (s.ok()) { - void* base = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0); - if (base != MAP_FAILED) { - *result = new PosixMmapReadableFile(fname, base, size, &mmap_limit_); - } else { - s = IOError(fname, errno); - } - } - close(fd); - if (!s.ok()) { - mmap_limit_.Release(); + return PosixError(filename, errno); + } + + if (!mmap_limiter_.Acquire()) { + *result = new PosixRandomAccessFile(filename, fd, &fd_limiter_); + return Status::OK(); + } + + uint64_t file_size; + Status status = GetFileSize(filename, &file_size); + if (status.ok()) { + void* mmap_base = + ::mmap(/*addr=*/nullptr, file_size, PROT_READ, MAP_SHARED, fd, 0); + if (mmap_base != MAP_FAILED) { + *result = new PosixMmapReadableFile(filename, + reinterpret_cast<char*>(mmap_base), + file_size, &mmap_limiter_); + } else { + status = PosixError(filename, errno); } - } else { - *result = new PosixRandomAccessFile(fname, fd, &fd_limit_); } - return s; + ::close(fd); + if (!status.ok()) { + mmap_limiter_.Release(); + } + return status; } - virtual Status NewWritableFile(const std::string& fname, - WritableFile** result) { - Status s; - FILE* f = fopen(fname.c_str(), "w"); - if (f == NULL) { - *result = NULL; - s = IOError(fname, errno); - } else { - *result = new PosixWritableFile(fname, f); + Status NewWritableFile(const std::string& filename, + WritableFile** result) override { + int fd = ::open(filename.c_str(), + O_TRUNC | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644); + if (fd < 
0) { + *result = nullptr; + return PosixError(filename, errno); } - return s; + + *result = new PosixWritableFile(filename, fd); + return Status::OK(); } - virtual Status NewAppendableFile(const std::string& fname, - WritableFile** result) { - Status s; - FILE* f = fopen(fname.c_str(), "a"); - if (f == NULL) { - *result = NULL; - s = IOError(fname, errno); - } else { - *result = new PosixWritableFile(fname, f); + Status NewAppendableFile(const std::string& filename, + WritableFile** result) override { + int fd = ::open(filename.c_str(), + O_APPEND | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644); + if (fd < 0) { + *result = nullptr; + return PosixError(filename, errno); } - return s; + + *result = new PosixWritableFile(filename, fd); + return Status::OK(); } - virtual bool FileExists(const std::string& fname) { - return access(fname.c_str(), F_OK) == 0; + bool FileExists(const std::string& filename) override { + return ::access(filename.c_str(), F_OK) == 0; } - virtual Status GetChildren(const std::string& dir, - std::vector<std::string>* result) { + Status GetChildren(const std::string& directory_path, + std::vector<std::string>* result) override { result->clear(); - DIR* d = opendir(dir.c_str()); - if (d == NULL) { - return IOError(dir, errno); + ::DIR* dir = ::opendir(directory_path.c_str()); + if (dir == nullptr) { + return PosixError(directory_path, errno); } - struct dirent* entry; - while ((entry = readdir(d)) != NULL) { - result->push_back(entry->d_name); + struct ::dirent* entry; + while ((entry = ::readdir(dir)) != nullptr) { + result->emplace_back(entry->d_name); } - closedir(d); + ::closedir(dir); return Status::OK(); } - virtual Status DeleteFile(const std::string& fname) { - Status result; - if (unlink(fname.c_str()) != 0) { - result = IOError(fname, errno); + Status DeleteFile(const std::string& filename) override { + if (::unlink(filename.c_str()) != 0) { + return PosixError(filename, errno); } - return result; + return Status::OK(); } - virtual Status CreateDir(const std::string& name) { - Status result; - if (mkdir(name.c_str(), 0755) != 0) { - result = IOError(name, errno); + Status CreateDir(const std::string& dirname) override { + if (::mkdir(dirname.c_str(), 0755) != 0) { + return PosixError(dirname, errno); } - return result; + return Status::OK(); } - virtual Status DeleteDir(const std::string& name) { - Status result; - if (rmdir(name.c_str()) != 0) { - result = IOError(name, errno); + Status DeleteDir(const std::string& dirname) override { + if (::rmdir(dirname.c_str()) != 0) { + return PosixError(dirname, errno); } - return result; + return Status::OK(); } - virtual Status GetFileSize(const std::string& fname, uint64_t* size) { - Status s; - struct stat sbuf; - if (stat(fname.c_str(), &sbuf) != 0) { + Status GetFileSize(const std::string& filename, uint64_t* size) override { + struct ::stat file_stat; + if (::stat(filename.c_str(), &file_stat) != 0) { *size = 0; - s = IOError(fname, errno); - } else { - *size = sbuf.st_size; + return PosixError(filename, errno); } - return s; + *size = file_stat.st_size; + return Status::OK(); } - virtual Status RenameFile(const std::string& src, const std::string& target) { - Status result; - if (rename(src.c_str(), target.c_str()) != 0) { - result = IOError(src, errno); + Status RenameFile(const std::string& from, const std::string& to) override { + if (std::rename(from.c_str(), to.c_str()) != 0) { + return PosixError(from, errno); } - return result; + return Status::OK(); } - virtual Status LockFile(const std::string& fname, 
FileLock** lock) { - *lock = NULL; - Status result; - int fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644); + Status LockFile(const std::string& filename, FileLock** lock) override { + *lock = nullptr; + + int fd = ::open(filename.c_str(), O_RDWR | O_CREAT | kOpenBaseFlags, 0644); if (fd < 0) { - result = IOError(fname, errno); - } else if (!locks_.Insert(fname)) { - close(fd); - result = Status::IOError("lock " + fname, "already held by process"); - } else if (LockOrUnlock(fd, true) == -1) { - result = IOError("lock " + fname, errno); - close(fd); - locks_.Remove(fname); - } else { - PosixFileLock* my_lock = new PosixFileLock; - my_lock->fd_ = fd; - my_lock->name_ = fname; - *lock = my_lock; + return PosixError(filename, errno); } - return result; + + if (!locks_.Insert(filename)) { + ::close(fd); + return Status::IOError("lock " + filename, "already held by process"); + } + + if (LockOrUnlock(fd, true) == -1) { + int lock_errno = errno; + ::close(fd); + locks_.Remove(filename); + return PosixError("lock " + filename, lock_errno); + } + + *lock = new PosixFileLock(fd, filename); + return Status::OK(); } - virtual Status UnlockFile(FileLock* lock) { - PosixFileLock* my_lock = reinterpret_cast<PosixFileLock*>(lock); - Status result; - if (LockOrUnlock(my_lock->fd_, false) == -1) { - result = IOError("unlock", errno); + Status UnlockFile(FileLock* lock) override { + PosixFileLock* posix_file_lock = static_cast<PosixFileLock*>(lock); + if (LockOrUnlock(posix_file_lock->fd(), false) == -1) { + return PosixError("unlock " + posix_file_lock->filename(), errno); } - locks_.Remove(my_lock->name_); - close(my_lock->fd_); - delete my_lock; - return result; + locks_.Remove(posix_file_lock->filename()); + ::close(posix_file_lock->fd()); + delete posix_file_lock; + return Status::OK(); } - virtual void Schedule(void (*function)(void*), void* arg); + void Schedule(void (*background_work_function)(void* background_work_arg), + void* background_work_arg) override; - virtual void StartThread(void (*function)(void* arg), void* arg); + void StartThread(void (*thread_main)(void* thread_main_arg), + void* thread_main_arg) override { + std::thread new_thread(thread_main, thread_main_arg); + new_thread.detach(); + } - virtual Status GetTestDirectory(std::string* result) { - const char* env = getenv("TEST_TMPDIR"); + Status GetTestDirectory(std::string* result) override { + const char* env = std::getenv("TEST_TMPDIR"); if (env && env[0] != '\0') { *result = env; } else { char buf[100]; - snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d", int(geteuid())); + std::snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d", + static_cast<int>(::geteuid())); *result = buf; } - // Directory may already exist + + // The CreateDir status is ignored because the directory may already exist. 
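// Illustration (standalone sketch, not taken from the diff): the two layers
// of locking that LockFile() above combines. fcntl(F_SETLK) locks do not
// conflict between descriptors owned by the same process, so an in-process
// set of locked names is checked first; the advisory fcntl() lock then guards
// against other processes. All names here are hypothetical, and O_CLOEXEC is
// assumed to be available.
#include <fcntl.h>
#include <unistd.h>
#include <cstring>
#include <mutex>
#include <set>
#include <string>

namespace sketch {

class ProcessWideLockSet {
 public:
  bool Insert(const std::string& name) {
    std::lock_guard<std::mutex> guard(mu_);
    return names_.insert(name).second;
  }
  void Remove(const std::string& name) {
    std::lock_guard<std::mutex> guard(mu_);
    names_.erase(name);
  }

 private:
  std::mutex mu_;
  std::set<std::string> names_;
};

// Returns an open, locked descriptor on success, or -1 on failure.
int LockWholeFile(ProcessWideLockSet& held_locks, const std::string& path) {
  int fd = ::open(path.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0644);
  if (fd < 0) return -1;
  if (!held_locks.Insert(path)) {  // Already locked by this process.
    ::close(fd);
    return -1;
  }
  struct ::flock lock_info;
  std::memset(&lock_info, 0, sizeof(lock_info));
  lock_info.l_type = F_WRLCK;    // Exclusive lock over the entire file.
  lock_info.l_whence = SEEK_SET;
  lock_info.l_start = 0;
  lock_info.l_len = 0;
  if (::fcntl(fd, F_SETLK, &lock_info) == -1) {
    ::close(fd);
    held_locks.Remove(path);
    return -1;
  }
  return fd;
}

}  // namespace sketch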
CreateDir(*result); + return Status::OK(); } - static uint64_t gettid() { - pthread_t tid = pthread_self(); - uint64_t thread_id = 0; - memcpy(&thread_id, &tid, std::min(sizeof(thread_id), sizeof(tid))); - return thread_id; - } + Status NewLogger(const std::string& filename, Logger** result) override { + int fd = ::open(filename.c_str(), + O_APPEND | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644); + if (fd < 0) { + *result = nullptr; + return PosixError(filename, errno); + } - virtual Status NewLogger(const std::string& fname, Logger** result) { - FILE* f = fopen(fname.c_str(), "w"); - if (f == NULL) { - *result = NULL; - return IOError(fname, errno); + std::FILE* fp = ::fdopen(fd, "w"); + if (fp == nullptr) { + ::close(fd); + *result = nullptr; + return PosixError(filename, errno); } else { - *result = new PosixLogger(f, &PosixEnv::gettid); + *result = new PosixLogger(fp); return Status::OK(); } } - virtual uint64_t NowMicros() { - struct timeval tv; - gettimeofday(&tv, NULL); - return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec; + uint64_t NowMicros() override { + static constexpr uint64_t kUsecondsPerSecond = 1000000; + struct ::timeval tv; + ::gettimeofday(&tv, nullptr); + return static_cast<uint64_t>(tv.tv_sec) * kUsecondsPerSecond + tv.tv_usec; } - virtual void SleepForMicroseconds(int micros) { - usleep(micros); + void SleepForMicroseconds(int micros) override { + std::this_thread::sleep_for(std::chrono::microseconds(micros)); } private: - void PthreadCall(const char* label, int result) { - if (result != 0) { - fprintf(stderr, "pthread %s: %s\n", label, strerror(result)); - abort(); - } - } + void BackgroundThreadMain(); - // BGThread() is the body of the background thread - void BGThread(); - static void* BGThreadWrapper(void* arg) { - reinterpret_cast<PosixEnv*>(arg)->BGThread(); - return NULL; + static void BackgroundThreadEntryPoint(PosixEnv* env) { + env->BackgroundThreadMain(); } - pthread_mutex_t mu_; - pthread_cond_t bgsignal_; - pthread_t bgthread_; - bool started_bgthread_; + // Stores the work item data in a Schedule() call. + // + // Instances are constructed on the thread calling Schedule() and used on the + // background thread. + // + // This structure is thread-safe beacuse it is immutable. + struct BackgroundWorkItem { + explicit BackgroundWorkItem(void (*function)(void* arg), void* arg) + : function(function), arg(arg) {} + + void (*const function)(void*); + void* const arg; + }; - // Entry per Schedule() call - struct BGItem { void* arg; void (*function)(void*); }; - typedef std::deque<BGItem> BGQueue; - BGQueue queue_; + port::Mutex background_work_mutex_; + port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_); + bool started_background_thread_ GUARDED_BY(background_work_mutex_); - PosixLockTable locks_; - Limiter mmap_limit_; - Limiter fd_limit_; + std::queue<BackgroundWorkItem> background_work_queue_ + GUARDED_BY(background_work_mutex_); + + PosixLockTable locks_; // Thread-safe. + Limiter mmap_limiter_; // Thread-safe. + Limiter fd_limiter_; // Thread-safe. }; // Return the maximum number of concurrent mmaps. -static int MaxMmaps() { - if (mmap_limit >= 0) { - return mmap_limit; - } - // Up to 4096 mmaps for 64-bit binaries; none for smaller pointer sizes. - mmap_limit = sizeof(void*) >= 8 ? 4096 : 0; - return mmap_limit; -} +int MaxMmaps() { return g_mmap_limit; } // Return the maximum number of read-only files to keep open. 
-static intptr_t MaxOpenFiles() { - if (open_read_only_file_limit >= 0) { - return open_read_only_file_limit; +int MaxOpenFiles() { + if (g_open_read_only_file_limit >= 0) { + return g_open_read_only_file_limit; } - struct rlimit rlim; - if (getrlimit(RLIMIT_NOFILE, &rlim)) { + struct ::rlimit rlim; + if (::getrlimit(RLIMIT_NOFILE, &rlim)) { // getrlimit failed, fallback to hard-coded default. - open_read_only_file_limit = 50; + g_open_read_only_file_limit = 50; } else if (rlim.rlim_cur == RLIM_INFINITY) { - open_read_only_file_limit = std::numeric_limits<int>::max(); + g_open_read_only_file_limit = std::numeric_limits<int>::max(); } else { // Allow use of 20% of available file descriptors for read-only files. - open_read_only_file_limit = rlim.rlim_cur / 5; + g_open_read_only_file_limit = rlim.rlim_cur / 5; } - return open_read_only_file_limit; + return g_open_read_only_file_limit; } +} // namespace + PosixEnv::PosixEnv() - : started_bgthread_(false), - mmap_limit_(MaxMmaps()), - fd_limit_(MaxOpenFiles()) { - PthreadCall("mutex_init", pthread_mutex_init(&mu_, NULL)); - PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, NULL)); -} + : background_work_cv_(&background_work_mutex_), + started_background_thread_(false), + mmap_limiter_(MaxMmaps()), + fd_limiter_(MaxOpenFiles()) {} -void PosixEnv::Schedule(void (*function)(void*), void* arg) { - PthreadCall("lock", pthread_mutex_lock(&mu_)); +void PosixEnv::Schedule( + void (*background_work_function)(void* background_work_arg), + void* background_work_arg) { + background_work_mutex_.Lock(); - // Start background thread if necessary - if (!started_bgthread_) { - started_bgthread_ = true; - PthreadCall( - "create thread", - pthread_create(&bgthread_, NULL, &PosixEnv::BGThreadWrapper, this)); + // Start the background thread, if we haven't done so already. + if (!started_background_thread_) { + started_background_thread_ = true; + std::thread background_thread(PosixEnv::BackgroundThreadEntryPoint, this); + background_thread.detach(); } - // If the queue is currently empty, the background thread may currently be - // waiting. - if (queue_.empty()) { - PthreadCall("signal", pthread_cond_signal(&bgsignal_)); + // If the queue is empty, the background thread may be waiting for work. + if (background_work_queue_.empty()) { + background_work_cv_.Signal(); } - // Add to priority queue - queue_.push_back(BGItem()); - queue_.back().function = function; - queue_.back().arg = arg; - - PthreadCall("unlock", pthread_mutex_unlock(&mu_)); + background_work_queue_.emplace(background_work_function, background_work_arg); + background_work_mutex_.Unlock(); } -void PosixEnv::BGThread() { +void PosixEnv::BackgroundThreadMain() { while (true) { - // Wait until there is an item that is ready to run - PthreadCall("lock", pthread_mutex_lock(&mu_)); - while (queue_.empty()) { - PthreadCall("wait", pthread_cond_wait(&bgsignal_, &mu_)); + background_work_mutex_.Lock(); + + // Wait until there is work to be done. 
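// Illustration (standalone sketch, not taken from the diff): the fd budget
// computed by MaxOpenFiles() above, which reserves roughly 20% of the
// process's RLIMIT_NOFILE soft limit for read-only files and falls back to a
// small constant when the limit cannot be queried.
#include <sys/resource.h>
#include <limits>

namespace sketch {

int ReadOnlyFdBudget() {
  struct ::rlimit rlim;
  if (::getrlimit(RLIMIT_NOFILE, &rlim) != 0) {
    return 50;  // getrlimit failed; use a conservative hard-coded default.
  }
  if (rlim.rlim_cur == RLIM_INFINITY) {
    return std::numeric_limits<int>::max();
  }
  return static_cast<int>(rlim.rlim_cur / 5);  // 20% of the soft limit.
}

}  // namespace sketch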
+ while (background_work_queue_.empty()) { + background_work_cv_.Wait(); } - void (*function)(void*) = queue_.front().function; - void* arg = queue_.front().arg; - queue_.pop_front(); + assert(!background_work_queue_.empty()); + auto background_work_function = background_work_queue_.front().function; + void* background_work_arg = background_work_queue_.front().arg; + background_work_queue_.pop(); - PthreadCall("unlock", pthread_mutex_unlock(&mu_)); - (*function)(arg); + background_work_mutex_.Unlock(); + background_work_function(background_work_arg); } } namespace { -struct StartThreadState { - void (*user_function)(void*); - void* arg; + +// Wraps an Env instance whose destructor is never created. +// +// Intended usage: +// using PlatformSingletonEnv = SingletonEnv<PlatformEnv>; +// void ConfigurePosixEnv(int param) { +// PlatformSingletonEnv::AssertEnvNotInitialized(); +// // set global configuration flags. +// } +// Env* Env::Default() { +// static PlatformSingletonEnv default_env; +// return default_env.env(); +// } +template <typename EnvType> +class SingletonEnv { + public: + SingletonEnv() { +#if !defined(NDEBUG) + env_initialized_.store(true, std::memory_order::memory_order_relaxed); +#endif // !defined(NDEBUG) + static_assert(sizeof(env_storage_) >= sizeof(EnvType), + "env_storage_ will not fit the Env"); + static_assert(alignof(decltype(env_storage_)) >= alignof(EnvType), + "env_storage_ does not meet the Env's alignment needs"); + new (&env_storage_) EnvType(); + } + ~SingletonEnv() = default; + + SingletonEnv(const SingletonEnv&) = delete; + SingletonEnv& operator=(const SingletonEnv&) = delete; + + Env* env() { return reinterpret_cast<Env*>(&env_storage_); } + + static void AssertEnvNotInitialized() { +#if !defined(NDEBUG) + assert(!env_initialized_.load(std::memory_order::memory_order_relaxed)); +#endif // !defined(NDEBUG) + } + + private: + typename std::aligned_storage<sizeof(EnvType), alignof(EnvType)>::type + env_storage_; +#if !defined(NDEBUG) + static std::atomic<bool> env_initialized_; +#endif // !defined(NDEBUG) }; -} -static void* StartThreadWrapper(void* arg) { - StartThreadState* state = reinterpret_cast<StartThreadState*>(arg); - state->user_function(state->arg); - delete state; - return NULL; -} -void PosixEnv::StartThread(void (*function)(void* arg), void* arg) { - pthread_t t; - StartThreadState* state = new StartThreadState; - state->user_function = function; - state->arg = arg; - PthreadCall("start thread", - pthread_create(&t, NULL, &StartThreadWrapper, state)); -} +#if !defined(NDEBUG) +template <typename EnvType> +std::atomic<bool> SingletonEnv<EnvType>::env_initialized_; +#endif // !defined(NDEBUG) -} // namespace +using PosixDefaultEnv = SingletonEnv<PosixEnv>; -static pthread_once_t once = PTHREAD_ONCE_INIT; -static Env* default_env; -static void InitDefaultEnv() { default_env = new PosixEnv; } +} // namespace void EnvPosixTestHelper::SetReadOnlyFDLimit(int limit) { - assert(default_env == NULL); - open_read_only_file_limit = limit; + PosixDefaultEnv::AssertEnvNotInitialized(); + g_open_read_only_file_limit = limit; } void EnvPosixTestHelper::SetReadOnlyMMapLimit(int limit) { - assert(default_env == NULL); - mmap_limit = limit; + PosixDefaultEnv::AssertEnvNotInitialized(); + g_mmap_limit = limit; } Env* Env::Default() { - pthread_once(&once, InitDefaultEnv); - return default_env; + static PosixDefaultEnv env_container; + return env_container.env(); } } // namespace leveldb - -#endif diff --git a/src/leveldb/util/env_posix_test.cc 
b/src/leveldb/util/env_posix_test.cc index 295f8ae440..9675d739ad 100644
--- a/src/leveldb/util/env_posix_test.cc
+++ b/src/leveldb/util/env_posix_test.cc
@@ -2,27 +2,182 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "leveldb/env.h"
+#include <sys/resource.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+#include <unordered_set>
+#include <vector>
+#include "leveldb/env.h"
#include "port/port.h"
-#include "util/testharness.h"
#include "util/env_posix_test_helper.h"
+#include "util/testharness.h"
+
+#if HAVE_O_CLOEXEC
+
+namespace {
+
+// Exit codes for the helper process spawned by TestCloseOnExec* tests.
+// Useful for debugging test failures.
+constexpr int kTextCloseOnExecHelperExecFailedCode = 61;
+constexpr int kTextCloseOnExecHelperDup2FailedCode = 62;
+constexpr int kTextCloseOnExecHelperFoundOpenFdCode = 63;
+
+// Global set by main() and read in TestCloseOnExec.
+//
+// The argv[0] value is stored in a std::vector instead of a std::string because
+// std::string does not return a mutable pointer to its buffer until C++17.
+//
+// The vector stores the string pointed to by argv[0], plus the trailing null.
+std::vector<char>* GetArgvZero() {
+  static std::vector<char> program_name;
+  return &program_name;
+}
+
+// Command-line switch used to run this test as the CloseOnExecSwitch helper.
+static const char kTestCloseOnExecSwitch[] = "--test-close-on-exec-helper";
+
+// Executed in a separate process by TestCloseOnExec* tests.
+//
+// main() delegates to this function when the test executable is launched with
+// a special command-line switch. TestCloseOnExec* tests fork()+exec() the test
+// executable and pass the special command-line switch.
+//
+// When main() delegates to this function, the process probes whether a given
+// file descriptor is open, and communicates the result via its exit code.
+int TestCloseOnExecHelperMain(char* pid_arg) {
+  int fd = std::atoi(pid_arg);
+  // When given the same file descriptor twice, dup2() returns -1 if the
+  // file descriptor is closed, or the given file descriptor if it is open.
+  if (::dup2(fd, fd) == fd) {
+    std::fprintf(stderr, "Unexpected open fd %d\n", fd);
+    return kTextCloseOnExecHelperFoundOpenFdCode;
+  }
+  // Double-check that dup2() is saying the file descriptor is closed.
+  if (errno != EBADF) {
+    std::fprintf(stderr, "Unexpected errno after calling dup2 on fd %d: %s\n",
+                 fd, std::strerror(errno));
+    return kTextCloseOnExecHelperDup2FailedCode;
+  }
+  return 0;
+}
+
+// File descriptors are small non-negative integers.
+//
+// Returns void so the implementation can use ASSERT_EQ.
+void GetMaxFileDescriptor(int* result_fd) {
+  // Get the maximum file descriptor number.
+  ::rlimit fd_rlimit;
+  ASSERT_EQ(0, ::getrlimit(RLIMIT_NOFILE, &fd_rlimit));
+  *result_fd = fd_rlimit.rlim_cur;
+}
+
+// Iterates through all possible FDs and returns the currently open ones.
+//
+// Returns void so the implementation can use ASSERT_EQ.
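// Illustration (standalone sketch, not taken from the diff): the dup2(fd, fd)
// probe that the test helpers here are built on. Duplicating a descriptor
// onto itself is a no-op that still validates it: it returns fd when the
// descriptor is open and fails with EBADF when it is not.
#include <unistd.h>

namespace sketch {

bool IsFdOpen(int fd) {
  return ::dup2(fd, fd) == fd;
}

}  // namespace sketch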
+void GetOpenFileDescriptors(std::unordered_set<int>* open_fds) { + int max_fd = 0; + GetMaxFileDescriptor(&max_fd); + + for (int fd = 0; fd < max_fd; ++fd) { + if (::dup2(fd, fd) != fd) { + // When given the same file descriptor twice, dup2() returns -1 if the + // file descriptor is closed, or the given file descriptor if it is open. + // + // Double-check that dup2() is saying the fd is closed. + ASSERT_EQ(EBADF, errno) + << "dup2() should set errno to EBADF on closed file descriptors"; + continue; + } + open_fds->insert(fd); + } +} + +// Finds an FD open since a previous call to GetOpenFileDescriptors(). +// +// |baseline_open_fds| is the result of a previous GetOpenFileDescriptors() +// call. Assumes that exactly one FD was opened since that call. +// +// Returns void so the implementation can use ASSERT_EQ. +void GetNewlyOpenedFileDescriptor( + const std::unordered_set<int>& baseline_open_fds, int* result_fd) { + std::unordered_set<int> open_fds; + GetOpenFileDescriptors(&open_fds); + for (int fd : baseline_open_fds) { + ASSERT_EQ(1, open_fds.count(fd)) + << "Previously opened file descriptor was closed during test setup"; + open_fds.erase(fd); + } + ASSERT_EQ(1, open_fds.size()) + << "Expected exactly one newly opened file descriptor during test setup"; + *result_fd = *open_fds.begin(); +} + +// Check that a fork()+exec()-ed child process does not have an extra open FD. +void CheckCloseOnExecDoesNotLeakFDs( + const std::unordered_set<int>& baseline_open_fds) { + // Prepare the argument list for the child process. + // execv() wants mutable buffers. + char switch_buffer[sizeof(kTestCloseOnExecSwitch)]; + std::memcpy(switch_buffer, kTestCloseOnExecSwitch, + sizeof(kTestCloseOnExecSwitch)); + + int probed_fd; + GetNewlyOpenedFileDescriptor(baseline_open_fds, &probed_fd); + std::string fd_string = std::to_string(probed_fd); + std::vector<char> fd_buffer(fd_string.begin(), fd_string.end()); + fd_buffer.emplace_back('\0'); + + // The helper process is launched with the command below. 
+ // env_posix_tests --test-close-on-exec-helper 3 + char* child_argv[] = {GetArgvZero()->data(), switch_buffer, fd_buffer.data(), + nullptr}; + + constexpr int kForkInChildProcessReturnValue = 0; + int child_pid = fork(); + if (child_pid == kForkInChildProcessReturnValue) { + ::execv(child_argv[0], child_argv); + std::fprintf(stderr, "Error spawning child process: %s\n", strerror(errno)); + std::exit(kTextCloseOnExecHelperExecFailedCode); + } + + int child_status = 0; + ASSERT_EQ(child_pid, ::waitpid(child_pid, &child_status, 0)); + ASSERT_TRUE(WIFEXITED(child_status)) + << "The helper process did not exit with an exit code"; + ASSERT_EQ(0, WEXITSTATUS(child_status)) + << "The helper process encountered an error"; +} + +} // namespace + +#endif // HAVE_O_CLOEXEC namespace leveldb { -static const int kDelayMicros = 100000; static const int kReadOnlyFileLimit = 4; static const int kMMapLimit = 4; class EnvPosixTest { public: - Env* env_; - EnvPosixTest() : env_(Env::Default()) { } - static void SetFileLimits(int read_only_file_limit, int mmap_limit) { EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit); EnvPosixTestHelper::SetReadOnlyMMapLimit(mmap_limit); } + + EnvPosixTest() : env_(Env::Default()) {} + + Env* env_; }; TEST(EnvPosixTest, TestOpenOnRead) { @@ -31,8 +186,8 @@ TEST(EnvPosixTest, TestOpenOnRead) { ASSERT_OK(env_->GetTestDirectory(&test_dir)); std::string test_file = test_dir + "/open_on_read.txt"; - FILE* f = fopen(test_file.c_str(), "w"); - ASSERT_TRUE(f != NULL); + FILE* f = fopen(test_file.c_str(), "we"); + ASSERT_TRUE(f != nullptr); const char kFileData[] = "abcdefghijklmnopqrstuvwxyz"; fputs(kFileData, f); fclose(f); @@ -56,9 +211,138 @@ TEST(EnvPosixTest, TestOpenOnRead) { ASSERT_OK(env_->DeleteFile(test_file)); } +#if HAVE_O_CLOEXEC + +TEST(EnvPosixTest, TestCloseOnExecSequentialFile) { + std::unordered_set<int> open_fds; + GetOpenFileDescriptors(&open_fds); + + std::string test_dir; + ASSERT_OK(env_->GetTestDirectory(&test_dir)); + std::string file_path = test_dir + "/close_on_exec_sequential.txt"; + ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); + + leveldb::SequentialFile* file = nullptr; + ASSERT_OK(env_->NewSequentialFile(file_path, &file)); + CheckCloseOnExecDoesNotLeakFDs(open_fds); + delete file; + + ASSERT_OK(env_->DeleteFile(file_path)); +} + +TEST(EnvPosixTest, TestCloseOnExecRandomAccessFile) { + std::unordered_set<int> open_fds; + GetOpenFileDescriptors(&open_fds); + + std::string test_dir; + ASSERT_OK(env_->GetTestDirectory(&test_dir)); + std::string file_path = test_dir + "/close_on_exec_random_access.txt"; + ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); + + // Exhaust the RandomAccessFile mmap limit. This way, the test + // RandomAccessFile instance below is backed by a file descriptor, not by an + // mmap region. 
+ leveldb::RandomAccessFile* mmapped_files[kReadOnlyFileLimit] = {nullptr}; + for (int i = 0; i < kReadOnlyFileLimit; i++) { + ASSERT_OK(env_->NewRandomAccessFile(file_path, &mmapped_files[i])); + } + + leveldb::RandomAccessFile* file = nullptr; + ASSERT_OK(env_->NewRandomAccessFile(file_path, &file)); + CheckCloseOnExecDoesNotLeakFDs(open_fds); + delete file; + + for (int i = 0; i < kReadOnlyFileLimit; i++) { + delete mmapped_files[i]; + } + ASSERT_OK(env_->DeleteFile(file_path)); +} + +TEST(EnvPosixTest, TestCloseOnExecWritableFile) { + std::unordered_set<int> open_fds; + GetOpenFileDescriptors(&open_fds); + + std::string test_dir; + ASSERT_OK(env_->GetTestDirectory(&test_dir)); + std::string file_path = test_dir + "/close_on_exec_writable.txt"; + ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); + + leveldb::WritableFile* file = nullptr; + ASSERT_OK(env_->NewWritableFile(file_path, &file)); + CheckCloseOnExecDoesNotLeakFDs(open_fds); + delete file; + + ASSERT_OK(env_->DeleteFile(file_path)); +} + +TEST(EnvPosixTest, TestCloseOnExecAppendableFile) { + std::unordered_set<int> open_fds; + GetOpenFileDescriptors(&open_fds); + + std::string test_dir; + ASSERT_OK(env_->GetTestDirectory(&test_dir)); + std::string file_path = test_dir + "/close_on_exec_appendable.txt"; + ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); + + leveldb::WritableFile* file = nullptr; + ASSERT_OK(env_->NewAppendableFile(file_path, &file)); + CheckCloseOnExecDoesNotLeakFDs(open_fds); + delete file; + + ASSERT_OK(env_->DeleteFile(file_path)); +} + +TEST(EnvPosixTest, TestCloseOnExecLockFile) { + std::unordered_set<int> open_fds; + GetOpenFileDescriptors(&open_fds); + + std::string test_dir; + ASSERT_OK(env_->GetTestDirectory(&test_dir)); + std::string file_path = test_dir + "/close_on_exec_lock.txt"; + ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); + + leveldb::FileLock* lock = nullptr; + ASSERT_OK(env_->LockFile(file_path, &lock)); + CheckCloseOnExecDoesNotLeakFDs(open_fds); + ASSERT_OK(env_->UnlockFile(lock)); + + ASSERT_OK(env_->DeleteFile(file_path)); +} + +TEST(EnvPosixTest, TestCloseOnExecLogger) { + std::unordered_set<int> open_fds; + GetOpenFileDescriptors(&open_fds); + + std::string test_dir; + ASSERT_OK(env_->GetTestDirectory(&test_dir)); + std::string file_path = test_dir + "/close_on_exec_logger.txt"; + ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); + + leveldb::Logger* file = nullptr; + ASSERT_OK(env_->NewLogger(file_path, &file)); + CheckCloseOnExecDoesNotLeakFDs(open_fds); + delete file; + + ASSERT_OK(env_->DeleteFile(file_path)); +} + +#endif // HAVE_O_CLOEXEC + } // namespace leveldb int main(int argc, char** argv) { +#if HAVE_O_CLOEXEC + // Check if we're invoked as a helper program, or as the test suite. + for (int i = 1; i < argc; ++i) { + if (!std::strcmp(argv[i], kTestCloseOnExecSwitch)) { + return TestCloseOnExecHelperMain(argv[i + 1]); + } + } + + // Save argv[0] early, because googletest may modify argv. + GetArgvZero()->assign(argv[0], argv[0] + std::strlen(argv[0]) + 1); +#endif // HAVE_O_CLOEXEC + // All tests currently run with the same read-only file limits. 
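// Illustration (standalone sketch, not what the tests above do): the tests
// prove close-on-exec end-to-end by fork()+exec()ing a helper process; the
// same property can also be observed directly on a descriptor by reading its
// FD_CLOEXEC flag, as sketched here.
#include <fcntl.h>

namespace sketch {

bool HasCloseOnExecFlag(int fd) {
  int flags = ::fcntl(fd, F_GETFD);
  return flags != -1 && (flags & FD_CLOEXEC) != 0;
}

}  // namespace sketch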
leveldb::EnvPosixTest::SetFileLimits(leveldb::kReadOnlyFileLimit, leveldb::kMMapLimit); diff --git a/src/leveldb/util/env_test.cc b/src/leveldb/util/env_test.cc index 839ae56a1a..7db03fc11c 100644 --- a/src/leveldb/util/env_test.cc +++ b/src/leveldb/util/env_test.cc @@ -4,72 +4,144 @@ #include "leveldb/env.h" +#include <algorithm> + #include "port/port.h" +#include "port/thread_annotations.h" +#include "util/mutexlock.h" #include "util/testharness.h" +#include "util/testutil.h" namespace leveldb { static const int kDelayMicros = 100000; -static const int kReadOnlyFileLimit = 4; -static const int kMMapLimit = 4; class EnvTest { - private: - port::Mutex mu_; - std::string events_; - public: + EnvTest() : env_(Env::Default()) {} + Env* env_; - EnvTest() : env_(Env::Default()) { } }; -static void SetBool(void* ptr) { - reinterpret_cast<port::AtomicPointer*>(ptr)->NoBarrier_Store(ptr); +TEST(EnvTest, ReadWrite) { + Random rnd(test::RandomSeed()); + + // Get file to use for testing. + std::string test_dir; + ASSERT_OK(env_->GetTestDirectory(&test_dir)); + std::string test_file_name = test_dir + "/open_on_read.txt"; + WritableFile* writable_file; + ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file)); + + // Fill a file with data generated via a sequence of randomly sized writes. + static const size_t kDataSize = 10 * 1048576; + std::string data; + while (data.size() < kDataSize) { + int len = rnd.Skewed(18); // Up to 2^18 - 1, but typically much smaller + std::string r; + test::RandomString(&rnd, len, &r); + ASSERT_OK(writable_file->Append(r)); + data += r; + if (rnd.OneIn(10)) { + ASSERT_OK(writable_file->Flush()); + } + } + ASSERT_OK(writable_file->Sync()); + ASSERT_OK(writable_file->Close()); + delete writable_file; + + // Read all data using a sequence of randomly sized reads. + SequentialFile* sequential_file; + ASSERT_OK(env_->NewSequentialFile(test_file_name, &sequential_file)); + std::string read_result; + std::string scratch; + while (read_result.size() < data.size()) { + int len = std::min<int>(rnd.Skewed(18), data.size() - read_result.size()); + scratch.resize(std::max(len, 1)); // at least 1 so &scratch[0] is legal + Slice read; + ASSERT_OK(sequential_file->Read(len, &read, &scratch[0])); + if (len > 0) { + ASSERT_GT(read.size(), 0); + } + ASSERT_LE(read.size(), len); + read_result.append(read.data(), read.size()); + } + ASSERT_EQ(read_result, data); + delete sequential_file; } TEST(EnvTest, RunImmediately) { - port::AtomicPointer called (NULL); - env_->Schedule(&SetBool, &called); - env_->SleepForMicroseconds(kDelayMicros); - ASSERT_TRUE(called.NoBarrier_Load() != NULL); + struct RunState { + port::Mutex mu; + port::CondVar cvar{&mu}; + bool called = false; + + static void Run(void* arg) { + RunState* state = reinterpret_cast<RunState*>(arg); + MutexLock l(&state->mu); + ASSERT_EQ(state->called, false); + state->called = true; + state->cvar.Signal(); + } + }; + + RunState state; + env_->Schedule(&RunState::Run, &state); + + MutexLock l(&state.mu); + while (!state.called) { + state.cvar.Wait(); + } } TEST(EnvTest, RunMany) { - port::AtomicPointer last_id (NULL); + struct RunState { + port::Mutex mu; + port::CondVar cvar{&mu}; + int last_id = 0; + }; + + struct Callback { + RunState* state_; // Pointer to shared state. + const int id_; // Order# for the execution of this callback. 
- struct CB { - port::AtomicPointer* last_id_ptr; // Pointer to shared slot - uintptr_t id; // Order# for the execution of this callback + Callback(RunState* s, int id) : state_(s), id_(id) {} - CB(port::AtomicPointer* p, int i) : last_id_ptr(p), id(i) { } + static void Run(void* arg) { + Callback* callback = reinterpret_cast<Callback*>(arg); + RunState* state = callback->state_; - static void Run(void* v) { - CB* cb = reinterpret_cast<CB*>(v); - void* cur = cb->last_id_ptr->NoBarrier_Load(); - ASSERT_EQ(cb->id-1, reinterpret_cast<uintptr_t>(cur)); - cb->last_id_ptr->Release_Store(reinterpret_cast<void*>(cb->id)); + MutexLock l(&state->mu); + ASSERT_EQ(state->last_id, callback->id_ - 1); + state->last_id = callback->id_; + state->cvar.Signal(); } }; - // Schedule in different order than start time - CB cb1(&last_id, 1); - CB cb2(&last_id, 2); - CB cb3(&last_id, 3); - CB cb4(&last_id, 4); - env_->Schedule(&CB::Run, &cb1); - env_->Schedule(&CB::Run, &cb2); - env_->Schedule(&CB::Run, &cb3); - env_->Schedule(&CB::Run, &cb4); - - env_->SleepForMicroseconds(kDelayMicros); - void* cur = last_id.Acquire_Load(); - ASSERT_EQ(4, reinterpret_cast<uintptr_t>(cur)); + RunState state; + Callback callback1(&state, 1); + Callback callback2(&state, 2); + Callback callback3(&state, 3); + Callback callback4(&state, 4); + env_->Schedule(&Callback::Run, &callback1); + env_->Schedule(&Callback::Run, &callback2); + env_->Schedule(&Callback::Run, &callback3); + env_->Schedule(&Callback::Run, &callback4); + + MutexLock l(&state.mu); + while (state.last_id != 4) { + state.cvar.Wait(); + } } struct State { port::Mutex mu; - int val; - int num_running; + port::CondVar cvar{&mu}; + + int val GUARDED_BY(mu); + int num_running GUARDED_BY(mu); + + State(int val, int num_running) : val(val), num_running(num_running) {} }; static void ThreadBody(void* arg) { @@ -77,30 +149,89 @@ static void ThreadBody(void* arg) { s->mu.Lock(); s->val += 1; s->num_running -= 1; + s->cvar.Signal(); s->mu.Unlock(); } TEST(EnvTest, StartThread) { - State state; - state.val = 0; - state.num_running = 3; + State state(0, 3); for (int i = 0; i < 3; i++) { env_->StartThread(&ThreadBody, &state); } - while (true) { - state.mu.Lock(); - int num = state.num_running; - state.mu.Unlock(); - if (num == 0) { - break; - } - env_->SleepForMicroseconds(kDelayMicros); + + MutexLock l(&state.mu); + while (state.num_running != 0) { + state.cvar.Wait(); } ASSERT_EQ(state.val, 3); } -} // namespace leveldb +TEST(EnvTest, TestOpenNonExistentFile) { + // Write some test data to a single file that will be opened |n| times. 
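// Illustration (standalone sketch, not taken from the diff): the
// synchronization pattern the rewritten tests above rely on. Instead of
// sleeping for kDelayMicros and hoping the scheduled work has run, the
// callback signals a condition variable and the test thread waits on it.
// Standard-library primitives are used here instead of leveldb's port::
// wrappers.
#include <condition_variable>
#include <mutex>

namespace sketch {

struct Completion {
  std::mutex mu;
  std::condition_variable cv;
  bool done = false;

  void Signal() {
    std::lock_guard<std::mutex> guard(mu);
    done = true;
    cv.notify_one();
  }

  void Wait() {
    std::unique_lock<std::mutex> lock(mu);
    cv.wait(lock, [this] { return done; });
  }
};

}  // namespace sketch
// Usage: the scheduled callback calls Completion::Signal(); the test body then
// calls Completion::Wait() rather than polling with a fixed sleep.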
+ std::string test_dir; + ASSERT_OK(env_->GetTestDirectory(&test_dir)); + + std::string non_existent_file = test_dir + "/non_existent_file"; + ASSERT_TRUE(!env_->FileExists(non_existent_file)); + + RandomAccessFile* random_access_file; + Status status = + env_->NewRandomAccessFile(non_existent_file, &random_access_file); + ASSERT_TRUE(status.IsNotFound()); -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); + SequentialFile* sequential_file; + status = env_->NewSequentialFile(non_existent_file, &sequential_file); + ASSERT_TRUE(status.IsNotFound()); } + +TEST(EnvTest, ReopenWritableFile) { + std::string test_dir; + ASSERT_OK(env_->GetTestDirectory(&test_dir)); + std::string test_file_name = test_dir + "/reopen_writable_file.txt"; + env_->DeleteFile(test_file_name); + + WritableFile* writable_file; + ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file)); + std::string data("hello world!"); + ASSERT_OK(writable_file->Append(data)); + ASSERT_OK(writable_file->Close()); + delete writable_file; + + ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file)); + data = "42"; + ASSERT_OK(writable_file->Append(data)); + ASSERT_OK(writable_file->Close()); + delete writable_file; + + ASSERT_OK(ReadFileToString(env_, test_file_name, &data)); + ASSERT_EQ(std::string("42"), data); + env_->DeleteFile(test_file_name); +} + +TEST(EnvTest, ReopenAppendableFile) { + std::string test_dir; + ASSERT_OK(env_->GetTestDirectory(&test_dir)); + std::string test_file_name = test_dir + "/reopen_appendable_file.txt"; + env_->DeleteFile(test_file_name); + + WritableFile* appendable_file; + ASSERT_OK(env_->NewAppendableFile(test_file_name, &appendable_file)); + std::string data("hello world!"); + ASSERT_OK(appendable_file->Append(data)); + ASSERT_OK(appendable_file->Close()); + delete appendable_file; + + ASSERT_OK(env_->NewAppendableFile(test_file_name, &appendable_file)); + data = "42"; + ASSERT_OK(appendable_file->Append(data)); + ASSERT_OK(appendable_file->Close()); + delete appendable_file; + + ASSERT_OK(ReadFileToString(env_, test_file_name, &data)); + ASSERT_EQ(std::string("hello world!42"), data); + env_->DeleteFile(test_file_name); +} + +} // namespace leveldb + +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/util/env_win.cc b/src/leveldb/util/env_win.cc deleted file mode 100644 index 830332abe9..0000000000 --- a/src/leveldb/util/env_win.cc +++ /dev/null @@ -1,902 +0,0 @@ -// This file contains source that originates from: -// http://code.google.com/p/leveldbwin/source/browse/trunk/win32_impl_src/env_win32.h -// http://code.google.com/p/leveldbwin/source/browse/trunk/win32_impl_src/port_win32.cc -// Those files don't have any explicit license headers but the -// project (http://code.google.com/p/leveldbwin/) lists the 'New BSD License' -// as the license. 
-#if defined(LEVELDB_PLATFORM_WINDOWS) -#include <map> - - -#include "leveldb/env.h" - -#include "port/port.h" -#include "leveldb/slice.h" -#include "util/logging.h" - -#include <shlwapi.h> -#include <process.h> -#include <cstring> -#include <stdio.h> -#include <errno.h> -#include <io.h> -#include <algorithm> - -#ifdef max -#undef max -#endif - -#ifndef va_copy -#define va_copy(d,s) ((d) = (s)) -#endif - -#if defined DeleteFile -#undef DeleteFile -#endif - -//Declarations -namespace leveldb -{ - -namespace Win32 -{ - -#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ - TypeName(const TypeName&); \ - void operator=(const TypeName&) - -std::string GetCurrentDir(); -std::wstring GetCurrentDirW(); - -static const std::string CurrentDir = GetCurrentDir(); -static const std::wstring CurrentDirW = GetCurrentDirW(); - -std::string& ModifyPath(std::string& path); -std::wstring& ModifyPath(std::wstring& path); - -std::string GetLastErrSz(); -std::wstring GetLastErrSzW(); - -size_t GetPageSize(); - -typedef void (*ScheduleProc)(void*) ; - -struct WorkItemWrapper -{ - WorkItemWrapper(ScheduleProc proc_,void* content_); - ScheduleProc proc; - void* pContent; -}; - -DWORD WINAPI WorkItemWrapperProc(LPVOID pContent); - -class Win32SequentialFile : public SequentialFile -{ -public: - friend class Win32Env; - virtual ~Win32SequentialFile(); - virtual Status Read(size_t n, Slice* result, char* scratch); - virtual Status Skip(uint64_t n); - BOOL isEnable(); - virtual std::string GetName() const { return _filename; } -private: - BOOL _Init(); - void _CleanUp(); - Win32SequentialFile(const std::string& fname); - std::string _filename; - ::HANDLE _hFile; - DISALLOW_COPY_AND_ASSIGN(Win32SequentialFile); -}; - -class Win32RandomAccessFile : public RandomAccessFile -{ -public: - friend class Win32Env; - virtual ~Win32RandomAccessFile(); - virtual Status Read(uint64_t offset, size_t n, Slice* result,char* scratch) const; - BOOL isEnable(); - virtual std::string GetName() const { return _filename; } -private: - BOOL _Init(LPCWSTR path); - void _CleanUp(); - Win32RandomAccessFile(const std::string& fname); - HANDLE _hFile; - const std::string _filename; - DISALLOW_COPY_AND_ASSIGN(Win32RandomAccessFile); -}; - -class Win32WritableFile : public WritableFile -{ -public: - Win32WritableFile(const std::string& fname, bool append); - ~Win32WritableFile(); - - virtual Status Append(const Slice& data); - virtual Status Close(); - virtual Status Flush(); - virtual Status Sync(); - BOOL isEnable(); - virtual std::string GetName() const { return filename_; } -private: - std::string filename_; - ::HANDLE _hFile; -}; - -class Win32FileLock : public FileLock -{ -public: - friend class Win32Env; - virtual ~Win32FileLock(); - BOOL isEnable(); -private: - BOOL _Init(LPCWSTR path); - void _CleanUp(); - Win32FileLock(const std::string& fname); - HANDLE _hFile; - std::string _filename; - DISALLOW_COPY_AND_ASSIGN(Win32FileLock); -}; - -class Win32Logger : public Logger -{ -public: - friend class Win32Env; - virtual ~Win32Logger(); - virtual void Logv(const char* format, va_list ap); -private: - explicit Win32Logger(WritableFile* pFile); - WritableFile* _pFileProxy; - DISALLOW_COPY_AND_ASSIGN(Win32Logger); -}; - -class Win32Env : public Env -{ -public: - Win32Env(); - virtual ~Win32Env(); - virtual Status NewSequentialFile(const std::string& fname, - SequentialFile** result); - - virtual Status NewRandomAccessFile(const std::string& fname, - RandomAccessFile** result); - virtual Status NewWritableFile(const std::string& fname, - WritableFile** 
result); - virtual Status NewAppendableFile(const std::string& fname, - WritableFile** result); - - virtual bool FileExists(const std::string& fname); - - virtual Status GetChildren(const std::string& dir, - std::vector<std::string>* result); - - virtual Status DeleteFile(const std::string& fname); - - virtual Status CreateDir(const std::string& dirname); - - virtual Status DeleteDir(const std::string& dirname); - - virtual Status GetFileSize(const std::string& fname, uint64_t* file_size); - - virtual Status RenameFile(const std::string& src, - const std::string& target); - - virtual Status LockFile(const std::string& fname, FileLock** lock); - - virtual Status UnlockFile(FileLock* lock); - - virtual void Schedule( - void (*function)(void* arg), - void* arg); - - virtual void StartThread(void (*function)(void* arg), void* arg); - - virtual Status GetTestDirectory(std::string* path); - - //virtual void Logv(WritableFile* log, const char* format, va_list ap); - - virtual Status NewLogger(const std::string& fname, Logger** result); - - virtual uint64_t NowMicros(); - - virtual void SleepForMicroseconds(int micros); -}; - -void ToWidePath(const std::string& value, std::wstring& target) { - wchar_t buffer[MAX_PATH]; - MultiByteToWideChar(CP_UTF8, 0, value.c_str(), -1, buffer, MAX_PATH); - target = buffer; -} - -void ToNarrowPath(const std::wstring& value, std::string& target) { - char buffer[MAX_PATH]; - WideCharToMultiByte(CP_UTF8, 0, value.c_str(), -1, buffer, MAX_PATH, NULL, NULL); - target = buffer; -} - -std::wstring GetCurrentDirW() -{ - WCHAR path[MAX_PATH]; - ::GetModuleFileNameW(::GetModuleHandleW(NULL),path,MAX_PATH); - *wcsrchr(path,L'\\') = 0; - return std::wstring(path); -} - -std::string GetCurrentDir() -{ - std::string path; - ToNarrowPath(GetCurrentDirW(), path); - return path; -} - -std::string& ModifyPath(std::string& path) -{ - if(path[0] == '/' || path[0] == '\\'){ - path = CurrentDir + path; - } - std::replace(path.begin(),path.end(),'/','\\'); - - return path; -} - -std::wstring& ModifyPath(std::wstring& path) -{ - if(path[0] == L'/' || path[0] == L'\\'){ - path = CurrentDirW + path; - } - std::replace(path.begin(),path.end(),L'/',L'\\'); - return path; -} - -std::string GetLastErrSz() -{ - LPWSTR lpMsgBuf; - FormatMessageW( - FORMAT_MESSAGE_ALLOCATE_BUFFER | - FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, - GetLastError(), - 0, // Default language - (LPWSTR) &lpMsgBuf, - 0, - NULL - ); - std::string Err; - ToNarrowPath(lpMsgBuf, Err); - LocalFree( lpMsgBuf ); - return Err; -} - -std::wstring GetLastErrSzW() -{ - LPVOID lpMsgBuf; - FormatMessageW( - FORMAT_MESSAGE_ALLOCATE_BUFFER | - FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, - GetLastError(), - 0, // Default language - (LPWSTR) &lpMsgBuf, - 0, - NULL - ); - std::wstring Err = (LPCWSTR)lpMsgBuf; - LocalFree(lpMsgBuf); - return Err; -} - -WorkItemWrapper::WorkItemWrapper( ScheduleProc proc_,void* content_ ) : - proc(proc_),pContent(content_) -{ - -} - -DWORD WINAPI WorkItemWrapperProc(LPVOID pContent) -{ - WorkItemWrapper* item = static_cast<WorkItemWrapper*>(pContent); - ScheduleProc TempProc = item->proc; - void* arg = item->pContent; - delete item; - TempProc(arg); - return 0; -} - -size_t GetPageSize() -{ - SYSTEM_INFO si; - GetSystemInfo(&si); - return std::max(si.dwPageSize,si.dwAllocationGranularity); -} - -const size_t g_PageSize = GetPageSize(); - - -Win32SequentialFile::Win32SequentialFile( const std::string& fname ) : - _filename(fname),_hFile(NULL) -{ - _Init(); 
-} - -Win32SequentialFile::~Win32SequentialFile() -{ - _CleanUp(); -} - -Status Win32SequentialFile::Read( size_t n, Slice* result, char* scratch ) -{ - Status sRet; - DWORD hasRead = 0; - if(_hFile && ReadFile(_hFile,scratch,n,&hasRead,NULL) ){ - *result = Slice(scratch,hasRead); - } else { - sRet = Status::IOError(_filename, Win32::GetLastErrSz() ); - } - return sRet; -} - -Status Win32SequentialFile::Skip( uint64_t n ) -{ - Status sRet; - LARGE_INTEGER Move,NowPointer; - Move.QuadPart = n; - if(!SetFilePointerEx(_hFile,Move,&NowPointer,FILE_CURRENT)){ - sRet = Status::IOError(_filename,Win32::GetLastErrSz()); - } - return sRet; -} - -BOOL Win32SequentialFile::isEnable() -{ - return _hFile ? TRUE : FALSE; -} - -BOOL Win32SequentialFile::_Init() -{ - std::wstring path; - ToWidePath(_filename, path); - _hFile = CreateFileW(path.c_str(), - GENERIC_READ, - FILE_SHARE_READ | FILE_SHARE_WRITE, - NULL, - OPEN_EXISTING, - FILE_ATTRIBUTE_NORMAL | FILE_FLAG_SEQUENTIAL_SCAN, - NULL); - if (_hFile == INVALID_HANDLE_VALUE) - _hFile = NULL; - return _hFile ? TRUE : FALSE; -} - -void Win32SequentialFile::_CleanUp() -{ - if(_hFile){ - CloseHandle(_hFile); - _hFile = NULL; - } -} - -Win32RandomAccessFile::Win32RandomAccessFile( const std::string& fname ) : - _filename(fname),_hFile(NULL) -{ - std::wstring path; - ToWidePath(fname, path); - _Init( path.c_str() ); -} - -Win32RandomAccessFile::~Win32RandomAccessFile() -{ - _CleanUp(); -} - -Status Win32RandomAccessFile::Read(uint64_t offset,size_t n,Slice* result,char* scratch) const -{ - Status sRet; - OVERLAPPED ol = {0}; - ZeroMemory(&ol,sizeof(ol)); - ol.Offset = (DWORD)offset; - ol.OffsetHigh = (DWORD)(offset >> 32); - DWORD hasRead = 0; - if(!ReadFile(_hFile,scratch,n,&hasRead,&ol)) - sRet = Status::IOError(_filename,Win32::GetLastErrSz()); - else - *result = Slice(scratch,hasRead); - return sRet; -} - -BOOL Win32RandomAccessFile::_Init( LPCWSTR path ) -{ - BOOL bRet = FALSE; - if(!_hFile) - _hFile = ::CreateFileW(path,GENERIC_READ,FILE_SHARE_READ|FILE_SHARE_WRITE,NULL,OPEN_EXISTING, - FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS,NULL); - if(!_hFile || _hFile == INVALID_HANDLE_VALUE ) - _hFile = NULL; - else - bRet = TRUE; - return bRet; -} - -BOOL Win32RandomAccessFile::isEnable() -{ - return _hFile ? TRUE : FALSE; -} - -void Win32RandomAccessFile::_CleanUp() -{ - if(_hFile){ - ::CloseHandle(_hFile); - _hFile = NULL; - } -} - -Win32WritableFile::Win32WritableFile(const std::string& fname, bool append) - : filename_(fname) -{ - std::wstring path; - ToWidePath(fname, path); - // NewAppendableFile: append to an existing file, or create a new one - // if none exists - this is OPEN_ALWAYS behavior, with - // FILE_APPEND_DATA to avoid having to manually position the file - // pointer at the end of the file. - // NewWritableFile: create a new file, delete if it exists - this is - // CREATE_ALWAYS behavior. This file is used for writing only so - // use GENERIC_WRITE. - _hFile = CreateFileW(path.c_str(), - append ? FILE_APPEND_DATA : GENERIC_WRITE, - FILE_SHARE_READ|FILE_SHARE_DELETE|FILE_SHARE_WRITE, - NULL, - append ? 
OPEN_ALWAYS : CREATE_ALWAYS, - FILE_ATTRIBUTE_NORMAL, - NULL); - // CreateFileW returns INVALID_HANDLE_VALUE in case of error, always check isEnable() before use -} - -Win32WritableFile::~Win32WritableFile() -{ - if (_hFile != INVALID_HANDLE_VALUE) - Close(); -} - -Status Win32WritableFile::Append(const Slice& data) -{ - DWORD r = 0; - if (!WriteFile(_hFile, data.data(), data.size(), &r, NULL) || r != data.size()) { - return Status::IOError("Win32WritableFile.Append::WriteFile: "+filename_, Win32::GetLastErrSz()); - } - return Status::OK(); -} - -Status Win32WritableFile::Close() -{ - if (!CloseHandle(_hFile)) { - return Status::IOError("Win32WritableFile.Close::CloseHandle: "+filename_, Win32::GetLastErrSz()); - } - _hFile = INVALID_HANDLE_VALUE; - return Status::OK(); -} - -Status Win32WritableFile::Flush() -{ - // Nothing to do here, there are no application-side buffers - return Status::OK(); -} - -Status Win32WritableFile::Sync() -{ - if (!FlushFileBuffers(_hFile)) { - return Status::IOError("Win32WritableFile.Sync::FlushFileBuffers "+filename_, Win32::GetLastErrSz()); - } - return Status::OK(); -} - -BOOL Win32WritableFile::isEnable() -{ - return _hFile != INVALID_HANDLE_VALUE; -} - -Win32FileLock::Win32FileLock( const std::string& fname ) : - _hFile(NULL),_filename(fname) -{ - std::wstring path; - ToWidePath(fname, path); - _Init(path.c_str()); -} - -Win32FileLock::~Win32FileLock() -{ - _CleanUp(); -} - -BOOL Win32FileLock::_Init( LPCWSTR path ) -{ - BOOL bRet = FALSE; - if(!_hFile) - _hFile = ::CreateFileW(path,0,0,NULL,CREATE_ALWAYS,FILE_ATTRIBUTE_NORMAL,NULL); - if(!_hFile || _hFile == INVALID_HANDLE_VALUE ){ - _hFile = NULL; - } - else - bRet = TRUE; - return bRet; -} - -void Win32FileLock::_CleanUp() -{ - ::CloseHandle(_hFile); - _hFile = NULL; -} - -BOOL Win32FileLock::isEnable() -{ - return _hFile ? TRUE : FALSE; -} - -Win32Logger::Win32Logger(WritableFile* pFile) : _pFileProxy(pFile) -{ - assert(_pFileProxy); -} - -Win32Logger::~Win32Logger() -{ - if(_pFileProxy) - delete _pFileProxy; -} - -void Win32Logger::Logv( const char* format, va_list ap ) -{ - uint64_t thread_id = ::GetCurrentThreadId(); - - // We try twice: the first time with a fixed-size stack allocated buffer, - // and the second time with a much larger dynamically allocated buffer. 
- char buffer[500]; - for (int iter = 0; iter < 2; iter++) { - char* base; - int bufsize; - if (iter == 0) { - bufsize = sizeof(buffer); - base = buffer; - } else { - bufsize = 30000; - base = new char[bufsize]; - } - char* p = base; - char* limit = base + bufsize; - - SYSTEMTIME st; - GetLocalTime(&st); - p += snprintf(p, limit - p, - "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ", - int(st.wYear), - int(st.wMonth), - int(st.wDay), - int(st.wHour), - int(st.wMinute), - int(st.wMinute), - int(st.wMilliseconds), - static_cast<long long unsigned int>(thread_id)); - - // Print the message - if (p < limit) { - va_list backup_ap; - va_copy(backup_ap, ap); - p += vsnprintf(p, limit - p, format, backup_ap); - va_end(backup_ap); - } - - // Truncate to available space if necessary - if (p >= limit) { - if (iter == 0) { - continue; // Try again with larger buffer - } else { - p = limit - 1; - } - } - - // Add newline if necessary - if (p == base || p[-1] != '\n') { - *p++ = '\n'; - } - - assert(p <= limit); - DWORD hasWritten = 0; - if(_pFileProxy){ - _pFileProxy->Append(Slice(base, p - base)); - _pFileProxy->Flush(); - } - if (base != buffer) { - delete[] base; - } - break; - } -} - -bool Win32Env::FileExists(const std::string& fname) -{ - std::string path = fname; - std::wstring wpath; - ToWidePath(ModifyPath(path), wpath); - return ::PathFileExistsW(wpath.c_str()) ? true : false; -} - -Status Win32Env::GetChildren(const std::string& dir, std::vector<std::string>* result) -{ - Status sRet; - ::WIN32_FIND_DATAW wfd; - std::string path = dir; - ModifyPath(path); - path += "\\*.*"; - std::wstring wpath; - ToWidePath(path, wpath); - - ::HANDLE hFind = ::FindFirstFileW(wpath.c_str() ,&wfd); - if(hFind && hFind != INVALID_HANDLE_VALUE){ - BOOL hasNext = TRUE; - std::string child; - while(hasNext){ - ToNarrowPath(wfd.cFileName, child); - if(child != ".." 
&& child != ".") { - result->push_back(child); - } - hasNext = ::FindNextFileW(hFind,&wfd); - } - ::FindClose(hFind); - } - else - sRet = Status::IOError(dir,"Could not get children."); - return sRet; -} - -void Win32Env::SleepForMicroseconds( int micros ) -{ - ::Sleep((micros + 999) /1000); -} - - -Status Win32Env::DeleteFile( const std::string& fname ) -{ - Status sRet; - std::string path = fname; - std::wstring wpath; - ToWidePath(ModifyPath(path), wpath); - - if(!::DeleteFileW(wpath.c_str())) { - sRet = Status::IOError(path, "Could not delete file."); - } - return sRet; -} - -Status Win32Env::GetFileSize( const std::string& fname, uint64_t* file_size ) -{ - Status sRet; - std::string path = fname; - std::wstring wpath; - ToWidePath(ModifyPath(path), wpath); - - HANDLE file = ::CreateFileW(wpath.c_str(), - GENERIC_READ,FILE_SHARE_READ|FILE_SHARE_WRITE,NULL,OPEN_EXISTING,FILE_ATTRIBUTE_NORMAL,NULL); - LARGE_INTEGER li; - if(::GetFileSizeEx(file,&li)){ - *file_size = (uint64_t)li.QuadPart; - }else - sRet = Status::IOError(path,"Could not get the file size."); - CloseHandle(file); - return sRet; -} - -Status Win32Env::RenameFile( const std::string& src, const std::string& target ) -{ - Status sRet; - std::string src_path = src; - std::wstring wsrc_path; - ToWidePath(ModifyPath(src_path), wsrc_path); - std::string target_path = target; - std::wstring wtarget_path; - ToWidePath(ModifyPath(target_path), wtarget_path); - - if(!MoveFileW(wsrc_path.c_str(), wtarget_path.c_str() ) ){ - DWORD err = GetLastError(); - if(err == 0x000000b7){ - if(!::DeleteFileW(wtarget_path.c_str() ) ) - sRet = Status::IOError(src, "Could not rename file."); - else if(!::MoveFileW(wsrc_path.c_str(), - wtarget_path.c_str() ) ) - sRet = Status::IOError(src, "Could not rename file."); - } - } - return sRet; -} - -Status Win32Env::LockFile( const std::string& fname, FileLock** lock ) -{ - Status sRet; - std::string path = fname; - ModifyPath(path); - Win32FileLock* _lock = new Win32FileLock(path); - if(!_lock->isEnable()){ - delete _lock; - *lock = NULL; - sRet = Status::IOError(path, "Could not lock file."); - } - else - *lock = _lock; - return sRet; -} - -Status Win32Env::UnlockFile( FileLock* lock ) -{ - Status sRet; - delete lock; - return sRet; -} - -void Win32Env::Schedule( void (*function)(void* arg), void* arg ) -{ - QueueUserWorkItem(Win32::WorkItemWrapperProc, - new Win32::WorkItemWrapper(function,arg), - WT_EXECUTEDEFAULT); -} - -void Win32Env::StartThread( void (*function)(void* arg), void* arg ) -{ - ::_beginthread(function,0,arg); -} - -Status Win32Env::GetTestDirectory( std::string* path ) -{ - Status sRet; - WCHAR TempPath[MAX_PATH]; - ::GetTempPathW(MAX_PATH,TempPath); - ToNarrowPath(TempPath, *path); - path->append("leveldb\\test\\"); - ModifyPath(*path); - return sRet; -} - -uint64_t Win32Env::NowMicros() -{ -#ifndef USE_VISTA_API -#define GetTickCount64 GetTickCount -#endif - return (uint64_t)(GetTickCount64()*1000); -} - -static Status CreateDirInner( const std::string& dirname ) -{ - Status sRet; - std::wstring dirnameW; - ToWidePath(dirname, dirnameW); - DWORD attr = ::GetFileAttributesW(dirnameW.c_str()); - if (attr == INVALID_FILE_ATTRIBUTES) { // doesn't exist: - std::size_t slash = dirname.find_last_of("\\"); - if (slash != std::string::npos){ - sRet = CreateDirInner(dirname.substr(0, slash)); - if (!sRet.ok()) return sRet; - } - BOOL result = ::CreateDirectoryW(dirnameW.c_str(), NULL); - if (result == FALSE) { - sRet = Status::IOError(dirname, "Could not create directory."); - return sRet; - } 
- } - return sRet; -} - -Status Win32Env::CreateDir( const std::string& dirname ) -{ - std::string path = dirname; - if(path[path.length() - 1] != '\\'){ - path += '\\'; - } - ModifyPath(path); - - return CreateDirInner(path); -} - -Status Win32Env::DeleteDir( const std::string& dirname ) -{ - Status sRet; - std::wstring path; - ToWidePath(dirname, path); - ModifyPath(path); - if(!::RemoveDirectoryW( path.c_str() ) ){ - sRet = Status::IOError(dirname, "Could not delete directory."); - } - return sRet; -} - -Status Win32Env::NewSequentialFile( const std::string& fname, SequentialFile** result ) -{ - Status sRet; - std::string path = fname; - ModifyPath(path); - Win32SequentialFile* pFile = new Win32SequentialFile(path); - if(pFile->isEnable()){ - *result = pFile; - }else { - delete pFile; - sRet = Status::IOError(path, Win32::GetLastErrSz()); - } - return sRet; -} - -Status Win32Env::NewRandomAccessFile( const std::string& fname, RandomAccessFile** result ) -{ - Status sRet; - std::string path = fname; - Win32RandomAccessFile* pFile = new Win32RandomAccessFile(ModifyPath(path)); - if(!pFile->isEnable()){ - delete pFile; - *result = NULL; - sRet = Status::IOError(path, Win32::GetLastErrSz()); - }else - *result = pFile; - return sRet; -} - -Status Win32Env::NewLogger( const std::string& fname, Logger** result ) -{ - Status sRet; - std::string path = fname; - // Logs are opened with write semantics, not with append semantics - // (see PosixEnv::NewLogger) - Win32WritableFile* pMapFile = new Win32WritableFile(ModifyPath(path), false); - if(!pMapFile->isEnable()){ - delete pMapFile; - *result = NULL; - sRet = Status::IOError(path,"could not create a logger."); - }else - *result = new Win32Logger(pMapFile); - return sRet; -} - -Status Win32Env::NewWritableFile( const std::string& fname, WritableFile** result ) -{ - Status sRet; - std::string path = fname; - Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path), false); - if(!pFile->isEnable()){ - *result = NULL; - sRet = Status::IOError(fname,Win32::GetLastErrSz()); - }else - *result = pFile; - return sRet; -} - -Status Win32Env::NewAppendableFile( const std::string& fname, WritableFile** result ) -{ - Status sRet; - std::string path = fname; - Win32WritableFile* pFile = new Win32WritableFile(ModifyPath(path), true); - if(!pFile->isEnable()){ - *result = NULL; - sRet = Status::IOError(fname,Win32::GetLastErrSz()); - }else - *result = pFile; - return sRet; -} - -Win32Env::Win32Env() -{ - -} - -Win32Env::~Win32Env() -{ - -} - - -} // Win32 namespace - -static port::OnceType once = LEVELDB_ONCE_INIT; -static Env* default_env; -static void InitDefaultEnv() { default_env = new Win32::Win32Env(); } - -Env* Env::Default() { - port::InitOnce(&once, InitDefaultEnv); - return default_env; -} - -} // namespace leveldb - -#endif // defined(LEVELDB_PLATFORM_WINDOWS) diff --git a/src/leveldb/util/env_windows.cc b/src/leveldb/util/env_windows.cc new file mode 100644 index 0000000000..1834206562 --- /dev/null +++ b/src/leveldb/util/env_windows.cc @@ -0,0 +1,849 @@ +// Copyright (c) 2018 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +// Prevent Windows headers from defining min/max macros and instead +// use STL. 
+#ifndef NOMINMAX +#define NOMINMAX +#endif // ifndef NOMINMAX +#include <windows.h> + +#include <algorithm> +#include <atomic> +#include <chrono> +#include <condition_variable> +#include <cstddef> +#include <cstdint> +#include <cstdlib> +#include <cstring> +#include <memory> +#include <mutex> +#include <queue> +#include <sstream> +#include <string> +#include <vector> + +#include "leveldb/env.h" +#include "leveldb/slice.h" +#include "port/port.h" +#include "port/thread_annotations.h" +#include "util/env_windows_test_helper.h" +#include "util/logging.h" +#include "util/mutexlock.h" +#include "util/windows_logger.h" + +#if defined(DeleteFile) +#undef DeleteFile +#endif // defined(DeleteFile) + +namespace leveldb { + +namespace { + +constexpr const size_t kWritableFileBufferSize = 65536; + +// Up to 1000 mmaps for 64-bit binaries; none for 32-bit. +constexpr int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 1000 : 0; + +// Can be set by EnvWindowsTestHelper::SetReadOnlyMMapLimit(). +int g_mmap_limit = kDefaultMmapLimit; + +std::string GetWindowsErrorMessage(DWORD error_code) { + std::string message; + char* error_text = nullptr; + // Use MBCS version of FormatMessage to match return value. + size_t error_text_size = ::FormatMessageA( + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_IGNORE_INSERTS, + nullptr, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + reinterpret_cast<char*>(&error_text), 0, nullptr); + if (!error_text) { + return message; + } + message.assign(error_text, error_text_size); + ::LocalFree(error_text); + return message; +} + +Status WindowsError(const std::string& context, DWORD error_code) { + if (error_code == ERROR_FILE_NOT_FOUND || error_code == ERROR_PATH_NOT_FOUND) + return Status::NotFound(context, GetWindowsErrorMessage(error_code)); + return Status::IOError(context, GetWindowsErrorMessage(error_code)); +} + +class ScopedHandle { + public: + ScopedHandle(HANDLE handle) : handle_(handle) {} + ScopedHandle(const ScopedHandle&) = delete; + ScopedHandle(ScopedHandle&& other) noexcept : handle_(other.Release()) {} + ~ScopedHandle() { Close(); } + + ScopedHandle& operator=(const ScopedHandle&) = delete; + + ScopedHandle& operator=(ScopedHandle&& rhs) noexcept { + if (this != &rhs) handle_ = rhs.Release(); + return *this; + } + + bool Close() { + if (!is_valid()) { + return true; + } + HANDLE h = handle_; + handle_ = INVALID_HANDLE_VALUE; + return ::CloseHandle(h); + } + + bool is_valid() const { + return handle_ != INVALID_HANDLE_VALUE && handle_ != nullptr; + } + + HANDLE get() const { return handle_; } + + HANDLE Release() { + HANDLE h = handle_; + handle_ = INVALID_HANDLE_VALUE; + return h; + } + + private: + HANDLE handle_; +}; + +// Helper class to limit resource usage to avoid exhaustion. +// Currently used to limit read-only file descriptors and mmap file usage +// so that we do not run out of file descriptors or virtual memory, or run into +// kernel performance problems for very large databases. +class Limiter { + public: + // Limit maximum number of resources to |max_acquires|. + Limiter(int max_acquires) : acquires_allowed_(max_acquires) {} + + Limiter(const Limiter&) = delete; + Limiter operator=(const Limiter&) = delete; + + // If another resource is available, acquire it and return true. + // Else return false.
+ bool Acquire() { + int old_acquires_allowed = + acquires_allowed_.fetch_sub(1, std::memory_order_relaxed); + + if (old_acquires_allowed > 0) return true; + + acquires_allowed_.fetch_add(1, std::memory_order_relaxed); + return false; + } + + // Release a resource acquired by a previous call to Acquire() that returned + // true. + void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); } + + private: + // The number of available resources. + // + // This is a counter and is not tied to the invariants of any other class, so + // it can be operated on safely using std::memory_order_relaxed. + std::atomic<int> acquires_allowed_; +}; + +class WindowsSequentialFile : public SequentialFile { + public: + WindowsSequentialFile(std::string filename, ScopedHandle handle) + : handle_(std::move(handle)), filename_(std::move(filename)) {} + ~WindowsSequentialFile() override {} + + Status Read(size_t n, Slice* result, char* scratch) override { + DWORD bytes_read; + // DWORD is 32-bit, but size_t could technically be larger. However leveldb + // files are limited to leveldb::Options::max_file_size which is clamped to + // 1<<30 or 1 GiB. + assert(n <= std::numeric_limits<DWORD>::max()); + if (!::ReadFile(handle_.get(), scratch, static_cast<DWORD>(n), &bytes_read, + nullptr)) { + return WindowsError(filename_, ::GetLastError()); + } + + *result = Slice(scratch, bytes_read); + return Status::OK(); + } + + Status Skip(uint64_t n) override { + LARGE_INTEGER distance; + distance.QuadPart = n; + if (!::SetFilePointerEx(handle_.get(), distance, nullptr, FILE_CURRENT)) { + return WindowsError(filename_, ::GetLastError()); + } + return Status::OK(); + } + + std::string GetName() const override { return filename_; } + + private: + const ScopedHandle handle_; + const std::string filename_; +}; + +class WindowsRandomAccessFile : public RandomAccessFile { + public: + WindowsRandomAccessFile(std::string filename, ScopedHandle handle) + : handle_(std::move(handle)), filename_(std::move(filename)) {} + + ~WindowsRandomAccessFile() override = default; + + Status Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const override { + DWORD bytes_read = 0; + OVERLAPPED overlapped = {0}; + + overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32); + overlapped.Offset = static_cast<DWORD>(offset); + if (!::ReadFile(handle_.get(), scratch, static_cast<DWORD>(n), &bytes_read, + &overlapped)) { + DWORD error_code = ::GetLastError(); + if (error_code != ERROR_HANDLE_EOF) { + *result = Slice(scratch, 0); + return Status::IOError(filename_, GetWindowsErrorMessage(error_code)); + } + } + + *result = Slice(scratch, bytes_read); + return Status::OK(); + } + + std::string GetName() const override { return filename_; } + + private: + const ScopedHandle handle_; + const std::string filename_; +}; + +class WindowsMmapReadableFile : public RandomAccessFile { + public: + // base[0,length-1] contains the mmapped contents of the file. 
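The Limiter above is a bare atomic counter, so its contract is purely a calling convention: every Acquire() that returns true must be paired with exactly one Release(), and a false return means the caller falls back to the non-mmap code path. A minimal stand-alone sketch of that contract (illustrative only, not part of this change):

#include <atomic>
#include <cassert>

class Limiter {  // same acquire/release discipline as the class in the diff
 public:
  explicit Limiter(int max_acquires) : acquires_allowed_(max_acquires) {}
  bool Acquire() {
    if (acquires_allowed_.fetch_sub(1, std::memory_order_relaxed) > 0) return true;
    acquires_allowed_.fetch_add(1, std::memory_order_relaxed);  // undo the over-decrement
    return false;
  }
  void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); }
 private:
  std::atomic<int> acquires_allowed_;
};

int main() {
  Limiter limiter(1);          // e.g. a single mmap slot
  assert(limiter.Acquire());   // first reader gets the slot
  assert(!limiter.Acquire());  // second reader must use the plain ReadFile path
  limiter.Release();           // slot returned, can be acquired again
  assert(limiter.Acquire());
  return 0;
}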
+ WindowsMmapReadableFile(std::string filename, char* mmap_base, size_t length, + Limiter* mmap_limiter) + : mmap_base_(mmap_base), + length_(length), + mmap_limiter_(mmap_limiter), + filename_(std::move(filename)) {} + + ~WindowsMmapReadableFile() override { + ::UnmapViewOfFile(mmap_base_); + mmap_limiter_->Release(); + } + + Status Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const override { + if (offset + n > length_) { + *result = Slice(); + return WindowsError(filename_, ERROR_INVALID_PARAMETER); + } + + *result = Slice(mmap_base_ + offset, n); + return Status::OK(); + } + + std::string GetName() const override { return filename_; } + + private: + char* const mmap_base_; + const size_t length_; + Limiter* const mmap_limiter_; + const std::string filename_; +}; + +class WindowsWritableFile : public WritableFile { + public: + WindowsWritableFile(std::string filename, ScopedHandle handle) + : pos_(0), handle_(std::move(handle)), filename_(std::move(filename)) {} + + ~WindowsWritableFile() override = default; + + Status Append(const Slice& data) override { + size_t write_size = data.size(); + const char* write_data = data.data(); + + // Fit as much as possible into buffer. + size_t copy_size = std::min(write_size, kWritableFileBufferSize - pos_); + std::memcpy(buf_ + pos_, write_data, copy_size); + write_data += copy_size; + write_size -= copy_size; + pos_ += copy_size; + if (write_size == 0) { + return Status::OK(); + } + + // Can't fit in buffer, so need to do at least one write. + Status status = FlushBuffer(); + if (!status.ok()) { + return status; + } + + // Small writes go to buffer, large writes are written directly. + if (write_size < kWritableFileBufferSize) { + std::memcpy(buf_, write_data, write_size); + pos_ = write_size; + return Status::OK(); + } + return WriteUnbuffered(write_data, write_size); + } + + Status Close() override { + Status status = FlushBuffer(); + if (!handle_.Close() && status.ok()) { + status = WindowsError(filename_, ::GetLastError()); + } + return status; + } + + Status Flush() override { return FlushBuffer(); } + + Status Sync() override { + // On Windows no need to sync parent directory. Its metadata will be updated + // via the creation of the new file, without an explicit sync. + + Status status = FlushBuffer(); + if (!status.ok()) { + return status; + } + + if (!::FlushFileBuffers(handle_.get())) { + return Status::IOError(filename_, + GetWindowsErrorMessage(::GetLastError())); + } + return Status::OK(); + } + + std::string GetName() const override { return filename_; } + + private: + Status FlushBuffer() { + Status status = WriteUnbuffered(buf_, pos_); + pos_ = 0; + return status; + } + + Status WriteUnbuffered(const char* data, size_t size) { + DWORD bytes_written; + if (!::WriteFile(handle_.get(), data, static_cast<DWORD>(size), + &bytes_written, nullptr)) { + return Status::IOError(filename_, + GetWindowsErrorMessage(::GetLastError())); + } + return Status::OK(); + } + + // buf_[0, pos_-1] contains data to be written to handle_. + char buf_[kWritableFileBufferSize]; + size_t pos_; + + ScopedHandle handle_; + const std::string filename_; +}; + +// Lock or unlock the entire file as specified by |lock|. Returns true +// when successful, false upon failure. 
Caller should call ::GetLastError() +// to determine cause of failure +bool LockOrUnlock(HANDLE handle, bool lock) { + if (lock) { + return ::LockFile(handle, + /*dwFileOffsetLow=*/0, /*dwFileOffsetHigh=*/0, + /*nNumberOfBytesToLockLow=*/MAXDWORD, + /*nNumberOfBytesToLockHigh=*/MAXDWORD); + } else { + return ::UnlockFile(handle, + /*dwFileOffsetLow=*/0, /*dwFileOffsetHigh=*/0, + /*nNumberOfBytesToLockLow=*/MAXDWORD, + /*nNumberOfBytesToLockHigh=*/MAXDWORD); + } +} + +class WindowsFileLock : public FileLock { + public: + WindowsFileLock(ScopedHandle handle, std::string filename) + : handle_(std::move(handle)), filename_(std::move(filename)) {} + + const ScopedHandle& handle() const { return handle_; } + const std::string& filename() const { return filename_; } + + private: + const ScopedHandle handle_; + const std::string filename_; +}; + +class WindowsEnv : public Env { + public: + WindowsEnv(); + ~WindowsEnv() override { + static const char msg[] = + "WindowsEnv singleton destroyed. Unsupported behavior!\n"; + std::fwrite(msg, 1, sizeof(msg), stderr); + std::abort(); + } + + Status NewSequentialFile(const std::string& filename, + SequentialFile** result) override { + *result = nullptr; + DWORD desired_access = GENERIC_READ; + DWORD share_mode = FILE_SHARE_READ; + auto wFilename = toUtf16(filename); + ScopedHandle handle = ::CreateFileW( + wFilename.c_str(), desired_access, share_mode, + /*lpSecurityAttributes=*/nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, + /*hTemplateFile=*/nullptr); + if (!handle.is_valid()) { + return WindowsError(filename, ::GetLastError()); + } + + *result = new WindowsSequentialFile(filename, std::move(handle)); + return Status::OK(); + } + + Status NewRandomAccessFile(const std::string& filename, + RandomAccessFile** result) override { + *result = nullptr; + DWORD desired_access = GENERIC_READ; + DWORD share_mode = FILE_SHARE_READ; + auto wFilename = toUtf16(filename); + ScopedHandle handle = + ::CreateFileW(wFilename.c_str(), desired_access, share_mode, + /*lpSecurityAttributes=*/nullptr, OPEN_EXISTING, + FILE_ATTRIBUTE_READONLY, + /*hTemplateFile=*/nullptr); + if (!handle.is_valid()) { + return WindowsError(filename, ::GetLastError()); + } + if (!mmap_limiter_.Acquire()) { + *result = new WindowsRandomAccessFile(filename, std::move(handle)); + return Status::OK(); + } + + LARGE_INTEGER file_size; + Status status; + if (!::GetFileSizeEx(handle.get(), &file_size)) { + mmap_limiter_.Release(); + return WindowsError(filename, ::GetLastError()); + } + + ScopedHandle mapping = + ::CreateFileMappingW(handle.get(), + /*security attributes=*/nullptr, PAGE_READONLY, + /*dwMaximumSizeHigh=*/0, + /*dwMaximumSizeLow=*/0, + /*lpName=*/nullptr); + if (mapping.is_valid()) { + void* mmap_base = ::MapViewOfFile(mapping.get(), FILE_MAP_READ, + /*dwFileOffsetHigh=*/0, + /*dwFileOffsetLow=*/0, + /*dwNumberOfBytesToMap=*/0); + if (mmap_base) { + *result = new WindowsMmapReadableFile( + filename, reinterpret_cast<char*>(mmap_base), + static_cast<size_t>(file_size.QuadPart), &mmap_limiter_); + return Status::OK(); + } + } + mmap_limiter_.Release(); + return WindowsError(filename, ::GetLastError()); + } + + Status NewWritableFile(const std::string& filename, + WritableFile** result) override { + DWORD desired_access = GENERIC_WRITE; + DWORD share_mode = 0; // Exclusive access. 
+ auto wFilename = toUtf16(filename); + ScopedHandle handle = ::CreateFileW( + wFilename.c_str(), desired_access, share_mode, + /*lpSecurityAttributes=*/nullptr, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, + /*hTemplateFile=*/nullptr); + if (!handle.is_valid()) { + *result = nullptr; + return WindowsError(filename, ::GetLastError()); + } + + *result = new WindowsWritableFile(filename, std::move(handle)); + return Status::OK(); + } + + Status NewAppendableFile(const std::string& filename, + WritableFile** result) override { + DWORD desired_access = FILE_APPEND_DATA; + DWORD share_mode = 0; // Exclusive access. + auto wFilename = toUtf16(filename); + ScopedHandle handle = ::CreateFileW( + wFilename.c_str(), desired_access, share_mode, + /*lpSecurityAttributes=*/nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, + /*hTemplateFile=*/nullptr); + if (!handle.is_valid()) { + *result = nullptr; + return WindowsError(filename, ::GetLastError()); + } + + *result = new WindowsWritableFile(filename, std::move(handle)); + return Status::OK(); + } + + bool FileExists(const std::string& filename) override { + auto wFilename = toUtf16(filename); + return GetFileAttributesW(wFilename.c_str()) != INVALID_FILE_ATTRIBUTES; + } + + Status GetChildren(const std::string& directory_path, + std::vector<std::string>* result) override { + const std::string find_pattern = directory_path + "\\*"; + WIN32_FIND_DATAW find_data; + auto wFind_pattern = toUtf16(find_pattern); + HANDLE dir_handle = ::FindFirstFileW(wFind_pattern.c_str(), &find_data); + if (dir_handle == INVALID_HANDLE_VALUE) { + DWORD last_error = ::GetLastError(); + if (last_error == ERROR_FILE_NOT_FOUND) { + return Status::OK(); + } + return WindowsError(directory_path, last_error); + } + do { + char base_name[_MAX_FNAME]; + char ext[_MAX_EXT]; + + auto find_data_filename = toUtf8(find_data.cFileName); + if (!_splitpath_s(find_data_filename.c_str(), nullptr, 0, nullptr, 0, + base_name, ARRAYSIZE(base_name), ext, ARRAYSIZE(ext))) { + result->emplace_back(std::string(base_name) + ext); + } + } while (::FindNextFileW(dir_handle, &find_data)); + DWORD last_error = ::GetLastError(); + ::FindClose(dir_handle); + if (last_error != ERROR_NO_MORE_FILES) { + return WindowsError(directory_path, last_error); + } + return Status::OK(); + } + + Status DeleteFile(const std::string& filename) override { + auto wFilename = toUtf16(filename); + if (!::DeleteFileW(wFilename.c_str())) { + return WindowsError(filename, ::GetLastError()); + } + return Status::OK(); + } + + Status CreateDir(const std::string& dirname) override { + auto wDirname = toUtf16(dirname); + if (!::CreateDirectoryW(wDirname.c_str(), nullptr)) { + return WindowsError(dirname, ::GetLastError()); + } + return Status::OK(); + } + + Status DeleteDir(const std::string& dirname) override { + auto wDirname = toUtf16(dirname); + if (!::RemoveDirectoryW(wDirname.c_str())) { + return WindowsError(dirname, ::GetLastError()); + } + return Status::OK(); + } + + Status GetFileSize(const std::string& filename, uint64_t* size) override { + WIN32_FILE_ATTRIBUTE_DATA file_attributes; + auto wFilename = toUtf16(filename); + if (!::GetFileAttributesExW(wFilename.c_str(), GetFileExInfoStandard, + &file_attributes)) { + return WindowsError(filename, ::GetLastError()); + } + ULARGE_INTEGER file_size; + file_size.HighPart = file_attributes.nFileSizeHigh; + file_size.LowPart = file_attributes.nFileSizeLow; + *size = file_size.QuadPart; + return Status::OK(); + } + + Status RenameFile(const std::string& from, const std::string& to) override 
{ + // Try a simple move first. It will only succeed when |to| doesn't already + // exist. + auto wFrom = toUtf16(from); + auto wTo = toUtf16(to); + if (::MoveFileW(wFrom.c_str(), wTo.c_str())) { + return Status::OK(); + } + DWORD move_error = ::GetLastError(); + + // Try the full-blown replace if the move fails, as ReplaceFile will only + // succeed when |to| does exist. When writing to a network share, we may not + // be able to change the ACLs. Ignore ACL errors then + // (REPLACEFILE_IGNORE_MERGE_ERRORS). + if (::ReplaceFileW(wTo.c_str(), wFrom.c_str(), /*lpBackupFileName=*/nullptr, + REPLACEFILE_IGNORE_MERGE_ERRORS, + /*lpExclude=*/nullptr, /*lpReserved=*/nullptr)) { + return Status::OK(); + } + DWORD replace_error = ::GetLastError(); + // In the case of FILE_ERROR_NOT_FOUND from ReplaceFile, it is likely that + // |to| does not exist. In this case, the more relevant error comes from the + // call to MoveFile. + if (replace_error == ERROR_FILE_NOT_FOUND || + replace_error == ERROR_PATH_NOT_FOUND) { + return WindowsError(from, move_error); + } else { + return WindowsError(from, replace_error); + } + } + + Status LockFile(const std::string& filename, FileLock** lock) override { + *lock = nullptr; + Status result; + auto wFilename = toUtf16(filename); + ScopedHandle handle = ::CreateFileW( + wFilename.c_str(), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ, + /*lpSecurityAttributes=*/nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, + nullptr); + if (!handle.is_valid()) { + result = WindowsError(filename, ::GetLastError()); + } else if (!LockOrUnlock(handle.get(), true)) { + result = WindowsError("lock " + filename, ::GetLastError()); + } else { + *lock = new WindowsFileLock(std::move(handle), filename); + } + return result; + } + + Status UnlockFile(FileLock* lock) override { + WindowsFileLock* windows_file_lock = + reinterpret_cast<WindowsFileLock*>(lock); + if (!LockOrUnlock(windows_file_lock->handle().get(), false)) { + return WindowsError("unlock " + windows_file_lock->filename(), + ::GetLastError()); + } + delete windows_file_lock; + return Status::OK(); + } + + void Schedule(void (*background_work_function)(void* background_work_arg), + void* background_work_arg) override; + + void StartThread(void (*thread_main)(void* thread_main_arg), + void* thread_main_arg) override { + std::thread new_thread(thread_main, thread_main_arg); + new_thread.detach(); + } + + Status GetTestDirectory(std::string* result) override { + const char* env = getenv("TEST_TMPDIR"); + if (env && env[0] != '\0') { + *result = env; + return Status::OK(); + } + + wchar_t wtmp_path[MAX_PATH]; + if (!GetTempPathW(ARRAYSIZE(wtmp_path), wtmp_path)) { + return WindowsError("GetTempPath", ::GetLastError()); + } + std::string tmp_path = toUtf8(std::wstring(wtmp_path)); + std::stringstream ss; + ss << tmp_path << "leveldbtest-" << std::this_thread::get_id(); + *result = ss.str(); + + // Directory may already exist + CreateDir(*result); + return Status::OK(); + } + + Status NewLogger(const std::string& filename, Logger** result) override { + auto wFilename = toUtf16(filename); + std::FILE* fp = _wfopen(wFilename.c_str(), L"w"); + if (fp == nullptr) { + *result = nullptr; + return WindowsError(filename, ::GetLastError()); + } else { + *result = new WindowsLogger(fp); + return Status::OK(); + } + } + + uint64_t NowMicros() override { + // GetSystemTimeAsFileTime typically has a resolution of 10-20 msec. + // TODO(cmumford): Switch to GetSystemTimePreciseAsFileTime which is + // available in Windows 8 and later. 
+ FILETIME ft; + ::GetSystemTimeAsFileTime(&ft); + // Each tick represents a 100-nanosecond interval since January 1, 1601 + // (UTC). + uint64_t num_ticks = + (static_cast<uint64_t>(ft.dwHighDateTime) << 32) + ft.dwLowDateTime; + return num_ticks / 10; + } + + void SleepForMicroseconds(int micros) override { + std::this_thread::sleep_for(std::chrono::microseconds(micros)); + } + + private: + void BackgroundThreadMain(); + + static void BackgroundThreadEntryPoint(WindowsEnv* env) { + env->BackgroundThreadMain(); + } + + // Stores the work item data in a Schedule() call. + // + // Instances are constructed on the thread calling Schedule() and used on the + // background thread. + // + // This structure is thread-safe because it is immutable. + struct BackgroundWorkItem { + explicit BackgroundWorkItem(void (*function)(void* arg), void* arg) + : function(function), arg(arg) {} + + void (*const function)(void*); + void* const arg; + }; + + port::Mutex background_work_mutex_; + port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_); + bool started_background_thread_ GUARDED_BY(background_work_mutex_); + + std::queue<BackgroundWorkItem> background_work_queue_ + GUARDED_BY(background_work_mutex_); + + Limiter mmap_limiter_; // Thread-safe. + + // Converts a Windows wide multi-byte UTF-16 string to a UTF-8 string. + // See http://utf8everywhere.org/#windows + std::string toUtf8(const std::wstring& wstr) { + if (wstr.empty()) return std::string(); + int size_needed = WideCharToMultiByte( + CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL); + std::string strTo(size_needed, 0); + WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), &strTo[0], + size_needed, NULL, NULL); + return strTo; + } + + // Converts a UTF-8 string to a Windows UTF-16 multi-byte wide character + // string. + // See http://utf8everywhere.org/#windows + std::wstring toUtf16(const std::string& str) { + if (str.empty()) return std::wstring(); + int size_needed = + MultiByteToWideChar(CP_UTF8, 0, &str[0], (int)str.size(), NULL, 0); + std::wstring strTo(size_needed, 0); + MultiByteToWideChar(CP_UTF8, 0, &str[0], (int)str.size(), &strTo[0], + size_needed); + return strTo; + } +}; + +// Return the maximum number of concurrent mmaps. +int MaxMmaps() { return g_mmap_limit; } + +WindowsEnv::WindowsEnv() + : background_work_cv_(&background_work_mutex_), + started_background_thread_(false), + mmap_limiter_(MaxMmaps()) {} + +void WindowsEnv::Schedule( + void (*background_work_function)(void* background_work_arg), + void* background_work_arg) { + background_work_mutex_.Lock(); + + // Start the background thread, if we haven't done so already. + if (!started_background_thread_) { + started_background_thread_ = true; + std::thread background_thread(WindowsEnv::BackgroundThreadEntryPoint, this); + background_thread.detach(); + } + + // If the queue is empty, the background thread may be waiting for work. + if (background_work_queue_.empty()) { + background_work_cv_.Signal(); + } + + background_work_queue_.emplace(background_work_function, background_work_arg); + background_work_mutex_.Unlock(); +} + +void WindowsEnv::BackgroundThreadMain() { + while (true) { + background_work_mutex_.Lock(); + + // Wait until there is work to be done.
+ while (background_work_queue_.empty()) { + background_work_cv_.Wait(); + } + + assert(!background_work_queue_.empty()); + auto background_work_function = background_work_queue_.front().function; + void* background_work_arg = background_work_queue_.front().arg; + background_work_queue_.pop(); + + background_work_mutex_.Unlock(); + background_work_function(background_work_arg); + } +} + +// Wraps an Env instance whose destructor is never created. +// +// Intended usage: +// using PlatformSingletonEnv = SingletonEnv<PlatformEnv>; +// void ConfigurePosixEnv(int param) { +// PlatformSingletonEnv::AssertEnvNotInitialized(); +// // set global configuration flags. +// } +// Env* Env::Default() { +// static PlatformSingletonEnv default_env; +// return default_env.env(); +// } +template <typename EnvType> +class SingletonEnv { + public: + SingletonEnv() { +#if !defined(NDEBUG) + env_initialized_.store(true, std::memory_order::memory_order_relaxed); +#endif // !defined(NDEBUG) + static_assert(sizeof(env_storage_) >= sizeof(EnvType), + "env_storage_ will not fit the Env"); + static_assert(alignof(decltype(env_storage_)) >= alignof(EnvType), + "env_storage_ does not meet the Env's alignment needs"); + new (&env_storage_) EnvType(); + } + ~SingletonEnv() = default; + + SingletonEnv(const SingletonEnv&) = delete; + SingletonEnv& operator=(const SingletonEnv&) = delete; + + Env* env() { return reinterpret_cast<Env*>(&env_storage_); } + + static void AssertEnvNotInitialized() { +#if !defined(NDEBUG) + assert(!env_initialized_.load(std::memory_order::memory_order_relaxed)); +#endif // !defined(NDEBUG) + } + + private: + typename std::aligned_storage<sizeof(EnvType), alignof(EnvType)>::type + env_storage_; +#if !defined(NDEBUG) + static std::atomic<bool> env_initialized_; +#endif // !defined(NDEBUG) +}; + +#if !defined(NDEBUG) +template <typename EnvType> +std::atomic<bool> SingletonEnv<EnvType>::env_initialized_; +#endif // !defined(NDEBUG) + +using WindowsDefaultEnv = SingletonEnv<WindowsEnv>; + +} // namespace + +void EnvWindowsTestHelper::SetReadOnlyMMapLimit(int limit) { + WindowsDefaultEnv::AssertEnvNotInitialized(); + g_mmap_limit = limit; +} + +Env* Env::Default() { + static WindowsDefaultEnv env_container; + return env_container.env(); +} + +} // namespace leveldb diff --git a/src/leveldb/util/env_windows_test.cc b/src/leveldb/util/env_windows_test.cc new file mode 100644 index 0000000000..3c22133891 --- /dev/null +++ b/src/leveldb/util/env_windows_test.cc @@ -0,0 +1,64 @@ +// Copyright (c) 2018 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "leveldb/env.h" + +#include "port/port.h" +#include "util/env_windows_test_helper.h" +#include "util/testharness.h" + +namespace leveldb { + +static const int kMMapLimit = 4; + +class EnvWindowsTest { + public: + static void SetFileLimits(int mmap_limit) { + EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit); + } + + EnvWindowsTest() : env_(Env::Default()) {} + + Env* env_; +}; + +TEST(EnvWindowsTest, TestOpenOnRead) { + // Write some test data to a single file that will be opened |n| times. 
+ std::string test_dir; + ASSERT_OK(env_->GetTestDirectory(&test_dir)); + std::string test_file = test_dir + "/open_on_read.txt"; + + FILE* f = fopen(test_file.c_str(), "w"); + ASSERT_TRUE(f != nullptr); + const char kFileData[] = "abcdefghijklmnopqrstuvwxyz"; + fputs(kFileData, f); + fclose(f); + + // Open test file some number above the sum of the two limits to force + // leveldb::WindowsEnv to switch from mapping the file into memory + // to basic file reading. + const int kNumFiles = kMMapLimit + 5; + leveldb::RandomAccessFile* files[kNumFiles] = {0}; + for (int i = 0; i < kNumFiles; i++) { + ASSERT_OK(env_->NewRandomAccessFile(test_file, &files[i])); + } + char scratch; + Slice read_result; + for (int i = 0; i < kNumFiles; i++) { + ASSERT_OK(files[i]->Read(i, 1, &read_result, &scratch)); + ASSERT_EQ(kFileData[i], read_result[0]); + } + for (int i = 0; i < kNumFiles; i++) { + delete files[i]; + } + ASSERT_OK(env_->DeleteFile(test_file)); +} + +} // namespace leveldb + +int main(int argc, char** argv) { + // All tests currently run with the same read-only file limits. + leveldb::EnvWindowsTest::SetFileLimits(leveldb::kMMapLimit); + return leveldb::test::RunAllTests(); +} diff --git a/src/leveldb/util/env_windows_test_helper.h b/src/leveldb/util/env_windows_test_helper.h new file mode 100644 index 0000000000..e6f6020561 --- /dev/null +++ b/src/leveldb/util/env_windows_test_helper.h @@ -0,0 +1,25 @@ +// Copyright 2018 (c) The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_ +#define STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_ + +namespace leveldb { + +class EnvWindowsTest; + +// A helper for the Windows Env to facilitate testing. +class EnvWindowsTestHelper { + private: + friend class CorruptionTest; + friend class EnvWindowsTest; + + // Set the maximum number of read-only files that will be mapped via mmap. + // Must be called before creating an Env. + static void SetReadOnlyMMapLimit(int limit); +}; + +} // namespace leveldb + +#endif // STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_ diff --git a/src/leveldb/util/filter_policy.cc b/src/leveldb/util/filter_policy.cc index 7b045c8c91..90fd754d64 100644 --- a/src/leveldb/util/filter_policy.cc +++ b/src/leveldb/util/filter_policy.cc @@ -6,6 +6,6 @@ namespace leveldb { -FilterPolicy::~FilterPolicy() { } +FilterPolicy::~FilterPolicy() {} } // namespace leveldb diff --git a/src/leveldb/util/hash.cc b/src/leveldb/util/hash.cc index ed439ce7a2..dd47c110ee 100644 --- a/src/leveldb/util/hash.cc +++ b/src/leveldb/util/hash.cc @@ -2,15 +2,19 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#include "util/hash.h" + #include <string.h> + #include "util/coding.h" -#include "util/hash.h" // The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through // between switch labels. The real definition should be provided externally. // This one is a fallback version for unsupported compilers. 
#ifndef FALLTHROUGH_INTENDED -#define FALLTHROUGH_INTENDED do { } while (0) +#define FALLTHROUGH_INTENDED \ + do { \ + } while (0) #endif namespace leveldb { @@ -34,13 +38,13 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) { // Pick up remaining bytes switch (limit - data) { case 3: - h += static_cast<unsigned char>(data[2]) << 16; + h += static_cast<uint8_t>(data[2]) << 16; FALLTHROUGH_INTENDED; case 2: - h += static_cast<unsigned char>(data[1]) << 8; + h += static_cast<uint8_t>(data[1]) << 8; FALLTHROUGH_INTENDED; case 1: - h += static_cast<unsigned char>(data[0]); + h += static_cast<uint8_t>(data[0]); h *= m; h ^= (h >> r); break; @@ -48,5 +52,4 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) { return h; } - } // namespace leveldb diff --git a/src/leveldb/util/hash.h b/src/leveldb/util/hash.h index 8889d56be8..74bdb6e7b2 100644 --- a/src/leveldb/util/hash.h +++ b/src/leveldb/util/hash.h @@ -12,8 +12,8 @@ namespace leveldb { -extern uint32_t Hash(const char* data, size_t n, uint32_t seed); +uint32_t Hash(const char* data, size_t n, uint32_t seed); -} +} // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_HASH_H_ diff --git a/src/leveldb/util/hash_test.cc b/src/leveldb/util/hash_test.cc index eaa1c92c23..21f8171da6 100644 --- a/src/leveldb/util/hash_test.cc +++ b/src/leveldb/util/hash_test.cc @@ -7,26 +7,18 @@ namespace leveldb { -class HASH { }; +class HASH {}; TEST(HASH, SignedUnsignedIssue) { - const unsigned char data1[1] = {0x62}; - const unsigned char data2[2] = {0xc3, 0x97}; - const unsigned char data3[3] = {0xe2, 0x99, 0xa5}; - const unsigned char data4[4] = {0xe1, 0x80, 0xb9, 0x32}; - const unsigned char data5[48] = { - 0x01, 0xc0, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x04, 0x00, - 0x00, 0x00, 0x00, 0x14, - 0x00, 0x00, 0x00, 0x18, - 0x28, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, + const uint8_t data1[1] = {0x62}; + const uint8_t data2[2] = {0xc3, 0x97}; + const uint8_t data3[3] = {0xe2, 0x99, 0xa5}; + const uint8_t data4[4] = {0xe1, 0x80, 0xb9, 0x32}; + const uint8_t data5[48] = { + 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; ASSERT_EQ(Hash(0, 0, 0xbc9f1d34), 0xbc9f1d34); @@ -49,6 +41,4 @@ TEST(HASH, SignedUnsignedIssue) { } // namespace leveldb -int main(int argc, char** argv) { - return leveldb::test::RunAllTests(); -} +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/util/histogram.cc b/src/leveldb/util/histogram.cc index bb95f583ea..65092c88f2 100644 --- a/src/leveldb/util/histogram.cc +++ b/src/leveldb/util/histogram.cc @@ -2,36 +2,174 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
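The switch to uint8_t in Hash() above is about signedness: plain char may be signed, and sign-extending a high byte before the shift would smear set bits into the upper half of the hash word, which is what the SignedUnsignedIssue test data exercises. A small stand-alone illustration of the difference (assumes a signed-char target; not part of this change):

#include <cstdint>
#include <cstdio>

int main() {
  const char c = static_cast<char>(0xe2);  // high-bit byte, like the UTF-8 test data above
  // On signed-char targets the first conversion sign-extends; the uint8_t cast keeps 0..255.
  uint32_t sign_extended = static_cast<uint32_t>(c) << 16;                        // 0xffe20000 when char is signed
  uint32_t byte_value = static_cast<uint32_t>(static_cast<uint8_t>(c)) << 16;     // 0x00e20000 always
  std::printf("%08x vs %08x\n", sign_extended, byte_value);
  return 0;
}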
+#include "util/histogram.h" + #include <math.h> #include <stdio.h> + #include "port/port.h" -#include "util/histogram.h" namespace leveldb { const double Histogram::kBucketLimit[kNumBuckets] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 25, 30, 35, 40, 45, - 50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400, 450, - 500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000, - 3500, 4000, 4500, 5000, 6000, 7000, 8000, 9000, 10000, 12000, 14000, - 16000, 18000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000, - 70000, 80000, 90000, 100000, 120000, 140000, 160000, 180000, 200000, - 250000, 300000, 350000, 400000, 450000, 500000, 600000, 700000, 800000, - 900000, 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2500000, - 3000000, 3500000, 4000000, 4500000, 5000000, 6000000, 7000000, 8000000, - 9000000, 10000000, 12000000, 14000000, 16000000, 18000000, 20000000, - 25000000, 30000000, 35000000, 40000000, 45000000, 50000000, 60000000, - 70000000, 80000000, 90000000, 100000000, 120000000, 140000000, 160000000, - 180000000, 200000000, 250000000, 300000000, 350000000, 400000000, - 450000000, 500000000, 600000000, 700000000, 800000000, 900000000, - 1000000000, 1200000000, 1400000000, 1600000000, 1800000000, 2000000000, - 2500000000.0, 3000000000.0, 3500000000.0, 4000000000.0, 4500000000.0, - 5000000000.0, 6000000000.0, 7000000000.0, 8000000000.0, 9000000000.0, - 1e200, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 12, + 14, + 16, + 18, + 20, + 25, + 30, + 35, + 40, + 45, + 50, + 60, + 70, + 80, + 90, + 100, + 120, + 140, + 160, + 180, + 200, + 250, + 300, + 350, + 400, + 450, + 500, + 600, + 700, + 800, + 900, + 1000, + 1200, + 1400, + 1600, + 1800, + 2000, + 2500, + 3000, + 3500, + 4000, + 4500, + 5000, + 6000, + 7000, + 8000, + 9000, + 10000, + 12000, + 14000, + 16000, + 18000, + 20000, + 25000, + 30000, + 35000, + 40000, + 45000, + 50000, + 60000, + 70000, + 80000, + 90000, + 100000, + 120000, + 140000, + 160000, + 180000, + 200000, + 250000, + 300000, + 350000, + 400000, + 450000, + 500000, + 600000, + 700000, + 800000, + 900000, + 1000000, + 1200000, + 1400000, + 1600000, + 1800000, + 2000000, + 2500000, + 3000000, + 3500000, + 4000000, + 4500000, + 5000000, + 6000000, + 7000000, + 8000000, + 9000000, + 10000000, + 12000000, + 14000000, + 16000000, + 18000000, + 20000000, + 25000000, + 30000000, + 35000000, + 40000000, + 45000000, + 50000000, + 60000000, + 70000000, + 80000000, + 90000000, + 100000000, + 120000000, + 140000000, + 160000000, + 180000000, + 200000000, + 250000000, + 300000000, + 350000000, + 400000000, + 450000000, + 500000000, + 600000000, + 700000000, + 800000000, + 900000000, + 1000000000, + 1200000000, + 1400000000, + 1600000000, + 1800000000, + 2000000000, + 2500000000.0, + 3000000000.0, + 3500000000.0, + 4000000000.0, + 4500000000.0, + 5000000000.0, + 6000000000.0, + 7000000000.0, + 8000000000.0, + 9000000000.0, + 1e200, }; void Histogram::Clear() { - min_ = kBucketLimit[kNumBuckets-1]; + min_ = kBucketLimit[kNumBuckets - 1]; max_ = 0; num_ = 0; sum_ = 0; @@ -66,9 +204,7 @@ void Histogram::Merge(const Histogram& other) { } } -double Histogram::Median() const { - return Percentile(50.0); -} +double Histogram::Median() const { return Percentile(50.0); } double Histogram::Percentile(double p) const { double threshold = num_ * (p / 100.0); @@ -77,7 +213,7 @@ double Histogram::Percentile(double p) const { sum += buckets_[b]; if (sum >= threshold) { // Scale linearly within this bucket - double left_point = (b == 0) ? 
0 : kBucketLimit[b-1]; + double left_point = (b == 0) ? 0 : kBucketLimit[b - 1]; double right_point = kBucketLimit[b]; double left_sum = sum - buckets_[b]; double right_sum = sum; @@ -105,12 +241,10 @@ double Histogram::StandardDeviation() const { std::string Histogram::ToString() const { std::string r; char buf[200]; - snprintf(buf, sizeof(buf), - "Count: %.0f Average: %.4f StdDev: %.2f\n", - num_, Average(), StandardDeviation()); + snprintf(buf, sizeof(buf), "Count: %.0f Average: %.4f StdDev: %.2f\n", num_, + Average(), StandardDeviation()); r.append(buf); - snprintf(buf, sizeof(buf), - "Min: %.4f Median: %.4f Max: %.4f\n", + snprintf(buf, sizeof(buf), "Min: %.4f Median: %.4f Max: %.4f\n", (num_ == 0.0 ? 0.0 : min_), Median(), max_); r.append(buf); r.append("------------------------------------------------------\n"); @@ -119,17 +253,16 @@ std::string Histogram::ToString() const { for (int b = 0; b < kNumBuckets; b++) { if (buckets_[b] <= 0.0) continue; sum += buckets_[b]; - snprintf(buf, sizeof(buf), - "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ", - ((b == 0) ? 0.0 : kBucketLimit[b-1]), // left - kBucketLimit[b], // right - buckets_[b], // count - mult * buckets_[b], // percentage - mult * sum); // cumulative percentage + snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ", + ((b == 0) ? 0.0 : kBucketLimit[b - 1]), // left + kBucketLimit[b], // right + buckets_[b], // count + mult * buckets_[b], // percentage + mult * sum); // cumulative percentage r.append(buf); // Add hash marks based on percentage; 20 marks for 100%. - int marks = static_cast<int>(20*(buckets_[b] / num_) + 0.5); + int marks = static_cast<int>(20 * (buckets_[b] / num_) + 0.5); r.append(marks, '#'); r.push_back('\n'); } diff --git a/src/leveldb/util/histogram.h b/src/leveldb/util/histogram.h index 1ef9f3c8ab..4da60fba45 100644 --- a/src/leveldb/util/histogram.h +++ b/src/leveldb/util/histogram.h @@ -11,8 +11,8 @@ namespace leveldb { class Histogram { public: - Histogram() { } - ~Histogram() { } + Histogram() {} + ~Histogram() {} void Clear(); void Add(double value); @@ -21,20 +21,22 @@ class Histogram { std::string ToString() const; private: + enum { kNumBuckets = 154 }; + + double Median() const; + double Percentile(double p) const; + double Average() const; + double StandardDeviation() const; + + static const double kBucketLimit[kNumBuckets]; + double min_; double max_; double num_; double sum_; double sum_squares_; - enum { kNumBuckets = 154 }; - static const double kBucketLimit[kNumBuckets]; double buckets_[kNumBuckets]; - - double Median() const; - double Percentile(double p) const; - double Average() const; - double StandardDeviation() const; }; } // namespace leveldb diff --git a/src/leveldb/util/logging.cc b/src/leveldb/util/logging.cc index db6160c8f1..75e9d037d3 100644 --- a/src/leveldb/util/logging.cc +++ b/src/leveldb/util/logging.cc @@ -8,6 +8,9 @@ #include <stdarg.h> #include <stdio.h> #include <stdlib.h> + +#include <limits> + #include "leveldb/env.h" #include "leveldb/slice.h" @@ -15,7 +18,7 @@ namespace leveldb { void AppendNumberTo(std::string* str, uint64_t num) { char buf[30]; - snprintf(buf, sizeof(buf), "%llu", (unsigned long long) num); + snprintf(buf, sizeof(buf), "%llu", (unsigned long long)num); str->append(buf); } @@ -46,27 +49,36 @@ std::string EscapeString(const Slice& value) { } bool ConsumeDecimalNumber(Slice* in, uint64_t* val) { - uint64_t v = 0; - int digits = 0; - while (!in->empty()) { - unsigned char c = (*in)[0]; - if (c >= '0' && c <= '9') { - ++digits; - const int 
delta = (c - '0'); - static const uint64_t kMaxUint64 = ~static_cast<uint64_t>(0); - if (v > kMaxUint64/10 || - (v == kMaxUint64/10 && delta > kMaxUint64%10)) { - // Overflow - return false; - } - v = (v * 10) + delta; - in->remove_prefix(1); - } else { - break; + // Constants that will be optimized away. + constexpr const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max(); + constexpr const char kLastDigitOfMaxUint64 = + '0' + static_cast<char>(kMaxUint64 % 10); + + uint64_t value = 0; + + // reinterpret_cast-ing from char* to uint8_t* to avoid signedness. + const uint8_t* start = reinterpret_cast<const uint8_t*>(in->data()); + + const uint8_t* end = start + in->size(); + const uint8_t* current = start; + for (; current != end; ++current) { + const uint8_t ch = *current; + if (ch < '0' || ch > '9') break; + + // Overflow check. + // kMaxUint64 / 10 is also constant and will be optimized away. + if (value > kMaxUint64 / 10 || + (value == kMaxUint64 / 10 && ch > kLastDigitOfMaxUint64)) { + return false; } + + value = (value * 10) + (ch - '0'); } - *val = v; - return (digits > 0); + + *val = value; + const size_t digits_consumed = current - start; + in->remove_prefix(digits_consumed); + return digits_consumed != 0; } } // namespace leveldb diff --git a/src/leveldb/util/logging.h b/src/leveldb/util/logging.h index 1b450d2480..8ff2da86b4 100644 --- a/src/leveldb/util/logging.h +++ b/src/leveldb/util/logging.h @@ -8,9 +8,11 @@ #ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_ #define STORAGE_LEVELDB_UTIL_LOGGING_H_ -#include <stdio.h> #include <stdint.h> +#include <stdio.h> + #include <string> + #include "port/port.h" namespace leveldb { @@ -19,24 +21,24 @@ class Slice; class WritableFile; // Append a human-readable printout of "num" to *str -extern void AppendNumberTo(std::string* str, uint64_t num); +void AppendNumberTo(std::string* str, uint64_t num); // Append a human-readable printout of "value" to *str. // Escapes any non-printable characters found in "value". -extern void AppendEscapedStringTo(std::string* str, const Slice& value); +void AppendEscapedStringTo(std::string* str, const Slice& value); // Return a human-readable printout of "num" -extern std::string NumberToString(uint64_t num); +std::string NumberToString(uint64_t num); // Return a human-readable version of "value". // Escapes any non-printable characters found in "value". -extern std::string EscapeString(const Slice& value); +std::string EscapeString(const Slice& value); // Parse a human-readable number from "*in" into *value. On success, // advances "*in" past the consumed number and sets "*val" to the // numeric value. Otherwise, returns false and leaves *in in an // unspecified state. -extern bool ConsumeDecimalNumber(Slice* in, uint64_t* val); +bool ConsumeDecimalNumber(Slice* in, uint64_t* val); } // namespace leveldb diff --git a/src/leveldb/util/logging_test.cc b/src/leveldb/util/logging_test.cc new file mode 100644 index 0000000000..389cbeb14f --- /dev/null +++ b/src/leveldb/util/logging_test.cc @@ -0,0 +1,143 @@ +// Copyright (c) 2018 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
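The rewritten ConsumeDecimalNumber above keeps the old overflow guard but hoists the constants: a digit is rejected when value > kMaxUint64 / 10, or when value == kMaxUint64 / 10 and the digit exceeds the last digit of kMaxUint64. A stand-alone sketch of the same guard (ParseU64 is an illustrative helper, not leveldb API):

#include <cassert>
#include <cstdint>
#include <limits>
#include <string>

bool ParseU64(const std::string& s, uint64_t* out) {
  constexpr uint64_t kMax = std::numeric_limits<uint64_t>::max();
  uint64_t value = 0;
  size_t digits = 0;
  for (char c : s) {
    if (c < '0' || c > '9') break;
    const uint64_t d = static_cast<uint64_t>(c - '0');
    // value * 10 + d overflows exactly when value > kMax / 10,
    // or value == kMax / 10 and d > kMax % 10.
    if (value > kMax / 10 || (value == kMax / 10 && d > kMax % 10)) return false;
    value = value * 10 + d;
    ++digits;
  }
  *out = value;
  return digits != 0;
}

int main() {
  uint64_t v = 0;
  assert(ParseU64("18446744073709551615", &v) && v == UINT64_MAX);  // 2^64 - 1 still fits
  assert(!ParseU64("18446744073709551616", &v));                    // 2^64 overflows
  return 0;
}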
+ +#include <limits> +#include <string> + +#include "leveldb/slice.h" +#include "util/logging.h" +#include "util/testharness.h" + +namespace leveldb { + +class Logging {}; + +TEST(Logging, NumberToString) { + ASSERT_EQ("0", NumberToString(0)); + ASSERT_EQ("1", NumberToString(1)); + ASSERT_EQ("9", NumberToString(9)); + + ASSERT_EQ("10", NumberToString(10)); + ASSERT_EQ("11", NumberToString(11)); + ASSERT_EQ("19", NumberToString(19)); + ASSERT_EQ("99", NumberToString(99)); + + ASSERT_EQ("100", NumberToString(100)); + ASSERT_EQ("109", NumberToString(109)); + ASSERT_EQ("190", NumberToString(190)); + ASSERT_EQ("123", NumberToString(123)); + ASSERT_EQ("12345678", NumberToString(12345678)); + + static_assert(std::numeric_limits<uint64_t>::max() == 18446744073709551615U, + "Test consistency check"); + ASSERT_EQ("18446744073709551000", NumberToString(18446744073709551000U)); + ASSERT_EQ("18446744073709551600", NumberToString(18446744073709551600U)); + ASSERT_EQ("18446744073709551610", NumberToString(18446744073709551610U)); + ASSERT_EQ("18446744073709551614", NumberToString(18446744073709551614U)); + ASSERT_EQ("18446744073709551615", NumberToString(18446744073709551615U)); +} + +void ConsumeDecimalNumberRoundtripTest(uint64_t number, + const std::string& padding = "") { + std::string decimal_number = NumberToString(number); + std::string input_string = decimal_number + padding; + Slice input(input_string); + Slice output = input; + uint64_t result; + ASSERT_TRUE(ConsumeDecimalNumber(&output, &result)); + ASSERT_EQ(number, result); + ASSERT_EQ(decimal_number.size(), output.data() - input.data()); + ASSERT_EQ(padding.size(), output.size()); +} + +TEST(Logging, ConsumeDecimalNumberRoundtrip) { + ConsumeDecimalNumberRoundtripTest(0); + ConsumeDecimalNumberRoundtripTest(1); + ConsumeDecimalNumberRoundtripTest(9); + + ConsumeDecimalNumberRoundtripTest(10); + ConsumeDecimalNumberRoundtripTest(11); + ConsumeDecimalNumberRoundtripTest(19); + ConsumeDecimalNumberRoundtripTest(99); + + ConsumeDecimalNumberRoundtripTest(100); + ConsumeDecimalNumberRoundtripTest(109); + ConsumeDecimalNumberRoundtripTest(190); + ConsumeDecimalNumberRoundtripTest(123); + ASSERT_EQ("12345678", NumberToString(12345678)); + + for (uint64_t i = 0; i < 100; ++i) { + uint64_t large_number = std::numeric_limits<uint64_t>::max() - i; + ConsumeDecimalNumberRoundtripTest(large_number); + } +} + +TEST(Logging, ConsumeDecimalNumberRoundtripWithPadding) { + ConsumeDecimalNumberRoundtripTest(0, " "); + ConsumeDecimalNumberRoundtripTest(1, "abc"); + ConsumeDecimalNumberRoundtripTest(9, "x"); + + ConsumeDecimalNumberRoundtripTest(10, "_"); + ConsumeDecimalNumberRoundtripTest(11, std::string("\0\0\0", 3)); + ConsumeDecimalNumberRoundtripTest(19, "abc"); + ConsumeDecimalNumberRoundtripTest(99, "padding"); + + ConsumeDecimalNumberRoundtripTest(100, " "); + + for (uint64_t i = 0; i < 100; ++i) { + uint64_t large_number = std::numeric_limits<uint64_t>::max() - i; + ConsumeDecimalNumberRoundtripTest(large_number, "pad"); + } +} + +void ConsumeDecimalNumberOverflowTest(const std::string& input_string) { + Slice input(input_string); + Slice output = input; + uint64_t result; + ASSERT_EQ(false, ConsumeDecimalNumber(&output, &result)); +} + +TEST(Logging, ConsumeDecimalNumberOverflow) { + static_assert(std::numeric_limits<uint64_t>::max() == 18446744073709551615U, + "Test consistency check"); + ConsumeDecimalNumberOverflowTest("18446744073709551616"); + ConsumeDecimalNumberOverflowTest("18446744073709551617"); + 
ConsumeDecimalNumberOverflowTest("18446744073709551618"); + ConsumeDecimalNumberOverflowTest("18446744073709551619"); + ConsumeDecimalNumberOverflowTest("18446744073709551620"); + ConsumeDecimalNumberOverflowTest("18446744073709551621"); + ConsumeDecimalNumberOverflowTest("18446744073709551622"); + ConsumeDecimalNumberOverflowTest("18446744073709551623"); + ConsumeDecimalNumberOverflowTest("18446744073709551624"); + ConsumeDecimalNumberOverflowTest("18446744073709551625"); + ConsumeDecimalNumberOverflowTest("18446744073709551626"); + + ConsumeDecimalNumberOverflowTest("18446744073709551700"); + + ConsumeDecimalNumberOverflowTest("99999999999999999999"); +} + +void ConsumeDecimalNumberNoDigitsTest(const std::string& input_string) { + Slice input(input_string); + Slice output = input; + uint64_t result; + ASSERT_EQ(false, ConsumeDecimalNumber(&output, &result)); + ASSERT_EQ(input.data(), output.data()); + ASSERT_EQ(input.size(), output.size()); +} + +TEST(Logging, ConsumeDecimalNumberNoDigits) { + ConsumeDecimalNumberNoDigitsTest(""); + ConsumeDecimalNumberNoDigitsTest(" "); + ConsumeDecimalNumberNoDigitsTest("a"); + ConsumeDecimalNumberNoDigitsTest(" 123"); + ConsumeDecimalNumberNoDigitsTest("a123"); + ConsumeDecimalNumberNoDigitsTest(std::string("\000123", 4)); + ConsumeDecimalNumberNoDigitsTest(std::string("\177123", 4)); + ConsumeDecimalNumberNoDigitsTest(std::string("\377123", 4)); +} + +} // namespace leveldb + +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/util/mutexlock.h b/src/leveldb/util/mutexlock.h index 1ff5a9efa1..0cb2e250fb 100644 --- a/src/leveldb/util/mutexlock.h +++ b/src/leveldb/util/mutexlock.h @@ -22,20 +22,18 @@ namespace leveldb { class SCOPED_LOCKABLE MutexLock { public: - explicit MutexLock(port::Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu) - : mu_(mu) { + explicit MutexLock(port::Mutex* mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { this->mu_->Lock(); } ~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); } + MutexLock(const MutexLock&) = delete; + MutexLock& operator=(const MutexLock&) = delete; + private: - port::Mutex *const mu_; - // No copying allowed - MutexLock(const MutexLock&); - void operator=(const MutexLock&); + port::Mutex* const mu_; }; } // namespace leveldb - #endif // STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_ diff --git a/src/leveldb/util/no_destructor.h b/src/leveldb/util/no_destructor.h new file mode 100644 index 0000000000..a0d3b8703d --- /dev/null +++ b/src/leveldb/util/no_destructor.h @@ -0,0 +1,46 @@ +// Copyright (c) 2018 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_ +#define STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_ + +#include <type_traits> +#include <utility> + +namespace leveldb { + +// Wraps an instance whose destructor is never called. +// +// This is intended for use with function-level static variables. +template <typename InstanceType> +class NoDestructor { + public: + template <typename... ConstructorArgTypes> + explicit NoDestructor(ConstructorArgTypes&&... 
constructor_args) { + static_assert(sizeof(instance_storage_) >= sizeof(InstanceType), + "instance_storage_ is not large enough to hold the instance"); + static_assert( + alignof(decltype(instance_storage_)) >= alignof(InstanceType), + "instance_storage_ does not meet the instance's alignment requirement"); + new (&instance_storage_) + InstanceType(std::forward<ConstructorArgTypes>(constructor_args)...); + } + + ~NoDestructor() = default; + + NoDestructor(const NoDestructor&) = delete; + NoDestructor& operator=(const NoDestructor&) = delete; + + InstanceType* get() { + return reinterpret_cast<InstanceType*>(&instance_storage_); + } + + private: + typename std::aligned_storage<sizeof(InstanceType), + alignof(InstanceType)>::type instance_storage_; +}; + +} // namespace leveldb + +#endif // STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_ diff --git a/src/leveldb/util/no_destructor_test.cc b/src/leveldb/util/no_destructor_test.cc new file mode 100644 index 0000000000..b41caca694 --- /dev/null +++ b/src/leveldb/util/no_destructor_test.cc @@ -0,0 +1,47 @@ +// Copyright (c) 2018 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include <cstdint> +#include <cstdlib> +#include <utility> + +#include "util/no_destructor.h" +#include "util/testharness.h" + +namespace leveldb { + +namespace { + +struct DoNotDestruct { + public: + DoNotDestruct(uint32_t a, uint64_t b) : a(a), b(b) {} + ~DoNotDestruct() { std::abort(); } + + // Used to check constructor argument forwarding. + uint32_t a; + uint64_t b; +}; + +constexpr const uint32_t kGoldenA = 0xdeadbeef; +constexpr const uint64_t kGoldenB = 0xaabbccddeeffaabb; + +} // namespace + +class NoDestructorTest {}; + +TEST(NoDestructorTest, StackInstance) { + NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB); + ASSERT_EQ(kGoldenA, instance.get()->a); + ASSERT_EQ(kGoldenB, instance.get()->b); +} + +TEST(NoDestructorTest, StaticInstance) { + static NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB); + ASSERT_EQ(kGoldenA, instance.get()->a); + ASSERT_EQ(kGoldenB, instance.get()->b); +} + +} // namespace leveldb + +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/util/options.cc b/src/leveldb/util/options.cc index b5e6227613..62de5bf0d2 100644 --- a/src/leveldb/util/options.cc +++ b/src/leveldb/util/options.cc @@ -9,22 +9,6 @@ namespace leveldb { -Options::Options() - : comparator(BytewiseComparator()), - create_if_missing(false), - error_if_exists(false), - paranoid_checks(false), - env(Env::Default()), - info_log(NULL), - write_buffer_size(4<<20), - max_open_files(1000), - block_cache(NULL), - block_size(4096), - block_restart_interval(16), - max_file_size(2<<20), - compression(kSnappyCompression), - reuse_logs(false), - filter_policy(NULL) { -} +Options::Options() : comparator(BytewiseComparator()), env(Env::Default()) {} } // namespace leveldb diff --git a/src/leveldb/util/posix_logger.h b/src/leveldb/util/posix_logger.h index c063c2b7cb..28e15d10b4 100644 --- a/src/leveldb/util/posix_logger.h +++ b/src/leveldb/util/posix_logger.h @@ -3,94 +3,126 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. // // Logger implementation that can be shared by all environments -// where enough Posix functionality is available. +// where enough posix functionality is available. 
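NoDestructor above exists for function-level statics that must never run a destructor during process shutdown, the same idea as the SingletonEnv wrapper earlier in this diff; leveldb applies the pattern to long-lived singletons such as the default comparator. A minimal usage sketch under that assumption (Registry is a made-up type, not part of this change):

#include <cstdint>
#include <map>
#include <string>

#include "util/no_destructor.h"  // the header added above

struct Registry {
  std::map<std::string, uint64_t> counters;
};

// Function-local static: constructed on first use, never destroyed, so callers
// running during static destruction or on detached threads stay safe.
Registry* GlobalRegistry() {
  static leveldb::NoDestructor<Registry> registry;
  return registry.get();
}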
#ifndef STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_ #define STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_ -#include <algorithm> -#include <stdio.h> #include <sys/time.h> -#include <time.h> + +#include <cassert> +#include <cstdarg> +#include <cstdio> +#include <ctime> +#include <sstream> +#include <thread> + #include "leveldb/env.h" namespace leveldb { -class PosixLogger : public Logger { - private: - FILE* file_; - uint64_t (*gettid_)(); // Return the thread id for the current thread +class PosixLogger final : public Logger { public: - PosixLogger(FILE* f, uint64_t (*gettid)()) : file_(f), gettid_(gettid) { } - virtual ~PosixLogger() { - fclose(file_); - } - virtual void Logv(const char* format, va_list ap) { - const uint64_t thread_id = (*gettid_)(); - - // We try twice: the first time with a fixed-size stack allocated buffer, - // and the second time with a much larger dynamically allocated buffer. - char buffer[500]; - for (int iter = 0; iter < 2; iter++) { - char* base; - int bufsize; - if (iter == 0) { - bufsize = sizeof(buffer); - base = buffer; - } else { - bufsize = 30000; - base = new char[bufsize]; - } - char* p = base; - char* limit = base + bufsize; - - struct timeval now_tv; - gettimeofday(&now_tv, NULL); - const time_t seconds = now_tv.tv_sec; - struct tm t; - localtime_r(&seconds, &t); - p += snprintf(p, limit - p, - "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ", - t.tm_year + 1900, - t.tm_mon + 1, - t.tm_mday, - t.tm_hour, - t.tm_min, - t.tm_sec, - static_cast<int>(now_tv.tv_usec), - static_cast<long long unsigned int>(thread_id)); - - // Print the message - if (p < limit) { - va_list backup_ap; - va_copy(backup_ap, ap); - p += vsnprintf(p, limit - p, format, backup_ap); - va_end(backup_ap); - } + // Creates a logger that writes to the given file. + // + // The PosixLogger instance takes ownership of the file handle. + explicit PosixLogger(std::FILE* fp) : fp_(fp) { assert(fp != nullptr); } + + ~PosixLogger() override { std::fclose(fp_); } + + void Logv(const char* format, va_list arguments) override { + // Record the time as close to the Logv() call as possible. + struct ::timeval now_timeval; + ::gettimeofday(&now_timeval, nullptr); + const std::time_t now_seconds = now_timeval.tv_sec; + struct std::tm now_components; + ::localtime_r(&now_seconds, &now_components); + + // Record the thread ID. + constexpr const int kMaxThreadIdSize = 32; + std::ostringstream thread_stream; + thread_stream << std::this_thread::get_id(); + std::string thread_id = thread_stream.str(); + if (thread_id.size() > kMaxThreadIdSize) { + thread_id.resize(kMaxThreadIdSize); + } - // Truncate to available space if necessary - if (p >= limit) { - if (iter == 0) { - continue; // Try again with larger buffer - } else { - p = limit - 1; + // We first attempt to print into a stack-allocated buffer. If this attempt + // fails, we make a second attempt with a dynamically allocated buffer. + constexpr const int kStackBufferSize = 512; + char stack_buffer[kStackBufferSize]; + static_assert(sizeof(stack_buffer) == static_cast<size_t>(kStackBufferSize), + "sizeof(char) is expected to be 1 in C++"); + + int dynamic_buffer_size = 0; // Computed in the first iteration. + for (int iteration = 0; iteration < 2; ++iteration) { + const int buffer_size = + (iteration == 0) ? kStackBufferSize : dynamic_buffer_size; + char* const buffer = + (iteration == 0) ? stack_buffer : new char[dynamic_buffer_size]; + + // Print the header into the buffer. 
+ int buffer_offset = snprintf( + buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ", + now_components.tm_year + 1900, now_components.tm_mon + 1, + now_components.tm_mday, now_components.tm_hour, now_components.tm_min, + now_components.tm_sec, static_cast<int>(now_timeval.tv_usec), + thread_id.c_str()); + + // The header can be at most 28 characters (10 date + 15 time + + // 3 delimiters) plus the thread ID, which should fit comfortably into the + // static buffer. + assert(buffer_offset <= 28 + kMaxThreadIdSize); + static_assert(28 + kMaxThreadIdSize < kStackBufferSize, + "stack-allocated buffer may not fit the message header"); + assert(buffer_offset < buffer_size); + + // Print the message into the buffer. + std::va_list arguments_copy; + va_copy(arguments_copy, arguments); + buffer_offset += + std::vsnprintf(buffer + buffer_offset, buffer_size - buffer_offset, + format, arguments_copy); + va_end(arguments_copy); + + // The code below may append a newline at the end of the buffer, which + // requires an extra character. + if (buffer_offset >= buffer_size - 1) { + // The message did not fit into the buffer. + if (iteration == 0) { + // Re-run the loop and use a dynamically-allocated buffer. The buffer + // will be large enough for the log message, an extra newline and a + // null terminator. + dynamic_buffer_size = buffer_offset + 2; + continue; } + + // The dynamically-allocated buffer was incorrectly sized. This should + // not happen, assuming a correct implementation of (v)snprintf. Fail + // in tests, recover by truncating the log message in production. + assert(false); + buffer_offset = buffer_size - 1; } - // Add newline if necessary - if (p == base || p[-1] != '\n') { - *p++ = '\n'; + // Add a newline if necessary. + if (buffer[buffer_offset - 1] != '\n') { + buffer[buffer_offset] = '\n'; + ++buffer_offset; } - assert(p <= limit); - fwrite(base, 1, p - base, file_); - fflush(file_); - if (base != buffer) { - delete[] base; + assert(buffer_offset <= buffer_size); + std::fwrite(buffer, 1, buffer_offset, fp_); + std::fflush(fp_); + + if (iteration != 0) { + delete[] buffer; } break; } } + + private: + std::FILE* const fp_; }; } // namespace leveldb diff --git a/src/leveldb/util/random.h b/src/leveldb/util/random.h index ddd51b1c7b..76f7daf52a 100644 --- a/src/leveldb/util/random.h +++ b/src/leveldb/util/random.h @@ -15,6 +15,7 @@ namespace leveldb { class Random { private: uint32_t seed_; + public: explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) { // Avoid bad seeds. @@ -23,8 +24,8 @@ class Random { } } uint32_t Next() { - static const uint32_t M = 2147483647L; // 2^31-1 - static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0 + static const uint32_t M = 2147483647L; // 2^31-1 + static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0 // We are computing // seed_ = (seed_ * A) % M, where M = 2^31-1 // @@ -54,9 +55,7 @@ class Random { // Skewed: pick "base" uniformly from range [0,max_log] and then // return "base" random bits. The effect is to pick a number in the // range [0,2^max_log-1] with exponential bias towards smaller numbers. 
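// [editor's sketch, not part of the patch] The rewritten PosixLogger::Logv
// above formats at most twice: first into a fixed stack buffer, then, only
// if vsnprintf reports truncation, into a heap buffer sized exactly from
// vsnprintf's return value. A minimal standalone version of that strategy,
// with hypothetical names:
#include <cstdarg>
#include <cstdio>

void LogLine(std::FILE* fp, const char* format, ...) {
  char stack_buffer[512];

  std::va_list args;
  va_start(args, format);
  const int needed = std::vsnprintf(stack_buffer, sizeof(stack_buffer), format, args);
  va_end(args);

  if (needed < 0) return;  // Encoding error; nothing sensible to write.

  if (needed < static_cast<int>(sizeof(stack_buffer))) {
    std::fprintf(fp, "%s\n", stack_buffer);  // First attempt fit.
    return;
  }

  // Second attempt: exactly enough room for the message plus the NUL.
  char* heap_buffer = new char[needed + 1];
  std::va_list args_again;
  va_start(args_again, format);
  std::vsnprintf(heap_buffer, needed + 1, format, args_again);
  va_end(args_again);
  std::fprintf(fp, "%s\n", heap_buffer);
  delete[] heap_buffer;
}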
- uint32_t Skewed(int max_log) { - return Uniform(1 << Uniform(max_log + 1)); - } + uint32_t Skewed(int max_log) { return Uniform(1 << Uniform(max_log + 1)); } }; } // namespace leveldb diff --git a/src/leveldb/util/status.cc b/src/leveldb/util/status.cc index a44f35b314..15ce747d80 100644 --- a/src/leveldb/util/status.cc +++ b/src/leveldb/util/status.cc @@ -2,9 +2,11 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#include "leveldb/status.h" + #include <stdio.h> + #include "port/port.h" -#include "leveldb/status.h" namespace leveldb { @@ -18,8 +20,8 @@ const char* Status::CopyState(const char* state) { Status::Status(Code code, const Slice& msg, const Slice& msg2) { assert(code != kOk); - const uint32_t len1 = msg.size(); - const uint32_t len2 = msg2.size(); + const uint32_t len1 = static_cast<uint32_t>(msg.size()); + const uint32_t len2 = static_cast<uint32_t>(msg2.size()); const uint32_t size = len1 + (len2 ? (2 + len2) : 0); char* result = new char[size + 5]; memcpy(result, &size, sizeof(size)); @@ -34,7 +36,7 @@ Status::Status(Code code, const Slice& msg, const Slice& msg2) { } std::string Status::ToString() const { - if (state_ == NULL) { + if (state_ == nullptr) { return "OK"; } else { char tmp[30]; @@ -59,8 +61,8 @@ std::string Status::ToString() const { type = "IO error: "; break; default: - snprintf(tmp, sizeof(tmp), "Unknown code(%d): ", - static_cast<int>(code())); + snprintf(tmp, sizeof(tmp), + "Unknown code(%d): ", static_cast<int>(code())); type = tmp; break; } diff --git a/src/leveldb/util/status_test.cc b/src/leveldb/util/status_test.cc new file mode 100644 index 0000000000..2842319fbd --- /dev/null +++ b/src/leveldb/util/status_test.cc @@ -0,0 +1,40 @@ +// Copyright (c) 2018 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include <utility> + +#include "leveldb/slice.h" +#include "leveldb/status.h" +#include "util/testharness.h" + +namespace leveldb { + +TEST(Status, MoveConstructor) { + { + Status ok = Status::OK(); + Status ok2 = std::move(ok); + + ASSERT_TRUE(ok2.ok()); + } + + { + Status status = Status::NotFound("custom NotFound status message"); + Status status2 = std::move(status); + + ASSERT_TRUE(status2.IsNotFound()); + ASSERT_EQ("NotFound: custom NotFound status message", status2.ToString()); + } + + { + Status self_moved = Status::IOError("custom IOError status message"); + + // Needed to bypass compiler warning about explicit move-assignment. 
+ Status& self_moved_reference = self_moved; + self_moved_reference = std::move(self_moved); + } +} + +} // namespace leveldb + +int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } diff --git a/src/leveldb/util/testharness.cc b/src/leveldb/util/testharness.cc index 402fab34d7..318ecfa3b7 100644 --- a/src/leveldb/util/testharness.cc +++ b/src/leveldb/util/testharness.cc @@ -4,11 +4,15 @@ #include "util/testharness.h" -#include <string> #include <stdlib.h> #include <sys/stat.h> #include <sys/types.h> +#include <string> +#include <vector> + +#include "leveldb/env.h" + namespace leveldb { namespace test { @@ -19,10 +23,10 @@ struct Test { void (*func)(); }; std::vector<Test>* tests; -} +} // namespace bool RegisterTest(const char* base, const char* name, void (*func)()) { - if (tests == NULL) { + if (tests == nullptr) { tests = new std::vector<Test>; } Test t; @@ -37,14 +41,14 @@ int RunAllTests() { const char* matcher = getenv("LEVELDB_TESTS"); int num = 0; - if (tests != NULL) { + if (tests != nullptr) { for (size_t i = 0; i < tests->size(); i++) { const Test& t = (*tests)[i]; - if (matcher != NULL) { + if (matcher != nullptr) { std::string name = t.base; name.push_back('.'); name.append(t.name); - if (strstr(name.c_str(), matcher) == NULL) { + if (strstr(name.c_str(), matcher) == nullptr) { continue; } } @@ -66,7 +70,7 @@ std::string TmpDir() { int RandomSeed() { const char* env = getenv("TEST_RANDOM_SEED"); - int result = (env != NULL ? atoi(env) : 301); + int result = (env != nullptr ? atoi(env) : 301); if (result <= 0) { result = 301; } diff --git a/src/leveldb/util/testharness.h b/src/leveldb/util/testharness.h index da4fe68bb4..72cd1629eb 100644 --- a/src/leveldb/util/testharness.h +++ b/src/leveldb/util/testharness.h @@ -7,10 +7,10 @@ #include <stdio.h> #include <stdlib.h> + #include <sstream> -#include "leveldb/env.h" -#include "leveldb/slice.h" -#include "util/random.h" + +#include "leveldb/status.h" namespace leveldb { namespace test { @@ -27,15 +27,15 @@ namespace test { // // Returns 0 if all tests pass. // Dies or returns a non-zero value if some test fails. -extern int RunAllTests(); +int RunAllTests(); // Return the directory to use for temporary storage. -extern std::string TmpDir(); +std::string TmpDir(); // Return a randomization seed for this run. Typically returns the // same number on repeated invocations of this binary, but automated // runs may be able to vary the seed. -extern int RandomSeed(); +int RandomSeed(); // An instance of Tester is allocated to hold temporary state during // the execution of an assertion. @@ -47,9 +47,7 @@ class Tester { std::stringstream ss_; public: - Tester(const char* f, int l) - : ok_(true), fname_(f), line_(l) { - } + Tester(const char* f, int l) : ok_(true), fname_(f), line_(l) {} ~Tester() { if (!ok_) { @@ -74,14 +72,14 @@ class Tester { return *this; } -#define BINARY_OP(name,op) \ - template <class X, class Y> \ - Tester& name(const X& x, const Y& y) { \ - if (! 
(x op y)) { \ - ss_ << " failed: " << x << (" " #op " ") << y; \ - ok_ = false; \ - } \ - return *this; \ +#define BINARY_OP(name, op) \ + template <class X, class Y> \ + Tester& name(const X& x, const Y& y) { \ + if (!(x op y)) { \ + ss_ << " failed: " << x << (" " #op " ") << y; \ + ok_ = false; \ + } \ + return *this; \ } BINARY_OP(IsEq, ==) @@ -104,33 +102,38 @@ class Tester { #define ASSERT_TRUE(c) ::leveldb::test::Tester(__FILE__, __LINE__).Is((c), #c) #define ASSERT_OK(s) ::leveldb::test::Tester(__FILE__, __LINE__).IsOk((s)) -#define ASSERT_EQ(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a),(b)) -#define ASSERT_NE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a),(b)) -#define ASSERT_GE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a),(b)) -#define ASSERT_GT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a),(b)) -#define ASSERT_LE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a),(b)) -#define ASSERT_LT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a),(b)) - -#define TCONCAT(a,b) TCONCAT1(a,b) -#define TCONCAT1(a,b) a##b - -#define TEST(base,name) \ -class TCONCAT(_Test_,name) : public base { \ - public: \ - void _Run(); \ - static void _RunIt() { \ - TCONCAT(_Test_,name) t; \ - t._Run(); \ - } \ -}; \ -bool TCONCAT(_Test_ignored_,name) = \ - ::leveldb::test::RegisterTest(#base, #name, &TCONCAT(_Test_,name)::_RunIt); \ -void TCONCAT(_Test_,name)::_Run() +#define ASSERT_EQ(a, b) \ + ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a), (b)) +#define ASSERT_NE(a, b) \ + ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a), (b)) +#define ASSERT_GE(a, b) \ + ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a), (b)) +#define ASSERT_GT(a, b) \ + ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a), (b)) +#define ASSERT_LE(a, b) \ + ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a), (b)) +#define ASSERT_LT(a, b) \ + ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a), (b)) + +#define TCONCAT(a, b) TCONCAT1(a, b) +#define TCONCAT1(a, b) a##b + +#define TEST(base, name) \ + class TCONCAT(_Test_, name) : public base { \ + public: \ + void _Run(); \ + static void _RunIt() { \ + TCONCAT(_Test_, name) t; \ + t._Run(); \ + } \ + }; \ + bool TCONCAT(_Test_ignored_, name) = ::leveldb::test::RegisterTest( \ + #base, #name, &TCONCAT(_Test_, name)::_RunIt); \ + void TCONCAT(_Test_, name)::_Run() // Register the specified test. Typically not used directly, but // invoked via the macro expansion of TEST. -extern bool RegisterTest(const char* base, const char* name, void (*func)()); - +bool RegisterTest(const char* base, const char* name, void (*func)()); } // namespace test } // namespace leveldb diff --git a/src/leveldb/util/testutil.cc b/src/leveldb/util/testutil.cc index bee56bf75f..6b151b9e64 100644 --- a/src/leveldb/util/testutil.cc +++ b/src/leveldb/util/testutil.cc @@ -12,7 +12,7 @@ namespace test { Slice RandomString(Random* rnd, int len, std::string* dst) { dst->resize(len); for (int i = 0; i < len; i++) { - (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95)); // ' ' .. '~' + (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95)); // ' ' .. '~' } return Slice(*dst); } @@ -20,9 +20,8 @@ Slice RandomString(Random* rnd, int len, std::string* dst) { std::string RandomKey(Random* rnd, int len) { // Make sure to generate a wide variety of characters so we // test the boundary conditions for short-key optimizations. 
- static const char kTestChars[] = { - '\0', '\1', 'a', 'b', 'c', 'd', 'e', '\xfd', '\xfe', '\xff' - }; + static const char kTestChars[] = {'\0', '\1', 'a', 'b', 'c', + 'd', 'e', '\xfd', '\xfe', '\xff'}; std::string result; for (int i = 0; i < len; i++) { result += kTestChars[rnd->Uniform(sizeof(kTestChars))]; @@ -30,9 +29,8 @@ std::string RandomKey(Random* rnd, int len) { return result; } - -extern Slice CompressibleString(Random* rnd, double compressed_fraction, - size_t len, std::string* dst) { +Slice CompressibleString(Random* rnd, double compressed_fraction, size_t len, + std::string* dst) { int raw = static_cast<int>(len * compressed_fraction); if (raw < 1) raw = 1; std::string raw_data; diff --git a/src/leveldb/util/testutil.h b/src/leveldb/util/testutil.h index d7e4583702..bb4051ba07 100644 --- a/src/leveldb/util/testutil.h +++ b/src/leveldb/util/testutil.h @@ -5,6 +5,7 @@ #ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_ #define STORAGE_LEVELDB_UTIL_TESTUTIL_H_ +#include "helpers/memenv/memenv.h" #include "leveldb/env.h" #include "leveldb/slice.h" #include "util/random.h" @@ -14,17 +15,17 @@ namespace test { // Store in *dst a random string of length "len" and return a Slice that // references the generated data. -extern Slice RandomString(Random* rnd, int len, std::string* dst); +Slice RandomString(Random* rnd, int len, std::string* dst); // Return a random key with the specified length that may contain interesting // characters (e.g. \x00, \xff, etc.). -extern std::string RandomKey(Random* rnd, int len); +std::string RandomKey(Random* rnd, int len); // Store in *dst a string of length "len" that will compress to // "N*compressed_fraction" bytes and return a Slice that references // the generated data. -extern Slice CompressibleString(Random* rnd, double compressed_fraction, - size_t len, std::string* dst); +Slice CompressibleString(Random* rnd, double compressed_fraction, size_t len, + std::string* dst); // A wrapper that allows injection of errors. class ErrorEnv : public EnvWrapper { @@ -32,25 +33,27 @@ class ErrorEnv : public EnvWrapper { bool writable_file_error_; int num_writable_file_errors_; - ErrorEnv() : EnvWrapper(Env::Default()), - writable_file_error_(false), - num_writable_file_errors_(0) { } + ErrorEnv() + : EnvWrapper(NewMemEnv(Env::Default())), + writable_file_error_(false), + num_writable_file_errors_(0) {} + ~ErrorEnv() override { delete target(); } - virtual Status NewWritableFile(const std::string& fname, - WritableFile** result) { + Status NewWritableFile(const std::string& fname, + WritableFile** result) override { if (writable_file_error_) { ++num_writable_file_errors_; - *result = NULL; + *result = nullptr; return Status::IOError(fname, "fake error"); } return target()->NewWritableFile(fname, result); } - virtual Status NewAppendableFile(const std::string& fname, - WritableFile** result) { + Status NewAppendableFile(const std::string& fname, + WritableFile** result) override { if (writable_file_error_) { ++num_writable_file_errors_; - *result = NULL; + *result = nullptr; return Status::IOError(fname, "fake error"); } return target()->NewAppendableFile(fname, result); diff --git a/src/leveldb/util/windows_logger.h b/src/leveldb/util/windows_logger.h new file mode 100644 index 0000000000..92960638d1 --- /dev/null +++ b/src/leveldb/util/windows_logger.h @@ -0,0 +1,124 @@ +// Copyright (c) 2018 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
See the AUTHORS file for names of contributors. +// +// Logger implementation for the Windows platform. + +#ifndef STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_ +#define STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_ + +#include <cassert> +#include <cstdarg> +#include <cstdio> +#include <ctime> +#include <sstream> +#include <thread> + +#include "leveldb/env.h" + +namespace leveldb { + +class WindowsLogger final : public Logger { + public: + // Creates a logger that writes to the given file. + // + // The PosixLogger instance takes ownership of the file handle. + explicit WindowsLogger(std::FILE* fp) : fp_(fp) { assert(fp != nullptr); } + + ~WindowsLogger() override { std::fclose(fp_); } + + void Logv(const char* format, va_list arguments) override { + // Record the time as close to the Logv() call as possible. + SYSTEMTIME now_components; + ::GetLocalTime(&now_components); + + // Record the thread ID. + constexpr const int kMaxThreadIdSize = 32; + std::ostringstream thread_stream; + thread_stream << std::this_thread::get_id(); + std::string thread_id = thread_stream.str(); + if (thread_id.size() > kMaxThreadIdSize) { + thread_id.resize(kMaxThreadIdSize); + } + + // We first attempt to print into a stack-allocated buffer. If this attempt + // fails, we make a second attempt with a dynamically allocated buffer. + constexpr const int kStackBufferSize = 512; + char stack_buffer[kStackBufferSize]; + static_assert(sizeof(stack_buffer) == static_cast<size_t>(kStackBufferSize), + "sizeof(char) is expected to be 1 in C++"); + + int dynamic_buffer_size = 0; // Computed in the first iteration. + for (int iteration = 0; iteration < 2; ++iteration) { + const int buffer_size = + (iteration == 0) ? kStackBufferSize : dynamic_buffer_size; + char* const buffer = + (iteration == 0) ? stack_buffer : new char[dynamic_buffer_size]; + + // Print the header into the buffer. + int buffer_offset = snprintf( + buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ", + now_components.wYear, now_components.wMonth, now_components.wDay, + now_components.wHour, now_components.wMinute, now_components.wSecond, + static_cast<int>(now_components.wMilliseconds * 1000), + thread_id.c_str()); + + // The header can be at most 28 characters (10 date + 15 time + + // 3 delimiters) plus the thread ID, which should fit comfortably into the + // static buffer. + assert(buffer_offset <= 28 + kMaxThreadIdSize); + static_assert(28 + kMaxThreadIdSize < kStackBufferSize, + "stack-allocated buffer may not fit the message header"); + assert(buffer_offset < buffer_size); + + // Print the message into the buffer. + std::va_list arguments_copy; + va_copy(arguments_copy, arguments); + buffer_offset += + std::vsnprintf(buffer + buffer_offset, buffer_size - buffer_offset, + format, arguments_copy); + va_end(arguments_copy); + + // The code below may append a newline at the end of the buffer, which + // requires an extra character. + if (buffer_offset >= buffer_size - 1) { + // The message did not fit into the buffer. + if (iteration == 0) { + // Re-run the loop and use a dynamically-allocated buffer. The buffer + // will be large enough for the log message, an extra newline and a + // null terminator. + dynamic_buffer_size = buffer_offset + 2; + continue; + } + + // The dynamically-allocated buffer was incorrectly sized. This should + // not happen, assuming a correct implementation of (v)snprintf. Fail + // in tests, recover by truncating the log message in production. 
+ assert(false); + buffer_offset = buffer_size - 1; + } + + // Add a newline if necessary. + if (buffer[buffer_offset - 1] != '\n') { + buffer[buffer_offset] = '\n'; + ++buffer_offset; + } + + assert(buffer_offset <= buffer_size); + std::fwrite(buffer, 1, buffer_offset, fp_); + std::fflush(fp_); + + if (iteration != 0) { + delete[] buffer; + } + break; + } + } + + private: + std::FILE* const fp_; +}; + +} // namespace leveldb + +#endif // STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_ diff --git a/src/net.cpp b/src/net.cpp index 9cd2d30d9d..18fe95e675 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -498,7 +498,7 @@ void CNode::SetAddrLocal(const CService& addrLocalIn) { #undef X #define X(name) stats.name = name -void CNode::copyStats(CNodeStats &stats, std::vector<bool> &m_asmap) +void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap) { stats.nodeid = this->GetId(); X(nServices); @@ -331,7 +331,7 @@ public: */ int64_t PoissonNextSendInbound(int64_t now, int average_interval_seconds); - void SetAsmap(std::vector<bool> asmap) { addrman.m_asmap = asmap; } + void SetAsmap(std::vector<bool> asmap) { addrman.m_asmap = std::move(asmap); } private: struct ListenSocket { @@ -983,7 +983,7 @@ public: void CloseSocketDisconnect(); - void copyStats(CNodeStats &stats, std::vector<bool> &m_asmap); + void copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap); ServiceFlags GetLocalServices() const { diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 1e065da07d..cf4aee0647 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -148,6 +148,14 @@ namespace { std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main); uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main); + /* + * Filter for transactions that have been recently confirmed. + * We use this to avoid requesting transactions that have already been + * confirnmed. + */ + RecursiveMutex g_cs_recent_confirmed_transactions; + std::unique_ptr<CRollingBloomFilter> g_recent_confirmed_transactions GUARDED_BY(g_cs_recent_confirmed_transactions); + /** Blocks that are in flight, and that are in the queue to be downloaded. */ struct QueuedBlock { uint256 hash; @@ -1116,6 +1124,16 @@ PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CS // Initialize global variables that cannot be constructed at startup. recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); + // Blocks don't typically have more than 4000 transactions, so this should + // be at least six blocks (~1 hr) worth of transactions that we can store. + // If the number of transactions appearing in a block goes up, or if we are + // seeing getdata requests more than an hour after initial announcement, we + // can increase this number. + // The false positive rate of 1/1M should come out to less than 1 + // transaction per day that would be inadvertently ignored (which is the + // same probability that we have in the reject filter). + g_recent_confirmed_transactions.reset(new CRollingBloomFilter(24000, 0.000001)); + const Consensus::Params& consensusParams = Params().GetConsensus(); // Stale tip checking and peer eviction are on two different timers, but we // don't want them to get out of sync due to drift in the scheduler, so we @@ -1129,36 +1147,59 @@ PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CS * Evict orphan txn pool entries (EraseOrphanTx) based on a newly connected * block. Also save the time of the last tip update. 
*/ -void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) { - LOCK(g_cs_orphans); +void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) +{ + { + LOCK(g_cs_orphans); - std::vector<uint256> vOrphanErase; + std::vector<uint256> vOrphanErase; - for (const CTransactionRef& ptx : pblock->vtx) { - const CTransaction& tx = *ptx; + for (const CTransactionRef& ptx : pblock->vtx) { + const CTransaction& tx = *ptx; - // Which orphan pool entries must we evict? - for (const auto& txin : tx.vin) { - auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout); - if (itByPrev == mapOrphanTransactionsByPrev.end()) continue; - for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { - const CTransaction& orphanTx = *(*mi)->second.tx; - const uint256& orphanHash = orphanTx.GetHash(); - vOrphanErase.push_back(orphanHash); + // Which orphan pool entries must we evict? + for (const auto& txin : tx.vin) { + auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout); + if (itByPrev == mapOrphanTransactionsByPrev.end()) continue; + for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { + const CTransaction& orphanTx = *(*mi)->second.tx; + const uint256& orphanHash = orphanTx.GetHash(); + vOrphanErase.push_back(orphanHash); + } } } - } - // Erase orphan transactions included or precluded by this block - if (vOrphanErase.size()) { - int nErased = 0; - for (const uint256& orphanHash : vOrphanErase) { - nErased += EraseOrphanTx(orphanHash); + // Erase orphan transactions included or precluded by this block + if (vOrphanErase.size()) { + int nErased = 0; + for (const uint256& orphanHash : vOrphanErase) { + nErased += EraseOrphanTx(orphanHash); + } + LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased); + } + + g_last_tip_update = GetTime(); + } + { + LOCK(g_cs_recent_confirmed_transactions); + for (const auto& ptx : pblock->vtx) { + g_recent_confirmed_transactions->insert(ptx->GetHash()); } - LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased); } +} - g_last_tip_update = GetTime(); +void PeerLogicValidation::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) +{ + // To avoid relay problems with transactions that were previously + // confirmed, clear our filter of recently confirmed transactions whenever + // there's a reorg. + // This means that in a 1-block reorg (where 1 block is disconnected and + // then another block reconnected), our filter will drop to having only one + // block's worth of transactions in it, but that should be fine, since + // presumably the most common case of relaying a confirmed transaction + // should be just after a new block containing it is found. 
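// [editor's sketch, not part of the patch] How the pieces added above fit
// together: txids of confirmed transactions are inserted on BlockConnected,
// AlreadyHave() consults the filter, and BlockDisconnected clears it on a
// reorg. The sizing comment above works out to 24000 / ~4000 tx per block
// ~= 6 blocks, i.e. roughly an hour of confirmed transactions, at a
// 1-in-1,000,000 false positive rate. Function and variable names below are
// illustrative only; the CRollingBloomFilter calls mirror those in the hunks.
#include <bloom.h>    // CRollingBloomFilter
#include <uint256.h>

#include <vector>

void RecentConfirmedSketch(const std::vector<uint256>& confirmed_txids, const uint256& queried_txid)
{
    CRollingBloomFilter recent_confirmed{24000, 0.000001};

    // BlockConnected: remember everything the new block confirmed.
    for (const uint256& txid : confirmed_txids) {
        recent_confirmed.insert(txid);
    }

    // AlreadyHave: skip re-requesting a txid we very likely already saw confirmed.
    const bool probably_confirmed = recent_confirmed.contains(queried_txid);
    (void)probably_confirmed;

    // BlockDisconnected: a reorg may return these transactions to the mempool,
    // so forget them to avoid suppressing their relay.
    recent_confirmed.reset();
}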
+ LOCK(g_cs_recent_confirmed_transactions); + g_recent_confirmed_transactions->reset(); } // All of the following cache a recent block, and are protected by cs_most_recent_block @@ -1311,12 +1352,14 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main) LOCK(g_cs_orphans); if (mapOrphanTransactions.count(inv.hash)) return true; } - const CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip(); + + { + LOCK(g_cs_recent_confirmed_transactions); + if (g_recent_confirmed_transactions->contains(inv.hash)) return true; + } return recentRejects->contains(inv.hash) || - mempool.exists(inv.hash) || - coins_cache.HaveCoinInCache(COutPoint(inv.hash, 0)) || // Best effort: only try output 0 and 1 - coins_cache.HaveCoinInCache(COutPoint(inv.hash, 1)); + mempool.exists(inv.hash); } case MSG_BLOCK: case MSG_WITNESS_BLOCK: diff --git a/src/net_processing.h b/src/net_processing.h index 2ceadedd99..6f26abc209 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -33,6 +33,7 @@ public: * Overridden from CValidationInterface. */ void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected, const std::vector<CTransactionRef>& vtxConflicted) override; + void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override; /** * Overridden from CValidationInterface. */ diff --git a/src/netaddress.cpp b/src/netaddress.cpp index ce3e17197e..1cac57a817 100644 --- a/src/netaddress.cpp +++ b/src/netaddress.cpp @@ -401,6 +401,26 @@ bool CNetAddr::GetIn6Addr(struct in6_addr* pipv6Addr) const return true; } +bool CNetAddr::HasLinkedIPv4() const +{ + return IsRoutable() && (IsIPv4() || IsRFC6145() || IsRFC6052() || IsRFC3964() || IsRFC4380()); +} + +uint32_t CNetAddr::GetLinkedIPv4() const +{ + if (IsIPv4() || IsRFC6145() || IsRFC6052()) { + // IPv4, mapped IPv4, SIIT translated IPv4: the IPv4 address is the last 4 bytes of the address + return ReadBE32(ip + 12); + } else if (IsRFC3964()) { + // 6to4 tunneled IPv4: the IPv4 address is in bytes 2-6 + return ReadBE32(ip + 2); + } else if (IsRFC4380()) { + // Teredo tunneled IPv4: the IPv4 address is in the last 4 bytes of the address, but bitflipped + return ~ReadBE32(ip + 12); + } + assert(false); +} + uint32_t CNetAddr::GetNetClass() const { uint32_t net_class = NET_IPV6; if (IsLocal()) { @@ -410,7 +430,7 @@ uint32_t CNetAddr::GetNetClass() const { net_class = NET_INTERNAL; } else if (!IsRoutable()) { net_class = NET_UNROUTABLE; - } else if (IsIPv4() || IsRFC6145() || IsRFC6052() || IsRFC3964() || IsRFC4380()) { + } else if (HasLinkedIPv4()) { net_class = NET_IPV4; } else if (IsTor()) { net_class = NET_ONION; @@ -424,10 +444,24 @@ uint32_t CNetAddr::GetMappedAS(const std::vector<bool> &asmap) const { return 0; // Indicates not found, safe because AS0 is reserved per RFC7607. 
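// [editor's sketch, not part of the patch] The new GetLinkedIPv4() above
// recovers an embedded IPv4 address from the 16-byte address buffer: the
// last four bytes for plain/mapped/SIIT IPv4, bytes 2..5 for 6to4
// (RFC 3964), and the bit-flipped last four bytes for Teredo (RFC 4380).
// A standalone illustration with the big-endian read open-coded:
#include <cstdint>
#include <cstdio>

static uint32_t ReadBE32Sketch(const unsigned char* p) {
    return (uint32_t{p[0]} << 24) | (uint32_t{p[1]} << 16) |
           (uint32_t{p[2]} << 8) | uint32_t{p[3]};
}

int main() {
    // 2002:0102:0304::/48 is a 6to4 address embedding 1.2.3.4 in bytes 2..5.
    const unsigned char six_to_four[16] = {0x20, 0x02, 0x01, 0x02, 0x03, 0x04};
    const uint32_t from_6to4 = ReadBE32Sketch(six_to_four + 2);   // 0x01020304

    // A Teredo (2001:0::/32) address stores the client IPv4 bit-flipped in
    // the last four bytes, so complementing the read recovers 1.2.3.4.
    const unsigned char teredo[16] = {0x20, 0x01, 0, 0, 0, 0, 0, 0,
                                      0, 0, 0, 0, 0xFE, 0xFD, 0xFC, 0xFB};
    const uint32_t from_teredo = ~ReadBE32Sketch(teredo + 12);    // 0x01020304

    std::printf("%08x %08x\n", from_6to4, from_teredo);
    return 0;
}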
} std::vector<bool> ip_bits(128); - for (int8_t byte_i = 0; byte_i < 16; ++byte_i) { - uint8_t cur_byte = GetByte(15 - byte_i); - for (uint8_t bit_i = 0; bit_i < 8; ++bit_i) { - ip_bits[byte_i * 8 + bit_i] = (cur_byte >> (7 - bit_i)) & 1; + if (HasLinkedIPv4()) { + // For lookup, treat as if it was just an IPv4 address (pchIPv4 prefix + IPv4 bits) + for (int8_t byte_i = 0; byte_i < 12; ++byte_i) { + for (uint8_t bit_i = 0; bit_i < 8; ++bit_i) { + ip_bits[byte_i * 8 + bit_i] = (pchIPv4[byte_i] >> (7 - bit_i)) & 1; + } + } + uint32_t ipv4 = GetLinkedIPv4(); + for (int i = 0; i < 32; ++i) { + ip_bits[96 + i] = (ipv4 >> (31 - i)) & 1; + } + } else { + // Use all 128 bits of the IPv6 address otherwise + for (int8_t byte_i = 0; byte_i < 16; ++byte_i) { + uint8_t cur_byte = GetByte(15 - byte_i); + for (uint8_t bit_i = 0; bit_i < 8; ++bit_i) { + ip_bits[byte_i * 8 + bit_i] = (cur_byte >> (7 - bit_i)) & 1; + } } } uint32_t mapped_as = Interpret(asmap, ip_bits); @@ -463,51 +497,32 @@ std::vector<unsigned char> CNetAddr::GetGroup(const std::vector<bool> &asmap) co int nStartByte = 0; int nBits = 16; - // all local addresses belong to the same group - if (IsLocal()) - { + if (IsLocal()) { + // all local addresses belong to the same group nBits = 0; - } - // all internal-usage addresses get their own group - if (IsInternal()) - { + } else if (IsInternal()) { + // all internal-usage addresses get their own group nStartByte = sizeof(g_internal_prefix); nBits = (sizeof(ip) - sizeof(g_internal_prefix)) * 8; - } - // all other unroutable addresses belong to the same group - else if (!IsRoutable()) - { + } else if (!IsRoutable()) { + // all other unroutable addresses belong to the same group nBits = 0; - } - // for IPv4 addresses, '1' + the 16 higher-order bits of the IP - // includes mapped IPv4, SIIT translated IPv4, and the well-known prefix - else if (IsIPv4() || IsRFC6145() || IsRFC6052()) - { - nStartByte = 12; - } - // for 6to4 tunnelled addresses, use the encapsulated IPv4 address - else if (IsRFC3964()) - { - nStartByte = 2; - } - // for Teredo-tunnelled IPv6 addresses, use the encapsulated IPv4 address - else if (IsRFC4380()) - { - vchRet.push_back(GetByte(3) ^ 0xFF); - vchRet.push_back(GetByte(2) ^ 0xFF); + } else if (HasLinkedIPv4()) { + // IPv4 addresses (and mapped IPv4 addresses) use /16 groups + uint32_t ipv4 = GetLinkedIPv4(); + vchRet.push_back((ipv4 >> 24) & 0xFF); + vchRet.push_back((ipv4 >> 16) & 0xFF); return vchRet; - } - else if (IsTor()) - { + } else if (IsTor()) { nStartByte = 6; nBits = 4; - } - // for he.net, use /36 groups - else if (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x04 && GetByte(12) == 0x70) + } else if (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x04 && GetByte(12) == 0x70) { + // for he.net, use /36 groups nBits = 36; - // for the rest of the IPv6 network, use /32 groups - else + } else { + // for the rest of the IPv6 network, use /32 groups nBits = 32; + } // push our ip onto vchRet byte by byte... while (nBits >= 8) diff --git a/src/netaddress.h b/src/netaddress.h index 078234595c..b300b709f3 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -39,7 +39,6 @@ class CNetAddr explicit CNetAddr(const struct in_addr& ipv4Addr); void SetIP(const CNetAddr& ip); - private: /** * Set raw IPv4 or IPv6 address (in network byte order) * @note Only NET_IPV4 and NET_IPV6 are allowed for network. @@ -80,6 +79,11 @@ class CNetAddr bool GetInAddr(struct in_addr* pipv4Addr) const; uint32_t GetNetClass() const; + //! 
For IPv4, mapped IPv4, SIIT translated IPv4, Teredo, 6to4 tunneled addresses, return the relevant IPv4 address as a uint32. + uint32_t GetLinkedIPv4() const; + //! Whether this address has a linked IPv4 address (see GetLinkedIPv4()). + bool HasLinkedIPv4() const; + // The AS on the BGP path to the node we use to diversify // peers in AddrMan bucketing based on the AS infrastructure. // The ip->AS mapping depends on how asmap is constructed. diff --git a/src/node/coinstats.cpp b/src/node/coinstats.cpp index a818f06d51..641b2a5d9c 100644 --- a/src/node/coinstats.cpp +++ b/src/node/coinstats.cpp @@ -23,7 +23,7 @@ static void ApplyStats(CCoinsStats &stats, CHashWriter& ss, const uint256& hash, for (const auto& output : outputs) { ss << VARINT(output.first + 1); ss << output.second.out.scriptPubKey; - ss << VARINT(output.second.out.nValue, VarIntMode::NONNEGATIVE_SIGNED); + ss << VARINT_MODE(output.second.out.nValue, VarIntMode::NONNEGATIVE_SIGNED); stats.nTransactionOutputs++; stats.nTotalAmount += output.second.out.nValue; stats.nBogoSize += 32 /* txid */ + 4 /* vout index */ + 4 /* height + coinbase */ + 8 /* amount */ + diff --git a/src/outputtype.cpp b/src/outputtype.cpp index 85ceb03aa6..567eecb5c9 100644 --- a/src/outputtype.cpp +++ b/src/outputtype.cpp @@ -19,6 +19,8 @@ static const std::string OUTPUT_TYPE_STRING_LEGACY = "legacy"; static const std::string OUTPUT_TYPE_STRING_P2SH_SEGWIT = "p2sh-segwit"; static const std::string OUTPUT_TYPE_STRING_BECH32 = "bech32"; +const std::array<OutputType, 3> OUTPUT_TYPES = {OutputType::LEGACY, OutputType::P2SH_SEGWIT, OutputType::BECH32}; + bool ParseOutputType(const std::string& type, OutputType& output_type) { if (type == OUTPUT_TYPE_STRING_LEGACY) { @@ -80,22 +82,30 @@ CTxDestination AddAndGetDestinationForScript(FillableSigningProvider& keystore, { // Add script to keystore keystore.AddCScript(script); + ScriptHash sh(script); // Note that scripts over 520 bytes are not yet supported. switch (type) { case OutputType::LEGACY: - return ScriptHash(script); + keystore.AddCScript(GetScriptForDestination(sh)); + return sh; case OutputType::P2SH_SEGWIT: case OutputType::BECH32: { CTxDestination witdest = WitnessV0ScriptHash(script); CScript witprog = GetScriptForDestination(witdest); // Check if the resulting program is solvable (i.e. doesn't use an uncompressed key) - if (!IsSolvable(keystore, witprog)) return ScriptHash(script); + if (!IsSolvable(keystore, witprog)) { + // Since the wsh is invalid, add and return the sh instead. + keystore.AddCScript(GetScriptForDestination(sh)); + return sh; + } // Add the redeemscript, so that P2WSH and P2SH-P2WSH outputs are recognized as ours. 
keystore.AddCScript(witprog); if (type == OutputType::BECH32) { return witdest; } else { - return ScriptHash(witprog); + ScriptHash sh_w = ScriptHash(witprog); + keystore.AddCScript(GetScriptForDestination(sh_w)); + return sh_w; } } default: assert(false); diff --git a/src/outputtype.h b/src/outputtype.h index b91082ddc0..1438f65844 100644 --- a/src/outputtype.h +++ b/src/outputtype.h @@ -10,6 +10,7 @@ #include <script/signingprovider.h> #include <script/standard.h> +#include <array> #include <string> #include <vector> @@ -27,6 +28,8 @@ enum class OutputType { CHANGE_AUTO, }; +extern const std::array<OutputType, 3> OUTPUT_TYPES; + NODISCARD bool ParseOutputType(const std::string& str, OutputType& output_type); const std::string& FormatOutputType(OutputType type); @@ -47,4 +50,3 @@ std::vector<CTxDestination> GetAllDestinationsForKey(const CPubKey& key); CTxDestination AddAndGetDestinationForScript(FillableSigningProvider& keystore, const CScript& script, OutputType); #endif // BITCOIN_OUTPUTTYPE_H - diff --git a/src/prevector.h b/src/prevector.h index f4ece738a8..09debedc4f 100644 --- a/src/prevector.h +++ b/src/prevector.h @@ -15,7 +15,6 @@ #include <type_traits> #include <utility> -#pragma pack(push, 1) /** Implements a drop-in replacement for std::vector<T> which stores up to N * elements directly (without heap allocation). The types Size and Diff are * used to store element counts, and can be any unsigned + signed type. @@ -147,14 +146,20 @@ public: }; private: - size_type _size = 0; +#pragma pack(push, 1) union direct_or_indirect { char direct[sizeof(T) * N]; struct { - size_type capacity; char* indirect; + size_type capacity; }; - } _union = {}; + }; +#pragma pack(pop) + alignas(char*) direct_or_indirect _union = {}; + size_type _size = 0; + + static_assert(alignof(char*) % alignof(size_type) == 0 && sizeof(char*) % alignof(size_type) == 0, "size_type cannot have more restrictive alignment requirement than pointer"); + static_assert(alignof(char*) % alignof(T) == 0, "value_type T cannot have more restrictive alignment requirement than pointer"); T* direct_ptr(difference_type pos) { return reinterpret_cast<T*>(_union.direct) + pos; } const T* direct_ptr(difference_type pos) const { return reinterpret_cast<const T*>(_union.direct) + pos; } @@ -523,6 +528,5 @@ public: return item_ptr(0); } }; -#pragma pack(pop) #endif // BITCOIN_PREVECTOR_H diff --git a/src/qt/bantablemodel.cpp b/src/qt/bantablemodel.cpp index d1ee7fac6a..72f16bb09f 100644 --- a/src/qt/bantablemodel.cpp +++ b/src/qt/bantablemodel.cpp @@ -6,12 +6,13 @@ #include <interfaces/node.h> #include <net_types.h> // For banmap_t -#include <qt/clientmodel.h> #include <utility> -#include <QDebug> +#include <QDateTime> #include <QList> +#include <QModelIndex> +#include <QVariant> bool BannedNodeLessThan::operator()(const CCombinedBan& left, const CCombinedBan& right) const { @@ -78,10 +79,9 @@ public: } }; -BanTableModel::BanTableModel(interfaces::Node& node, ClientModel *parent) : +BanTableModel::BanTableModel(interfaces::Node& node, QObject* parent) : QAbstractTableModel(parent), - m_node(node), - clientModel(parent) + m_node(node) { columns << tr("IP/Netmask") << tr("Banned Until"); priv.reset(new BanTablePriv()); diff --git a/src/qt/bantablemodel.h b/src/qt/bantablemodel.h index 9dec5fa6a9..f01c506a1e 100644 --- a/src/qt/bantablemodel.h +++ b/src/qt/bantablemodel.h @@ -12,7 +12,6 @@ #include <QAbstractTableModel> #include <QStringList> -class ClientModel; class BanTablePriv; namespace interfaces { @@ -45,7 +44,7 @@ class 
BanTableModel : public QAbstractTableModel Q_OBJECT public: - explicit BanTableModel(interfaces::Node& node, ClientModel *parent = nullptr); + explicit BanTableModel(interfaces::Node& node, QObject* parent); ~BanTableModel(); void startAutoRefresh(); void stopAutoRefresh(); @@ -72,7 +71,6 @@ public Q_SLOTS: private: interfaces::Node& m_node; - ClientModel *clientModel; QStringList columns; std::unique_ptr<BanTablePriv> priv; }; diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp index e8146982f9..a1ec3eaab1 100644 --- a/src/qt/clientmodel.cpp +++ b/src/qt/clientmodel.cpp @@ -242,8 +242,9 @@ static void BlockTipChanged(ClientModel *clientmodel, bool initialSync, int heig clientmodel->cachedBestHeaderHeight = height; clientmodel->cachedBestHeaderTime = blockTime; } - // if we are in-sync or if we notify a header update, update the UI regardless of last update time - if (fHeader || !initialSync || now - nLastUpdateNotification > MODEL_UPDATE_DELAY) { + + // During initial sync, block notifications, and header notifications from reindexing are both throttled. + if (!initialSync || (fHeader && !clientmodel->node().getReindex()) || now - nLastUpdateNotification > MODEL_UPDATE_DELAY) { //pass an async signal to the UI thread bool invoked = QMetaObject::invokeMethod(clientmodel, "numBlocksChanged", Qt::QueuedConnection, Q_ARG(int, height), diff --git a/src/qt/peertablemodel.cpp b/src/qt/peertablemodel.cpp index 631c66e745..a497f58b16 100644 --- a/src/qt/peertablemodel.cpp +++ b/src/qt/peertablemodel.cpp @@ -4,7 +4,6 @@ #include <qt/peertablemodel.h> -#include <qt/clientmodel.h> #include <qt/guiconstants.h> #include <qt/guiutil.h> @@ -100,10 +99,9 @@ public: } }; -PeerTableModel::PeerTableModel(interfaces::Node& node, ClientModel *parent) : +PeerTableModel::PeerTableModel(interfaces::Node& node, QObject* parent) : QAbstractTableModel(parent), m_node(node), - clientModel(parent), timer(nullptr) { columns << tr("NodeId") << tr("Node/Service") << tr("Ping") << tr("Sent") << tr("Received") << tr("User Agent"); diff --git a/src/qt/peertablemodel.h b/src/qt/peertablemodel.h index b3f5dd7dbe..cf45c5a08f 100644 --- a/src/qt/peertablemodel.h +++ b/src/qt/peertablemodel.h @@ -13,7 +13,6 @@ #include <QAbstractTableModel> #include <QStringList> -class ClientModel; class PeerTablePriv; namespace interfaces { @@ -51,7 +50,7 @@ class PeerTableModel : public QAbstractTableModel Q_OBJECT public: - explicit PeerTableModel(interfaces::Node& node, ClientModel *parent = nullptr); + explicit PeerTableModel(interfaces::Node& node, QObject* parent); ~PeerTableModel(); const CNodeCombinedStats *getNodeStats(int idx); int getRowByNodeId(NodeId nodeid); @@ -83,7 +82,6 @@ public Q_SLOTS: private: interfaces::Node& m_node; - ClientModel *clientModel; QStringList columns; std::unique_ptr<PeerTablePriv> priv; QTimer *timer; diff --git a/src/qt/splashscreen.cpp b/src/qt/splashscreen.cpp index e19833019d..e4ffa6cd9a 100644 --- a/src/qt/splashscreen.cpp +++ b/src/qt/splashscreen.cpp @@ -137,7 +137,7 @@ SplashScreen::~SplashScreen() bool SplashScreen::eventFilter(QObject * obj, QEvent * ev) { if (ev->type() == QEvent::KeyPress) { QKeyEvent *keyEvent = static_cast<QKeyEvent *>(ev); - if(keyEvent->text()[0] == 'q') { + if (keyEvent->key() == Qt::Key_Q) { m_node.startShutdown(); } } diff --git a/src/qt/test/addressbooktests.cpp b/src/qt/test/addressbooktests.cpp index 176aa7902b..0f082802cc 100644 --- a/src/qt/test/addressbooktests.cpp +++ b/src/qt/test/addressbooktests.cpp @@ -59,6 +59,7 @@ void 
TestAddAddressesToSendBook(interfaces::Node& node) { TestChain100Setup test; std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), WalletLocation(), WalletDatabase::CreateMock()); + wallet->SetupLegacyScriptPubKeyMan(); bool firstRun; wallet->LoadWallet(firstRun); diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp index b4cd7f6bac..c1a0f63f73 100644 --- a/src/qt/test/wallettests.cpp +++ b/src/qt/test/wallettests.cpp @@ -143,10 +143,9 @@ void TestGUI(interfaces::Node& node) bool firstRun; wallet->LoadWallet(firstRun); { - auto spk_man = wallet->GetLegacyScriptPubKeyMan(); + auto spk_man = wallet->GetOrCreateLegacyScriptPubKeyMan(); auto locked_chain = wallet->chain().lock(); - LOCK(wallet->cs_wallet); - AssertLockHeld(spk_man->cs_wallet); + LOCK2(wallet->cs_wallet, spk_man->cs_KeyStore); wallet->SetAddressBook(GetDestinationForKey(test.coinbaseKey.GetPubKey(), wallet->m_default_address_type), "", "receive"); spk_man->AddKeyPubKey(test.coinbaseKey, test.coinbaseKey.GetPubKey()); wallet->SetLastBlockProcessed(105, ::ChainActive().Tip()->GetBlockHash()); diff --git a/src/qt/utilitydialog.cpp b/src/qt/utilitydialog.cpp index efe213902e..3101fb01c3 100644 --- a/src/qt/utilitydialog.cpp +++ b/src/qt/utilitydialog.cpp @@ -10,8 +10,6 @@ #include <qt/forms/ui_helpmessagedialog.h> -#include <qt/bitcoingui.h> - #include <clientversion.h> #include <init.h> #include <util/system.h> @@ -21,9 +19,10 @@ #include <QCloseEvent> #include <QLabel> +#include <QMainWindow> #include <QRegExp> -#include <QTextTable> #include <QTextCursor> +#include <QTextTable> #include <QVBoxLayout> /** "Help message" or "About" dialog box */ @@ -144,10 +143,9 @@ ShutdownWindow::ShutdownWindow(QWidget *parent, Qt::WindowFlags f): setLayout(layout); } -QWidget *ShutdownWindow::showShutdownWindow(BitcoinGUI *window) +QWidget* ShutdownWindow::showShutdownWindow(QMainWindow* window) { - if (!window) - return nullptr; + assert(window != nullptr); // Show a simple window indicating shutdown status QWidget *shutdownWindow = new ShutdownWindow(); diff --git a/src/qt/utilitydialog.h b/src/qt/utilitydialog.h index f1cedff282..833b86fd3e 100644 --- a/src/qt/utilitydialog.h +++ b/src/qt/utilitydialog.h @@ -6,9 +6,11 @@ #define BITCOIN_QT_UTILITYDIALOG_H #include <QDialog> -#include <QObject> +#include <QWidget> -class BitcoinGUI; +QT_BEGIN_NAMESPACE +class QMainWindow; +QT_END_NAMESPACE namespace interfaces { class Node; @@ -46,7 +48,7 @@ class ShutdownWindow : public QWidget public: explicit ShutdownWindow(QWidget *parent=nullptr, Qt::WindowFlags f=Qt::Widget); - static QWidget *showShutdownWindow(BitcoinGUI *window); + static QWidget* showShutdownWindow(QMainWindow* window); protected: void closeEvent(QCloseEvent *event); diff --git a/src/qt/walletframe.cpp b/src/qt/walletframe.cpp index 27a5a5ac64..dac3326cc4 100644 --- a/src/qt/walletframe.cpp +++ b/src/qt/walletframe.cpp @@ -37,6 +37,10 @@ WalletFrame::~WalletFrame() void WalletFrame::setClientModel(ClientModel *_clientModel) { this->clientModel = _clientModel; + + for (auto i = mapWalletViews.constBegin(); i != mapWalletViews.constEnd(); ++i) { + i.value()->setClientModel(_clientModel); + } } bool WalletFrame::addWallet(WalletModel *walletModel) @@ -46,7 +50,6 @@ bool WalletFrame::addWallet(WalletModel *walletModel) if (mapWalletViews.count(walletModel) > 0) return false; WalletView *walletView = new WalletView(platformStyle, this); - walletView->setBitcoinGUI(gui); walletView->setClientModel(clientModel); 
walletView->setWalletModel(walletModel); walletView->showOutOfSyncWarning(bOutOfSync); @@ -62,6 +65,14 @@ bool WalletFrame::addWallet(WalletModel *walletModel) mapWalletViews[walletModel] = walletView; connect(walletView, &WalletView::outOfSyncWarningClicked, this, &WalletFrame::outOfSyncWarningClicked); + connect(walletView, &WalletView::transactionClicked, gui, &BitcoinGUI::gotoHistoryPage); + connect(walletView, &WalletView::coinsSent, gui, &BitcoinGUI::gotoHistoryPage); + connect(walletView, &WalletView::message, [this](const QString& title, const QString& message, unsigned int style) { + gui->message(title, message, style); + }); + connect(walletView, &WalletView::encryptionStatusChanged, gui, &BitcoinGUI::updateWalletStatus); + connect(walletView, &WalletView::incomingTransaction, gui, &BitcoinGUI::incomingTransaction); + connect(walletView, &WalletView::hdEnabledStatusChanged, gui, &BitcoinGUI::updateWalletStatus); return true; } diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp index 6c3a06f3a2..8a84a8c168 100644 --- a/src/qt/walletmodel.cpp +++ b/src/qt/walletmodel.cpp @@ -82,12 +82,12 @@ void WalletModel::pollBalanceChanged() return; } - if(fForceCheckBalanceChanged || m_node.getNumBlocks() != cachedNumBlocks) + if(fForceCheckBalanceChanged || numBlocks != cachedNumBlocks) { fForceCheckBalanceChanged = false; // Balance and number of transactions might have changed - cachedNumBlocks = m_node.getNumBlocks(); + cachedNumBlocks = numBlocks; checkBalanceChanged(new_balances); if(transactionTableModel) diff --git a/src/qt/walletview.cpp b/src/qt/walletview.cpp index c777d633be..bdcb82e06b 100644 --- a/src/qt/walletview.cpp +++ b/src/qt/walletview.cpp @@ -6,7 +6,6 @@ #include <qt/addressbookpage.h> #include <qt/askpassphrasedialog.h> -#include <qt/bitcoingui.h> #include <qt/clientmodel.h> #include <qt/guiutil.h> #include <qt/optionsmodel.h> @@ -65,11 +64,13 @@ WalletView::WalletView(const PlatformStyle *_platformStyle, QWidget *parent): addWidget(receiveCoinsPage); addWidget(sendCoinsPage); + connect(overviewPage, &OverviewPage::transactionClicked, this, &WalletView::transactionClicked); // Clicking on a transaction on the overview pre-selects the transaction on the transaction history page connect(overviewPage, &OverviewPage::transactionClicked, transactionView, static_cast<void (TransactionView::*)(const QModelIndex&)>(&TransactionView::focusTransaction)); connect(overviewPage, &OverviewPage::outOfSyncWarningClicked, this, &WalletView::requestedSyncWarningInfo); + connect(sendCoinsPage, &SendCoinsDialog::coinsSent, this, &WalletView::coinsSent); // Highlight transaction after send connect(sendCoinsPage, &SendCoinsDialog::coinsSent, transactionView, static_cast<void (TransactionView::*)(const uint256&)>(&TransactionView::focusTransaction)); @@ -86,32 +87,6 @@ WalletView::~WalletView() { } -void WalletView::setBitcoinGUI(BitcoinGUI *gui) -{ - if (gui) - { - // Clicking on a transaction on the overview page simply sends you to transaction history page - connect(overviewPage, &OverviewPage::transactionClicked, gui, &BitcoinGUI::gotoHistoryPage); - - // Navigate to transaction history page after send - connect(sendCoinsPage, &SendCoinsDialog::coinsSent, gui, &BitcoinGUI::gotoHistoryPage); - - // Receive and report messages - connect(this, &WalletView::message, [gui](const QString &title, const QString &message, unsigned int style) { - gui->message(title, message, style); - }); - - // Pass through encryption status changed signals - connect(this, 
&WalletView::encryptionStatusChanged, gui, &BitcoinGUI::updateWalletStatus); - - // Pass through transaction notifications - connect(this, &WalletView::incomingTransaction, gui, &BitcoinGUI::incomingTransaction); - - // Connect HD enabled state signal - connect(this, &WalletView::hdEnabledStatusChanged, gui, &BitcoinGUI::updateWalletStatus); - } -} - void WalletView::setClientModel(ClientModel *_clientModel) { this->clientModel = _clientModel; diff --git a/src/qt/walletview.h b/src/qt/walletview.h index 4313f0bfa2..78d870f59f 100644 --- a/src/qt/walletview.h +++ b/src/qt/walletview.h @@ -9,7 +9,6 @@ #include <QStackedWidget> -class BitcoinGUI; class ClientModel; class OverviewPage; class PlatformStyle; @@ -39,7 +38,6 @@ public: explicit WalletView(const PlatformStyle *platformStyle, QWidget *parent); ~WalletView(); - void setBitcoinGUI(BitcoinGUI *gui); /** Set the client model. The client model represents the part of the core that communicates with the P2P network, and is wallet-agnostic. */ @@ -68,7 +66,7 @@ private: TransactionView *transactionView; - QProgressDialog *progressDialog; + QProgressDialog* progressDialog{nullptr}; const PlatformStyle *platformStyle; public Q_SLOTS: @@ -115,6 +113,8 @@ public Q_SLOTS: void requestedSyncWarningInfo(); Q_SIGNALS: + void transactionClicked(); + void coinsSent(); /** Fired when a message should be reported to the user */ void message(const QString &title, const QString &message, unsigned int style); /** Encryption status of wallet changed */ diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index eb5148eebd..9b06aba22b 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -229,7 +229,7 @@ static UniValue waitfornewblock(const JSONRPCRequest& request) RPCResult{ "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" - " \"height\" : { (int) Block height\n" + " \"height\" : { (numeric) Block height\n" "}\n" }, RPCExamples{ @@ -269,7 +269,7 @@ static UniValue waitforblock(const JSONRPCRequest& request) RPCResult{ "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" - " \"height\" : { (int) Block height\n" + " \"height\" : { (numeric) Block height\n" "}\n" }, RPCExamples{ @@ -313,7 +313,7 @@ static UniValue waitforblockheight(const JSONRPCRequest& request) RPCResult{ "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" - " \"height\" : { (int) Block height\n" + " \"height\" : { (numeric) Block height\n" "}\n" }, RPCExamples{ @@ -400,10 +400,10 @@ static std::string EntryDescriptionString() " \"ancestor\" : n, (numeric) modified fees (see above) of in-mempool ancestors (including this one) in " + CURRENCY_UNIT + "\n" " \"descendant\" : n, (numeric) modified fees (see above) of in-mempool descendants (including this one) in " + CURRENCY_UNIT + "\n" " }\n" - " \"depends\" : [ (array) unconfirmed transactions used as inputs for this transaction\n" + " \"depends\" : [ (json array) unconfirmed transactions used as inputs for this transaction\n" " \"transactionid\", (string) parent transaction id\n" " ... ]\n" - " \"spentby\" : [ (array) unconfirmed transactions spending outputs from this transaction\n" + " \"spentby\" : [ (json array) unconfirmed transactions spending outputs from this transaction\n" " \"transactionid\", (string) child transaction id\n" " ... 
]\n" " \"bip125-replaceable\" : true|false, (boolean) Whether this transaction could be replaced due to BIP125 (replace-by-fee)\n"; @@ -984,14 +984,14 @@ static UniValue gettxoutsetinfo(const JSONRPCRequest& request) {}, RPCResult{ "{\n" - " \"height\":n, (numeric) The current block height (index)\n" - " \"bestblock\": \"hex\", (string) The hash of the block at the tip of the chain\n" - " \"transactions\": n, (numeric) The number of transactions with unspent outputs\n" - " \"txouts\": n, (numeric) The number of unspent transaction outputs\n" - " \"bogosize\": n, (numeric) A meaningless metric for UTXO set size\n" + " \"height\" : n, (numeric) The current block height (index)\n" + " \"bestblock\" : \"hex\", (string) The hash of the block at the tip of the chain\n" + " \"transactions\" : n, (numeric) The number of transactions with unspent outputs\n" + " \"txouts\" : n, (numeric) The number of unspent transaction outputs\n" + " \"bogosize\" : n, (numeric) A meaningless metric for UTXO set size\n" " \"hash_serialized_2\": \"hash\", (string) The serialized hash\n" - " \"disk_size\": n, (numeric) The estimated size of the chainstate on disk\n" - " \"total_amount\": x.xxx (numeric) The total amount\n" + " \"disk_size\" : n, (numeric) The estimated size of the chainstate on disk\n" + " \"total_amount\" : x.xxx (numeric) The total amount\n" "}\n" }, RPCExamples{ @@ -1032,7 +1032,7 @@ UniValue gettxout(const JSONRPCRequest& request) }, RPCResult{ "{\n" - " \"bestblock\": \"hash\", (string) The hash of the block at the tip of the chain\n" + " \"bestblock\" : \"hash\", (string) The hash of the block at the tip of the chain\n" " \"confirmations\" : n, (numeric) The number of confirmations\n" " \"value\" : x.xxx, (numeric) The transaction value in " + CURRENCY_UNIT + "\n" " \"scriptPubKey\" : { (json object)\n" @@ -1204,39 +1204,39 @@ UniValue getblockchaininfo(const JSONRPCRequest& request) {}, RPCResult{ "{\n" - " \"chain\": \"xxxx\", (string) current network name (main, test, regtest)\n" - " \"blocks\": xxxxxx, (numeric) the height of the most-work fully-validated chain. The genesis block has height 0\n" - " \"headers\": xxxxxx, (numeric) the current number of headers we have validated\n" - " \"bestblockhash\": \"...\", (string) the hash of the currently best block\n" - " \"difficulty\": xxxxxx, (numeric) the current difficulty\n" - " \"mediantime\": xxxxxx, (numeric) median time for the current best block\n" - " \"verificationprogress\": xxxx, (numeric) estimate of verification progress [0..1]\n" - " \"initialblockdownload\": xxxx, (bool) (debug information) estimate of whether this node is in Initial Block Download mode.\n" - " \"chainwork\": \"xxxx\" (string) total amount of work in active chain, in hexadecimal\n" - " \"size_on_disk\": xxxxxx, (numeric) the estimated size of the block and undo files on disk\n" - " \"pruned\": xx, (boolean) if the blocks are subject to pruning\n" - " \"pruneheight\": xxxxxx, (numeric) lowest-height complete block stored (only present if pruning is enabled)\n" - " \"automatic_pruning\": xx, (boolean) whether automatic pruning is enabled (only present if pruning is enabled)\n" - " \"prune_target_size\": xxxxxx, (numeric) the target size used by pruning (only present if automatic pruning is enabled)\n" - " \"softforks\": { (object) status of softforks\n" + " \"chain\" : \"xxxx\", (string) current network name (main, test, regtest)\n" + " \"blocks\" : xxxxxx, (numeric) the height of the most-work fully-validated chain. 
The genesis block has height 0\n" + " \"headers\" : xxxxxx, (numeric) the current number of headers we have validated\n" + " \"bestblockhash\" : \"...\", (string) the hash of the currently best block\n" + " \"difficulty\" : xxxxxx, (numeric) the current difficulty\n" + " \"mediantime\" : xxxxxx, (numeric) median time for the current best block\n" + " \"verificationprogress\" : xxxx, (numeric) estimate of verification progress [0..1]\n" + " \"initialblockdownload\" : xxxx, (boolean) (debug information) estimate of whether this node is in Initial Block Download mode.\n" + " \"chainwork\" : \"xxxx\" (string) total amount of work in active chain, in hexadecimal\n" + " \"size_on_disk\" : xxxxxx, (numeric) the estimated size of the block and undo files on disk\n" + " \"pruned\" : xx, (boolean) if the blocks are subject to pruning\n" + " \"pruneheight\" : xxxxxx, (numeric) lowest-height complete block stored (only present if pruning is enabled)\n" + " \"automatic_pruning\" : xx, (boolean) whether automatic pruning is enabled (only present if pruning is enabled)\n" + " \"prune_target_size\" : xxxxxx, (numeric) the target size used by pruning (only present if automatic pruning is enabled)\n" + " \"softforks\" : { (json object) status of softforks\n" " \"xxxx\" : { (string) name of the softfork\n" - " \"type\": \"xxxx\", (string) one of \"buried\", \"bip9\"\n" - " \"bip9\": { (object) status of bip9 softforks (only for \"bip9\" type)\n" - " \"status\": \"xxxx\", (string) one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\"\n" - " \"bit\": xx, (numeric) the bit (0-28) in the block version field used to signal this softfork (only for \"started\" status)\n" - " \"start_time\": xx, (numeric) the minimum median time past of a block at which the bit gains its meaning\n" - " \"timeout\": xx, (numeric) the median time past of a block at which the deployment is considered failed if not yet locked in\n" - " \"since\": xx, (numeric) height of the first block to which the status applies\n" - " \"statistics\": { (object) numeric statistics about BIP9 signalling for a softfork\n" - " \"period\": xx, (numeric) the length in blocks of the BIP9 signalling period \n" - " \"threshold\": xx, (numeric) the number of blocks with the version bit set required to activate the feature \n" - " \"elapsed\": xx, (numeric) the number of blocks elapsed since the beginning of the current period \n" - " \"count\": xx, (numeric) the number of blocks with the version bit set in the current period \n" - " \"possible\": xx (boolean) returns false if there are not enough blocks left in this period to pass activation threshold \n" + " \"type\" : \"xxxx\", (string) one of \"buried\", \"bip9\"\n" + " \"bip9\": { (json object) status of bip9 softforks (only for \"bip9\" type)\n" + " \"status\" : \"xxxx\", (string) one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\"\n" + " \"bit\" : xx, (numeric) the bit (0-28) in the block version field used to signal this softfork (only for \"started\" status)\n" + " \"start_time\" : xx, (numeric) the minimum median time past of a block at which the bit gains its meaning\n" + " \"timeout\" : xx, (numeric) the median time past of a block at which the deployment is considered failed if not yet locked in\n" + " \"since\" : xx, (numeric) height of the first block to which the status applies\n" + " \"statistics\" : { (json object) numeric statistics about BIP9 signalling for a softfork\n" + " \"period\" : xx, (numeric) the length in blocks of the BIP9 signalling period \n" + 
" \"threshold\" : xx, (numeric) the number of blocks with the version bit set required to activate the feature \n" + " \"elapsed\" : xx, (numeric) the number of blocks elapsed since the beginning of the current period \n" + " \"count\" : xx, (numeric) the number of blocks with the version bit set in the current period \n" + " \"possible\" : xx (boolean) returns false if there are not enough blocks left in this period to pass activation threshold \n" " }\n" " },\n" - " \"height\": \"xxxxxx\", (numeric) height of the first block which the rules are or will be enforced (only for \"buried\" type, or \"bip9\" type with \"active\" status)\n" - " \"active\": xx, (boolean) true if the rules are enforced for the mempool and the next block\n" + " \"height\" : \"xxxxxx\", (numeric) height of the first block which the rules are or will be enforced (only for \"buried\" type, or \"bip9\" type with \"active\" status)\n" + " \"active\" : xx, (boolean) true if the rules are enforced for the mempool and the next block\n" " }\n" " }\n" " \"warnings\" : \"...\", (string) any network and blockchain warnings.\n" @@ -1318,16 +1318,16 @@ static UniValue getchaintips(const JSONRPCRequest& request) RPCResult{ "[\n" " {\n" - " \"height\": xxxx, (numeric) height of the chain tip\n" - " \"hash\": \"xxxx\", (string) block hash of the tip\n" - " \"branchlen\": 0 (numeric) zero for main chain\n" - " \"status\": \"active\" (string) \"active\" for the main chain\n" + " \"height\" : xxxx, (numeric) height of the chain tip\n" + " \"hash\" : \"xxxx\", (string) block hash of the tip\n" + " \"branchlen\" : 0 (numeric) zero for main chain\n" + " \"status\" : \"active\" (string) \"active\" for the main chain\n" " },\n" " {\n" - " \"height\": xxxx,\n" - " \"hash\": \"xxxx\",\n" - " \"branchlen\": 1 (numeric) length of branch connecting the tip to the main chain\n" - " \"status\": \"xxxx\" (string) status of the chain (active, valid-fork, valid-headers, headers-only, invalid)\n" + " \"height\" : xxxx,\n" + " \"hash\" : \"xxxx\",\n" + " \"branchlen\" : 1 (numeric) length of branch connecting the tip to the main chain\n" + " \"status\" : \"xxxx\" (string) status of the chain (active, valid-fork, valid-headers, headers-only, invalid)\n" " }\n" "]\n" "Possible values for status:\n" @@ -1437,13 +1437,13 @@ static UniValue getmempoolinfo(const JSONRPCRequest& request) {}, RPCResult{ "{\n" - " \"loaded\": true|false (boolean) True if the mempool is fully loaded\n" - " \"size\": xxxxx, (numeric) Current tx count\n" - " \"bytes\": xxxxx, (numeric) Sum of all virtual transaction sizes as defined in BIP 141. Differs from actual serialized size because witness data is discounted\n" - " \"usage\": xxxxx, (numeric) Total memory usage for the mempool\n" - " \"maxmempool\": xxxxx, (numeric) Maximum memory usage for the mempool\n" - " \"mempoolminfee\": xxxxx (numeric) Minimum fee rate in " + CURRENCY_UNIT + "/kB for tx to be accepted. Is the maximum of minrelaytxfee and minimum mempool fee\n" - " \"minrelaytxfee\": xxxxx (numeric) Current minimum relay fee for transactions\n" + " \"loaded\" : true|false (boolean) True if the mempool is fully loaded\n" + " \"size\" : xxxxx, (numeric) Current tx count\n" + " \"bytes\" : xxxxx, (numeric) Sum of all virtual transaction sizes as defined in BIP 141. 
Differs from actual serialized size because witness data is discounted\n" + " \"usage\" : xxxxx, (numeric) Total memory usage for the mempool\n" + " \"maxmempool\" : xxxxx, (numeric) Maximum memory usage for the mempool\n" + " \"mempoolminfee\" : xxxxx (numeric) Minimum fee rate in " + CURRENCY_UNIT + "/kB for tx to be accepted. Is the maximum of minrelaytxfee and minimum mempool fee\n" + " \"minrelaytxfee\" : xxxxx (numeric) Current minimum relay fee for transactions\n" "}\n" }, RPCExamples{ @@ -1577,14 +1577,14 @@ static UniValue getchaintxstats(const JSONRPCRequest& request) }, RPCResult{ "{\n" - " \"time\": xxxxx, (numeric) The timestamp for the final block in the window, expressed in " + UNIX_EPOCH_TIME + ".\n" - " \"txcount\": xxxxx, (numeric) The total number of transactions in the chain up to that point.\n" - " \"window_final_block_hash\": \"...\", (string) The hash of the final block in the window.\n" - " \"window_final_block_height\": xxxxx, (numeric) The height of the final block in the window.\n" - " \"window_block_count\": xxxxx, (numeric) Size of the window in number of blocks.\n" - " \"window_tx_count\": xxxxx, (numeric) The number of transactions in the window. Only returned if \"window_block_count\" is > 0.\n" - " \"window_interval\": xxxxx, (numeric) The elapsed time in the window in seconds. Only returned if \"window_block_count\" is > 0.\n" - " \"txrate\": x.xx, (numeric) The average rate of transactions per second in the window. Only returned if \"window_interval\" is > 0.\n" + " \"time\" : xxxxx, (numeric) The timestamp for the final block in the window, expressed in " + UNIX_EPOCH_TIME + ".\n" + " \"txcount\" : xxxxx, (numeric) The total number of transactions in the chain up to that point.\n" + " \"window_final_block_hash\" : \"...\", (string) The hash of the final block in the window.\n" + " \"window_final_block_height\" : xxxxx, (numeric) The height of the final block in the window.\n" + " \"window_block_count\" : xxxxx, (numeric) Size of the window in number of blocks.\n" + " \"window_tx_count\" : xxxxx, (numeric) The number of transactions in the window. Only returned if \"window_block_count\" is > 0.\n" + " \"window_interval\" : xxxxx, (numeric) The elapsed time in the window in seconds. Only returned if \"window_block_count\" is > 0.\n" + " \"txrate\" : x.xx, (numeric) The average rate of transactions per second in the window. 
Only returned if \"window_interval\" is > 0.\n" "}\n" }, RPCExamples{ @@ -1716,41 +1716,41 @@ static UniValue getblockstats(const JSONRPCRequest& request) }, RPCResult{ "{ (json object)\n" - " \"avgfee\": xxxxx, (numeric) Average fee in the block\n" - " \"avgfeerate\": xxxxx, (numeric) Average feerate (in satoshis per virtual byte)\n" - " \"avgtxsize\": xxxxx, (numeric) Average transaction size\n" - " \"blockhash\": xxxxx, (string) The block hash (to check for potential reorgs)\n" - " \"feerate_percentiles\": [ (array of numeric) Feerates at the 10th, 25th, 50th, 75th, and 90th percentile weight unit (in satoshis per virtual byte)\n" + " \"avgfee\" : xxxxx, (numeric) Average fee in the block\n" + " \"avgfeerate\" : xxxxx, (numeric) Average feerate (in satoshis per virtual byte)\n" + " \"avgtxsize\" : xxxxx, (numeric) Average transaction size\n" + " \"blockhash\" : xxxxx, (string) The block hash (to check for potential reorgs)\n" + " \"feerate_percentiles\" : [ (array of numeric) Feerates at the 10th, 25th, 50th, 75th, and 90th percentile weight unit (in satoshis per virtual byte)\n" " \"10th_percentile_feerate\", (numeric) The 10th percentile feerate\n" " \"25th_percentile_feerate\", (numeric) The 25th percentile feerate\n" " \"50th_percentile_feerate\", (numeric) The 50th percentile feerate\n" " \"75th_percentile_feerate\", (numeric) The 75th percentile feerate\n" " \"90th_percentile_feerate\", (numeric) The 90th percentile feerate\n" " ],\n" - " \"height\": xxxxx, (numeric) The height of the block\n" - " \"ins\": xxxxx, (numeric) The number of inputs (excluding coinbase)\n" - " \"maxfee\": xxxxx, (numeric) Maximum fee in the block\n" - " \"maxfeerate\": xxxxx, (numeric) Maximum feerate (in satoshis per virtual byte)\n" - " \"maxtxsize\": xxxxx, (numeric) Maximum transaction size\n" - " \"medianfee\": xxxxx, (numeric) Truncated median fee in the block\n" - " \"mediantime\": xxxxx, (numeric) The block median time past\n" - " \"mediantxsize\": xxxxx, (numeric) Truncated median transaction size\n" - " \"minfee\": xxxxx, (numeric) Minimum fee in the block\n" - " \"minfeerate\": xxxxx, (numeric) Minimum feerate (in satoshis per virtual byte)\n" - " \"mintxsize\": xxxxx, (numeric) Minimum transaction size\n" - " \"outs\": xxxxx, (numeric) The number of outputs\n" - " \"subsidy\": xxxxx, (numeric) The block subsidy\n" - " \"swtotal_size\": xxxxx, (numeric) Total size of all segwit transactions\n" - " \"swtotal_weight\": xxxxx, (numeric) Total weight of all segwit transactions divided by segwit scale factor (4)\n" - " \"swtxs\": xxxxx, (numeric) The number of segwit transactions\n" - " \"time\": xxxxx, (numeric) The block time\n" - " \"total_out\": xxxxx, (numeric) Total amount in all outputs (excluding coinbase and thus reward [ie subsidy + totalfee])\n" - " \"total_size\": xxxxx, (numeric) Total size of all non-coinbase transactions\n" - " \"total_weight\": xxxxx, (numeric) Total weight of all non-coinbase transactions divided by segwit scale factor (4)\n" - " \"totalfee\": xxxxx, (numeric) The fee total\n" - " \"txs\": xxxxx, (numeric) The number of transactions (excluding coinbase)\n" - " \"utxo_increase\": xxxxx, (numeric) The increase/decrease in the number of unspent outputs\n" - " \"utxo_size_inc\": xxxxx, (numeric) The increase/decrease in size for the utxo index (not discounting op_return and similar)\n" + " \"height\" : xxxxx, (numeric) The height of the block\n" + " \"ins\" : xxxxx, (numeric) The number of inputs (excluding coinbase)\n" + " \"maxfee\" : xxxxx, (numeric) Maximum fee in 
the block\n" + " \"maxfeerate\" : xxxxx, (numeric) Maximum feerate (in satoshis per virtual byte)\n" + " \"maxtxsize\" : xxxxx, (numeric) Maximum transaction size\n" + " \"medianfee\" : xxxxx, (numeric) Truncated median fee in the block\n" + " \"mediantime\" : xxxxx, (numeric) The block median time past\n" + " \"mediantxsize\" : xxxxx, (numeric) Truncated median transaction size\n" + " \"minfee\" : xxxxx, (numeric) Minimum fee in the block\n" + " \"minfeerate\" : xxxxx, (numeric) Minimum feerate (in satoshis per virtual byte)\n" + " \"mintxsize\" : xxxxx, (numeric) Minimum transaction size\n" + " \"outs\" : xxxxx, (numeric) The number of outputs\n" + " \"subsidy\" : xxxxx, (numeric) The block subsidy\n" + " \"swtotal_size\" : xxxxx, (numeric) Total size of all segwit transactions\n" + " \"swtotal_weight\" : xxxxx, (numeric) Total weight of all segwit transactions divided by segwit scale factor (4)\n" + " \"swtxs\" : xxxxx, (numeric) The number of segwit transactions\n" + " \"time\" : xxxxx, (numeric) The block time\n" + " \"total_out\" : xxxxx, (numeric) Total amount in all outputs (excluding coinbase and thus reward [ie subsidy + totalfee])\n" + " \"total_size\" : xxxxx, (numeric) Total size of all non-coinbase transactions\n" + " \"total_weight\" : xxxxx, (numeric) Total weight of all non-coinbase transactions divided by segwit scale factor (4)\n" + " \"totalfee\" : xxxxx, (numeric) The fee total\n" + " \"txs\" : xxxxx, (numeric) The number of transactions (excluding coinbase)\n" + " \"utxo_increase\" : xxxxx, (numeric) The increase/decrease in the number of unspent outputs\n" + " \"utxo_size_inc\" : xxxxx, (numeric) The increase/decrease in size for the utxo index (not discounting op_return and similar)\n" "}\n" }, RPCExamples{ @@ -2076,21 +2076,21 @@ UniValue scantxoutset(const JSONRPCRequest& request) }, RPCResult{ "{\n" - " \"success\": true|false, (boolean) Whether the scan was completed\n" - " \"txouts\": n, (numeric) The number of unspent transaction outputs scanned\n" - " \"height\": n, (numeric) The current block height (index)\n" - " \"bestblock\": \"hex\", (string) The hash of the block at the tip of the chain\n" - " \"unspents\": [\n" + " \"success\" : true|false, (boolean) Whether the scan was completed\n" + " \"txouts\" : n, (numeric) The number of unspent transaction outputs scanned\n" + " \"height\" : n, (numeric) The current block height (index)\n" + " \"bestblock\" : \"hex\", (string) The hash of the block at the tip of the chain\n" + " \"unspents\" : [\n" " {\n" - " \"txid\": \"hash\", (string) The transaction id\n" - " \"vout\": n, (numeric) The vout value\n" - " \"scriptPubKey\": \"script\", (string) The script key\n" - " \"desc\": \"descriptor\", (string) A specialized descriptor for the matched scriptPubKey\n" - " \"amount\": x.xxx, (numeric) The total amount in " + CURRENCY_UNIT + " of the unspent output\n" - " \"height\": n, (numeric) Height of the unspent transaction output\n" + " \"txid\" : \"hash\", (string) The transaction id\n" + " \"vout\" : n, (numeric) The vout value\n" + " \"scriptPubKey\" : \"script\", (string) The script key\n" + " \"desc\" : \"descriptor\", (string) A specialized descriptor for the matched scriptPubKey\n" + " \"amount\" : x.xxx, (numeric) The total amount in " + CURRENCY_UNIT + " of the unspent output\n" + " \"height\" : n, (numeric) Height of the unspent transaction output\n" " }\n" " ,...],\n" - " \"total_amount\": x.xxx, (numeric) The total amount of all found unspent outputs in " + CURRENCY_UNIT + "\n" + " \"total_amount\" : 
x.xxx, (numeric) The total amount of all found unspent outputs in " + CURRENCY_UNIT + "\n" "]\n" }, RPCExamples{""}, @@ -2284,10 +2284,10 @@ UniValue dumptxoutset(const JSONRPCRequest& request) }, RPCResult{ "{\n" - " \"coins_written\": n, (numeric) the number of coins written in the snapshot\n" - " \"base_hash\": \"...\", (string) the hash of the base of the snapshot\n" - " \"base_height\": n, (string) the height of the base of the snapshot\n" - " \"path\": \"...\" (string) the absolute path that the snapshot was written to\n" + " \"coins_written\" : n, (numeric) the number of coins written in the snapshot\n" + " \"base_hash\" : \"...\", (string) the hash of the base of the snapshot\n" + " \"base_height\" : n, (string) the height of the base of the snapshot\n" + " \"path\" : \"...\" (string) the absolute path that the snapshot was written to\n" "]\n" }, RPCExamples{ diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index e5994b172b..ab5d830b2a 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -154,7 +154,7 @@ static UniValue generatetodescriptor(const JSONRPCRequest& request) {"maxtries", RPCArg::Type::NUM, /* default */ "1000000", "How many iterations to try."}, }, RPCResult{ - "[ blockhashes ] (array) hashes of blocks generated\n"}, + "[ blockhashes ] (json array) hashes of blocks generated\n"}, RPCExamples{ "\nGenerate 11 blocks to mydesc\n" + HelpExampleCli("generatetodescriptor", "11 \"mydesc\"")}, } @@ -196,7 +196,7 @@ static UniValue generatetoaddress(const JSONRPCRequest& request) {"maxtries", RPCArg::Type::NUM, /* default */ "1000000", "How many iterations to try."}, }, RPCResult{ - "[ blockhashes ] (array) hashes of blocks generated\n" + "[ blockhashes ] (json array) hashes of blocks generated\n" }, RPCExamples{ "\nGenerate 11 blocks to myaddress\n" @@ -231,14 +231,14 @@ static UniValue getmininginfo(const JSONRPCRequest& request) {}, RPCResult{ "{\n" - " \"blocks\": nnn, (numeric) The current block\n" - " \"currentblockweight\": nnn, (numeric, optional) The block weight of the last assembled block (only present if a block was ever assembled)\n" - " \"currentblocktx\": nnn, (numeric, optional) The number of block transactions of the last assembled block (only present if a block was ever assembled)\n" - " \"difficulty\": xxx.xxxxx (numeric) The current difficulty\n" - " \"networkhashps\": nnn, (numeric) The network hashes per second\n" - " \"pooledtx\": n (numeric) The size of the mempool\n" - " \"chain\": \"xxxx\", (string) current network name (main, test, regtest)\n" - " \"warnings\": \"...\" (string) any network and blockchain warnings\n" + " \"blocks\" : nnn, (numeric) The current block\n" + " \"currentblockweight\" : nnn, (numeric, optional) The block weight of the last assembled block (only present if a block was ever assembled)\n" + " \"currentblocktx\" : nnn, (numeric, optional) The number of block transactions of the last assembled block (only present if a block was ever assembled)\n" + " \"difficulty\" : xxx.xxxxx (numeric) The current difficulty\n" + " \"networkhashps\" : nnn, (numeric) The network hashes per second\n" + " \"pooledtx\" : n (numeric) The size of the mempool\n" + " \"chain\" : \"xxxx\", (string) current network name (main, test, regtest)\n" + " \"warnings\" : \"...\" (string) any network and blockchain warnings\n" "}\n" }, RPCExamples{ @@ -365,16 +365,16 @@ static UniValue getblocktemplate(const JSONRPCRequest& request) " },\n" " \"vbrequired\" : n, (numeric) bit mask of versionbits the server requires set in submissions\n" " 
\"previousblockhash\" : \"xxxx\", (string) The hash of current highest block\n" - " \"transactions\" : [ (array) contents of non-coinbase transactions that should be included in the next block\n" + " \"transactions\" : [ (json array) contents of non-coinbase transactions that should be included in the next block\n" " {\n" " \"data\" : \"xxxx\", (string) transaction data encoded in hexadecimal (byte-for-byte)\n" " \"txid\" : \"xxxx\", (string) transaction id encoded in little-endian hexadecimal\n" " \"hash\" : \"xxxx\", (string) hash encoded in little-endian hexadecimal (including witness data)\n" - " \"depends\" : [ (array) array of numbers \n" + " \"depends\" : [ (json array) array of numbers \n" " n (numeric) transactions before this one (by 1-based index in 'transactions' list) that must be present in the final block if this one is\n" " ,...\n" " ],\n" - " \"fee\": n, (numeric) difference in value between transaction inputs and outputs (in satoshis); for coinbase transactions, this is a negative Number of the total collected block fees (ie, not including the block subsidy); if key is not present, fee is unknown and clients MUST NOT assume there isn't one\n" + " \"fee\" : n, (numeric) difference in value between transaction inputs and outputs (in satoshis); for coinbase transactions, this is a negative Number of the total collected block fees (ie, not including the block subsidy); if key is not present, fee is unknown and clients MUST NOT assume there isn't one\n" " \"sigops\" : n, (numeric) total SigOps cost, as counted for purposes of block limits; if key is not present, sigop cost is unknown and clients MUST NOT assume it is zero\n" " \"weight\" : n, (numeric) total transaction weight, as counted for purposes of block limits\n" " }\n" @@ -850,7 +850,7 @@ static UniValue estimatesmartfee(const JSONRPCRequest& request) RPCResult{ "{\n" " \"feerate\" : x.x, (numeric, optional) estimate fee rate in " + CURRENCY_UNIT + "/kB\n" - " \"errors\": [ str... ] (json array of strings, optional) Errors encountered during processing\n" + " \"errors\" : [ str... ] (json array of strings, optional) Errors encountered during processing\n" " \"blocks\" : n (numeric) block number where estimate was found\n" "}\n" "\n" @@ -908,7 +908,7 @@ static UniValue estimaterawfee(const JSONRPCRequest& request) " lower buckets."}, }, RPCResult{ - "{\n" + "{ (json object) Results are returned for any horizon which tracks blocks up to the confirmation target\n" " \"short\" : { (json object, optional) estimate for short time horizon\n" " \"feerate\" : x.x, (numeric, optional) estimate fee rate in " + CURRENCY_UNIT + "/kB\n" " \"decay\" : x.x, (numeric) exponential decay (per block) for historical moving average of confirmation data\n" @@ -921,14 +921,22 @@ static UniValue estimaterawfee(const JSONRPCRequest& request) " \"inmempool\" : x.x, (numeric) current number of txs in mempool in the feerate range unconfirmed for at least target blocks\n" " \"leftmempool\" : x.x, (numeric) number of txs over history horizon in the feerate range that left mempool unconfirmed after target\n" " },\n" - " \"fail\" : { ... }, (json object, optional) information about the highest range of feerates to fail to meet the threshold\n" - " \"errors\": [ str... 
] (json array of strings, optional) Errors encountered during processing\n" + " \"fail\" : { (json object, optional) information about the highest range of feerates to fail to meet the threshold\n" + " ...\n" + " },\n" + " \"errors\" : [ (json array, optional) Errors encountered during processing\n" + " \"str\", (string)\n" + " ...\n" + " ],\n" + " },\n" + " \"medium\" : { (json object, optional) estimate for medium time horizon\n" + " ...\n" + " },\n" + " \"long\" : { (json object, optional) estimate for long time horizon\n" + " ...\n" " },\n" - " \"medium\" : { ... }, (json object, optional) estimate for medium time horizon\n" - " \"long\" : { ... } (json object) estimate for long time horizon\n" "}\n" "\n" - "Results are returned for any horizon which tracks blocks up to the confirmation target.\n" }, RPCExamples{ HelpExampleCli("estimaterawfee", "6 0.9") diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp index ba5a3d83d1..2b4ee62c71 100644 --- a/src/rpc/misc.cpp +++ b/src/rpc/misc.cpp @@ -83,8 +83,9 @@ static UniValue createmultisig(const JSONRPCRequest& request) }, RPCResult{ "{\n" - " \"address\":\"multisigaddress\", (string) The value of the new multisig address.\n" - " \"redeemScript\":\"script\" (string) The string value of the hex-encoded redemption script.\n" + " \"address\" : \"multisigaddress\", (string) The value of the new multisig address.\n" + " \"redeemScript\" : \"script\" (string) The string value of the hex-encoded redemption script.\n" + " \"descriptor\" : \"descriptor\" (string) The descriptor for this multisig\n" "}\n" }, RPCExamples{ @@ -121,9 +122,13 @@ static UniValue createmultisig(const JSONRPCRequest& request) CScript inner; const CTxDestination dest = AddAndGetMultisigDestination(required, pubkeys, output_type, keystore, inner); + // Make the descriptor + std::unique_ptr<Descriptor> descriptor = InferDescriptor(GetScriptForDestination(dest), keystore); + UniValue result(UniValue::VOBJ); result.pushKV("address", EncodeDestination(dest)); result.pushKV("redeemScript", HexStr(inner.begin(), inner.end())); + result.pushKV("descriptor", descriptor->ToString()); return result; } @@ -184,7 +189,7 @@ UniValue deriveaddresses(const JSONRPCRequest& request) {"range", RPCArg::Type::RANGE, RPCArg::Optional::OMITTED_NAMED_ARG, "If a ranged descriptor is used, this specifies the end or the range (in [begin,end] notation) to derive."}, }, RPCResult{ - "[ address ] (array) the derived addresses\n" + "[ address ] (json array) the derived addresses\n" }, RPCExamples{ "First three native segwit receive addresses\n" + @@ -445,13 +450,13 @@ static UniValue getmemoryinfo(const JSONRPCRequest& request) { RPCResult{"mode \"stats\"", "{\n" - " \"locked\": { (json object) Information about locked memory manager\n" - " \"used\": xxxxx, (numeric) Number of bytes used\n" - " \"free\": xxxxx, (numeric) Number of bytes available in current arenas\n" - " \"total\": xxxxxxx, (numeric) Total number of bytes managed\n" - " \"locked\": xxxxxx, (numeric) Amount of bytes that succeeded locking. 
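// A minimal illustrative sketch (helper name and JSON-string parsing are assumptions, not
// code from this change) of reading the new createmultisig "descriptor" field, added in the
// rpc/misc.cpp hunk above, out of a reply object with the bundled UniValue API:
#include <univalue.h>
#include <string>

std::string GetMultisigDescriptor(const std::string& json_reply)
{
    UniValue reply;
    if (!reply.read(json_reply)) return "";       // not valid JSON
    if (!reply.exists("descriptor")) return "";   // field absent (e.g. an older node)
    return reply["descriptor"].get_str();         // the descriptor inferred from the multisig script
}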
If this number is smaller than total, locking pages failed at some point and key data could be swapped to disk.\n" - " \"chunks_used\": xxxxx, (numeric) Number allocated chunks\n" - " \"chunks_free\": xxxxx, (numeric) Number unused chunks\n" + " \"locked\" : { (json object) Information about locked memory manager\n" + " \"used\" : xxxxx, (numeric) Number of bytes used\n" + " \"free\" : xxxxx, (numeric) Number of bytes available in current arenas\n" + " \"total\" : xxxxxxx, (numeric) Total number of bytes managed\n" + " \"locked\" : xxxxxx, (numeric) Amount of bytes that succeeded locking. If this number is smaller than total, locking pages failed at some point and key data could be swapped to disk.\n" + " \"chunks_used\" : xxxxx, (numeric) Number allocated chunks\n" + " \"chunks_free\" : xxxxx, (numeric) Number unused chunks\n" " }\n" "}\n" }, @@ -524,7 +529,7 @@ UniValue logging(const JSONRPCRequest& request) }, RPCResult{ "{ (json object where keys are the logging categories, and values indicates its status\n" - " \"category\": true|false, (bool) if being debug logged or not. false:inactive, true:active\n" + " \"category\" : true|false, (boolean) if being debug logged or not. false:inactive, true:active\n" " ...\n" "}\n" }, diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 1ce49709b2..92542539df 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -79,48 +79,48 @@ static UniValue getpeerinfo(const JSONRPCRequest& request) RPCResult{ "[\n" " {\n" - " \"id\": n, (numeric) Peer index\n" - " \"addr\":\"host:port\", (string) The IP address and port of the peer\n" - " \"addrbind\":\"ip:port\", (string) Bind address of the connection to the peer\n" - " \"addrlocal\":\"ip:port\", (string) Local address as reported by the peer\n" - " \"mapped_as\":\"mapped_as\", (string) The AS in the BGP route to the peer used for diversifying peer selection\n" - " \"services\":\"xxxxxxxxxxxxxxxx\", (string) The services offered\n" - " \"servicesnames\":[ (array) the services offered, in human-readable form\n" + " \"id\" : n, (numeric) Peer index\n" + " \"addr\" : \"host:port\", (string) The IP address and port of the peer\n" + " \"addrbind\" : \"ip:port\", (string) Bind address of the connection to the peer\n" + " \"addrlocal\" : \"ip:port\", (string) Local address as reported by the peer\n" + " \"mapped_as\" : \"mapped_as\", (string) The AS in the BGP route to the peer used for diversifying peer selection\n" + " \"services\" : \"xxxxxxxxxxxxxxxx\", (string) The services offered\n" + " \"servicesnames\" : [ (json array) the services offered, in human-readable form\n" " \"SERVICE_NAME\", (string) the service name if it is recognised\n" " ...\n" " ],\n" - " \"relaytxes\":true|false, (boolean) Whether peer has asked us to relay transactions to it\n" - " \"lastsend\": ttt, (numeric) The " + UNIX_EPOCH_TIME + " of the last send\n" - " \"lastrecv\": ttt, (numeric) The " + UNIX_EPOCH_TIME + " of the last receive\n" - " \"bytessent\": n, (numeric) The total bytes sent\n" - " \"bytesrecv\": n, (numeric) The total bytes received\n" - " \"conntime\": ttt, (numeric) The " + UNIX_EPOCH_TIME + " of the connection\n" - " \"timeoffset\": ttt, (numeric) The time offset in seconds\n" - " \"pingtime\": n, (numeric) ping time (if available)\n" - " \"minping\": n, (numeric) minimum observed ping time (if any at all)\n" - " \"pingwait\": n, (numeric) ping wait (if non-zero)\n" - " \"version\": v, (numeric) The peer version, such as 70001\n" - " \"subver\": \"/Satoshi:0.8.5/\", (string) The string version\n" - " \"inbound\": 
true|false, (boolean) Inbound (true) or Outbound (false)\n" - " \"addnode\": true|false, (boolean) Whether connection was due to addnode/-connect or if it was an automatic/inbound connection\n" - " \"startingheight\": n, (numeric) The starting height (block) of the peer\n" - " \"banscore\": n, (numeric) The ban score\n" - " \"synced_headers\": n, (numeric) The last header we have in common with this peer\n" - " \"synced_blocks\": n, (numeric) The last block we have in common with this peer\n" - " \"inflight\": [\n" + " \"relaytxes\" : true|false, (boolean) Whether peer has asked us to relay transactions to it\n" + " \"lastsend\" : ttt, (numeric) The " + UNIX_EPOCH_TIME + " of the last send\n" + " \"lastrecv\" : ttt, (numeric) The " + UNIX_EPOCH_TIME + " of the last receive\n" + " \"bytessent\" : n, (numeric) The total bytes sent\n" + " \"bytesrecv\" : n, (numeric) The total bytes received\n" + " \"conntime\" : ttt, (numeric) The " + UNIX_EPOCH_TIME + " of the connection\n" + " \"timeoffset\" : ttt, (numeric) The time offset in seconds\n" + " \"pingtime\" : n, (numeric) ping time (if available)\n" + " \"minping\" : n, (numeric) minimum observed ping time (if any at all)\n" + " \"pingwait\" : n, (numeric) ping wait (if non-zero)\n" + " \"version\" : v, (numeric) The peer version, such as 70001\n" + " \"subver\" : \"/Satoshi:0.8.5/\", (string) The string version\n" + " \"inbound\" : true|false, (boolean) Inbound (true) or Outbound (false)\n" + " \"addnode\" : true|false, (boolean) Whether connection was due to addnode/-connect or if it was an automatic/inbound connection\n" + " \"startingheight\" : n, (numeric) The starting height (block) of the peer\n" + " \"banscore\" : n, (numeric) The ban score\n" + " \"synced_headers\" : n, (numeric) The last header we have in common with this peer\n" + " \"synced_blocks\" : n, (numeric) The last block we have in common with this peer\n" + " \"inflight\" : [\n" " n, (numeric) The heights of blocks we're currently asking from this peer\n" " ...\n" " ],\n" - " \"whitelisted\": true|false, (boolean) Whether the peer is whitelisted\n" - " \"minfeefilter\": n, (numeric) The minimum fee rate for transactions this peer accepts\n" - " \"bytessent_per_msg\": {\n" - " \"msg\": n, (numeric) The total bytes sent aggregated by message type\n" + " \"whitelisted\" : true|false, (boolean) Whether the peer is whitelisted\n" + " \"minfeefilter\" : n, (numeric) The minimum fee rate for transactions this peer accepts\n" + " \"bytessent_per_msg\" : {\n" + " \"msg\" : n, (numeric) The total bytes sent aggregated by message type\n" " When a message type is not listed in this json object, the bytes sent are 0.\n" " Only known message types can appear as keys in the object.\n" " ...\n" " },\n" - " \"bytesrecv_per_msg\": {\n" - " \"msg\": n, (numeric) The total bytes received aggregated by message type\n" + " \"bytesrecv_per_msg\" : {\n" + " \"msg\" : n, (numeric) The total bytes received aggregated by message type\n" " When a message type is not listed in this json object, the bytes received are 0.\n" " Only known message types can appear as keys in the object and all bytes received of unknown message types are listed under '"+NET_MESSAGE_COMMAND_OTHER+"'.\n" " ...\n" @@ -387,17 +387,17 @@ static UniValue getnettotals(const JSONRPCRequest& request) {}, RPCResult{ "{\n" - " \"totalbytesrecv\": n, (numeric) Total bytes received\n" - " \"totalbytessent\": n, (numeric) Total bytes sent\n" - " \"timemillis\": t, (numeric) Current UNIX time in milliseconds\n" - " \"uploadtarget\":\n" 
+ " \"totalbytesrecv\" : n, (numeric) Total bytes received\n" + " \"totalbytessent\" : n, (numeric) Total bytes sent\n" + " \"timemillis\" : t, (numeric) Current UNIX time in milliseconds\n" + " \"uploadtarget\" : \n" " {\n" - " \"timeframe\": n, (numeric) Length of the measuring timeframe in seconds\n" - " \"target\": n, (numeric) Target in bytes\n" - " \"target_reached\": true|false, (boolean) True if target is reached\n" - " \"serve_historical_blocks\": true|false, (boolean) True if serving historical blocks\n" - " \"bytes_left_in_cycle\": t, (numeric) Bytes left in current time cycle\n" - " \"time_left_in_cycle\": t (numeric) Seconds left in current time cycle\n" + " \"timeframe\" : n, (numeric) Length of the measuring timeframe in seconds\n" + " \"target\" : n, (numeric) Target in bytes\n" + " \"target_reached\" : true|false, (boolean) True if target is reached\n" + " \"serve_historical_blocks\" : true|false, (boolean) True if serving historical blocks\n" + " \"bytes_left_in_cycle\" : t, (numeric) Bytes left in current time cycle\n" + " \"time_left_in_cycle\" : t (numeric) Seconds left in current time cycle\n" " }\n" "}\n" }, @@ -452,40 +452,40 @@ static UniValue getnetworkinfo(const JSONRPCRequest& request) "Returns an object containing various state info regarding P2P networking.\n", {}, RPCResult{ - "{\n" - " \"version\": xxxxx, (numeric) the server version\n" - " \"subversion\": \"/Satoshi:x.x.x/\", (string) the server subversion string\n" - " \"protocolversion\": xxxxx, (numeric) the protocol version\n" - " \"localservices\": \"xxxxxxxxxxxxxxxx\", (string) the services we offer to the network\n" - " \"localservicesnames\": [ (array) the services we offer to the network, in human-readable form\n" + "{ (json object)\n" + " \"version\" : xxxxx, (numeric) the server version\n" + " \"subversion\" : \"str\", (string) the server subversion string\n" + " \"protocolversion\" : xxxxx, (numeric) the protocol version\n" + " \"localservices\" : \"hex\", (string) the services we offer to the network\n" + " \"localservicesnames\" : [ (json array) the services we offer to the network, in human-readable form\n" " \"SERVICE_NAME\", (string) the service name\n" " ...\n" " ],\n" - " \"localrelay\": true|false, (bool) true if transaction relay is requested from peers\n" - " \"timeoffset\": xxxxx, (numeric) the time offset\n" - " \"connections\": xxxxx, (numeric) the number of connections\n" - " \"networkactive\": true|false, (bool) whether p2p networking is enabled\n" - " \"networks\": [ (array) information per network\n" - " {\n" - " \"name\": \"xxx\", (string) network (ipv4, ipv6 or onion)\n" - " \"limited\": true|false, (boolean) is the network limited using -onlynet?\n" - " \"reachable\": true|false, (boolean) is the network reachable?\n" - " \"proxy\": \"host:port\" (string) the proxy that is used for this network, or empty if none\n" - " \"proxy_randomize_credentials\": true|false, (string) Whether randomized credentials are used\n" - " }\n" - " ,...\n" + " \"localrelay\" : true|false, (boolean) true if transaction relay is requested from peers\n" + " \"timeoffset\" : xxxxx, (numeric) the time offset\n" + " \"connections\" : xxxxx, (numeric) the number of connections\n" + " \"networkactive\" : true|false, (boolean) whether p2p networking is enabled\n" + " \"networks\" : [ (json array) information per network\n" + " { (json object)\n" + " \"name\" : \"str\", (string) network (ipv4, ipv6 or onion)\n" + " \"limited\" : true|false, (boolean) is the network limited using -onlynet?\n" + " 
\"reachable\" : true|false, (boolean) is the network reachable?\n" + " \"proxy\" : \"str\" (string) (\"host:port\") the proxy that is used for this network, or empty if none\n" + " \"proxy_randomize_credentials\" : true|false, (boolean) Whether randomized credentials are used\n" + " },\n" + " ...\n" " ],\n" - " \"relayfee\": x.xxxxxxxx, (numeric) minimum relay fee for transactions in " + CURRENCY_UNIT + "/kB\n" - " \"incrementalfee\": x.xxxxxxxx, (numeric) minimum fee increment for mempool limiting or BIP 125 replacement in " + CURRENCY_UNIT + "/kB\n" - " \"localaddresses\": [ (array) list of local addresses\n" - " {\n" - " \"address\": \"xxxx\", (string) network address\n" - " \"port\": xxx, (numeric) network port\n" - " \"score\": xxx (numeric) relative score\n" - " }\n" - " ,...\n" - " ]\n" - " \"warnings\": \"...\" (string) any network and blockchain warnings\n" + " \"relayfee\" : x.xxxxxxxx, (numeric) minimum relay fee for transactions in " + CURRENCY_UNIT + "/kB\n" + " \"incrementalfee\" : x.xxxxxxxx, (numeric) minimum fee increment for mempool limiting or BIP 125 replacement in " + CURRENCY_UNIT + "/kB\n" + " \"localaddresses\" : [ (json array) list of local addresses\n" + " { (json object)\n" + " \"address\" : \"xxxx\", (string) network address\n" + " \"port\" : xxx, (numeric) network port\n" + " \"score\" : xxx (numeric) relative score\n" + " },\n" + " ...\n" + " ],\n" + " \"warnings\" : \"str\", (string) any network and blockchain warnings\n" "}\n" }, RPCExamples{ @@ -695,10 +695,10 @@ static UniValue getnodeaddresses(const JSONRPCRequest& request) RPCResult{ "[\n" " {\n" - " \"time\": ttt, (numeric) The " + UNIX_EPOCH_TIME + " of when the node was last seen\n" - " \"services\": n, (numeric) The services offered\n" - " \"address\": \"host\", (string) The address of the node\n" - " \"port\": n (numeric) The port of the node\n" + " \"time\" : ttt, (numeric) The " + UNIX_EPOCH_TIME + " of when the node was last seen\n" + " \"services\" : n, (numeric) The services offered\n" + " \"address\" : \"host\", (string) The address of the node\n" + " \"port\" : n (numeric) The port of the node\n" " }\n" " ,....\n" "]\n" diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index cea59b2c7a..cd1c657c26 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -98,7 +98,7 @@ static UniValue getrawtransaction(const JSONRPCRequest& request) }, RPCResult{"if verbose is set to true", "{\n" - " \"in_active_chain\": b, (bool) Whether specified block is in the active chain or not (only present with explicit \"blockhash\" argument)\n" + " \"in_active_chain\" : b, (boolean) Whether specified block is in the active chain or not (only present with explicit \"blockhash\" argument)\n" " \"hex\" : \"data\", (string) The serialized, hex-encoded data for 'txid'\n" " \"txid\" : \"id\", (string) The transaction id (same as provided)\n" " \"hash\" : \"id\", (string) The transaction hash (differs from txid for witness transactions)\n" @@ -109,14 +109,14 @@ static UniValue getrawtransaction(const JSONRPCRequest& request) " \"locktime\" : ttt, (numeric) The lock time\n" " \"vin\" : [ (array of json objects)\n" " {\n" - " \"txid\": \"id\", (string) The transaction id\n" - " \"vout\": n, (numeric) \n" - " \"scriptSig\": { (json object) The script\n" - " \"asm\": \"asm\", (string) asm\n" - " \"hex\": \"hex\" (string) hex\n" + " \"txid\" : \"id\", (string) The transaction id\n" + " \"vout\" : n, (numeric) \n" + " \"scriptSig\" : { (json object) The script\n" + " \"asm\" : \"asm\", 
(string) asm\n" + " \"hex\" : \"hex\" (string) hex\n" " },\n" - " \"sequence\": n (numeric) The script sequence number\n" - " \"txinwitness\": [\"hex\", ...] (array of string) hex-encoded witness data (if any)\n" + " \"sequence\" : n (numeric) The script sequence number\n" + " \"txinwitness\" : [\"hex\", ...] (array of string) hex-encoded witness data (if any)\n" " }\n" " ,...\n" " ],\n" @@ -442,14 +442,14 @@ static UniValue decoderawtransaction(const JSONRPCRequest& request) " \"locktime\" : ttt, (numeric) The lock time\n" " \"vin\" : [ (array of json objects)\n" " {\n" - " \"txid\": \"id\", (string) The transaction id\n" - " \"vout\": n, (numeric) The output number\n" - " \"scriptSig\": { (json object) The script\n" - " \"asm\": \"asm\", (string) asm\n" - " \"hex\": \"hex\" (string) hex\n" + " \"txid\" : \"id\", (string) The transaction id\n" + " \"vout\" : n, (numeric) The output number\n" + " \"scriptSig\" : { (json object) The script\n" + " \"asm\" : \"asm\", (string) asm\n" + " \"hex\" : \"hex\" (string) hex\n" " },\n" - " \"txinwitness\": [\"hex\", ...] (array of string) hex-encoded witness data (if any)\n" - " \"sequence\": n (numeric) The script sequence number\n" + " \"txinwitness\" : [\"hex\", ...] (array of string) hex-encoded witness data (if any)\n" + " \"sequence\" : n (numeric) The script sequence number\n" " }\n" " ,...\n" " ],\n" @@ -514,20 +514,20 @@ static UniValue decodescript(const JSONRPCRequest& request) }, RPCResult{ "{\n" - " \"asm\":\"asm\", (string) Script public key\n" - " \"type\":\"type\", (string) The output type (e.g. "+GetAllOutputTypes()+")\n" - " \"reqSigs\": n, (numeric) The required signatures\n" - " \"addresses\": [ (json array of string)\n" + " \"asm\" : \"asm\", (string) Script public key\n" + " \"type\" : \"type\", (string) The output type (e.g. "+GetAllOutputTypes()+")\n" + " \"reqSigs\" : n, (numeric) The required signatures\n" + " \"addresses\" : [ (json array of string)\n" " \"address\" (string) bitcoin address\n" " ,...\n" " ],\n" " \"p2sh\":\"str\" (string) address of P2SH script wrapping this redeem script (not returned if the script is already a P2SH).\n" - " \"segwit\": { (json object) Result of a witness script public key wrapping this redeem script (not returned if the script is a P2SH or witness).\n" - " \"asm\":\"str\", (string) String representation of the script public key\n" - " \"hex\":\"hexstr\", (string) Hex string of the script public key\n" - " \"type\":\"str\", (string) The type of the script public key (e.g. witness_v0_keyhash or witness_v0_scripthash)\n" - " \"reqSigs\": n, (numeric) The required signatures (always 1)\n" - " \"addresses\": [ (json array of string) (always length 1)\n" + " \"segwit\" : { (json object) Result of a witness script public key wrapping this redeem script (not returned if the script is a P2SH or witness).\n" + " \"asm\" : \"str\", (string) String representation of the script public key\n" + " \"hex\" : \"hexstr\", (string) Hex string of the script public key\n" + " \"type\" : \"str\", (string) The type of the script public key (e.g. 
witness_v0_keyhash or witness_v0_scripthash)\n" + " \"reqSigs\" : n, (numeric) The required signatures (always 1)\n" + " \"addresses\" : [ (json array of string) (always length 1)\n" " \"address\" (string) segwit address\n" " ,...\n" " ],\n" @@ -846,7 +846,7 @@ static UniValue testmempoolaccept(const JSONRPCRequest& request) {"maxfeerate", RPCArg::Type::AMOUNT, /* default */ FormatMoney(DEFAULT_MAX_RAW_TX_FEE_RATE.GetFeePerK()), "Reject transactions whose fee rate is higher than the specified value, expressed in " + CURRENCY_UNIT + "/kB\n"}, }, RPCResult{ - "[ (array) The result of the mempool acceptance test for each raw transaction in the input array.\n" + "[ (json array) The result of the mempool acceptance test for each raw transaction in the input array.\n" " Length is exactly one for now.\n" " {\n" " \"txid\" (string) The transaction hash in hex\n" @@ -998,7 +998,7 @@ UniValue decodepsbt(const JSONRPCRequest& request) " \"asm\" : \"asm\", (string) The asm\n" " \"hex\" : \"hex\", (string) The hex\n" " }\n" - " \"final_scriptwitness\": [\"hex\", ...] (array of string) hex-encoded witness data (if any)\n" + " \"final_scriptwitness\" : [\"hex\", ...] (array of string) hex-encoded witness data (if any)\n" " \"unknown\" : { (json object) The unknown global fields\n" " \"key\" : \"value\" (key-value pair) An unknown key-value pair\n" " ...\n" @@ -1292,11 +1292,10 @@ UniValue finalizepsbt(const JSONRPCRequest& request) " extract and return the complete transaction in normal network serialization instead of the PSBT."}, }, RPCResult{ - "{\n" - " \"psbt\" : \"value\", (string) The base64-encoded partially signed transaction if not extracted\n" - " \"hex\" : \"value\", (string) The hex-encoded network transaction if extracted\n" - " \"complete\" : true|false, (boolean) If the transaction has a complete set of signatures\n" - " ]\n" + "{ (json object)\n" + " \"psbt\" : \"str\", (string) The base64-encoded partially signed transaction if not extracted\n" + " \"hex\" : \"hex\", (string) The hex-encoded network transaction if extracted\n" + " \"complete\" : true|false, (boolean) If the transaction has a complete set of signatures\n" "}\n" }, RPCExamples{ diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index df8e687d82..b62490ed29 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -198,14 +198,14 @@ static UniValue getrpcinfo(const JSONRPCRequest& request) {}, RPCResult{ "{\n" - " \"active_commands\" (array) All active commands\n" + " \"active_commands\" (json array) All active commands\n" " [\n" - " { (object) Information about an active command\n" + " { (json object) Information about an active command\n" " \"method\" (string) The name of the RPC command \n" " \"duration\" (numeric) The running time in microseconds\n" " },...\n" " ],\n" - " \"logpath\": \"xxx\" (string) The complete file path to the debug log\n" + " \"logpath\" : \"xxx\" (string) The complete file path to the debug log\n" "}\n" }, RPCExamples{ diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index b919046ab6..d0865d2793 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -1281,13 +1281,11 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn return ss.GetHash(); } - static const uint256 one(uint256S("0000000000000000000000000000000000000000000000000000000000000001")); - // Check for invalid use of SIGHASH_SINGLE if ((nHashType & 0x1f) == SIGHASH_SINGLE) { if (nIn >= txTo.vout.size()) { // nOut out of range - return one; + return UINT256_ONE(); 
} } diff --git a/src/script/sign.cpp b/src/script/sign.cpp index 8791d1542a..58eae3ce96 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -144,8 +144,13 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator ret.push_back(valtype()); // workaround CHECKMULTISIG bug for (size_t i = 1; i < vSolutions.size() - 1; ++i) { CPubKey pubkey = CPubKey(vSolutions[i]); - if (ret.size() < required + 1 && CreateSig(creator, sigdata, provider, sig, pubkey, scriptPubKey, sigversion)) { - ret.push_back(std::move(sig)); + // We need to always call CreateSig in order to fill sigdata with all + // possible signatures that we can create. This will allow further PSBT + // processing to work as it needs all possible signature and pubkey pairs + if (CreateSig(creator, sigdata, provider, sig, pubkey, scriptPubKey, sigversion)) { + if (ret.size() < required + 1) { + ret.push_back(std::move(sig)); + } } } bool ok = ret.size() == required + 1; diff --git a/src/serialize.h b/src/serialize.h index 7fa669ebdb..cee7225bcb 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -25,6 +25,9 @@ static const unsigned int MAX_SIZE = 0x02000000; +/** Maximum amount of memory (in bytes) to allocate at once when deserializing vectors. */ +static const unsigned int MAX_VECTOR_ALLOCATE = 5000000; + /** * Dummy data type to identify deserializing constructors. * @@ -490,12 +493,13 @@ public: template<typename Formatter, typename T> static inline Wrapper<Formatter, T&> Using(T&& t) { return Wrapper<Formatter, T&>(t); } -#define VARINT(obj, ...) Using<VarIntFormatter<__VA_ARGS__>>(obj) +#define VARINT_MODE(obj, mode) Using<VarIntFormatter<mode>>(obj) +#define VARINT(obj) Using<VarIntFormatter<VarIntMode::DEFAULT>>(obj) #define COMPACTSIZE(obj) CCompactSize(REF(obj)) #define LIMITED_STRING(obj,n) LimitedString< n >(REF(obj)) /** Serialization wrapper class for integers in VarInt format. */ -template<VarIntMode Mode=VarIntMode::DEFAULT> +template<VarIntMode Mode> struct VarIntFormatter { template<typename Stream, typename I> void Ser(Stream &s, I v) @@ -593,6 +597,53 @@ public: template<typename I> BigEndian<I> WrapBigEndian(I& n) { return BigEndian<I>(n); } +/** Formatter to serialize/deserialize vector elements using another formatter + * + * Example: + * struct X { + * std::vector<uint64_t> v; + * SERIALIZE_METHODS(X, obj) { READWRITE(Using<VectorFormatter<VarInt>>(obj.v)); } + * }; + * will define a struct that contains a vector of uint64_t, which is serialized + * as a vector of VarInt-encoded integers. + * + * V is not required to be an std::vector type. It works for any class that + * exposes a value_type, size, reserve, push_back, and const iterators. + */ +template<class Formatter> +struct VectorFormatter +{ + template<typename Stream, typename V> + void Ser(Stream& s, const V& v) + { + WriteCompactSize(s, v.size()); + for (const typename V::value_type& elem : v) { + s << Using<Formatter>(elem); + } + } + + template<typename Stream, typename V> + void Unser(Stream& s, V& v) + { + v.clear(); + size_t size = ReadCompactSize(s); + size_t allocated = 0; + while (allocated < size) { + // For DoS prevention, do not blindly allocate as much as the stream claims to contain. + // Instead, allocate in 5MiB batches, so that an attacker actually needs to provide + // X MiB of data to make us allocate X+5 Mib. 
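// A standalone sketch of the batched-allocation idea VectorFormatter::Unser uses above,
// written against a plain std::istream instead of the serialization framework; the function
// name, element type, and fixed 5 MB batch size here are illustrative assumptions.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <istream>
#include <vector>

std::vector<uint32_t> ReadVectorBatched(std::istream& in, size_t claimed_size)
{
    constexpr size_t kMaxBatchBytes = 5000000; // grow in ~5 MB steps; never trust the size prefix
    std::vector<uint32_t> out;
    size_t allocated = 0;
    while (allocated < claimed_size) {
        // Only reserve a little past what has actually been read so far, so a bogus
        // size prefix cannot force a huge allocation up front.
        allocated = std::min(claimed_size, allocated + kMaxBatchBytes / sizeof(uint32_t));
        out.reserve(allocated);
        while (out.size() < allocated) {
            uint32_t v;
            if (!in.read(reinterpret_cast<char*>(&v), sizeof(v))) return out; // input ran out early
            out.push_back(v);
        }
    }
    return out;
}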
+ static_assert(sizeof(typename V::value_type) <= MAX_VECTOR_ALLOCATE, "Vector element size too large"); + allocated = std::min(size, allocated + MAX_VECTOR_ALLOCATE / sizeof(typename V::value_type)); + v.reserve(allocated); + while (v.size() < allocated) { + typename V::value_type val; + s >> Using<Formatter>(val); + v.push_back(std::move(val)); + } + } + }; +}; + /** * Forward declarations */ @@ -673,6 +724,20 @@ inline void Unserialize(Stream& is, T&& a) a.Unserialize(is); } +/** Default formatter. Serializes objects as themselves. + * + * The vector/prevector serialization code passes this to VectorFormatter + * to enable reusing that logic. It shouldn't be needed elsewhere. + */ +struct DefaultFormatter +{ + template<typename Stream, typename T> + static void Ser(Stream& s, const T& t) { Serialize(s, t); } + + template<typename Stream, typename T> + static void Unser(Stream& s, T& t) { Unserialize(s, t); } +}; + @@ -713,9 +778,7 @@ void Serialize_impl(Stream& os, const prevector<N, T>& v, const unsigned char&) template<typename Stream, unsigned int N, typename T, typename V> void Serialize_impl(Stream& os, const prevector<N, T>& v, const V&) { - WriteCompactSize(os, v.size()); - for (typename prevector<N, T>::const_iterator vi = v.begin(); vi != v.end(); ++vi) - ::Serialize(os, (*vi)); + Serialize(os, Using<VectorFormatter<DefaultFormatter>>(v)); } template<typename Stream, unsigned int N, typename T> @@ -744,19 +807,7 @@ void Unserialize_impl(Stream& is, prevector<N, T>& v, const unsigned char&) template<typename Stream, unsigned int N, typename T, typename V> void Unserialize_impl(Stream& is, prevector<N, T>& v, const V&) { - v.clear(); - unsigned int nSize = ReadCompactSize(is); - unsigned int i = 0; - unsigned int nMid = 0; - while (nMid < nSize) - { - nMid += 5000000 / sizeof(T); - if (nMid > nSize) - nMid = nSize; - v.resize_uninitialized(nMid); - for (; i < nMid; ++i) - Unserialize(is, v[i]); - } + Unserialize(is, Using<VectorFormatter<DefaultFormatter>>(v)); } template<typename Stream, unsigned int N, typename T> @@ -793,9 +844,7 @@ void Serialize_impl(Stream& os, const std::vector<T, A>& v, const bool&) template<typename Stream, typename T, typename A, typename V> void Serialize_impl(Stream& os, const std::vector<T, A>& v, const V&) { - WriteCompactSize(os, v.size()); - for (typename std::vector<T, A>::const_iterator vi = v.begin(); vi != v.end(); ++vi) - ::Serialize(os, (*vi)); + Serialize(os, Using<VectorFormatter<DefaultFormatter>>(v)); } template<typename Stream, typename T, typename A> @@ -824,19 +873,7 @@ void Unserialize_impl(Stream& is, std::vector<T, A>& v, const unsigned char&) template<typename Stream, typename T, typename A, typename V> void Unserialize_impl(Stream& is, std::vector<T, A>& v, const V&) { - v.clear(); - unsigned int nSize = ReadCompactSize(is); - unsigned int i = 0; - unsigned int nMid = 0; - while (nMid < nSize) - { - nMid += 5000000 / sizeof(T); - if (nMid > nSize) - nMid = nSize; - v.resize(nMid); - for (; i < nMid; i++) - Unserialize(is, v[i]); - } + Unserialize(is, Using<VectorFormatter<DefaultFormatter>>(v)); } template<typename Stream, typename T, typename A> diff --git a/src/test/fuzz/FuzzedDataProvider.h b/src/test/fuzz/FuzzedDataProvider.h index 1b5b4bb012..3e069eba69 100644 --- a/src/test/fuzz/FuzzedDataProvider.h +++ b/src/test/fuzz/FuzzedDataProvider.h @@ -13,11 +13,10 @@ #ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ #define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ -#include <limits.h> -#include <stddef.h> -#include <stdint.h> - #include 
<algorithm> +#include <climits> +#include <cstddef> +#include <cstdint> #include <cstring> #include <initializer_list> #include <string> @@ -25,8 +24,10 @@ #include <utility> #include <vector> +// In addition to the comments below, the API is also briefly documented at +// https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider class FuzzedDataProvider { -public: + public: // |data| is an array of length |size| that the FuzzedDataProvider wraps to // provide more granular access. |data| must outlive the FuzzedDataProvider. FuzzedDataProvider(const uint8_t *data, size_t size) @@ -143,9 +144,9 @@ public: return ConsumeBytes<T>(remaining_bytes_); } + // Returns a std::string containing all remaining bytes of the input data. // Prefer using |ConsumeRemainingBytes| unless you actually need a std::string // object. - // Returns a std::vector containing all remaining bytes of the input data. std::string ConsumeRemainingBytesAsString() { return ConsumeBytesAsString(remaining_bytes_); } @@ -161,7 +162,7 @@ public: // Reads one byte and returns a bool, or false when no data remains. bool ConsumeBool() { return 1 & ConsumeIntegral<uint8_t>(); } - // Returns a copy of a value selected from a fixed-size |array|. + // Returns a copy of the value selected from the given fixed-size |array|. template <typename T, size_t size> T PickValueInArray(const T (&array)[size]) { static_assert(size > 0, "The array must be non empty."); @@ -170,11 +171,14 @@ public: template <typename T> T PickValueInArray(std::initializer_list<const T> list) { - // static_assert(list.size() > 0, "The array must be non empty."); + // TODO(Dor1s): switch to static_assert once C++14 is allowed. + if (!list.size()) + abort(); + return *(list.begin() + ConsumeIntegralInRange<size_t>(0, list.size() - 1)); } - // Return an enum value. The enum must start at 0 and be contiguous. It must + // Returns an enum value. The enum must start at 0 and be contiguous. It must // also contain |kMaxValue| aliased to its largest (inclusive) value. Such as: // enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue }; template <typename T> T ConsumeEnum() { @@ -183,10 +187,60 @@ public: 0, static_cast<uint32_t>(T::kMaxValue))); } + // Returns a floating point number in the range [0.0, 1.0]. If there's no + // input data left, always returns 0. + template <typename T> T ConsumeProbability() { + static_assert(std::is_floating_point<T>::value, + "A floating point type is required."); + + // Use different integral types for different floating point types in order + // to provide better density of the resulting values. + using IntegralType = + typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t, + uint64_t>::type; + + T result = static_cast<T>(ConsumeIntegral<IntegralType>()); + result /= static_cast<T>(std::numeric_limits<IntegralType>::max()); + return result; + } + + // Returns a floating point value in the range [Type's lowest, Type's max] by + // consuming bytes from the input data. If there's no input data left, always + // returns approximately 0. + template <typename T> T ConsumeFloatingPoint() { + return ConsumeFloatingPointInRange<T>(std::numeric_limits<T>::lowest(), + std::numeric_limits<T>::max()); + } + + // Returns a floating point value in the given range by consuming bytes from + // the input data. If there's no input data left, returns |min|. Note that + // |min| must be less than or equal to |max|. 
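// The ConsumeProbability helper above divides a consumed integer by the integral type's
// maximum value to land in [0.0, 1.0]; the same mapping shown standalone (function name assumed):
#include <cstdint>
#include <limits>

double ToProbability(uint64_t raw)
{
    // 0.0 for raw == 0, 1.0 for raw == UINT64_MAX, spread uniformly in between.
    return static_cast<double>(raw) / static_cast<double>(std::numeric_limits<uint64_t>::max());
}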
+ template <typename T> T ConsumeFloatingPointInRange(T min, T max) { + if (min > max) + abort(); + + T range = .0; + T result = min; + constexpr T zero(.0); + if (max > zero && min < zero && max > min + std::numeric_limits<T>::max()) { + // The diff |max - min| would overflow the given floating point type. Use + // the half of the diff as the range and consume a bool to decide whether + // the result is in the first of the second part of the diff. + range = (max / 2.0) - (min / 2.0); + if (ConsumeBool()) { + result += range; + } + } else { + range = max - min; + } + + return result + range * ConsumeProbability<T>(); + } + // Reports the remaining bytes available for fuzzed input. size_t remaining_bytes() { return remaining_bytes_; } -private: + private: FuzzedDataProvider(const FuzzedDataProvider &) = delete; FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete; @@ -209,6 +263,12 @@ private: // which seems to be a natural choice for other implementations as well. // To increase the odds even more, we also call |shrink_to_fit| below. std::vector<T> result(size); + if (size == 0) { + if (num_bytes_to_consume != 0) + abort(); + return result; + } + std::memcpy(result.data(), data_ptr_, num_bytes_to_consume); Advance(num_bytes_to_consume); @@ -230,9 +290,9 @@ private: // Avoid using implementation-defined unsigned to signer conversions. // To learn more, see https://stackoverflow.com/questions/13150449. - if (value <= std::numeric_limits<TS>::max()) + if (value <= std::numeric_limits<TS>::max()) { return static_cast<TS>(value); - else { + } else { constexpr auto TS_min = std::numeric_limits<TS>::min(); return TS_min + static_cast<char>(value - TS_min); } diff --git a/src/test/fuzz/asmap.cpp b/src/test/fuzz/asmap.cpp new file mode 100644 index 0000000000..7f3eef79a1 --- /dev/null +++ b/src/test/fuzz/asmap.cpp @@ -0,0 +1,28 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <netaddress.h> +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> + +#include <cstdint> +#include <vector> + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); + const Network network = fuzzed_data_provider.PickValueInArray({NET_IPV4, NET_IPV6}); + if (fuzzed_data_provider.remaining_bytes() < 16) { + return; + } + CNetAddr net_addr; + net_addr.SetRaw(network, fuzzed_data_provider.ConsumeBytes<uint8_t>(16).data()); + std::vector<bool> asmap; + for (const char cur_byte : fuzzed_data_provider.ConsumeRemainingBytes<char>()) { + for (int bit = 0; bit < 8; ++bit) { + asmap.push_back((cur_byte >> bit) & 1); + } + } + (void)net_addr.GetMappedAS(asmap); +} diff --git a/src/test/fuzz/strprintf.cpp b/src/test/fuzz/strprintf.cpp new file mode 100644 index 0000000000..0de21f0e7c --- /dev/null +++ b/src/test/fuzz/strprintf.cpp @@ -0,0 +1,147 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include <test/fuzz/FuzzedDataProvider.h> +#include <test/fuzz/fuzz.h> +#include <tinyformat.h> +#include <util/strencodings.h> + +#include <algorithm> +#include <cassert> +#include <cstdint> +#include <string> +#include <vector> + +void test_one_input(const std::vector<uint8_t>& buffer) +{ + FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); + const std::string format_string = fuzzed_data_provider.ConsumeRandomLengthString(64); + + const int digits_in_format_specifier = std::count_if(format_string.begin(), format_string.end(), IsDigit); + + // Avoid triggering the following crash bug: + // * strprintf("%987654321000000:", 1); + // + // Avoid triggering the following OOM bug: + // * strprintf("%.222222200000000$", 1.1); + // + // Upstream bug report: https://github.com/c42f/tinyformat/issues/70 + if (format_string.find("%") != std::string::npos && digits_in_format_specifier >= 7) { + return; + } + + // Avoid triggering the following crash bug: + // * strprintf("%1$*1$*", -11111111); + // + // Upstream bug report: https://github.com/c42f/tinyformat/issues/70 + if (format_string.find("%") != std::string::npos && format_string.find("$") != std::string::npos && format_string.find("*") != std::string::npos && digits_in_format_specifier > 0) { + return; + } + + // Avoid triggering the following crash bug: + // * strprintf("%.1s", (char*)nullptr); + // + // (void)strprintf(format_string, (char*)nullptr); + // + // Upstream bug report: https://github.com/c42f/tinyformat/issues/70 + + try { + (void)strprintf(format_string, (signed char*)nullptr); + } catch (const tinyformat::format_error&) { + } + try { + (void)strprintf(format_string, (unsigned char*)nullptr); + } catch (const tinyformat::format_error&) { + } + try { + (void)strprintf(format_string, (void*)nullptr); + } catch (const tinyformat::format_error&) { + } + try { + (void)strprintf(format_string, (bool*)nullptr); + } catch (const tinyformat::format_error&) { + } + try { + (void)strprintf(format_string, (float*)nullptr); + } catch (const tinyformat::format_error&) { + } + try { + (void)strprintf(format_string, (double*)nullptr); + } catch (const tinyformat::format_error&) { + } + try { + (void)strprintf(format_string, (int16_t*)nullptr); + } catch (const tinyformat::format_error&) { + } + try { + (void)strprintf(format_string, (uint16_t*)nullptr); + } catch (const tinyformat::format_error&) { + } + try { + (void)strprintf(format_string, (int32_t*)nullptr); + } catch (const tinyformat::format_error&) { + } + try { + (void)strprintf(format_string, (uint32_t*)nullptr); + } catch (const tinyformat::format_error&) { + } + try { + (void)strprintf(format_string, (int64_t*)nullptr); + } catch (const tinyformat::format_error&) { + } + try { + (void)strprintf(format_string, (uint64_t*)nullptr); + } catch (const tinyformat::format_error&) { + } + + try { + switch (fuzzed_data_provider.ConsumeIntegralInRange(0, 13)) { + case 0: + (void)strprintf(format_string, fuzzed_data_provider.ConsumeRandomLengthString(32)); + break; + case 1: + (void)strprintf(format_string, fuzzed_data_provider.ConsumeRandomLengthString(32).c_str()); + break; + case 2: + (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<signed char>()); + break; + case 3: + (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<unsigned char>()); + break; + case 4: + (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<char>()); + break; + case 5: + (void)strprintf(format_string, fuzzed_data_provider.ConsumeBool()); + break; + 
case 6: + (void)strprintf(format_string, fuzzed_data_provider.ConsumeFloatingPoint<float>()); + break; + case 7: + (void)strprintf(format_string, fuzzed_data_provider.ConsumeFloatingPoint<double>()); + break; + case 8: + (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<int16_t>()); + break; + case 9: + (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<uint16_t>()); + break; + case 10: + (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<int32_t>()); + break; + case 11: + (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<uint32_t>()); + break; + case 12: + (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<int64_t>()); + break; + case 13: + (void)strprintf(format_string, fuzzed_data_provider.ConsumeIntegral<uint64_t>()); + break; + default: + assert(false); + } + } catch (const tinyformat::format_error&) { + } +} diff --git a/src/test/getarg_tests.cpp b/src/test/getarg_tests.cpp index 4c64d8c833..10fb05ca8a 100644 --- a/src/test/getarg_tests.cpp +++ b/src/test/getarg_tests.cpp @@ -183,4 +183,32 @@ BOOST_AUTO_TEST_CASE(boolargno) BOOST_CHECK(gArgs.GetBoolArg("-foo", false)); } +BOOST_AUTO_TEST_CASE(logargs) +{ + const auto okaylog_bool = std::make_pair("-okaylog-bool", ArgsManager::ALLOW_BOOL); + const auto okaylog_negbool = std::make_pair("-okaylog-negbool", ArgsManager::ALLOW_BOOL); + const auto okaylog = std::make_pair("-okaylog", ArgsManager::ALLOW_ANY); + const auto dontlog = std::make_pair("-dontlog", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE); + SetupArgs({okaylog_bool, okaylog_negbool, okaylog, dontlog}); + ResetArgs("-okaylog-bool -nookaylog-negbool -okaylog=public -dontlog=private"); + + // Everything logged to debug.log will also append to str + std::string str; + auto print_connection = LogInstance().PushBackCallback( + [&str](const std::string& s) { + str += s; + }); + + // Log the arguments + gArgs.LogArgs(); + + LogInstance().DeleteCallback(print_connection); + // Check that what should appear does, and what shouldn't doesn't. 
+ BOOST_CHECK(str.find("Command-line arg: okaylog-bool=\"\"") != std::string::npos); + BOOST_CHECK(str.find("Command-line arg: okaylog-negbool=false") != std::string::npos); + BOOST_CHECK(str.find("Command-line arg: okaylog=\"public\"") != std::string::npos); + BOOST_CHECK(str.find("dontlog=****") != std::string::npos); + BOOST_CHECK(str.find("private") == std::string::npos); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index 303bb9b88c..ea600499ca 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -182,8 +182,8 @@ BOOST_AUTO_TEST_CASE(varints) CDataStream ss(SER_DISK, 0); CDataStream::size_type size = 0; for (int i = 0; i < 100000; i++) { - ss << VARINT(i, VarIntMode::NONNEGATIVE_SIGNED); - size += ::GetSerializeSize(VARINT(i, VarIntMode::NONNEGATIVE_SIGNED), 0); + ss << VARINT_MODE(i, VarIntMode::NONNEGATIVE_SIGNED); + size += ::GetSerializeSize(VARINT_MODE(i, VarIntMode::NONNEGATIVE_SIGNED), 0); BOOST_CHECK(size == ss.size()); } @@ -196,7 +196,7 @@ BOOST_AUTO_TEST_CASE(varints) // decode for (int i = 0; i < 100000; i++) { int j = -1; - ss >> VARINT(j, VarIntMode::NONNEGATIVE_SIGNED); + ss >> VARINT_MODE(j, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i); } @@ -210,21 +210,21 @@ BOOST_AUTO_TEST_CASE(varints) BOOST_AUTO_TEST_CASE(varints_bitpatterns) { CDataStream ss(SER_DISK, 0); - ss << VARINT(0, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "00"); ss.clear(); - ss << VARINT(0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear(); - ss << VARINT((int8_t)0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear(); - ss << VARINT(0x80, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "8000"); ss.clear(); + ss << VARINT_MODE(0, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "00"); ss.clear(); + ss << VARINT_MODE(0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear(); + ss << VARINT_MODE((int8_t)0x7f, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "7f"); ss.clear(); + ss << VARINT_MODE(0x80, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "8000"); ss.clear(); ss << VARINT((uint8_t)0x80); BOOST_CHECK_EQUAL(HexStr(ss), "8000"); ss.clear(); - ss << VARINT(0x1234, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "a334"); ss.clear(); - ss << VARINT((int16_t)0x1234, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "a334"); ss.clear(); - ss << VARINT(0xffff, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "82fe7f"); ss.clear(); + ss << VARINT_MODE(0x1234, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "a334"); ss.clear(); + ss << VARINT_MODE((int16_t)0x1234, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "a334"); ss.clear(); + ss << VARINT_MODE(0xffff, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "82fe7f"); ss.clear(); ss << VARINT((uint16_t)0xffff); BOOST_CHECK_EQUAL(HexStr(ss), "82fe7f"); ss.clear(); - ss << VARINT(0x123456, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "c7e756"); ss.clear(); - ss << VARINT((int32_t)0x123456, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "c7e756"); ss.clear(); + ss << VARINT_MODE(0x123456, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "c7e756"); ss.clear(); + ss << VARINT_MODE((int32_t)0x123456, 
VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "c7e756"); ss.clear(); ss << VARINT(0x80123456U); BOOST_CHECK_EQUAL(HexStr(ss), "86ffc7e756"); ss.clear(); ss << VARINT((uint32_t)0x80123456U); BOOST_CHECK_EQUAL(HexStr(ss), "86ffc7e756"); ss.clear(); ss << VARINT(0xffffffff); BOOST_CHECK_EQUAL(HexStr(ss), "8efefefe7f"); ss.clear(); - ss << VARINT(0x7fffffffffffffffLL, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "fefefefefefefefe7f"); ss.clear(); + ss << VARINT_MODE(0x7fffffffffffffffLL, VarIntMode::NONNEGATIVE_SIGNED); BOOST_CHECK_EQUAL(HexStr(ss), "fefefefefefefefe7f"); ss.clear(); ss << VARINT(0xffffffffffffffffULL); BOOST_CHECK_EQUAL(HexStr(ss), "80fefefefefefefefe7f"); ss.clear(); } diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp index 2c56bbdbb0..bcc4a46873 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -26,10 +26,9 @@ extern UniValue read_json(const std::string& jsondata); // Old script.cpp SignatureHash function uint256 static SignatureHashOld(CScript scriptCode, const CTransaction& txTo, unsigned int nIn, int nHashType) { - static const uint256 one(uint256S("0000000000000000000000000000000000000000000000000000000000000001")); if (nIn >= txTo.vin.size()) { - return one; + return UINT256_ONE(); } CMutableTransaction txTmp(txTo); @@ -59,7 +58,7 @@ uint256 static SignatureHashOld(CScript scriptCode, const CTransaction& txTo, un unsigned int nOut = nIn; if (nOut >= txTmp.vout.size()) { - return one; + return UINT256_ONE(); } txTmp.vout.resize(nOut+1); for (unsigned int i = 0; i < nOut; i++) diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index 0939803953..0352d2d95a 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -821,9 +821,29 @@ BOOST_AUTO_TEST_CASE(test_IsStandard) BOOST_CHECK(!IsStandardTx(CTransaction(t), reason)); BOOST_CHECK_EQUAL(reason, "scriptsig-size"); + // Check tx-size (non-standard if transaction weight is > MAX_STANDARD_TX_WEIGHT) + t.vin.clear(); + t.vin.resize(2438); // size per input (empty scriptSig): 41 bytes + t.vout[0].scriptPubKey = CScript() << OP_RETURN << std::vector<unsigned char>(19, 0); // output size: 30 bytes + // tx header: 12 bytes => 48 vbytes + // 2438 inputs: 2438*41 = 99958 bytes => 399832 vbytes + // 1 output: 30 bytes => 120 vbytes + // =============================== + // total: 400000 vbytes + BOOST_CHECK_EQUAL(GetTransactionWeight(CTransaction(t)), 400000); + BOOST_CHECK(IsStandardTx(CTransaction(t), reason)); + + // increase output size by one byte, so we end up with 400004 vbytes + t.vout[0].scriptPubKey = CScript() << OP_RETURN << std::vector<unsigned char>(20, 0); // output size: 31 bytes + BOOST_CHECK_EQUAL(GetTransactionWeight(CTransaction(t)), 400004); + reason.clear(); + BOOST_CHECK(!IsStandardTx(CTransaction(t), reason)); + BOOST_CHECK_EQUAL(reason, "tx-size"); + // Check bare multisig (standard if policy flag fIsBareMultisigStd is set) fIsBareMultisigStd = true; t.vout[0].scriptPubKey = GetScriptForMultisig(1, {key.GetPubKey()}); // simple 1-of-1 + t.vin.resize(1); t.vin[0].scriptSig = CScript() << std::vector<unsigned char>(65, 0); BOOST_CHECK(IsStandardTx(CTransaction(t), reason)); @@ -831,6 +851,7 @@ BOOST_AUTO_TEST_CASE(test_IsStandard) reason.clear(); BOOST_CHECK(!IsStandardTx(CTransaction(t), reason)); BOOST_CHECK_EQUAL(reason, "bare-multisig"); + fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG; } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/util/wallet.cpp 
b/src/test/util/wallet.cpp index 226d2df6e4..fd6012e9fe 100644 --- a/src/test/util/wallet.cpp +++ b/src/test/util/wallet.cpp @@ -27,8 +27,7 @@ std::string getnewaddress(CWallet& w) void importaddress(CWallet& wallet, const std::string& address) { auto spk_man = wallet.GetLegacyScriptPubKeyMan(); - LOCK(wallet.cs_wallet); - AssertLockHeld(spk_man->cs_wallet); + LOCK2(wallet.cs_wallet, spk_man->cs_KeyStore); const auto dest = DecodeDestination(address); assert(IsValidDestination(dest)); const auto script = GetScriptForDestination(dest); diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index 6f0e464891..42c2c50fa5 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -340,6 +340,27 @@ BOOST_AUTO_TEST_CASE(util_ParseParameters) BOOST_CHECK(testArgs.GetArgs("-ccc").size() == 2); } +BOOST_AUTO_TEST_CASE(util_ParseInvalidParameters) +{ + TestArgsManager test; + test.SetupArgs({{"-registered", ArgsManager::ALLOW_ANY}}); + + const char* argv[] = {"ignored", "-registered"}; + std::string error; + BOOST_CHECK(test.ParseParameters(2, (char**)argv, error)); + BOOST_CHECK_EQUAL(error, ""); + + argv[1] = "-unregistered"; + BOOST_CHECK(!test.ParseParameters(2, (char**)argv, error)); + BOOST_CHECK_EQUAL(error, "Invalid parameter -unregistered"); + + // Make sure registered parameters prefixed with a chain name trigger errors. + // (Previously, they were accepted and ignored.) + argv[1] = "-test.registered"; + BOOST_CHECK(!test.ParseParameters(2, (char**)argv, error)); + BOOST_CHECK_EQUAL(error, "Invalid parameter -test.registered"); +} + static void TestParse(const std::string& str, bool expected_bool, int64_t expected_int) { TestArgsManager test; @@ -835,7 +856,8 @@ struct ArgsMergeTestingSetup : public BasicTestingSetup { void ForEachMergeSetup(Fn&& fn) { ActionList arg_actions = {}; - ForEachNoDup(arg_actions, SET, SECTION_NEGATE, [&] { + // command_line_options do not have sections. Only iterate over SET and NEGATE + ForEachNoDup(arg_actions, SET, NEGATE, [&] { ActionList conf_actions = {}; ForEachNoDup(conf_actions, SET, SECTION_NEGATE, [&] { for (bool soft_set : {false, true}) { @@ -995,7 +1017,7 @@ BOOST_FIXTURE_TEST_CASE(util_ArgsMerge, ArgsMergeTestingSetup) // Results file is formatted like: // // <input> || <IsArgSet/IsArgNegated/GetArg output> | <GetArgs output> | <GetUnsuitable output> - BOOST_CHECK_EQUAL(out_sha_hex, "b835eef5977d69114eb039a976201f8c7121f34fe2b7ea2b73cafb516e5c9dc8"); + BOOST_CHECK_EQUAL(out_sha_hex, "8fd4877bb8bf337badca950ede6c917441901962f160e52514e06a60dea46cde"); } // Similar test as above, but for ArgsManager::GetChainName function. 
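The serialize_tests.cpp hunks above, and the txdb.cpp hunk that follows, reflect the VARINT macro being split in two: call sites that spell out a VarIntMode now use VARINT_MODE(obj, mode), while plain VARINT(obj) stays for the default case. A minimal illustrative sketch of the resulting call sites (not part of the patch; it assumes the usual <serialize.h>/<streams.h> headers used by these tests):

#include <serialize.h>
#include <streams.h>

void VarIntModeSketch()
{
    CDataStream ss(SER_DISK, 0);
    int height = 500000;                                        // signed, non-negative value: the mode is now spelled out
    ss << VARINT_MODE(height, VarIntMode::NONNEGATIVE_SIGNED);  // previously VARINT(height, VarIntMode::NONNEGATIVE_SIGNED)
    uint32_t count = 7;                                         // unsigned value: plain VARINT is unchanged
    ss << VARINT(count);
}

Presumably the point of the split is to reserve the bare VARINT() spelling for the unsigned default, so a mode argument can no longer be passed silently; the renamed call sites in txdb.cpp below follow the same pattern.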
diff --git a/src/txdb.cpp b/src/txdb.cpp index 35bbdab00d..acc47ab45e 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -339,7 +339,7 @@ public: ::Unserialize(s, Using<TxOutCompression>(vout[i])); } // coinbase height - ::Unserialize(s, VARINT(nHeight, VarIntMode::NONNEGATIVE_SIGNED)); + ::Unserialize(s, VARINT_MODE(nHeight, VarIntMode::NONNEGATIVE_SIGNED)); } }; diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 441255182e..5768219f3a 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -23,7 +23,7 @@ CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef& _tx, const CAmount& _nFe int64_t _nTime, unsigned int _entryHeight, bool _spendsCoinbase, int64_t _sigOpsCost, LockPoints lp) : tx(_tx), nFee(_nFee), nTxWeight(GetTransactionWeight(*tx)), nUsageSize(RecursiveDynamicUsage(tx)), nTime(_nTime), entryHeight(_entryHeight), - spendsCoinbase(_spendsCoinbase), sigOpCost(_sigOpsCost), lockPoints(lp) + spendsCoinbase(_spendsCoinbase), sigOpCost(_sigOpsCost), lockPoints(lp), m_epoch(0) { nCountWithDescendants = 1; nSizeWithDescendants = GetTxSize(); @@ -122,8 +122,6 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes // setMemPoolChildren will be updated, an assumption made in // UpdateForDescendants. for (const uint256 &hash : reverse_iterate(vHashesToUpdate)) { - // we cache the in-mempool children to avoid duplicate updates - setEntries setChildren; // calculate children from mapNextTx txiter it = mapTx.find(hash); if (it == mapTx.end()) { @@ -132,17 +130,21 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes auto iter = mapNextTx.lower_bound(COutPoint(hash, 0)); // First calculate the children, and update setMemPoolChildren to // include them, and update their setMemPoolParents to include this tx. - for (; iter != mapNextTx.end() && iter->first->hash == hash; ++iter) { - const uint256 &childHash = iter->second->GetHash(); - txiter childIter = mapTx.find(childHash); - assert(childIter != mapTx.end()); - // We can skip updating entries we've encountered before or that - // are in the block (which are already accounted for). - if (setChildren.insert(childIter).second && !setAlreadyIncluded.count(childHash)) { - UpdateChild(it, childIter, true); - UpdateParent(childIter, it, true); + // we cache the in-mempool children to avoid duplicate updates + { + const auto epoch = GetFreshEpoch(); + for (; iter != mapNextTx.end() && iter->first->hash == hash; ++iter) { + const uint256 &childHash = iter->second->GetHash(); + txiter childIter = mapTx.find(childHash); + assert(childIter != mapTx.end()); + // We can skip updating entries we've encountered before or that + // are in the block (which are already accounted for). 
+ if (!visited(childIter) && !setAlreadyIncluded.count(childHash)) { + UpdateChild(it, childIter, true); + UpdateParent(childIter, it, true); + } } - } + } // release epoch guard for UpdateForDescendants UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded); } } @@ -325,7 +327,7 @@ void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, CAmount modifyFee, } CTxMemPool::CTxMemPool(CBlockPolicyEstimator* estimator) - : nTransactionsUpdated(0), minerPolicyEstimator(estimator) + : nTransactionsUpdated(0), minerPolicyEstimator(estimator), m_epoch(0), m_has_epoch_guard(false) { _clear(); //lock free clear @@ -1105,4 +1107,23 @@ void CTxMemPool::SetIsLoaded(bool loaded) m_is_loaded = loaded; } + +CTxMemPool::EpochGuard CTxMemPool::GetFreshEpoch() const +{ + return EpochGuard(*this); +} +CTxMemPool::EpochGuard::EpochGuard(const CTxMemPool& in) : pool(in) +{ + assert(!pool.m_has_epoch_guard); + ++pool.m_epoch; + pool.m_has_epoch_guard = true; +} + +CTxMemPool::EpochGuard::~EpochGuard() +{ + // prevents stale results being used + ++pool.m_epoch; + pool.m_has_epoch_guard = false; +} + SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand(std::numeric_limits<uint64_t>::max())), k1(GetRand(std::numeric_limits<uint64_t>::max())) {} diff --git a/src/txmempool.h b/src/txmempool.h index 01db59e859..de11d626b4 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -129,6 +129,7 @@ public: int64_t GetSigOpCostWithAncestors() const { return nSigOpCostWithAncestors; } mutable size_t vTxHashesIdx; //!< Index in mempool's vTxHashes + mutable uint64_t m_epoch; //!< epoch when last touched, useful for graph algorithms }; // Helpers for modifying CTxMemPool::mapTx, which is a boost multi_index. @@ -453,6 +454,8 @@ private: mutable int64_t lastRollingFeeUpdate; mutable bool blockSinceLastRollingFeeBump; mutable double rollingMinimumFeeRate; //!< minimum fee to get into the pool, decreases exponentially + mutable uint64_t m_epoch; + mutable bool m_has_epoch_guard; void trackPackageRemoved(const CFeeRate& rate) EXCLUSIVE_LOCKS_REQUIRED(cs); @@ -736,6 +739,55 @@ private: * removal. */ void removeUnchecked(txiter entry, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs); +public: + /** EpochGuard: RAII-style guard for using epoch-based graph traversal algorithms. + * When walking ancestors or descendants, we generally want to avoid + * visiting the same transactions twice. Some traversal algorithms use + * std::set (or setEntries) to deduplicate the transactions we visit. + * However, use of std::set is algorithmically undesirable because it both + * adds an asymptotic factor of O(log n) to traversal cost and triggers O(n) + * more dynamic memory allocations. + * In many algorithms we can replace std::set with an internal mempool + * counter to track the time (or, "epoch") that we began a traversal, and + * check + update a per-transaction epoch for each transaction we look at to + * determine if that transaction has not yet been visited during the current + * traversal's epoch. + * Algorithms using std::set can be replaced on a one by one basis. + * Both techniques are not fundamentally incompatible across the codebase. + * Generally speaking, however, the remaining use of std::set for mempool + * traversal should be viewed as a TODO for replacement with an epoch based + * traversal, rather than a preference for std::set over epochs in that + * algorithm. + */ + class EpochGuard { + const CTxMemPool& pool; + public: + EpochGuard(const CTxMemPool& in); + ~EpochGuard(); + }; + // N.B. 
GetFreshEpoch modifies mutable state via the EpochGuard construction + // (and later destruction) + EpochGuard GetFreshEpoch() const EXCLUSIVE_LOCKS_REQUIRED(cs); + + /** visited marks a CTxMemPoolEntry as having been traversed + * during the lifetime of the most recently created EpochGuard + * and returns false if we are the first visitor, true otherwise. + * + * An EpochGuard must be held when visited is called or an assert will be + * triggered. + * + */ + bool visited(txiter it) const EXCLUSIVE_LOCKS_REQUIRED(cs) { + assert(m_has_epoch_guard); + bool ret = it->m_epoch >= m_epoch; + it->m_epoch = std::max(it->m_epoch, m_epoch); + return ret; + } + + bool visited(Optional<txiter> it) const EXCLUSIVE_LOCKS_REQUIRED(cs) { + assert(m_has_epoch_guard); + return !it || visited(*it); + } }; /** diff --git a/src/uint256.cpp b/src/uint256.cpp index 6398d6326f..a943e71062 100644 --- a/src/uint256.cpp +++ b/src/uint256.cpp @@ -75,3 +75,8 @@ template std::string base_blob<256>::GetHex() const; template std::string base_blob<256>::ToString() const; template void base_blob<256>::SetHex(const char*); template void base_blob<256>::SetHex(const std::string&); + +uint256& UINT256_ONE() { + static uint256* one = new uint256(uint256S("0000000000000000000000000000000000000000000000000000000000000001")); + return *one; +} diff --git a/src/uint256.h b/src/uint256.h index ff0b74e117..b36598f572 100644 --- a/src/uint256.h +++ b/src/uint256.h @@ -144,4 +144,6 @@ inline uint256 uint256S(const std::string& str) return rv; } +uint256& UINT256_ONE(); + #endif // BITCOIN_UINT256_H diff --git a/src/undo.h b/src/undo.h index 2009c721ab..47f132c7d8 100644 --- a/src/undo.h +++ b/src/undo.h @@ -13,58 +13,42 @@ #include <serialize.h> #include <version.h> -/** Undo information for a CTxIn +/** Formatter for undo information for a CTxIn * * Contains the prevout's CTxOut being spent, and its metadata as well * (coinbase or not, height). The serialization contains a dummy value of * zero. This is compatible with older versions which expect to see * the transaction version there. */ -class TxInUndoSerializer +struct TxInUndoFormatter { - const Coin* txout; - -public: template<typename Stream> - void Serialize(Stream &s) const { - ::Serialize(s, VARINT(txout->nHeight * 2 + (txout->fCoinBase ? 1u : 0u))); - if (txout->nHeight > 0) { + void Ser(Stream &s, const Coin& txout) { + ::Serialize(s, VARINT(txout.nHeight * 2 + (txout.fCoinBase ? 1u : 0u))); + if (txout.nHeight > 0) { // Required to maintain compatibility with older undo format. ::Serialize(s, (unsigned char)0); } - ::Serialize(s, Using<TxOutCompression>(REF(txout->out))); + ::Serialize(s, Using<TxOutCompression>(txout.out)); } - explicit TxInUndoSerializer(const Coin* coin) : txout(coin) {} -}; - -class TxInUndoDeserializer -{ - Coin* txout; - -public: template<typename Stream> - void Unserialize(Stream &s) { + void Unser(Stream &s, Coin& txout) { unsigned int nCode = 0; ::Unserialize(s, VARINT(nCode)); - txout->nHeight = nCode / 2; - txout->fCoinBase = nCode & 1; - if (txout->nHeight > 0) { + txout.nHeight = nCode / 2; + txout.fCoinBase = nCode & 1; + if (txout.nHeight > 0) { // Old versions stored the version number for the last spend of // a transaction's outputs. Non-final spends were indicated with // height = 0. 
unsigned int nVersionDummy; ::Unserialize(s, VARINT(nVersionDummy)); } - ::Unserialize(s, Using<TxOutCompression>(REF(txout->out))); + ::Unserialize(s, Using<TxOutCompression>(txout.out)); } - - explicit TxInUndoDeserializer(Coin* coin) : txout(coin) {} }; -static const size_t MIN_TRANSACTION_INPUT_WEIGHT = WITNESS_SCALE_FACTOR * ::GetSerializeSize(CTxIn(), PROTOCOL_VERSION); -static const size_t MAX_INPUTS_PER_BLOCK = MAX_BLOCK_WEIGHT / MIN_TRANSACTION_INPUT_WEIGHT; - /** Undo information for a CTransaction */ class CTxUndo { @@ -72,29 +56,7 @@ public: // undo information for all txins std::vector<Coin> vprevout; - template <typename Stream> - void Serialize(Stream& s) const { - // TODO: avoid reimplementing vector serializer - uint64_t count = vprevout.size(); - ::Serialize(s, COMPACTSIZE(REF(count))); - for (const auto& prevout : vprevout) { - ::Serialize(s, TxInUndoSerializer(&prevout)); - } - } - - template <typename Stream> - void Unserialize(Stream& s) { - // TODO: avoid reimplementing vector deserializer - uint64_t count = 0; - ::Unserialize(s, COMPACTSIZE(count)); - if (count > MAX_INPUTS_PER_BLOCK) { - throw std::ios_base::failure("Too many input undo records"); - } - vprevout.resize(count); - for (auto& prevout : vprevout) { - ::Unserialize(s, TxInUndoDeserializer(&prevout)); - } - } + SERIALIZE_METHODS(CTxUndo, obj) { READWRITE(Using<VectorFormatter<TxInUndoFormatter>>(obj.vprevout)); } }; /** Undo information for a CBlock */ @@ -103,12 +65,7 @@ class CBlockUndo public: std::vector<CTxUndo> vtxundo; // for all but the coinbase - ADD_SERIALIZE_METHODS; - - template <typename Stream, typename Operation> - inline void SerializationOp(Stream& s, Operation ser_action) { - READWRITE(vtxundo); - } + SERIALIZE_METHODS(CBlockUndo, obj) { READWRITE(obj.vtxundo); } }; #endif // BITCOIN_UNDO_H diff --git a/src/univalue/Makefile.am b/src/univalue/Makefile.am index e283fc890e..0f5ba59954 100644 --- a/src/univalue/Makefile.am +++ b/src/univalue/Makefile.am @@ -95,6 +95,7 @@ TEST_FILES = \ $(TEST_DATA_DIR)/fail41.json \ $(TEST_DATA_DIR)/fail42.json \ $(TEST_DATA_DIR)/fail44.json \ + $(TEST_DATA_DIR)/fail45.json \ $(TEST_DATA_DIR)/fail3.json \ $(TEST_DATA_DIR)/fail4.json \ $(TEST_DATA_DIR)/fail5.json \ @@ -105,6 +106,7 @@ TEST_FILES = \ $(TEST_DATA_DIR)/pass1.json \ $(TEST_DATA_DIR)/pass2.json \ $(TEST_DATA_DIR)/pass3.json \ + $(TEST_DATA_DIR)/pass4.json \ $(TEST_DATA_DIR)/round1.json \ $(TEST_DATA_DIR)/round2.json \ $(TEST_DATA_DIR)/round3.json \ diff --git a/src/univalue/lib/univalue_read.cpp b/src/univalue/lib/univalue_read.cpp index 14834db24d..5c6a1acf75 100644 --- a/src/univalue/lib/univalue_read.cpp +++ b/src/univalue/lib/univalue_read.cpp @@ -8,6 +8,14 @@ #include "univalue.h" #include "univalue_utffilter.h" +/* + * According to stackexchange, the original json test suite wanted + * to limit depth to 22. Widely-deployed PHP bails at depth 512, + * so we will follow PHP's lead, which should be more than sufficient + * (further stackexchange comments indicate depth > 32 rarely occurs). 
+ */ +static const size_t MAX_JSON_DEPTH = 512; + static bool json_isdigit(int ch) { return ((ch >= '0') && (ch <= '9')); @@ -323,6 +331,9 @@ bool UniValue::read(const char *raw, size_t size) stack.push_back(newTop); } + if (stack.size() > MAX_JSON_DEPTH) + return false; + if (utyp == VOBJ) setExpect(OBJ_NAME); else diff --git a/src/univalue/test/fail45.json b/src/univalue/test/fail45.json new file mode 100644 index 0000000000..03a30d8800 --- /dev/null +++ b/src/univalue/test/fail45.json @@ -0,0 +1 @@ +[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] diff --git a/src/univalue/test/pass4.json b/src/univalue/test/pass4.json new file mode 100644 index 0000000000..f5a680b31c --- /dev/null +++ b/src/univalue/test/pass4.json @@ -0,0 +1 @@ +[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] diff --git a/src/univalue/test/unitester.cpp b/src/univalue/test/unitester.cpp index 75c0dc225a..2308afbcdf 100644 --- a/src/univalue/test/unitester.cpp +++ b/src/univalue/test/unitester.cpp @@ -114,6 +114,7 @@ static const char *filenames[] = { "fail41.json", // invalid unicode: unfinished UTF-8 "fail42.json", // valid json with garbage following a nul byte "fail44.json", // unterminated string + "fail45.json", // nested beyond max depth "fail3.json", "fail4.json", // extra comma "fail5.json", @@ -124,6 +125,7 @@ static const char *filenames[] = { "pass1.json", "pass2.json", "pass3.json", + "pass4.json", "round1.json", // round-trip test "round2.json", // unicode "round3.json", // bare string diff --git a/src/util/asmap.cpp b/src/util/asmap.cpp index ac230e9ee5..60bd27bf90 100644 --- 
a/src/util/asmap.cpp +++ b/src/util/asmap.cpp @@ -8,13 +8,14 @@ namespace { -uint32_t DecodeBits(std::vector<bool>::const_iterator& bitpos, uint8_t minval, const std::vector<uint8_t> &bit_sizes) +uint32_t DecodeBits(std::vector<bool>::const_iterator& bitpos, const std::vector<bool>::const_iterator& endpos, uint8_t minval, const std::vector<uint8_t> &bit_sizes) { uint32_t val = minval; bool bit; for (std::vector<uint8_t>::const_iterator bit_sizes_it = bit_sizes.begin(); bit_sizes_it != bit_sizes.end(); ++bit_sizes_it) { if (bit_sizes_it + 1 != bit_sizes.end()) { + if (bitpos == endpos) break; bit = *bitpos; bitpos++; } else { @@ -24,6 +25,7 @@ uint32_t DecodeBits(std::vector<bool>::const_iterator& bitpos, uint8_t minval, c val += (1 << *bit_sizes_it); } else { for (int b = 0; b < *bit_sizes_it; b++) { + if (bitpos == endpos) break; bit = *bitpos; bitpos++; val += bit << (*bit_sizes_it - 1 - b); @@ -35,29 +37,29 @@ uint32_t DecodeBits(std::vector<bool>::const_iterator& bitpos, uint8_t minval, c } const std::vector<uint8_t> TYPE_BIT_SIZES{0, 0, 1}; -uint32_t DecodeType(std::vector<bool>::const_iterator& bitpos) +uint32_t DecodeType(std::vector<bool>::const_iterator& bitpos, const std::vector<bool>::const_iterator& endpos) { - return DecodeBits(bitpos, 0, TYPE_BIT_SIZES); + return DecodeBits(bitpos, endpos, 0, TYPE_BIT_SIZES); } const std::vector<uint8_t> ASN_BIT_SIZES{15, 16, 17, 18, 19, 20, 21, 22, 23, 24}; -uint32_t DecodeASN(std::vector<bool>::const_iterator& bitpos) +uint32_t DecodeASN(std::vector<bool>::const_iterator& bitpos, const std::vector<bool>::const_iterator& endpos) { - return DecodeBits(bitpos, 1, ASN_BIT_SIZES); + return DecodeBits(bitpos, endpos, 1, ASN_BIT_SIZES); } const std::vector<uint8_t> MATCH_BIT_SIZES{1, 2, 3, 4, 5, 6, 7, 8}; -uint32_t DecodeMatch(std::vector<bool>::const_iterator& bitpos) +uint32_t DecodeMatch(std::vector<bool>::const_iterator& bitpos, const std::vector<bool>::const_iterator& endpos) { - return DecodeBits(bitpos, 2, MATCH_BIT_SIZES); + return DecodeBits(bitpos, endpos, 2, MATCH_BIT_SIZES); } const std::vector<uint8_t> JUMP_BIT_SIZES{5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}; -uint32_t DecodeJump(std::vector<bool>::const_iterator& bitpos) +uint32_t DecodeJump(std::vector<bool>::const_iterator& bitpos, const std::vector<bool>::const_iterator& endpos) { - return DecodeBits(bitpos, 17, JUMP_BIT_SIZES); + return DecodeBits(bitpos, endpos, 17, JUMP_BIT_SIZES); } } @@ -65,33 +67,37 @@ uint32_t DecodeJump(std::vector<bool>::const_iterator& bitpos) uint32_t Interpret(const std::vector<bool> &asmap, const std::vector<bool> &ip) { std::vector<bool>::const_iterator pos = asmap.begin(); + const std::vector<bool>::const_iterator endpos = asmap.end(); uint8_t bits = ip.size(); - uint8_t default_asn = 0; + uint32_t default_asn = 0; uint32_t opcode, jump, match, matchlen; - while (1) { - assert(pos != asmap.end()); - opcode = DecodeType(pos); + while (pos != endpos) { + opcode = DecodeType(pos, endpos); if (opcode == 0) { - return DecodeASN(pos); + return DecodeASN(pos, endpos); } else if (opcode == 1) { - jump = DecodeJump(pos); + jump = DecodeJump(pos, endpos); + if (bits == 0) break; if (ip[ip.size() - bits]) { + if (jump >= endpos - pos) break; pos += jump; } bits--; } else if (opcode == 2) { - match = DecodeMatch(pos); + match = DecodeMatch(pos, endpos); matchlen = CountBits(match) - 1; for (uint32_t bit = 0; bit < matchlen; bit++) { + if (bits == 0) break; if ((ip[ip.size() - bits]) != ((match >> 
(matchlen - 1 - bit)) & 1)) { return default_asn; } bits--; } } else if (opcode == 3) { - default_asn = DecodeASN(pos); + default_asn = DecodeASN(pos, endpos); } else { - assert(0); + break; } } + return 0; // 0 is not a valid ASN } diff --git a/src/util/system.cpp b/src/util/system.cpp index 588ddc1fcf..b0a538b527 100644 --- a/src/util/system.cpp +++ b/src/util/system.cpp @@ -63,6 +63,7 @@ #include <malloc.h> #endif +#include <boost/algorithm/string/replace.hpp> #include <thread> #include <typeinfo> #include <univalue.h> @@ -312,21 +313,18 @@ bool ArgsManager::ParseParameters(int argc, const char* const argv[], std::strin std::string section; util::SettingsValue value = InterpretOption(section, key, val); Optional<unsigned int> flags = GetArgFlags('-' + key); - if (flags) { - if (!CheckValid(key, value, *flags, error)) { - return false; - } - // Weird behavior preserved for backwards compatibility: command - // line options with section prefixes are allowed but ignored. It - // would be better if these options triggered the Invalid parameter - // error below. - if (section.empty()) { - m_settings.command_line_options[key].push_back(value); - } - } else { - error = strprintf("Invalid parameter -%s", key); + + // Unknown command line options and command line options with dot + // characters (which are returned from InterpretOption with nonempty + // section strings) are not valid. + if (!flags || !section.empty()) { + error = strprintf("Invalid parameter %s", argv[i]); return false; } + + if (!CheckValid(key, value, *flags, error)) return false; + + m_settings.command_line_options[key].push_back(value); } // we do not allow -includeconf from command line @@ -864,6 +862,32 @@ std::vector<util::SettingsValue> ArgsManager::GetSettingsList(const std::string& return util::GetSettingsList(m_settings, m_network, SettingName(arg), !UseDefaultSection(arg)); } +void ArgsManager::logArgsPrefix( + const std::string& prefix, + const std::string& section, + const std::map<std::string, std::vector<util::SettingsValue>>& args) const +{ + std::string section_str = section.empty() ? "" : "[" + section + "] "; + for (const auto& arg : args) { + for (const auto& value : arg.second) { + Optional<unsigned int> flags = GetArgFlags('-' + arg.first); + if (flags) { + std::string value_str = (*flags & SENSITIVE) ? 
"****" : value.write(); + LogPrintf("%s %s%s=%s\n", prefix, section_str, arg.first, value_str); + } + } + } +} + +void ArgsManager::LogArgs() const +{ + LOCK(cs_args); + for (const auto& section : m_settings.ro_config) { + logArgsPrefix("Config file arg:", section.first, section.second); + } + logArgsPrefix("Command-line arg:", "", m_settings.command_line_options); +} + bool RenameOver(fs::path src, fs::path dest) { #ifdef WIN32 @@ -1024,6 +1048,15 @@ fs::path GetSpecialFolderPath(int nFolder, bool fCreate) } #endif +#ifndef WIN32 +std::string ShellEscape(const std::string& arg) +{ + std::string escaped = arg; + boost::replace_all(escaped, "'", "'\"'\"'"); + return "'" + escaped + "'"; +} +#endif + #if HAVE_SYSTEM void runCommand(const std::string& strCommand) { diff --git a/src/util/system.h b/src/util/system.h index 473019bbed..3138522b5c 100644 --- a/src/util/system.h +++ b/src/util/system.h @@ -81,6 +81,9 @@ fs::path GetConfigFile(const std::string& confPath); #ifdef WIN32 fs::path GetSpecialFolderPath(int nFolder, bool fCreate = true); #endif +#ifndef WIN32 +std::string ShellEscape(const std::string& arg); +#endif #if HAVE_SYSTEM void runCommand(const std::string& strCommand); #endif @@ -145,6 +148,8 @@ public: * between mainnet and regtest/testnet won't cause problems due to these * parameters by accident. */ NETWORK_ONLY = 0x200, + // This argument's value is sensitive (such as a password). + SENSITIVE = 0x400, }; protected: @@ -318,6 +323,19 @@ public: * Return nullopt for unknown arg. */ Optional<unsigned int> GetArgFlags(const std::string& name) const; + + /** + * Log the config file options and the command line arguments, + * useful for troubleshooting. + */ + void LogArgs() const; + +private: + // Helper function for LogArgs(). + void logArgsPrefix( + const std::string& prefix, + const std::string& section, + const std::map<std::string, std::vector<util::SettingsValue>>& args) const; }; extern ArgsManager gArgs; diff --git a/src/validation.cpp b/src/validation.cpp index 9854740e6f..bab04b8e34 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1392,10 +1392,14 @@ static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip) E CheckForkWarningConditions(); } +// Called both upon regular invalid block discovery *and* InvalidateBlock void static InvalidChainFound(CBlockIndex* pindexNew) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork) pindexBestInvalid = pindexNew; + if (pindexBestHeader != nullptr && pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) { + pindexBestHeader = ::ChainActive().Tip(); + } LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__, pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, @@ -1408,6 +1412,8 @@ void static InvalidChainFound(CBlockIndex* pindexNew) EXCLUSIVE_LOCKS_REQUIRED(c CheckForkWarningConditions(); } +// Same as InvalidChainFound, above, except not called directly from InvalidateBlock, +// which does its own setBlockIndexCandidates manageent. 
void CChainState::InvalidBlockFound(CBlockIndex *pindex, const BlockValidationState &state) { if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) { pindex->nStatus |= BLOCK_FAILED_VALID; diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp index 2ebc9aba39..50f064b305 100644 --- a/src/wallet/init.cpp +++ b/src/wallet/init.cpp @@ -62,7 +62,7 @@ void WalletInit::AddWalletOptions() const gArgs.AddArg("-walletbroadcast", strprintf("Make the wallet broadcast transactions (default: %u)", DEFAULT_WALLETBROADCAST), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); gArgs.AddArg("-walletdir=<dir>", "Specify directory to hold wallets (default: <datadir>/wallets if it exists, otherwise <datadir>)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::WALLET); #if HAVE_SYSTEM - gArgs.AddArg("-walletnotify=<cmd>", "Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); + gArgs.AddArg("-walletnotify=<cmd>", "Execute command when a wallet transaction changes. %s in cmd is replaced by TxID and %w is replaced by wallet name. %w is not currently implemented on windows. On systems where %w is supported, it should NOT be quoted because this would break shell escaping used to invoke the command.", ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); #endif gArgs.AddArg("-walletrbf", strprintf("Send transactions with full-RBF opt-in enabled (RPC only, default: %u)", DEFAULT_WALLET_RBF), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET); gArgs.AddArg("-zapwallettxes=<mode>", "Delete all wallet transactions and only recover those parts of the blockchain through -rescan on startup" diff --git a/src/wallet/psbtwallet.cpp b/src/wallet/psbtwallet.cpp index 3e6386a63f..d995fb06d4 100644 --- a/src/wallet/psbtwallet.cpp +++ b/src/wallet/psbtwallet.cpp @@ -55,21 +55,21 @@ TransactionError FillPSBT(const CWallet* pwallet, PartiallySignedTransaction& ps } SignatureData sigdata; input.FillSignatureData(sigdata); - const SigningProvider* provider = pwallet->GetSigningProvider(script, sigdata); + std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(script, sigdata); if (!provider) { complete = false; continue; } - complete &= SignPSBTInput(HidingSigningProvider(provider, !sign, !bip32derivs), psbtx, i, sighash_type); + complete &= SignPSBTInput(HidingSigningProvider(provider.get(), !sign, !bip32derivs), psbtx, i, sighash_type); } // Fill in the bip32 keypaths and redeemscripts for the outputs so that hardware wallets can identify change for (unsigned int i = 0; i < psbtx.tx->vout.size(); ++i) { const CTxOut& out = psbtx.tx->vout.at(i); - const SigningProvider* provider = pwallet->GetSigningProvider(out.scriptPubKey); + std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(out.scriptPubKey); if (provider) { - UpdatePSBTOutput(HidingSigningProvider(provider, true, !bip32derivs), psbtx, i); + UpdatePSBTOutput(HidingSigningProvider(provider.get(), true, !bip32derivs), psbtx, i); } } diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp index 633ac1b16d..7e704a95fe 100644 --- a/src/wallet/rpcdump.cpp +++ b/src/wallet/rpcdump.cpp @@ -125,7 +125,7 @@ UniValue importprivkey(const JSONRPCRequest& request) throw JSONRPCError(RPC_WALLET_ERROR, "Cannot import private keys to a wallet with private keys disabled"); } - EnsureLegacyScriptPubKeyMan(*wallet); + EnsureLegacyScriptPubKeyMan(*wallet, true); WalletRescanReserver reserver(pwallet); bool fRescan = true; @@ -253,7 +253,7 @@ UniValue 
importaddress(const JSONRPCRequest& request) }, }.Check(request); - EnsureLegacyScriptPubKeyMan(*pwallet); + EnsureLegacyScriptPubKeyMan(*pwallet, true); std::string strLabel; if (!request.params[1].isNull()) @@ -454,7 +454,7 @@ UniValue importpubkey(const JSONRPCRequest& request) }, }.Check(request); - EnsureLegacyScriptPubKeyMan(*wallet); + EnsureLegacyScriptPubKeyMan(*wallet, true); std::string strLabel; if (!request.params[1].isNull()) @@ -538,7 +538,7 @@ UniValue importwallet(const JSONRPCRequest& request) }, }.Check(request); - EnsureLegacyScriptPubKeyMan(*wallet); + EnsureLegacyScriptPubKeyMan(*wallet, true); if (pwallet->chain().havePruned()) { // Exit early and print an error. @@ -700,7 +700,7 @@ UniValue dumpprivkey(const JSONRPCRequest& request) LegacyScriptPubKeyMan& spk_man = EnsureLegacyScriptPubKeyMan(*wallet); auto locked_chain = pwallet->chain().lock(); - LOCK(pwallet->cs_wallet); + LOCK2(pwallet->cs_wallet, spk_man.cs_KeyStore); EnsureWalletIsUnlocked(pwallet); @@ -751,8 +751,7 @@ UniValue dumpwallet(const JSONRPCRequest& request) LegacyScriptPubKeyMan& spk_man = EnsureLegacyScriptPubKeyMan(*wallet); auto locked_chain = pwallet->chain().lock(); - LOCK(pwallet->cs_wallet); - AssertLockHeld(spk_man.cs_wallet); + LOCK2(pwallet->cs_wallet, spk_man.cs_KeyStore); EnsureWalletIsUnlocked(pwallet); @@ -1322,8 +1321,19 @@ UniValue importmulti(const JSONRPCRequest& mainRequest) "\"options\""}, }, RPCResult{ - "\nResponse is an array with the same size as the input that has the execution result :\n" - " [{\"success\": true}, {\"success\": true, \"warnings\": [\"Ignoring irrelevant private key\"]}, {\"success\": false, \"error\": {\"code\": -1, \"message\": \"Internal Server Error\"}}, ...]\n" + "[ (json array) Response is an array with the same size as the input that has the execution result\n" + " { (json object)\n" + " \"success\" : true|false, (boolean)\n" + " \"warnings\" : [ (json array, optional)\n" + " \"str\", (string)\n" + " ...\n" + " ],\n" + " \"error\" : { (json object, optional)\n" + " ... 
JSONRPC error\n" + " },\n" + " },\n" + " ...\n" + "]\n" }, RPCExamples{ HelpExampleCli("importmulti", "'[{ \"scriptPubKey\": { \"address\": \"<my address>\" }, \"timestamp\":1455191478 }, " @@ -1335,7 +1345,7 @@ UniValue importmulti(const JSONRPCRequest& mainRequest) RPCTypeCheck(mainRequest.params, {UniValue::VARR, UniValue::VOBJ}); - EnsureLegacyScriptPubKeyMan(*wallet); + EnsureLegacyScriptPubKeyMan(*wallet, true); const UniValue& requests = mainRequest.params[0]; diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index 05719b4754..bc4ec77e31 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -124,9 +124,13 @@ void EnsureWalletIsUnlocked(const CWallet* pwallet) } } -LegacyScriptPubKeyMan& EnsureLegacyScriptPubKeyMan(CWallet& wallet) +// also_create should only be set to true only when the RPC is expected to add things to a blank wallet and make it no longer blank +LegacyScriptPubKeyMan& EnsureLegacyScriptPubKeyMan(CWallet& wallet, bool also_create) { LegacyScriptPubKeyMan* spk_man = wallet.GetLegacyScriptPubKeyMan(); + if (!spk_man && also_create) { + spk_man = wallet.GetOrCreateLegacyScriptPubKeyMan(); + } if (!spk_man) { throw JSONRPCError(RPC_WALLET_ERROR, "This type of wallet does not support this command"); } @@ -561,7 +565,7 @@ static UniValue signmessage(const JSONRPCRequest& request) } CScript script_pub_key = GetScriptForDestination(*pkhash); - const SigningProvider* provider = pwallet->GetSigningProvider(script_pub_key); + std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(script_pub_key); if (!provider) { throw JSONRPCError(RPC_WALLET_ERROR, "Private key not available"); } @@ -968,8 +972,9 @@ static UniValue addmultisigaddress(const JSONRPCRequest& request) }, RPCResult{ "{\n" - " \"address\":\"multisigaddress\", (string) The value of the new multisig address.\n" - " \"redeemScript\":\"script\" (string) The string value of the hex-encoded redemption script.\n" + " \"address\" : \"multisigaddress\", (string) The value of the new multisig address.\n" + " \"redeemScript\" : \"script\" (string) The string value of the hex-encoded redemption script.\n" + " \"descriptor\" : \"descriptor\" (string) The descriptor for this multisig\n" "}\n" }, RPCExamples{ @@ -983,7 +988,7 @@ static UniValue addmultisigaddress(const JSONRPCRequest& request) LegacyScriptPubKeyMan& spk_man = EnsureLegacyScriptPubKeyMan(*pwallet); auto locked_chain = pwallet->chain().lock(); - LOCK(pwallet->cs_wallet); + LOCK2(pwallet->cs_wallet, spk_man.cs_KeyStore); std::string label; if (!request.params[2].isNull()) @@ -1014,9 +1019,13 @@ static UniValue addmultisigaddress(const JSONRPCRequest& request) CTxDestination dest = AddAndGetMultisigDestination(required, pubkeys, output_type, spk_man, inner); pwallet->SetAddressBook(dest, label, "send"); + // Make the descriptor + std::unique_ptr<Descriptor> descriptor = InferDescriptor(GetScriptForDestination(dest), spk_man); + UniValue result(UniValue::VOBJ); result.pushKV("address", EncodeDestination(dest)); result.pushKV("redeemScript", HexStr(inner.begin(), inner.end())); + result.pushKV("descriptor", descriptor->ToString()); return result; } @@ -1197,12 +1206,12 @@ static UniValue listreceivedbyaddress(const JSONRPCRequest& request) RPCResult{ "[\n" " {\n" - " \"involvesWatchonly\" : true, (bool) Only returns true if imported addresses were involved in transaction.\n" + " \"involvesWatchonly\" : true, (boolean) Only returns true if imported addresses were involved in transaction.\n" " \"address\" : 
\"receivingaddress\", (string) The receiving address\n" " \"amount\" : x.xxx, (numeric) The total amount in " + CURRENCY_UNIT + " received by the address\n" " \"confirmations\" : n, (numeric) The number of confirmations of the most recent transaction included\n" " \"label\" : \"label\", (string) The label of the receiving address. The default label is \"\".\n" - " \"txids\": [\n" + " \"txids\" : [\n" " \"txid\", (string) The ids of transactions received with the address \n" " ...\n" " ]\n" @@ -1247,7 +1256,7 @@ static UniValue listreceivedbylabel(const JSONRPCRequest& request) RPCResult{ "[\n" " {\n" - " \"involvesWatchonly\" : true, (bool) Only returns true if imported addresses were involved in transaction.\n" + " \"involvesWatchonly\" : true, (boolean) Only returns true if imported addresses were involved in transaction.\n" " \"amount\" : x.xxx, (numeric) The total amount received by addresses with this label\n" " \"confirmations\" : n, (numeric) The number of confirmations of the most recent transaction included\n" " \"label\" : \"label\" (string) The label of the receiving address. The default label is \"\".\n" @@ -1369,21 +1378,21 @@ static const std::string TransactionDescriptionString() { return " \"confirmations\": n, (numeric) The number of confirmations for the transaction. Negative confirmations means the\n" " transaction conflicted that many blocks ago.\n" - " \"generated\": xxx, (bool) Only present if transaction only input is a coinbase one.\n" - " \"trusted\": xxx, (bool) Only present if we consider transaction to be trusted and so safe to spend from.\n" - " \"blockhash\": \"hashvalue\", (string) The block hash containing the transaction.\n" - " \"blockheight\": n, (numeric) The block height containing the transaction.\n" - " \"blockindex\": n, (numeric) The index of the transaction in the block that includes it.\n" - " \"blocktime\": xxx, (numeric) The block time expressed in " + UNIX_EPOCH_TIME + ".\n" - " \"txid\": \"transactionid\", (string) The transaction id.\n" - " \"walletconflicts\": [ (array) Conflicting transaction ids.\n" + " \"generated\" : xxx, (boolean) Only present if transaction only input is a coinbase one.\n" + " \"trusted\" : xxx, (boolean) Only present if we consider transaction to be trusted and so safe to spend from.\n" + " \"blockhash\" : \"hashvalue\", (string) The block hash containing the transaction.\n" + " \"blockheight\" : n, (numeric) The block height containing the transaction.\n" + " \"blockindex\" : n, (numeric) The index of the transaction in the block that includes it.\n" + " \"blocktime\" : xxx, (numeric) The block time expressed in " + UNIX_EPOCH_TIME + ".\n" + " \"txid\" : \"transactionid\", (string) The transaction id.\n" + " \"walletconflicts\" : [ (json array) Conflicting transaction ids.\n" " \"txid\", (string) The transaction id.\n" " ...\n" " ],\n" - " \"time\": xxx, (numeric) The transaction time expressed in " + UNIX_EPOCH_TIME + ".\n" - " \"timereceived\": xxx, (numeric) The time received expressed in " + UNIX_EPOCH_TIME + ".\n" - " \"comment\": \"...\", (string) If a comment is associated with the transaction, only present if not empty.\n" - " \"bip125-replaceable\": \"yes|no|unknown\", (string) Whether this transaction could be replaced due to BIP125 (replace-by-fee);\n" + " \"time\" : xxx, (numeric) The transaction time expressed in " + UNIX_EPOCH_TIME + ".\n" + " \"timereceived\" : xxx, (numeric) The time received expressed in " + UNIX_EPOCH_TIME + ".\n" + " \"comment\" : \"...\", (string) If a comment is associated with 
the transaction, only present if not empty.\n" + " \"bip125-replaceable\" : \"str\", (string) (\"yes|no|unknown\") Whether this transaction could be replaced due to BIP125 (replace-by-fee);\n" " may be unknown for unconfirmed transactions not in the mempool\n"; } @@ -1409,22 +1418,22 @@ UniValue listtransactions(const JSONRPCRequest& request) RPCResult{ "[\n" " {\n" - " \"involvesWatchonly\": xxx, (bool) Only returns true if imported addresses were involved in transaction.\n" - " \"address\":\"address\", (string) The bitcoin address of the transaction.\n" - " \"category\": (string) The transaction category.\n" + " \"involvesWatchonly\" : xxx, (boolean) Only returns true if imported addresses were involved in transaction.\n" + " \"address\" : \"address\", (string) The bitcoin address of the transaction.\n" + " \"category\" : (string) The transaction category.\n" " \"send\" Transactions sent.\n" " \"receive\" Non-coinbase transactions received.\n" " \"generate\" Coinbase transactions received with more than 100 confirmations.\n" " \"immature\" Coinbase transactions received with 100 or fewer confirmations.\n" " \"orphan\" Orphaned coinbase transactions received.\n" - " \"amount\": x.xxx, (numeric) The amount in " + CURRENCY_UNIT + ". This is negative for the 'send' category, and is positive\n" + " \"amount\" : x.xxx, (numeric) The amount in " + CURRENCY_UNIT + ". This is negative for the 'send' category, and is positive\n" " for all other categories\n" - " \"label\": \"label\", (string) A comment for the address/transaction, if any\n" - " \"vout\": n, (numeric) the vout value\n" - " \"fee\": x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n" + " \"label\" : \"label\", (string) A comment for the address/transaction, if any\n" + " \"vout\" : n, (numeric) the vout value\n" + " \"fee\" : x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n" " 'send' category of transactions.\n" + TransactionDescriptionString() - + " \"abandoned\": xxx (bool) 'true' if the transaction has been abandoned (inputs are respendable). Only available for the \n" + + " \"abandoned\": xxx (boolean) 'true' if the transaction has been abandoned (inputs are respendable). 
Only available for the \n" " 'send' category of transactions.\n" " }\n" "]\n" @@ -1491,23 +1500,10 @@ UniValue listtransactions(const JSONRPCRequest& request) if ((nFrom + nCount) > (int)ret.size()) nCount = ret.size() - nFrom; - std::vector<UniValue> arrTmp = ret.getValues(); - - std::vector<UniValue>::iterator first = arrTmp.begin(); - std::advance(first, nFrom); - std::vector<UniValue>::iterator last = arrTmp.begin(); - std::advance(last, nFrom+nCount); - - if (last != arrTmp.end()) arrTmp.erase(last, arrTmp.end()); - if (first != arrTmp.begin()) arrTmp.erase(arrTmp.begin(), first); - - std::reverse(arrTmp.begin(), arrTmp.end()); // Return oldest to newest - - ret.clear(); - ret.setArray(); - ret.push_backV(arrTmp); - - return ret; + const std::vector<UniValue>& txs = ret.getValues(); + UniValue result{UniValue::VARR}; + result.push_backV({ txs.rend() - nFrom - nCount, txs.rend() - nFrom }); // Return oldest to newest + return result; } static UniValue listsinceblock(const JSONRPCRequest& request) @@ -1531,31 +1527,33 @@ static UniValue listsinceblock(const JSONRPCRequest& request) " (not guaranteed to work on pruned nodes)"}, }, RPCResult{ - "{\n" - " \"transactions\": [\n" - " \"involvesWatchonly\": xxx, (bool) Only returns true if imported addresses were involved in transaction.\n" - " \"address\":\"address\", (string) The bitcoin address of the transaction.\n" - " \"category\": (string) The transaction category.\n" + "{ (json object)\n" + " \"transactions\" : [ (json array)\n" + " { (json object)\n" + " \"involvesWatchonly\" : xxx, (boolean) Only returns true if imported addresses were involved in transaction.\n" + " \"address\" : \"str\", (string) The bitcoin address of the transaction.\n" + " \"category\" : \"str\", (string) The transaction category.\n" " \"send\" Transactions sent.\n" " \"receive\" Non-coinbase transactions received.\n" " \"generate\" Coinbase transactions received with more than 100 confirmations.\n" " \"immature\" Coinbase transactions received with 100 or fewer confirmations.\n" " \"orphan\" Orphaned coinbase transactions received.\n" - " \"amount\": x.xxx, (numeric) The amount in " + CURRENCY_UNIT + ". This is negative for the 'send' category, and is positive\n" + " \"amount\" : x.xxx, (numeric) The amount in " + CURRENCY_UNIT + ". This is negative for the 'send' category, and is positive\n" " for all other categories\n" " \"vout\" : n, (numeric) the vout value\n" - " \"fee\": x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the 'send' category of transactions.\n" + " \"fee\" : x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the 'send' category of transactions.\n" + TransactionDescriptionString() - + " \"abandoned\": xxx, (bool) 'true' if the transaction has been abandoned (inputs are respendable). Only available for the 'send' category of transactions.\n" - " \"comment\": \"...\", (string) If a comment is associated with the transaction.\n" + + " \"abandoned\": xxx, (boolean) 'true' if the transaction has been abandoned (inputs are respendable). 
Only available for the 'send' category of transactions.\n" " \"label\" : \"label\" (string) A comment for the address/transaction, if any\n" - " \"to\": \"...\", (string) If a comment to is associated with the transaction.\n" + " \"to\" : \"...\", (string) If a comment to is associated with the transaction.\n" + " },\n" + " ...\n" " ],\n" - " \"removed\": [\n" + " \"removed\" : [ (json array)\n" " <structure is the same as \"transactions\" above, only present if include_removed=true>\n" " Note: transactions that were re-added in the active chain will appear as-is in this array, and may thus have a positive confirmation count.\n" " ],\n" - " \"lastblock\": \"lastblockhash\" (string) The hash of the block (target_confirmations-1) from the best block on the main chain. This is typically used to feed back into listsinceblock the next time you call it. So you would generally use a target_confirmations of say 6, so you will be continually re-notified of transactions until they've reached 6 confirmations plus any new ones\n" + " \"lastblock\" : \"hex\" (string) The hash of the block (target_confirmations-1) from the best block on the main chain. This is typically used to feed back into listsinceblock the next time you call it. So you would generally use a target_confirmations of say 6, so you will be continually re-notified of transactions until they've reached 6 confirmations plus any new ones\n" "}\n" }, RPCExamples{ @@ -1666,12 +1664,12 @@ static UniValue gettransaction(const JSONRPCRequest& request) RPCResult{ "{\n" " \"amount\" : x.xxx, (numeric) The transaction amount in " + CURRENCY_UNIT + "\n" - " \"fee\": x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n" + " \"fee\" : x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n" " 'send' category of transactions.\n" + TransactionDescriptionString() + " \"details\" : [\n" " {\n" - " \"involvesWatchonly\": xxx, (bool) Only returns true if imported addresses were involved in transaction.\n" + " \"involvesWatchonly\" : xxx, (boolean) Only returns true if imported addresses were involved in transaction.\n" " \"address\" : \"address\", (string) The bitcoin address involved in the transaction\n" " \"category\" : (string) The transaction category.\n" " \"send\" Transactions sent.\n" @@ -1682,9 +1680,9 @@ static UniValue gettransaction(const JSONRPCRequest& request) " \"amount\" : x.xxx, (numeric) The amount in " + CURRENCY_UNIT + "\n" " \"label\" : \"label\", (string) A comment for the address/transaction, if any\n" " \"vout\" : n, (numeric) the vout value\n" - " \"fee\": x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n" + " \"fee\" : x.xxx, (numeric) The amount of the fee in " + CURRENCY_UNIT + ". This is negative and only available for the \n" " 'send' category of transactions.\n" - " \"abandoned\": xxx (bool) 'true' if the transaction has been abandoned (inputs are respendable). Only available for the \n" + " \"abandoned\" : xxx (boolean) 'true' if the transaction has been abandoned (inputs are respendable). 
Only available for the \n" " 'send' category of transactions.\n" " }\n" " ,...\n" @@ -2356,16 +2354,16 @@ static UniValue getbalances(const JSONRPCRequest& request) {}, RPCResult{ "{\n" - " \"mine\": { (object) balances from outputs that the wallet can sign\n" - " \"trusted\": xxx (numeric) trusted balance (outputs created by the wallet or confirmed outputs)\n" - " \"untrusted_pending\": xxx (numeric) untrusted pending balance (outputs created by others that are in the mempool)\n" - " \"immature\": xxx (numeric) balance from immature coinbase outputs\n" - " \"used\": xxx (numeric) (only present if avoid_reuse is set) balance from coins sent to addresses that were previously spent from (potentially privacy violating)\n" + " \"mine\" : { (json object) balances from outputs that the wallet can sign\n" + " \"trusted\" : xxx (numeric) trusted balance (outputs created by the wallet or confirmed outputs)\n" + " \"untrusted_pending\" : xxx (numeric) untrusted pending balance (outputs created by others that are in the mempool)\n" + " \"immature\" : xxx (numeric) balance from immature coinbase outputs\n" + " \"used\" : xxx (numeric) (only present if avoid_reuse is set) balance from coins sent to addresses that were previously spent from (potentially privacy violating)\n" " },\n" - " \"watchonly\": { (object) watchonly balances (not present if wallet does not watch anything)\n" - " \"trusted\": xxx (numeric) trusted balance (outputs created by the wallet or confirmed outputs)\n" - " \"untrusted_pending\": xxx (numeric) untrusted pending balance (outputs created by others that are in the mempool)\n" - " \"immature\": xxx (numeric) balance from immature coinbase outputs\n" + " \"watchonly\" : { (json object) watchonly balances (not present if wallet does not watch anything)\n" + " \"trusted\" : xxx (numeric) trusted balance (outputs created by the wallet or confirmed outputs)\n" + " \"untrusted_pending\" : xxx (numeric) untrusted pending balance (outputs created by others that are in the mempool)\n" + " \"immature\" : xxx (numeric) balance from immature coinbase outputs\n" " },\n" "}\n"}, RPCExamples{ @@ -2422,21 +2420,21 @@ static UniValue getwalletinfo(const JSONRPCRequest& request) {}, RPCResult{ "{\n" - " \"walletname\": xxxxx, (string) the wallet name\n" - " \"walletversion\": xxxxx, (numeric) the wallet version\n" - " \"balance\": xxxxxxx, (numeric) DEPRECATED. Identical to getbalances().mine.trusted\n" - " \"unconfirmed_balance\": xxx, (numeric) DEPRECATED. Identical to getbalances().mine.untrusted_pending\n" - " \"immature_balance\": xxxxxx, (numeric) DEPRECATED. 
Identical to getbalances().mine.immature\n" - " \"txcount\": xxxxxxx, (numeric) the total number of transactions in the wallet\n" - " \"keypoololdest\": xxxxxx, (numeric) the " + UNIX_EPOCH_TIME + " of the oldest pre-generated key in the key pool\n" - " \"keypoolsize\": xxxx, (numeric) how many new keys are pre-generated (only counts external keys)\n" - " \"keypoolsize_hd_internal\": xxxx, (numeric) how many new keys are pre-generated for internal use (used for change outputs, only appears if the wallet is using this feature, otherwise external keys are used)\n" - " \"unlocked_until\": ttt, (numeric) the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked\n" - " \"paytxfee\": x.xxxx, (numeric) the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB\n" - " \"hdseedid\": \"<hash160>\" (string, optional) the Hash160 of the HD seed (only present when HD is enabled)\n" - " \"private_keys_enabled\": true|false (boolean) false if privatekeys are disabled for this wallet (enforced watch-only wallet)\n" - " \"avoid_reuse\": true|false (boolean) whether this wallet tracks clean/dirty coins in terms of reuse\n" - " \"scanning\": (json object) current scanning details, or false if no scan is in progress\n" + " \"walletname\" : xxxxx, (string) the wallet name\n" + " \"walletversion\" : xxxxx, (numeric) the wallet version\n" + " \"balance\" : xxxxxxx, (numeric) DEPRECATED. Identical to getbalances().mine.trusted\n" + " \"unconfirmed_balance\" : xxx, (numeric) DEPRECATED. Identical to getbalances().mine.untrusted_pending\n" + " \"immature_balance\" : xxxxxx, (numeric) DEPRECATED. Identical to getbalances().mine.immature\n" + " \"txcount\" : xxxxxxx, (numeric) the total number of transactions in the wallet\n" + " \"keypoololdest\" : xxxxxx, (numeric) the " + UNIX_EPOCH_TIME + " of the oldest pre-generated key in the key pool\n" + " \"keypoolsize\" : xxxx, (numeric) how many new keys are pre-generated (only counts external keys)\n" + " \"keypoolsize_hd_internal\" : xxxx, (numeric) how many new keys are pre-generated for internal use (used for change outputs, only appears if the wallet is using this feature, otherwise external keys are used)\n" + " \"unlocked_until\" : ttt, (numeric) the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked\n" + " \"paytxfee\" : x.xxxx, (numeric) the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB\n" + " \"hdseedid\" : \"<hash160>\" (string, optional) the Hash160 of the HD seed (only present when HD is enabled)\n" + " \"private_keys_enabled\" : true|false (boolean) false if privatekeys are disabled for this wallet (enforced watch-only wallet)\n" + " \"avoid_reuse\" : true|false (boolean) whether this wallet tracks clean/dirty coins in terms of reuse\n" + " \"scanning\" : (json object) current scanning details, or false if no scan is in progress\n" " {\n" " \"duration\" : xxxx (numeric) elapsed seconds since scan start\n" " \"progress\" : x.xxxx, (numeric) scanning progress percentage [0.0, 1.0]\n" @@ -2629,9 +2627,9 @@ static UniValue setwalletflag(const JSONRPCRequest& request) }, RPCResult{ "{\n" - " \"flag_name\": string (string) The name of the flag that was modified\n" - " \"flag_state\": bool (bool) The new state of the flag\n" - " \"warnings\": string (string) Any warnings associated with the change\n" + " \"flag_name\" : string (string) The name of the flag that was modified\n" + " \"flag_state\" : bool (boolean) The new state of the flag\n" 
+ " \"warnings\" : string (string) Any warnings associated with the change\n" "}\n" }, RPCExamples{ @@ -2828,11 +2826,11 @@ static UniValue listunspent(const JSONRPCRequest& request) " \"confirmations\" : n, (numeric) The number of confirmations\n" " \"redeemScript\" : \"script\" (string) The redeemScript if scriptPubKey is P2SH\n" " \"witnessScript\" : \"script\" (string) witnessScript if the scriptPubKey is P2WSH or P2SH-P2WSH\n" - " \"spendable\" : xxx, (bool) Whether we have the private keys to spend this output\n" - " \"solvable\" : xxx, (bool) Whether we know how to spend this output, ignoring the lack of keys\n" - " \"reused\" : xxx, (bool) (only present if avoid_reuse is set) Whether this output is reused/dirty (sent to an address that was previously spent from)\n" + " \"spendable\" : xxx, (boolean) Whether we have the private keys to spend this output\n" + " \"solvable\" : xxx, (boolean) Whether we know how to spend this output, ignoring the lack of keys\n" + " \"reused\" : xxx, (boolean) (only present if avoid_reuse is set) Whether this output is reused/dirty (sent to an address that was previously spent from)\n" " \"desc\" : xxx, (string, only when solvable) A descriptor for spending this output\n" - " \"safe\" : xxx (bool) Whether this output is considered safe to spend. Unconfirmed transactions\n" + " \"safe\" : xxx (boolean) Whether this output is considered safe to spend. Unconfirmed transactions\n" " from outside keys and unconfirmed replacement transactions are considered unsafe\n" " and are not eligible for spending by fundrawtransaction and sendtoaddress.\n" " }\n" @@ -2944,7 +2942,7 @@ static UniValue listunspent(const JSONRPCRequest& request) entry.pushKV("label", i->second.name); } - const SigningProvider* provider = pwallet->GetSigningProvider(scriptPubKey); + std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(scriptPubKey); if (provider) { if (scriptPubKey.IsPayToScriptHash()) { const CScriptID& hash = CScriptID(boost::get<ScriptHash>(address)); @@ -2984,7 +2982,7 @@ static UniValue listunspent(const JSONRPCRequest& request) entry.pushKV("spendable", out.fSpendable); entry.pushKV("solvable", out.fSolvable); if (out.fSolvable) { - const SigningProvider* provider = pwallet->GetSigningProvider(scriptPubKey); + std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(scriptPubKey); if (provider) { auto descriptor = InferDescriptor(scriptPubKey, *provider); entry.pushKV("desc", descriptor->ToString()); @@ -3176,9 +3174,9 @@ static UniValue fundrawtransaction(const JSONRPCRequest& request) }, RPCResult{ "{\n" - " \"hex\": \"value\", (string) The resulting raw transaction (hex-encoded string)\n" - " \"fee\": n, (numeric) Fee in " + CURRENCY_UNIT + " the resulting transaction pays\n" - " \"changepos\": n (numeric) The position of the added change output, or -1\n" + " \"hex\" : \"value\", (string) The resulting raw transaction (hex-encoded string)\n" + " \"fee\" : n, (numeric) Fee in " + CURRENCY_UNIT + " the resulting transaction pays\n" + " \"changepos\" : n (numeric) The position of the added change output, or -1\n" "}\n" }, RPCExamples{ @@ -3297,21 +3295,21 @@ UniValue signrawtransactionwithwallet(const JSONRPCRequest& request) // Parse the prevtxs array ParsePrevouts(request.params[1], nullptr, coins); - std::set<const SigningProvider*> providers; + std::set<std::shared_ptr<SigningProvider>> providers; for (const std::pair<COutPoint, Coin> coin_pair : coins) { - const SigningProvider* provider = 
pwallet->GetSigningProvider(coin_pair.second.out.scriptPubKey); + std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(coin_pair.second.out.scriptPubKey); if (provider) { providers.insert(std::move(provider)); } } if (providers.size() == 0) { - // When there are no available providers, use DUMMY_SIGNING_PROVIDER so we can check if the tx is complete - providers.insert(&DUMMY_SIGNING_PROVIDER); + // When there are no available providers, use a dummy SigningProvider so we can check if the tx is complete + providers.insert(std::make_shared<SigningProvider>()); } UniValue result(UniValue::VOBJ); - for (const SigningProvider* provider : providers) { - SignTransaction(mtx, provider, coins, request.params[2], result); + for (std::shared_ptr<SigningProvider> provider : providers) { + SignTransaction(mtx, provider.get(), coins, request.params[2], result); } return result; } @@ -3365,11 +3363,11 @@ static UniValue bumpfee(const JSONRPCRequest& request) }, RPCResult{ "{\n" - " \"psbt\": \"psbt\", (string) The base64-encoded unsigned PSBT of the new transaction. Only returned when wallet private keys are disabled.\n" - " \"txid\": \"value\", (string) The id of the new transaction. Only returned when wallet private keys are enabled.\n" - " \"origfee\": n, (numeric) The fee of the replaced transaction.\n" - " \"fee\": n, (numeric) The fee of the new transaction.\n" - " \"errors\": [ str... ] (json array of strings) Errors encountered during processing (may be empty).\n" + " \"psbt\" : \"psbt\", (string) The base64-encoded unsigned PSBT of the new transaction. Only returned when wallet private keys are disabled.\n" + " \"txid\" : \"value\", (string) The id of the new transaction. Only returned when wallet private keys are enabled.\n" + " \"origfee\" : n, (numeric) The fee of the replaced transaction.\n" + " \"fee\" : n, (numeric) The fee of the new transaction.\n" + " \"errors\" : [ str... ] (json array of strings) Errors encountered during processing (may be empty).\n" "}\n" }, RPCExamples{ @@ -3697,12 +3695,12 @@ static UniValue DescribeWalletAddress(CWallet* pwallet, const CTxDestination& de UniValue ret(UniValue::VOBJ); UniValue detail = DescribeAddress(dest); CScript script = GetScriptForDestination(dest); - const SigningProvider* provider = nullptr; + std::unique_ptr<SigningProvider> provider = nullptr; if (pwallet) { provider = pwallet->GetSigningProvider(script); } ret.pushKVs(detail); - ret.pushKVs(boost::apply_visitor(DescribeWalletAddressVisitor(provider), dest)); + ret.pushKVs(boost::apply_visitor(DescribeWalletAddressVisitor(provider.get()), dest)); return ret; } @@ -3762,18 +3760,18 @@ UniValue getaddressinfo(const JSONRPCRequest& request) " getaddressinfo output fields for the embedded address, excluding metadata (timestamp, hdkeypath,\n" " hdseedid) and relation to the wallet (ismine, iswatchonly).\n" " \"iscompressed\" : true|false, (boolean, optional) If the pubkey is compressed.\n" - " \"label\" : \"label\" (string) The label associated with the address. Defaults to \"\". Equivalent to the label name in the labels array below.\n" + " \"label\" : \"label\" (string) DEPRECATED. The label associated with the address. Defaults to \"\". 
Replaced by the labels array below.\n" " \"timestamp\" : timestamp, (number, optional) The creation time of the key, if available, expressed in " + UNIX_EPOCH_TIME + ".\n" " \"hdkeypath\" : \"keypath\" (string, optional) The HD keypath, if the key is HD and available.\n" " \"hdseedid\" : \"<hash160>\" (string, optional) The Hash160 of the HD seed.\n" " \"hdmasterfingerprint\" : \"<hash160>\" (string, optional) The fingerprint of the master key.\n" - " \"labels\" (json object) An array of labels associated with the address. Currently limited to one label but returned\n" - " as an array to keep the API stable if multiple labels are enabled in the future.\n" + " \"labels\" (json array) Array of labels associated with the address. Currently limited to one label but returned\n" + " as an array to keep the API stable if multiple labels are enabled in the future.\n" " [\n" - " \"label name\" (string) The label name. Defaults to \"\". Equivalent to the label field above.\n\n" + " \"label name\" (string) The label name. Defaults to \"\".\n" " DEPRECATED, will be removed in 0.21. To re-enable, launch bitcoind with `-deprecatedrpc=labelspurpose`:\n" - " { (json object of label data)\n" - " \"name\" : \"label name\" (string) The label name. Defaults to \"\". Equivalent to the label field above.\n" + " {\n" + " \"name\" : \"label name\" (string) The label name. Defaults to \"\".\n" " \"purpose\" : \"purpose\" (string) The purpose of the associated address (send or receive).\n" " }\n" " ]\n" @@ -3800,7 +3798,7 @@ UniValue getaddressinfo(const JSONRPCRequest& request) CScript scriptPubKey = GetScriptForDestination(dest); ret.pushKV("scriptPubKey", HexStr(scriptPubKey.begin(), scriptPubKey.end())); - const SigningProvider* provider = pwallet->GetSigningProvider(scriptPubKey); + std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(scriptPubKey); isminetype mine = pwallet->IsMine(dest); ret.pushKV("ismine", bool(mine & ISMINE_SPENDABLE)); @@ -3817,10 +3815,10 @@ UniValue getaddressinfo(const JSONRPCRequest& request) UniValue detail = DescribeWalletAddress(pwallet, dest); ret.pushKVs(detail); - // Return label field if existing. Currently only one label can be - // associated with an address, so the label should be equivalent to the + // DEPRECATED: Return label field if existing. Currently only one label can + // be associated with an address, so the label should be equivalent to the // value of the name key/value pair in the labels array below. - if (pwallet->mapAddressBook.count(dest)) { + if ((pwallet->chain().rpcEnableDeprecated("label")) && (pwallet->mapAddressBook.count(dest))) { ret.pushKV("label", pwallet->mapAddressBook[dest].name); } @@ -3843,12 +3841,11 @@ UniValue getaddressinfo(const JSONRPCRequest& request) // associated with an address, but we return an array so the API remains // stable if we allow multiple labels to be associated with an address in // the future. - // - // DEPRECATED: The previous behavior of returning an array containing a JSON - // object of `name` and `purpose` key/value pairs has been deprecated. UniValue labels(UniValue::VARR); std::map<CTxDestination, CAddressBookData>::iterator mi = pwallet->mapAddressBook.find(dest); if (mi != pwallet->mapAddressBook.end()) { + // DEPRECATED: The previous behavior of returning an array containing a + // JSON object of `name` and `purpose` key/value pairs is deprecated. 
if (pwallet->chain().rpcEnableDeprecated("labelspurpose")) { labels.push_back(AddressBookDataToJSON(mi->second, true)); } else { @@ -3876,8 +3873,8 @@ static UniValue getaddressesbylabel(const JSONRPCRequest& request) }, RPCResult{ "{ (json object with addresses as keys)\n" - " \"address\": { (json object with information about address)\n" - " \"purpose\": \"string\" (string) Purpose of address (\"send\" for sending address, \"receive\" for receiving address)\n" + " \"address\" : { (json object with information about address)\n" + " \"purpose\" : \"string\" (string) Purpose of address (\"send\" for sending address, \"receive\" for receiving address)\n" " },...\n" "}\n" }, @@ -4003,7 +4000,7 @@ UniValue sethdseed(const JSONRPCRequest& request) }, }.Check(request); - LegacyScriptPubKeyMan& spk_man = EnsureLegacyScriptPubKeyMan(*pwallet); + LegacyScriptPubKeyMan& spk_man = EnsureLegacyScriptPubKeyMan(*pwallet, true); if (pwallet->chain().isInitialBlockDownload()) { throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Cannot set a new HD seed while still in Initial Block Download"); @@ -4014,7 +4011,7 @@ UniValue sethdseed(const JSONRPCRequest& request) } auto locked_chain = pwallet->chain().lock(); - LOCK(pwallet->cs_wallet); + LOCK2(pwallet->cs_wallet, spk_man.cs_KeyStore); // Do not do anything to non-HD wallets if (!pwallet->CanSupportFeature(FEATURE_HD)) { @@ -4076,10 +4073,9 @@ UniValue walletprocesspsbt(const JSONRPCRequest& request) {"bip32derivs", RPCArg::Type::BOOL, /* default */ "false", "If true, includes the BIP 32 derivation paths for public keys if we know them"}, }, RPCResult{ - "{\n" - " \"psbt\" : \"value\", (string) The base64-encoded partially signed transaction\n" + "{ (json object)\n" + " \"psbt\" : \"str\", (string) The base64-encoded partially signed transaction\n" " \"complete\" : true|false, (boolean) If the transaction has a complete set of signatures\n" - " ]\n" "}\n" }, RPCExamples{ @@ -4188,9 +4184,9 @@ UniValue walletcreatefundedpsbt(const JSONRPCRequest& request) }, RPCResult{ "{\n" - " \"psbt\": \"value\", (string) The resulting raw transaction (base64-encoded string)\n" - " \"fee\": n, (numeric) Fee in " + CURRENCY_UNIT + " the resulting transaction pays\n" - " \"changepos\": n (numeric) The position of the added change output, or -1\n" + " \"psbt\" : \"value\", (string) The resulting raw transaction (base64-encoded string)\n" + " \"fee\" : n, (numeric) Fee in " + CURRENCY_UNIT + " the resulting transaction pays\n" + " \"changepos\" : n (numeric) The position of the added change output, or -1\n" "}\n" }, RPCExamples{ diff --git a/src/wallet/rpcwallet.h b/src/wallet/rpcwallet.h index becca455f6..2813fa2bfc 100644 --- a/src/wallet/rpcwallet.h +++ b/src/wallet/rpcwallet.h @@ -41,7 +41,7 @@ std::shared_ptr<CWallet> GetWalletForJSONRPCRequest(const JSONRPCRequest& reques std::string HelpRequiringPassphrase(const CWallet*); void EnsureWalletIsUnlocked(const CWallet*); bool EnsureWalletIsAvailable(const CWallet*, bool avoidException); -LegacyScriptPubKeyMan& EnsureLegacyScriptPubKeyMan(CWallet& wallet); +LegacyScriptPubKeyMan& EnsureLegacyScriptPubKeyMan(CWallet& wallet, bool also_create = false); UniValue getaddressinfo(const JSONRPCRequest& request); UniValue signrawtransactionwithwallet(const JSONRPCRequest& request); diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index be8a71da97..4c9d88973e 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -9,10 +9,10 @@ #include <util/strencodings.h> #include 
<util/translation.h> #include <wallet/scriptpubkeyman.h> -#include <wallet/wallet.h> bool LegacyScriptPubKeyMan::GetNewDestination(const OutputType type, CTxDestination& dest, std::string& error) { + LOCK(cs_KeyStore); error.clear(); // Generate a new key that is added to wallet @@ -238,7 +238,6 @@ bool LegacyScriptPubKeyMan::CheckDecryptionKey(const CKeyingMaterial& master_key bool LegacyScriptPubKeyMan::Encrypt(const CKeyingMaterial& master_key, WalletBatch* batch) { - AssertLockHeld(cs_wallet); LOCK(cs_KeyStore); encrypted_batch = batch; if (!mapCryptedKeys.empty()) { @@ -269,6 +268,7 @@ bool LegacyScriptPubKeyMan::Encrypt(const CKeyingMaterial& master_key, WalletBat bool LegacyScriptPubKeyMan::GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool) { + LOCK(cs_KeyStore); if (!CanGetAddresses(internal)) { return false; } @@ -282,7 +282,7 @@ bool LegacyScriptPubKeyMan::GetReservedDestination(const OutputType type, bool i void LegacyScriptPubKeyMan::MarkUnusedAddresses(const CScript& script) { - AssertLockHeld(cs_wallet); + LOCK(cs_KeyStore); // extract addresses and check if they match with an unused keypool key for (const auto& keyid : GetAffectedKeys(script, *this)) { std::map<CKeyID, int64_t>::const_iterator mi = m_pool_key_to_index.find(keyid); @@ -299,7 +299,7 @@ void LegacyScriptPubKeyMan::MarkUnusedAddresses(const CScript& script) void LegacyScriptPubKeyMan::UpgradeKeyMetadata() { - AssertLockHeld(cs_wallet); + LOCK(cs_KeyStore); if (m_storage.IsLocked() || m_storage.IsWalletFlagSet(WALLET_FLAG_KEY_ORIGIN_METADATA)) { return; } @@ -352,7 +352,7 @@ bool LegacyScriptPubKeyMan::IsHDEnabled() const bool LegacyScriptPubKeyMan::CanGetAddresses(bool internal) { - LOCK(cs_wallet); + LOCK(cs_KeyStore); // Check if the keypool has keys bool keypool_has_keys; if (internal && m_storage.CanSupportFeature(FEATURE_HD_SPLIT)) { @@ -369,7 +369,7 @@ bool LegacyScriptPubKeyMan::CanGetAddresses(bool internal) bool LegacyScriptPubKeyMan::Upgrade(int prev_version, std::string& error) { - AssertLockHeld(cs_wallet); + LOCK(cs_KeyStore); error = ""; bool hd_upgrade = false; bool split_upgrade = false; @@ -383,7 +383,7 @@ bool LegacyScriptPubKeyMan::Upgrade(int prev_version, std::string& error) hd_upgrade = true; } // Upgrade to HD chain split if necessary - if (m_storage.CanSupportFeature(FEATURE_HD_SPLIT)) { + if (m_storage.CanSupportFeature(FEATURE_HD_SPLIT) && CHDChain::VERSION_HD_CHAIN_SPLIT) { WalletLogPrintf("Upgrading wallet to use HD chain split\n"); m_storage.SetMinVersion(FEATURE_PRE_SPLIT_KEYPOOL); split_upgrade = FEATURE_HD_SPLIT > prev_version; @@ -410,7 +410,7 @@ bool LegacyScriptPubKeyMan::HavePrivateKeys() const void LegacyScriptPubKeyMan::RewriteDB() { - AssertLockHeld(cs_wallet); + LOCK(cs_KeyStore); setInternalKeyPool.clear(); setExternalKeyPool.clear(); m_pool_key_to_index.clear(); @@ -435,7 +435,7 @@ static int64_t GetOldestKeyTimeInPool(const std::set<int64_t>& setKeyPool, Walle int64_t LegacyScriptPubKeyMan::GetOldestKeyPoolTime() { - LOCK(cs_wallet); + LOCK(cs_KeyStore); WalletBatch batch(m_storage.GetDatabase()); @@ -453,25 +453,53 @@ int64_t LegacyScriptPubKeyMan::GetOldestKeyPoolTime() size_t LegacyScriptPubKeyMan::KeypoolCountExternalKeys() { - AssertLockHeld(cs_wallet); + LOCK(cs_KeyStore); return setExternalKeyPool.size() + set_pre_split_keypool.size(); } unsigned int LegacyScriptPubKeyMan::GetKeyPoolSize() const { - AssertLockHeld(cs_wallet); + LOCK(cs_KeyStore); return setInternalKeyPool.size() + 
setExternalKeyPool.size() + set_pre_split_keypool.size(); } int64_t LegacyScriptPubKeyMan::GetTimeFirstKey() const { - AssertLockHeld(cs_wallet); + LOCK(cs_KeyStore); return nTimeFirstKey; } +std::unique_ptr<SigningProvider> LegacyScriptPubKeyMan::GetSigningProvider(const CScript& script) const +{ + return MakeUnique<LegacySigningProvider>(*this); +} + +bool LegacyScriptPubKeyMan::CanProvide(const CScript& script, SignatureData& sigdata) +{ + if (IsMine(script) != ISMINE_NO) { + // If it IsMine, we can always provide in some way + return true; + } else if (HaveCScript(CScriptID(script))) { + // We can still provide some stuff if we have the script, but IsMine failed because we don't have keys + return true; + } else { + // If, given the stuff in sigdata, we could make a valid sigature, then we can provide for this script + ProduceSignature(*this, DUMMY_SIGNATURE_CREATOR, script, sigdata); + if (!sigdata.signatures.empty()) { + // If we could make signatures, make sure we have a private key to actually make a signature + bool has_privkeys = false; + for (const auto& key_sig_pair : sigdata.signatures) { + has_privkeys |= HaveKey(key_sig_pair.first); + } + return has_privkeys; + } + return false; + } +} + const CKeyMetadata* LegacyScriptPubKeyMan::GetMetadata(const CTxDestination& dest) const { - AssertLockHeld(cs_wallet); + LOCK(cs_KeyStore); CKeyID key_id = GetKeyForDestination(*this, dest); if (!key_id.IsNull()) { @@ -490,13 +518,18 @@ const CKeyMetadata* LegacyScriptPubKeyMan::GetMetadata(const CTxDestination& des return nullptr; } +uint256 LegacyScriptPubKeyMan::GetID() const +{ + return UINT256_ONE(); +} + /** * Update wallet first key creation time. This should be called whenever keys * are added to the wallet, with the oldest key creation time. */ void LegacyScriptPubKeyMan::UpdateTimeFirstKey(int64_t nCreateTime) { - AssertLockHeld(cs_wallet); + AssertLockHeld(cs_KeyStore); if (nCreateTime <= 1) { // Cannot determine birthday information, so set the wallet birthday to // the beginning of time. 
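// [Editor's sketch, not part of the diff] The hunks above and the earlier rpcwallet.cpp
// hunks replace borrowed `const SigningProvider*` lookups with owning smart pointers:
// the wallet now hands back a std::unique_ptr<SigningProvider>, and call sites that
// must de-duplicate providers (signrawtransactionwithwallet) keep them in a set of
// std::shared_ptr, falling back to a freshly constructed dummy provider when no
// ScriptPubKeyMan can sign. The self-contained C++ sketch below illustrates that
// ownership pattern only; the Wallet/Provider names and behaviour are illustrative
// assumptions, not Bitcoin Core API.
#include <iostream>
#include <memory>
#include <set>
#include <string>
#include <vector>

struct Provider {                       // hypothetical stand-in for SigningProvider
    std::string name;
    explicit Provider(std::string n) : name(std::move(n)) {}
};

struct Wallet {                         // hypothetical stand-in for CWallet
    // Each lookup returns an owning pointer, so the caller controls the lifetime
    // instead of borrowing storage owned by the wallet.
    std::unique_ptr<Provider> GetSigningProvider(const std::string& script) const {
        if (script.empty()) return nullptr;        // nothing available to sign with
        return std::make_unique<Provider>(script); // fresh owning wrapper per query
    }
};

int main() {
    Wallet wallet;
    const std::vector<std::string> scripts{"p2pkh", "p2wpkh", ""};

    // Collect providers as shared_ptr so the set owns them; shared_ptr's converting
    // constructor accepts the unique_ptr rvalue directly.
    std::set<std::shared_ptr<Provider>> providers;
    for (const auto& script : scripts) {
        if (auto provider = wallet.GetSigningProvider(script)) {
            providers.insert(std::move(provider));
        }
    }
    if (providers.empty()) {
        // Mirror of the "no available providers" fallback: insert a dummy provider so
        // completeness of the transaction can still be checked.
        providers.insert(std::make_shared<Provider>("dummy"));
    }
    for (const auto& provider : providers) {
        std::cout << "signing with provider: " << provider->name << '\n';
    }
    return 0;
}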
@@ -513,13 +546,14 @@ bool LegacyScriptPubKeyMan::LoadKey(const CKey& key, const CPubKey &pubkey) bool LegacyScriptPubKeyMan::AddKeyPubKey(const CKey& secret, const CPubKey &pubkey) { + LOCK(cs_KeyStore); WalletBatch batch(m_storage.GetDatabase()); return LegacyScriptPubKeyMan::AddKeyPubKeyWithDB(batch, secret, pubkey); } bool LegacyScriptPubKeyMan::AddKeyPubKeyWithDB(WalletBatch& batch, const CKey& secret, const CPubKey& pubkey) { - AssertLockHeld(cs_wallet); + AssertLockHeld(cs_KeyStore); // Make sure we aren't adding private keys to private key disabled wallets assert(!m_storage.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)); @@ -574,14 +608,14 @@ bool LegacyScriptPubKeyMan::LoadCScript(const CScript& redeemScript) void LegacyScriptPubKeyMan::LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata& meta) { - AssertLockHeld(cs_wallet); + LOCK(cs_KeyStore); UpdateTimeFirstKey(meta.nCreateTime); mapKeyMetadata[keyID] = meta; } void LegacyScriptPubKeyMan::LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata& meta) { - AssertLockHeld(cs_wallet); + LOCK(cs_KeyStore); UpdateTimeFirstKey(meta.nCreateTime); m_script_metadata[script_id] = meta; } @@ -630,7 +664,7 @@ bool LegacyScriptPubKeyMan::AddCryptedKey(const CPubKey &vchPubKey, if (!AddCryptedKeyInner(vchPubKey, vchCryptedSecret)) return false; { - LOCK(cs_wallet); + LOCK(cs_KeyStore); if (encrypted_batch) return encrypted_batch->WriteCryptedKey(vchPubKey, vchCryptedSecret, @@ -663,7 +697,6 @@ static bool ExtractPubKey(const CScript &dest, CPubKey& pubKeyOut) bool LegacyScriptPubKeyMan::RemoveWatchOnly(const CScript &dest) { - AssertLockHeld(cs_wallet); { LOCK(cs_KeyStore); setWatchOnly.erase(dest); @@ -734,7 +767,7 @@ bool LegacyScriptPubKeyMan::AddWatchOnly(const CScript& dest, int64_t nCreateTim void LegacyScriptPubKeyMan::SetHDChain(const CHDChain& chain, bool memonly) { - LOCK(cs_wallet); + LOCK(cs_KeyStore); if (!memonly && !WalletBatch(m_storage.GetDatabase()).WriteHDChain(chain)) throw std::runtime_error(std::string(__func__) + ": writing chain failed"); @@ -771,7 +804,7 @@ bool LegacyScriptPubKeyMan::GetKeyOrigin(const CKeyID& keyID, KeyOriginInfo& inf { CKeyMetadata meta; { - LOCK(cs_wallet); + LOCK(cs_KeyStore); auto it = mapKeyMetadata.find(keyID); if (it != mapKeyMetadata.end()) { meta = it->second; @@ -821,7 +854,7 @@ CPubKey LegacyScriptPubKeyMan::GenerateNewKey(WalletBatch &batch, bool internal) { assert(!m_storage.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)); assert(!m_storage.IsWalletFlagSet(WALLET_FLAG_BLANK_WALLET)); - AssertLockHeld(cs_wallet); + AssertLockHeld(cs_KeyStore); bool fCompressed = m_storage.CanSupportFeature(FEATURE_COMPRPUBKEY); // default to compressed public keys if we want 0.6.0 wallets CKey secret; @@ -913,7 +946,7 @@ void LegacyScriptPubKeyMan::DeriveNewChildKey(WalletBatch &batch, CKeyMetadata& void LegacyScriptPubKeyMan::LoadKeyPool(int64_t nIndex, const CKeyPool &keypool) { - AssertLockHeld(cs_wallet); + LOCK(cs_KeyStore); if (keypool.m_pre_split) { set_pre_split_keypool.insert(nIndex); } else if (keypool.fInternal) { @@ -935,7 +968,7 @@ void LegacyScriptPubKeyMan::LoadKeyPool(int64_t nIndex, const CKeyPool &keypool) bool LegacyScriptPubKeyMan::CanGenerateKeys() { // A wallet can generate keys if it has an HD seed (IsHDEnabled) or it is a non-HD wallet (pre FEATURE_HD) - LOCK(cs_wallet); + LOCK(cs_KeyStore); return IsHDEnabled() || !m_storage.CanSupportFeature(FEATURE_HD); } @@ -962,7 +995,7 @@ CPubKey LegacyScriptPubKeyMan::DeriveNewSeed(const CKey& key) metadata.hd_seed_id = 
seed.GetID(); { - LOCK(cs_wallet); + LOCK(cs_KeyStore); // mem store the metadata mapKeyMetadata[seed.GetID()] = metadata; @@ -977,7 +1010,7 @@ CPubKey LegacyScriptPubKeyMan::DeriveNewSeed(const CKey& key) void LegacyScriptPubKeyMan::SetHDSeed(const CPubKey& seed) { - LOCK(cs_wallet); + LOCK(cs_KeyStore); // store the keyid (hash160) together with // the child index counter in the database // as a hdchain object @@ -1000,7 +1033,7 @@ bool LegacyScriptPubKeyMan::NewKeyPool() return false; } { - LOCK(cs_wallet); + LOCK(cs_KeyStore); WalletBatch batch(m_storage.GetDatabase()); for (const int64_t nIndex : setInternalKeyPool) { @@ -1034,7 +1067,7 @@ bool LegacyScriptPubKeyMan::TopUp(unsigned int kpSize) return false; } { - LOCK(cs_wallet); + LOCK(cs_KeyStore); if (m_storage.IsLocked()) return false; @@ -1076,7 +1109,7 @@ bool LegacyScriptPubKeyMan::TopUp(unsigned int kpSize) void LegacyScriptPubKeyMan::AddKeypoolPubkeyWithDB(const CPubKey& pubkey, const bool internal, WalletBatch& batch) { - LOCK(cs_wallet); + LOCK(cs_KeyStore); assert(m_max_keypool_index < std::numeric_limits<int64_t>::max()); // How in the hell did you use so many keys? int64_t index = ++m_max_keypool_index; if (!batch.WritePool(index, CKeyPool(pubkey, internal))) { @@ -1107,7 +1140,7 @@ void LegacyScriptPubKeyMan::ReturnDestination(int64_t nIndex, bool fInternal, co { // Return to key pool { - LOCK(cs_wallet); + LOCK(cs_KeyStore); if (fInternal) { setInternalKeyPool.insert(nIndex); } else if (!set_pre_split_keypool.empty()) { @@ -1131,7 +1164,7 @@ bool LegacyScriptPubKeyMan::GetKeyFromPool(CPubKey& result, const OutputType typ CKeyPool keypool; { - LOCK(cs_wallet); + LOCK(cs_KeyStore); int64_t nIndex; if (!ReserveKeyFromKeyPool(nIndex, keypool, internal) && !m_storage.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) { if (m_storage.IsLocked()) return false; @@ -1150,7 +1183,7 @@ bool LegacyScriptPubKeyMan::ReserveKeyFromKeyPool(int64_t& nIndex, CKeyPool& key nIndex = -1; keypool.vchPubKey = CPubKey(); { - LOCK(cs_wallet); + LOCK(cs_KeyStore); bool fReturningInternal = fRequestedInternal; fReturningInternal &= (IsHDEnabled() && m_storage.CanSupportFeature(FEATURE_HD_SPLIT)) || m_storage.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS); @@ -1210,7 +1243,7 @@ void LegacyScriptPubKeyMan::LearnAllRelatedScripts(const CPubKey& key) void LegacyScriptPubKeyMan::MarkReserveKeysAsUsed(int64_t keypool_id) { - AssertLockHeld(cs_wallet); + AssertLockHeld(cs_KeyStore); bool internal = setInternalKeyPool.count(keypool_id); if (!internal) assert(setExternalKeyPool.count(keypool_id) || set_pre_split_keypool.count(keypool_id)); std::set<int64_t> *setKeyPool = internal ? &setInternalKeyPool : (set_pre_split_keypool.empty() ? &setExternalKeyPool : &set_pre_split_keypool); @@ -1281,7 +1314,7 @@ bool LegacyScriptPubKeyMan::AddCScriptWithDB(WalletBatch& batch, const CScript& bool LegacyScriptPubKeyMan::AddKeyOriginWithDB(WalletBatch& batch, const CPubKey& pubkey, const KeyOriginInfo& info) { - LOCK(cs_wallet); + LOCK(cs_KeyStore); std::copy(info.fingerprint, info.fingerprint + 4, mapKeyMetadata[pubkey.GetID()].key_origin.fingerprint); mapKeyMetadata[pubkey.GetID()].key_origin.path = info.path; mapKeyMetadata[pubkey.GetID()].has_key_origin = true; @@ -1393,13 +1426,3 @@ std::set<CKeyID> LegacyScriptPubKeyMan::GetKeys() const } return set_address; } - -// Temporary CWallet accessors and aliases. 
-LegacyScriptPubKeyMan::LegacyScriptPubKeyMan(CWallet& wallet) - : ScriptPubKeyMan(wallet), - m_wallet(wallet), - cs_wallet(wallet.cs_wallet) {} - -void LegacyScriptPubKeyMan::NotifyWatchonlyChanged(bool fHaveWatchOnly) const { return m_wallet.NotifyWatchonlyChanged(fHaveWatchOnly); } -void LegacyScriptPubKeyMan::NotifyCanGetAddressesChanged() const { return m_wallet.NotifyCanGetAddressesChanged(); } -template<typename... Params> void LegacyScriptPubKeyMan::WalletLogPrintf(const std::string& fmt, const Params&... parameters) const { return m_wallet.WalletLogPrintf(fmt, parameters...); } diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h index bef646755c..7b1c023bc9 100644 --- a/src/wallet/scriptpubkeyman.h +++ b/src/wallet/scriptpubkeyman.h @@ -201,8 +201,28 @@ public: virtual int64_t GetTimeFirstKey() const { return 0; } - //! Return address metadata virtual const CKeyMetadata* GetMetadata(const CTxDestination& dest) const { return nullptr; } + + virtual std::unique_ptr<SigningProvider> GetSigningProvider(const CScript& script) const { return nullptr; } + + /** Whether this ScriptPubKeyMan can provide a SigningProvider (via GetSigningProvider) that, combined with + * sigdata, can produce a valid signature. + */ + virtual bool CanProvide(const CScript& script, SignatureData& sigdata) { return false; } + + virtual uint256 GetID() const { return uint256(); } + + /** Prepends the wallet name in logging output to ease debugging in multi-wallet use cases */ + template<typename... Params> + void WalletLogPrintf(std::string fmt, Params... parameters) const { + LogPrintf(("%s " + fmt).c_str(), m_storage.GetDisplayName(), parameters...); + }; + + /** Watch-only address added */ + boost::signals2::signal<void (bool fHaveWatchOnly)> NotifyWatchonlyChanged; + + /** Keypool has new keys */ + boost::signals2::signal<void ()> NotifyCanGetAddressesChanged; }; class LegacyScriptPubKeyMan : public ScriptPubKeyMan, public FillableSigningProvider @@ -214,7 +234,7 @@ private: using WatchOnlySet = std::set<CScript>; using WatchKeyMap = std::map<CKeyID, CPubKey>; - WalletBatch *encrypted_batch GUARDED_BY(cs_wallet) = nullptr; + WalletBatch *encrypted_batch GUARDED_BY(cs_KeyStore) = nullptr; using CryptedKeyMap = std::map<CKeyID, std::pair<CPubKey, std::vector<unsigned char>>>; @@ -222,7 +242,7 @@ private: WatchOnlySet setWatchOnly GUARDED_BY(cs_KeyStore); WatchKeyMap mapWatchKeys GUARDED_BY(cs_KeyStore); - int64_t nTimeFirstKey GUARDED_BY(cs_wallet) = 0; + int64_t nTimeFirstKey GUARDED_BY(cs_KeyStore) = 0; bool AddKeyPubKeyInner(const CKey& key, const CPubKey &pubkey); bool AddCryptedKeyInner(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret); @@ -236,14 +256,14 @@ private: * of the other AddWatchOnly which accepts a timestamp and sets * nTimeFirstKey more intelligently for more efficient rescans. */ - bool AddWatchOnly(const CScript& dest) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); - bool AddWatchOnlyWithDB(WalletBatch &batch, const CScript& dest) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + bool AddWatchOnly(const CScript& dest) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); + bool AddWatchOnlyWithDB(WalletBatch &batch, const CScript& dest) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); bool AddWatchOnlyInMem(const CScript &dest); //! Adds a watch-only address to the store, and saves it to disk. 
- bool AddWatchOnlyWithDB(WalletBatch &batch, const CScript& dest, int64_t create_time) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + bool AddWatchOnlyWithDB(WalletBatch &batch, const CScript& dest, int64_t create_time) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); //! Adds a key to the store, and saves it to disk. - bool AddKeyPubKeyWithDB(WalletBatch &batch,const CKey& key, const CPubKey &pubkey) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + bool AddKeyPubKeyWithDB(WalletBatch &batch,const CKey& key, const CPubKey &pubkey) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); void AddKeypoolPubkeyWithDB(const CPubKey& pubkey, const bool internal, WalletBatch& batch); @@ -257,12 +277,12 @@ private: CHDChain hdChain; /* HD derive new child key (on internal or external chain) */ - void DeriveNewChildKey(WalletBatch& batch, CKeyMetadata& metadata, CKey& secret, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + void DeriveNewChildKey(WalletBatch& batch, CKeyMetadata& metadata, CKey& secret, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); - std::set<int64_t> setInternalKeyPool GUARDED_BY(cs_wallet); - std::set<int64_t> setExternalKeyPool GUARDED_BY(cs_wallet); - std::set<int64_t> set_pre_split_keypool GUARDED_BY(cs_wallet); - int64_t m_max_keypool_index GUARDED_BY(cs_wallet) = 0; + std::set<int64_t> setInternalKeyPool GUARDED_BY(cs_KeyStore); + std::set<int64_t> setExternalKeyPool GUARDED_BY(cs_KeyStore); + std::set<int64_t> set_pre_split_keypool GUARDED_BY(cs_KeyStore); + int64_t m_max_keypool_index GUARDED_BY(cs_KeyStore) = 0; std::map<CKeyID, int64_t> m_pool_key_to_index; // Tracks keypool indexes to CKeyIDs of keys that have been taken out of the keypool but may be returned to it std::map<int64_t, CKeyID> m_index_to_reserved_key; @@ -287,6 +307,8 @@ private: bool ReserveKeyFromKeyPool(int64_t& nIndex, CKeyPool& keypool, bool fRequestedInternal); public: + using ScriptPubKeyMan::ScriptPubKeyMan; + bool GetNewDestination(const OutputType type, CTxDestination& dest, std::string& error) override; isminetype IsMine(const CScript& script) const override; @@ -302,7 +324,7 @@ public: void MarkUnusedAddresses(const CScript& script) override; //! Upgrade stored CKeyMetadata objects to store key origin info as KeyOriginInfo - void UpgradeKeyMetadata() EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + void UpgradeKeyMetadata(); bool IsHDEnabled() const override; @@ -315,7 +337,7 @@ public: void RewriteDB() override; int64_t GetOldestKeyPoolTime() override; - size_t KeypoolCountExternalKeys() override EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + size_t KeypoolCountExternalKeys() override; unsigned int GetKeyPoolSize() const override; int64_t GetTimeFirstKey() const override; @@ -324,28 +346,34 @@ public: bool CanGetAddresses(bool internal = false) override; + std::unique_ptr<SigningProvider> GetSigningProvider(const CScript& script) const override; + + bool CanProvide(const CScript& script, SignatureData& sigdata) override; + + uint256 GetID() const override; + // Map from Key ID to key metadata. - std::map<CKeyID, CKeyMetadata> mapKeyMetadata GUARDED_BY(cs_wallet); + std::map<CKeyID, CKeyMetadata> mapKeyMetadata GUARDED_BY(cs_KeyStore); // Map from Script ID to key metadata (for watch-only keys). - std::map<CScriptID, CKeyMetadata> m_script_metadata GUARDED_BY(cs_wallet); + std::map<CScriptID, CKeyMetadata> m_script_metadata GUARDED_BY(cs_KeyStore); //! Adds a key to the store, and saves it to disk. 
- bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override; //! Adds a key to the store, without saving it to disk (used by LoadWallet) bool LoadKey(const CKey& key, const CPubKey &pubkey); //! Adds an encrypted key to the store, and saves it to disk. bool AddCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret); //! Adds an encrypted key to the store, without saving it to disk (used by LoadWallet) bool LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret); - void UpdateTimeFirstKey(int64_t nCreateTime) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + void UpdateTimeFirstKey(int64_t nCreateTime) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); //! Adds a CScript to the store bool LoadCScript(const CScript& redeemScript); //! Load metadata (used by LoadWallet) - void LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata &metadata) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); - void LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata &metadata) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + void LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata &metadata); + void LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata &metadata); //! Generate a new key - CPubKey GenerateNewKey(WalletBatch& batch, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + CPubKey GenerateNewKey(WalletBatch& batch, bool internal = false) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); /* Set the HD chain model (chain child index counters) */ void SetHDChain(const CHDChain& chain, bool memonly); @@ -358,8 +386,8 @@ public: //! Returns whether there are any watch-only things in the wallet bool HaveWatchOnly() const; //! Remove a watch only script from the keystore - bool RemoveWatchOnly(const CScript &dest) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); - bool AddWatchOnly(const CScript& dest, int64_t nCreateTime) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + bool RemoveWatchOnly(const CScript &dest); + bool AddWatchOnly(const CScript& dest, int64_t nCreateTime) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); //! Fetches a pubkey from mapWatchKeys if it exists there bool GetWatchPubKey(const CKeyID &address, CPubKey &pubkey_out) const; @@ -372,14 +400,14 @@ public: bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const override; //! 
Load a keypool entry - void LoadKeyPool(int64_t nIndex, const CKeyPool &keypool) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + void LoadKeyPool(int64_t nIndex, const CKeyPool &keypool); bool NewKeyPool(); - void MarkPreSplitKeys() EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + void MarkPreSplitKeys() EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); - bool ImportScripts(const std::set<CScript> scripts, int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); - bool ImportPrivKeys(const std::map<CKeyID, CKey>& privkey_map, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); - bool ImportPubKeys(const std::vector<CKeyID>& ordered_pubkeys, const std::map<CKeyID, CPubKey>& pubkey_map, const std::map<CKeyID, std::pair<CPubKey, KeyOriginInfo>>& key_origins, const bool add_keypool, const bool internal, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); - bool ImportScriptPubKeys(const std::set<CScript>& script_pub_keys, const bool have_solving_data, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + bool ImportScripts(const std::set<CScript> scripts, int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); + bool ImportPrivKeys(const std::map<CKeyID, CKey>& privkey_map, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); + bool ImportPubKeys(const std::vector<CKeyID>& ordered_pubkeys, const std::map<CKeyID, CPubKey>& pubkey_map, const std::map<CKeyID, std::pair<CPubKey, KeyOriginInfo>>& key_origins, const bool add_keypool, const bool internal, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); + bool ImportScriptPubKeys(const std::set<CScript>& script_pub_keys, const bool have_solving_data, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); /* Returns true if the wallet can generate new keys */ bool CanGenerateKeys(); @@ -413,19 +441,26 @@ public: /** * Marks all keys in the keypool up to and including reserve_key as used. */ - void MarkReserveKeysAsUsed(int64_t keypool_id) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + void MarkReserveKeysAsUsed(int64_t keypool_id) EXCLUSIVE_LOCKS_REQUIRED(cs_KeyStore); const std::map<CKeyID, int64_t>& GetAllReserveKeys() const { return m_pool_key_to_index; } std::set<CKeyID> GetKeys() const override; - // Temporary CWallet accessors and aliases. - friend class CWallet; - friend class ReserveDestination; - LegacyScriptPubKeyMan(CWallet& wallet); - void NotifyWatchonlyChanged(bool fHaveWatchOnly) const; - void NotifyCanGetAddressesChanged() const; - template<typename... Params> void WalletLogPrintf(const std::string& fmt, const Params&... 
parameters) const; - CWallet& m_wallet; - RecursiveMutex& cs_wallet; +}; + +/** Wraps a LegacyScriptPubKeyMan so that it can be returned in a new unique_ptr */ +class LegacySigningProvider : public SigningProvider +{ +private: + const LegacyScriptPubKeyMan& m_spk_man; +public: + LegacySigningProvider(const LegacyScriptPubKeyMan& spk_man) : m_spk_man(spk_man) {} + + bool GetCScript(const CScriptID &scriptid, CScript& script) const override { return m_spk_man.GetCScript(scriptid, script); } + bool HaveCScript(const CScriptID &scriptid) const override { return m_spk_man.HaveCScript(scriptid); } + bool GetPubKey(const CKeyID &address, CPubKey& pubkey) const override { return m_spk_man.GetPubKey(address, pubkey); } + bool GetKey(const CKeyID &address, CKey& key) const override { return m_spk_man.GetKey(address, key); } + bool HaveKey(const CKeyID &address) const override { return m_spk_man.HaveKey(address); } + bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const override { return m_spk_man.GetKeyOrigin(keyid, info); } }; #endif // BITCOIN_WALLET_SCRIPTPUBKEYMAN_H diff --git a/src/wallet/test/coinselector_tests.cpp b/src/wallet/test/coinselector_tests.cpp index 0e0f06c64c..d65a0e9075 100644 --- a/src/wallet/test/coinselector_tests.cpp +++ b/src/wallet/test/coinselector_tests.cpp @@ -136,6 +136,7 @@ BOOST_AUTO_TEST_CASE(bnb_search_test) { LOCK(testWallet.cs_wallet); + testWallet.SetupLegacyScriptPubKeyMan(); // Setup std::vector<CInputCoin> utxo_pool; @@ -278,6 +279,7 @@ BOOST_AUTO_TEST_CASE(bnb_search_test) std::unique_ptr<CWallet> wallet = MakeUnique<CWallet>(m_chain.get(), WalletLocation(), WalletDatabase::CreateMock()); bool firstRun; wallet->LoadWallet(firstRun); + wallet->SetupLegacyScriptPubKeyMan(); LOCK(wallet->cs_wallet); add_coin(*wallet, 5 * CENT, 6 * 24, false, 0, true); add_coin(*wallet, 3 * CENT, 6 * 24, false, 0, true); @@ -299,6 +301,7 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) bool bnb_used; LOCK(testWallet.cs_wallet); + testWallet.SetupLegacyScriptPubKeyMan(); // test multiple times to allow for differences in the shuffle order for (int i = 0; i < RUN_TESTS; i++) @@ -578,6 +581,7 @@ BOOST_AUTO_TEST_CASE(ApproximateBestSubset) bool bnb_used; LOCK(testWallet.cs_wallet); + testWallet.SetupLegacyScriptPubKeyMan(); empty_wallet(); @@ -596,6 +600,8 @@ BOOST_AUTO_TEST_CASE(ApproximateBestSubset) // Tests that with the ideal conditions, the coin selector will always be able to find a solution that can pay the target value BOOST_AUTO_TEST_CASE(SelectCoins_test) { + testWallet.SetupLegacyScriptPubKeyMan(); + // Random generator stuff std::default_random_engine generator; std::exponential_distribution<double> distribution (100); diff --git a/src/wallet/test/ismine_tests.cpp b/src/wallet/test/ismine_tests.cpp index 76c3639d16..4c0e4dc653 100644 --- a/src/wallet/test/ismine_tests.cpp +++ b/src/wallet/test/ismine_tests.cpp @@ -36,7 +36,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // P2PK compressed { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); scriptPubKey = GetScriptForRawPubKey(pubkeys[0]); // Keystore does not have key @@ -52,7 +53,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // P2PK uncompressed { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); scriptPubKey = 
GetScriptForRawPubKey(uncompressedPubkey); // Keystore does not have key @@ -68,7 +70,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // P2PKH compressed { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); scriptPubKey = GetScriptForDestination(PKHash(pubkeys[0])); // Keystore does not have key @@ -84,7 +87,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // P2PKH uncompressed { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); scriptPubKey = GetScriptForDestination(PKHash(uncompressedPubkey)); // Keystore does not have key @@ -100,7 +104,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // P2SH { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); CScript redeemScript = GetScriptForDestination(PKHash(pubkeys[0])); scriptPubKey = GetScriptForDestination(ScriptHash(redeemScript)); @@ -123,7 +128,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // (P2PKH inside) P2SH inside P2SH (invalid) { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); CScript redeemscript_inner = GetScriptForDestination(PKHash(pubkeys[0])); CScript redeemscript = GetScriptForDestination(ScriptHash(redeemscript_inner)); @@ -140,7 +146,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // (P2PKH inside) P2SH inside P2WSH (invalid) { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); CScript redeemscript = GetScriptForDestination(PKHash(pubkeys[0])); CScript witnessscript = GetScriptForDestination(ScriptHash(redeemscript)); @@ -157,7 +164,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // P2WPKH inside P2WSH (invalid) { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); CScript witnessscript = GetScriptForDestination(WitnessV0KeyHash(PKHash(pubkeys[0]))); scriptPubKey = GetScriptForDestination(WitnessV0ScriptHash(witnessscript)); @@ -172,7 +180,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // (P2PKH inside) P2WSH inside P2WSH (invalid) { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); CScript witnessscript_inner = GetScriptForDestination(PKHash(pubkeys[0])); CScript witnessscript = GetScriptForDestination(WitnessV0ScriptHash(witnessscript_inner)); @@ -189,7 +198,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // P2WPKH compressed { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0])); scriptPubKey = GetScriptForDestination(WitnessV0KeyHash(PKHash(pubkeys[0]))); @@ -203,7 
+213,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // P2WPKH uncompressed { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(uncompressedKey)); scriptPubKey = GetScriptForDestination(WitnessV0KeyHash(PKHash(uncompressedPubkey))); @@ -221,7 +232,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // scriptPubKey multisig { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); scriptPubKey = GetScriptForMultisig(2, {uncompressedPubkey, pubkeys[1]}); @@ -251,7 +263,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // P2SH multisig { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(uncompressedKey)); BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[1])); @@ -271,7 +284,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // P2WSH multisig with compressed keys { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0])); BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[1])); @@ -296,7 +310,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // P2WSH multisig with uncompressed key { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(uncompressedKey)); BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[1])); @@ -321,7 +336,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // P2WSH multisig wrapped in P2SH { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); CScript witnessScript = GetScriptForMultisig(2, {pubkeys[0], pubkeys[1]}); CScript redeemScript = GetScriptForDestination(WitnessV0ScriptHash(witnessScript)); @@ -347,7 +363,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // OP_RETURN { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0])); scriptPubKey.clear(); @@ -360,7 +377,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // witness unspendable { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0])); scriptPubKey.clear(); @@ -373,7 +391,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // witness unknown { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + 
keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0])); scriptPubKey.clear(); @@ -386,7 +405,8 @@ BOOST_AUTO_TEST_CASE(ismine_standard) // Nonstandard { CWallet keystore(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - LOCK(keystore.cs_wallet); + keystore.SetupLegacyScriptPubKeyMan(); + LOCK(keystore.GetLegacyScriptPubKeyMan()->cs_KeyStore); BOOST_CHECK(keystore.GetLegacyScriptPubKeyMan()->AddKey(keys[0])); scriptPubKey.clear(); diff --git a/src/wallet/test/psbt_wallet_tests.cpp b/src/wallet/test/psbt_wallet_tests.cpp index 5368842ff5..f923de6178 100644 --- a/src/wallet/test/psbt_wallet_tests.cpp +++ b/src/wallet/test/psbt_wallet_tests.cpp @@ -16,8 +16,8 @@ BOOST_FIXTURE_TEST_SUITE(psbt_wallet_tests, WalletTestingSetup) BOOST_AUTO_TEST_CASE(psbt_updater_test) { - auto spk_man = m_wallet.GetLegacyScriptPubKeyMan(); - LOCK(m_wallet.cs_wallet); + auto spk_man = m_wallet.GetOrCreateLegacyScriptPubKeyMan(); + LOCK2(m_wallet.cs_wallet, spk_man->cs_KeyStore); // Create prevtxs and add to wallet CDataStream s_prev_tx1(ParseHex("0200000000010158e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd7501000000171600145f275f436b09a8cc9a2eb2a2f528485c68a56323feffffff02d8231f1b0100000017a914aed962d6654f9a2b36608eb9d64d2b260db4f1118700c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e88702483045022100a22edcc6e5bc511af4cc4ae0de0fcd75c7e04d8c1c3a8aa9d820ed4b967384ec02200642963597b9b1bc22c75e9f3e117284a962188bf5e8a74c895089046a20ad770121035509a48eb623e10aace8bfd0212fdb8a8e5af3c94b0b133b95e114cab89e4f7965000000"), SER_NETWORK, PROTOCOL_VERSION); @@ -75,7 +75,7 @@ BOOST_AUTO_TEST_CASE(psbt_updater_test) // Try to sign the mutated input SignatureData sigdata; psbtx.inputs[0].FillSignatureData(sigdata); - const SigningProvider* provider = m_wallet.GetSigningProvider(ws1, sigdata); + const std::unique_ptr<SigningProvider> provider = m_wallet.GetSigningProvider(ws1, sigdata); BOOST_CHECK(!SignPSBTInput(*provider, psbtx, 0, SIGHASH_ALL)); } diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index 2f21b2439b..a487e9e2e0 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -28,9 +28,8 @@ BOOST_FIXTURE_TEST_SUITE(wallet_tests, WalletTestingSetup) static void AddKey(CWallet& wallet, const CKey& key) { - auto spk_man = wallet.GetLegacyScriptPubKeyMan(); - LOCK(wallet.cs_wallet); - AssertLockHeld(spk_man->cs_wallet); + auto spk_man = wallet.GetOrCreateLegacyScriptPubKeyMan(); + LOCK2(wallet.cs_wallet, spk_man->cs_KeyStore); spk_man->AddKeyPubKey(key, key.GetPubKey()); } @@ -152,6 +151,7 @@ BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup) // after. { std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); + wallet->SetupLegacyScriptPubKeyMan(); AddWallet(wallet); UniValue keys; keys.setArray(); @@ -216,9 +216,8 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup) // Import key into wallet and call dumpwallet to create backup file. 
{ std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - auto spk_man = wallet->GetLegacyScriptPubKeyMan(); - LOCK(wallet->cs_wallet); - AssertLockHeld(spk_man->cs_wallet); + auto spk_man = wallet->GetOrCreateLegacyScriptPubKeyMan(); + LOCK2(wallet->cs_wallet, spk_man->cs_KeyStore); spk_man->mapKeyMetadata[coinbaseKey.GetPubKey().GetID()].nCreateTime = KEY_TIME; spk_man->AddKeyPubKey(coinbaseKey, coinbaseKey.GetPubKey()); @@ -234,6 +233,7 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup) // were scanned, and no prior blocks were scanned. { std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); + wallet->SetupLegacyScriptPubKeyMan(); JSONRPCRequest request; request.params.setArray(); @@ -267,13 +267,12 @@ BOOST_FIXTURE_TEST_CASE(coin_mark_dirty_immature_credit, TestChain100Setup) auto chain = interfaces::MakeChain(node); CWallet wallet(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); - auto spk_man = wallet.GetLegacyScriptPubKeyMan(); + auto spk_man = wallet.GetOrCreateLegacyScriptPubKeyMan(); CWalletTx wtx(&wallet, m_coinbase_txns.back()); auto locked_chain = chain->lock(); LockAssertion lock(::cs_main); - LOCK(wallet.cs_wallet); - AssertLockHeld(spk_man->cs_wallet); + LOCK2(wallet.cs_wallet, spk_man->cs_KeyStore); wallet.SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash()); CWalletTx::Confirmation confirm(CWalletTx::Status::CONFIRMED, ::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash(), 0); @@ -283,7 +282,7 @@ BOOST_FIXTURE_TEST_CASE(coin_mark_dirty_immature_credit, TestChain100Setup) // cache the current immature credit amount, which is 0. BOOST_CHECK_EQUAL(wtx.GetImmatureCredit(), 0); - // Invalidate the cached vanue, add the key, and make sure a new immature + // Invalidate the cached value, add the key, and make sure a new immature // credit amount is calculated. wtx.MarkDirty(); BOOST_CHECK(spk_man->AddKeyPubKey(coinbaseKey, coinbaseKey.GetPubKey())); @@ -377,7 +376,7 @@ static void TestWatchOnlyPubKey(LegacyScriptPubKeyMan* spk_man, const CPubKey& a CScript p2pk = GetScriptForRawPubKey(add_pubkey); CKeyID add_address = add_pubkey.GetID(); CPubKey found_pubkey; - LOCK(spk_man->cs_wallet); + LOCK(spk_man->cs_KeyStore); // all Scripts (i.e. 
also all PubKeys) are added to the general watch-only set BOOST_CHECK(!spk_man->HaveWatchOnly(p2pk)); @@ -394,7 +393,6 @@ static void TestWatchOnlyPubKey(LegacyScriptPubKeyMan* spk_man, const CPubKey& a BOOST_CHECK(found_pubkey == CPubKey()); // passed key is unchanged } - AssertLockHeld(spk_man->cs_wallet); spk_man->RemoveWatchOnly(p2pk); BOOST_CHECK(!spk_man->HaveWatchOnly(p2pk)); @@ -419,7 +417,7 @@ BOOST_AUTO_TEST_CASE(WatchOnlyPubKeys) { CKey key; CPubKey pubkey; - LegacyScriptPubKeyMan* spk_man = m_wallet.GetLegacyScriptPubKeyMan(); + LegacyScriptPubKeyMan* spk_man = m_wallet.GetOrCreateLegacyScriptPubKeyMan(); BOOST_CHECK(!spk_man->HaveWatchOnly()); @@ -581,6 +579,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_disableprivkeys, TestChain100Setup) NodeContext node; auto chain = interfaces::MakeChain(node); std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(chain.get(), WalletLocation(), WalletDatabase::CreateDummy()); + wallet->SetupLegacyScriptPubKeyMan(); wallet->SetMinVersion(FEATURE_LATEST); wallet->SetWalletFlag(WALLET_FLAG_DISABLE_PRIVATE_KEYS); BOOST_CHECK(!wallet->TopUpKeyPool(1000)); diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 724997a36d..6b9f53f7c5 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -13,6 +13,7 @@ #include <interfaces/wallet.h> #include <key.h> #include <key_io.h> +#include <optional.h> #include <policy/fees.h> #include <policy/policy.h> #include <primitives/block.h> @@ -56,6 +57,7 @@ bool AddWallet(const std::shared_ptr<CWallet>& wallet) std::vector<std::shared_ptr<CWallet>>::const_iterator i = std::find(vpwallets.begin(), vpwallets.end(), wallet); if (i != vpwallets.end()) return false; vpwallets.push_back(wallet); + wallet->ConnectScriptPubKeyManNotifiers(); return true; } @@ -219,7 +221,8 @@ WalletCreationStatus CreateWallet(interfaces::Chain& chain, const SecureString& // Set a seed for the wallet { - if (auto spk_man = wallet->m_spk_man.get()) { + LOCK(wallet->cs_wallet); + for (auto spk_man : wallet->GetActiveScriptPubKeyMans()) { if (!spk_man->SetupGeneration()) { error = "Unable to generate initial keys"; return WalletCreationStatus::CREATION_FAILED; @@ -237,7 +240,7 @@ WalletCreationStatus CreateWallet(interfaces::Chain& chain, const SecureString& return WalletCreationStatus::SUCCESS; } -const uint256 CWalletTx::ABANDON_HASH(uint256S("0000000000000000000000000000000000000000000000000000000000000001")); +const uint256 CWalletTx::ABANDON_HASH(UINT256_ONE()); /** @defgroup mapWallet * @@ -264,10 +267,12 @@ void CWallet::UpgradeKeyMetadata() return; } - if (m_spk_man) { - AssertLockHeld(m_spk_man->cs_wallet); - m_spk_man->UpgradeKeyMetadata(); + auto spk_man = GetLegacyScriptPubKeyMan(); + if (!spk_man) { + return; } + + spk_man->UpgradeKeyMetadata(); SetWalletFlag(WALLET_FLAG_KEY_ORIGIN_METADATA); } @@ -548,7 +553,8 @@ bool CWallet::EncryptWallet(const SecureString& strWalletPassphrase) } encrypted_batch->WriteMasterKey(nMasterKeyMaxID, kMasterKey); - if (auto spk_man = m_spk_man.get()) { + for (const auto& spk_man_pair : m_spk_managers) { + auto spk_man = spk_man_pair.second.get(); if (!spk_man->Encrypt(_vMasterKey, encrypted_batch)) { encrypted_batch->TxnAbort(); delete encrypted_batch; @@ -577,7 +583,7 @@ bool CWallet::EncryptWallet(const SecureString& strWalletPassphrase) Unlock(strWalletPassphrase); // if we are using HD, replace the HD seed with a new one - if (auto spk_man = m_spk_man.get()) { + if (auto spk_man = GetLegacyScriptPubKeyMan()) { if (spk_man->IsHDEnabled()) { if (!spk_man->SetupGeneration(true)) { 
return false; @@ -841,6 +847,14 @@ bool CWallet::AddToWallet(const CWalletTx& wtxIn, bool fFlushOnClose) if (!strCmd.empty()) { boost::replace_all(strCmd, "%s", wtxIn.GetHash().GetHex()); +#ifndef WIN32 + // Substituting the wallet name isn't currently supported on windows + // because windows shell escaping has not been implemented yet: + // https://github.com/bitcoin/bitcoin/pull/13339#issuecomment-537384875 + // A few ways it could be implemented in the future are described in: + // https://github.com/bitcoin/bitcoin/pull/13339#issuecomment-461288094 + boost::replace_all(strCmd, "%w", ShellEscape(GetName())); +#endif std::thread t(runCommand, strCmd); t.detach(); // thread runs free } @@ -922,8 +936,8 @@ bool CWallet::AddToWalletIfInvolvingMe(const CTransactionRef& ptx, CWalletTx::Co // loop though all outputs for (const CTxOut& txout: tx.vout) { - if (auto spk_man = m_spk_man.get()) { - spk_man->MarkUnusedAddresses(txout.scriptPubKey); + for (const auto& spk_man_pair : m_spk_managers) { + spk_man_pair.second->MarkUnusedAddresses(txout.scriptPubKey); } } @@ -1194,8 +1208,8 @@ isminetype CWallet::IsMine(const CTxDestination& dest) const isminetype CWallet::IsMine(const CScript& script) const { isminetype result = ISMINE_NO; - if (auto spk_man = m_spk_man.get()) { - result = spk_man->IsMine(script); + for (const auto& spk_man_pair : m_spk_managers) { + result = std::max(result, spk_man_pair.second->IsMine(script)); } return result; } @@ -1314,16 +1328,18 @@ CAmount CWallet::GetChange(const CTransaction& tx) const bool CWallet::IsHDEnabled() const { bool result = true; - if (auto spk_man = m_spk_man.get()) { - result &= spk_man->IsHDEnabled(); + for (const auto& spk_man_pair : m_spk_managers) { + result &= spk_man_pair.second->IsHDEnabled(); } return result; } bool CWallet::CanGetAddresses(bool internal) { - { - auto spk_man = m_spk_man.get(); + LOCK(cs_wallet); + if (m_spk_managers.empty()) return false; + for (OutputType t : OUTPUT_TYPES) { + auto spk_man = GetScriptPubKeyMan(t, internal); if (spk_man && spk_man->CanGetAddresses(internal)) { return true; } @@ -1392,7 +1408,7 @@ bool CWallet::DummySignInput(CTxIn &tx_in, const CTxOut &txout, bool use_max_sig const CScript& scriptPubKey = txout.scriptPubKey; SignatureData sigdata; - const SigningProvider* provider = GetSigningProvider(scriptPubKey); + std::unique_ptr<SigningProvider> provider = GetSigningProvider(scriptPubKey); if (!provider) { // We don't know about this scriptpbuKey; return false; @@ -1427,7 +1443,7 @@ bool CWallet::ImportScripts(const std::set<CScript> scripts, int64_t timestamp) if (!spk_man) { return false; } - AssertLockHeld(spk_man->cs_wallet); + LOCK(spk_man->cs_KeyStore); return spk_man->ImportScripts(scripts, timestamp); } @@ -1437,7 +1453,7 @@ bool CWallet::ImportPrivKeys(const std::map<CKeyID, CKey>& privkey_map, const in if (!spk_man) { return false; } - AssertLockHeld(spk_man->cs_wallet); + LOCK(spk_man->cs_KeyStore); return spk_man->ImportPrivKeys(privkey_map, timestamp); } @@ -1447,7 +1463,7 @@ bool CWallet::ImportPubKeys(const std::vector<CKeyID>& ordered_pubkeys, const st if (!spk_man) { return false; } - AssertLockHeld(spk_man->cs_wallet); + LOCK(spk_man->cs_KeyStore); return spk_man->ImportPubKeys(ordered_pubkeys, pubkey_map, key_origins, add_keypool, internal, timestamp); } @@ -1457,7 +1473,7 @@ bool CWallet::ImportScriptPubKeys(const std::string& label, const std::set<CScri if (!spk_man) { return false; } - AssertLockHeld(spk_man->cs_wallet); + LOCK(spk_man->cs_KeyStore); if 
(!spk_man->ImportScriptPubKeys(script_pub_keys, have_solving_data, timestamp)) { return false; } @@ -2156,7 +2172,7 @@ void CWallet::AvailableCoins(interfaces::Chain::Lock& locked_chain, std::vector< continue; } - const SigningProvider* provider = GetSigningProvider(wtx.tx->vout[i].scriptPubKey); + std::unique_ptr<SigningProvider> provider = GetSigningProvider(wtx.tx->vout[i].scriptPubKey); bool solvable = provider ? IsSolvable(*provider, wtx.tx->vout[i].scriptPubKey) : false; bool spendable = ((mine & ISMINE_SPENDABLE) != ISMINE_NO) || (((mine & ISMINE_WATCH_ONLY) != ISMINE_NO) && (coinControl && coinControl->fAllowWatchOnly && solvable)); @@ -2410,7 +2426,7 @@ bool CWallet::SignTransaction(CMutableTransaction& tx) const CAmount& amount = mi->second.tx->vout[input.prevout.n].nValue; SignatureData sigdata; - const SigningProvider* provider = GetSigningProvider(scriptPubKey); + std::unique_ptr<SigningProvider> provider = GetSigningProvider(scriptPubKey); if (!provider) { // We don't know about this scriptpbuKey; return false; @@ -2879,7 +2895,7 @@ bool CWallet::CreateTransaction(interfaces::Chain::Lock& locked_chain, const std const CScript& scriptPubKey = coin.txout.scriptPubKey; SignatureData sigdata; - const SigningProvider* provider = GetSigningProvider(scriptPubKey); + std::unique_ptr<SigningProvider> provider = GetSigningProvider(scriptPubKey); if (!provider || !ProduceSignature(*provider, MutableTransactionSignatureCreator(&txNew, nIn, coin.txout.nValue, SIGHASH_ALL), scriptPubKey, sigdata)) { strFailReason = _("Signing transaction failed").translated; @@ -2986,17 +3002,17 @@ DBErrors CWallet::LoadWallet(bool& fFirstRunRet) { if (database->Rewrite("\x04pool")) { - if (auto spk_man = m_spk_man.get()) { - spk_man->RewriteDB(); + for (const auto& spk_man_pair : m_spk_managers) { + spk_man_pair.second->RewriteDB(); } } } - { - LOCK(cs_KeyStore); - // This wallet is in its first run if all of these are empty - fFirstRunRet = mapKeys.empty() && mapCryptedKeys.empty() && mapWatchKeys.empty() && setWatchOnly.empty() && mapScripts.empty() - && !IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) && !IsWalletFlagSet(WALLET_FLAG_BLANK_WALLET); + // This wallet is in its first run if there are no ScriptPubKeyMans and it isn't blank or no privkeys + fFirstRunRet = m_spk_managers.empty() && !IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) && !IsWalletFlagSet(WALLET_FLAG_BLANK_WALLET); + if (fFirstRunRet) { + assert(m_external_spk_managers.empty()); + assert(m_internal_spk_managers.empty()); } if (nLoadWalletRet != DBErrors::LOAD_OK) @@ -3020,8 +3036,8 @@ DBErrors CWallet::ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256 { if (database->Rewrite("\x04pool")) { - if (auto spk_man = m_spk_man.get()) { - spk_man->RewriteDB(); + for (const auto& spk_man_pair : m_spk_managers) { + spk_man_pair.second->RewriteDB(); } } } @@ -3041,8 +3057,8 @@ DBErrors CWallet::ZapWalletTx(std::vector<CWalletTx>& vWtx) { if (database->Rewrite("\x04pool")) { - if (auto spk_man = m_spk_man.get()) { - spk_man->RewriteDB(); + for (const auto& spk_man_pair : m_spk_managers) { + spk_man_pair.second->RewriteDB(); } } } @@ -3102,8 +3118,7 @@ size_t CWallet::KeypoolCountExternalKeys() AssertLockHeld(cs_wallet); unsigned int count = 0; - if (auto spk_man = m_spk_man.get()) { - AssertLockHeld(spk_man->cs_wallet); + for (auto spk_man : GetActiveScriptPubKeyMans()) { count += spk_man->KeypoolCountExternalKeys(); } @@ -3115,7 +3130,7 @@ unsigned int CWallet::GetKeyPoolSize() const AssertLockHeld(cs_wallet); unsigned int 
count = 0; - if (auto spk_man = m_spk_man.get()) { + for (auto spk_man : GetActiveScriptPubKeyMans()) { count += spk_man->GetKeyPoolSize(); } return count; @@ -3123,8 +3138,9 @@ unsigned int CWallet::GetKeyPoolSize() const bool CWallet::TopUpKeyPool(unsigned int kpSize) { + LOCK(cs_wallet); bool res = true; - if (auto spk_man = m_spk_man.get()) { + for (auto spk_man : GetActiveScriptPubKeyMans()) { res &= spk_man->TopUp(kpSize); } return res; @@ -3135,7 +3151,7 @@ bool CWallet::GetNewDestination(const OutputType type, const std::string label, LOCK(cs_wallet); error.clear(); bool result = false; - auto spk_man = m_spk_man.get(); + auto spk_man = GetScriptPubKeyMan(type, false /* internal */); if (spk_man) { spk_man->TopUp(); result = spk_man->GetNewDestination(type, dest, error); @@ -3149,6 +3165,7 @@ bool CWallet::GetNewDestination(const OutputType type, const std::string label, bool CWallet::GetNewChangeDestination(const OutputType type, CTxDestination& dest, std::string& error) { + LOCK(cs_wallet); error.clear(); ReserveDestination reservedest(this, type); @@ -3163,9 +3180,10 @@ bool CWallet::GetNewChangeDestination(const OutputType type, CTxDestination& des int64_t CWallet::GetOldestKeyPoolTime() { + LOCK(cs_wallet); int64_t oldestKey = std::numeric_limits<int64_t>::max(); - if (auto spk_man = m_spk_man.get()) { - oldestKey = spk_man->GetOldestKeyPoolTime(); + for (const auto& spk_man_pair : m_spk_managers) { + oldestKey = std::min(oldestKey, spk_man_pair.second->GetOldestKeyPoolTime()); } return oldestKey; } @@ -3334,7 +3352,7 @@ std::set<CTxDestination> CWallet::GetLabelAddresses(const std::string& label) co bool ReserveDestination::GetReservedDestination(CTxDestination& dest, bool internal) { - m_spk_man = pwallet->GetLegacyScriptPubKeyMan(); + m_spk_man = pwallet->GetScriptPubKeyMan(type, internal); if (!m_spk_man) { return false; } @@ -3416,7 +3434,7 @@ void CWallet::GetKeyBirthTimes(interfaces::Chain::Lock& locked_chain, std::map<C LegacyScriptPubKeyMan* spk_man = GetLegacyScriptPubKeyMan(); assert(spk_man != nullptr); - AssertLockHeld(spk_man->cs_wallet); + LOCK(spk_man->cs_KeyStore); // get birth times for keys with metadata for (const auto& entry : spk_man->mapKeyMetadata) { @@ -3711,7 +3729,7 @@ std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain, return nullptr; } - if (auto spk_man = walletInstance->m_spk_man.get()) { + for (auto spk_man : walletInstance->GetActiveScriptPubKeyMans()) { if (!spk_man->Upgrade(prev_version, error)) { return nullptr; } @@ -3724,8 +3742,13 @@ std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain, walletInstance->SetMinVersion(FEATURE_LATEST); walletInstance->SetWalletFlags(wallet_creation_flags, false); + + // Always create LegacyScriptPubKeyMan for now + walletInstance->SetupLegacyScriptPubKeyMan(); + if (!(wallet_creation_flags & (WALLET_FLAG_DISABLE_PRIVATE_KEYS | WALLET_FLAG_BLANK_WALLET))) { - if (auto spk_man = walletInstance->m_spk_man.get()) { + LOCK(walletInstance->cs_wallet); + for (auto spk_man : walletInstance->GetActiveScriptPubKeyMans()) { if (!spk_man->SetupGeneration()) { error = _("Unable to generate initial keys").translated; return nullptr; @@ -3740,9 +3763,10 @@ std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain, error = strprintf(_("Error loading %s: Private keys can only be disabled during creation").translated, walletFile); return NULL; } else if (walletInstance->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) { - if 
(walletInstance->m_spk_man) { - if (walletInstance->m_spk_man->HavePrivateKeys()) { + for (auto spk_man : walletInstance->GetActiveScriptPubKeyMans()) { + if (spk_man->HavePrivateKeys()) { warnings.push_back(strprintf(_("Warning: Private keys detected in wallet {%s} with disabled private keys").translated, walletFile)); + break; } } } @@ -3895,8 +3919,9 @@ std::shared_ptr<CWallet> CWallet::CreateWalletFromFile(interfaces::Chain& chain, // No need to read and scan block if block was created before // our wallet birthday (as adjusted for block time variability) - Optional<int64_t> time_first_key; - if (auto spk_man = walletInstance->m_spk_man.get()) { + // The way the 'time_first_key' is initialized is just a workaround for the gcc bug #47679 since version 4.6.0. + Optional<int64_t> time_first_key = MakeOptional(false, int64_t());; + for (auto spk_man : walletInstance->GetAllScriptPubKeyMans()) { int64_t time = spk_man->GetTimeFirstKey(); if (!time_first_key || time < *time_first_key) time_first_key = time; } @@ -4064,7 +4089,7 @@ bool CWallet::IsLocked() const if (!IsCrypted()) { return false; } - LOCK(cs_KeyStore); + LOCK(cs_wallet); return vMasterKey.empty(); } @@ -4074,7 +4099,7 @@ bool CWallet::Lock() return false; { - LOCK(cs_KeyStore); + LOCK(cs_wallet); vMasterKey.clear(); } @@ -4085,9 +4110,9 @@ bool CWallet::Lock() bool CWallet::Unlock(const CKeyingMaterial& vMasterKeyIn, bool accept_no_keys) { { - LOCK(cs_KeyStore); - if (m_spk_man) { - if (!m_spk_man->CheckDecryptionKey(vMasterKeyIn, accept_no_keys)) { + LOCK(cs_wallet); + for (const auto& spk_man_pair : m_spk_managers) { + if (!spk_man_pair.second->CheckDecryptionKey(vMasterKeyIn, accept_no_keys)) { return false; } } @@ -4097,24 +4122,102 @@ bool CWallet::Unlock(const CKeyingMaterial& vMasterKeyIn, bool accept_no_keys) return true; } +std::set<ScriptPubKeyMan*> CWallet::GetActiveScriptPubKeyMans() const +{ + std::set<ScriptPubKeyMan*> spk_mans; + for (bool internal : {false, true}) { + for (OutputType t : OUTPUT_TYPES) { + auto spk_man = GetScriptPubKeyMan(t, internal); + if (spk_man) { + spk_mans.insert(spk_man); + } + } + } + return spk_mans; +} + +std::set<ScriptPubKeyMan*> CWallet::GetAllScriptPubKeyMans() const +{ + std::set<ScriptPubKeyMan*> spk_mans; + for (const auto& spk_man_pair : m_spk_managers) { + spk_mans.insert(spk_man_pair.second.get()); + } + return spk_mans; +} + +ScriptPubKeyMan* CWallet::GetScriptPubKeyMan(const OutputType& type, bool internal) const +{ + const std::map<OutputType, ScriptPubKeyMan*>& spk_managers = internal ? m_internal_spk_managers : m_external_spk_managers; + std::map<OutputType, ScriptPubKeyMan*>::const_iterator it = spk_managers.find(type); + if (it == spk_managers.end()) { + WalletLogPrintf("%s scriptPubKey Manager for output type %d does not exist\n", internal ? 
"Internal" : "External", static_cast<int>(type)); + return nullptr; + } + return it->second; +} + ScriptPubKeyMan* CWallet::GetScriptPubKeyMan(const CScript& script) const { - return m_spk_man.get(); + SignatureData sigdata; + for (const auto& spk_man_pair : m_spk_managers) { + if (spk_man_pair.second->CanProvide(script, sigdata)) { + return spk_man_pair.second.get(); + } + } + return nullptr; } -const SigningProvider* CWallet::GetSigningProvider(const CScript& script) const +ScriptPubKeyMan* CWallet::GetScriptPubKeyMan(const uint256& id) const { - return m_spk_man.get(); + if (m_spk_managers.count(id) > 0) { + return m_spk_managers.at(id).get(); + } + return nullptr; } -const SigningProvider* CWallet::GetSigningProvider(const CScript& script, SignatureData& sigdata) const +std::unique_ptr<SigningProvider> CWallet::GetSigningProvider(const CScript& script) const { - return m_spk_man.get(); + SignatureData sigdata; + return GetSigningProvider(script, sigdata); +} + +std::unique_ptr<SigningProvider> CWallet::GetSigningProvider(const CScript& script, SignatureData& sigdata) const +{ + for (const auto& spk_man_pair : m_spk_managers) { + if (spk_man_pair.second->CanProvide(script, sigdata)) { + return spk_man_pair.second->GetSigningProvider(script); + } + } + return nullptr; } LegacyScriptPubKeyMan* CWallet::GetLegacyScriptPubKeyMan() const { - return m_spk_man.get(); + // Legacy wallets only have one ScriptPubKeyMan which is a LegacyScriptPubKeyMan. + // Everything in m_internal_spk_managers and m_external_spk_managers point to the same legacyScriptPubKeyMan. + auto it = m_internal_spk_managers.find(OutputType::LEGACY); + if (it == m_internal_spk_managers.end()) return nullptr; + return dynamic_cast<LegacyScriptPubKeyMan*>(it->second); +} + +LegacyScriptPubKeyMan* CWallet::GetOrCreateLegacyScriptPubKeyMan() +{ + SetupLegacyScriptPubKeyMan(); + return GetLegacyScriptPubKeyMan(); +} + +void CWallet::SetupLegacyScriptPubKeyMan() +{ + if (!m_internal_spk_managers.empty() || !m_external_spk_managers.empty() || !m_spk_managers.empty()) { + return; + } + + auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new LegacyScriptPubKeyMan(*this)); + for (const auto& type : OUTPUT_TYPES) { + m_internal_spk_managers[type] = spk_manager.get(); + m_external_spk_managers[type] = spk_manager.get(); + } + m_spk_managers[spk_manager->GetID()] = std::move(spk_manager); } const CKeyingMaterial& CWallet::GetEncryptionKey() const @@ -4126,3 +4229,11 @@ bool CWallet::HasEncryptionKeys() const { return !mapMasterKeys.empty(); } + +void CWallet::ConnectScriptPubKeyManNotifiers() +{ + for (const auto& spk_man : GetActiveScriptPubKeyMans()) { + spk_man->NotifyWatchonlyChanged.connect(NotifyWatchonlyChanged); + spk_man->NotifyCanGetAddressesChanged.connect(NotifyCanGetAddressesChanged); + } +} diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 44bfa20612..a918bb8833 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -606,7 +606,7 @@ class WalletRescanReserver; //forward declarations for ScanForWalletTransactions class CWallet final : public WalletStorage, private interfaces::Chain::Notifications { private: - CKeyingMaterial vMasterKey GUARDED_BY(cs_KeyStore); + CKeyingMaterial vMasterKey GUARDED_BY(cs_wallet); bool Unlock(const CKeyingMaterial& vMasterKeyIn, bool accept_no_keys = false); @@ -702,6 +702,13 @@ private: */ int m_last_block_processed_height GUARDED_BY(cs_wallet) = -1; + std::map<OutputType, ScriptPubKeyMan*> m_external_spk_managers; + std::map<OutputType, ScriptPubKeyMan*> 
m_internal_spk_managers; + + // Indexed by a unique identifier produced by each ScriptPubKeyMan using + // ScriptPubKeyMan::GetID. In many cases it will be the hash of an internal structure + std::map<uint256, std::unique_ptr<ScriptPubKeyMan>> m_spk_managers; + public: /* * Main wallet lock. @@ -1132,28 +1139,34 @@ public: LogPrintf(("%s " + fmt).c_str(), GetDisplayName(), parameters...); }; + //! Returns all unique ScriptPubKeyMans in m_internal_spk_managers and m_external_spk_managers + std::set<ScriptPubKeyMan*> GetActiveScriptPubKeyMans() const; + + //! Returns all unique ScriptPubKeyMans + std::set<ScriptPubKeyMan*> GetAllScriptPubKeyMans() const; + + //! Get the ScriptPubKeyMan for the given OutputType and internal/external chain. + ScriptPubKeyMan* GetScriptPubKeyMan(const OutputType& type, bool internal) const; + //! Get the ScriptPubKeyMan for a script ScriptPubKeyMan* GetScriptPubKeyMan(const CScript& script) const; + //! Get the ScriptPubKeyMan by id + ScriptPubKeyMan* GetScriptPubKeyMan(const uint256& id) const; //! Get the SigningProvider for a script - const SigningProvider* GetSigningProvider(const CScript& script) const; - const SigningProvider* GetSigningProvider(const CScript& script, SignatureData& sigdata) const; + std::unique_ptr<SigningProvider> GetSigningProvider(const CScript& script) const; + std::unique_ptr<SigningProvider> GetSigningProvider(const CScript& script, SignatureData& sigdata) const; + //! Get the LegacyScriptPubKeyMan which is used for all types, internal, and external. LegacyScriptPubKeyMan* GetLegacyScriptPubKeyMan() const; + LegacyScriptPubKeyMan* GetOrCreateLegacyScriptPubKeyMan(); + + //! Make a LegacyScriptPubKeyMan and set it for all types, internal, and external. + void SetupLegacyScriptPubKeyMan(); const CKeyingMaterial& GetEncryptionKey() const override; bool HasEncryptionKeys() const override; - // Temporary LegacyScriptPubKeyMan accessors and aliases. - friend class LegacyScriptPubKeyMan; - std::unique_ptr<LegacyScriptPubKeyMan> m_spk_man = MakeUnique<LegacyScriptPubKeyMan>(*this); - RecursiveMutex& cs_KeyStore = m_spk_man->cs_KeyStore; - LegacyScriptPubKeyMan::KeyMap& mapKeys GUARDED_BY(cs_KeyStore) = m_spk_man->mapKeys; - LegacyScriptPubKeyMan::ScriptMap& mapScripts GUARDED_BY(cs_KeyStore) = m_spk_man->mapScripts; - LegacyScriptPubKeyMan::CryptedKeyMap& mapCryptedKeys GUARDED_BY(cs_KeyStore) = m_spk_man->mapCryptedKeys; - LegacyScriptPubKeyMan::WatchOnlySet& setWatchOnly GUARDED_BY(cs_KeyStore) = m_spk_man->setWatchOnly; - LegacyScriptPubKeyMan::WatchKeyMap& mapWatchKeys GUARDED_BY(cs_KeyStore) = m_spk_man->mapWatchKeys; - /** Get last block processed height */ int GetLastBlockHeight() const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) { @@ -1168,6 +1181,9 @@ public: m_last_block_processed_height = block_height; m_last_block_processed = block_hash; }; + + //! 
Connect the signals from ScriptPubKeyMans to the signals in CWallet + void ConnectScriptPubKeyManNotifiers(); }; /** diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index 7d04b04764..a1928f45c4 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -196,7 +196,7 @@ public: static bool ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, - CWalletScanState &wss, std::string& strType, std::string& strErr) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet, pwallet->GetLegacyScriptPubKeyMan()->cs_wallet) + CWalletScanState &wss, std::string& strType, std::string& strErr) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet) { try { // Unserialize @@ -251,7 +251,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, char fYes; ssValue >> fYes; if (fYes == '1') { - pwallet->GetLegacyScriptPubKeyMan()->LoadWatchOnly(script); + pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadWatchOnly(script); } } else if (strType == DBKeys::KEY) { CPubKey vchPubKey; @@ -303,7 +303,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, strErr = "Error reading wallet database: CPrivKey corrupt"; return false; } - if (!pwallet->GetLegacyScriptPubKeyMan()->LoadKey(key, vchPubKey)) + if (!pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadKey(key, vchPubKey)) { strErr = "Error reading wallet database: LegacyScriptPubKeyMan::LoadKey failed"; return false; @@ -334,7 +334,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, ssValue >> vchPrivKey; wss.nCKeys++; - if (!pwallet->GetLegacyScriptPubKeyMan()->LoadCryptedKey(vchPubKey, vchPrivKey)) + if (!pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadCryptedKey(vchPubKey, vchPrivKey)) { strErr = "Error reading wallet database: LegacyScriptPubKeyMan::LoadCryptedKey failed"; return false; @@ -346,14 +346,14 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, CKeyMetadata keyMeta; ssValue >> keyMeta; wss.nKeyMeta++; - pwallet->GetLegacyScriptPubKeyMan()->LoadKeyMetadata(vchPubKey.GetID(), keyMeta); + pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadKeyMetadata(vchPubKey.GetID(), keyMeta); } else if (strType == DBKeys::WATCHMETA) { CScript script; ssKey >> script; CKeyMetadata keyMeta; ssValue >> keyMeta; wss.nKeyMeta++; - pwallet->GetLegacyScriptPubKeyMan()->LoadScriptMetadata(CScriptID(script), keyMeta); + pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadScriptMetadata(CScriptID(script), keyMeta); } else if (strType == DBKeys::DEFAULTKEY) { // We don't want or need the default key, but if there is one set, // we want to make sure that it is valid so that we can detect corruption @@ -369,13 +369,13 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, CKeyPool keypool; ssValue >> keypool; - pwallet->GetLegacyScriptPubKeyMan()->LoadKeyPool(nIndex, keypool); + pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadKeyPool(nIndex, keypool); } else if (strType == DBKeys::CSCRIPT) { uint160 hash; ssKey >> hash; CScript script; ssValue >> script; - if (!pwallet->GetLegacyScriptPubKeyMan()->LoadCScript(script)) + if (!pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadCScript(script)) { strErr = "Error reading wallet database: LegacyScriptPubKeyMan::LoadCScript failed"; return false; @@ -391,7 +391,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, } else if (strType == DBKeys::HDCHAIN) { CHDChain chain; ssValue >> chain; - pwallet->GetLegacyScriptPubKeyMan()->SetHDChain(chain, true); + 
pwallet->GetOrCreateLegacyScriptPubKeyMan()->SetHDChain(chain, true); } else if (strType == DBKeys::FLAGS) { uint64_t flags; ssValue >> flags; @@ -434,7 +434,6 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet) DBErrors result = DBErrors::LOAD_OK; LOCK(pwallet->cs_wallet); - AssertLockHeld(pwallet->GetLegacyScriptPubKeyMan()->cs_wallet); try { int nMinVersion = 0; if (m_batch.Read(DBKeys::MINVERSION, nMinVersion)) { @@ -516,8 +515,9 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet) // nTimeFirstKey is only reliable if all keys have metadata if ((wss.nKeys + wss.nCKeys + wss.nWatchKeys) != wss.nKeyMeta) { - auto spk_man = pwallet->GetLegacyScriptPubKeyMan(); + auto spk_man = pwallet->GetOrCreateLegacyScriptPubKeyMan(); if (spk_man) { + LOCK(spk_man->cs_KeyStore); spk_man->UpdateTimeFirstKey(1); } } @@ -713,7 +713,6 @@ bool WalletBatch::RecoverKeysOnlyFilter(void *callbackData, CDataStream ssKey, C { // Required in LoadKeyMetadata(): LOCK(dummyWallet->cs_wallet); - AssertLockHeld(dummyWallet->GetLegacyScriptPubKeyMan()->cs_wallet); fReadOK = ReadKeyValue(dummyWallet, ssKey, ssValue, dummyWss, strType, strErr); } diff --git a/src/wallet/wallettool.cpp b/src/wallet/wallettool.cpp index dc0cac60bd..fbfdf9dd6b 100644 --- a/src/wallet/wallettool.cpp +++ b/src/wallet/wallettool.cpp @@ -27,6 +27,7 @@ static std::shared_ptr<CWallet> CreateWallet(const std::string& name, const fs:: } // dummy chain interface std::shared_ptr<CWallet> wallet_instance(new CWallet(nullptr /* chain */, WalletLocation(name), WalletDatabase::Create(path)), WalletToolReleaseWallet); + LOCK(wallet_instance->cs_wallet); bool first_run = true; DBErrors load_wallet_ret = wallet_instance->LoadWallet(first_run); if (load_wallet_ret != DBErrors::LOAD_OK) { @@ -37,7 +38,7 @@ static std::shared_ptr<CWallet> CreateWallet(const std::string& name, const fs:: wallet_instance->SetMinVersion(FEATURE_HD_SPLIT); // generate a new HD seed - auto spk_man = wallet_instance->GetLegacyScriptPubKeyMan(); + auto spk_man = wallet_instance->GetOrCreateLegacyScriptPubKeyMan(); CPubKey seed = spk_man->GenerateNewSeed(); spk_man->SetHDSeed(seed); diff --git a/src/zmq/zmqrpc.cpp b/src/zmq/zmqrpc.cpp index 5652877f3c..9c9b27a413 100644 --- a/src/zmq/zmqrpc.cpp +++ b/src/zmq/zmqrpc.cpp @@ -21,9 +21,9 @@ UniValue getzmqnotifications(const JSONRPCRequest& request) RPCResult{ "[\n" " { (json object)\n" - " \"type\": \"pubhashtx\", (string) Type of notification\n" - " \"address\": \"...\", (string) Address of the publisher\n" - " \"hwm\": n (numeric) Outbound message high water mark\n" + " \"type\" : \"pubhashtx\", (string) Type of notification\n" + " \"address\" : \"...\", (string) Address of the publisher\n" + " \"hwm\" : n (numeric) Outbound message high water mark\n" " },\n" " ...\n" "]\n" |
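
The hunks above change CWallet::GetSigningProvider to return std::unique_ptr<SigningProvider> rather than a raw pointer, and introduce LegacySigningProvider, a thin non-owning wrapper that forwards script and key lookups to a LegacyScriptPubKeyMan so the wallet can hand out a freshly allocated provider on every call. Below is a minimal, self-contained C++ sketch of that ownership pattern only; KeyStore, KeyStoreSigningProvider and the string-based key type are simplified stand-ins for illustration, not the real Bitcoin Core classes.

// Sketch: a long-lived key store owned by the wallet, plus a cheap forwarding
// wrapper handed out as a fresh unique_ptr per request.
#include <map>
#include <memory>
#include <string>

// Stand-in for SigningProvider: the interface callers sign against.
struct SigningProvider {
    virtual ~SigningProvider() = default;
    virtual bool GetKey(const std::string& id, std::string& key_out) const = 0;
};

// Stand-in for LegacyScriptPubKeyMan: owns the actual key material.
class KeyStore {
public:
    void AddKey(const std::string& id, const std::string& key) { m_keys[id] = key; }
    bool GetKey(const std::string& id, std::string& key_out) const {
        auto it = m_keys.find(id);
        if (it == m_keys.end()) return false;
        key_out = it->second;
        return true;
    }
private:
    std::map<std::string, std::string> m_keys;
};

// Stand-in for LegacySigningProvider: non-owning, forwards every lookup.
class KeyStoreSigningProvider : public SigningProvider {
public:
    explicit KeyStoreSigningProvider(const KeyStore& store) : m_store(store) {}
    bool GetKey(const std::string& id, std::string& key_out) const override {
        return m_store.GetKey(id, key_out);
    }
private:
    const KeyStore& m_store;
};

// Stand-in for CWallet::GetSigningProvider: a new unique_ptr per call while
// the key store itself stays owned by the wallet.
std::unique_ptr<SigningProvider> GetSigningProvider(const KeyStore& store)
{
    return std::make_unique<KeyStoreSigningProvider>(store);
}

int main()
{
    KeyStore store;
    store.AddKey("keyid-1", "privkey-bytes");

    std::unique_ptr<SigningProvider> provider = GetSigningProvider(store);
    std::string key;
    return (provider && provider->GetKey("keyid-1", key)) ? 0 : 1;
}

Because the wrapper holds only a const reference, allocating one per GetSigningProvider call is cheap, and it leaves room for other ScriptPubKeyMan implementations to build their providers on demand instead of exposing a single legacy key store directly.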
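
The new CWallet members in the diff (m_external_spk_managers, m_internal_spk_managers, and the owning m_spk_managers map keyed by ScriptPubKeyMan::GetID) are wired up by SetupLegacyScriptPubKeyMan, which installs one shared LegacyScriptPubKeyMan for every output type on both the external and internal chains. The following is a simplified standalone sketch of that registration and of the deduplication done by GetActiveScriptPubKeyMans; the enum values, the string ID and the class names here are illustrative stand-ins, not the real types.

#include <cassert>
#include <map>
#include <memory>
#include <set>
#include <string>

enum class OutputType { LEGACY, P2SH_SEGWIT, BECH32 };
static const OutputType OUTPUT_TYPES[] = {OutputType::LEGACY, OutputType::P2SH_SEGWIT, OutputType::BECH32};

struct ScriptPubKeyMan {
    virtual ~ScriptPubKeyMan() = default;
    virtual std::string GetID() const = 0;
};

struct LegacyManager : ScriptPubKeyMan {
    // The real code derives the ID from an internal structure; a constant is enough here.
    std::string GetID() const override { return "legacy"; }
};

class Wallet {
public:
    // Mirrors SetupLegacyScriptPubKeyMan: one manager object, registered for
    // every (output type, chain) slot, owned exactly once in m_spk_managers.
    void SetupLegacyManager()
    {
        if (!m_internal.empty() || !m_external.empty() || !m_spk_managers.empty()) return;
        auto spk_man = std::make_unique<LegacyManager>();
        for (OutputType type : OUTPUT_TYPES) {
            m_internal[type] = spk_man.get();
            m_external[type] = spk_man.get();
        }
        m_spk_managers[spk_man->GetID()] = std::move(spk_man);
    }

    // Mirrors GetActiveScriptPubKeyMans: the std::set collapses the six slots
    // back down to the single shared legacy manager.
    std::set<ScriptPubKeyMan*> GetActive() const
    {
        std::set<ScriptPubKeyMan*> out;
        for (const auto& entry : m_external) out.insert(entry.second);
        for (const auto& entry : m_internal) out.insert(entry.second);
        return out;
    }

private:
    std::map<OutputType, ScriptPubKeyMan*> m_external;
    std::map<OutputType, ScriptPubKeyMan*> m_internal;
    std::map<std::string, std::unique_ptr<ScriptPubKeyMan>> m_spk_managers;
};

int main()
{
    Wallet w;
    w.SetupLegacyManager();
    assert(w.GetActive().size() == 1); // six slots, one underlying manager
    return 0;
}

The set-based accessor is what lets loops such as TopUpKeyPool, SetupGeneration and KeypoolCountExternalKeys in the diff run once per underlying manager rather than once per (type, chain) slot.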
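
Several per-wallet predicates in the diff also switch from consulting the single m_spk_man to aggregating over every registered manager: CWallet::IsMine keeps the strongest answer via std::max, while CWallet::IsHDEnabled stays true only if every manager reports HD. A small standalone sketch of that aggregation, using simplified stand-in types rather than the real isminetype flags:

#include <algorithm>
#include <vector>

// Simplified stand-in for isminetype: higher value = stronger ownership claim.
enum isminetype { ISMINE_NO = 0, ISMINE_WATCH_ONLY = 1, ISMINE_SPENDABLE = 2 };

struct Manager {
    isminetype mine;   // what this manager would answer for a given script
    bool hd_enabled;   // whether this manager is HD
};

isminetype IsMine(const std::vector<Manager>& managers)
{
    isminetype result = ISMINE_NO;
    for (const Manager& m : managers) {
        result = std::max(result, m.mine); // strongest answer across managers wins
    }
    return result;
}

bool IsHDEnabled(const std::vector<Manager>& managers)
{
    bool result = true;
    for (const Manager& m : managers) {
        result &= m.hd_enabled; // a single non-HD manager makes the wallet non-HD
    }
    return result;
}

int main()
{
    std::vector<Manager> managers{{ISMINE_NO, true}, {ISMINE_SPENDABLE, true}};
    return (IsMine(managers) == ISMINE_SPENDABLE && IsHDEnabled(managers)) ? 0 : 1;
}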