-rwxr-xr-x  contrib/devtools/clang-format-diff.py      |  1
-rw-r--r--  doc/developer-notes.md                     | 51
-rw-r--r--  src/dbwrapper.cpp                          | 31
-rw-r--r--  src/rpc/rawtransaction.cpp                 | 12
-rwxr-xr-x  test/functional/feature_fee_estimation.py  |  8
-rwxr-xr-x  test/functional/feature_pruning.py         |  4
6 files changed, 90 insertions(+), 17 deletions(-)
diff --git a/contrib/devtools/clang-format-diff.py b/contrib/devtools/clang-format-diff.py
index ca1bd8854f..5402870fba 100755
--- a/contrib/devtools/clang-format-diff.py
+++ b/contrib/devtools/clang-format-diff.py
@@ -71,7 +71,6 @@
 import argparse
 import difflib
 import io
 import re
-import string
 import subprocess
 import sys
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index a55308fceb..0a4ad32a2b 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -581,7 +581,10 @@ its upstream repository. Current subtrees include:
 
 - src/leveldb
-  - Upstream at https://github.com/google/leveldb ; Maintained by Google, but open important PRs to Core to avoid delay
+  - Upstream at https://github.com/google/leveldb ; Maintained by Google, but
+    open important PRs to Core to avoid delay.
+  - **Note**: Follow the instructions in [Upgrading LevelDB](#upgrading-leveldb) when
+    merging upstream changes to the leveldb subtree.
 
 - src/libsecp256k1
   - Upstream at https://github.com/bitcoin-core/secp256k1/ ; actively maintained by Core contributors.
@@ -592,6 +595,52 @@ Current subtrees include:
 - src/univalue
   - Upstream at https://github.com/jgarzik/univalue ; report important PRs to Core to avoid delay.
 
+Upgrading LevelDB
+---------------------
+
+Extra care must be taken when upgrading LevelDB. This section explains issues
+you must be aware of.
+
+### File Descriptor Counts
+
+In most configurations we use the default LevelDB value for `max_open_files`,
+which is 1000 at the time of this writing. If LevelDB actually uses this many
+file descriptors it will cause problems with Bitcoin's `select()` loop, because
+it may cause new sockets to be created where the fd value is >= 1024. For this
+reason, on 64-bit Unix systems we rely on an internal LevelDB optimization that
+uses `mmap()` + `close()` to open table files without actually retaining
+references to the table file descriptors. If you are upgrading LevelDB, you must
+sanity check the changes to make sure that this assumption remains valid.
+
+In addition to reviewing the upstream changes in `env_posix.cc`, you can use
+`lsof` to check this. For example, on Linux this command will show open `.ldb`
+file counts:
+
+```bash
+$ lsof -p $(pidof bitcoind) |\
+    awk 'BEGIN { fd=0; mem=0; } /ldb$/ { if ($4 == "mem") mem++; else fd++ } END { printf "mem = %s, fd = %s\n", mem, fd}'
+mem = 119, fd = 0
+```
+
+The `mem` value shows how many files are mmap'ed, and the `fd` value shows how
+many file descriptors these files are using. You should check that `fd` is a
+small number (usually 0 on 64-bit hosts).
+
+See the notes in the `SetMaxOpenFiles()` function in `dbwrapper.cpp` for more
+details.
+
+### Consensus Compatibility
+
+It is possible for LevelDB changes to inadvertently change consensus
+compatibility between nodes. This happened in Bitcoin 0.8 (when LevelDB was
+first introduced). When upgrading LevelDB you should review the upstream changes
+to check for issues affecting consensus compatibility.
+
+For example, if LevelDB had a bug that accidentally prevented a key from being
+returned in an edge case, and that bug was fixed upstream, the bug "fix" would
+be an incompatible consensus change. In this situation the correct behavior
+would be to revert the upstream fix before applying the updates to Bitcoin's
+copy of LevelDB. In general you should be wary of any upstream changes affecting
+what data is returned from LevelDB queries.
+
 Git and GitHub tips
 ---------------------
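Reviewer note: the file-descriptor discussion above hinges on a hard POSIX limit that the new documentation only alludes to. As a minimal sketch (not part of this commit; the claim that `FD_SETSIZE` is 1024 applies to typical glibc builds and is my assumption for other platforms), `select()` can only watch descriptors below `FD_SETSIZE`, which is exactly the threshold the new section cites:

```cpp
// Minimal sketch: why fds >= FD_SETSIZE are unusable with select().
// FD_SET() on a descriptor at or above FD_SETSIZE is undefined behavior,
// so a node whose LevelDB tables pin ~1000 real fds can be handed socket
// descriptors that the select() loop simply cannot register.
#include <sys/select.h>
#include <cstdio>

int main()
{
    std::printf("FD_SETSIZE = %d\n", FD_SETSIZE); // 1024 on typical glibc
    return 0;
}
```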
diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp
index fb0d4215a2..752f985bc0 100644
--- a/src/dbwrapper.cpp
+++ b/src/dbwrapper.cpp
@@ -71,6 +71,31 @@ public:
     }
 };
 
+static void SetMaxOpenFiles(leveldb::Options *options) {
+    // On most platforms the default setting of max_open_files (which is 1000)
+    // is optimal. On Windows using a large file count is OK because the handles
+    // do not interfere with select() loops. On 64-bit Unix hosts this value is
+    // also OK, because up to that amount LevelDB will use an mmap
+    // implementation that does not use extra file descriptors (the fds are
+    // closed after being mmaped).
+    //
+    // Increasing the value beyond the default is dangerous because LevelDB will
+    // fall back to a non-mmap implementation when the file count is too large.
+    // On 32-bit Unix hosts we should decrease the value because the handles use
+    // up real fds, and we want to avoid fd exhaustion issues.
+    //
+    // See PR #12495 for further discussion.
+
+    int default_open_files = options->max_open_files;
+#ifndef WIN32
+    if (sizeof(void*) < 8) {
+        options->max_open_files = 64;
+    }
+#endif
+    LogPrint(BCLog::LEVELDB, "LevelDB using max_open_files=%d (default=%d)\n",
+             options->max_open_files, default_open_files);
+}
+
 static leveldb::Options GetOptions(size_t nCacheSize)
 {
     leveldb::Options options;
@@ -78,13 +103,13 @@ static leveldb::Options GetOptions(size_t nCacheSize)
     options.write_buffer_size = nCacheSize / 4; // up to two write buffers may be held in memory simultaneously
     options.filter_policy = leveldb::NewBloomFilterPolicy(10);
     options.compression = leveldb::kNoCompression;
-    options.max_open_files = 64;
     options.info_log = new CBitcoinLevelDBLogger();
     if (leveldb::kMajorVersion > 1 || (leveldb::kMajorVersion == 1 && leveldb::kMinorVersion >= 16)) {
         // LevelDB versions before 1.16 consider short writes to be corruption. Only trigger error
         // on corruption in later versions.
         options.paranoid_checks = true;
     }
+    SetMaxOpenFiles(&options);
     return options;
 }
 
@@ -159,12 +184,12 @@ bool CDBWrapper::WriteBatch(CDBBatch& batch, bool fSync)
 {
     const bool log_memory = LogAcceptCategory(BCLog::LEVELDB);
     double mem_before = 0;
     if (log_memory) {
-        mem_before = DynamicMemoryUsage() / 1024 / 1024;
+        mem_before = DynamicMemoryUsage() / 1024.0 / 1024;
     }
     leveldb::Status status = pdb->Write(fSync ? syncoptions : writeoptions, &batch.batch);
     dbwrapper_private::HandleError(status);
     if (log_memory) {
-        double mem_after = DynamicMemoryUsage() / 1024 / 1024;
+        double mem_after = DynamicMemoryUsage() / 1024.0 / 1024;
         LogPrint(BCLog::LEVELDB, "WriteBatch memory usage: db=%s, before=%.1fMiB, after=%.1fMiB\n",
                  m_name, mem_before, mem_after);
     }
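Reviewer note: to make the new `SetMaxOpenFiles()` heuristic concrete, here is a standalone sketch of the same pointer-width check, extracted and simplified by me (the surrounding scaffolding is not code from the patch). `sizeof(void*)` is 4 on 32-bit hosts and 8 on 64-bit hosts, so the clamp to 64 descriptors applies only where table handles consume real fds:

```cpp
// Standalone sketch of the pointer-width heuristic used by SetMaxOpenFiles().
#include <cstdio>

int main()
{
    // Mirrors the patch: keep LevelDB's default (1000 at the time of writing)
    // on 64-bit builds, clamp to 64 on 32-bit Unix builds to avoid fd
    // exhaustion.
    int max_open_files = 1000;
#ifndef WIN32
    if (sizeof(void*) < 8) {
        max_open_files = 64;
    }
#endif
    std::printf("sizeof(void*) = %zu -> max_open_files = %d\n",
                sizeof(void*), max_open_files);
    return 0;
}
```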
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 20bfd3f355..77040f75fd 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -1023,18 +1023,18 @@ UniValue signrawtransaction(const JSONRPCRequest& request)
         new_request.params.push_back(request.params[1]);
         new_request.params.push_back(request.params[3]);
         return signrawtransactionwithkey(new_request);
-    }
-    // Otherwise sign with the wallet which does not take a privkeys parameter
+    } else {
 #ifdef ENABLE_WALLET
-    else {
+        // Otherwise sign with the wallet which does not take a privkeys parameter
         new_request.params.push_back(request.params[0]);
         new_request.params.push_back(request.params[1]);
         new_request.params.push_back(request.params[3]);
         return signrawtransactionwithwallet(new_request);
-    }
+#else
+        // If we have made it this far, then wallet is disabled and no private keys were given, so fail here.
+        throw JSONRPCError(RPC_INVALID_PARAMETER, "No private keys available.");
 #endif
-    // If we have made it this far, then wallet is disabled and no private keys were given, so fail here.
-    throw JSONRPCError(RPC_INVALID_PARAMETER, "No private keys available.");
+    }
 }
 
 UniValue sendrawtransaction(const JSONRPCRequest& request)
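Reviewer note: the `signrawtransaction` change is easier to see in reduced form. In the old shape, the `#ifdef`'d `else` block left the trailing `throw` as dead code in wallet builds (both branches return before reaching it), while no-wallet builds fell through to it. The new shape makes each configuration's control flow explicit. A compact sketch with hypothetical stand-in names, compilable with or without `-DENABLE_WALLET`:

```cpp
// Reduced sketch (names hypothetical) of the new dispatch shape.
#include <stdexcept>
#include <cstdio>

static int sign_with_key() { return 1; }
#ifdef ENABLE_WALLET
static int sign_with_wallet() { return 2; }
#endif

int dispatch(bool have_privkeys)
{
    if (have_privkeys) {
        return sign_with_key();
    } else {
#ifdef ENABLE_WALLET
        return sign_with_wallet(); // wallet builds sign with the wallet
#else
        // No wallet compiled in and no keys supplied: fail immediately,
        // instead of falling through to a statement after the #ifdef.
        throw std::runtime_error("No private keys available.");
#endif
    }
}

int main()
{
    std::printf("dispatch(true) = %d\n", dispatch(true));
    return 0;
}
```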
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index 8e97df2361..32a6bd5d59 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -133,12 +133,12 @@ class EstimateFeeTest(BitcoinTestFramework):
         which we will use to generate our transactions.
         """
         self.add_nodes(3, extra_args=[["-maxorphantx=1000", "-whitelist=127.0.0.1"],
-                                      ["-maxorphantx=1000"],
-                                      ["-maxorphantx=1000"]])
+                                      ["-blockmaxweight=68000", "-maxorphantx=1000"],
+                                      ["-blockmaxweight=32000", "-maxorphantx=1000"]])
         # Use node0 to mine blocks for input splitting
         # Node1 mines small blocks but that are bigger than the expected transaction rate.
-        # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
-        # (17k is room enough for 110 or so transactions)
+        # NOTE: the CreateNewBlock code starts counting block weight at 4,000 weight units,
+        # (68k weight is room enough for 120 or so transactions)
         # Node2 is a stingy miner, that
         # produces too small blocks (room for only 55 or so transactions)
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
index 227ae6cb36..3adde8dd73 100755
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -124,7 +124,7 @@ class PruneTest(BitcoinTestFramework):
         # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
         # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
         self.stop_node(1)
-        self.start_node(1, extra_args=["-maxreceivebuffer=20000","-checkblocks=5", "-disablesafemode"])
+        self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxweight=20000", "-checkblocks=5", "-disablesafemode"])
         height = self.nodes[1].getblockcount()
         self.log.info("Current block height: %d" % height)
@@ -147,7 +147,7 @@ class PruneTest(BitcoinTestFramework):
         # Reboot node1 to clear those giant tx's from mempool
         self.stop_node(1)
-        self.start_node(1, extra_args=["-maxreceivebuffer=20000","-checkblocks=5", "-disablesafemode"])
+        self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxweight=20000", "-checkblocks=5", "-disablesafemode"])
         self.log.info("Generating new longer chain of 300 more blocks")
         self.nodes[1].generate(300)
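Reviewer note: the updated comment in feature_fee_estimation.py can be sanity-checked with quick arithmetic (mine, not from the test; the per-transaction weight is an assumption chosen to match the comment's count):

```cpp
// Back-of-the-envelope check of the "120 or so transactions" comment.
#include <cstdio>

int main()
{
    const int block_max_weight = 68000; // node1's -blockmaxweight
    const int reserved_weight  = 4000;  // CreateNewBlock's starting weight
    const int tx_weight        = 530;   // assumed weight of one test transaction
    std::printf("txs per block ~= %d\n",
                (block_max_weight - reserved_weight) / tx_weight); // ~= 120
    return 0;
}
```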