author     fanquake <fanquake@gmail.com>  2023-10-06 11:18:45 +0100
committer  fanquake <fanquake@gmail.com>  2023-10-06 11:36:51 +0100
commit     1416d09cbabe80e57b8dad334d9571c2e8f2006e (patch)
tree       8b5d071eb5069c9905abf4e87bbdf2b8b9868b67
parent     0df8f98d654291c08b9fe6cb11ec1685828f1504 (diff)
parent     9077f214f55386af12419235deaff52b23446856 (diff)
Merge bitcoin/bitcoin#28535: [24.x] Further backports
9077f214f55386af12419235deaff52b23446856 depends: fix unusable memory_resource in macos qt build (fanquake)
dccacf0bf7d5815036c64d4c040b5702ad890cd8 build, macos: Fix `qt` package build with new Xcode 15 linker (Hennadii Stepanov)
43596499b2c2b8cc0a51c0b9db9153047c266627 ci: Switch to `amd64` container in "ARM" task (Hennadii Stepanov)
805f98b79aa9e5ecda70516578296bd0a065a707 ci: Nuke Android APK task, Use credits for tsan (MarcoFalke)
cb5512da2336c9145a670c287f1abecc372906b9 test: ensure old fee_estimate.dat not read on restart and flushed (ismaelsadeeq)
01f8ee48efc1f46563f2841c0a7125b20b0df159 tx fees, policy: read stale fee estimates with a regtest-only option (ismaelsadeeq)
1c98029b3913a99e7bfe563d8ded2a5074e75fa1 tx fees, policy: do not read estimates of old fee_estimates.dat (ismaelsadeeq)
77979e0172d0bc86bfbc60f6a652e26c77722e29 tx fees, policy: periodically flush fee estimates to fee_estimates.dat (ismaelsadeeq)
67b6d99aead0d1b2030bc3e88256d279477894b5 Do not use std::vector = {} to release memory (Pieter Wuille)
defdc1502372863f700720e8d5cde69190371a64 ci: Use podman stop over podman kill (MarcoFalke)
7f1357de5136bd6f80758f1f31e6dba21acb9954 ci: Use podman for persistent workers (MarcoFalke)
0db69a3d500020e11fd67c55732e0d02eb606204 ci: Prune dangling images on RESTART_CI_DOCKER_BEFORE_RUN (MarcoFalke)
Pull request description:
Backports to the 24.x branch. Currently:
* https://github.com/bitcoin/bitcoin/pull/27622
* https://github.com/bitcoin/bitcoin/pull/27777
* https://github.com/bitcoin/bitcoin/pull/27834
* https://github.com/bitcoin/bitcoin/pull/27844
* https://github.com/bitcoin/bitcoin/pull/27886
* https://github.com/bitcoin/bitcoin/pull/28452
* https://github.com/bitcoin/bitcoin/pull/28543
* https://github.com/bitcoin/bitcoin/pull/28571
ACKs for top commit:
stickies-v:
ACK 9077f214f5
Tree-SHA512: abaafc9a048b67b494993134fd332457ea52695ec007b963c283f962ec40c3b6b3a7e98407481be55d3271a595088a0281cc84b79dad4f24d260381ea0153076
-rw-r--r--   .cirrus.yml                                  26
-rwxr-xr-x   ci/test/04_install.sh                         4
-rw-r--r--   depends/packages/qt.mk                        4
-rw-r--r--   depends/patches/qt/fix-macos-linker.patch    55
-rw-r--r--   depends/patches/qt/memory_resource.patch     49
-rw-r--r--   src/headerssync.cpp                           5
-rw-r--r--   src/init.cpp                                 14
-rw-r--r--   src/policy/fees.cpp                          33
-rw-r--r--   src/policy/fees.h                            22
-rw-r--r--   src/test/fuzz/policy_estimator.cpp            2
-rw-r--r--   src/test/fuzz/policy_estimator_io.cpp         2
-rw-r--r--   src/test/util/setup_common.cpp                2
-rw-r--r--   src/test/util_tests.cpp                      25
-rw-r--r--   src/util/vector.h                            18
-rwxr-xr-x   test/functional/feature_fee_estimation.py   101
15 files changed, 329 insertions, 33 deletions
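
Editor's note: the src/util/vector.h hunk below adds a ClearShrink() helper, and src/headerssync.cpp switches to it because assigning an empty braced-init-list does not reliably release a container's allocation. The following is a minimal standalone sketch (not part of this commit, assuming only the standard library; printed capacities are implementation-dependent) showing the difference:

// Standalone sketch: why ClearShrink() swaps with an empty temporary instead of assigning {}.
#include <cstdio>
#include <vector>

template <typename V>
void ClearShrink(V& v) noexcept
{
    V{}.swap(v); // the old buffer is freed when the temporary is destroyed
}

int main()
{
    std::vector<int> v(1000, 42);
    std::printf("initial:            size=%zu capacity=%zu\n", v.size(), v.capacity());

    v.clear(); // removes the elements, but the capacity typically stays
    std::printf("after clear():      size=%zu capacity=%zu\n", v.size(), v.capacity());

    ClearShrink(v); // capacity drops to 0: the allocation is actually released
    std::printf("after ClearShrinkitest: size=%zu capacity=%zu\n", v.size(), v.capacity());
}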
diff --git a/.cirrus.yml b/.cirrus.yml
index fdecfc1eb6..9bfcd28e65 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -189,14 +189,12 @@ task:
 task:
   name: 'ARM [unit tests, no functional tests] [bullseye]'
   << : *GLOBAL_TASK_TEMPLATE
-  arm_container:
-    image: debian:bullseye
-    cpu: 2
-    memory: 8G
+  container:
+    docker_arguments:
+      CI_IMAGE_NAME_TAG: debian:bullseye
+      FILE_ENV: "./ci/test/00_setup_env_arm.sh"
   env:
     << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
-    FILE_ENV: "./ci/test/00_setup_env_arm.sh"
-    QEMU_USER_CMD: ""  # Disable qemu and run the test natively
 
 task:
   name: 'Win64 [unit tests, no gui tests, no boost::process, no functional tests] [jammy]'
@@ -322,19 +320,3 @@ task:
     CI_USE_APT_INSTALL: "no"
     PACKAGE_MANAGER_INSTALL: "echo"  # Nothing to do
     FILE_ENV: "./ci/test/00_setup_env_mac_native_arm64.sh"
-
-task:
-  name: 'ARM64 Android APK [focal]'
-  << : *BASE_TEMPLATE
-  android_sdk_cache:
-    folder: "depends/SDKs/android"
-    fingerprint_key: "ANDROID_API_LEVEL=28 ANDROID_BUILD_TOOLS_VERSION=28.0.3 ANDROID_NDK_VERSION=23.2.8568313"
-  depends_sources_cache:
-    folder: "depends/sources"
-    fingerprint_script: git rev-list -1 HEAD ./depends
-  << : *MAIN_TEMPLATE
-  container:
-    image: ubuntu:focal
-  env:
-    << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
-    FILE_ENV: "./ci/test/00_setup_env_android.sh"
diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh
index a4f1a8a7ff..f23abcd23b 100755
--- a/ci/test/04_install.sh
+++ b/ci/test/04_install.sh
@@ -31,7 +31,9 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
 
   if [ -n "${RESTART_CI_DOCKER_BEFORE_RUN}" ] ; then
     echo "Restart docker before run to stop and clear all containers started with --rm"
-    systemctl restart docker
+    podman container stop --all  # Similar to "systemctl restart docker"
+    echo "Prune all dangling images"
+    docker image prune --force
  fi
 
   # shellcheck disable=SC2086
diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk
index d9ae918d71..8fb8df6ec8 100644
--- a/depends/packages/qt.mk
+++ b/depends/packages/qt.mk
@@ -22,6 +22,8 @@ $(package)_patches += rcc_hardcode_timestamp.patch
 $(package)_patches += duplicate_lcqpafonts.patch
 $(package)_patches += fast_fixed_dtoa_no_optimize.patch
 $(package)_patches += guix_cross_lib_path.patch
+$(package)_patches += fix-macos-linker.patch
+$(package)_patches += memory_resource.patch
 
 $(package)_qttranslations_file_name=qttranslations-$($(package)_suffix)
 $(package)_qttranslations_sha256_hash=c92af4171397a0ed272330b4fa0669790fcac8d050b07c8b8cc565ebeba6735e
@@ -231,6 +233,7 @@ endef
 define $(package)_preprocess_cmds
   cp $($(package)_patch_dir)/qt.pro qt.pro && \
   cp $($(package)_patch_dir)/qttools_src.pro qttools/src/src.pro && \
+  patch -p1 -i $($(package)_patch_dir)/fix-macos-linker.patch && \
   patch -p1 -i $($(package)_patch_dir)/dont_hardcode_pwd.patch && \
   patch -p1 -i $($(package)_patch_dir)/fix_qt_pkgconfig.patch && \
   patch -p1 -i $($(package)_patch_dir)/fix_android_jni_static.patch && \
@@ -239,6 +242,7 @@ define $(package)_preprocess_cmds
   patch -p1 -i $($(package)_patch_dir)/qtbase-moc-ignore-gcc-macro.patch && \
   patch -p1 -i $($(package)_patch_dir)/fix_montery_include.patch && \
   patch -p1 -i $($(package)_patch_dir)/use_android_ndk23.patch && \
+  patch -p1 -i $($(package)_patch_dir)/memory_resource.patch && \
   patch -p1 -i $($(package)_patch_dir)/rcc_hardcode_timestamp.patch && \
   patch -p1 -i $($(package)_patch_dir)/duplicate_lcqpafonts.patch && \
   patch -p1 -i $($(package)_patch_dir)/fast_fixed_dtoa_no_optimize.patch && \
diff --git a/depends/patches/qt/fix-macos-linker.patch b/depends/patches/qt/fix-macos-linker.patch
new file mode 100644
index 0000000000..db056de4d9
--- /dev/null
+++ b/depends/patches/qt/fix-macos-linker.patch
@@ -0,0 +1,55 @@
+qmake: Don't error out if QMAKE_DEFAULT_LIBDIRS is empty on macOS
+
+The new linker in Xcode 15 doesn't provide any default linker or
+framework paths when requested via -v, but still seems to use the
+default paths documented in the ld man page.
+
+We trust that linker will do the right thing, even if we don't
+know of its default linker paths.
+
+We also need to opt out of the default fallback logic to
+set the libdirs to /lib and /usr/lib.
+
+This may result in UnixMakefileGenerator::findLibraries finding
+different libraries than expected, if additional paths are
+passed with -L, which will then take precedence for qmake,
+even if the linker itself will use the library from the
+SDK's default paths. This should hopefully not be an issue
+in practice, as we don't turn -lFoo into absolute paths in
+qmake, so the only risk is that we're picking up the wrong
+prl files and adding additional dependencies that the lib
+in the SDK doesn't have.
+
+Upstream commits:
+ - Qt 5.15.16: Not yet publicly available.
+ - Qt dev: cdf64b0e47115cc473e1afd1472b4b09e130b2a5
+
+For other Qt branches see
+https://codereview.qt-project.org/q/I2347b26e2df0828471373b0e15b8c9089274c65d
+
+--- old/qtbase/mkspecs/features/toolchain.prf
++++ new/qtbase/mkspecs/features/toolchain.prf
+@@ -283,9 +283,12 @@ isEmpty($${target_prefix}.INCDIRS) {
+                 }
+             }
+         }
+-        isEmpty(QMAKE_DEFAULT_LIBDIRS)|isEmpty(QMAKE_DEFAULT_INCDIRS): \
++        isEmpty(QMAKE_DEFAULT_INCDIRS): \
+             !integrity: \
+-                error("failed to parse default search paths from compiler output")
++                error("failed to parse default include paths from compiler output")
++        isEmpty(QMAKE_DEFAULT_LIBDIRS): \
++            !integrity:!darwin: \
++                error("failed to parse default library paths from compiler output")
+         QMAKE_DEFAULT_LIBDIRS = $$unique(QMAKE_DEFAULT_LIBDIRS)
+     } else: ghs {
+         cmd = $$QMAKE_CXX $$QMAKE_CXXFLAGS -$${LITERAL_HASH} -o /tmp/fake_output /tmp/fake_input.cpp
+@@ -407,7 +410,7 @@ isEmpty($${target_prefix}.INCDIRS) {
+         QMAKE_DEFAULT_INCDIRS = $$split(INCLUDE, $$QMAKE_DIRLIST_SEP)
+     }
+ 
+-    unix:if(!cross_compile|host_build) {
++    unix:!darwin:if(!cross_compile|host_build) {
+         isEmpty(QMAKE_DEFAULT_INCDIRS): QMAKE_DEFAULT_INCDIRS = /usr/include /usr/local/include
+         isEmpty(QMAKE_DEFAULT_LIBDIRS): QMAKE_DEFAULT_LIBDIRS = /lib /usr/lib
+     }
diff --git a/depends/patches/qt/memory_resource.patch b/depends/patches/qt/memory_resource.patch
new file mode 100644
index 0000000000..e41d68db30
--- /dev/null
+++ b/depends/patches/qt/memory_resource.patch
@@ -0,0 +1,49 @@
+Fix unusable memory_resource on macos
+
+See https://bugreports.qt.io/browse/QTBUG-117484
+and https://bugreports.qt.io/browse/QTBUG-114316
+
+--- a/qtbase/src/corelib/tools/qduplicatetracker_p.h
++++ b/qtbase/src/corelib/tools/qduplicatetracker_p.h
+@@ -52,7 +52,7 @@
+ 
+ #include <qglobal.h>
+ 
+-#if QT_HAS_INCLUDE(<memory_resource>) && __cplusplus > 201402L
++#ifdef __cpp_lib_memory_resource
+ #  include <unordered_set>
+ #  include <memory_resource>
+ #else
+
+--- a/qtbase/src/corelib/global/qcompilerdetection.h
++++ b/qtbase/src/corelib/global/qcompilerdetection.h
+@@ -1041,16 +1041,22 @@
+ #    endif // !_HAS_CONSTEXPR
+ #   endif // !__GLIBCXX__ && !_LIBCPP_VERSION
+ #  endif // Q_OS_QNX
+-#  if (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC) && defined(__GNUC_LIBSTD__) \
+-      && ((__GNUC_LIBSTD__-0) * 100 + __GNUC_LIBSTD_MINOR__-0 <= 402)
++#  if (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC)
++#    if defined(__GNUC_LIBSTD__) && ((__GNUC_LIBSTD__-0) * 100 + __GNUC_LIBSTD_MINOR__-0 <= 402)
+ // Apple has not updated libstdc++ since 2007, which means it does not have
+ // <initializer_list> or std::move. Let's disable these features
+-#    undef Q_COMPILER_INITIALIZER_LISTS
+-#    undef Q_COMPILER_RVALUE_REFS
+-#    undef Q_COMPILER_REF_QUALIFIERS
++#      undef Q_COMPILER_INITIALIZER_LISTS
++#      undef Q_COMPILER_RVALUE_REFS
++#      undef Q_COMPILER_REF_QUALIFIERS
+ // Also disable <atomic>, since it's clearly not there
+-#    undef Q_COMPILER_ATOMICS
+-#  endif
++#      undef Q_COMPILER_ATOMICS
++#    endif
++#    if defined(__cpp_lib_memory_resource) \
++      && ((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED < 140000) \
++        || (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED < 170000))
++#      undef __cpp_lib_memory_resource // Only supported on macOS 14 and iOS 17
++#    endif
++#  endif // (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC)
+ #  if defined(Q_CC_CLANG) && defined(Q_CC_INTEL) && Q_CC_INTEL >= 1500
+ // ICC 15.x and 16.0 have their own implementation of std::atomic, which is activated when in Clang mode
+ // (probably because libc++'s <atomic> on OS X failed to compile), but they're missing some
diff --git a/src/headerssync.cpp b/src/headerssync.cpp
index 757b942cd9..994f8414d1 100644
--- a/src/headerssync.cpp
+++ b/src/headerssync.cpp
@@ -7,6 +7,7 @@
 #include <pow.h>
 #include <timedata.h>
 #include <util/check.h>
+#include <util/vector.h>
 
 // The two constants below are computed using the simulation script on
 // https://gist.github.com/sipa/016ae445c132cdf65a2791534dfb7ae1
@@ -51,9 +52,9 @@ HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus
 void HeadersSyncState::Finalize()
 {
     Assume(m_download_state != State::FINAL);
-    m_header_commitments = {};
+    ClearShrink(m_header_commitments);
     m_last_header_received.SetNull();
-    m_redownloaded_headers = {};
+    ClearShrink(m_redownloaded_headers);
     m_redownload_buffer_last_hash.SetNull();
     m_redownload_buffer_first_prev_hash.SetNull();
     m_process_all_remaining_headers = false;
diff --git a/src/init.cpp b/src/init.cpp
index 25b40c6c6e..ec1ea071b7 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -76,6 +76,7 @@
 #include <util/system.h>
 #include <util/thread.h>
 #include <util/threadnames.h>
+#include <util/time.h>
 #include <util/translation.h>
 #include <validation.h>
 #include <validationinterface.h>
@@ -561,6 +562,7 @@ void SetupServerArgs(ArgsManager& argsman)
     argsman.AddArg("-acceptnonstdtxn", strprintf("Relay and mine \"non-standard\" transactions (%sdefault: %u)", "testnet/regtest only; ", !testnetChainParams->RequireStandard()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
     argsman.AddArg("-incrementalrelayfee=<amt>", strprintf("Fee rate (in %s/kvB) used to define cost of relay, used for mempool limiting and replacement policy. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_INCREMENTAL_RELAY_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
     argsman.AddArg("-dustrelayfee=<amt>", strprintf("Fee rate (in %s/kvB) used to define dust, the value of an output such that it will cost more than its value in fees at this fee rate to spend it. (default: %s)", CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
+    argsman.AddArg("-acceptstalefeeestimates", strprintf("Read fee estimates even if they are stale (%sdefault: %u) fee estimates are considered stale if they are %s hours old", "regtest only; ", DEFAULT_ACCEPT_STALE_FEE_ESTIMATES, Ticks<std::chrono::hours>(MAX_FILE_AGE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
     argsman.AddArg("-bytespersigop", strprintf("Equivalent bytes per sigop in transactions for relay and mining (default: %u)", DEFAULT_BYTES_PER_SIGOP), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     argsman.AddArg("-datacarrier", strprintf("Relay and mine data carrier transactions (default: %u)", DEFAULT_ACCEPT_DATACARRIER), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     argsman.AddArg("-datacarriersize", strprintf("Maximum size of data in data carrier transactions we relay and mine (default: %u)", MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
@@ -1253,7 +1255,17 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
     assert(!node.fee_estimator);
     // Don't initialize fee estimation with old data if we don't relay transactions,
     // as they would never get updated.
-    if (!ignores_incoming_txs) node.fee_estimator = std::make_unique<CBlockPolicyEstimator>(FeeestPath(args));
+    if (!ignores_incoming_txs) {
+        bool read_stale_estimates = args.GetBoolArg("-acceptstalefeeestimates", DEFAULT_ACCEPT_STALE_FEE_ESTIMATES);
+        if (read_stale_estimates && (chainparams.NetworkIDString() != CBaseChainParams::REGTEST)) {
+            return InitError(strprintf(_("acceptstalefeeestimates is not supported on %s chain."), chainparams.NetworkIDString()));
+        }
+        node.fee_estimator = std::make_unique<CBlockPolicyEstimator>(FeeestPath(args), read_stale_estimates);
+
+        // Flush estimates to disk periodically
+        CBlockPolicyEstimator* fee_estimator = node.fee_estimator.get();
+        node.scheduler->scheduleEvery([fee_estimator] { fee_estimator->FlushFeeEstimates(); }, FEE_FLUSH_INTERVAL);
+    }
 
     // sanitize comments per BIP-0014, format user agent and check total size
     std::vector<std::string> uacomments;
diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp
index 2b940be07e..54b63a5aa6 100644
--- a/src/policy/fees.cpp
+++ b/src/policy/fees.cpp
@@ -24,6 +24,7 @@
 
 #include <algorithm>
 #include <cassert>
+#include <chrono>
 #include <cmath>
 #include <cstddef>
 #include <cstdint>
@@ -527,7 +528,7 @@ bool CBlockPolicyEstimator::_removeTx(const uint256& hash, bool inBlock)
     }
 }
 
-CBlockPolicyEstimator::CBlockPolicyEstimator(const fs::path& estimation_filepath)
+CBlockPolicyEstimator::CBlockPolicyEstimator(const fs::path& estimation_filepath, const bool read_stale_estimates)
     : m_estimation_filepath{estimation_filepath}, nBestSeenHeight{0}, firstRecordedHeight{0}, historicalFirst{0}, historicalBest{0}, trackedTxs{0}, untrackedTxs{0}
 {
     static_assert(MIN_BUCKET_FEERATE > 0, "Min feerate must be nonzero");
@@ -545,9 +546,22 @@ CBlockPolicyEstimator::CBlockPolicyEstimator(const fs::path& estimation_filepath
     shortStats = std::unique_ptr<TxConfirmStats>(new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_PERIODS, SHORT_DECAY, SHORT_SCALE));
     longStats = std::unique_ptr<TxConfirmStats>(new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_PERIODS, LONG_DECAY, LONG_SCALE));
 
-    // If the fee estimation file is present, read recorded estimations
     AutoFile est_file{fsbridge::fopen(m_estimation_filepath, "rb")};
-    if (est_file.IsNull() || !Read(est_file)) {
+
+    // Whenever the fee estimation file is not present return early
+    if (est_file.IsNull()) {
+        LogPrintf("%s is not found. Continue anyway.\n", fs::PathToString(m_estimation_filepath));
+        return;
+    }
+
+    std::chrono::hours file_age = GetFeeEstimatorFileAge();
+    // fee estimate file must not be too old to avoid wrong fee estimates.
+    if (file_age > MAX_FILE_AGE && !read_stale_estimates) {
+        LogPrintf("Fee estimation file %s too old (age=%lld > %lld hours) and will not be used to avoid serving stale estimates.\n", fs::PathToString(m_estimation_filepath), Ticks<std::chrono::hours>(file_age), Ticks<std::chrono::hours>(MAX_FILE_AGE));
+        return;
+    }
+
+    if (!Read(est_file)) {
         LogPrintf("Failed to read fee estimates from %s. Continue anyway.\n", fs::PathToString(m_estimation_filepath));
     }
 }
@@ -903,10 +917,16 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, FeeCalculation
 
 void CBlockPolicyEstimator::Flush() {
     FlushUnconfirmed();
+    FlushFeeEstimates();
+}
 
+void CBlockPolicyEstimator::FlushFeeEstimates()
+{
     AutoFile est_file{fsbridge::fopen(m_estimation_filepath, "wb")};
     if (est_file.IsNull() || !Write(est_file)) {
         LogPrintf("Failed to write fee estimates to %s. Continue anyway.\n", fs::PathToString(m_estimation_filepath));
+    } else {
+        LogPrintf("Flushed fee estimates to %s.\n", fs::PathToString(m_estimation_filepath.filename()));
     }
 }
 
@@ -1010,6 +1030,13 @@ void CBlockPolicyEstimator::FlushUnconfirmed() {
     LogPrint(BCLog::ESTIMATEFEE, "Recorded %u unconfirmed txs from mempool in %gs\n", num_entries, (endclear - startclear)*0.000001);
 }
 
+std::chrono::hours CBlockPolicyEstimator::GetFeeEstimatorFileAge()
+{
+    auto file_time = std::filesystem::last_write_time(m_estimation_filepath);
+    auto now = std::filesystem::file_time_type::clock::now();
+    return std::chrono::duration_cast<std::chrono::hours>(now - file_time);
+}
+
 FeeFilterRounder::FeeFilterRounder(const CFeeRate& minIncrementalFee)
 {
     CAmount minFeeLimit = std::max(CAmount(1), minIncrementalFee.GetFeePerK() / 2);
diff --git a/src/policy/fees.h b/src/policy/fees.h
index e4628bf853..b91e241c96 100644
--- a/src/policy/fees.h
+++ b/src/policy/fees.h
@@ -14,12 +14,25 @@
 #include <uint256.h>
 
 #include <array>
+#include <chrono>
 #include <map>
 #include <memory>
 #include <set>
 #include <string>
 #include <vector>
 
+
+// How often to flush fee estimates to fee_estimates.dat.
+static constexpr std::chrono::hours FEE_FLUSH_INTERVAL{1};
+
+/** fee_estimates.dat that are more than 60 hours (2.5 days) will not be read,
+ * as the estimates in the file are stale.
+ */
+static constexpr std::chrono::hours MAX_FILE_AGE{60};
+
+// Whether we allow importing a fee_estimates file older than MAX_FILE_AGE.
+static constexpr bool DEFAULT_ACCEPT_STALE_FEE_ESTIMATES{false};
+
 class AutoFile;
 class CTxMemPoolEntry;
 class TxConfirmStats;
@@ -183,7 +196,7 @@ private:
     const fs::path m_estimation_filepath;
 public:
     /** Create new BlockPolicyEstimator and initialize stats tracking classes with default values */
-    CBlockPolicyEstimator(const fs::path& estimation_filepath);
+    CBlockPolicyEstimator(const fs::path& estimation_filepath, const bool read_stale_estimates);
     ~CBlockPolicyEstimator();
 
     /** Process all the transactions that have been included in a block */
@@ -239,6 +252,13 @@ public:
     void Flush()
         EXCLUSIVE_LOCKS_REQUIRED(!m_cs_fee_estimator);
 
+    /** Record current fee estimations. */
+    void FlushFeeEstimates()
+        EXCLUSIVE_LOCKS_REQUIRED(!m_cs_fee_estimator);
+
+    /** Calculates the age of the file, since last modified */
+    std::chrono::hours GetFeeEstimatorFileAge();
+
 private:
     mutable Mutex m_cs_fee_estimator;
 
diff --git a/src/test/fuzz/policy_estimator.cpp b/src/test/fuzz/policy_estimator.cpp
index 637ba503c6..e7f14a5f4b 100644
--- a/src/test/fuzz/policy_estimator.cpp
+++ b/src/test/fuzz/policy_estimator.cpp
@@ -29,7 +29,7 @@ void initialize_policy_estimator()
 FUZZ_TARGET_INIT(policy_estimator, initialize_policy_estimator)
 {
     FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
-    CBlockPolicyEstimator block_policy_estimator{FeeestPath(*g_setup->m_node.args)};
+    CBlockPolicyEstimator block_policy_estimator{FeeestPath(*g_setup->m_node.args), DEFAULT_ACCEPT_STALE_FEE_ESTIMATES};
     LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000) {
         CallOneOf(
             fuzzed_data_provider,
diff --git a/src/test/fuzz/policy_estimator_io.cpp b/src/test/fuzz/policy_estimator_io.cpp
index 436873c955..96f8a4c3a9 100644
--- a/src/test/fuzz/policy_estimator_io.cpp
+++ b/src/test/fuzz/policy_estimator_io.cpp
@@ -28,7 +28,7 @@ FUZZ_TARGET_INIT(policy_estimator_io, initialize_policy_estimator_io)
     FuzzedAutoFileProvider fuzzed_auto_file_provider = ConsumeAutoFile(fuzzed_data_provider);
     AutoFile fuzzed_auto_file{fuzzed_auto_file_provider.open()};
     // Re-using block_policy_estimator across runs to avoid costly creation of CBlockPolicyEstimator object.
-    static CBlockPolicyEstimator block_policy_estimator{FeeestPath(*g_setup->m_node.args)};
+    static CBlockPolicyEstimator block_policy_estimator{FeeestPath(*g_setup->m_node.args), DEFAULT_ACCEPT_STALE_FEE_ESTIMATES};
     if (block_policy_estimator.Read(fuzzed_auto_file)) {
         block_policy_estimator.Write(fuzzed_auto_file);
     }
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 74b055ee45..194cc8c347 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -186,7 +186,7 @@ ChainTestingSetup::ChainTestingSetup(const std::string& chainName, const std::ve
     m_node.scheduler->m_service_thread = std::thread(util::TraceThread, "scheduler", [&] { m_node.scheduler->serviceQueue(); });
     GetMainSignals().RegisterBackgroundSignalScheduler(*m_node.scheduler);
 
-    m_node.fee_estimator = std::make_unique<CBlockPolicyEstimator>(FeeestPath(*m_node.args));
+    m_node.fee_estimator = std::make_unique<CBlockPolicyEstimator>(FeeestPath(*m_node.args), DEFAULT_ACCEPT_STALE_FEE_ESTIMATES);
     m_node.mempool = std::make_unique<CTxMemPool>(MemPoolOptionsForTest(m_node));
 
     m_cache_sizes = CalculateCacheSizes(m_args);
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index 0f9f332dc6..a8c611727f 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -2782,4 +2782,29 @@ BOOST_AUTO_TEST_CASE(util_WriteBinaryFile)
     BOOST_CHECK(valid);
     BOOST_CHECK_EQUAL(actual_text, expected_text);
 }
+
+BOOST_AUTO_TEST_CASE(clearshrink_test)
+{
+    {
+        std::vector<uint8_t> v = {1, 2, 3};
+        ClearShrink(v);
+        BOOST_CHECK_EQUAL(v.size(), 0);
+        BOOST_CHECK_EQUAL(v.capacity(), 0);
+    }
+
+    {
+        std::vector<bool> v = {false, true, false, false, true, true};
+        ClearShrink(v);
+        BOOST_CHECK_EQUAL(v.size(), 0);
+        BOOST_CHECK_EQUAL(v.capacity(), 0);
+    }
+
+    {
+        std::deque<int> v = {1, 3, 3, 7};
+        ClearShrink(v);
+        BOOST_CHECK_EQUAL(v.size(), 0);
+        // std::deque has no capacity() we can observe.
+    }
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/util/vector.h b/src/util/vector.h
index ed745affe5..bc62b64d28 100644
--- a/src/util/vector.h
+++ b/src/util/vector.h
@@ -49,4 +49,22 @@ inline V Cat(V v1, const V& v2)
     return v1;
 }
 
+/** Clear a vector (or std::deque) and release its allocated memory. */
+template<typename V>
+inline void ClearShrink(V& v) noexcept
+{
+    // There are various ways to clear a vector and release its memory:
+    //
+    // 1. V{}.swap(v)
+    // 2. v = V{}
+    // 3. v = {}; v.shrink_to_fit();
+    // 4. v.clear(); v.shrink_to_fit();
+    //
+    // (2) does not appear to release memory in glibc debug mode, even if v.shrink_to_fit()
+    // follows. (3) and (4) rely on std::vector::shrink_to_fit, which is only a non-binding
+    // request. Therefore, we use method (1).
+
+    V{}.swap(v);
+}
+
 #endif // BITCOIN_UTIL_VECTOR_H
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index b0cbcf4edf..3590932c53 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -7,6 +7,7 @@ from copy import deepcopy
 from decimal import Decimal
 import os
 import random
+import time
 
 from test_framework.messages import (
     COIN,
@@ -21,6 +22,8 @@ from test_framework.util import (
 )
 from test_framework.wallet import MiniWallet
 
+MAX_FILE_AGE = 60
+SECONDS_PER_HOUR = 60 * 60
 
 def small_txpuzzle_randfee(
     wallet, from_node, conflist, unconflist, amount, min_fee, fee_increment
@@ -273,6 +276,95 @@ class EstimateFeeTest(BitcoinTestFramework):
         est_feerate = node.estimatesmartfee(2)["feerate"]
         assert_equal(est_feerate, high_feerate_kvb)
 
+    def test_old_fee_estimate_file(self):
+        # Get the initial fee rate while node is running
+        fee_rate = self.nodes[0].estimatesmartfee(1)["feerate"]
+
+        # Restart node to ensure fee_estimate.dat file is read
+        self.restart_node(0)
+        assert_equal(self.nodes[0].estimatesmartfee(1)["feerate"], fee_rate)
+
+        fee_dat = self.nodes[0].chain_path / "fee_estimates.dat"
+
+        # Stop the node and backdate the fee_estimates.dat file more than MAX_FILE_AGE
+        self.stop_node(0)
+        last_modified_time = time.time() - (MAX_FILE_AGE + 1) * SECONDS_PER_HOUR
+        os.utime(fee_dat, (last_modified_time, last_modified_time))
+
+        # Start node and ensure the fee_estimates.dat file was not read
+        self.start_node(0)
+        assert_equal(self.nodes[0].estimatesmartfee(1)["errors"], ["Insufficient data or no feerate found"])
+
+
+    def test_estimate_dat_is_flushed_periodically(self):
+        fee_dat = self.nodes[0].chain_path / "fee_estimates.dat"
+        os.remove(fee_dat) if os.path.exists(fee_dat) else None
+
+        # Verify that fee_estimates.dat does not exist
+        assert_equal(os.path.isfile(fee_dat), False)
+
+        # Verify if the string "Flushed fee estimates to fee_estimates.dat." is present in the debug log file.
+        # If present, it indicates that fee estimates have been successfully flushed to disk.
+        with self.nodes[0].assert_debug_log(expected_msgs=["Flushed fee estimates to fee_estimates.dat."], timeout=1):
+            # Mock the scheduler for an hour to flush fee estimates to fee_estimates.dat
+            self.nodes[0].mockscheduler(SECONDS_PER_HOUR)
+
+        # Verify that fee estimates were flushed and fee_estimates.dat file is created
+        assert_equal(os.path.isfile(fee_dat), True)
+
+        # Verify that the estimates remain the same if there are no blocks in the flush interval
+        block_hash_before = self.nodes[0].getbestblockhash()
+        fee_dat_initial_content = open(fee_dat, "rb").read()
+        with self.nodes[0].assert_debug_log(expected_msgs=["Flushed fee estimates to fee_estimates.dat."], timeout=1):
+            # Mock the scheduler for an hour to flush fee estimates to fee_estimates.dat
+            self.nodes[0].mockscheduler(SECONDS_PER_HOUR)
+
+        # Verify that there were no blocks in between the flush interval
+        assert_equal(block_hash_before, self.nodes[0].getbestblockhash())
+
+        fee_dat_current_content = open(fee_dat, "rb").read()
+        assert_equal(fee_dat_current_content, fee_dat_initial_content)
+
+        # Verify that the estimates remain the same after shutdown with no blocks before shutdown
+        self.restart_node(0)
+        fee_dat_current_content = open(fee_dat, "rb").read()
+        assert_equal(fee_dat_current_content, fee_dat_initial_content)
+
+        # Verify that the estimates are not the same if new blocks were produced in the flush interval
+        with self.nodes[0].assert_debug_log(expected_msgs=["Flushed fee estimates to fee_estimates.dat."], timeout=1):
+            # Mock the scheduler for an hour to flush fee estimates to fee_estimates.dat
+            self.generate(self.nodes[0], 5, sync_fun=self.no_op)
+            self.nodes[0].mockscheduler(SECONDS_PER_HOUR)
+
+        fee_dat_current_content = open(fee_dat, "rb").read()
+        assert fee_dat_current_content != fee_dat_initial_content
+
+        fee_dat_initial_content = fee_dat_current_content
+
+        # Generate blocks before shutdown and verify that the fee estimates are not the same
+        self.generate(self.nodes[0], 5, sync_fun=self.no_op)
+        self.restart_node(0)
+        fee_dat_current_content = open(fee_dat, "rb").read()
+        assert fee_dat_current_content != fee_dat_initial_content
+
+
+    def test_acceptstalefeeestimates_option(self):
+        # Get the initial fee rate while node is running
+        fee_rate = self.nodes[0].estimatesmartfee(1)["feerate"]
+
+        self.stop_node(0)
+
+        fee_dat = self.nodes[0].chain_path / "fee_estimates.dat"
+
+        # Stop the node and backdate the fee_estimates.dat file more than MAX_FILE_AGE
+        last_modified_time = time.time() - (MAX_FILE_AGE + 1) * SECONDS_PER_HOUR
+        os.utime(fee_dat, (last_modified_time, last_modified_time))
+
+        # Restart node with -acceptstalefeeestimates option to ensure fee_estimate.dat file is read
+        self.start_node(0,extra_args=["-acceptstalefeeestimates"])
+        assert_equal(self.nodes[0].estimatesmartfee(1)["feerate"], fee_rate)
+
+
     def run_test(self):
         self.log.info("This test is time consuming, please be patient")
         self.log.info("Splitting inputs so we can generate tx's")
@@ -296,12 +388,21 @@ class EstimateFeeTest(BitcoinTestFramework):
         self.log.info("Testing estimates with single transactions.")
         self.sanity_check_estimates_range()
 
+        self.log.info("Test fee_estimates.dat is flushed periodically")
+        self.test_estimate_dat_is_flushed_periodically()
+
         # check that the effective feerate is greater than or equal to the mempoolminfee even for high mempoolminfee
        self.log.info(
             "Test fee rate estimation after restarting node with high MempoolMinFee"
         )
         self.test_feerate_mempoolminfee()
 
+        self.log.info("Test acceptstalefeeestimates option")
+        self.test_acceptstalefeeestimates_option()
+
+        self.log.info("Test reading old fee_estimates.dat")
+        self.test_old_fee_estimate_file()
+
         self.log.info("Restarting node with fresh estimation")
         self.stop_node(0)
         fee_dat = os.path.join(self.nodes[0].datadir, self.chain, "fee_estimates.dat")
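
Editor's note: to see the staleness rule added by these backports in isolation, here is a minimal standalone sketch (not part of the commit) mirroring the age computation in GetFeeEstimatorFileAge() and the 60-hour MAX_FILE_AGE cutoff from src/policy/fees.h; the file name, messages, and main() wrapper are illustrative only:

// Standalone sketch of the fee_estimates.dat staleness check (C++17 for <filesystem>).
#include <chrono>
#include <cstdio>
#include <filesystem>

int main()
{
    const std::filesystem::path estimation_filepath{"fee_estimates.dat"}; // illustrative path
    constexpr std::chrono::hours max_file_age{60};                        // mirrors MAX_FILE_AGE

    if (!std::filesystem::exists(estimation_filepath)) {
        std::printf("%s is not found. Continue anyway.\n", estimation_filepath.string().c_str());
        return 0;
    }

    // Same computation as GetFeeEstimatorFileAge(): age = now - last modification time
    const auto file_time = std::filesystem::last_write_time(estimation_filepath);
    const auto now = std::filesystem::file_time_type::clock::now();
    const auto file_age = std::chrono::duration_cast<std::chrono::hours>(now - file_time);

    if (file_age > max_file_age) {
        std::printf("age=%lld hours: stale, skipped unless -acceptstalefeeestimates (regtest only) is set\n",
                    static_cast<long long>(file_age.count()));
    } else {
        std::printf("age=%lld hours: fresh enough to be read at startup\n",
                    static_cast<long long>(file_age.count()));
    }
}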