-rwxr-xr-x  contrib/devtools/update-translations.py  10
-rw-r--r--  doc/developer-notes.md  30
-rw-r--r--  doc/release-notes-pr12892.md  2
-rw-r--r--  doc/release-notes.md  5
-rw-r--r--  doc/translation_process.md  8
-rw-r--r--  src/Makefile.am  40
-rw-r--r--  src/Makefile.bench.include  2
-rw-r--r--  src/Makefile.qt.include  2
-rw-r--r--  src/Makefile.qttest.include  2
-rw-r--r--  src/Makefile.test.include  2
-rw-r--r--  src/arith_uint256.h  8
-rw-r--r--  src/bitcoin-cli.cpp  13
-rw-r--r--  src/index/base.cpp  278
-rw-r--r--  src/index/base.h  98
-rw-r--r--  src/index/txindex.cpp  419
-rw-r--r--  src/index/txindex.h  74
-rw-r--r--  src/init.cpp  3
-rw-r--r--  src/qt/forms/optionsdialog.ui  67
-rw-r--r--  src/qt/optionsdialog.cpp  19
-rw-r--r--  src/qt/optionsdialog.h  1
-rw-r--r--  src/qt/optionsmodel.cpp  26
-rw-r--r--  src/qt/optionsmodel.h  2
-rw-r--r--  src/rest.cpp  2
-rw-r--r--  src/rpc/blockchain.cpp  2
-rw-r--r--  src/rpc/client.cpp  1
-rw-r--r--  src/test/arith_uint256_tests.cpp  7
-rw-r--r--  src/test/mempool_tests.cpp  178
-rw-r--r--  src/test/txindex_tests.cpp  2
-rw-r--r--  src/test/txvalidationcache_tests.cpp  2
-rw-r--r--  src/txdb.cpp  183
-rw-r--r--  src/txdb.h  59
-rw-r--r--  src/txmempool.cpp  37
-rw-r--r--  src/txmempool.h  8
-rw-r--r--  src/validation.h  6
-rw-r--r--  src/wallet/rpcwallet.cpp  76
-rw-r--r--  src/wallet/wallet.cpp  14
-rw-r--r--  src/wallet/wallet.h  4
-rwxr-xr-x  test/functional/rpc_deprecated.py  5
-rwxr-xr-x  test/functional/test_runner.py  6
-rwxr-xr-x  test/functional/wallet_labels.py  81
-rwxr-xr-x  test/lint/lint-includes.sh  8
-rwxr-xr-x  test/lint/lint-locale-dependence.sh  229
42 files changed, 1344 insertions, 677 deletions
diff --git a/contrib/devtools/update-translations.py b/contrib/devtools/update-translations.py
index b36e6968bf..f0098cfcdf 100755
--- a/contrib/devtools/update-translations.py
+++ b/contrib/devtools/update-translations.py
@@ -30,6 +30,8 @@ SOURCE_LANG = 'bitcoin_en.ts'
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
+# Regexp to check for Bitcoin addresses
+ADDRESS_REGEXP = re.compile('([13]|bc1)[a-zA-Z0-9]{30,}')
def check_at_repository_root():
if not os.path.exists('.git'):
@@ -122,6 +124,12 @@ def escape_cdata(text):
text = text.replace('"', '&quot;')
return text
+def contains_bitcoin_addr(text, errors):
+ if text != None and ADDRESS_REGEXP.search(text) != None:
+ errors.append('Translation "%s" contains a bitcoin address. This will be removed.' % (text))
+ return True
+ return False
+
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
@@ -160,7 +168,7 @@ def postprocess_translations(reduce_diff_hacks=False):
if translation is None:
continue
errors = []
- valid = check_format_specifiers(source, translation, errors, numerus)
+ valid = check_format_specifiers(source, translation, errors, numerus) and not contains_bitcoin_addr(translation, errors)
for error in errors:
print('%s: %s' % (filename, error))
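
The new ADDRESS_REGEXP check above flags any translation that embeds something shaped like a Bitcoin address: a leading '1', '3' or 'bc1' followed by 30 or more alphanumerics. A minimal, self-contained sketch of the same pattern, written in C++ with std::regex purely for illustration (not part of the patch):

    // Sketch only: the address-detection pattern from ADDRESS_REGEXP, in C++.
    #include <iostream>
    #include <regex>
    #include <string>

    static bool ContainsBitcoinAddr(const std::string& text)
    {
        // '1'/'3' (base58) or 'bc1' (bech32) prefix followed by 30+ alphanumerics.
        static const std::regex kAddrRegex("([13]|bc1)[a-zA-Z0-9]{30,}");
        return std::regex_search(text, kAddrRegex);
    }

    int main()
    {
        const std::string with_addr = "Donate: bc1" + std::string(36, 'q'); // synthetic address-shaped string
        std::cout << ContainsBitcoinAddr(with_addr) << '\n';                 // 1
        std::cout << ContainsBitcoinAddr("no address here") << '\n';         // 0
    }
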
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index b722448a07..4821a59f19 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -499,7 +499,35 @@ Strings and formatting
- Use `ParseInt32`, `ParseInt64`, `ParseUInt32`, `ParseUInt64`, `ParseDouble` from `utilstrencodings.h` for number parsing
- - *Rationale*: These functions do overflow checking, and avoid pesky locale issues
+ - *Rationale*: These functions do overflow checking, and avoid pesky locale issues.
+
+- Avoid using locale dependent functions if possible. You can use the provided
+ [`lint-locale-dependence.sh`](/contrib/devtools/lint-locale-dependence.sh)
+ to check for accidental use of locale dependent functions.
+
+ - *Rationale*: Unnecessary locale dependence can cause bugs that are very tricky to isolate and fix.
+
+ - These functions are known to be locale dependent:
+ `alphasort`, `asctime`, `asprintf`, `atof`, `atoi`, `atol`, `atoll`, `atoq`,
+ `btowc`, `ctime`, `dprintf`, `fgetwc`, `fgetws`, `fprintf`, `fputwc`,
+ `fputws`, `fscanf`, `fwprintf`, `getdate`, `getwc`, `getwchar`, `isalnum`,
+ `isalpha`, `isblank`, `iscntrl`, `isdigit`, `isgraph`, `islower`, `isprint`,
+ `ispunct`, `isspace`, `isupper`, `iswalnum`, `iswalpha`, `iswblank`,
+ `iswcntrl`, `iswctype`, `iswdigit`, `iswgraph`, `iswlower`, `iswprint`,
+ `iswpunct`, `iswspace`, `iswupper`, `iswxdigit`, `isxdigit`, `mblen`,
+ `mbrlen`, `mbrtowc`, `mbsinit`, `mbsnrtowcs`, `mbsrtowcs`, `mbstowcs`,
+ `mbtowc`, `mktime`, `putwc`, `putwchar`, `scanf`, `snprintf`, `sprintf`,
+ `sscanf`, `stoi`, `stol`, `stoll`, `strcasecmp`, `strcasestr`, `strcoll`,
+ `strfmon`, `strftime`, `strncasecmp`, `strptime`, `strtod`, `strtof`,
+ `strtoimax`, `strtol`, `strtold`, `strtoll`, `strtoq`, `strtoul`,
+ `strtoull`, `strtoumax`, `strtouq`, `strxfrm`, `swprintf`, `tolower`,
+ `toupper`, `towctrans`, `towlower`, `towupper`, `ungetwc`, `vasprintf`,
+ `vdprintf`, `versionsort`, `vfprintf`, `vfscanf`, `vfwprintf`, `vprintf`,
+ `vscanf`, `vsnprintf`, `vsprintf`, `vsscanf`, `vswprintf`, `vwprintf`,
+ `wcrtomb`, `wcscasecmp`, `wcscoll`, `wcsftime`, `wcsncasecmp`, `wcsnrtombs`,
+ `wcsrtombs`, `wcstod`, `wcstof`, `wcstoimax`, `wcstol`, `wcstold`,
+ `wcstoll`, `wcstombs`, `wcstoul`, `wcstoull`, `wcstoumax`, `wcswidth`,
+ `wcsxfrm`, `wctob`, `wctomb`, `wctrans`, `wctype`, `wcwidth`, `wprintf`
- For `strprintf`, `LogPrint`, `LogPrintf` formatting characters don't need size specifiers
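
As a standalone illustration of the locale guidance added above (not part of the patch): atof() consults the process locale when parsing, while std::from_chars() from C++17 always uses locale-independent rules. The sketch assumes the de_DE.UTF-8 locale is installed and a standard library with floating-point from_chars support:

    // Sketch only: contrast a locale-dependent parse with a locale-independent one.
    #include <charconv>
    #include <clocale>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    int main()
    {
        // Under de_DE the decimal separator is ',', so atof("1.5") stops at the '.'.
        std::setlocale(LC_ALL, "de_DE.UTF-8");

        const char* input = "1.5";
        const double via_atof = std::atof(input); // 1.0 under de_DE, 1.5 under "C"
        double via_from_chars = 0.0;
        std::from_chars(input, input + std::strlen(input), via_from_chars); // 1.5 regardless of locale

        std::printf("%d %d\n", via_atof == 1.5, via_from_chars == 1.5); // typically: 0 1
    }
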
diff --git a/doc/release-notes-pr12892.md b/doc/release-notes-pr12892.md
index 8105eca5c0..f4a95bd40f 100644
--- a/doc/release-notes-pr12892.md
+++ b/doc/release-notes-pr12892.md
@@ -18,7 +18,7 @@ Here are the changes to RPC methods:
| Deprecated Method | New Method | Notes |
| :---------------------- | :-------------------- | :-----------|
| `getaccount` | `getaddressinfo` | `getaddressinfo` returns a json object with address information instead of just the name of the account as a string. |
-| `getaccountaddress` | `getlabeladdress` | `getlabeladdress` throws an error by default if the label does not already exist, but provides a `force` option for compatibility with existing applications. |
+| `getaccountaddress` | n/a | There is no replacement for `getaccountaddress` since labels do not have an associated receive address. |
| `getaddressesbyaccount` | `getaddressesbylabel` | `getaddressesbylabel` returns a json object with the addresses as keys, instead of a list of strings. |
| `getreceivedbyaccount` | `getreceivedbylabel` | _no change in behavior_ |
| `listaccounts` | `listlabels` | `listlabels` does not return a balance or accept `minconf` and `watchonly` arguments. |
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 7a9a98bfec..e1bb84cca9 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -56,6 +56,11 @@ frequently tested on them.
Notable changes
===============
+GUI changes
+-----------
+
+- Block storage can be limited under Preferences, in the Main tab. Undoing this setting requires downloading the full blockchain again. This mode is incompatible with -txindex and -rescan.
+
RPC changes
------------
diff --git a/doc/translation_process.md b/doc/translation_process.md
index 5a9c59914e..022d7bb00b 100644
--- a/doc/translation_process.md
+++ b/doc/translation_process.md
@@ -46,9 +46,7 @@ Visit the [Transifex Signup](https://www.transifex.com/signup/) page to create a
You can find the Bitcoin translation project at [https://www.transifex.com/projects/p/bitcoin/](https://www.transifex.com/projects/p/bitcoin/).
### Installing the Transifex client command-line tool
-The client it used to fetch updated translations. If you are having problems, or need more details, see [http://docs.transifex.com/developer/client/setup](http://docs.transifex.com/developer/client/setup)
-
-**For Linux and Mac**
+The client is used to fetch updated translations. If you are having problems, or need more details, see [https://docs.transifex.com/client/installing-the-client](https://docs.transifex.com/client/installing-the-client)
`pip install transifex-client`
@@ -64,10 +62,6 @@ token =
username = USERNAME
```
-**For Windows**
-
-Please see [http://docs.transifex.com/developer/client/setup#windows](http://docs.transifex.com/developer/client/setup#windows) for details on installation.
-
The Transifex Bitcoin project config file is included as part of the repo. It can be found at `.tx/config`, however you shouldn’t need change anything.
### Synchronising translations
diff --git a/src/Makefile.am b/src/Makefile.am
index 96e56915a6..e03c21f16e 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -29,9 +29,7 @@ LIBBITCOIN_COMMON=libbitcoin_common.a
LIBBITCOIN_CONSENSUS=libbitcoin_consensus.a
LIBBITCOIN_CLI=libbitcoin_cli.a
LIBBITCOIN_UTIL=libbitcoin_util.a
-LIBBITCOIN_CRYPTO=crypto/libbitcoin_crypto.a
-LIBBITCOIN_CRYPTO_SSE41=crypto/libbitcoin_crypto_sse41.a
-LIBBITCOIN_CRYPTO_AVX2=crypto/libbitcoin_crypto_avx2.a
+LIBBITCOIN_CRYPTO_BASE=crypto/libbitcoin_crypto_base.a
LIBBITCOINQT=qt/libbitcoinqt.a
LIBSECP256K1=secp256k1/libsecp256k1.la
@@ -45,6 +43,16 @@ if ENABLE_WALLET
LIBBITCOIN_WALLET=libbitcoin_wallet.a
endif
+LIBBITCOIN_CRYPTO= $(LIBBITCOIN_CRYPTO_BASE)
+if ENABLE_SSE41
+LIBBITCOIN_CRYPTO_SSE41 = crypto/libbitcoin_crypto_sse41.a
+LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_SSE41)
+endif
+if ENABLE_AVX2
+LIBBITCOIN_CRYPTO_AVX2 = crypto/libbitcoin_crypto_avx2.a
+LIBBITCOIN_CRYPTO += $(LIBBITCOIN_CRYPTO_AVX2)
+endif
+
$(LIBSECP256K1): $(wildcard secp256k1/src/*) $(wildcard secp256k1/include/*)
$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C $(@D) $(@F)
@@ -52,8 +60,6 @@ $(LIBSECP256K1): $(wildcard secp256k1/src/*) $(wildcard secp256k1/include/*)
# But to build the less dependent modules first, we manually select their order here:
EXTRA_LIBRARIES += \
$(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2) \
$(LIBBITCOIN_UTIL) \
$(LIBBITCOIN_COMMON) \
$(LIBBITCOIN_CONSENSUS) \
@@ -107,6 +113,7 @@ BITCOIN_CORE_H = \
fs.h \
httprpc.h \
httpserver.h \
+ index/base.h \
index/txindex.h \
indirectmap.h \
init.h \
@@ -208,6 +215,7 @@ libbitcoin_server_a_SOURCES = \
consensus/tx_verify.cpp \
httprpc.cpp \
httpserver.cpp \
+ index/base.cpp \
index/txindex.cpp \
init.cpp \
dbwrapper.cpp \
@@ -268,9 +276,9 @@ libbitcoin_wallet_a_SOURCES = \
$(BITCOIN_CORE_H)
# crypto primitives library
-crypto_libbitcoin_crypto_a_CPPFLAGS = $(AM_CPPFLAGS)
-crypto_libbitcoin_crypto_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-crypto_libbitcoin_crypto_a_SOURCES = \
+crypto_libbitcoin_crypto_base_a_CPPFLAGS = $(AM_CPPFLAGS)
+crypto_libbitcoin_crypto_base_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+crypto_libbitcoin_crypto_base_a_SOURCES = \
crypto/aes.cpp \
crypto/aes.h \
crypto/chacha20.h \
@@ -290,23 +298,19 @@ crypto_libbitcoin_crypto_a_SOURCES = \
crypto/sha512.h
if USE_ASM
-crypto_libbitcoin_crypto_a_SOURCES += crypto/sha256_sse4.cpp
+crypto_libbitcoin_crypto_base_a_SOURCES += crypto/sha256_sse4.cpp
endif
crypto_libbitcoin_crypto_sse41_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
crypto_libbitcoin_crypto_sse41_a_CPPFLAGS = $(AM_CPPFLAGS)
-if ENABLE_SSE41
crypto_libbitcoin_crypto_sse41_a_CXXFLAGS += $(SSE41_CXXFLAGS)
crypto_libbitcoin_crypto_sse41_a_CPPFLAGS += -DENABLE_SSE41
-endif
crypto_libbitcoin_crypto_sse41_a_SOURCES = crypto/sha256_sse41.cpp
crypto_libbitcoin_crypto_avx2_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
crypto_libbitcoin_crypto_avx2_a_CPPFLAGS = $(AM_CPPFLAGS)
-if ENABLE_AVX2
crypto_libbitcoin_crypto_avx2_a_CXXFLAGS += $(AVX2_CXXFLAGS)
crypto_libbitcoin_crypto_avx2_a_CPPFLAGS += -DENABLE_AVX2
-endif
crypto_libbitcoin_crypto_avx2_a_SOURCES = crypto/sha256_avx2.cpp
# consensus: shared between all executables that validate any consensus rules.
@@ -431,8 +435,6 @@ bitcoind_LDADD = \
$(LIBBITCOIN_ZMQ) \
$(LIBBITCOIN_CONSENSUS) \
$(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2) \
$(LIBLEVELDB) \
$(LIBLEVELDB_SSE42) \
$(LIBMEMENV) \
@@ -454,9 +456,7 @@ bitcoin_cli_LDADD = \
$(LIBBITCOIN_CLI) \
$(LIBUNIVALUE) \
$(LIBBITCOIN_UTIL) \
- $(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2)
+ $(LIBBITCOIN_CRYPTO)
bitcoin_cli_LDADD += $(BOOST_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(EVENT_LIBS)
#
@@ -477,8 +477,6 @@ bitcoin_tx_LDADD = \
$(LIBBITCOIN_UTIL) \
$(LIBBITCOIN_CONSENSUS) \
$(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2) \
$(LIBSECP256K1)
bitcoin_tx_LDADD += $(BOOST_LIBS) $(CRYPTO_LIBS)
@@ -487,7 +485,7 @@ bitcoin_tx_LDADD += $(BOOST_LIBS) $(CRYPTO_LIBS)
# bitcoinconsensus library #
if BUILD_BITCOIN_LIBS
include_HEADERS = script/bitcoinconsensus.h
-libbitcoinconsensus_la_SOURCES = $(crypto_libbitcoin_crypto_a_SOURCES) $(libbitcoin_consensus_a_SOURCES)
+libbitcoinconsensus_la_SOURCES = $(crypto_libbitcoin_crypto_base_a_SOURCES) $(libbitcoin_consensus_a_SOURCES)
if GLIBC_BACK_COMPAT
libbitcoinconsensus_la_SOURCES += compat/glibc_compat.cpp
diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include
index 804df3bf21..32de1582f8 100644
--- a/src/Makefile.bench.include
+++ b/src/Makefile.bench.include
@@ -39,8 +39,6 @@ bench_bench_bitcoin_LDADD = \
$(LIBBITCOIN_UTIL) \
$(LIBBITCOIN_CONSENSUS) \
$(LIBBITCOIN_CRYPTO) \
- $(LIBBITCOIN_CRYPTO_SSE41) \
- $(LIBBITCOIN_CRYPTO_AVX2) \
$(LIBLEVELDB) \
$(LIBLEVELDB_SSE42) \
$(LIBMEMENV) \
diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include
index f8c31be3d4..a84a11ac45 100644
--- a/src/Makefile.qt.include
+++ b/src/Makefile.qt.include
@@ -408,7 +408,7 @@ endif
if ENABLE_ZMQ
qt_bitcoin_qt_LDADD += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS)
endif
-qt_bitcoin_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBBITCOIN_CRYPTO_SSE41) $(LIBBITCOIN_CRYPTO_AVX2) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) \
+qt_bitcoin_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) \
$(BOOST_LIBS) $(QT_LIBS) $(QT_DBUS_LIBS) $(QR_LIBS) $(PROTOBUF_LIBS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(MINIUPNPC_LIBS) $(LIBSECP256K1) \
$(EVENT_PTHREADS_LIBS) $(EVENT_LIBS)
qt_bitcoin_qt_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(QT_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
diff --git a/src/Makefile.qttest.include b/src/Makefile.qttest.include
index a4356f1cbd..4b14212b2e 100644
--- a/src/Makefile.qttest.include
+++ b/src/Makefile.qttest.include
@@ -62,7 +62,7 @@ endif
if ENABLE_ZMQ
qt_test_test_bitcoin_qt_LDADD += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS)
endif
-qt_test_test_bitcoin_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBBITCOIN_CRYPTO_SSE41) $(LIBBITCOIN_CRYPTO_AVX2) $(LIBUNIVALUE) $(LIBLEVELDB) \
+qt_test_test_bitcoin_qt_LDADD += $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) \
$(LIBLEVELDB_SSE42) $(LIBMEMENV) $(BOOST_LIBS) $(QT_DBUS_LIBS) $(QT_TEST_LIBS) $(QT_LIBS) \
$(QR_LIBS) $(PROTOBUF_LIBS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(MINIUPNPC_LIBS) $(LIBSECP256K1) \
$(EVENT_PTHREADS_LIBS) $(EVENT_LIBS)
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index cbd63cd53d..94f4e38768 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -110,7 +110,7 @@ if ENABLE_WALLET
test_test_bitcoin_LDADD += $(LIBBITCOIN_WALLET)
endif
-test_test_bitcoin_LDADD += $(LIBBITCOIN_SERVER) $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBBITCOIN_CRYPTO_SSE41) $(LIBBITCOIN_CRYPTO_AVX2) $(LIBUNIVALUE) \
+test_test_bitcoin_LDADD += $(LIBBITCOIN_SERVER) $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) \
$(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) $(BOOST_LIBS) $(BOOST_UNIT_TEST_FRAMEWORK_LIB) $(LIBSECP256K1) $(EVENT_LIBS) $(EVENT_PTHREADS_LIBS)
test_test_bitcoin_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
diff --git a/src/arith_uint256.h b/src/arith_uint256.h
index 3f4cc8c2bf..e4c7575e2d 100644
--- a/src/arith_uint256.h
+++ b/src/arith_uint256.h
@@ -64,14 +64,6 @@ public:
explicit base_uint(const std::string& str);
- bool operator!() const
- {
- for (int i = 0; i < WIDTH; i++)
- if (pn[i] != 0)
- return false;
- return true;
- }
-
const base_uint operator~() const
{
base_uint ret;
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index be5ce14480..b332b5e581 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -56,6 +56,18 @@ static void SetupCliArgs()
gArgs.AddArg("-help", "", false, OptionsCategory::HIDDEN);
}
+/** libevent event log callback */
+static void libevent_log_cb(int severity, const char *msg)
+{
+#ifndef EVENT_LOG_ERR // EVENT_LOG_ERR was added in 2.0.19; but before then _EVENT_LOG_ERR existed.
+# define EVENT_LOG_ERR _EVENT_LOG_ERR
+#endif
+ // Ignore everything other than errors
+ if (severity >= EVENT_LOG_ERR) {
+ throw std::runtime_error(strprintf("libevent error: %s", msg));
+ }
+}
+
//////////////////////////////////////////////////////////////////////////////
//
// Start
@@ -506,6 +518,7 @@ int main(int argc, char* argv[])
fprintf(stderr, "Error: Initializing networking failed\n");
return EXIT_FAILURE;
}
+ event_set_log_callback(&libevent_log_cb);
try {
int ret = AppInitRPC(argc, argv);
diff --git a/src/index/base.cpp b/src/index/base.cpp
new file mode 100644
index 0000000000..738166dc94
--- /dev/null
+++ b/src/index/base.cpp
@@ -0,0 +1,278 @@
+// Copyright (c) 2017-2018 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <chainparams.h>
+#include <index/base.h>
+#include <init.h>
+#include <tinyformat.h>
+#include <ui_interface.h>
+#include <util.h>
+#include <validation.h>
+#include <warnings.h>
+
+constexpr char DB_BEST_BLOCK = 'B';
+
+constexpr int64_t SYNC_LOG_INTERVAL = 30; // seconds
+constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds
+
+template<typename... Args>
+static void FatalError(const char* fmt, const Args&... args)
+{
+ std::string strMessage = tfm::format(fmt, args...);
+ SetMiscWarning(strMessage);
+ LogPrintf("*** %s\n", strMessage);
+ uiInterface.ThreadSafeMessageBox(
+ "Error: A fatal internal error occurred, see debug.log for details",
+ "", CClientUIInterface::MSG_ERROR);
+ StartShutdown();
+}
+
+BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) :
+ CDBWrapper(path, n_cache_size, f_memory, f_wipe, f_obfuscate)
+{}
+
+bool BaseIndex::DB::ReadBestBlock(CBlockLocator& locator) const
+{
+ bool success = Read(DB_BEST_BLOCK, locator);
+ if (!success) {
+ locator.SetNull();
+ }
+ return success;
+}
+
+bool BaseIndex::DB::WriteBestBlock(const CBlockLocator& locator)
+{
+ return Write(DB_BEST_BLOCK, locator);
+}
+
+BaseIndex::~BaseIndex()
+{
+ Interrupt();
+ Stop();
+}
+
+bool BaseIndex::Init()
+{
+ CBlockLocator locator;
+ if (!GetDB().ReadBestBlock(locator)) {
+ locator.SetNull();
+ }
+
+ LOCK(cs_main);
+ m_best_block_index = FindForkInGlobalIndex(chainActive, locator);
+ m_synced = m_best_block_index.load() == chainActive.Tip();
+ return true;
+}
+
+static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev)
+{
+ AssertLockHeld(cs_main);
+
+ if (!pindex_prev) {
+ return chainActive.Genesis();
+ }
+
+ const CBlockIndex* pindex = chainActive.Next(pindex_prev);
+ if (pindex) {
+ return pindex;
+ }
+
+ return chainActive.Next(chainActive.FindFork(pindex_prev));
+}
+
+void BaseIndex::ThreadSync()
+{
+ const CBlockIndex* pindex = m_best_block_index.load();
+ if (!m_synced) {
+ auto& consensus_params = Params().GetConsensus();
+
+ int64_t last_log_time = 0;
+ int64_t last_locator_write_time = 0;
+ while (true) {
+ if (m_interrupt) {
+ WriteBestBlock(pindex);
+ return;
+ }
+
+ {
+ LOCK(cs_main);
+ const CBlockIndex* pindex_next = NextSyncBlock(pindex);
+ if (!pindex_next) {
+ WriteBestBlock(pindex);
+ m_best_block_index = pindex;
+ m_synced = true;
+ break;
+ }
+ pindex = pindex_next;
+ }
+
+ int64_t current_time = GetTime();
+ if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
+ LogPrintf("Syncing %s with block chain from height %d\n",
+ GetName(), pindex->nHeight);
+ last_log_time = current_time;
+ }
+
+ if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
+ WriteBestBlock(pindex);
+ last_locator_write_time = current_time;
+ }
+
+ CBlock block;
+ if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
+ FatalError("%s: Failed to read block %s from disk",
+ __func__, pindex->GetBlockHash().ToString());
+ return;
+ }
+ if (!WriteBlock(block, pindex)) {
+ FatalError("%s: Failed to write block %s to index database",
+ __func__, pindex->GetBlockHash().ToString());
+ return;
+ }
+ }
+ }
+
+ if (pindex) {
+ LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight);
+ } else {
+ LogPrintf("%s is enabled\n", GetName());
+ }
+}
+
+bool BaseIndex::WriteBestBlock(const CBlockIndex* block_index)
+{
+ LOCK(cs_main);
+ if (!GetDB().WriteBestBlock(chainActive.GetLocator(block_index))) {
+ return error("%s: Failed to write locator to disk", __func__);
+ }
+ return true;
+}
+
+void BaseIndex::BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
+ const std::vector<CTransactionRef>& txn_conflicted)
+{
+ if (!m_synced) {
+ return;
+ }
+
+ const CBlockIndex* best_block_index = m_best_block_index.load();
+ if (!best_block_index) {
+ if (pindex->nHeight != 0) {
+ FatalError("%s: First block connected is not the genesis block (height=%d)",
+ __func__, pindex->nHeight);
+ return;
+ }
+ } else {
+ // Ensure block connects to an ancestor of the current best block. This should be the case
+ // most of the time, but may not be immediately after the sync thread catches up and sets
+ // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
+ // in the ValidationInterface queue backlog even after the sync thread has caught up to the
+ // new chain tip. In this unlikely event, log a warning and let the queue clear.
+ if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
+ LogPrintf("%s: WARNING: Block %s does not connect to an ancestor of " /* Continued */
+ "known best chain (tip=%s); not updating index\n",
+ __func__, pindex->GetBlockHash().ToString(),
+ best_block_index->GetBlockHash().ToString());
+ return;
+ }
+ }
+
+ if (WriteBlock(*block, pindex)) {
+ m_best_block_index = pindex;
+ } else {
+ FatalError("%s: Failed to write block %s to index",
+ __func__, pindex->GetBlockHash().ToString());
+ return;
+ }
+}
+
+void BaseIndex::ChainStateFlushed(const CBlockLocator& locator)
+{
+ if (!m_synced) {
+ return;
+ }
+
+ const uint256& locator_tip_hash = locator.vHave.front();
+ const CBlockIndex* locator_tip_index;
+ {
+ LOCK(cs_main);
+ locator_tip_index = LookupBlockIndex(locator_tip_hash);
+ }
+
+ if (!locator_tip_index) {
+ FatalError("%s: First block (hash=%s) in locator was not found",
+ __func__, locator_tip_hash.ToString());
+ return;
+ }
+
+ // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
+ // immediately after the sync thread catches up and sets m_synced. Consider the case where
+ // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
+ // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
+ // event, log a warning and let the queue clear.
+ const CBlockIndex* best_block_index = m_best_block_index.load();
+ if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
+ LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known best " /* Continued */
+ "chain (tip=%s); not writing index locator\n",
+ __func__, locator_tip_hash.ToString(),
+ best_block_index->GetBlockHash().ToString());
+ return;
+ }
+
+ if (!GetDB().WriteBestBlock(locator)) {
+ error("%s: Failed to write locator to disk", __func__);
+ }
+}
+
+bool BaseIndex::BlockUntilSyncedToCurrentChain()
+{
+ AssertLockNotHeld(cs_main);
+
+ if (!m_synced) {
+ return false;
+ }
+
+ {
+ // Skip the queue-draining stuff if we know we're caught up with
+ // chainActive.Tip().
+ LOCK(cs_main);
+ const CBlockIndex* chain_tip = chainActive.Tip();
+ const CBlockIndex* best_block_index = m_best_block_index.load();
+ if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
+ return true;
+ }
+ }
+
+ LogPrintf("%s: %s is catching up on block notifications\n", __func__, GetName());
+ SyncWithValidationInterfaceQueue();
+ return true;
+}
+
+void BaseIndex::Interrupt()
+{
+ m_interrupt();
+}
+
+void BaseIndex::Start()
+{
+ // Need to register this ValidationInterface before running Init(), so that
+ // callbacks are not missed if Init sets m_synced to true.
+ RegisterValidationInterface(this);
+ if (!Init()) {
+ FatalError("%s: %s failed to initialize", __func__, GetName());
+ return;
+ }
+
+ m_thread_sync = std::thread(&TraceThread<std::function<void()>>, GetName(),
+ std::bind(&BaseIndex::ThreadSync, this));
+}
+
+void BaseIndex::Stop()
+{
+ UnregisterValidationInterface(this);
+
+ if (m_thread_sync.joinable()) {
+ m_thread_sync.join();
+ }
+}
diff --git a/src/index/base.h b/src/index/base.h
new file mode 100644
index 0000000000..04ee6e6cc2
--- /dev/null
+++ b/src/index/base.h
@@ -0,0 +1,98 @@
+// Copyright (c) 2017-2018 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_INDEX_BASE_H
+#define BITCOIN_INDEX_BASE_H
+
+#include <dbwrapper.h>
+#include <primitives/block.h>
+#include <primitives/transaction.h>
+#include <threadinterrupt.h>
+#include <uint256.h>
+#include <validationinterface.h>
+
+class CBlockIndex;
+
+/**
+ * Base class for indices of blockchain data. This implements
+ * CValidationInterface and ensures blocks are indexed sequentially according
+ * to their position in the active chain.
+ */
+class BaseIndex : public CValidationInterface
+{
+protected:
+ class DB : public CDBWrapper
+ {
+ public:
+ DB(const fs::path& path, size_t n_cache_size,
+ bool f_memory = false, bool f_wipe = false, bool f_obfuscate = false);
+
+ /// Read block locator of the chain that the txindex is in sync with.
+ bool ReadBestBlock(CBlockLocator& locator) const;
+
+ /// Write block locator of the chain that the txindex is in sync with.
+ bool WriteBestBlock(const CBlockLocator& locator);
+ };
+
+private:
+ /// Whether the index is in sync with the main chain. The flag is flipped
+ /// from false to true once, after which point this starts processing
+ /// ValidationInterface notifications to stay in sync.
+ std::atomic<bool> m_synced{false};
+
+ /// The last block in the chain that the index is in sync with.
+ std::atomic<const CBlockIndex*> m_best_block_index{nullptr};
+
+ std::thread m_thread_sync;
+ CThreadInterrupt m_interrupt;
+
+ /// Sync the index with the block index starting from the current best block.
+ /// Intended to be run in its own thread, m_thread_sync, and can be
+ /// interrupted with m_interrupt. Once the index gets in sync, the m_synced
+ /// flag is set and the BlockConnected ValidationInterface callback takes
+ /// over and the sync thread exits.
+ void ThreadSync();
+
+ /// Write the current chain block locator to the DB.
+ bool WriteBestBlock(const CBlockIndex* block_index);
+
+protected:
+ void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
+ const std::vector<CTransactionRef>& txn_conflicted) override;
+
+ void ChainStateFlushed(const CBlockLocator& locator) override;
+
+ /// Initialize internal state from the database and block index.
+ virtual bool Init();
+
+ /// Write update index entries for a newly connected block.
+ virtual bool WriteBlock(const CBlock& block, const CBlockIndex* pindex) { return true; }
+
+ virtual DB& GetDB() const = 0;
+
+ /// Get the name of the index for display in logs.
+ virtual const char* GetName() const = 0;
+
+public:
+ /// Destructor interrupts sync thread if running and blocks until it exits.
+ virtual ~BaseIndex();
+
+ /// Blocks the current thread until the index is caught up to the current
+ /// state of the block chain. This only blocks if the index has gotten in
+ /// sync once and only needs to process blocks in the ValidationInterface
+ /// queue. If the index is catching up from far behind, this method does
+ /// not block and immediately returns false.
+ bool BlockUntilSyncedToCurrentChain();
+
+ void Interrupt();
+
+ /// Start initializes the sync state and registers the instance as a
+ /// ValidationInterface so that it stays in sync with blockchain updates.
+ void Start();
+
+ /// Stops the instance from staying in sync with blockchain updates.
+ void Stop();
+};
+
+#endif // BITCOIN_INDEX_BASE_H
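
The header above defines the full contract a concrete index must satisfy: supply a BaseIndex::DB, a name for logging, and a per-block WriteBlock(), then drive the instance with Start()/Interrupt()/Stop(). A hypothetical sketch of such a subclass (not part of the patch; ExampleIndex and its data directory are made up, and the same headers TxIndex uses, such as index/base.h and util.h, are assumed):

    // Hypothetical subclass sketch; ExampleIndex is not part of this change.
    class ExampleIndex final : public BaseIndex
    {
    private:
        const std::unique_ptr<BaseIndex::DB> m_db;

    protected:
        bool WriteBlock(const CBlock& block, const CBlockIndex* pindex) override
        {
            // Derive whatever per-block records this index needs and write them via m_db.
            return true;
        }

        BaseIndex::DB& GetDB() const override { return *m_db; }

        const char* GetName() const override { return "exampleindex"; }

    public:
        explicit ExampleIndex(size_t n_cache_size)
            : m_db(MakeUnique<BaseIndex::DB>(GetDataDir() / "indexes" / "exampleindex", n_cache_size))
        {}
    };

    // Typical lifetime, mirroring how init.cpp drives TxIndex:
    //   auto index = MakeUnique<ExampleIndex>(n_cache_size);
    //   index->Start();   // registers the ValidationInterface and spawns the sync thread
    //   ...
    //   index->Stop();    // on shutdown (the destructor also interrupts and joins)
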
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index 3ff16b7664..e106b9b420 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -2,258 +2,261 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <chainparams.h>
#include <index/txindex.h>
#include <init.h>
-#include <tinyformat.h>
#include <ui_interface.h>
#include <util.h>
#include <validation.h>
-#include <warnings.h>
-constexpr int64_t SYNC_LOG_INTERVAL = 30; // seconds
-constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds
+#include <boost/thread.hpp>
+
+constexpr char DB_BEST_BLOCK = 'B';
+constexpr char DB_TXINDEX = 't';
+constexpr char DB_TXINDEX_BLOCK = 'T';
std::unique_ptr<TxIndex> g_txindex;
-template<typename... Args>
-static void FatalError(const char* fmt, const Args&... args)
+struct CDiskTxPos : public CDiskBlockPos
{
- std::string strMessage = tfm::format(fmt, args...);
- SetMiscWarning(strMessage);
- LogPrintf("*** %s\n", strMessage);
- uiInterface.ThreadSafeMessageBox(
- "Error: A fatal internal error occurred, see debug.log for details",
- "", CClientUIInterface::MSG_ERROR);
- StartShutdown();
-}
+ unsigned int nTxOffset; // after header
-TxIndex::TxIndex(std::unique_ptr<TxIndexDB> db) :
- m_db(std::move(db)), m_synced(false), m_best_block_index(nullptr)
-{}
+ ADD_SERIALIZE_METHODS;
-TxIndex::~TxIndex()
-{
- Interrupt();
- Stop();
-}
-
-bool TxIndex::Init()
-{
- LOCK(cs_main);
-
- // Attempt to migrate txindex from the old database to the new one. Even if
- // chain_tip is null, the node could be reindexing and we still want to
- // delete txindex records in the old database.
- if (!m_db->MigrateData(*pblocktree, chainActive.GetLocator())) {
- return false;
+ template <typename Stream, typename Operation>
+ inline void SerializationOp(Stream& s, Operation ser_action) {
+ READWRITEAS(CDiskBlockPos, *this);
+ READWRITE(VARINT(nTxOffset));
}
- CBlockLocator locator;
- if (!m_db->ReadBestBlock(locator)) {
- locator.SetNull();
+ CDiskTxPos(const CDiskBlockPos &blockIn, unsigned int nTxOffsetIn) : CDiskBlockPos(blockIn.nFile, blockIn.nPos), nTxOffset(nTxOffsetIn) {
}
- m_best_block_index = FindForkInGlobalIndex(chainActive, locator);
- m_synced = m_best_block_index.load() == chainActive.Tip();
- return true;
-}
-
-static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev)
-{
- AssertLockHeld(cs_main);
-
- if (!pindex_prev) {
- return chainActive.Genesis();
+ CDiskTxPos() {
+ SetNull();
}
- const CBlockIndex* pindex = chainActive.Next(pindex_prev);
- if (pindex) {
- return pindex;
+ void SetNull() {
+ CDiskBlockPos::SetNull();
+ nTxOffset = 0;
}
-
- return chainActive.Next(chainActive.FindFork(pindex_prev));
-}
-
-void TxIndex::ThreadSync()
+};
+
+/**
+ * Access to the txindex database (indexes/txindex/)
+ *
+ * The database stores a block locator of the chain the database is synced to
+ * so that the TxIndex can efficiently determine the point it last stopped at.
+ * A locator is used instead of a simple hash of the chain tip because blocks
+ * and block index entries may not be flushed to disk until after this database
+ * is updated.
+ */
+class TxIndex::DB : public BaseIndex::DB
{
- const CBlockIndex* pindex = m_best_block_index.load();
- if (!m_synced) {
- auto& consensus_params = Params().GetConsensus();
-
- int64_t last_log_time = 0;
- int64_t last_locator_write_time = 0;
- while (true) {
- if (m_interrupt) {
- WriteBestBlock(pindex);
- return;
- }
+public:
+ explicit DB(size_t n_cache_size, bool f_memory = false, bool f_wipe = false);
- {
- LOCK(cs_main);
- const CBlockIndex* pindex_next = NextSyncBlock(pindex);
- if (!pindex_next) {
- WriteBestBlock(pindex);
- m_best_block_index = pindex;
- m_synced = true;
- break;
- }
- pindex = pindex_next;
- }
+ /// Read the disk location of the transaction data with the given hash. Returns false if the
+ /// transaction hash is not indexed.
+ bool ReadTxPos(const uint256& txid, CDiskTxPos& pos) const;
- int64_t current_time = GetTime();
- if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
- LogPrintf("Syncing txindex with block chain from height %d\n", pindex->nHeight);
- last_log_time = current_time;
- }
+ /// Write a batch of transaction positions to the DB.
+ bool WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_pos);
- if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
- WriteBestBlock(pindex);
- last_locator_write_time = current_time;
- }
+ /// Migrate txindex data from the block tree DB, where it may be for older nodes that have not
+ /// been upgraded yet to the new database.
+ bool MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator& best_locator);
+};
- CBlock block;
- if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
- FatalError("%s: Failed to read block %s from disk",
- __func__, pindex->GetBlockHash().ToString());
- return;
- }
- if (!WriteBlock(block, pindex)) {
- FatalError("%s: Failed to write block %s to tx index database",
- __func__, pindex->GetBlockHash().ToString());
- return;
- }
- }
- }
+TxIndex::DB::DB(size_t n_cache_size, bool f_memory, bool f_wipe) :
+ BaseIndex::DB(GetDataDir() / "indexes" / "txindex", n_cache_size, f_memory, f_wipe)
+{}
- if (pindex) {
- LogPrintf("txindex is enabled at height %d\n", pindex->nHeight);
- } else {
- LogPrintf("txindex is enabled\n");
- }
+bool TxIndex::DB::ReadTxPos(const uint256 &txid, CDiskTxPos& pos) const
+{
+ return Read(std::make_pair(DB_TXINDEX, txid), pos);
}
-bool TxIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
+bool TxIndex::DB::WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_pos)
{
- CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
- std::vector<std::pair<uint256, CDiskTxPos>> vPos;
- vPos.reserve(block.vtx.size());
- for (const auto& tx : block.vtx) {
- vPos.emplace_back(tx->GetHash(), pos);
- pos.nTxOffset += ::GetSerializeSize(*tx, SER_DISK, CLIENT_VERSION);
+ CDBBatch batch(*this);
+ for (const auto& tuple : v_pos) {
+ batch.Write(std::make_pair(DB_TXINDEX, tuple.first), tuple.second);
}
- return m_db->WriteTxs(vPos);
+ return WriteBatch(batch);
}
-bool TxIndex::WriteBestBlock(const CBlockIndex* block_index)
+/*
+ * Safely persist a transfer of data from the old txindex database to the new one, and compact the
+ * range of keys updated. This is used internally by MigrateData.
+ */
+static void WriteTxIndexMigrationBatches(CDBWrapper& newdb, CDBWrapper& olddb,
+ CDBBatch& batch_newdb, CDBBatch& batch_olddb,
+ const std::pair<unsigned char, uint256>& begin_key,
+ const std::pair<unsigned char, uint256>& end_key)
{
- LOCK(cs_main);
- if (!m_db->WriteBestBlock(chainActive.GetLocator(block_index))) {
- return error("%s: Failed to write locator to disk", __func__);
- }
- return true;
+ // Sync new DB changes to disk before deleting from old DB.
+ newdb.WriteBatch(batch_newdb, /*fSync=*/ true);
+ olddb.WriteBatch(batch_olddb);
+ olddb.CompactRange(begin_key, end_key);
+
+ batch_newdb.Clear();
+ batch_olddb.Clear();
}
-void TxIndex::BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
- const std::vector<CTransactionRef>& txn_conflicted)
+bool TxIndex::DB::MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator& best_locator)
{
- if (!m_synced) {
- return;
- }
-
- const CBlockIndex* best_block_index = m_best_block_index.load();
- if (!best_block_index) {
- if (pindex->nHeight != 0) {
- FatalError("%s: First block connected is not the genesis block (height=%d)",
- __func__, pindex->nHeight);
- return;
+ // The prior implementation of txindex was always in sync with block index
+ // and presence was indicated with a boolean DB flag. If the flag is set,
+ // this means the txindex from a previous version is valid and in sync with
+ // the chain tip. The first step of the migration is to unset the flag and
+ // write the chain hash to a separate key, DB_TXINDEX_BLOCK. After that, the
+ // index entries are copied over in batches to the new database. Finally,
+ // DB_TXINDEX_BLOCK is erased from the old database and the block hash is
+ // written to the new database.
+ //
+ // Unsetting the boolean flag ensures that if the node is downgraded to a
+ // previous version, it will not see a corrupted, partially migrated index
+ // -- it will see that the txindex is disabled. When the node is upgraded
+ // again, the migration will pick up where it left off and sync to the block
+ // with hash DB_TXINDEX_BLOCK.
+ bool f_legacy_flag = false;
+ block_tree_db.ReadFlag("txindex", f_legacy_flag);
+ if (f_legacy_flag) {
+ if (!block_tree_db.Write(DB_TXINDEX_BLOCK, best_locator)) {
+ return error("%s: cannot write block indicator", __func__);
}
- } else {
- // Ensure block connects to an ancestor of the current best block. This should be the case
- // most of the time, but may not be immediately after the sync thread catches up and sets
- // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
- // in the ValidationInterface queue backlog even after the sync thread has caught up to the
- // new chain tip. In this unlikely event, log a warning and let the queue clear.
- if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
- LogPrintf("%s: WARNING: Block %s does not connect to an ancestor of " /* Continued */
- "known best chain (tip=%s); not updating txindex\n",
- __func__, pindex->GetBlockHash().ToString(),
- best_block_index->GetBlockHash().ToString());
- return;
+ if (!block_tree_db.WriteFlag("txindex", false)) {
+ return error("%s: cannot write block index db flag", __func__);
}
}
- if (WriteBlock(*block, pindex)) {
- m_best_block_index = pindex;
- } else {
- FatalError("%s: Failed to write block %s to txindex",
- __func__, pindex->GetBlockHash().ToString());
- return;
+ CBlockLocator locator;
+ if (!block_tree_db.Read(DB_TXINDEX_BLOCK, locator)) {
+ return true;
}
-}
-void TxIndex::ChainStateFlushed(const CBlockLocator& locator)
-{
- if (!m_synced) {
- return;
- }
+ int64_t count = 0;
+ LogPrintf("Upgrading txindex database... [0%%]\n");
+ uiInterface.ShowProgress(_("Upgrading txindex database"), 0, true);
+ int report_done = 0;
+ const size_t batch_size = 1 << 24; // 16 MiB
+
+ CDBBatch batch_newdb(*this);
+ CDBBatch batch_olddb(block_tree_db);
+
+ std::pair<unsigned char, uint256> key;
+ std::pair<unsigned char, uint256> begin_key{DB_TXINDEX, uint256()};
+ std::pair<unsigned char, uint256> prev_key = begin_key;
+
+ bool interrupted = false;
+ std::unique_ptr<CDBIterator> cursor(block_tree_db.NewIterator());
+ for (cursor->Seek(begin_key); cursor->Valid(); cursor->Next()) {
+ boost::this_thread::interruption_point();
+ if (ShutdownRequested()) {
+ interrupted = true;
+ break;
+ }
- const uint256& locator_tip_hash = locator.vHave.front();
- const CBlockIndex* locator_tip_index;
- {
- LOCK(cs_main);
- locator_tip_index = LookupBlockIndex(locator_tip_hash);
- }
+ if (!cursor->GetKey(key)) {
+ return error("%s: cannot get key from valid cursor", __func__);
+ }
+ if (key.first != DB_TXINDEX) {
+ break;
+ }
- if (!locator_tip_index) {
- FatalError("%s: First block (hash=%s) in locator was not found",
- __func__, locator_tip_hash.ToString());
- return;
+ // Log progress every 10%.
+ if (++count % 256 == 0) {
+ // Since txids are uniformly random and traversed in increasing order, the high 16 bits
+ // of the hash can be used to estimate the current progress.
+ const uint256& txid = key.second;
+ uint32_t high_nibble =
+ (static_cast<uint32_t>(*(txid.begin() + 0)) << 8) +
+ (static_cast<uint32_t>(*(txid.begin() + 1)) << 0);
+ int percentage_done = (int)(high_nibble * 100.0 / 65536.0 + 0.5);
+
+ uiInterface.ShowProgress(_("Upgrading txindex database"), percentage_done, true);
+ if (report_done < percentage_done/10) {
+ LogPrintf("Upgrading txindex database... [%d%%]\n", percentage_done);
+ report_done = percentage_done/10;
+ }
+ }
+
+ CDiskTxPos value;
+ if (!cursor->GetValue(value)) {
+ return error("%s: cannot parse txindex record", __func__);
+ }
+ batch_newdb.Write(key, value);
+ batch_olddb.Erase(key);
+
+ if (batch_newdb.SizeEstimate() > batch_size || batch_olddb.SizeEstimate() > batch_size) {
+ // NOTE: it's OK to delete the key pointed at by the current DB cursor while iterating
+ // because LevelDB iterators are guaranteed to provide a consistent view of the
+ // underlying data, like a lightweight snapshot.
+ WriteTxIndexMigrationBatches(*this, block_tree_db,
+ batch_newdb, batch_olddb,
+ prev_key, key);
+ prev_key = key;
+ }
}
- // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
- // immediately after the sync thread catches up and sets m_synced. Consider the case where
- // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
- // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
- // event, log a warning and let the queue clear.
- const CBlockIndex* best_block_index = m_best_block_index.load();
- if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
- LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known best " /* Continued */
- "chain (tip=%s); not writing txindex locator\n",
- __func__, locator_tip_hash.ToString(),
- best_block_index->GetBlockHash().ToString());
- return;
+ // If these final DB batches complete the migration, write the best block
+ // hash marker to the new database and delete from the old one. This signals
+ // that the former is fully caught up to that point in the blockchain and
+ // that all txindex entries have been removed from the latter.
+ if (!interrupted) {
+ batch_olddb.Erase(DB_TXINDEX_BLOCK);
+ batch_newdb.Write(DB_BEST_BLOCK, locator);
}
- if (!m_db->WriteBestBlock(locator)) {
- error("%s: Failed to write locator to disk", __func__);
+ WriteTxIndexMigrationBatches(*this, block_tree_db,
+ batch_newdb, batch_olddb,
+ begin_key, key);
+
+ if (interrupted) {
+ LogPrintf("[CANCELLED].\n");
+ return false;
}
+
+ uiInterface.ShowProgress("", 100, false);
+
+ LogPrintf("[DONE].\n");
+ return true;
}
-bool TxIndex::BlockUntilSyncedToCurrentChain()
+TxIndex::TxIndex(size_t n_cache_size, bool f_memory, bool f_wipe)
+ : m_db(MakeUnique<TxIndex::DB>(n_cache_size, f_memory, f_wipe))
+{}
+
+TxIndex::~TxIndex() {}
+
+bool TxIndex::Init()
{
- AssertLockNotHeld(cs_main);
+ LOCK(cs_main);
- if (!m_synced) {
+ // Attempt to migrate txindex from the old database to the new one. Even if
+ // chain_tip is null, the node could be reindexing and we still want to
+ // delete txindex records in the old database.
+ if (!m_db->MigrateData(*pblocktree, chainActive.GetLocator())) {
return false;
}
- {
- // Skip the queue-draining stuff if we know we're caught up with
- // chainActive.Tip().
- LOCK(cs_main);
- const CBlockIndex* chain_tip = chainActive.Tip();
- const CBlockIndex* best_block_index = m_best_block_index.load();
- if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
- return true;
- }
- }
+ return BaseIndex::Init();
+}
- LogPrintf("%s: txindex is catching up on block notifications\n", __func__);
- SyncWithValidationInterfaceQueue();
- return true;
+bool TxIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
+{
+ CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
+ std::vector<std::pair<uint256, CDiskTxPos>> vPos;
+ vPos.reserve(block.vtx.size());
+ for (const auto& tx : block.vtx) {
+ vPos.emplace_back(tx->GetHash(), pos);
+ pos.nTxOffset += ::GetSerializeSize(*tx, SER_DISK, CLIENT_VERSION);
+ }
+ return m_db->WriteTxs(vPos);
}
+BaseIndex::DB& TxIndex::GetDB() const { return *m_db; }
+
bool TxIndex::FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRef& tx) const
{
CDiskTxPos postx;
@@ -281,31 +284,3 @@ bool TxIndex::FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRe
block_hash = header.GetHash();
return true;
}
-
-void TxIndex::Interrupt()
-{
- m_interrupt();
-}
-
-void TxIndex::Start()
-{
- // Need to register this ValidationInterface before running Init(), so that
- // callbacks are not missed if Init sets m_synced to true.
- RegisterValidationInterface(this);
- if (!Init()) {
- FatalError("%s: txindex failed to initialize", __func__);
- return;
- }
-
- m_thread_sync = std::thread(&TraceThread<std::function<void()>>, "txindex",
- std::bind(&TxIndex::ThreadSync, this));
-}
-
-void TxIndex::Stop()
-{
- UnregisterValidationInterface(this);
-
- if (m_thread_sync.joinable()) {
- m_thread_sync.join();
- }
-}
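
The migration loop above estimates progress from the first two serialized bytes of each txid key: because txids are uniformly distributed and the keys are traversed in increasing order, that 16-bit prefix divided by 65536 approximates the fraction of the keyspace already copied. A standalone sketch of the arithmetic (not part of the patch):

    // Sketch only: the progress estimate used while copying txindex records.
    #include <cstdint>
    #include <cstdio>

    static int ProgressPercent(uint8_t byte0, uint8_t byte1)
    {
        const uint32_t high_bits = (static_cast<uint32_t>(byte0) << 8) + static_cast<uint32_t>(byte1);
        return static_cast<int>(high_bits * 100.0 / 65536.0 + 0.5);
    }

    int main()
    {
        std::printf("%d\n", ProgressPercent(0x00, 0x00)); // 0   (start of the key space)
        std::printf("%d\n", ProgressPercent(0x80, 0x00)); // 50  (halfway through)
        std::printf("%d\n", ProgressPercent(0xff, 0xff)); // 100 (end of the key space)
    }
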
diff --git a/src/index/txindex.h b/src/index/txindex.h
index 4937bd64e9..8202c3c951 100644
--- a/src/index/txindex.h
+++ b/src/index/txindex.h
@@ -5,70 +5,39 @@
#ifndef BITCOIN_INDEX_TXINDEX_H
#define BITCOIN_INDEX_TXINDEX_H
-#include <primitives/block.h>
-#include <primitives/transaction.h>
-#include <threadinterrupt.h>
+#include <chain.h>
+#include <index/base.h>
#include <txdb.h>
-#include <uint256.h>
-#include <validationinterface.h>
-
-class CBlockIndex;
/**
* TxIndex is used to look up transactions included in the blockchain by hash.
* The index is written to a LevelDB database and records the filesystem
* location of each transaction by transaction hash.
*/
-class TxIndex final : public CValidationInterface
+class TxIndex final : public BaseIndex
{
-private:
- const std::unique_ptr<TxIndexDB> m_db;
-
- /// Whether the index is in sync with the main chain. The flag is flipped
- /// from false to true once, after which point this starts processing
- /// ValidationInterface notifications to stay in sync.
- std::atomic<bool> m_synced;
-
- /// The last block in the chain that the TxIndex is in sync with.
- std::atomic<const CBlockIndex*> m_best_block_index;
-
- std::thread m_thread_sync;
- CThreadInterrupt m_interrupt;
-
- /// Initialize internal state from the database and block index.
- bool Init();
+protected:
+ class DB;
- /// Sync the tx index with the block index starting from the current best
- /// block. Intended to be run in its own thread, m_thread_sync, and can be
- /// interrupted with m_interrupt. Once the txindex gets in sync, the
- /// m_synced flag is set and the BlockConnected ValidationInterface callback
- /// takes over and the sync thread exits.
- void ThreadSync();
+private:
+ const std::unique_ptr<DB> m_db;
- /// Write update index entries for a newly connected block.
- bool WriteBlock(const CBlock& block, const CBlockIndex* pindex);
+protected:
+ /// Override base class init to migrate from old database.
+ bool Init() override;
- /// Write the current chain block locator to the DB.
- bool WriteBestBlock(const CBlockIndex* block_index);
+ bool WriteBlock(const CBlock& block, const CBlockIndex* pindex) override;
-protected:
- void BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex,
- const std::vector<CTransactionRef>& txn_conflicted) override;
+ BaseIndex::DB& GetDB() const override;
- void ChainStateFlushed(const CBlockLocator& locator) override;
+ const char* GetName() const override { return "txindex"; }
public:
- /// Constructs the TxIndex, which becomes available to be queried.
- explicit TxIndex(std::unique_ptr<TxIndexDB> db);
-
- /// Destructor interrupts sync thread if running and blocks until it exits.
- ~TxIndex();
+ /// Constructs the index, which becomes available to be queried.
+ explicit TxIndex(size_t n_cache_size, bool f_memory = false, bool f_wipe = false);
- /// Blocks the current thread until the transaction index is caught up to
- /// the current state of the block chain. This only blocks if the index has gotten in sync once
- /// and only needs to process blocks in the ValidationInterface queue. If the index is catching
- /// up from far behind, this method does not block and immediately returns false.
- bool BlockUntilSyncedToCurrentChain();
+ // Destructor is declared because this class contains a unique_ptr to an incomplete type.
+ virtual ~TxIndex() override;
/// Look up a transaction by hash.
///
@@ -77,15 +46,6 @@ public:
/// @param[out] tx The transaction itself.
/// @return true if transaction is found, false otherwise
bool FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRef& tx) const;
-
- void Interrupt();
-
- /// Start initializes the sync state and registers the instance as a
- /// ValidationInterface so that it stays in sync with blockchain updates.
- void Start();
-
- /// Stops the instance from staying in sync with blockchain updates.
- void Stop();
};
/// The global transaction index, used in GetTransaction. May be null.
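
For context, a hypothetical caller sketch (not part of the patch) of how g_txindex is typically consulted, using only the interface declared above; LookupWithTxIndex is a made-up name:

    // Hypothetical caller sketch; assumes index/txindex.h is included.
    CTransactionRef LookupWithTxIndex(const uint256& tx_hash)
    {
        if (!g_txindex) {
            return nullptr; // node was started without -txindex
        }
        // Only blocks to drain the validation queue; returns false while the
        // index is still catching up from far behind.
        g_txindex->BlockUntilSyncedToCurrentChain();

        uint256 block_hash;
        CTransactionRef tx;
        if (!g_txindex->FindTx(tx_hash, block_hash, tx)) {
            return nullptr; // hash is not in the index
        }
        return tx;
    }
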
diff --git a/src/init.cpp b/src/init.cpp
index b4e2eec0d2..9246f6e71c 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1606,8 +1606,7 @@ bool AppInitMain()
// ********************************************************* Step 8: start indexers
if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
- auto txindex_db = MakeUnique<TxIndexDB>(nTxIndexCache, false, fReindex);
- g_txindex = MakeUnique<TxIndex>(std::move(txindex_db));
+ g_txindex = MakeUnique<TxIndex>(nTxIndexCache, false, fReindex);
g_txindex->Start();
}
diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui
index a3721991ee..8f34e6bc82 100644
--- a/src/qt/forms/optionsdialog.ui
+++ b/src/qt/forms/optionsdialog.ui
@@ -38,6 +38,69 @@
</widget>
</item>
<item>
+ <spacer name="horizontalSpacer_0_Main">
+ <property name="orientation">
+ <enum>Qt::Horizontal</enum>
+ </property>
+ <property name="sizeHint" stdset="0">
+ <size>
+ <width>40</width>
+ <height>5</height>
+ </size>
+ </property>
+ </spacer>
+ </item>
+ <item>
+ <layout class="QHBoxLayout" name="horizontalLayout_Main_Prune">
+ <item>
+ <widget class="QCheckBox" name="prune">
+ <property name="toolTip">
+ <string>Disables some advanced features but all blocks will still be fully validated. Reverting this setting requires re-downloading the entire blockchain. Actual disk usage may be somewhat higher.</string>
+ </property>
+ <property name="text">
+ <string>Prune &amp;block storage to</string>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QSpinBox" name="pruneSize"/>
+ </item>
+ <item>
+ <widget class="QLabel" name="pruneSizeUnitLabel">
+ <property name="text">
+ <string>GB</string>
+ </property>
+ <property name="textFormat">
+ <enum>Qt::PlainText</enum>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <spacer name="horizontalSpacer_Main_Prune">
+ <property name="orientation">
+ <enum>Qt::Horizontal</enum>
+ </property>
+ <property name="sizeHint" stdset="0">
+ <size>
+ <width>40</width>
+ <height>20</height>
+ </size>
+ </property>
+ </spacer>
+ </item>
+ </layout>
+ </item>
+ <item>
+ <widget class="QLabel" name="pruneWarning">
+ <property name="text">
+ <string>Reverting this setting requires re-downloading the entire blockchain.</string>
+ </property>
+ <property name="textFormat">
+ <enum>Qt::PlainText</enum>
+ </property>
+ </widget>
+ </item>
+ <item>
<layout class="QHBoxLayout" name="horizontalLayout_2_Main">
<item>
<widget class="QLabel" name="databaseCacheLabel">
@@ -81,7 +144,7 @@
</layout>
</item>
<item>
- <layout class="QHBoxLayout" name="horizontalLayout_3_Main">
+ <layout class="QHBoxLayout" name="horizontalLayout_Main_VerifyLabel">
<item>
<widget class="QLabel" name="threadsScriptVerifLabel">
<property name="text">
@@ -103,7 +166,7 @@
</widget>
</item>
<item>
- <spacer name="horizontalSpacer_3_Main">
+ <spacer name="horizontalSpacer_Main_Threads">
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp
index c0ddb89b40..108aa4f99c 100644
--- a/src/qt/optionsdialog.cpp
+++ b/src/qt/optionsdialog.cpp
@@ -36,8 +36,17 @@ OptionsDialog::OptionsDialog(QWidget *parent, bool enableWallet) :
/* Main elements init */
ui->databaseCache->setMinimum(nMinDbCache);
ui->databaseCache->setMaximum(nMaxDbCache);
+ static const uint64_t GiB = 1024 * 1024 * 1024;
+ static const uint64_t nMinDiskSpace = MIN_DISK_SPACE_FOR_BLOCK_FILES / GiB +
+ (MIN_DISK_SPACE_FOR_BLOCK_FILES % GiB) ? 1 : 0;
+ ui->pruneSize->setMinimum(nMinDiskSpace);
ui->threadsScriptVerif->setMinimum(-GetNumCores());
ui->threadsScriptVerif->setMaximum(MAX_SCRIPTCHECK_THREADS);
+ ui->pruneWarning->setVisible(false);
+ ui->pruneWarning->setStyleSheet("QLabel { color: red; }");
+
+ ui->pruneSize->setEnabled(false);
+ connect(ui->prune, SIGNAL(toggled(bool)), ui->pruneSize, SLOT(setEnabled(bool)));
/* Network elements init */
#ifndef USE_UPNP
@@ -157,6 +166,9 @@ void OptionsDialog::setModel(OptionsModel *_model)
/* warn when one of the following settings changes by user action (placed here so init via mapper doesn't trigger them) */
/* Main */
+ connect(ui->prune, SIGNAL(clicked(bool)), this, SLOT(showRestartWarning()));
+ connect(ui->prune, SIGNAL(clicked(bool)), this, SLOT(togglePruneWarning(bool)));
+ connect(ui->pruneSize, SIGNAL(valueChanged(int)), this, SLOT(showRestartWarning()));
connect(ui->databaseCache, SIGNAL(valueChanged(int)), this, SLOT(showRestartWarning()));
connect(ui->threadsScriptVerif, SIGNAL(valueChanged(int)), this, SLOT(showRestartWarning()));
/* Wallet */
@@ -176,6 +188,8 @@ void OptionsDialog::setMapper()
mapper->addMapping(ui->bitcoinAtStartup, OptionsModel::StartAtStartup);
mapper->addMapping(ui->threadsScriptVerif, OptionsModel::ThreadsScriptVerif);
mapper->addMapping(ui->databaseCache, OptionsModel::DatabaseCache);
+ mapper->addMapping(ui->prune, OptionsModel::Prune);
+ mapper->addMapping(ui->pruneSize, OptionsModel::PruneSize);
/* Wallet */
mapper->addMapping(ui->spendZeroConfChange, OptionsModel::SpendZeroConfChange);
@@ -266,6 +280,11 @@ void OptionsDialog::on_hideTrayIcon_stateChanged(int fState)
}
}
+void OptionsDialog::togglePruneWarning(bool enabled)
+{
+ ui->pruneWarning->setVisible(!ui->pruneWarning->isVisible());
+}
+
void OptionsDialog::showRestartWarning(bool fPersistent)
{
ui->statusLabel->setStyleSheet("QLabel { color: red; }");
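
The pruneSize minimum above is intended to be MIN_DISK_SPACE_FOR_BLOCK_FILES rounded up to whole GiB. Note that ?: binds more loosely than +, so the unparenthesized form parses as "(quotient + remainder) ? 1 : 0". A standalone sketch of the ceiling conversion (not part of the patch; the 550 MiB figure is a stand-in, not a value taken from this diff):

    // Sketch only: round a byte count up to whole GiB.
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint64_t GiB = 1024 * 1024 * 1024;
        const uint64_t min_disk_space = 550 * 1024 * 1024; // stand-in for MIN_DISK_SPACE_FOR_BLOCK_FILES

        // Conditional-operator form; the parentheses around the remainder test matter,
        // because ?: binds more loosely than +.
        const uint64_t min_gib_a = min_disk_space / GiB + ((min_disk_space % GiB) ? 1 : 0);
        // Equivalent ceiling division without ?: at all.
        const uint64_t min_gib_b = (min_disk_space + GiB - 1) / GiB;

        std::printf("%llu %llu\n", (unsigned long long)min_gib_a, (unsigned long long)min_gib_b); // 1 1
    }
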
diff --git a/src/qt/optionsdialog.h b/src/qt/optionsdialog.h
index faf9ff8959..5aad484ce7 100644
--- a/src/qt/optionsdialog.h
+++ b/src/qt/optionsdialog.h
@@ -53,6 +53,7 @@ private Q_SLOTS:
void on_hideTrayIcon_stateChanged(int fState);
+ void togglePruneWarning(bool enabled);
void showRestartWarning(bool fPersistent = false);
void clearStatusLabel();
void updateProxyValidationState();
diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp
index cae9dace4c..31a85f4e23 100644
--- a/src/qt/optionsmodel.cpp
+++ b/src/qt/optionsmodel.cpp
@@ -88,6 +88,16 @@ void OptionsModel::Init(bool resetSettings)
// by command-line and show this in the UI.
// Main
+ if (!settings.contains("bPrune"))
+ settings.setValue("bPrune", false);
+ if (!settings.contains("nPruneSize"))
+ settings.setValue("nPruneSize", 2);
+ // Convert prune size to MB:
+ const uint64_t nPruneSizeMB = settings.value("nPruneSize").toInt() * 1000;
+ if (!m_node.softSetArg("-prune", settings.value("bPrune").toBool() ? std::to_string(nPruneSizeMB) : "0")) {
+ addOverriddenOption("-prune");
+ }
+
if (!settings.contains("nDatabaseCache"))
settings.setValue("nDatabaseCache", (qint64)nDefaultDbCache);
if (!m_node.softSetArg("-dbcache", settings.value("nDatabaseCache").toString().toStdString()))
@@ -281,6 +291,10 @@ QVariant OptionsModel::data(const QModelIndex & index, int role) const
return settings.value("language");
case CoinControlFeatures:
return fCoinControlFeatures;
+ case Prune:
+ return settings.value("bPrune");
+ case PruneSize:
+ return settings.value("nPruneSize");
case DatabaseCache:
return settings.value("nDatabaseCache");
case ThreadsScriptVerif:
@@ -405,6 +419,18 @@ bool OptionsModel::setData(const QModelIndex & index, const QVariant & value, in
settings.setValue("fCoinControlFeatures", fCoinControlFeatures);
Q_EMIT coinControlFeaturesChanged(fCoinControlFeatures);
break;
+ case Prune:
+ if (settings.value("bPrune") != value) {
+ settings.setValue("bPrune", value);
+ setRestartRequired(true);
+ }
+ break;
+ case PruneSize:
+ if (settings.value("nPruneSize") != value) {
+ settings.setValue("nPruneSize", value);
+ setRestartRequired(true);
+ }
+ break;
case DatabaseCache:
if (settings.value("nDatabaseCache") != value) {
settings.setValue("nDatabaseCache", value);
diff --git a/src/qt/optionsmodel.h b/src/qt/optionsmodel.h
index fc1d119a71..2777cbeaf2 100644
--- a/src/qt/optionsmodel.h
+++ b/src/qt/optionsmodel.h
@@ -50,6 +50,8 @@ public:
Language, // QString
CoinControlFeatures, // bool
ThreadsScriptVerif, // int
+ Prune, // bool
+ PruneSize, // int
DatabaseCache, // int
SpendZeroConfChange, // bool
Listen, // bool
diff --git a/src/rest.cpp b/src/rest.cpp
index ffa75c241f..a5f164497d 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -217,7 +217,7 @@ static bool rest_block(HTTPRequest* req,
return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found");
}
- if (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0)
+ if (IsBlockPruned(pblockindex))
return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not available (pruned data)");
if (!ReadBlockFromDisk(block, pblockindex, Params().GetConsensus()))
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 48e0e02d1a..f70d506e13 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -732,7 +732,7 @@ static UniValue getblockheader(const JSONRPCRequest& request)
static CBlock GetBlockChecked(const CBlockIndex* pblockindex)
{
CBlock block;
- if (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0) {
+ if (IsBlockPruned(pblockindex)) {
throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)");
}
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index bb68f72ccc..0f35fd3770 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -52,7 +52,6 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "listreceivedbylabel", 0, "minconf" },
{ "listreceivedbylabel", 1, "include_empty" },
{ "listreceivedbylabel", 2, "include_watchonly" },
- { "getlabeladdress", 1, "force" },
{ "getbalance", 1, "minconf" },
{ "getbalance", 2, "include_watchonly" },
{ "getblockhash", 0, "height" },
diff --git a/src/test/arith_uint256_tests.cpp b/src/test/arith_uint256_tests.cpp
index 13ec19834a..8644aea371 100644
--- a/src/test/arith_uint256_tests.cpp
+++ b/src/test/arith_uint256_tests.cpp
@@ -198,13 +198,6 @@ BOOST_AUTO_TEST_CASE( shifts ) { // "<<" ">>" "<<=" ">>="
BOOST_AUTO_TEST_CASE( unaryOperators ) // ! ~ -
{
- BOOST_CHECK(!ZeroL);
- BOOST_CHECK(!(!OneL));
- for (unsigned int i = 0; i < 256; ++i)
- BOOST_CHECK(!(!(OneL<<i)));
- BOOST_CHECK(!(!R1L));
- BOOST_CHECK(!(!MaxL));
-
BOOST_CHECK(~ZeroL == MaxL);
unsigned char TmpArray[32];
diff --git a/src/test/mempool_tests.cpp b/src/test/mempool_tests.cpp
index 5ca243f42e..4070642537 100644
--- a/src/test/mempool_tests.cpp
+++ b/src/test/mempool_tests.cpp
@@ -571,4 +571,182 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
SetMockTime(0);
}
+inline CTransactionRef make_tx(std::vector<CAmount>&& output_values, std::vector<CTransactionRef>&& inputs=std::vector<CTransactionRef>(), std::vector<uint32_t>&& input_indices=std::vector<uint32_t>())
+{
+ CMutableTransaction tx = CMutableTransaction();
+ tx.vin.resize(inputs.size());
+ tx.vout.resize(output_values.size());
+ for (size_t i = 0; i < inputs.size(); ++i) {
+ tx.vin[i].prevout.hash = inputs[i]->GetHash();
+ tx.vin[i].prevout.n = input_indices.size() > i ? input_indices[i] : 0;
+ }
+ for (size_t i = 0; i < output_values.size(); ++i) {
+ tx.vout[i].scriptPubKey = CScript() << OP_11 << OP_EQUAL;
+ tx.vout[i].nValue = output_values[i];
+ }
+ return MakeTransactionRef(tx);
+}
+
+#define MK_OUTPUTS(amounts...) std::vector<CAmount>{amounts}
+#define MK_INPUTS(txs...) std::vector<CTransactionRef>{txs}
+#define MK_INPUT_IDX(idxes...) std::vector<uint32_t>{idxes}
+
+BOOST_AUTO_TEST_CASE(MempoolAncestryTests)
+{
+ size_t ancestors, descendants;
+
+ CTxMemPool pool;
+ TestMemPoolEntryHelper entry;
+
+ /* Base transaction */
+ //
+ // [tx1]
+ //
+ CTransactionRef tx1 = make_tx(MK_OUTPUTS(10 * COIN));
+ pool.addUnchecked(tx1->GetHash(), entry.Fee(10000LL).FromTx(tx1));
+
+ // Ancestors / descendants should be 1 / 1 (itself / itself)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 1ULL);
+
+ /* Child transaction */
+ //
+ // [tx1].0 <- [tx2]
+ //
+ CTransactionRef tx2 = make_tx(MK_OUTPUTS(495 * CENT, 5 * COIN), MK_INPUTS(tx1));
+ pool.addUnchecked(tx2->GetHash(), entry.Fee(10000LL).FromTx(tx2));
+
+ // Ancestors / descendants should be:
+ // transaction ancestors descendants
+ // ============ =========== ===========
+ // tx1 1 (tx1) 2 (tx1,2)
+ // tx2 2 (tx1,2) 2 (tx1,2)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 2ULL);
+ pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 2ULL);
+
+ /* Grand-child 1 */
+ //
+ // [tx1].0 <- [tx2].0 <- [tx3]
+ //
+ CTransactionRef tx3 = make_tx(MK_OUTPUTS(290 * CENT, 200 * CENT), MK_INPUTS(tx2));
+ pool.addUnchecked(tx3->GetHash(), entry.Fee(10000LL).FromTx(tx3));
+
+ // Ancestors / descendants should be:
+ // transaction ancestors descendants
+ // ============ =========== ===========
+ // tx1 1 (tx1) 3 (tx1,2,3)
+ // tx2 2 (tx1,2) 3 (tx1,2,3)
+ // tx3 3 (tx1,2,3) 3 (tx1,2,3)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 3ULL);
+ pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 3ULL);
+ pool.GetTransactionAncestry(tx3->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 3ULL);
+
+ /* Grand-child 2 */
+ //
+ // [tx1].0 <- [tx2].0 <- [tx3]
+ // |
+ // \---1 <- [tx4]
+ //
+ CTransactionRef tx4 = make_tx(MK_OUTPUTS(290 * CENT, 250 * CENT), MK_INPUTS(tx2), MK_INPUT_IDX(1));
+ pool.addUnchecked(tx4->GetHash(), entry.Fee(10000LL).FromTx(tx4));
+
+ // Ancestors / descendants should be:
+ // transaction ancestors descendants
+ // ============ =========== ===========
+ // tx1 1 (tx1) 4 (tx1,2,3,4)
+ // tx2 2 (tx1,2) 4 (tx1,2,3,4)
+ // tx3 3 (tx1,2,3) 4 (tx1,2,3,4)
+ // tx4 3 (tx1,2,4) 4 (tx1,2,3,4)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 4ULL);
+ pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 4ULL);
+ pool.GetTransactionAncestry(tx3->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 4ULL);
+ pool.GetTransactionAncestry(tx4->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 4ULL);
+
+ /* Make an alternate branch that is longer and connect it to tx3 */
+ //
+ // [ty1].0 <- [ty2].0 <- [ty3].0 <- [ty4].0 <- [ty5].0
+ // |
+ // [tx1].0 <- [tx2].0 <- [tx3].0 <- [ty6] --->--/
+ // |
+ // \---1 <- [tx4]
+ //
+ CTransactionRef ty1, ty2, ty3, ty4, ty5;
+ CTransactionRef* ty[5] = {&ty1, &ty2, &ty3, &ty4, &ty5};
+ CAmount v = 5 * COIN;
+ for (uint64_t i = 0; i < 5; i++) {
+ CTransactionRef& tyi = *ty[i];
+ tyi = make_tx(MK_OUTPUTS(v), i > 0 ? MK_INPUTS(*ty[i-1]) : std::vector<CTransactionRef>());
+ v -= 50 * CENT;
+ pool.addUnchecked(tyi->GetHash(), entry.Fee(10000LL).FromTx(tyi));
+ pool.GetTransactionAncestry(tyi->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, i+1);
+ BOOST_CHECK_EQUAL(descendants, i+1);
+ }
+ CTransactionRef ty6 = make_tx(MK_OUTPUTS(5 * COIN), MK_INPUTS(tx3, ty5));
+ pool.addUnchecked(ty6->GetHash(), entry.Fee(10000LL).FromTx(ty6));
+
+ // Ancestors / descendants should be:
+ // transaction ancestors descendants
+ // ============ =================== ===========
+ // tx1 1 (tx1) 5 (tx1,2,3,4, ty6)
+ // tx2 2 (tx1,2) 5 (tx1,2,3,4, ty6)
+ // tx3 3 (tx1,2,3) 5 (tx1,2,3,4, ty6)
+ // tx4 3 (tx1,2,4) 5 (tx1,2,3,4, ty6)
+ // ty1 1 (ty1) 6 (ty1,2,3,4,5,6)
+ // ty2 2 (ty1,2) 6 (ty1,2,3,4,5,6)
+ // ty3 3 (ty1,2,3) 6 (ty1,2,3,4,5,6)
+ // ty4 4 (y1234) 6 (ty1,2,3,4,5,6)
+ // ty5 5 (y12345) 6 (ty1,2,3,4,5,6)
+ // ty6 9 (tx123, ty123456) 6 (ty1,2,3,4,5,6)
+ pool.GetTransactionAncestry(tx1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 5ULL);
+ pool.GetTransactionAncestry(tx2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 5ULL);
+ pool.GetTransactionAncestry(tx3->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 5ULL);
+ pool.GetTransactionAncestry(tx4->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 5ULL);
+ pool.GetTransactionAncestry(ty1->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 1ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty2->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 2ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty3->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 3ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty4->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 4ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty5->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 5ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+ pool.GetTransactionAncestry(ty6->GetHash(), ancestors, descendants);
+ BOOST_CHECK_EQUAL(ancestors, 9ULL);
+ BOOST_CHECK_EQUAL(descendants, 6ULL);
+}
+
BOOST_AUTO_TEST_SUITE_END()
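
The ty6 row in the final table is the one worth checking by hand. Its ancestor count is 9 because the counts include the transaction itself: {ty6, tx1, tx2, tx3, ty1, ty2, ty3, ty4, ty5}. Its reported descendant count is 6 because GetTransactionAncestry() returns CalculateDescendantMaximum(), the largest GetCountWithDescendants() among the entry's parentless ancestors; for ty6 those roots are tx1 (5: tx1,2,3,4,ty6) and ty1 (6: ty1,2,3,4,5,ty6), so 6 wins.

    // Worked reading of the ty6 checks above (comments only, mirrors the test):
    //   ancestors(ty6)   = |{ty6, tx1, tx2, tx3, ty1, ty2, ty3, ty4, ty5}|  = 9
    //   descendants(ty6) = max over parentless ancestors of their descendant
    //                      counts: tx1 -> 5, ty1 -> 6                       = 6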
diff --git a/src/test/txindex_tests.cpp b/src/test/txindex_tests.cpp
index 14158f2875..be7ee2428b 100644
--- a/src/test/txindex_tests.cpp
+++ b/src/test/txindex_tests.cpp
@@ -15,7 +15,7 @@ BOOST_AUTO_TEST_SUITE(txindex_tests)
BOOST_FIXTURE_TEST_CASE(txindex_initial_sync, TestChain100Setup)
{
- TxIndex txindex(MakeUnique<TxIndexDB>(1 << 20, true));
+ TxIndex txindex(1 << 20, true);
CTransactionRef tx_disk;
uint256 block_hash;
diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp
index 2b00064cd0..d32d4b267c 100644
--- a/src/test/txvalidationcache_tests.cpp
+++ b/src/test/txvalidationcache_tests.cpp
@@ -102,7 +102,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup)
// should fail.
// Capture this interaction with the upgraded_nop argument: set it when evaluating
// any script flag that is implemented as an upgraded NOP code.
-static void ValidateCheckInputsForAllFlags(CMutableTransaction &tx, uint32_t failing_flags, bool add_to_cache)
+static void ValidateCheckInputsForAllFlags(const CTransaction &tx, uint32_t failing_flags, bool add_to_cache)
{
PrecomputedTransactionData txdata(tx);
// If we add many more flags, this loop can get too expensive, but we can
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 333d3596c1..b1d5879c83 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -21,8 +21,6 @@
static const char DB_COIN = 'C';
static const char DB_COINS = 'c';
static const char DB_BLOCK_FILES = 'f';
-static const char DB_TXINDEX = 't';
-static const char DB_TXINDEX_BLOCK = 'T';
static const char DB_BLOCK_INDEX = 'b';
static const char DB_BEST_BLOCK = 'B';
@@ -237,17 +235,6 @@ bool CBlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockF
return WriteBatch(batch, true);
}
-bool CBlockTreeDB::ReadTxIndex(const uint256 &txid, CDiskTxPos &pos) {
- return Read(std::make_pair(DB_TXINDEX, txid), pos);
-}
-
-bool CBlockTreeDB::WriteTxIndex(const std::vector<std::pair<uint256, CDiskTxPos> >&vect) {
- CDBBatch batch(*this);
- for (std::vector<std::pair<uint256,CDiskTxPos> >::const_iterator it=vect.begin(); it!=vect.end(); it++)
- batch.Write(std::make_pair(DB_TXINDEX, it->first), it->second);
- return WriteBatch(batch);
-}
-
bool CBlockTreeDB::WriteFlag(const std::string &name, bool fValue) {
return Write(std::make_pair(DB_FLAG, name), fValue ? '1' : '0');
}
@@ -425,173 +412,3 @@ bool CCoinsViewDB::Upgrade() {
LogPrintf("[%s].\n", ShutdownRequested() ? "CANCELLED" : "DONE");
return !ShutdownRequested();
}
-
-TxIndexDB::TxIndexDB(size_t n_cache_size, bool f_memory, bool f_wipe) :
- CDBWrapper(GetDataDir() / "indexes" / "txindex", n_cache_size, f_memory, f_wipe)
-{}
-
-bool TxIndexDB::ReadTxPos(const uint256 &txid, CDiskTxPos& pos) const
-{
- return Read(std::make_pair(DB_TXINDEX, txid), pos);
-}
-
-bool TxIndexDB::WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_pos)
-{
- CDBBatch batch(*this);
- for (const auto& tuple : v_pos) {
- batch.Write(std::make_pair(DB_TXINDEX, tuple.first), tuple.second);
- }
- return WriteBatch(batch);
-}
-
-bool TxIndexDB::ReadBestBlock(CBlockLocator& locator) const
-{
- bool success = Read(DB_BEST_BLOCK, locator);
- if (!success) {
- locator.SetNull();
- }
- return success;
-}
-
-bool TxIndexDB::WriteBestBlock(const CBlockLocator& locator)
-{
- return Write(DB_BEST_BLOCK, locator);
-}
-
-/*
- * Safely persist a transfer of data from the old txindex database to the new one, and compact the
- * range of keys updated. This is used internally by MigrateData.
- */
-static void WriteTxIndexMigrationBatches(TxIndexDB& newdb, CBlockTreeDB& olddb,
- CDBBatch& batch_newdb, CDBBatch& batch_olddb,
- const std::pair<unsigned char, uint256>& begin_key,
- const std::pair<unsigned char, uint256>& end_key)
-{
- // Sync new DB changes to disk before deleting from old DB.
- newdb.WriteBatch(batch_newdb, /*fSync=*/ true);
- olddb.WriteBatch(batch_olddb);
- olddb.CompactRange(begin_key, end_key);
-
- batch_newdb.Clear();
- batch_olddb.Clear();
-}
-
-bool TxIndexDB::MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator& best_locator)
-{
- // The prior implementation of txindex was always in sync with block index
- // and presence was indicated with a boolean DB flag. If the flag is set,
- // this means the txindex from a previous version is valid and in sync with
- // the chain tip. The first step of the migration is to unset the flag and
- // write the chain hash to a separate key, DB_TXINDEX_BLOCK. After that, the
- // index entries are copied over in batches to the new database. Finally,
- // DB_TXINDEX_BLOCK is erased from the old database and the block hash is
- // written to the new database.
- //
- // Unsetting the boolean flag ensures that if the node is downgraded to a
- // previous version, it will not see a corrupted, partially migrated index
- // -- it will see that the txindex is disabled. When the node is upgraded
- // again, the migration will pick up where it left off and sync to the block
- // with hash DB_TXINDEX_BLOCK.
- bool f_legacy_flag = false;
- block_tree_db.ReadFlag("txindex", f_legacy_flag);
- if (f_legacy_flag) {
- if (!block_tree_db.Write(DB_TXINDEX_BLOCK, best_locator)) {
- return error("%s: cannot write block indicator", __func__);
- }
- if (!block_tree_db.WriteFlag("txindex", false)) {
- return error("%s: cannot write block index db flag", __func__);
- }
- }
-
- CBlockLocator locator;
- if (!block_tree_db.Read(DB_TXINDEX_BLOCK, locator)) {
- return true;
- }
-
- int64_t count = 0;
- LogPrintf("Upgrading txindex database... [0%%]\n");
- uiInterface.ShowProgress(_("Upgrading txindex database"), 0, true);
- int report_done = 0;
- const size_t batch_size = 1 << 24; // 16 MiB
-
- CDBBatch batch_newdb(*this);
- CDBBatch batch_olddb(block_tree_db);
-
- std::pair<unsigned char, uint256> key;
- std::pair<unsigned char, uint256> begin_key{DB_TXINDEX, uint256()};
- std::pair<unsigned char, uint256> prev_key = begin_key;
-
- bool interrupted = false;
- std::unique_ptr<CDBIterator> cursor(block_tree_db.NewIterator());
- for (cursor->Seek(begin_key); cursor->Valid(); cursor->Next()) {
- boost::this_thread::interruption_point();
- if (ShutdownRequested()) {
- interrupted = true;
- break;
- }
-
- if (!cursor->GetKey(key)) {
- return error("%s: cannot get key from valid cursor", __func__);
- }
- if (key.first != DB_TXINDEX) {
- break;
- }
-
- // Log progress every 10%.
- if (++count % 256 == 0) {
- // Since txids are uniformly random and traversed in increasing order, the high 16 bits
- // of the hash can be used to estimate the current progress.
- const uint256& txid = key.second;
- uint32_t high_nibble =
- (static_cast<uint32_t>(*(txid.begin() + 0)) << 8) +
- (static_cast<uint32_t>(*(txid.begin() + 1)) << 0);
- int percentage_done = (int)(high_nibble * 100.0 / 65536.0 + 0.5);
-
- uiInterface.ShowProgress(_("Upgrading txindex database"), percentage_done, true);
- if (report_done < percentage_done/10) {
- LogPrintf("Upgrading txindex database... [%d%%]\n", percentage_done);
- report_done = percentage_done/10;
- }
- }
-
- CDiskTxPos value;
- if (!cursor->GetValue(value)) {
- return error("%s: cannot parse txindex record", __func__);
- }
- batch_newdb.Write(key, value);
- batch_olddb.Erase(key);
-
- if (batch_newdb.SizeEstimate() > batch_size || batch_olddb.SizeEstimate() > batch_size) {
- // NOTE: it's OK to delete the key pointed at by the current DB cursor while iterating
- // because LevelDB iterators are guaranteed to provide a consistent view of the
- // underlying data, like a lightweight snapshot.
- WriteTxIndexMigrationBatches(*this, block_tree_db,
- batch_newdb, batch_olddb,
- prev_key, key);
- prev_key = key;
- }
- }
-
- // If these final DB batches complete the migration, write the best block
- // hash marker to the new database and delete from the old one. This signals
- // that the former is fully caught up to that point in the blockchain and
- // that all txindex entries have been removed from the latter.
- if (!interrupted) {
- batch_olddb.Erase(DB_TXINDEX_BLOCK);
- batch_newdb.Write(DB_BEST_BLOCK, locator);
- }
-
- WriteTxIndexMigrationBatches(*this, block_tree_db,
- batch_newdb, batch_olddb,
- begin_key, key);
-
- if (interrupted) {
- LogPrintf("[CANCELLED].\n");
- return false;
- }
-
- uiInterface.ShowProgress("", 100, false);
-
- LogPrintf("[DONE].\n");
- return true;
-}
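
The removed MigrateData() estimated its progress from the key bytes themselves rather than by counting entries up front: txids are uniformly distributed and the iterator visits them in serialized (lexicographic) order, so the first two bytes of the current key approximate the fraction of the keyspace already processed. A minimal restatement of that estimate (illustrative only):

    #include <cstdint>

    // Map the first two bytes of the current txid key to a 0-100 progress value,
    // as the removed loop above did with its "high_nibble" variable.
    int EstimateProgressPercent(const unsigned char* txid_bytes)
    {
        const uint32_t high_16_bits = (static_cast<uint32_t>(txid_bytes[0]) << 8) |
                                       static_cast<uint32_t>(txid_bytes[1]);
        return static_cast<int>(high_16_bits * 100.0 / 65536.0 + 0.5);
    }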
diff --git a/src/txdb.h b/src/txdb.h
index 4193f98de1..100adb428d 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -40,31 +40,6 @@ static const int64_t nMaxTxIndexCache = 1024;
//! Max memory allocated to coin DB specific cache (MiB)
static const int64_t nMaxCoinsDBCache = 8;
-struct CDiskTxPos : public CDiskBlockPos
-{
- unsigned int nTxOffset; // after header
-
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action) {
- READWRITEAS(CDiskBlockPos, *this);
- READWRITE(VARINT(nTxOffset));
- }
-
- CDiskTxPos(const CDiskBlockPos &blockIn, unsigned int nTxOffsetIn) : CDiskBlockPos(blockIn.nFile, blockIn.nPos), nTxOffset(nTxOffsetIn) {
- }
-
- CDiskTxPos() {
- SetNull();
- }
-
- void SetNull() {
- CDiskBlockPos::SetNull();
- nTxOffset = 0;
- }
-};
-
/** CCoinsView backed by the coin database (chainstate/) */
class CCoinsViewDB final : public CCoinsView
{
@@ -118,43 +93,9 @@ public:
bool ReadLastBlockFile(int &nFile);
bool WriteReindexing(bool fReindexing);
bool ReadReindexing(bool &fReindexing);
- bool ReadTxIndex(const uint256 &txid, CDiskTxPos &pos);
- bool WriteTxIndex(const std::vector<std::pair<uint256, CDiskTxPos> > &vect);
bool WriteFlag(const std::string &name, bool fValue);
bool ReadFlag(const std::string &name, bool &fValue);
bool LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex);
};
-/**
- * Access to the txindex database (indexes/txindex/)
- *
- * The database stores a block locator of the chain the database is synced to
- * so that the TxIndex can efficiently determine the point it last stopped at.
- * A locator is used instead of a simple hash of the chain tip because blocks
- * and block index entries may not be flushed to disk until after this database
- * is updated.
- */
-class TxIndexDB : public CDBWrapper
-{
-public:
- explicit TxIndexDB(size_t n_cache_size, bool f_memory = false, bool f_wipe = false);
-
- /// Read the disk location of the transaction data with the given hash. Returns false if the
- /// transaction hash is not indexed.
- bool ReadTxPos(const uint256& txid, CDiskTxPos& pos) const;
-
- /// Write a batch of transaction positions to the DB.
- bool WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_pos);
-
- /// Read block locator of the chain that the txindex is in sync with.
- bool ReadBestBlock(CBlockLocator& locator) const;
-
- /// Write block locator of the chain that the txindex is in sync with.
- bool WriteBestBlock(const CBlockLocator& locator);
-
- /// Migrate txindex data from the block tree DB, where it may be for older nodes that have not
- /// been upgraded yet to the new database.
- bool MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator& best_locator);
-};
-
#endif // BITCOIN_TXDB_H
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index bb585fc075..1c6dba0c9c 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -693,18 +693,18 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const
// Check children against mapNextTx
CTxMemPool::setEntries setChildrenCheck;
auto iter = mapNextTx.lower_bound(COutPoint(it->GetTx().GetHash(), 0));
- int64_t childSizes = 0;
+ uint64_t child_sizes = 0;
for (; iter != mapNextTx.end() && iter->first->hash == it->GetTx().GetHash(); ++iter) {
txiter childit = mapTx.find(iter->second->GetHash());
assert(childit != mapTx.end()); // mapNextTx points to in-mempool transactions
if (setChildrenCheck.insert(childit).second) {
- childSizes += childit->GetTxSize();
+ child_sizes += childit->GetTxSize();
}
}
assert(setChildrenCheck == GetMemPoolChildren(it));
// Also check to make sure size is greater than sum with immediate children.
// just a sanity check, not definitive that this calc is correct...
- assert(it->GetSizeWithDescendants() >= childSizes + it->GetTxSize());
+ assert(it->GetSizeWithDescendants() >= child_sizes + it->GetTxSize());
if (fDependsWait)
waitingOnDependants.push_back(&(*it));
@@ -1055,11 +1055,36 @@ void CTxMemPool::TrimToSize(size_t sizelimit, std::vector<COutPoint>* pvNoSpends
}
}
-bool CTxMemPool::TransactionWithinChainLimit(const uint256& txid, size_t chainLimit) const {
+uint64_t CTxMemPool::CalculateDescendantMaximum(txiter entry) const {
+ // find parent with highest descendant count
+ std::vector<txiter> candidates;
+ setEntries counted;
+ candidates.push_back(entry);
+ uint64_t maximum = 0;
+ while (candidates.size()) {
+ txiter candidate = candidates.back();
+ candidates.pop_back();
+ if (!counted.insert(candidate).second) continue;
+ const setEntries& parents = GetMemPoolParents(candidate);
+ if (parents.size() == 0) {
+ maximum = std::max(maximum, candidate->GetCountWithDescendants());
+ } else {
+ for (txiter i : parents) {
+ candidates.push_back(i);
+ }
+ }
+ }
+ return maximum;
+}
+
+void CTxMemPool::GetTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants) const {
LOCK(cs);
auto it = mapTx.find(txid);
- return it == mapTx.end() || (it->GetCountWithAncestors() < chainLimit &&
- it->GetCountWithDescendants() < chainLimit);
+ ancestors = descendants = 0;
+ if (it != mapTx.end()) {
+ ancestors = it->GetCountWithAncestors();
+ descendants = CalculateDescendantMaximum(it);
+ }
}
SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand(std::numeric_limits<uint64_t>::max())), k1(GetRand(std::numeric_limits<uint64_t>::max())) {}
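
Unlike the TransactionWithinChainLimit() predicate it replaces, GetTransactionAncestry() leaves the policy decision to the caller: both counts include the transaction itself, both are left at zero when the txid is not in the mempool, and the descendant figure is CalculateDescendantMaximum(), i.e. the largest descendant count among the entry's parentless ancestors (an upper bound on its own descendant count). A minimal usage sketch (the helper is hypothetical; requires txmempool.h):

    // Hypothetical caller-side check built on the new accessor.
    bool WithinChainLimits(const CTxMemPool& pool, const uint256& txid, size_t limit)
    {
        size_t ancestors, descendants;
        pool.GetTransactionAncestry(txid, ancestors, descendants);
        // Not in the mempool -> both counts are 0 and the check trivially passes.
        return ancestors <= limit && descendants <= limit;
    }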
diff --git a/src/txmempool.h b/src/txmempool.h
index ca7b1cd4be..bda812b42f 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -498,6 +498,7 @@ public:
const setEntries & GetMemPoolParents(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs);
const setEntries & GetMemPoolChildren(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs);
+ uint64_t CalculateDescendantMaximum(txiter entry) const EXCLUSIVE_LOCKS_REQUIRED(cs);
private:
typedef std::map<txiter, setEntries, CompareIteratorByHash> cacheMap;
@@ -619,8 +620,11 @@ public:
/** Expire all transactions (and their dependencies) in the mempool older than time. Return the number of removed transactions. */
int Expire(int64_t time);
- /** Returns false if the transaction is in the mempool and not within the chain limit specified. */
- bool TransactionWithinChainLimit(const uint256& txid, size_t chainLimit) const;
+ /**
+ * Calculate the ancestor and descendant count for the given transaction.
+ * The counts include the transaction itself.
+ */
+ void GetTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants) const;
unsigned long size()
{
diff --git a/src/validation.h b/src/validation.h
index b5ab10786a..04f5b6cb80 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -497,4 +497,10 @@ bool DumpMempool();
/** Load the mempool from disk. */
bool LoadMempool();
+//! Check whether the block associated with this index entry is pruned or not.
+inline bool IsBlockPruned(const CBlockIndex* pblockindex)
+{
+ return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
+}
+
#endif // BITCOIN_VALIDATION_H
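
IsBlockPruned() simply names the compound test that rest.cpp and rpc/blockchain.cpp previously spelled out inline, so those call sites keep their behaviour. An annotated reading of the three conjuncts (the comments are illustrative, not from the source):

    // fHavePruned                               - pruning has deleted at least one block file
    // !(pblockindex->nStatus & BLOCK_HAVE_DATA) - this block's data is not on disk
    // pblockindex->nTx > 0                      - the block was fully received at some point,
    //                                             so missing data means "pruned" rather than
    //                                             "never downloaded"
    if (IsBlockPruned(pblockindex)) {
        // callers such as rest_block() and GetBlockChecked() report
        // "not available (pruned data)" here
    }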
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 456f08bc14..84e91643ba 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -198,7 +198,7 @@ CTxDestination GetLabelDestination(CWallet* const pwallet, const std::string& la
return dest;
}
-static UniValue getlabeladdress(const JSONRPCRequest& request)
+static UniValue getaccountaddress(const JSONRPCRequest& request)
{
std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
CWallet* const pwallet = wallet.get();
@@ -207,53 +207,36 @@ static UniValue getlabeladdress(const JSONRPCRequest& request)
return NullUniValue;
}
- if (!IsDeprecatedRPCEnabled("accounts") && request.strMethod == "getaccountaddress") {
+ if (!IsDeprecatedRPCEnabled("accounts")) {
if (request.fHelp) {
throw std::runtime_error("getaccountaddress (Deprecated, will be removed in V0.18. To use this command, start bitcoind with -deprecatedrpc=accounts)");
}
throw JSONRPCError(RPC_METHOD_DEPRECATED, "getaccountaddress is deprecated and will be removed in V0.18. To use this command, start bitcoind with -deprecatedrpc=accounts.");
}
- if (request.fHelp || request.params.size() < 1 || request.params.size() > 2)
+ if (request.fHelp || request.params.size() != 1)
throw std::runtime_error(
- "getlabeladdress \"label\" ( force ) \n"
- "\nReturns the default receiving address for this label. This will reset to a fresh address once there's a transaction that spends to it.\n"
+ "getaccountaddress \"account\"\n"
+ "\n\nDEPRECATED. Returns the current Bitcoin address for receiving payments to this account.\n"
"\nArguments:\n"
- "1. \"label\" (string, required) The label for the address. It can also be set to the empty string \"\" to represent the default label.\n"
- "2. \"force\" (bool, optional) Whether the label should be created if it does not yet exist. If False, the RPC will return an error if called with a label that doesn't exist.\n"
- " Defaults to false (unless the getaccountaddress method alias is being called, in which case defaults to true for backwards compatibility).\n"
+ "1. \"account\" (string, required) The account for the address. It can also be set to the empty string \"\" to represent the default account. The account does not need to exist, it will be created and a new address created if there is no account by the given name.\n"
"\nResult:\n"
- "\"address\" (string) The current receiving address for the label.\n"
+ "\"address\" (string) The account bitcoin address\n"
"\nExamples:\n"
- + HelpExampleCli("getlabeladdress", "")
- + HelpExampleCli("getlabeladdress", "\"\"")
- + HelpExampleCli("getlabeladdress", "\"mylabel\"")
- + HelpExampleRpc("getlabeladdress", "\"mylabel\"")
+ + HelpExampleCli("getaccountaddress", "")
+ + HelpExampleCli("getaccountaddress", "\"\"")
+ + HelpExampleCli("getaccountaddress", "\"myaccount\"")
+ + HelpExampleRpc("getaccountaddress", "\"myaccount\"")
);
LOCK2(cs_main, pwallet->cs_wallet);
- // Parse the label first so we don't generate a key if there's an error
- std::string label = LabelFromValue(request.params[0]);
- bool force = request.strMethod == "getaccountaddress";
- if (!request.params[1].isNull()) {
- force = request.params[1].get_bool();
- }
-
- bool label_found = false;
- for (const std::pair<CTxDestination, CAddressBookData>& item : pwallet->mapAddressBook) {
- if (item.second.name == label) {
- label_found = true;
- break;
- }
- }
- if (!force && !label_found) {
- throw JSONRPCError(RPC_WALLET_INVALID_LABEL_NAME, std::string("No addresses with label " + label));
- }
+ // Parse the account first so we don't generate a key if there's an error
+ std::string account = LabelFromValue(request.params[0]);
UniValue ret(UniValue::VSTR);
- ret = EncodeDestination(GetLabelDestination(pwallet, label));
+ ret = EncodeDestination(GetLabelDestination(pwallet, account));
return ret;
}
@@ -343,23 +326,33 @@ static UniValue setlabel(const JSONRPCRequest& request)
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid Bitcoin address");
}
+ std::string old_label = pwallet->mapAddressBook[dest].name;
std::string label = LabelFromValue(request.params[1]);
if (IsMine(*pwallet, dest)) {
- // Detect when changing the label of an address that is the receiving address of another label:
- // If so, delete the account record for it. Labels, unlike addresses, can be deleted,
- // and if we wouldn't do this, the record would stick around forever.
- if (pwallet->mapAddressBook.count(dest)) {
- std::string old_label = pwallet->mapAddressBook[dest].name;
- if (old_label != label && dest == GetLabelDestination(pwallet, old_label)) {
- pwallet->DeleteLabel(old_label);
- }
- }
pwallet->SetAddressBook(dest, label, "receive");
+ if (request.strMethod == "setaccount" && old_label != label && dest == GetLabelDestination(pwallet, old_label)) {
+ // for setaccount, call GetLabelDestination so a new receive address is created for the old account
+ GetLabelDestination(pwallet, old_label, true);
+ }
} else {
pwallet->SetAddressBook(dest, label, "send");
}
+ // Detect when there are no addresses using this label.
+ // If so, delete the account record for it. Labels, unlike addresses, can be deleted,
+    // and if we didn't do this, the record would stick around forever.
+ bool found_address = false;
+ for (const std::pair<CTxDestination, CAddressBookData>& item : pwallet->mapAddressBook) {
+ if (item.second.name == label) {
+ found_address = true;
+ break;
+ }
+ }
+ if (!found_address) {
+ pwallet->DeleteLabel(old_label);
+ }
+
return NullUniValue;
}
@@ -4404,7 +4397,7 @@ static const CRPCCommand commands[] =
{ "wallet", "sethdseed", &sethdseed, {"newkeypool","seed"} },
/** Account functions (deprecated) */
- { "wallet", "getaccountaddress", &getlabeladdress, {"account"} },
+ { "wallet", "getaccountaddress", &getaccountaddress, {"account"} },
{ "wallet", "getaccount", &getaccount, {"address"} },
{ "wallet", "getaddressesbyaccount", &getaddressesbyaccount, {"account"} },
{ "wallet", "getreceivedbyaccount", &getreceivedbylabel, {"account","minconf"} },
@@ -4414,7 +4407,6 @@ static const CRPCCommand commands[] =
{ "wallet", "move", &movecmd, {"fromaccount","toaccount","amount","minconf","comment"} },
/** Label functions (to replace non-balance account functions) */
- { "wallet", "getlabeladdress", &getlabeladdress, {"label","force"} },
{ "wallet", "getaddressesbylabel", &getaddressesbylabel, {"label"} },
{ "wallet", "getreceivedbylabel", &getreceivedbylabel, {"label","minconf"} },
{ "wallet", "listlabels", &listlabels, {"purpose"} },
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index c3597aace8..78cacc0206 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -2469,8 +2469,11 @@ bool CWallet::OutputEligibleForSpending(const COutput& output, const CoinEligibi
if (output.nDepth < (output.tx->IsFromMe(ISMINE_ALL) ? eligibility_filter.conf_mine : eligibility_filter.conf_theirs))
return false;
- if (!mempool.TransactionWithinChainLimit(output.tx->GetHash(), eligibility_filter.max_ancestors))
+ size_t ancestors, descendants;
+ mempool.GetTransactionAncestry(output.tx->GetHash(), ancestors, descendants);
+ if (ancestors > eligibility_filter.max_ancestors || descendants > eligibility_filter.max_descendants) {
return false;
+ }
return true;
}
@@ -2582,16 +2585,17 @@ bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAm
++it;
}
- size_t nMaxChainLength = std::min(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT), gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT));
+ size_t max_ancestors = (size_t)std::max<int64_t>(1, gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT));
+ size_t max_descendants = (size_t)std::max<int64_t>(1, gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT));
bool fRejectLongChains = gArgs.GetBoolArg("-walletrejectlongchains", DEFAULT_WALLET_REJECT_LONG_CHAINS);
bool res = nTargetValue <= nValueFromPresetInputs ||
SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(1, 6, 0), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used) ||
SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(1, 1, 0), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used) ||
(m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, 2), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
- (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, std::min((size_t)4, nMaxChainLength/3)), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
- (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, nMaxChainLength/2), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
- (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, nMaxChainLength), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
+ (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, std::min((size_t)4, max_ancestors/3), std::min((size_t)4, max_descendants/3)), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
+ (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, max_ancestors/2, max_descendants/2), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
+ (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, max_ancestors-1, max_descendants-1), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
(m_spend_zero_conf_change && !fRejectLongChains && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, std::numeric_limits<uint64_t>::max()), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used));
// because SelectCoinsMinConf clears the setCoinsRet, we now add the possible inputs to the coinset
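
For concreteness, the three replaced zero-conf fallbacks plus the final pass work out as follows when -limitancestorcount and -limitdescendantcount are left at their defaults (25 each at the time of this change; treat the exact figures as an assumption):

    //   max_ancestors = max_descendants = 25
    //   CoinEligibilityFilter(0, 1, min(4, 25/3), min(4, 25/3))  -> limits  4 /  4
    //   CoinEligibilityFilter(0, 1, 25/2, 25/2)                  -> limits 12 / 12
    //   CoinEligibilityFilter(0, 1, 25-1, 25-1)                  -> limits 24 / 24
    //   final pass: only without -walletrejectlongchains; limits effectively unbounded
    // OutputEligibleForSpending() rejects an output when either count exceeds its limit.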
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index f1761b0fcf..1ec2a9e771 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -662,8 +662,10 @@ struct CoinEligibilityFilter
const int conf_mine;
const int conf_theirs;
const uint64_t max_ancestors;
+ const uint64_t max_descendants;
- CoinEligibilityFilter(int conf_mine, int conf_theirs, uint64_t max_ancestors) : conf_mine(conf_mine), conf_theirs(conf_theirs), max_ancestors(max_ancestors) {}
+ CoinEligibilityFilter(int conf_mine, int conf_theirs, uint64_t max_ancestors) : conf_mine(conf_mine), conf_theirs(conf_theirs), max_ancestors(max_ancestors), max_descendants(max_ancestors) {}
+ CoinEligibilityFilter(int conf_mine, int conf_theirs, uint64_t max_ancestors, uint64_t max_descendants) : conf_mine(conf_mine), conf_theirs(conf_theirs), max_ancestors(max_ancestors), max_descendants(max_descendants) {}
};
class WalletRescanReserver; //forward declarations for ScanForWalletTransactions/RescanFromTime
diff --git a/test/functional/rpc_deprecated.py b/test/functional/rpc_deprecated.py
index 2e0500e7c4..bc27c183b1 100755
--- a/test/functional/rpc_deprecated.py
+++ b/test/functional/rpc_deprecated.py
@@ -40,7 +40,6 @@ class DeprecatedRpcTest(BitcoinTestFramework):
#
# The following 'label' RPC methods are usable both with and without the
# -deprecatedrpc=accounts switch enabled.
- # - getlabeladdress
# - getaddressesbylabel
# - getreceivedbylabel
# - listlabels
@@ -69,10 +68,6 @@ class DeprecatedRpcTest(BitcoinTestFramework):
assert_raises_rpc_error(-32, "getaccountaddress is deprecated", self.nodes[0].getaccountaddress, "label0")
self.nodes[1].getaccountaddress("label1")
- self.log.info("- getlabeladdress")
- self.nodes[0].getlabeladdress("label0")
- self.nodes[1].getlabeladdress("label1")
-
self.log.info("- getaddressesbyaccount")
assert_raises_rpc_error(-32, "getaddressesbyaccount is deprecated", self.nodes[0].getaddressesbyaccount, "label0")
self.nodes[1].getaddressesbyaccount("label1")
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 65e4c0817e..5b3a4df0f9 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -419,10 +419,6 @@ class TestHandler:
self.test_list = test_list
self.flags = flags
self.num_running = 0
- # In case there is a graveyard of zombie bitcoinds, we can apply a
- # pseudorandom offset to hopefully jump over them.
- # (625 is PORT_RANGE/MAX_NODES)
- self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
@@ -430,7 +426,7 @@ class TestHandler:
# Add tests
self.num_running += 1
test = self.test_list.pop(0)
- portseed = len(self.test_list) + self.portseed_offset
+ portseed = len(self.test_list)
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
diff --git a/test/functional/wallet_labels.py b/test/functional/wallet_labels.py
index 705dd8985e..f5d7830fca 100755
--- a/test/functional/wallet_labels.py
+++ b/test/functional/wallet_labels.py
@@ -5,7 +5,7 @@
"""Test label RPCs.
RPCs tested are:
- - getlabeladdress
+ - getaccountaddress
- getaddressesbyaccount/getaddressesbylabel
- listaddressgroupings
- setlabel
@@ -92,17 +92,24 @@ class WalletLabelsTest(BitcoinTestFramework):
# recognize the label/address associations.
labels = [Label(name, accounts_api) for name in ("a", "b", "c", "d", "e")]
for label in labels:
- label.add_receive_address(node.getlabeladdress(label=label.name, force=True))
+ if accounts_api:
+ address = node.getaccountaddress(label.name)
+ else:
+ address = node.getnewaddress(label.name)
+ label.add_receive_address(address)
label.verify(node)
# Check all labels are returned by listlabels.
assert_equal(node.listlabels(), [label.name for label in labels])
# Send a transaction to each label, and make sure this forces
- # getlabeladdress to generate a new receiving address.
+ # getaccountaddress to generate a new receiving address.
for label in labels:
- node.sendtoaddress(label.receive_address, amount_to_send)
- label.add_receive_address(node.getlabeladdress(label.name))
+ if accounts_api:
+ node.sendtoaddress(label.receive_address, amount_to_send)
+ label.add_receive_address(node.getaccountaddress(label.name))
+ else:
+ node.sendtoaddress(label.addresses[0], amount_to_send)
label.verify(node)
# Check the amounts received.
@@ -115,10 +122,17 @@ class WalletLabelsTest(BitcoinTestFramework):
# Check that sendfrom label reduces listaccounts balances.
for i, label in enumerate(labels):
to_label = labels[(i + 1) % len(labels)]
- node.sendfrom(label.name, to_label.receive_address, amount_to_send)
+ if accounts_api:
+ node.sendfrom(label.name, to_label.receive_address, amount_to_send)
+ else:
+ node.sendfrom(label.name, to_label.addresses[0], amount_to_send)
node.generate(1)
for label in labels:
- label.add_receive_address(node.getlabeladdress(label.name))
+ if accounts_api:
+ address = node.getaccountaddress(label.name)
+ else:
+ address = node.getnewaddress(label.name)
+ label.add_receive_address(address)
label.verify(node)
assert_equal(node.getreceivedbylabel(label.name), 2)
if accounts_api:
@@ -134,12 +148,12 @@ class WalletLabelsTest(BitcoinTestFramework):
# Check that setlabel can assign a label to a new unused address.
for label in labels:
- address = node.getlabeladdress(label="", force=True)
+ address = node.getnewaddress()
node.setlabel(address, label.name)
label.add_address(address)
label.verify(node)
if accounts_api:
- assert(address not in node.getaddressesbyaccount(""))
+ assert address not in node.getaddressesbyaccount("")
else:
assert_raises_rpc_error(-11, "No addresses with label", node.getaddressesbylabel, "")
@@ -160,19 +174,20 @@ class WalletLabelsTest(BitcoinTestFramework):
# Check that setlabel can change the label of an address from a
# different label.
- change_label(node, labels[0].addresses[0], labels[0], labels[1])
-
- # Check that setlabel can change the label of an address which
- # is the receiving address of a different label.
- change_label(node, labels[0].receive_address, labels[0], labels[1])
+ change_label(node, labels[0].addresses[0], labels[0], labels[1], accounts_api)
# Check that setlabel can set the label of an address already
# in the label. This is a no-op.
- change_label(node, labels[2].addresses[0], labels[2], labels[2])
+ change_label(node, labels[2].addresses[0], labels[2], labels[2], accounts_api)
+
+ if accounts_api:
+ # Check that setaccount can change the label of an address which
+ # is the receiving address of a different label.
+ change_label(node, labels[0].receive_address, labels[0], labels[1], accounts_api)
- # Check that setlabel can set the label of an address which is
- # already the receiving address of the label. This is a no-op.
- change_label(node, labels[2].receive_address, labels[2], labels[2])
+ # Check that setaccount can set the label of an address which is
+ # already the receiving address of the label. This is a no-op.
+ change_label(node, labels[2].receive_address, labels[2], labels[2], accounts_api)
class Label:
def __init__(self, name, accounts_api):
@@ -192,12 +207,14 @@ class Label:
def add_receive_address(self, address):
self.add_address(address)
- self.receive_address = address
+ if self.accounts_api:
+ self.receive_address = address
def verify(self, node):
if self.receive_address is not None:
assert self.receive_address in self.addresses
- assert_equal(node.getlabeladdress(self.name), self.receive_address)
+ if self.accounts_api:
+ assert_equal(node.getaccountaddress(self.name), self.receive_address)
for address in self.addresses:
assert_equal(
@@ -216,22 +233,26 @@ class Label:
assert_equal(set(node.getaddressesbyaccount(self.name)), set(self.addresses))
-def change_label(node, address, old_label, new_label):
+def change_label(node, address, old_label, new_label, accounts_api):
assert_equal(address in old_label.addresses, True)
- node.setlabel(address, new_label.name)
+ if accounts_api:
+ node.setaccount(address, new_label.name)
+ else:
+ node.setlabel(address, new_label.name)
old_label.addresses.remove(address)
new_label.add_address(address)
- # Calling setlabel on an address which was previously the receiving
- # address of a different label should reset the receiving address of
- # the old label, causing getlabeladdress to return a brand new
+ # Calling setaccount on an address which was previously the receiving
+ # address of a different account should reset the receiving address of
+ # the old account, causing getaccountaddress to return a brand new
# address.
- if old_label.name != new_label.name and address == old_label.receive_address:
- new_address = node.getlabeladdress(old_label.name)
- assert_equal(new_address not in old_label.addresses, True)
- assert_equal(new_address not in new_label.addresses, True)
- old_label.add_receive_address(new_address)
+ if accounts_api:
+ if old_label.name != new_label.name and address == old_label.receive_address:
+ new_address = node.getaccountaddress(old_label.name)
+ assert_equal(new_address not in old_label.addresses, True)
+ assert_equal(new_address not in new_label.addresses, True)
+ old_label.add_receive_address(new_address)
old_label.verify(node)
new_label.verify(node)
diff --git a/test/lint/lint-includes.sh b/test/lint/lint-includes.sh
index b0ba5f63af..f5daf4f9ac 100755
--- a/test/lint/lint-includes.sh
+++ b/test/lint/lint-includes.sh
@@ -36,6 +36,14 @@ for CPP_FILE in $(filter_suffix cpp); do
fi
done
+INCLUDED_CPP_FILES=$(git grep -E "^#include [<\"][^>\"]+\.cpp[>\"]" -- "*.cpp" "*.h")
+if [[ ${INCLUDED_CPP_FILES} != "" ]]; then
+ echo "The following files #include .cpp files:"
+ echo "${INCLUDED_CPP_FILES}"
+ echo
+ EXIT_CODE=1
+fi
+
EXPECTED_BOOST_INCLUDES=(
boost/algorithm/string.hpp
boost/algorithm/string/case_conv.hpp
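
The new lint-includes.sh check above rejects #include of .cpp files because a .cpp that is both included and compiled on its own ends up defining the same symbols twice. A hypothetical two-file illustration (file names invented for the example):

    // util_helpers.cpp
    int Twice(int x) { return 2 * x; }

    // caller.cpp
    #include "util_helpers.cpp"   // pulls the *definition* into this translation unit
    int Quadruple(int x) { return Twice(Twice(x)); }

    // If the build also compiles util_helpers.cpp separately, Twice() exists in two
    // object files and linking fails with a duplicate-symbol error.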
diff --git a/test/lint/lint-locale-dependence.sh b/test/lint/lint-locale-dependence.sh
new file mode 100755
index 0000000000..3144f2c841
--- /dev/null
+++ b/test/lint/lint-locale-dependence.sh
@@ -0,0 +1,229 @@
+#!/bin/bash
+
+KNOWN_VIOLATIONS=(
+ "src/base58.cpp:.*isspace"
+ "src/bitcoin-tx.cpp.*stoul"
+ "src/bitcoin-tx.cpp.*trim_right"
+ "src/bitcoin-tx.cpp:.*atoi"
+ "src/core_read.cpp.*is_digit"
+ "src/dbwrapper.cpp.*stoul"
+ "src/dbwrapper.cpp:.*vsnprintf"
+ "src/httprpc.cpp.*trim"
+ "src/init.cpp:.*atoi"
+ "src/netbase.cpp.*to_lower"
+ "src/qt/rpcconsole.cpp:.*atoi"
+ "src/qt/rpcconsole.cpp:.*isdigit"
+ "src/rest.cpp:.*strtol"
+ "src/rpc/server.cpp.*to_upper"
+ "src/test/dbwrapper_tests.cpp:.*snprintf"
+ "src/test/getarg_tests.cpp.*split"
+ "src/torcontrol.cpp:.*atoi"
+ "src/torcontrol.cpp:.*strtol"
+ "src/uint256.cpp:.*isspace"
+ "src/uint256.cpp:.*tolower"
+ "src/util.cpp:.*atoi"
+ "src/util.cpp:.*fprintf"
+ "src/util.cpp:.*tolower"
+ "src/utilmoneystr.cpp:.*isdigit"
+ "src/utilmoneystr.cpp:.*isspace"
+ "src/utilstrencodings.cpp:.*atoi"
+ "src/utilstrencodings.cpp:.*isspace"
+ "src/utilstrencodings.cpp:.*strtol"
+ "src/utilstrencodings.cpp:.*strtoll"
+ "src/utilstrencodings.cpp:.*strtoul"
+ "src/utilstrencodings.cpp:.*strtoull"
+ "src/utilstrencodings.h:.*atoi"
+)
+
+REGEXP_IGNORE_EXTERNAL_DEPENDENCIES="^src/(crypto/ctaes/|leveldb/|secp256k1/|tinyformat.h|univalue/)"
+
+LOCALE_DEPENDENT_FUNCTIONS=(
+ alphasort # LC_COLLATE (via strcoll)
+ asctime # LC_TIME (directly)
+ asprintf # (via vasprintf)
+ atof # LC_NUMERIC (via strtod)
+ atoi # LC_NUMERIC (via strtol)
+ atol # LC_NUMERIC (via strtol)
+ atoll # (via strtoll)
+ atoq
+ btowc # LC_CTYPE (directly)
+ ctime # (via asctime or localtime)
+ dprintf # (via vdprintf)
+ fgetwc
+ fgetws
+ fold_case # boost::locale::fold_case
+ fprintf # (via vfprintf)
+ fputwc
+ fputws
+ fscanf # (via __vfscanf)
+ fwprintf # (via __vfwprintf)
+ getdate # via __getdate_r => isspace // __localtime_r
+ getwc
+ getwchar
+ is_digit # boost::algorithm::is_digit
+ is_space # boost::algorithm::is_space
+ isalnum # LC_CTYPE
+ isalpha # LC_CTYPE
+ isblank # LC_CTYPE
+ iscntrl # LC_CTYPE
+ isctype # LC_CTYPE
+ isdigit # LC_CTYPE
+ isgraph # LC_CTYPE
+ islower # LC_CTYPE
+ isprint # LC_CTYPE
+ ispunct # LC_CTYPE
+ isspace # LC_CTYPE
+ isupper # LC_CTYPE
+ iswalnum # LC_CTYPE
+ iswalpha # LC_CTYPE
+ iswblank # LC_CTYPE
+ iswcntrl # LC_CTYPE
+ iswctype # LC_CTYPE
+ iswdigit # LC_CTYPE
+ iswgraph # LC_CTYPE
+ iswlower # LC_CTYPE
+ iswprint # LC_CTYPE
+ iswpunct # LC_CTYPE
+ iswspace # LC_CTYPE
+ iswupper # LC_CTYPE
+ iswxdigit # LC_CTYPE
+ isxdigit # LC_CTYPE
+ localeconv # LC_NUMERIC + LC_MONETARY
+ mblen # LC_CTYPE
+ mbrlen
+ mbrtowc
+ mbsinit
+ mbsnrtowcs
+ mbsrtowcs
+ mbstowcs # LC_CTYPE
+ mbtowc # LC_CTYPE
+ mktime
+ normalize # boost::locale::normalize
+# printf # LC_NUMERIC
+ putwc
+ putwchar
+ scanf # LC_NUMERIC
+ setlocale
+ snprintf
+ sprintf
+ sscanf
+ stod
+ stof
+ stoi
+ stol
+ stold
+ stoll
+ stoul
+ stoull
+ strcasecmp
+ strcasestr
+ strcoll # LC_COLLATE
+# strerror
+ strfmon
+ strftime # LC_TIME
+ strncasecmp
+ strptime
+ strtod # LC_NUMERIC
+ strtof
+ strtoimax
+ strtol # LC_NUMERIC
+ strtold
+ strtoll
+ strtoq
+ strtoul # LC_NUMERIC
+ strtoull
+ strtoumax
+ strtouq
+ strxfrm # LC_COLLATE
+ swprintf
+ to_lower # boost::locale::to_lower
+ to_title # boost::locale::to_title
+ to_upper # boost::locale::to_upper
+ tolower # LC_CTYPE
+ toupper # LC_CTYPE
+ towctrans
+ towlower # LC_CTYPE
+ towupper # LC_CTYPE
+ trim # boost::algorithm::trim
+ trim_left # boost::algorithm::trim_left
+ trim_right # boost::algorithm::trim_right
+ ungetwc
+ vasprintf
+ vdprintf
+ versionsort
+ vfprintf
+ vfscanf
+ vfwprintf
+ vprintf
+ vscanf
+ vsnprintf
+ vsprintf
+ vsscanf
+ vswprintf
+ vwprintf
+ wcrtomb
+ wcscasecmp
+ wcscoll # LC_COLLATE
+ wcsftime # LC_TIME
+ wcsncasecmp
+ wcsnrtombs
+ wcsrtombs
+ wcstod # LC_NUMERIC
+ wcstof
+ wcstoimax
+ wcstol # LC_NUMERIC
+ wcstold
+ wcstoll
+ wcstombs # LC_CTYPE
+ wcstoul # LC_NUMERIC
+ wcstoull
+ wcstoumax
+ wcswidth
+ wcsxfrm # LC_COLLATE
+ wctob
+ wctomb # LC_CTYPE
+ wctrans
+ wctype
+ wcwidth
+ wprintf
+)
+
+function join_array {
+ local IFS="$1"
+ shift
+ echo "$*"
+}
+
+REGEXP_IGNORE_KNOWN_VIOLATIONS=$(join_array "|" "${KNOWN_VIOLATIONS[@]}")
+
+# Invoke "git grep" only once in order to minimize run-time
+REGEXP_LOCALE_DEPENDENT_FUNCTIONS=$(join_array "|" "${LOCALE_DEPENDENT_FUNCTIONS[@]}")
+GIT_GREP_OUTPUT=$(git grep -E "[^a-zA-Z0-9_\`'\"<>](${REGEXP_LOCALE_DEPENDENT_FUNCTIONS}(|_r|_s))[^a-zA-Z0-9_\`'\"<>]" -- "*.cpp" "*.h")
+
+EXIT_CODE=0
+for LOCALE_DEPENDENT_FUNCTION in "${LOCALE_DEPENDENT_FUNCTIONS[@]}"; do
+ MATCHES=$(grep -E "[^a-zA-Z0-9_\`'\"<>]${LOCALE_DEPENDENT_FUNCTION}(|_r|_s)[^a-zA-Z0-9_\`'\"<>]" <<< "${GIT_GREP_OUTPUT}" | \
+ grep -vE "\.(c|cpp|h):\s*(//|\*|/\*|\").*${LOCALE_DEPENDENT_FUNCTION}" | \
+ grep -vE 'fprintf\(.*(stdout|stderr)')
+ if [[ ${REGEXP_IGNORE_EXTERNAL_DEPENDENCIES} != "" ]]; then
+ MATCHES=$(grep -vE "${REGEXP_IGNORE_EXTERNAL_DEPENDENCIES}" <<< "${MATCHES}")
+ fi
+ if [[ ${REGEXP_IGNORE_KNOWN_VIOLATIONS} != "" ]]; then
+ MATCHES=$(grep -vE "${REGEXP_IGNORE_KNOWN_VIOLATIONS}" <<< "${MATCHES}")
+ fi
+ if [[ ${MATCHES} != "" ]]; then
+ echo "The locale dependent function ${LOCALE_DEPENDENT_FUNCTION}(...) appears to be used:"
+ echo "${MATCHES}"
+ echo
+ EXIT_CODE=1
+ fi
+done
+if [[ ${EXIT_CODE} != 0 ]]; then
+ echo "Unnecessary locale dependence can cause bugs that are very"
+ echo "tricky to isolate and fix. Please avoid using locale dependent"
+ echo "functions if possible."
+ echo
+ echo "Advice not applicable in this specific case? Add an exception"
+ echo "by updating the ignore list in $0"
+fi
+exit ${EXIT_CODE}
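
The closing message above states the motivation; a small C++ reproduction of the class of bug the script targets, assuming a system where the de_DE locale is installed:

    #include <clocale>
    #include <cstdlib>
    #include <iostream>

    int main()
    {
        const double before = std::strtod("1.5", nullptr); // 1.5 under the default "C" locale
        std::setlocale(LC_ALL, "de_DE.UTF-8");              // e.g. pulled in by a GUI toolkit
        const double after = std::strtod("1.5", nullptr);   // 1.0: ',' is now the decimal
                                                             // separator, parsing stops at '.'
        std::cout << (before == after) << '\n';              // prints 0 if de_DE was applied
    }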