-rw-r--r--  .appveyor.yml  2
-rw-r--r--  .cirrus.yml  2
-rw-r--r--  .travis.yml  31
-rw-r--r--  build_msvc/bitcoin_config.h  2
-rw-r--r--  ci/test/00_setup_env_mac_host.sh  1
-rw-r--r--  ci/test/00_setup_env_native_qt5.sh  2
-rw-r--r--  ci/test/00_setup_env_s390x_host.sh  14
-rwxr-xr-x  ci/test/04_install.sh  7
-rwxr-xr-x  ci/test/06_script_b.sh  9
-rw-r--r--  configure.ac  2
-rwxr-xr-x  contrib/devtools/security-check.py  20
-rw-r--r--  doc/developer-notes.md  2
-rw-r--r--  doc/release-notes.md  72
-rw-r--r--  src/Makefile.test.include  7
-rw-r--r--  src/init.cpp  16
-rw-r--r--  src/net.h  4
-rw-r--r--  src/net_processing.cpp  24
-rw-r--r--  src/rpc/blockchain.cpp  17
-rw-r--r--  src/test/fuzz/prevector.cpp  263
-rwxr-xr-x  test/functional/p2p_addr_relay.py  71
-rwxr-xr-x  test/functional/test_framework/test_node.py  4
-rwxr-xr-x  test/functional/test_runner.py  1
-rwxr-xr-x  test/fuzz/test_runner.py  1
23 files changed, 526 insertions(+), 48 deletions(-)
diff --git a/.appveyor.yml b/.appveyor.yml
index 777eebd2c3..18dc78fe5f 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -81,7 +81,7 @@ after_build:
- ps: clcache -z
#- 7z a bitcoin-%APPVEYOR_BUILD_VERSION%.zip %APPVEYOR_BUILD_FOLDER%\build_msvc\%platform%\%configuration%\*.exe
test_script:
-- cmd: src\test_bitcoin.exe -k stdout -e stdout 2> NUL
+- cmd: src\test_bitcoin.exe -l test_suite
- cmd: src\bench_bitcoin.exe -evals=1 -scaling=0 > NUL
- ps: python test\util\bitcoin-util-test.py
- cmd: python test\util\rpcauth-test.py
diff --git a/.cirrus.yml b/.cirrus.yml
index f4a3878ed8..f9c3d844be 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -37,7 +37,7 @@ task:
timeout_in: 60m
env:
MAKEJOBS: "-j9"
- RUN_CI_ON_HOST: "1"
+ DANGER_RUN_CI_ON_HOST: "1"
TEST_RUNNER_PORT_MIN: "14000" # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache
CCACHE_SIZE: "200M"
CCACHE_DIR: "/tmp/ccache_dir"
diff --git a/.travis.yml b/.travis.yml
index 878f5ce518..d66a66ef0c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -38,9 +38,6 @@ cache:
- $TRAVIS_BUILD_DIR/depends/sdk-sources
- $TRAVIS_BUILD_DIR/ci/scratch/.ccache
- $TRAVIS_BUILD_DIR/releases/$HOST
- # macOS
- - $HOME/Library/Caches/Homebrew
- - /usr/local/Homebrew
before_cache:
- if [ "${TRAVIS_OS_NAME}" = "osx" ]; then brew cleanup; fi
stages:
@@ -91,6 +88,26 @@ jobs:
FILE_ENV="./ci/test/00_setup_env_arm.sh"
QEMU_USER_CMD=""
+ - stage: test
+ name: 's390x native BE [GOAL: install] [bionic] [no depends, no GUI]'
+ arch: s390x
+ dist: bionic
+ addons:
+ apt:
+ packages:
+ - bsdmainutils
+ - libboost-filesystem-dev
+ - libboost-system-dev
+ - libboost-test-dev
+ - libboost-thread-dev
+ - libdb++-dev
+ - libdb-dev
+ - libevent-dev
+ env: >-
+ DANGER_RUN_CI_ON_HOST=true
+ CI_USE_APT_INSTALL=no
+ FILE_ENV="./ci/test/00_setup_env_s390x_host.sh"
+
# s390 build was disabled temporarily because of disk space issues on the Travis VM
#
# - stage: test
@@ -158,6 +175,12 @@ jobs:
# Xcode 11.3.1, macOS 10.14, SDK 10.15
# https://docs.travis-ci.com/user/reference/osx/#macos-version
osx_image: xcode11.3
+ cache:
+ directories:
+ - $TRAVIS_BUILD_DIR/ci/scratch/.ccache
+ - $TRAVIS_BUILD_DIR/releases/$HOST
+ - $HOME/Library/Caches/Homebrew
+ - /usr/local/Homebrew
addons:
homebrew:
packages:
@@ -171,4 +194,6 @@ jobs:
- ccache
- zeromq
env: >-
+ DANGER_RUN_CI_ON_HOST=true
+ CI_USE_APT_INSTALL=no
FILE_ENV="./ci/test/00_setup_env_mac_host.sh"
diff --git a/build_msvc/bitcoin_config.h b/build_msvc/bitcoin_config.h
index 5f0640ac27..d63ff2b9d5 100644
--- a/build_msvc/bitcoin_config.h
+++ b/build_msvc/bitcoin_config.h
@@ -18,7 +18,7 @@
#define CLIENT_VERSION_MAJOR 0
/* Minor version */
-#define CLIENT_VERSION_MINOR 19
+#define CLIENT_VERSION_MINOR 20
/* Build revision */
#define CLIENT_VERSION_REVISION 99
diff --git a/ci/test/00_setup_env_mac_host.sh b/ci/test/00_setup_env_mac_host.sh
index 5753c3af31..a90d83734e 100644
--- a/ci/test/00_setup_env_mac_host.sh
+++ b/ci/test/00_setup_env_mac_host.sh
@@ -8,7 +8,6 @@ export LC_ALL=C.UTF-8
export HOST=x86_64-apple-darwin16
export PIP_PACKAGES="zmq"
-export RUN_CI_ON_HOST=true
export RUN_UNIT_TESTS=true
export RUN_FUNCTIONAL_TESTS=false
export GOAL="install"
diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh
index 21c15236d2..a5025339b6 100644
--- a/ci/test/00_setup_env_native_qt5.sh
+++ b/ci/test/00_setup_env_native_qt5.sh
@@ -10,6 +10,8 @@ export CONTAINER_NAME=ci_native_qt5
export PACKAGES="python3-zmq qtbase5-dev qttools5-dev-tools libdbus-1-dev libharfbuzz-dev"
export DEP_OPTS="NO_QT=1 NO_UPNP=1 DEBUG=1 ALLOW_HOST_PACKAGES=1"
export TEST_RUNNER_EXTRA="--coverage --extended --exclude feature_dbcrash" # Run extended tests so that coverage does not fail, but exclude the very slow dbcrash
+export RUN_UNIT_TESTS_SEQUENTIAL="true"
+export RUN_UNIT_TESTS="false"
export GOAL="install"
export TEST_PREVIOUS_RELEASES=true
export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\""
diff --git a/ci/test/00_setup_env_s390x_host.sh b/ci/test/00_setup_env_s390x_host.sh
new file mode 100644
index 0000000000..8f3063604e
--- /dev/null
+++ b/ci/test/00_setup_env_s390x_host.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2019 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+export LC_ALL=C.UTF-8
+
+export HOST=s390x-linux-gnu
+export NO_DEPENDS=1
+export BITCOIN_CONFIG="--with-incompatible-bdb --enable-reduce-exports"
+export RUN_UNIT_TESTS=true
+export RUN_FUNCTIONAL_TESTS=true
+export GOAL="install"
diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh
index acf7eeb920..3cfaf77fde 100755
--- a/ci/test/04_install.sh
+++ b/ci/test/04_install.sh
@@ -35,7 +35,7 @@ fi
export P_CI_DIR="$PWD"
-if [ -z "$RUN_CI_ON_HOST" ]; then
+if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
echo "Creating $DOCKER_NAME_TAG container to run in"
${CI_RETRY_EXE} docker pull "$DOCKER_NAME_TAG"
@@ -66,7 +66,7 @@ fi
if [[ $DOCKER_NAME_TAG == centos* ]]; then
${CI_RETRY_EXE} DOCKER_EXEC yum -y install epel-release
${CI_RETRY_EXE} DOCKER_EXEC yum -y install $DOCKER_PACKAGES $PACKAGES
-elif [ "$TRAVIS_OS_NAME" != "osx" ]; then
+elif [ "$CI_USE_APT_INSTALL" != "no" ]; then
${CI_RETRY_EXE} DOCKER_EXEC apt-get update
${CI_RETRY_EXE} DOCKER_EXEC apt-get install --no-install-recommends --no-upgrade -y $PACKAGES $DOCKER_PACKAGES
fi
@@ -77,6 +77,7 @@ if [ "$TRAVIS_OS_NAME" == "osx" ]; then
else
DOCKER_EXEC free -m -h
DOCKER_EXEC echo "Number of CPUs \(nproc\):" \$\(nproc\)
+ DOCKER_EXEC echo $(lscpu | grep Endian)
DOCKER_EXEC echo "Free disk space:"
DOCKER_EXEC df -h
fi
@@ -90,7 +91,7 @@ export DIR_FUZZ_IN=${DIR_QA_ASSETS}/fuzz_seed_corpus/
DOCKER_EXEC mkdir -p "${BASE_SCRATCH_DIR}/sanitizer-output/"
-if [ -z "$RUN_CI_ON_HOST" ]; then
+if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
echo "Create $BASE_ROOT_DIR"
DOCKER_EXEC rsync -a /ro_base/ $BASE_ROOT_DIR
fi
diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh
index 3b32513353..2a98f3fa5e 100755
--- a/ci/test/06_script_b.sh
+++ b/ci/test/06_script_b.sh
@@ -21,13 +21,20 @@ if [ -n "$USE_VALGRIND" ]; then
END_FOLD
fi
+bash -c "${CI_WAIT}" & # Print dots in case the tests take a long time to run
+
if [ "$RUN_UNIT_TESTS" = "true" ]; then
BEGIN_FOLD unit-tests
- bash -c "${CI_WAIT}" & # Print dots in case the unit tests take a long time to run
DOCKER_EXEC LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib make $MAKEJOBS check VERBOSE=1
END_FOLD
fi
+if [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then
+ BEGIN_FOLD unit-tests-seq
+ DOCKER_EXEC LD_LIBRARY_PATH=$DEPENDS_DIR/$HOST/lib "${BASE_ROOT_DIR}/build/bitcoin-*/src/test/test_bitcoin" --catch_system_errors=no -l test_suite
+ END_FOLD
+fi
+
if [ "$RUN_FUNCTIONAL_TESTS" = "true" ]; then
BEGIN_FOLD functional-tests
DOCKER_EXEC test/functional/test_runner.py --ci $MAKEJOBS --tmpdirprefix "${BASE_SCRATCH_DIR}/test_runner/" --ansi --combinedlogslen=4000 ${TEST_RUNNER_EXTRA} --quiet --failfast
diff --git a/configure.ac b/configure.ac
index a2d33ff5b6..71de55eeed 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,6 +1,6 @@
AC_PREREQ([2.69])
define(_CLIENT_VERSION_MAJOR, 0)
-define(_CLIENT_VERSION_MINOR, 19)
+define(_CLIENT_VERSION_MINOR, 20)
define(_CLIENT_VERSION_REVISION, 99)
define(_CLIENT_VERSION_BUILD, 0)
define(_CLIENT_VERSION_RC, 0)
diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py
index c05c38d513..b924698e56 100755
--- a/contrib/devtools/security-check.py
+++ b/contrib/devtools/security-check.py
@@ -206,6 +206,23 @@ def check_MACHO_NX(executable) -> bool:
return False
return True
+def check_MACHO_LAZY_BINDINGS(executable) -> bool:
+ '''
+ Check for no lazy bindings.
+ We don't use or check for MH_BINDATLOAD. See #18295.
+ '''
+ p = subprocess.Popen([OTOOL_CMD, '-l', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
+ (stdout, stderr) = p.communicate()
+ if p.returncode:
+ raise IOError('Error opening file')
+
+ for line in stdout.splitlines():
+ tokens = line.split()
+ if 'lazy_bind_off' in tokens or 'lazy_bind_size' in tokens:
+ if tokens[1] != '0':
+ return False
+ return True
+
CHECKS = {
'ELF': [
('PIE', check_ELF_PIE),
@@ -221,7 +238,8 @@ CHECKS = {
'MACHO': [
('PIE', check_MACHO_PIE),
('NOUNDEFS', check_MACHO_NOUNDEFS),
- ('NX', check_MACHO_NX)
+ ('NX', check_MACHO_NX),
+ ('LAZY_BINDINGS', check_MACHO_LAZY_BINDINGS)
]
}
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index da07080724..4960ec567a 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -965,7 +965,7 @@ Some good examples of scripted-diff:
- [scripted-diff: Rename InitInterfaces to NodeContext](https://github.com/bitcoin/bitcoin/commit/301bd41a2e6765b185bd55f4c541f9e27aeea29d)
uses an elegant script to replace occurrences of multiple terms in all source files.
-- [scripted-diff: Remove g_connman, g_banman globals](https://github.com/bitcoin/bitcoin/commit/301bd41a2e6765b185bd55f4c541f9e27aeea29d)
+- [scripted-diff: Remove g_connman, g_banman globals](https://github.com/bitcoin/bitcoin/commit/8922d7f6b751a3e6b3b9f6fb7961c442877fb65a)
replaces specific terms in a list of specific source files.
- [scripted-diff: Replace fprintf with tfm::format](https://github.com/bitcoin/bitcoin/commit/fac03ec43a15ad547161e37e53ea82482cc508f9)
diff --git a/doc/release-notes.md b/doc/release-notes.md
index cd6a4d6b59..db5aa0d8a7 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -1,5 +1,73 @@
-Please edit the release notes here:
+*After branching off for a major version release of Bitcoin Core, use this
+template to create the initial release notes draft.*
-https://github.com/bitcoin-core/bitcoin-devwiki/wiki/0.20.0-Release-Notes-Draft
+*The release notes draft is a temporary file that can be added to by anyone. See
+[/doc/developer-notes.md#release-notes](/doc/developer-notes.md#release-notes)
+for the process.*
+
+*Create the draft, named* "*version* Release Notes Draft"
+*(e.g. "0.20.0 Release Notes Draft"), as a collaborative wiki in:*
+
+https://github.com/bitcoin-core/bitcoin-devwiki/wiki/
*Before the final release, move the notes back to this git repository.*
+
+*version* Release Notes Draft
+===============================
+
+Bitcoin Core version *version* is now available from:
+
+ <https://bitcoincore.org/bin/bitcoin-core-*version*/>
+
+This release includes new features, various bug fixes and performance
+improvements, as well as updated translations.
+
+Please report bugs using the issue tracker at GitHub:
+
+ <https://github.com/bitcoin/bitcoin/issues>
+
+To receive security and update notifications, please subscribe to:
+
+ <https://bitcoincore.org/en/list/announcements/join/>
+
+How to Upgrade
+==============
+
+If you are running an older version, shut it down. Wait until it has completely
+shut down (which might take a few minutes for older versions), then run the
+installer (on Windows) or just copy over `/Applications/Bitcoin-Qt` (on Mac)
+or `bitcoind`/`bitcoin-qt` (on Linux).
+
+Upgrading directly from a version of Bitcoin Core that has reached its EOL is
+possible, but it might take some time if the datadir needs to be migrated. Old
+wallet versions of Bitcoin Core are generally supported.
+
+Compatibility
+==============
+
+Bitcoin Core is supported and extensively tested on operating systems using
+the Linux kernel, macOS 10.10+, and Windows 7 and newer. It is not recommended
+to use Bitcoin Core on unsupported systems.
+
+Bitcoin Core should also work on most other Unix-like systems but is not
+as frequently tested on them.
+
+From Bitcoin Core 0.17.0 onwards, macOS versions earlier than 10.10 are no
+longer supported, as Bitcoin Core is now built using Qt 5.9.x which requires
+macOS 10.10+. Additionally, Bitcoin Core does not yet change appearance when
+macOS "dark mode" is activated.
+
+In addition to previously supported CPU platforms, this release's pre-compiled
+distribution provides binaries for the RISC-V platform.
+
+Notable changes
+===============
+
+Credits
+=======
+
+Thanks to everyone who directly contributed to this release:
+
+
+As well as to everyone that helped with translations on
+[Transifex](https://www.transifex.com/bitcoin/bitcoin/).
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index d49db4ddcc..d53477793c 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -63,6 +63,7 @@ FUZZ_TARGETS = \
test/fuzz/parse_numbers \
test/fuzz/parse_script \
test/fuzz/parse_univalue \
+ test/fuzz/prevector \
test/fuzz/partial_merkle_tree_deserialize \
test/fuzz/partially_signed_transaction_deserialize \
test/fuzz/pow \
@@ -646,6 +647,12 @@ test_fuzz_parse_univalue_LDADD = $(FUZZ_SUITE_LD_COMMON)
test_fuzz_parse_univalue_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
test_fuzz_parse_univalue_SOURCES = test/fuzz/parse_univalue.cpp
+test_fuzz_prevector_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+test_fuzz_prevector_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+test_fuzz_prevector_LDADD = $(FUZZ_SUITE_LD_COMMON)
+test_fuzz_prevector_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
+test_fuzz_prevector_SOURCES = test/fuzz/prevector.cpp
+
test_fuzz_partial_merkle_tree_deserialize_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -DPARTIAL_MERKLE_TREE_DESERIALIZE=1
test_fuzz_partial_merkle_tree_deserialize_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
test_fuzz_partial_merkle_tree_deserialize_LDADD = $(FUZZ_SUITE_LD_COMMON)
diff --git a/src/init.cpp b/src/init.cpp
index 437e934093..92faa77059 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -291,13 +291,6 @@ void Shutdown(NodeContext& node)
}
#endif
- try {
- if (!fs::remove(GetPidFile())) {
- LogPrintf("%s: Unable to remove PID file: File does not exist\n", __func__);
- }
- } catch (const fs::filesystem_error& e) {
- LogPrintf("%s: Unable to remove PID file: %s\n", __func__, fsbridge::get_filesystem_error_message(e));
- }
node.chain_clients.clear();
UnregisterAllValidationInterfaces();
GetMainSignals().UnregisterBackgroundSignalScheduler();
@@ -305,6 +298,15 @@ void Shutdown(NodeContext& node)
ECC_Stop();
if (node.mempool) node.mempool = nullptr;
node.scheduler.reset();
+
+ try {
+ if (!fs::remove(GetPidFile())) {
+ LogPrintf("%s: Unable to remove PID file: File does not exist\n", __func__);
+ }
+ } catch (const fs::filesystem_error& e) {
+ LogPrintf("%s: Unable to remove PID file: %s\n", __func__, fsbridge::get_filesystem_error_message(e));
+ }
+
LogPrintf("%s: done\n", __func__);
}
diff --git a/src/net.h b/src/net.h
index a72af83eef..0dbd5e0549 100644
--- a/src/net.h
+++ b/src/net.h
@@ -797,8 +797,8 @@ public:
std::vector<CAddress> vAddrToSend;
const std::unique_ptr<CRollingBloomFilter> m_addr_known;
bool fGetAddr{false};
- int64_t nNextAddrSend GUARDED_BY(cs_sendProcessing){0};
- int64_t nNextLocalAddrSend GUARDED_BY(cs_sendProcessing){0};
+ std::chrono::microseconds m_next_addr_send GUARDED_BY(cs_sendProcessing){0};
+ std::chrono::microseconds m_next_local_addr_send GUARDED_BY(cs_sendProcessing){0};
bool IsAddrRelayPeer() const { return m_addr_known != nullptr; }
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index f63d048aac..d3089c4176 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -97,10 +97,10 @@ void EraseOrphansFor(NodeId peer);
/** Increase a node's misbehavior score. */
void Misbehaving(NodeId nodeid, int howmuch, const std::string& message="") EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-/** Average delay between local address broadcasts in seconds. */
-static constexpr unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24 * 60 * 60;
-/** Average delay between peer address broadcasts in seconds. */
-static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL = 30;
+/** Average delay between local address broadcasts */
+static constexpr std::chrono::hours AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24};
+/** Average delay between peer address broadcasts */
+static constexpr std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL{30};
/** Average delay between trickled inventory transmissions in seconds.
* Blocks and whitelisted receivers bypass this, outbound peers get half this delay. */
static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
@@ -1365,7 +1365,7 @@ void RelayTransaction(const uint256& txid, const CConnman& connman)
});
}
-static void RelayAddress(const CAddress& addr, bool fReachable, CConnman* connman)
+static void RelayAddress(const CAddress& addr, bool fReachable, const CConnman& connman)
{
unsigned int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
@@ -1373,7 +1373,7 @@ static void RelayAddress(const CAddress& addr, bool fReachable, CConnman* connma
// Use deterministic randomness to send to the same nodes for 24 hours
// at a time so the m_addr_knowns of the chosen nodes prevent repeats
uint64_t hashAddr = addr.GetHash();
- const CSipHasher hasher = connman->GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
+ const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24 * 60 * 60));
FastRandomContext insecure_rand;
std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
@@ -1398,7 +1398,7 @@ static void RelayAddress(const CAddress& addr, bool fReachable, CConnman* connma
}
};
- connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
+ connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
}
void static ProcessGetBlockData(CNode* pfrom, const CChainParams& chainparams, const CInv& inv, CConnman* connman)
@@ -2192,7 +2192,7 @@ bool ProcessMessage(CNode* pfrom, const std::string& msg_type, CDataStream& vRec
if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
{
// Relay to a limited number of other nodes
- RelayAddress(addr, fReachable, connman);
+ RelayAddress(addr, fReachable, *connman);
}
// Do not store addresses outside our network
if (fReachable)
@@ -3583,16 +3583,16 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
int64_t nNow = GetTimeMicros();
auto current_time = GetTime<std::chrono::microseconds>();
- if (pto->IsAddrRelayPeer() && !::ChainstateActive().IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
+ if (pto->IsAddrRelayPeer() && !::ChainstateActive().IsInitialBlockDownload() && pto->m_next_local_addr_send < current_time) {
AdvertiseLocal(pto);
- pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
+ pto->m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
}
//
// Message: addr
//
- if (pto->IsAddrRelayPeer() && pto->nNextAddrSend < nNow) {
- pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
+ if (pto->IsAddrRelayPeer() && pto->m_next_addr_send < current_time) {
+ pto->m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
std::vector<CAddress> vAddr;
vAddr.reserve(pto->vAddrToSend.size());
assert(pto->m_addr_known);
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index f71ed99652..142d4c7e63 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -1078,13 +1078,11 @@ UniValue gettxout(const JSONRPCRequest& request)
static UniValue verifychain(const JSONRPCRequest& request)
{
- int nCheckLevel = gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL);
- int nCheckDepth = gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS);
RPCHelpMan{"verifychain",
"\nVerifies blockchain database.\n",
{
- {"checklevel", RPCArg::Type::NUM, /* default */ strprintf("%d, range=0-4", nCheckLevel), "How thorough the block verification is."},
- {"nblocks", RPCArg::Type::NUM, /* default */ strprintf("%d, 0=all", nCheckDepth), "The number of blocks to check."},
+ {"checklevel", RPCArg::Type::NUM, /* default */ strprintf("%d, range=0-4", DEFAULT_CHECKLEVEL), "How thorough the block verification is."},
+ {"nblocks", RPCArg::Type::NUM, /* default */ strprintf("%d, 0=all", DEFAULT_CHECKBLOCKS), "The number of blocks to check."},
},
RPCResult{
RPCResult::Type::BOOL, "", "Verified or not"},
@@ -1094,15 +1092,12 @@ static UniValue verifychain(const JSONRPCRequest& request)
},
}.Check(request);
- LOCK(cs_main);
+ const int check_level(request.params[0].isNull() ? DEFAULT_CHECKLEVEL : request.params[0].get_int());
+ const int check_depth{request.params[1].isNull() ? DEFAULT_CHECKBLOCKS : request.params[1].get_int()};
- if (!request.params[0].isNull())
- nCheckLevel = request.params[0].get_int();
- if (!request.params[1].isNull())
- nCheckDepth = request.params[1].get_int();
+ LOCK(cs_main);
- return CVerifyDB().VerifyDB(
- Params(), &::ChainstateActive().CoinsTip(), nCheckLevel, nCheckDepth);
+ return CVerifyDB().VerifyDB(Params(), &::ChainstateActive().CoinsTip(), check_level, check_depth);
}
static void BuriedForkDescPushBack(UniValue& softforks, const std::string &name, int height) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
diff --git a/src/test/fuzz/prevector.cpp b/src/test/fuzz/prevector.cpp
new file mode 100644
index 0000000000..0e51ee3c95
--- /dev/null
+++ b/src/test/fuzz/prevector.cpp
@@ -0,0 +1,263 @@
+// Copyright (c) 2015-2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+
+#include <vector>
+#include <prevector.h>
+
+#include <reverse_iterator.h>
+#include <serialize.h>
+#include <streams.h>
+
+namespace {
+
+template<unsigned int N, typename T>
+class prevector_tester {
+ typedef std::vector<T> realtype;
+ realtype real_vector;
+ realtype real_vector_alt;
+
+ typedef prevector<N, T> pretype;
+ pretype pre_vector;
+ pretype pre_vector_alt;
+
+ typedef typename pretype::size_type Size;
+
+public:
+ void test() const {
+ const pretype& const_pre_vector = pre_vector;
+ assert(real_vector.size() == pre_vector.size());
+ assert(real_vector.empty() == pre_vector.empty());
+ for (Size s = 0; s < real_vector.size(); s++) {
+ assert(real_vector[s] == pre_vector[s]);
+ assert(&(pre_vector[s]) == &(pre_vector.begin()[s]));
+ assert(&(pre_vector[s]) == &*(pre_vector.begin() + s));
+ assert(&(pre_vector[s]) == &*((pre_vector.end() + s) - real_vector.size()));
+ }
+ // assert(realtype(pre_vector) == real_vector);
+ assert(pretype(real_vector.begin(), real_vector.end()) == pre_vector);
+ assert(pretype(pre_vector.begin(), pre_vector.end()) == pre_vector);
+ size_t pos = 0;
+ for (const T& v : pre_vector) {
+ assert(v == real_vector[pos]);
+ ++pos;
+ }
+ for (const T& v : reverse_iterate(pre_vector)) {
+ --pos;
+ assert(v == real_vector[pos]);
+ }
+ for (const T& v : const_pre_vector) {
+ assert(v == real_vector[pos]);
+ ++pos;
+ }
+ for (const T& v : reverse_iterate(const_pre_vector)) {
+ --pos;
+ assert(v == real_vector[pos]);
+ }
+ CDataStream ss1(SER_DISK, 0);
+ CDataStream ss2(SER_DISK, 0);
+ ss1 << real_vector;
+ ss2 << pre_vector;
+ assert(ss1.size() == ss2.size());
+ for (Size s = 0; s < ss1.size(); s++) {
+ assert(ss1[s] == ss2[s]);
+ }
+ }
+
+ void resize(Size s) {
+ real_vector.resize(s);
+ assert(real_vector.size() == s);
+ pre_vector.resize(s);
+ assert(pre_vector.size() == s);
+ }
+
+ void reserve(Size s) {
+ real_vector.reserve(s);
+ assert(real_vector.capacity() >= s);
+ pre_vector.reserve(s);
+ assert(pre_vector.capacity() >= s);
+ }
+
+ void insert(Size position, const T& value) {
+ real_vector.insert(real_vector.begin() + position, value);
+ pre_vector.insert(pre_vector.begin() + position, value);
+ }
+
+ void insert(Size position, Size count, const T& value) {
+ real_vector.insert(real_vector.begin() + position, count, value);
+ pre_vector.insert(pre_vector.begin() + position, count, value);
+ }
+
+ template<typename I>
+ void insert_range(Size position, I first, I last) {
+ real_vector.insert(real_vector.begin() + position, first, last);
+ pre_vector.insert(pre_vector.begin() + position, first, last);
+ }
+
+ void erase(Size position) {
+ real_vector.erase(real_vector.begin() + position);
+ pre_vector.erase(pre_vector.begin() + position);
+ }
+
+ void erase(Size first, Size last) {
+ real_vector.erase(real_vector.begin() + first, real_vector.begin() + last);
+ pre_vector.erase(pre_vector.begin() + first, pre_vector.begin() + last);
+ }
+
+ void update(Size pos, const T& value) {
+ real_vector[pos] = value;
+ pre_vector[pos] = value;
+ }
+
+ void push_back(const T& value) {
+ real_vector.push_back(value);
+ pre_vector.push_back(value);
+ }
+
+ void pop_back() {
+ real_vector.pop_back();
+ pre_vector.pop_back();
+ }
+
+ void clear() {
+ real_vector.clear();
+ pre_vector.clear();
+ }
+
+ void assign(Size n, const T& value) {
+ real_vector.assign(n, value);
+ pre_vector.assign(n, value);
+ }
+
+ Size size() const {
+ return real_vector.size();
+ }
+
+ Size capacity() const {
+ return pre_vector.capacity();
+ }
+
+ void shrink_to_fit() {
+ pre_vector.shrink_to_fit();
+ }
+
+ void swap() {
+ real_vector.swap(real_vector_alt);
+ pre_vector.swap(pre_vector_alt);
+ }
+
+ void move() {
+ real_vector = std::move(real_vector_alt);
+ real_vector_alt.clear();
+ pre_vector = std::move(pre_vector_alt);
+ pre_vector_alt.clear();
+ }
+
+ void copy() {
+ real_vector = real_vector_alt;
+ pre_vector = pre_vector_alt;
+ }
+
+ void resize_uninitialized(realtype values) {
+ size_t r = values.size();
+ size_t s = real_vector.size() / 2;
+ if (real_vector.capacity() < s + r) {
+ real_vector.reserve(s + r);
+ }
+ real_vector.resize(s);
+ pre_vector.resize_uninitialized(s);
+ for (auto v : values) {
+ real_vector.push_back(v);
+ }
+ auto p = pre_vector.size();
+ pre_vector.resize_uninitialized(p + r);
+ for (auto v : values) {
+ pre_vector[p] = v;
+ ++p;
+ }
+ }
+};
+
+}
+
+void test_one_input(const std::vector<uint8_t>& buffer)
+{
+ FuzzedDataProvider prov(buffer.data(), buffer.size());
+ prevector_tester<8, int> test;
+
+ while (prov.remaining_bytes()) {
+ switch (prov.ConsumeIntegralInRange<int>(0, 13 + 3 * (test.size() > 0))) {
+ case 0:
+ test.insert(prov.ConsumeIntegralInRange<size_t>(0, test.size()), prov.ConsumeIntegral<int>());
+ break;
+ case 1:
+ test.resize(std::max(0, std::min(30, (int)test.size() + prov.ConsumeIntegralInRange<int>(0, 4) - 2)));
+ break;
+ case 2:
+ test.insert(prov.ConsumeIntegralInRange<size_t>(0, test.size()), 1 + prov.ConsumeBool(), prov.ConsumeIntegral<int>());
+ break;
+ case 3: {
+ int del = prov.ConsumeIntegralInRange<int>(0, test.size());
+ int beg = prov.ConsumeIntegralInRange<int>(0, test.size() - del);
+ test.erase(beg, beg + del);
+ break;
+ }
+ case 4:
+ test.push_back(prov.ConsumeIntegral<int>());
+ break;
+ case 5: {
+ int values[4];
+ int num = 1 + prov.ConsumeIntegralInRange<int>(0, 3);
+ for (int k = 0; k < num; ++k) {
+ values[k] = prov.ConsumeIntegral<int>();
+ }
+ test.insert_range(prov.ConsumeIntegralInRange<size_t>(0, test.size()), values, values + num);
+ break;
+ }
+ case 6: {
+ int num = 1 + prov.ConsumeIntegralInRange<int>(0, 15);
+ std::vector<int> values(num);
+ for (auto& v : values) {
+ v = prov.ConsumeIntegral<int>();
+ }
+ test.resize_uninitialized(values);
+ break;
+ }
+ case 7:
+ test.reserve(prov.ConsumeIntegralInRange<size_t>(0, 32767));
+ break;
+ case 8:
+ test.shrink_to_fit();
+ break;
+ case 9:
+ test.clear();
+ break;
+ case 10:
+ test.assign(prov.ConsumeIntegralInRange<size_t>(0, 32767), prov.ConsumeIntegral<int>());
+ break;
+ case 11:
+ test.swap();
+ break;
+ case 12:
+ test.copy();
+ break;
+ case 13:
+ test.move();
+ break;
+ case 14:
+ test.update(prov.ConsumeIntegralInRange<size_t>(0, test.size() - 1), prov.ConsumeIntegral<int>());
+ break;
+ case 15:
+ test.erase(prov.ConsumeIntegralInRange<size_t>(0, test.size() - 1));
+ break;
+ case 16:
+ test.pop_back();
+ break;
+ }
+ }
+
+ test.test();
+}
diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py
new file mode 100755
index 0000000000..6046237101
--- /dev/null
+++ b/test/functional/p2p_addr_relay.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test addr relay
+"""
+
+from test_framework.messages import (
+ CAddress,
+ NODE_NETWORK,
+ NODE_WITNESS,
+ msg_addr,
+)
+from test_framework.mininode import (
+ P2PInterface,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+)
+import time
+
+ADDRS = []
+for i in range(10):
+ addr = CAddress()
+ addr.time = int(time.time()) + i
+ addr.nServices = NODE_NETWORK | NODE_WITNESS
+ addr.ip = "123.123.123.{}".format(i % 256)
+ addr.port = 8333 + i
+ ADDRS.append(addr)
+
+
+class AddrReceiver(P2PInterface):
+ def on_addr(self, message):
+ for addr in message.addrs:
+ assert_equal(addr.nServices, 9)
+ assert addr.ip.startswith('123.123.123.')
+ assert (8333 <= addr.port < 8343)
+
+
+class AddrTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = False
+ self.num_nodes = 1
+
+ def run_test(self):
+ self.log.info('Create connection that sends addr messages')
+ addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
+ msg = msg_addr()
+
+ self.log.info('Send too large addr message')
+ msg.addrs = ADDRS * 101
+ with self.nodes[0].assert_debug_log(['message addr size() = 1010']):
+ addr_source.send_and_ping(msg)
+
+ self.log.info('Check that addr message content is relayed and added to addrman')
+ addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver())
+ msg.addrs = ADDRS
+ with self.nodes[0].assert_debug_log([
+ 'Added 10 addresses from 127.0.0.1: 0 tried',
+ 'received: addr (301 bytes) peer=0',
+ 'sending addr (301 bytes) peer=1',
+ ]):
+ addr_source.send_and_ping(msg)
+ self.nodes[0].setmocktime(int(time.time()) + 30 * 60)
+ addr_receiver.sync_with_ping()
+
+
+if __name__ == '__main__':
+ AddrTest().main()
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 8260c917fe..53bc5ca9e7 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -233,6 +233,10 @@ class TestNode():
# -342 Service unavailable, RPC server started but is shutting down due to error
if e.error['code'] != -28 and e.error['code'] != -342:
raise # unknown JSON RPC exception
+ except ConnectionResetError:
+ # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount
+ # succeeds. Try again to properly raise the FailedToStartError
+ pass
except ValueError as e: # cookie file not found and no rpcuser or rpcassword. bitcoind still starting
if "No RPC credentials" not in str(e):
raise
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 2f307750a9..faa2dee4ed 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -144,6 +144,7 @@ BASE_SCRIPTS = [
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
+ 'p2p_addr_relay.py',
'rpc_net.py',
'wallet_keypool.py',
'p2p_mempool.py',
diff --git a/test/fuzz/test_runner.py b/test/fuzz/test_runner.py
index bb93060739..87f4255a6f 100755
--- a/test/fuzz/test_runner.py
+++ b/test/fuzz/test_runner.py
@@ -147,6 +147,7 @@ def merge_inputs(*, corpus, test_list, build_dir, merge_dir):
args = [
os.path.join(build_dir, 'src', 'test', 'fuzz', t),
'-merge=1',
+ '-use_value_profile=1', # Also done by oss-fuzz https://github.com/google/oss-fuzz/issues/1406#issuecomment-387790487
os.path.join(corpus, t),
os.path.join(merge_dir, t),
]